skb                94 arch/m68k/emu/nfeth.c 	struct sk_buff *skb;
skb               107 arch/m68k/emu/nfeth.c 	skb = dev_alloc_skb(pktlen + 2);
skb               108 arch/m68k/emu/nfeth.c 	if (!skb) {
skb               115 arch/m68k/emu/nfeth.c 	skb->dev = dev;
skb               116 arch/m68k/emu/nfeth.c 	skb_reserve(skb, 2);		/* 16 Byte align  */
skb               117 arch/m68k/emu/nfeth.c 	skb_put(skb, pktlen);		/* make room */
skb               118 arch/m68k/emu/nfeth.c 	nf_call(nfEtherID + XIF_READBLOCK, priv->ethX, virt_to_phys(skb->data),
skb               121 arch/m68k/emu/nfeth.c 	skb->protocol = eth_type_trans(skb, dev);
skb               122 arch/m68k/emu/nfeth.c 	netif_rx(skb);
skb               144 arch/m68k/emu/nfeth.c static int nfeth_xmit(struct sk_buff *skb, struct net_device *dev)
skb               150 arch/m68k/emu/nfeth.c 	data = skb->data;
skb               151 arch/m68k/emu/nfeth.c 	len = skb->len;
skb               166 arch/m68k/emu/nfeth.c 	dev_kfree_skb(skb);
skb                71 arch/mips/net/bpf_jit.h 	extern u8 func(unsigned long *skb, int offset); \
skb                72 arch/mips/net/bpf_jit.h 	extern u8 func##_negative(unsigned long *skb, int offset); \
skb                73 arch/mips/net/bpf_jit.h 	extern u8 func##_positive(unsigned long *skb, int offset)
skb                42 arch/um/drivers/daemon_kern.c static int daemon_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                44 arch/um/drivers/daemon_kern.c 	return net_recvfrom(fd, skb_mac_header(skb),
skb                45 arch/um/drivers/daemon_kern.c 			    skb->dev->mtu + ETH_HEADER_OTHER);
skb                48 arch/um/drivers/daemon_kern.c static int daemon_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                50 arch/um/drivers/daemon_kern.c 	return daemon_user_write(fd, skb->data, skb->len,
skb                77 arch/um/drivers/net_kern.c 	struct sk_buff *skb;
skb                80 arch/um/drivers/net_kern.c 	skb = dev_alloc_skb(lp->max_packet);
skb                81 arch/um/drivers/net_kern.c 	if (skb == NULL) {
skb                89 arch/um/drivers/net_kern.c 	skb->dev = dev;
skb                90 arch/um/drivers/net_kern.c 	skb_put(skb, lp->max_packet);
skb                91 arch/um/drivers/net_kern.c 	skb_reset_mac_header(skb);
skb                92 arch/um/drivers/net_kern.c 	pkt_len = (*lp->read)(lp->fd, skb, lp);
skb                95 arch/um/drivers/net_kern.c 		skb_trim(skb, pkt_len);
skb                96 arch/um/drivers/net_kern.c 		skb->protocol = (*lp->protocol)(skb);
skb                98 arch/um/drivers/net_kern.c 		dev->stats.rx_bytes += skb->len;
skb               100 arch/um/drivers/net_kern.c 		netif_rx(skb);
skb               104 arch/um/drivers/net_kern.c 	kfree_skb(skb);
skb               207 arch/um/drivers/net_kern.c static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               217 arch/um/drivers/net_kern.c 	len = (*lp->write)(lp->fd, skb, lp);
skb               218 arch/um/drivers/net_kern.c 	skb_tx_timestamp(skb);
skb               220 arch/um/drivers/net_kern.c 	if (len == skb->len) {
skb               222 arch/um/drivers/net_kern.c 		dev->stats.tx_bytes += skb->len;
skb               240 arch/um/drivers/net_kern.c 	dev_consume_skb_any(skb);
skb               901 arch/um/drivers/net_kern.c unsigned short eth_protocol(struct sk_buff *skb)
skb               903 arch/um/drivers/net_kern.c 	return eth_type_trans(skb, skb->dev);
skb                34 arch/um/drivers/pcap_kern.c static int pcap_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                36 arch/um/drivers/pcap_kern.c 	return pcap_user_read(fd, skb_mac_header(skb),
skb                37 arch/um/drivers/pcap_kern.c 			      skb->dev->mtu + ETH_HEADER_OTHER,
skb                41 arch/um/drivers/pcap_kern.c static int pcap_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                47 arch/um/drivers/slip_kern.c static int slip_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                49 arch/um/drivers/slip_kern.c 	return slip_user_read(fd, skb_mac_header(skb), skb->dev->mtu,
skb                53 arch/um/drivers/slip_kern.c static int slip_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                55 arch/um/drivers/slip_kern.c 	return slip_user_write(fd, skb->data, skb->len,
skb                52 arch/um/drivers/slirp_kern.c static int slirp_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                54 arch/um/drivers/slirp_kern.c 	return slirp_user_read(fd, skb_mac_header(skb), skb->dev->mtu,
skb                58 arch/um/drivers/slirp_kern.c static int slirp_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                60 arch/um/drivers/slirp_kern.c 	return slirp_user_write(fd, skb->data, skb->len,
skb                51 arch/um/drivers/umcast_kern.c static int umcast_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                53 arch/um/drivers/umcast_kern.c 	return net_recvfrom(fd, skb_mac_header(skb),
skb                54 arch/um/drivers/umcast_kern.c 			    skb->dev->mtu + ETH_HEADER_OTHER);
skb                57 arch/um/drivers/umcast_kern.c static int umcast_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                59 arch/um/drivers/umcast_kern.c 	return umcast_user_write(fd, skb->data, skb->len,
skb                39 arch/um/drivers/vde_kern.c static int vde_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                44 arch/um/drivers/vde_kern.c 		return vde_user_read(pri->conn, skb_mac_header(skb),
skb                45 arch/um/drivers/vde_kern.c 				     skb->dev->mtu + ETH_HEADER_OTHER);
skb                51 arch/um/drivers/vde_kern.c static int vde_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                56 arch/um/drivers/vde_kern.c 		return vde_user_write((void *)pri->conn, skb->data,
skb                57 arch/um/drivers/vde_kern.c 				      skb->len);
skb               269 arch/um/drivers/vector_kern.c 	struct sk_buff *skb,
skb               276 arch/um/drivers/vector_kern.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb               278 arch/um/drivers/vector_kern.c 		if (skb_linearize(skb) != 0)
skb               283 arch/um/drivers/vector_kern.c 		vp->form_header(iov[iov_index].iov_base, skb, vp);
skb               286 arch/um/drivers/vector_kern.c 	iov[iov_index].iov_base = skb->data;
skb               288 arch/um/drivers/vector_kern.c 		iov[iov_index].iov_len = skb->len - skb->data_len;
skb               291 arch/um/drivers/vector_kern.c 		iov[iov_index].iov_len = skb->len;
skb               294 arch/um/drivers/vector_kern.c 		skb_frag = &skb_shinfo(skb)->frags[frag];
skb               309 arch/um/drivers/vector_kern.c static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
skb               322 arch/um/drivers/vector_kern.c 	if (skb)
skb               323 arch/um/drivers/vector_kern.c 		packet_len = skb->len;
skb               327 arch/um/drivers/vector_kern.c 		*(qi->skbuff_vector + qi->tail) = skb;
skb               331 arch/um/drivers/vector_kern.c 			skb,
skb               346 arch/um/drivers/vector_kern.c 	if (skb != NULL) {
skb               347 arch/um/drivers/vector_kern.c 		packet_len = skb->len;
skb               348 arch/um/drivers/vector_kern.c 		dev_consume_skb_any(skb);
skb               357 arch/um/drivers/vector_kern.c 	struct sk_buff *skb;
skb               362 arch/um/drivers/vector_kern.c 		skb = *(qi->skbuff_vector + skb_index);
skb               366 arch/um/drivers/vector_kern.c 		bytes_compl += skb->len;
skb               368 arch/um/drivers/vector_kern.c 		dev_consume_skb_any(skb);
skb               822 arch/um/drivers/vector_kern.c 	struct sk_buff *skb;
skb               837 arch/um/drivers/vector_kern.c 	skb = prep_skb(vp, &hdr);
skb               839 arch/um/drivers/vector_kern.c 	if (skb == NULL) {
skb               855 arch/um/drivers/vector_kern.c 	if (skb != NULL) {
skb               859 arch/um/drivers/vector_kern.c 					vp->header_rxbuffer, skb, vp);
skb               861 arch/um/drivers/vector_kern.c 					dev_kfree_skb_irq(skb);
skb               868 arch/um/drivers/vector_kern.c 					skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               871 arch/um/drivers/vector_kern.c 			pskb_trim(skb, pkt_len - vp->rx_header_size);
skb               872 arch/um/drivers/vector_kern.c 			skb->protocol = eth_type_trans(skb, skb->dev);
skb               873 arch/um/drivers/vector_kern.c 			vp->dev->stats.rx_bytes += skb->len;
skb               875 arch/um/drivers/vector_kern.c 			netif_rx(skb);
skb               877 arch/um/drivers/vector_kern.c 			dev_kfree_skb_irq(skb);
skb               890 arch/um/drivers/vector_kern.c static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
skb               896 arch/um/drivers/vector_kern.c 	iov_count = prep_msg(vp, skb, (struct iovec *) &iov);
skb               914 arch/um/drivers/vector_kern.c 		vp->dev->stats.tx_bytes += skb->len;
skb               919 arch/um/drivers/vector_kern.c 	consume_skb(skb);
skb               923 arch/um/drivers/vector_kern.c 	consume_skb(skb);
skb               938 arch/um/drivers/vector_kern.c 	struct sk_buff *skb;
skb               968 arch/um/drivers/vector_kern.c 		skb = (*skbuff_vector);
skb               973 arch/um/drivers/vector_kern.c 					skb,
skb               982 arch/um/drivers/vector_kern.c 					dev_kfree_skb_irq(skb);
skb               988 arch/um/drivers/vector_kern.c 					skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               991 arch/um/drivers/vector_kern.c 			pskb_trim(skb,
skb               993 arch/um/drivers/vector_kern.c 			skb->protocol = eth_type_trans(skb, skb->dev);
skb               998 arch/um/drivers/vector_kern.c 			vp->dev->stats.rx_bytes += skb->len;
skb              1000 arch/um/drivers/vector_kern.c 			netif_rx(skb);
skb              1006 arch/um/drivers/vector_kern.c 			if (skb != NULL)
skb              1007 arch/um/drivers/vector_kern.c 				dev_kfree_skb_irq(skb);
skb              1040 arch/um/drivers/vector_kern.c static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1053 arch/um/drivers/vector_kern.c 		writev_tx(vp, skb);
skb              1061 arch/um/drivers/vector_kern.c 	netdev_sent_queue(vp->dev, skb->len);
skb              1062 arch/um/drivers/vector_kern.c 	queue_depth = vector_enqueue(vp->tx_queue, skb);
skb              1078 arch/um/drivers/vector_kern.c 	if (skb->len < TX_SMALL_PACKET) {
skb               109 arch/um/drivers/vector_kern.h 		struct sk_buff *skb, struct vector_private *vp);
skb               111 arch/um/drivers/vector_kern.h 		struct sk_buff *skb, struct vector_private *vp);
skb                67 arch/um/drivers/vector_transports.c 	struct sk_buff *skb, struct vector_private *vp)
skb                97 arch/um/drivers/vector_transports.c 		struct sk_buff *skb, struct vector_private *vp)
skb               115 arch/um/drivers/vector_transports.c 		struct sk_buff *skb, struct vector_private *vp)
skb               120 arch/um/drivers/vector_transports.c 		skb,
skb               131 arch/um/drivers/vector_transports.c 	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
skb               166 arch/um/drivers/vector_transports.c 	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
skb               197 arch/um/drivers/vector_transports.c 	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
skb               212 arch/um/drivers/vector_transports.c 	virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());
skb                37 arch/um/include/shared/net_kern.h 	int (*read)(int, struct sk_buff *skb, struct uml_net_private *);
skb                38 arch/um/include/shared/net_kern.h 	int (*write)(int, struct sk_buff *skb, struct uml_net_private *);
skb                48 arch/um/include/shared/net_kern.h 	int (*read)(int, struct sk_buff *skb, struct uml_net_private *);
skb                49 arch/um/include/shared/net_kern.h 	int (*write)(int, struct sk_buff *skb, struct uml_net_private *);
skb                67 arch/um/include/shared/net_kern.h extern unsigned short eth_protocol(struct sk_buff *skb);
skb                39 arch/um/os-Linux/drivers/ethertap_kern.c static int etap_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                43 arch/um/os-Linux/drivers/ethertap_kern.c 	len = net_recvfrom(fd, skb_mac_header(skb),
skb                44 arch/um/os-Linux/drivers/ethertap_kern.c 			   skb->dev->mtu + 2 + ETH_HEADER_ETHERTAP);
skb                48 arch/um/os-Linux/drivers/ethertap_kern.c 	skb_pull(skb, 2);
skb                53 arch/um/os-Linux/drivers/ethertap_kern.c static int etap_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                55 arch/um/os-Linux/drivers/ethertap_kern.c 	skb_push(skb, 2);
skb                56 arch/um/os-Linux/drivers/ethertap_kern.c 	return net_send(fd, skb->data, skb->len);
skb                38 arch/um/os-Linux/drivers/tuntap_kern.c static int tuntap_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                40 arch/um/os-Linux/drivers/tuntap_kern.c 	return net_read(fd, skb_mac_header(skb),
skb                41 arch/um/os-Linux/drivers/tuntap_kern.c 			skb->dev->mtu + ETH_HEADER_OTHER);
skb                44 arch/um/os-Linux/drivers/tuntap_kern.c static int tuntap_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
skb                46 arch/um/os-Linux/drivers/tuntap_kern.c 	return net_write(fd, skb->data, skb->len);
skb                87 arch/xtensa/platforms/iss/network.c 		int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
skb                88 arch/xtensa/platforms/iss/network.c 		int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
skb                89 arch/xtensa/platforms/iss/network.c 		unsigned short (*protocol)(struct sk_buff *skb);
skb               196 arch/xtensa/platforms/iss/network.c static int tuntap_read(struct iss_net_private *lp, struct sk_buff **skb)
skb               199 arch/xtensa/platforms/iss/network.c 			(*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER);
skb               202 arch/xtensa/platforms/iss/network.c static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb)
skb               204 arch/xtensa/platforms/iss/network.c 	return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
skb               207 arch/xtensa/platforms/iss/network.c unsigned short tuntap_protocol(struct sk_buff *skb)
skb               209 arch/xtensa/platforms/iss/network.c 	return eth_type_trans(skb, skb->dev);
skb               276 arch/xtensa/platforms/iss/network.c 	struct sk_buff *skb;
skb               285 arch/xtensa/platforms/iss/network.c 	skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER);
skb               286 arch/xtensa/platforms/iss/network.c 	if (skb == NULL) {
skb               291 arch/xtensa/platforms/iss/network.c 	skb_reserve(skb, 2);
skb               295 arch/xtensa/platforms/iss/network.c 	skb->dev = dev;
skb               296 arch/xtensa/platforms/iss/network.c 	skb_reset_mac_header(skb);
skb               297 arch/xtensa/platforms/iss/network.c 	pkt_len = lp->tp.read(lp, &skb);
skb               298 arch/xtensa/platforms/iss/network.c 	skb_put(skb, pkt_len);
skb               301 arch/xtensa/platforms/iss/network.c 		skb_trim(skb, pkt_len);
skb               302 arch/xtensa/platforms/iss/network.c 		skb->protocol = lp->tp.protocol(skb);
skb               304 arch/xtensa/platforms/iss/network.c 		lp->stats.rx_bytes += skb->len;
skb               306 arch/xtensa/platforms/iss/network.c 		netif_rx_ni(skb);
skb               309 arch/xtensa/platforms/iss/network.c 	kfree_skb(skb);
skb               413 arch/xtensa/platforms/iss/network.c static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               421 arch/xtensa/platforms/iss/network.c 	len = lp->tp.write(lp, &skb);
skb               423 arch/xtensa/platforms/iss/network.c 	if (len == skb->len) {
skb               425 arch/xtensa/platforms/iss/network.c 		lp->stats.tx_bytes += skb->len;
skb               443 arch/xtensa/platforms/iss/network.c 	dev_kfree_skb(skb);
skb               359 crypto/ablkcipher.c static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               373 crypto/ablkcipher.c 	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
skb               377 crypto/ablkcipher.c static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                27 crypto/acompress.c static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                35 crypto/acompress.c 	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
skb                38 crypto/acompress.c static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               151 crypto/aead.c  static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               165 crypto/aead.c  	return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead);
skb               168 crypto/aead.c  static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               513 crypto/ahash.c static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               524 crypto/ahash.c 	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
skb               527 crypto/ahash.c static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                24 crypto/akcipher.c static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                32 crypto/akcipher.c 	return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
skb                36 crypto/akcipher.c static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               501 crypto/blkcipher.c static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               515 crypto/blkcipher.c 	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
skb               519 crypto/blkcipher.c static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                72 crypto/crypto_user_base.c static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
skb                84 crypto/crypto_user_base.c 	return nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
skb                88 crypto/crypto_user_base.c static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
skb                96 crypto/crypto_user_base.c 	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
skb               100 crypto/crypto_user_base.c 			     struct crypto_user_alg *ualg, struct sk_buff *skb)
skb               115 crypto/crypto_user_base.c 	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
skb               122 crypto/crypto_user_base.c 		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(rl), &rl))
skb               128 crypto/crypto_user_base.c 		if (alg->cra_type->report(skb, alg))
skb               136 crypto/crypto_user_base.c 		if (crypto_report_cipher(skb, alg))
skb               141 crypto/crypto_user_base.c 		if (crypto_report_comp(skb, alg))
skb               158 crypto/crypto_user_base.c 	struct sk_buff *skb = info->out_skb;
skb               163 crypto/crypto_user_base.c 	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
skb               172 crypto/crypto_user_base.c 	err = crypto_report_one(alg, ualg, skb);
skb               174 crypto/crypto_user_base.c 		nlmsg_cancel(skb, nlh);
skb               178 crypto/crypto_user_base.c 	nlmsg_end(skb, nlh);
skb               190 crypto/crypto_user_base.c 	struct sk_buff *skb;
skb               202 crypto/crypto_user_base.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb               203 crypto/crypto_user_base.c 	if (!skb)
skb               207 crypto/crypto_user_base.c 	info.out_skb = skb;
skb               217 crypto/crypto_user_base.c 		kfree_skb(skb);
skb               221 crypto/crypto_user_base.c 	return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
skb               224 crypto/crypto_user_base.c static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
skb               232 crypto/crypto_user_base.c 	info.in_skb = cb->skb;
skb               233 crypto/crypto_user_base.c 	info.out_skb = skb;
skb               249 crypto/crypto_user_base.c 	res = skb->len;
skb               260 crypto/crypto_user_base.c static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               268 crypto/crypto_user_base.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb               296 crypto/crypto_user_base.c static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               303 crypto/crypto_user_base.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb               333 crypto/crypto_user_base.c static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               342 crypto/crypto_user_base.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb               381 crypto/crypto_user_base.c static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               384 crypto/crypto_user_base.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb               421 crypto/crypto_user_base.c static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               424 crypto/crypto_user_base.c 	struct net *net = sock_net(skb->sk);
skb               455 crypto/crypto_user_base.c 			err = netlink_dump_start(net->crypto_nlsk, skb, nlh, &c);
skb               469 crypto/crypto_user_base.c 	return link->doit(skb, nlh, attrs);
skb               472 crypto/crypto_user_base.c static void crypto_netlink_rcv(struct sk_buff *skb)
skb               475 crypto/crypto_user_base.c 	netlink_rcv_skb(skb, &crypto_user_rcv_msg);
skb                31 crypto/crypto_user_stat.c static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
skb                45 crypto/crypto_user_stat.c 	return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
skb                48 crypto/crypto_user_stat.c static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
skb                62 crypto/crypto_user_stat.c 	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
skb                65 crypto/crypto_user_stat.c static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
skb                78 crypto/crypto_user_stat.c 	return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
skb                81 crypto/crypto_user_stat.c static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
skb                94 crypto/crypto_user_stat.c 	return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
skb                97 crypto/crypto_user_stat.c static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
skb               112 crypto/crypto_user_stat.c 	return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
skb               116 crypto/crypto_user_stat.c static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
skb               129 crypto/crypto_user_stat.c 	return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
skb               132 crypto/crypto_user_stat.c static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
skb               144 crypto/crypto_user_stat.c 	return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
skb               147 crypto/crypto_user_stat.c static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
skb               159 crypto/crypto_user_stat.c 	return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
skb               162 crypto/crypto_user_stat.c static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
skb               175 crypto/crypto_user_stat.c 	return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
skb               180 crypto/crypto_user_stat.c 				 struct sk_buff *skb)
skb               195 crypto/crypto_user_stat.c 	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
skb               202 crypto/crypto_user_stat.c 		if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
skb               209 crypto/crypto_user_stat.c 		if (crypto_report_aead(skb, alg))
skb               213 crypto/crypto_user_stat.c 		if (crypto_report_cipher(skb, alg))
skb               217 crypto/crypto_user_stat.c 		if (crypto_report_cipher(skb, alg))
skb               221 crypto/crypto_user_stat.c 		if (crypto_report_cipher(skb, alg))
skb               225 crypto/crypto_user_stat.c 		if (crypto_report_comp(skb, alg))
skb               229 crypto/crypto_user_stat.c 		if (crypto_report_acomp(skb, alg))
skb               233 crypto/crypto_user_stat.c 		if (crypto_report_acomp(skb, alg))
skb               237 crypto/crypto_user_stat.c 		if (crypto_report_akcipher(skb, alg))
skb               241 crypto/crypto_user_stat.c 		if (crypto_report_kpp(skb, alg))
skb               245 crypto/crypto_user_stat.c 		if (crypto_report_ahash(skb, alg))
skb               249 crypto/crypto_user_stat.c 		if (crypto_report_shash(skb, alg))
skb               253 crypto/crypto_user_stat.c 		if (crypto_report_rng(skb, alg))
skb               273 crypto/crypto_user_stat.c 	struct sk_buff *skb = info->out_skb;
skb               278 crypto/crypto_user_stat.c 	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
skb               287 crypto/crypto_user_stat.c 	err = crypto_reportstat_one(alg, ualg, skb);
skb               289 crypto/crypto_user_stat.c 		nlmsg_cancel(skb, nlh);
skb               293 crypto/crypto_user_stat.c 	nlmsg_end(skb, nlh);
skb               305 crypto/crypto_user_stat.c 	struct sk_buff *skb;
skb               317 crypto/crypto_user_stat.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb               318 crypto/crypto_user_stat.c 	if (!skb)
skb               322 crypto/crypto_user_stat.c 	info.out_skb = skb;
skb               332 crypto/crypto_user_stat.c 		kfree_skb(skb);
skb               336 crypto/crypto_user_stat.c 	return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
skb                24 crypto/kpp.c   static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                32 crypto/kpp.c   	return nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(rkpp), &rkpp);
skb                35 crypto/kpp.c   static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                74 crypto/rng.c   static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                84 crypto/rng.c   	return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
skb                87 crypto/rng.c   static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                42 crypto/scompress.c static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                50 crypto/scompress.c 	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
skb                54 crypto/scompress.c static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               402 crypto/shash.c static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               414 crypto/shash.c 	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
skb               417 crypto/shash.c static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               941 crypto/skcipher.c static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
skb               957 crypto/skcipher.c 	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
skb               961 crypto/skcipher.c static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
skb                99 drivers/acpi/event.c 	struct sk_buff *skb;
skb               109 drivers/acpi/event.c 	skb = genlmsg_new(size, GFP_ATOMIC);
skb               110 drivers/acpi/event.c 	if (!skb)
skb               114 drivers/acpi/event.c 	msg_header = genlmsg_put(skb, 0, acpi_event_seqnum++,
skb               118 drivers/acpi/event.c 		nlmsg_free(skb);
skb               124 drivers/acpi/event.c 	    nla_reserve(skb, ACPI_GENL_ATTR_EVENT,
skb               127 drivers/acpi/event.c 		nlmsg_free(skb);
skb               140 drivers/acpi/event.c 	genlmsg_end(skb, msg_header);
skb               142 drivers/acpi/event.c 	genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC);
skb               112 drivers/atm/adummy.c adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb               115 drivers/atm/adummy.c 		vcc->pop(vcc, skb);
skb               117 drivers/atm/adummy.c 		dev_kfree_skb_any(skb);
skb               386 drivers/atm/ambassador.c static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
skb               389 drivers/atm/ambassador.c   unsigned char * data = skb->data;
skb               391 drivers/atm/ambassador.c   for (i=0; i<skb->len && i < 256;i++)
skb               397 drivers/atm/ambassador.c   (void) skb;
skb               425 drivers/atm/ambassador.c static void amb_kfree_skb (struct sk_buff * skb) {
skb               426 drivers/atm/ambassador.c   if (ATM_SKB(skb)->vcc->pop) {
skb               427 drivers/atm/ambassador.c     ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
skb               429 drivers/atm/ambassador.c     dev_kfree_skb_any (skb);
skb               437 drivers/atm/ambassador.c   struct sk_buff * skb = tx_descr->skb;
skb               442 drivers/atm/ambassador.c   atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
skb               448 drivers/atm/ambassador.c   amb_kfree_skb (skb);
skb               457 drivers/atm/ambassador.c   struct sk_buff * skb = bus_to_virt (rx->handle);
skb               474 drivers/atm/ambassador.c 	if (atm_charge (atm_vcc, skb->truesize)) {
skb               477 drivers/atm/ambassador.c 	  ATM_SKB(skb)->vcc = atm_vcc;
skb               478 drivers/atm/ambassador.c 	  skb_put (skb, rx_len);
skb               480 drivers/atm/ambassador.c 	  dump_skb ("<<<", vc, skb);
skb               484 drivers/atm/ambassador.c 	  __net_timestamp(skb);
skb               486 drivers/atm/ambassador.c 	  atm_vcc->push (atm_vcc, skb);
skb               491 drivers/atm/ambassador.c 	  PRINTD (DBG_INFO|DBG_RX, "dropped thanks to atm_charge (vc %hu, truesize %u)", vc, skb->truesize);
skb               518 drivers/atm/ambassador.c   dev_kfree_skb_any (skb);
skb               779 drivers/atm/ambassador.c     struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority);
skb               780 drivers/atm/ambassador.c     if (!skb) {
skb               784 drivers/atm/ambassador.c     if (check_area (skb->data, skb->truesize)) {
skb               785 drivers/atm/ambassador.c       dev_kfree_skb_any (skb);
skb               790 drivers/atm/ambassador.c 	    skb, skb->head, (long) skb_end_offset(skb));
skb               791 drivers/atm/ambassador.c     rx.handle = virt_to_bus (skb);
skb               792 drivers/atm/ambassador.c     rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
skb               794 drivers/atm/ambassador.c       dev_kfree_skb_any (skb);
skb              1293 drivers/atm/ambassador.c static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
skb              1297 drivers/atm/ambassador.c   unsigned int tx_len = skb->len;
skb              1298 drivers/atm/ambassador.c   unsigned char * tx_data = skb->data;
skb              1308 drivers/atm/ambassador.c   dump_skb (">>>", vc, skb);
skb              1318 drivers/atm/ambassador.c   ATM_SKB(skb)->vcc = atm_vcc;
skb              1320 drivers/atm/ambassador.c   if (skb->len > (size_t) atm_vcc->qos.txtp.max_sdu) {
skb              1325 drivers/atm/ambassador.c   if (check_area (skb->data, skb->len)) {
skb              1342 drivers/atm/ambassador.c   tx_descr->skb = skb;
skb              1373 drivers/atm/ambassador.c static void amb_free_rx_skb (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
skb              1385 drivers/atm/ambassador.c 	  skb, atm_vcc, vcc);
skb              1387 drivers/atm/ambassador.c   rx.handle = virt_to_bus (skb);
skb              1388 drivers/atm/ambassador.c   rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
skb              1390 drivers/atm/ambassador.c   skb->data = skb->head;
skb              1391 drivers/atm/ambassador.c   skb_reset_tail_pointer(skb);
skb              1392 drivers/atm/ambassador.c   skb->len = 0;
skb              1401 drivers/atm/ambassador.c   dev_kfree_skb_any (skb);
skb               426 drivers/atm/ambassador.h   struct sk_buff * skb;
skb                48 drivers/atm/atmtcp.c 	struct sk_buff *skb;
skb                55 drivers/atm/atmtcp.c 	skb = alloc_skb(sizeof(*msg),GFP_KERNEL);
skb                56 drivers/atm/atmtcp.c 	if (!skb) return -ENOMEM;
skb                60 drivers/atm/atmtcp.c 		dev_kfree_skb(skb);
skb                63 drivers/atm/atmtcp.c 	atm_force_charge(out_vcc,skb->truesize);
skb                64 drivers/atm/atmtcp.c 	new_msg = skb_put(skb, sizeof(*new_msg));
skb                71 drivers/atm/atmtcp.c 	out_vcc->push(out_vcc,skb);
skb               191 drivers/atm/atmtcp.c static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
skb               200 drivers/atm/atmtcp.c 		if (vcc->pop) vcc->pop(vcc,skb);
skb               201 drivers/atm/atmtcp.c 		else dev_kfree_skb(skb);
skb               207 drivers/atm/atmtcp.c 		if (vcc->pop) vcc->pop(vcc,skb);
skb               208 drivers/atm/atmtcp.c 		else dev_kfree_skb(skb);
skb               213 drivers/atm/atmtcp.c 	size = skb->len+sizeof(struct atmtcp_hdr);
skb               216 drivers/atm/atmtcp.c 		if (vcc->pop) vcc->pop(vcc,skb);
skb               217 drivers/atm/atmtcp.c 		else dev_kfree_skb(skb);
skb               224 drivers/atm/atmtcp.c 	hdr->length = htonl(skb->len);
skb               225 drivers/atm/atmtcp.c 	skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
skb               226 drivers/atm/atmtcp.c 	if (vcc->pop) vcc->pop(vcc,skb);
skb               227 drivers/atm/atmtcp.c 	else dev_kfree_skb(skb);
skb               283 drivers/atm/atmtcp.c static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
skb               291 drivers/atm/atmtcp.c 	if (!skb->len) return 0;
skb               293 drivers/atm/atmtcp.c 	hdr = (struct atmtcp_hdr *) skb->data;
skb               296 drivers/atm/atmtcp.c 		    (struct atmtcp_control *) skb->data);
skb               307 drivers/atm/atmtcp.c 	skb_pull(skb,sizeof(struct atmtcp_hdr));
skb               308 drivers/atm/atmtcp.c 	new_skb = atm_alloc_charge(out_vcc,skb->len,GFP_KERNEL);
skb               314 drivers/atm/atmtcp.c 	skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
skb               319 drivers/atm/atmtcp.c 	if (vcc->pop) vcc->pop(vcc,skb);
skb               320 drivers/atm/atmtcp.c 	else dev_kfree_skb(skb);
skb               344 drivers/atm/eni.c static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
skb               358 drivers/atm/eni.c 	if (skb) {
skb               359 drivers/atm/eni.c 		paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
skb               363 drivers/atm/eni.c 		ENI_PRV_PADDR(skb) = paddr;
skb               368 drivers/atm/eni.c 		ENI_PRV_SIZE(skb) = size+skip;
skb               370 drivers/atm/eni.c 		ATM_SKB(skb)->vcc = vcc;
skb               474 drivers/atm/eni.c 	if (skb) {
skb               475 drivers/atm/eni.c 		ENI_PRV_POS(skb) = eni_vcc->descr+size+1;
skb               476 drivers/atm/eni.c 		skb_queue_tail(&eni_dev->rx_queue,skb);
skb               477 drivers/atm/eni.c 		eni_vcc->last = skb;
skb               486 drivers/atm/eni.c 		dma_unmap_single(&eni_dev->pci_dev->dev,paddr,skb->len,
skb               489 drivers/atm/eni.c 	if (skb) dev_kfree_skb_irq(skb);
skb               517 drivers/atm/eni.c 	struct sk_buff *skb;
skb               535 drivers/atm/eni.c 	skb = length ? atm_alloc_charge(vcc,length,GFP_ATOMIC) : NULL;
skb               536 drivers/atm/eni.c 	if (!skb) {
skb               540 drivers/atm/eni.c 	skb_put(skb,length);
skb               541 drivers/atm/eni.c 	skb->tstamp = eni_vcc->timestamp;
skb               543 drivers/atm/eni.c 	if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1;
skb               554 drivers/atm/eni.c 	struct sk_buff *skb;
skb               605 drivers/atm/eni.c 	skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
skb               606 drivers/atm/eni.c 	if (!skb) {
skb               610 drivers/atm/eni.c 	skb_put(skb,length);
skb               612 drivers/atm/eni.c 	if (do_rx_dma(vcc,skb,1,size,eff)) return 1;
skb               731 drivers/atm/eni.c 	struct sk_buff *skb;
skb               738 drivers/atm/eni.c 		skb = skb_dequeue(&eni_dev->rx_queue);
skb               739 drivers/atm/eni.c 		if (!skb) {
skb               747 drivers/atm/eni.c 		EVENT("dequeued (size=%ld,pos=0x%lx)\n",ENI_PRV_SIZE(skb),
skb               748 drivers/atm/eni.c 		    ENI_PRV_POS(skb));
skb               750 drivers/atm/eni.c 		vcc = ATM_SKB(skb)->vcc;
skb               754 drivers/atm/eni.c 		if (!EEPMOK(eni_vcc->rx_pos,ENI_PRV_SIZE(skb),
skb               758 drivers/atm/eni.c 			skb_queue_head(&eni_dev->rx_queue,skb);
skb               762 drivers/atm/eni.c 		eni_vcc->rx_pos = ENI_PRV_POS(skb) & (eni_vcc->words-1);
skb               763 drivers/atm/eni.c 		dma_unmap_single(&eni_dev->pci_dev->dev,ENI_PRV_PADDR(skb),skb->len,
skb               765 drivers/atm/eni.c 		if (!skb->len) dev_kfree_skb_irq(skb);
skb               767 drivers/atm/eni.c 			EVENT("pushing (len=%ld)\n",skb->len,0);
skb               769 drivers/atm/eni.c 				*(unsigned long *) skb->data =
skb               770 drivers/atm/eni.c 				    ntohl(*(unsigned long *) skb->data);
skb               771 drivers/atm/eni.c 			memset(skb->cb,0,sizeof(struct eni_skb_prv));
skb               772 drivers/atm/eni.c 			vcc->push(vcc,skb);
skb              1033 drivers/atm/eni.c static enum enq_res do_tx(struct sk_buff *skb)
skb              1045 drivers/atm/eni.c 	NULLCHECK(skb);
skb              1046 drivers/atm/eni.c 	EVENT("do_tx: skb=0x%lx, %ld bytes\n",(unsigned long) skb,skb->len);
skb              1047 drivers/atm/eni.c 	vcc = ATM_SKB(skb)->vcc;
skb              1056 drivers/atm/eni.c 		unsigned int hack = *((char *) skb->data)-'0';
skb              1059 drivers/atm/eni.c 			skb->data += hack;
skb              1060 drivers/atm/eni.c 			skb->len -= hack;
skb              1065 drivers/atm/eni.c 	if ((unsigned long) skb->data & 3)
skb              1082 drivers/atm/eni.c 		size = skb->len+4*AAL5_TRAILER+ATM_CELL_PAYLOAD-1;
skb              1103 drivers/atm/eni.c DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
skb              1104 drivers/atm/eni.c 	if (!skb_shinfo(skb)->nr_frags) dma_size += 5;
skb              1105 drivers/atm/eni.c 	else dma_size += 5*(skb_shinfo(skb)->nr_frags+1);
skb              1117 drivers/atm/eni.c 	paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
skb              1119 drivers/atm/eni.c 	ENI_PRV_PADDR(skb) = paddr;
skb              1126 drivers/atm/eni.c 	if (!skb_shinfo(skb)->nr_frags)
skb              1127 drivers/atm/eni.c 		if (aal5) put_dma(tx->index,eni_dev->dma,&j,paddr,skb->len);
skb              1128 drivers/atm/eni.c 		else put_dma(tx->index,eni_dev->dma,&j,paddr+4,skb->len-4);
skb              1131 drivers/atm/eni.c 		for (i = -1; i < skb_shinfo(skb)->nr_frags; i++)
skb              1134 drivers/atm/eni.c 				    skb->data,
skb              1135 drivers/atm/eni.c 				    skb_headlen(skb));
skb              1138 drivers/atm/eni.c 				    skb_frag_page(&skb_shinfo(skb)->frags[i]) +
skb              1139 drivers/atm/eni.c 					skb_frag_off(&skb_shinfo(skb)->frags[i]),
skb              1140 drivers/atm/eni.c 				    skb_frag_size(&skb_shinfo(skb)->frags[i]));
skb              1142 drivers/atm/eni.c 	if (skb->len & 3) {
skb              1144 drivers/atm/eni.c 			4 - (skb->len & 3));
skb              1159 drivers/atm/eni.c             (aal5 ? 0 : (skb->data[3] & 0xf)) |
skb              1160 drivers/atm/eni.c 	    (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? MID_SEG_CLP : 0),
skb              1162 drivers/atm/eni.c 	DPRINTK("size: %d, len:%d\n",size,skb->len);
skb              1164 drivers/atm/eni.c 		writel(skb->len,tx->send+
skb              1172 drivers/atm/eni.c 	ENI_PRV_POS(skb) = tx->tx_pos;
skb              1173 drivers/atm/eni.c 	ENI_PRV_SIZE(skb) = size;
skb              1178 drivers/atm/eni.c 	skb_queue_tail(&eni_dev->tx_queue,skb);
skb              1187 drivers/atm/eni.c 	struct sk_buff *skb;
skb              1195 drivers/atm/eni.c 			while ((skb = skb_dequeue(&tx->backlog))) {
skb              1196 drivers/atm/eni.c 				res = do_tx(skb);
skb              1199 drivers/atm/eni.c 				skb_queue_head(&tx->backlog,skb);
skb              1212 drivers/atm/eni.c 	struct sk_buff *skb;
skb              1218 drivers/atm/eni.c 	while ((skb = skb_dequeue(&eni_dev->tx_queue))) {
skb              1219 drivers/atm/eni.c 		vcc = ATM_SKB(skb)->vcc;
skb              1223 drivers/atm/eni.c 		DPRINTK("dequeue_tx: next 0x%lx curr 0x%x\n",ENI_PRV_POS(skb),
skb              1225 drivers/atm/eni.c 		if (ENI_VCC(vcc)->txing < tx->words && ENI_PRV_POS(skb) ==
skb              1227 drivers/atm/eni.c 			skb_queue_head(&eni_dev->tx_queue,skb);
skb              1230 drivers/atm/eni.c 		ENI_VCC(vcc)->txing -= ENI_PRV_SIZE(skb);
skb              1231 drivers/atm/eni.c 		dma_unmap_single(&eni_dev->pci_dev->dev,ENI_PRV_PADDR(skb),skb->len,
skb              1233 drivers/atm/eni.c 		if (vcc->pop) vcc->pop(vcc,skb);
skb              1234 drivers/atm/eni.c 		else dev_kfree_skb_irq(skb);
skb              1962 drivers/atm/eni.c 	struct sk_buff *skb;
skb              1983 drivers/atm/eni.c 	skb_queue_walk(&eni_dev->tx_queue, skb) {
skb              1986 drivers/atm/eni.c 		if (ATM_SKB(skb)->vcc != vcc) continue;
skb              1987 drivers/atm/eni.c 		dsc = tx->send+ENI_PRV_POS(skb)*4;
skb              2051 drivers/atm/eni.c static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
skb              2057 drivers/atm/eni.c 		if (vcc->pop) vcc->pop(vcc,skb);
skb              2058 drivers/atm/eni.c 		else dev_kfree_skb(skb);
skb              2061 drivers/atm/eni.c 	if (!skb) {
skb              2063 drivers/atm/eni.c 		if (vcc->pop) vcc->pop(vcc,skb);
skb              2067 drivers/atm/eni.c 		if (skb->len != ATM_CELL_SIZE-1) {
skb              2068 drivers/atm/eni.c 			if (vcc->pop) vcc->pop(vcc,skb);
skb              2069 drivers/atm/eni.c 			else dev_kfree_skb(skb);
skb              2072 drivers/atm/eni.c 		*(u32 *) skb->data = htonl(*(u32 *) skb->data);
skb              2075 drivers/atm/eni.c 	ATM_SKB(skb)->vcc = vcc;
skb              2077 drivers/atm/eni.c 	res = do_tx(skb);
skb              2080 drivers/atm/eni.c 	skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb);
skb              2329 drivers/atm/eni.c 	struct sk_buff *skb; /* dummy for sizeof */
skb              2331 drivers/atm/eni.c 	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct eni_skb_prv));
skb               132 drivers/atm/eni.h #define ENI_PRV_SIZE(skb) (((struct eni_skb_prv *) (skb)->cb)->size)
skb               133 drivers/atm/eni.h #define ENI_PRV_POS(skb) (((struct eni_skb_prv *) (skb)->cb)->pos)
skb               134 drivers/atm/eni.h #define ENI_PRV_PADDR(skb) (((struct eni_skb_prv *) (skb)->cb)->paddr)
skb               363 drivers/atm/firestream.c static inline void fs_kfree_skb (struct sk_buff * skb) 
skb               365 drivers/atm/firestream.c 	if (ATM_SKB(skb)->vcc->pop)
skb               366 drivers/atm/firestream.c 		ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
skb               368 drivers/atm/firestream.c 		dev_kfree_skb_any (skb);
skb               697 drivers/atm/firestream.c 	struct sk_buff *skb;
skb               724 drivers/atm/firestream.c 				    td->flags, td->next, td->bsa, td->aal_bufsize, td->skb );
skb               726 drivers/atm/firestream.c 			skb = td->skb;
skb               727 drivers/atm/firestream.c 			if (skb == FS_VCC (ATM_SKB(skb)->vcc)->last_skb) {
skb               728 drivers/atm/firestream.c 				FS_VCC (ATM_SKB(skb)->vcc)->last_skb = NULL;
skb               729 drivers/atm/firestream.c 				wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
skb               741 drivers/atm/firestream.c 			atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
skb               744 drivers/atm/firestream.c 			fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
skb               745 drivers/atm/firestream.c 			fs_kfree_skb (skb);
skb               767 drivers/atm/firestream.c 	struct sk_buff *skb;
skb               785 drivers/atm/firestream.c 			    pe->skb, pe->fp);
skb               800 drivers/atm/firestream.c 				skb = pe->skb;
skb               803 drivers/atm/firestream.c 				fs_dprintk (FS_DEBUG_QUEUE, "Got skb: %p\n", skb);
skb               806 drivers/atm/firestream.c 				skb_put (skb, qe->p1 & 0xffff); 
skb               807 drivers/atm/firestream.c 				ATM_SKB(skb)->vcc = atm_vcc;
skb               809 drivers/atm/firestream.c 				__net_timestamp(skb);
skb               810 drivers/atm/firestream.c 				fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
skb               811 drivers/atm/firestream.c 				atm_vcc->push (atm_vcc, skb);
skb               823 drivers/atm/firestream.c 				fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", pe->skb);
skb               824 drivers/atm/firestream.c 				dev_kfree_skb_any (pe->skb);
skb              1157 drivers/atm/firestream.c static int fs_send (struct atm_vcc *atm_vcc, struct sk_buff *skb)
skb              1167 drivers/atm/firestream.c 		    atm_vcc, skb, vcc, dev);
skb              1169 drivers/atm/firestream.c 	fs_dprintk (FS_DEBUG_ALLOC, "Alloc t-skb: %p (atm_send)\n", skb);
skb              1171 drivers/atm/firestream.c 	ATM_SKB(skb)->vcc = atm_vcc;
skb              1173 drivers/atm/firestream.c 	vcc->last_skb = skb;
skb              1183 drivers/atm/firestream.c 		    *(int *) skb->data);
skb              1185 drivers/atm/firestream.c 	td->flags =  TD_EPI | TD_DATA | skb->len;
skb              1187 drivers/atm/firestream.c 	td->bsa  = virt_to_bus (skb->data);
skb              1188 drivers/atm/firestream.c 	td->skb = skb;
skb              1197 drivers/atm/firestream.c 	dq[qd].skb   = td->skb;
skb              1470 drivers/atm/firestream.c 	struct sk_buff *skb;
skb              1479 drivers/atm/firestream.c 		skb = alloc_skb (fp->bufsize, gfp_flags);
skb              1480 drivers/atm/firestream.c 		fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-skb: %p(%d)\n", skb, fp->bufsize);
skb              1481 drivers/atm/firestream.c 		if (!skb) break;
skb              1485 drivers/atm/firestream.c 			fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", skb);
skb              1486 drivers/atm/firestream.c 			dev_kfree_skb_any (skb);
skb              1491 drivers/atm/firestream.c 			    skb, ne, skb->data, skb->head);
skb              1495 drivers/atm/firestream.c 		ne->bsa   = virt_to_bus (skb->data);
skb              1497 drivers/atm/firestream.c 		ne->skb = skb;
skb              1948 drivers/atm/firestream.c 			i, da[qd], dq[qd].flags, dq[qd].bsa, dq[qd].skb, dq[qd].dev);
skb              1975 drivers/atm/firestream.c 				fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", fp->skb);
skb              1976 drivers/atm/firestream.c 				dev_kfree_skb_any (fp->skb);
skb              1981 drivers/atm/firestream.c 			fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", fp->skb);
skb              1982 drivers/atm/firestream.c 			dev_kfree_skb_any (fp->skb);
skb                44 drivers/atm/firestream.h 	struct sk_buff *skb;
skb               789 drivers/atm/fore200e.c 		entry, txq->tail, entry->vc_map, entry->skb);
skb               807 drivers/atm/fore200e.c 	    dev_kfree_skb_any(entry->skb);
skb               830 drivers/atm/fore200e.c 		dev_kfree_skb_any(entry->skb);
skb               838 drivers/atm/fore200e.c 		    vcc->pop(vcc, entry->skb);
skb               841 drivers/atm/fore200e.c 		    dev_kfree_skb_any(entry->skb);
skb               964 drivers/atm/fore200e.c     struct sk_buff*      skb;
skb               993 drivers/atm/fore200e.c     skb = alloc_skb(pdu_len, GFP_ATOMIC);
skb               994 drivers/atm/fore200e.c     if (skb == NULL) {
skb              1001 drivers/atm/fore200e.c     __net_timestamp(skb);
skb              1005 drivers/atm/fore200e.c 	*((u32*)skb_put(skb, 4)) = cell_header;
skb              1019 drivers/atm/fore200e.c 	skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
skb              1026 drivers/atm/fore200e.c     DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
skb              1035 drivers/atm/fore200e.c     if (atm_charge(vcc, skb->truesize) == 0) {
skb              1040 drivers/atm/fore200e.c 	dev_kfree_skb_any(skb);
skb              1046 drivers/atm/fore200e.c     vcc->push(vcc, skb);
skb              1467 drivers/atm/fore200e.c fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb              1478 drivers/atm/fore200e.c     int                     tx_len       = skb->len;
skb              1500 drivers/atm/fore200e.c 	dev_kfree_skb_any(skb);
skb              1506 drivers/atm/fore200e.c 	cell_header = (u32*) skb->data;
skb              1507 drivers/atm/fore200e.c 	skb_data    = skb->data + 4;    /* skip 4-byte cell header */
skb              1508 drivers/atm/fore200e.c 	skb_len     = tx_len = skb->len  - 4;
skb              1515 drivers/atm/fore200e.c 	skb_data = skb->data;
skb              1516 drivers/atm/fore200e.c 	skb_len  = skb->len;
skb              1538 drivers/atm/fore200e.c 		vcc->pop(vcc, skb);
skb              1541 drivers/atm/fore200e.c 		dev_kfree_skb_any(skb);
skb              1584 drivers/atm/fore200e.c 		vcc->pop(vcc, skb);
skb              1587 drivers/atm/fore200e.c 		dev_kfree_skb_any(skb);
skb              1599 drivers/atm/fore200e.c     entry->skb    = skb;
skb               526 drivers/atm/fore200e.h     struct sk_buff*         skb;         /* related skb                              */
skb               100 drivers/atm/he.c static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
skb              1648 drivers/atm/he.c 	struct sk_buff *skb;
skb              1724 drivers/atm/he.c 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
skb              1726 drivers/atm/he.c 		if (!skb) {
skb              1732 drivers/atm/he.c 			skb_reserve(skb, rx_skb_reserve);
skb              1734 drivers/atm/he.c 		__net_timestamp(skb);
skb              1737 drivers/atm/he.c 			skb_put_data(skb, &heb->data, heb->len);
skb              1742 drivers/atm/he.c 				skb->len = ATM_AAL0_SDU;
skb              1743 drivers/atm/he.c 				skb_set_tail_pointer(skb, skb->len);
skb              1748 drivers/atm/he.c 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
skb              1749 drivers/atm/he.c 				skb_set_tail_pointer(skb, skb->len);
skb              1752 drivers/atm/he.c 					skb->ip_summed = CHECKSUM_COMPLETE;
skb              1753 drivers/atm/he.c 					skb->csum = TCP_CKSUM(skb->data,
skb              1761 drivers/atm/he.c 		if (skb->len > vcc->qos.rxtp.max_sdu)
skb              1762 drivers/atm/he.c 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
skb              1766 drivers/atm/he.c 		ATM_SKB(skb)->vcc = vcc;
skb              1769 drivers/atm/he.c 		vcc->push(vcc, skb);
skb              1856 drivers/atm/he.c 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
skb              1858 drivers/atm/he.c 				tpd->vcc->pop(tpd->vcc, tpd->skb);
skb              1860 drivers/atm/he.c 				dev_kfree_skb_any(tpd->skb);
skb              2093 drivers/atm/he.c 			if (tpd->skb) {
skb              2095 drivers/atm/he.c 					tpd->vcc->pop(tpd->vcc, tpd->skb);
skb              2097 drivers/atm/he.c 					dev_kfree_skb_any(tpd->skb);
skb              2433 drivers/atm/he.c 		tpd->skb = NULL;
skb              2489 drivers/atm/he.c he_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb              2503 drivers/atm/he.c 	if ((skb->len > HE_TPD_BUFSIZE) ||
skb              2504 drivers/atm/he.c 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
skb              2505 drivers/atm/he.c 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
skb              2507 drivers/atm/he.c 			vcc->pop(vcc, skb);
skb              2509 drivers/atm/he.c 			dev_kfree_skb_any(skb);
skb              2515 drivers/atm/he.c 	if (skb_shinfo(skb)->nr_frags) {
skb              2518 drivers/atm/he.c 			vcc->pop(vcc, skb);
skb              2520 drivers/atm/he.c 			dev_kfree_skb_any(skb);
skb              2530 drivers/atm/he.c 			vcc->pop(vcc, skb);
skb              2532 drivers/atm/he.c 			dev_kfree_skb_any(skb);
skb              2541 drivers/atm/he.c 		char *pti_clp = (void *) (skb->data + 3);
skb              2550 drivers/atm/he.c 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
skb              2554 drivers/atm/he.c 	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
skb              2555 drivers/atm/he.c 				skb_headlen(skb), DMA_TO_DEVICE);
skb              2556 drivers/atm/he.c 	tpd->iovec[slot].len = skb_headlen(skb);
skb              2559 drivers/atm/he.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2560 drivers/atm/he.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2564 drivers/atm/he.c 			tpd->skb = NULL;	/* not the last fragment
skb              2572 drivers/atm/he.c 					vcc->pop(vcc, skb);
skb              2574 drivers/atm/he.c 					dev_kfree_skb_any(skb);
skb              2592 drivers/atm/he.c 	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
skb              2593 drivers/atm/he.c 	tpd->length0 = skb->len | TPD_LST;
skb              2598 drivers/atm/he.c 	tpd->skb = skb;
skb              2600 drivers/atm/he.c 	ATM_SKB(skb)->vcc = vcc;
skb               133 drivers/atm/he.h 	struct sk_buff *skb;
skb               449 drivers/atm/horizon.c static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
skb               452 drivers/atm/horizon.c   unsigned char * data = skb->data;
skb               454 drivers/atm/horizon.c   for (i=0; i<skb->len && i < 256;i++)
skb               460 drivers/atm/horizon.c   (void) skb;
skb               770 drivers/atm/horizon.c static void hrz_kfree_skb (struct sk_buff * skb) {
skb               771 drivers/atm/horizon.c   if (ATM_SKB(skb)->vcc->pop) {
skb               772 drivers/atm/horizon.c     ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
skb               774 drivers/atm/horizon.c     dev_kfree_skb_any (skb);
skb               995 drivers/atm/horizon.c 	struct sk_buff * skb = dev->rx_skb;
skb              1000 drivers/atm/horizon.c 	dump_skb ("<<<", dev->rx_channel, skb);
skb              1002 drivers/atm/horizon.c 	PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len);
skb              1005 drivers/atm/horizon.c 	  struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
skb              1008 drivers/atm/horizon.c 	  __net_timestamp(skb);
skb              1010 drivers/atm/horizon.c 	  vcc->push (vcc, skb);
skb              1155 drivers/atm/horizon.c 	struct sk_buff * skb = dev->tx_skb;
skb              1159 drivers/atm/horizon.c 	atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
skb              1162 drivers/atm/horizon.c 	hrz_kfree_skb (skb);
skb              1291 drivers/atm/horizon.c 	  struct sk_buff * skb = atm_alloc_charge (atm_vcc, rx_len, GFP_ATOMIC);
skb              1292 drivers/atm/horizon.c 	  if (skb) {
skb              1294 drivers/atm/horizon.c 	    dev->rx_skb = skb;
skb              1299 drivers/atm/horizon.c 	    skb_put (skb, rx_len);
skb              1300 drivers/atm/horizon.c 	    ATM_SKB(skb)->vcc = atm_vcc;
skb              1306 drivers/atm/horizon.c 	    dev->rx_addr = skb->data;
skb              1308 drivers/atm/horizon.c 		    skb->data, rx_len);
skb              1548 drivers/atm/horizon.c static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
skb              1561 drivers/atm/horizon.c 	  channel, skb->data, skb->len);
skb              1563 drivers/atm/horizon.c   dump_skb (">>>", channel, skb);
skb              1567 drivers/atm/horizon.c     hrz_kfree_skb (skb);
skb              1572 drivers/atm/horizon.c   ATM_SKB(skb)->vcc = atm_vcc;
skb              1574 drivers/atm/horizon.c   if (skb->len > atm_vcc->qos.txtp.max_sdu) {
skb              1576 drivers/atm/horizon.c     hrz_kfree_skb (skb);
skb              1582 drivers/atm/horizon.c     hrz_kfree_skb (skb);
skb              1608 drivers/atm/horizon.c     char * s = skb->data;
skb              1619 drivers/atm/horizon.c     hrz_kfree_skb (skb);
skb              1628 drivers/atm/horizon.c   buffers_required = (skb->len+(ATM_AAL5_TRAILER-1)) / ATM_CELL_PAYLOAD + 3;
skb              1643 drivers/atm/horizon.c       hrz_kfree_skb (skb);
skb              1685 drivers/atm/horizon.c     unsigned int tx_len = skb->len;
skb              1686 drivers/atm/horizon.c     unsigned int tx_iovcnt = skb_shinfo(skb)->nr_frags;
skb              1688 drivers/atm/horizon.c     dev->tx_skb = skb;
skb              1696 drivers/atm/horizon.c 	      skb->data, tx_len);
skb              1698 drivers/atm/horizon.c       hrz_kfree_skb (skb);
skb              1705 drivers/atm/horizon.c       dev->tx_addr = skb->data;
skb              1707 drivers/atm/horizon.c 	      skb->data, tx_len);
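The sizing at horizon.c:1628 budgets TX buffers per AAL5 PDU: skb->len plus trailer slack, rounded into 48-byte cell payloads, plus three buffers of headroom (which appears to cover the trailer cell and margin). Assuming the usual values of the ATM constants, which are not shown in the listing, the arithmetic works out as below:

#include <stdio.h>

/* assumed values of the kernel constants */
#define ATM_CELL_PAYLOAD 48	/* data bytes per ATM cell  */
#define ATM_AAL5_TRAILER  8	/* AAL5 CPCS trailer bytes  */

/* the horizon.c:1628 expression, verbatim apart from the parameter */
static unsigned int buffers_required(unsigned int len)
{
	return (len + (ATM_AAL5_TRAILER - 1)) / ATM_CELL_PAYLOAD + 3;
}

int main(void)
{
	printf("%u\n", buffers_required(40));	/* (40+7)/48  + 3 = 3  */
	printf("%u\n", buffers_required(1500));	/* (1507)/48  + 3 = 34 */
	return 0;
}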
skb               124 drivers/atm/idt77252.c static int idt77252_send(struct atm_vcc *vcc, struct sk_buff *skb);
skb               582 drivers/atm/idt77252.c sb_pool_add(struct idt77252_dev *card, struct sk_buff *skb, int queue)
skb               588 drivers/atm/idt77252.c 	while (pool->skb[index]) {
skb               594 drivers/atm/idt77252.c 	pool->skb[index] = skb;
skb               595 drivers/atm/idt77252.c 	IDT77252_PRV_POOL(skb) = POOL_HANDLE(queue, index);
skb               602 drivers/atm/idt77252.c sb_pool_remove(struct idt77252_dev *card, struct sk_buff *skb)
skb               607 drivers/atm/idt77252.c 	handle = IDT77252_PRV_POOL(skb);
skb               617 drivers/atm/idt77252.c 	card->sbpool[queue].skb[index] = NULL;
skb               633 drivers/atm/idt77252.c 	return card->sbpool[queue].skb[index];
skb               670 drivers/atm/idt77252.c 	struct sk_buff *skb;
skb               676 drivers/atm/idt77252.c 	while ((skb = skb_dequeue(&scq->transmit))) {
skb               677 drivers/atm/idt77252.c 		dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb               678 drivers/atm/idt77252.c 				 skb->len, DMA_TO_DEVICE);
skb               680 drivers/atm/idt77252.c 		vcc = ATM_SKB(skb)->vcc;
skb               682 drivers/atm/idt77252.c 			vcc->pop(vcc, skb);
skb               684 drivers/atm/idt77252.c 			dev_kfree_skb(skb);
skb               687 drivers/atm/idt77252.c 	while ((skb = skb_dequeue(&scq->pending))) {
skb               688 drivers/atm/idt77252.c 		dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb               689 drivers/atm/idt77252.c 				 skb->len, DMA_TO_DEVICE);
skb               691 drivers/atm/idt77252.c 		vcc = ATM_SKB(skb)->vcc;
skb               693 drivers/atm/idt77252.c 			vcc->pop(vcc, skb);
skb               695 drivers/atm/idt77252.c 			dev_kfree_skb(skb);
skb               703 drivers/atm/idt77252.c push_on_scq(struct idt77252_dev *card, struct vc_map *vc, struct sk_buff *skb)
skb               719 drivers/atm/idt77252.c 	skb_queue_tail(&scq->transmit, skb);
skb               726 drivers/atm/idt77252.c 		vc->estimator->cells += (skb->len + 47) / 48;
skb               742 drivers/atm/idt77252.c 	tbd = &IDT77252_PRV_TBD(skb);
skb               794 drivers/atm/idt77252.c 	struct sk_buff *skb;
skb               800 drivers/atm/idt77252.c 	skb = skb_dequeue(&scq->transmit);
skb               801 drivers/atm/idt77252.c 	if (skb) {
skb               802 drivers/atm/idt77252.c 		TXPRINTK("%s: freeing skb at %p.\n", card->name, skb);
skb               804 drivers/atm/idt77252.c 		dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb               805 drivers/atm/idt77252.c 				 skb->len, DMA_TO_DEVICE);
skb               807 drivers/atm/idt77252.c 		vcc = ATM_SKB(skb)->vcc;
skb               810 drivers/atm/idt77252.c 			vcc->pop(vcc, skb);
skb               812 drivers/atm/idt77252.c 			dev_kfree_skb(skb);
skb               820 drivers/atm/idt77252.c 	while ((skb = skb_dequeue(&scq->pending))) {
skb               821 drivers/atm/idt77252.c 		if (push_on_scq(card, vc, skb)) {
skb               822 drivers/atm/idt77252.c 			skb_queue_head(&vc->scq->pending, skb);
skb               831 drivers/atm/idt77252.c 	  struct sk_buff *skb, int oam)
skb               839 drivers/atm/idt77252.c 	if (skb->len == 0) {
skb               840 drivers/atm/idt77252.c 		printk("%s: invalid skb->len (%d)\n", card->name, skb->len);
skb               845 drivers/atm/idt77252.c 		 card->name, skb->len);
skb               847 drivers/atm/idt77252.c 	tbd = &IDT77252_PRV_TBD(skb);
skb               848 drivers/atm/idt77252.c 	vcc = ATM_SKB(skb)->vcc;
skb               850 drivers/atm/idt77252.c 	IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb               851 drivers/atm/idt77252.c 						 skb->len, DMA_TO_DEVICE);
skb               856 drivers/atm/idt77252.c 		if (skb->len != 52)
skb               860 drivers/atm/idt77252.c 		tbd->word_2 = IDT77252_PRV_PADDR(skb) + 4;
skb               862 drivers/atm/idt77252.c 		tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) |
skb               863 drivers/atm/idt77252.c 			      (skb->data[2] <<  8) | (skb->data[3] <<  0);
skb               881 drivers/atm/idt77252.c 		if (skb->len > 52)
skb               891 drivers/atm/idt77252.c 		tbd->word_2 = IDT77252_PRV_PADDR(skb) + 4;
skb               893 drivers/atm/idt77252.c 		tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) |
skb               894 drivers/atm/idt77252.c 			      (skb->data[2] <<  8) | (skb->data[3] <<  0);
skb               898 drivers/atm/idt77252.c 		tbd->word_1 = SAR_TBD_EPDU | SAR_TBD_AAL5 | skb->len;
skb               899 drivers/atm/idt77252.c 		tbd->word_2 = IDT77252_PRV_PADDR(skb);
skb               900 drivers/atm/idt77252.c 		tbd->word_3 = skb->len;
skb               915 drivers/atm/idt77252.c 	skb_queue_tail(&vc->scq->pending, skb);
skb               917 drivers/atm/idt77252.c 	while ((skb = skb_dequeue(&vc->scq->pending))) {
skb               918 drivers/atm/idt77252.c 		if (push_on_scq(card, vc, skb)) {
skb               919 drivers/atm/idt77252.c 			skb_queue_head(&vc->scq->pending, skb);
skb               928 drivers/atm/idt77252.c 	dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb               929 drivers/atm/idt77252.c 			 skb->len, DMA_TO_DEVICE);
skb              1013 drivers/atm/idt77252.c 	struct sk_buff *skb;
skb              1028 drivers/atm/idt77252.c 	skb = sb_pool_skb(card, le32_to_cpu(rsqe->word_2));
skb              1029 drivers/atm/idt77252.c 	if (skb == NULL) {
skb              1042 drivers/atm/idt77252.c 		 card->name, vpi, vci, skb, skb->data);
skb              1047 drivers/atm/idt77252.c 		recycle_rx_skb(card, skb);
skb              1055 drivers/atm/idt77252.c 		recycle_rx_skb(card, skb);
skb              1061 drivers/atm/idt77252.c 	dma_sync_single_for_cpu(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb              1062 drivers/atm/idt77252.c 				skb_end_pointer(skb) - skb->data,
skb              1071 drivers/atm/idt77252.c 		cell = skb->data;
skb              1103 drivers/atm/idt77252.c 		recycle_rx_skb(card, skb);
skb              1109 drivers/atm/idt77252.c 		recycle_rx_skb(card, skb);
skb              1112 drivers/atm/idt77252.c 	skb->len = (stat & SAR_RSQE_CELLCNT) * ATM_CELL_PAYLOAD;
skb              1116 drivers/atm/idt77252.c 	__skb_queue_tail(&rpp->queue, skb);
skb              1117 drivers/atm/idt77252.c 	rpp->len += skb->len;
skb              1123 drivers/atm/idt77252.c 		l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
skb              1147 drivers/atm/idt77252.c 			skb = dev_alloc_skb(rpp->len);
skb              1148 drivers/atm/idt77252.c 			if (!skb) {
skb              1155 drivers/atm/idt77252.c 			if (!atm_charge(vcc, skb->truesize)) {
skb              1157 drivers/atm/idt77252.c 				dev_kfree_skb(skb);
skb              1161 drivers/atm/idt77252.c 				skb_put_data(skb, sb->data, sb->len);
skb              1165 drivers/atm/idt77252.c 			skb_trim(skb, len);
skb              1166 drivers/atm/idt77252.c 			ATM_SKB(skb)->vcc = vcc;
skb              1167 drivers/atm/idt77252.c 			__net_timestamp(skb);
skb              1169 drivers/atm/idt77252.c 			vcc->push(vcc, skb);
skb              1177 drivers/atm/idt77252.c 		if (!atm_charge(vcc, skb->truesize)) {
skb              1178 drivers/atm/idt77252.c 			recycle_rx_skb(card, skb);
skb              1182 drivers/atm/idt77252.c 		dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb              1183 drivers/atm/idt77252.c 				 skb_end_pointer(skb) - skb->data,
skb              1185 drivers/atm/idt77252.c 		sb_pool_remove(card, skb);
skb              1187 drivers/atm/idt77252.c 		skb_trim(skb, len);
skb              1188 drivers/atm/idt77252.c 		ATM_SKB(skb)->vcc = vcc;
skb              1189 drivers/atm/idt77252.c 		__net_timestamp(skb);
skb              1191 drivers/atm/idt77252.c 		vcc->push(vcc, skb);
skb              1194 drivers/atm/idt77252.c 		if (skb->truesize > SAR_FB_SIZE_3)
skb              1196 drivers/atm/idt77252.c 		else if (skb->truesize > SAR_FB_SIZE_2)
skb              1198 drivers/atm/idt77252.c 		else if (skb->truesize > SAR_FB_SIZE_1)
skb              1798 drivers/atm/idt77252.c push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
skb              1804 drivers/atm/idt77252.c 	skb->data = skb->head;
skb              1805 drivers/atm/idt77252.c 	skb_reset_tail_pointer(skb);
skb              1806 drivers/atm/idt77252.c 	skb->len = 0;
skb              1808 drivers/atm/idt77252.c 	skb_reserve(skb, 16);
skb              1812 drivers/atm/idt77252.c 		skb_put(skb, SAR_FB_SIZE_0);
skb              1815 drivers/atm/idt77252.c 		skb_put(skb, SAR_FB_SIZE_1);
skb              1818 drivers/atm/idt77252.c 		skb_put(skb, SAR_FB_SIZE_2);
skb              1821 drivers/atm/idt77252.c 		skb_put(skb, SAR_FB_SIZE_3);
skb              1830 drivers/atm/idt77252.c 	memset(&skb->data[(skb->len & ~(0x3f)) - 64], 0, 2 * sizeof(u32));
skb              1832 drivers/atm/idt77252.c 	handle = IDT77252_PRV_POOL(skb);
skb              1833 drivers/atm/idt77252.c 	addr = IDT77252_PRV_PADDR(skb);
skb              1847 drivers/atm/idt77252.c 	struct sk_buff *skb;
skb              1852 drivers/atm/idt77252.c 		skb = dev_alloc_skb(size);
skb              1853 drivers/atm/idt77252.c 		if (!skb)
skb              1856 drivers/atm/idt77252.c 		if (sb_pool_add(card, skb, queue)) {
skb              1861 drivers/atm/idt77252.c 		paddr = dma_map_single(&card->pcidev->dev, skb->data,
skb              1862 drivers/atm/idt77252.c 				       skb_end_pointer(skb) - skb->data,
skb              1864 drivers/atm/idt77252.c 		IDT77252_PRV_PADDR(skb) = paddr;
skb              1866 drivers/atm/idt77252.c 		if (push_rx_skb(card, skb, queue)) {
skb              1875 drivers/atm/idt77252.c 	dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb              1876 drivers/atm/idt77252.c 			 skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
skb              1878 drivers/atm/idt77252.c 	handle = IDT77252_PRV_POOL(skb);
skb              1879 drivers/atm/idt77252.c 	card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
skb              1882 drivers/atm/idt77252.c 	dev_kfree_skb(skb);
skb              1887 drivers/atm/idt77252.c recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
skb              1889 drivers/atm/idt77252.c 	u32 handle = IDT77252_PRV_POOL(skb);
skb              1892 drivers/atm/idt77252.c 	dma_sync_single_for_device(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb              1893 drivers/atm/idt77252.c 				   skb_end_pointer(skb) - skb->data,
skb              1896 drivers/atm/idt77252.c 	err = push_rx_skb(card, skb, POOL_QUEUE(handle));
skb              1898 drivers/atm/idt77252.c 		dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb              1899 drivers/atm/idt77252.c 				 skb_end_pointer(skb) - skb->data,
skb              1901 drivers/atm/idt77252.c 		sb_pool_remove(card, skb);
skb              1902 drivers/atm/idt77252.c 		dev_kfree_skb(skb);
skb              1916 drivers/atm/idt77252.c 	struct sk_buff *skb, *tmp;
skb              1918 drivers/atm/idt77252.c 	skb_queue_walk_safe(&rpp->queue, skb, tmp)
skb              1919 drivers/atm/idt77252.c 		recycle_rx_skb(card, skb);
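push_rx_skb() (idt77252.c:1798 onward) rewinds a recycled skb to its allocation state before handing it back to the SAR: data back to head, tail and len reset, a 16-byte reserve, then a put sized to the free-buffer queue it belongs to (it also zeroes two status words near the end of the buffer, not modeled here). A small model of that rewind, with plain pointer fields in place of the real skb accessors:

#include <stdio.h>

struct fake_skb {
	char buf[2048];
	char *head, *data, *tail;	/* models skb->head/data/tail */
	unsigned int len;
};

/* models push_rx_skb()'s reset sequence; comments name the real calls */
static void rewind_for_rx(struct fake_skb *skb, unsigned int qsize)
{
	skb->data = skb->head;		/* skb->data = skb->head       */
	skb->tail = skb->data;		/* skb_reset_tail_pointer(skb) */
	skb->len  = 0;

	skb->data += 16;		/* skb_reserve(skb, 16)        */
	skb->tail  = skb->data;

	skb->tail += qsize;		/* skb_put(skb, SAR_FB_SIZE_n) */
	skb->len   = qsize;
}

int main(void)
{
	struct fake_skb skb;

	skb.head = skb.buf;
	rewind_for_rx(&skb, 512);
	printf("data offset %ld, len %u\n",
	       (long)(skb.data - skb.head), skb.len);	/* 16, 512 */
	return 0;
}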
skb              1943 drivers/atm/idt77252.c idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
skb              1953 drivers/atm/idt77252.c 		dev_kfree_skb(skb);
skb              1959 drivers/atm/idt77252.c 		dev_kfree_skb(skb);
skb              1971 drivers/atm/idt77252.c 		dev_kfree_skb(skb);
skb              1975 drivers/atm/idt77252.c 	if (skb_shinfo(skb)->nr_frags != 0) {
skb              1978 drivers/atm/idt77252.c 		dev_kfree_skb(skb);
skb              1981 drivers/atm/idt77252.c 	ATM_SKB(skb)->vcc = vcc;
skb              1983 drivers/atm/idt77252.c 	err = queue_skb(card, vc, skb, oam);
skb              1986 drivers/atm/idt77252.c 		dev_kfree_skb(skb);
skb              1993 drivers/atm/idt77252.c static int idt77252_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb              1995 drivers/atm/idt77252.c 	return idt77252_send_skb(vcc, skb, 0);
skb              2003 drivers/atm/idt77252.c 	struct sk_buff *skb;
skb              2005 drivers/atm/idt77252.c 	skb = dev_alloc_skb(64);
skb              2006 drivers/atm/idt77252.c 	if (!skb) {
skb              2011 drivers/atm/idt77252.c 	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
skb              2013 drivers/atm/idt77252.c 	skb_put_data(skb, cell, 52);
skb              2015 drivers/atm/idt77252.c 	return idt77252_send_skb(vcc, skb, 1);
skb              3039 drivers/atm/idt77252.c 	struct sk_buff *skb;
skb              3055 drivers/atm/idt77252.c 			skb = card->sbpool[i].skb[j];
skb              3056 drivers/atm/idt77252.c 			if (skb) {
skb              3058 drivers/atm/idt77252.c 						 IDT77252_PRV_PADDR(skb),
skb              3059 drivers/atm/idt77252.c 						 (skb_end_pointer(skb) -
skb              3060 drivers/atm/idt77252.c 						  skb->data),
skb              3062 drivers/atm/idt77252.c 				card->sbpool[i].skb[j] = NULL;
skb              3063 drivers/atm/idt77252.c 				dev_kfree_skb(skb);
skb              3742 drivers/atm/idt77252.c 	struct sk_buff *skb;
skb              3746 drivers/atm/idt77252.c 	if (sizeof(skb->cb) < sizeof(struct atm_skb_data) +
skb              3749 drivers/atm/idt77252.c 		       __func__, (unsigned long) sizeof(skb->cb),
skb               340 drivers/atm/idt77252.h 	struct sk_buff		*skb[FBQ_SIZE];
skb               794 drivers/atm/idt77252.h #define IDT77252_PRV_TBD(skb)	\
skb               795 drivers/atm/idt77252.h 	(((struct idt77252_skb_prv *)(ATM_SKB(skb)+1))->tbd)
skb               796 drivers/atm/idt77252.h #define IDT77252_PRV_PADDR(skb)	\
skb               797 drivers/atm/idt77252.h 	(((struct idt77252_skb_prv *)(ATM_SKB(skb)+1))->paddr)
skb               798 drivers/atm/idt77252.h #define IDT77252_PRV_POOL(skb)	\
skb               799 drivers/atm/idt77252.h 	(((struct idt77252_skb_prv *)(ATM_SKB(skb)+1))->pool)
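The IDT77252_PRV_* macros above carve the driver's per-skb private data (TBD, DMA address, pool handle) out of skb->cb, immediately after struct atm_skb_data; the check at idt77252.c:3746 verifies at init time that the control buffer is big enough. The nicstar NS_PRV_* macros (nicstar.h:647-652) use the identical trick. A user-space model of the layout; sizes and field contents here are illustrative:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define CB_SIZE 48			/* models sizeof(skb->cb) */

struct atm_skb_data { void *vcc; };	/* simplified */
struct drv_prv { unsigned long paddr; unsigned int pool; };

struct fake_skb {
	/* union forces pointer alignment, as the real skb->cb provides */
	union { char cb[CB_SIZE]; void *align; };
};

#define ATM_SKB(s)	((struct atm_skb_data *)(s)->cb)
#define DRV_PRV(s)	((struct drv_prv *)(ATM_SKB(s) + 1))

int main(void)
{
	struct fake_skb skb;

	/* the idt77252.c:3746 check, in spirit: private data must fit */
	assert(sizeof(skb.cb) >= sizeof(struct atm_skb_data) +
				 sizeof(struct drv_prv));

	memset(&skb, 0, sizeof(skb));
	DRV_PRV(&skb)->pool = 7;
	printf("pool handle: %u\n", DRV_PRV(&skb)->pool);
	return 0;
}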
skb               180 drivers/atm/iphase.c   struct sk_buff        *skb;
skb               204 drivers/atm/iphase.c            if (!(skb = dev->desc_tbl[i].txskb) || 
skb               612 drivers/atm/iphase.c static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
skb               615 drivers/atm/iphase.c    struct sk_buff *skb;
skb               620 drivers/atm/iphase.c    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
skb               621 drivers/atm/iphase.c       if (!(vcc = ATM_SKB(skb)->vcc)) {
skb               622 drivers/atm/iphase.c          dev_kfree_skb_any(skb);
skb               627 drivers/atm/iphase.c          dev_kfree_skb_any(skb);
skb               631 drivers/atm/iphase.c       if (ia_pkt_tx (vcc, skb)) {
skb               632 drivers/atm/iphase.c          skb_queue_head(&iadev->tx_backlog, skb);
skb               641 drivers/atm/iphase.c    struct sk_buff *skb = NULL, *skb1 = NULL;
skb               647 drivers/atm/iphase.c        skb = rtne->data.txskb;
skb               648 drivers/atm/iphase.c        if (!skb) {
skb               652 drivers/atm/iphase.c        vcc = ATM_SKB(skb)->vcc;
skb               655 drivers/atm/iphase.c            dev_kfree_skb_any(skb);
skb               662 drivers/atm/iphase.c            dev_kfree_skb_any(skb);
skb               667 drivers/atm/iphase.c        while (skb1 && (skb1 != skb)) {
skb               687 drivers/atm/iphase.c        if ((vcc->pop) && (skb->len != 0))
skb               689 drivers/atm/iphase.c           vcc->pop(vcc, skb);
skb               690 drivers/atm/iphase.c           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
skb               693 drivers/atm/iphase.c           dev_kfree_skb_any(skb);
skb              1103 drivers/atm/iphase.c 	struct sk_buff *skb;  
skb              1177 drivers/atm/iphase.c         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
skb              1182 drivers/atm/iphase.c 	skb_put(skb,len);  
skb              1184 drivers/atm/iphase.c         ATM_SKB(skb)->vcc = vcc;
skb              1185 drivers/atm/iphase.c         ATM_DESC(skb) = desc;        
skb              1186 drivers/atm/iphase.c 	skb_queue_tail(&iadev->rx_dma_q, skb);  
skb              1190 drivers/atm/iphase.c 	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
skb              1275 drivers/atm/iphase.c   struct sk_buff *skb;  
skb              1294 drivers/atm/iphase.c       skb = skb_dequeue(&iadev->rx_dma_q);  
skb              1295 drivers/atm/iphase.c       if (!skb)  
skb              1297 drivers/atm/iphase.c       desc = ATM_DESC(skb);
skb              1300 drivers/atm/iphase.c       if (!(len = skb->len))
skb              1303 drivers/atm/iphase.c 	  dev_kfree_skb_any(skb);  
skb              1314 drivers/atm/iphase.c           vcc = ATM_SKB(skb)->vcc;
skb              1317 drivers/atm/iphase.c               dev_kfree_skb_any(skb);
skb              1324 drivers/atm/iphase.c              atm_return(vcc, skb->truesize);
skb              1325 drivers/atm/iphase.c              dev_kfree_skb_any(skb);
skb              1329 drivers/atm/iphase.c           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
skb              1330 drivers/atm/iphase.c                                  skb->len - sizeof(*trailer));
skb              1333 drivers/atm/iphase.c                               (skb->len - sizeof(struct cpcs_trailer))))
skb              1337 drivers/atm/iphase.c                                                             length, skb->len);)
skb              1338 drivers/atm/iphase.c              atm_return(vcc, skb->truesize);
skb              1339 drivers/atm/iphase.c              dev_kfree_skb_any(skb);
skb              1342 drivers/atm/iphase.c           skb_trim(skb, length);
skb              1345 drivers/atm/iphase.c 	  IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
skb              1346 drivers/atm/iphase.c           xdump(skb->data, skb->len, "RX: ");
skb              1350 drivers/atm/iphase.c 	  vcc->push(vcc,skb);  
skb              1687 drivers/atm/iphase.c         struct sk_buff *skb;
skb              1702 drivers/atm/iphase.c             skb = skb_dequeue(&iadev->tx_dma_q); 
skb              1703 drivers/atm/iphase.c             if (!skb) break;
skb              1707 drivers/atm/iphase.c 		dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
skb              1710 drivers/atm/iphase.c             vcc = ATM_SKB(skb)->vcc;
skb              1714 drivers/atm/iphase.c                   dev_kfree_skb_any(skb);
skb              1722 drivers/atm/iphase.c                   dev_kfree_skb_any(skb);
skb              1726 drivers/atm/iphase.c                if ((vcc->pop) && (skb->len != 0))
skb              1728 drivers/atm/iphase.c                  vcc->pop(vcc, skb);
skb              1731 drivers/atm/iphase.c                  dev_kfree_skb_any(skb);
skb              1735 drivers/atm/iphase.c                IA_SKB_STATE(skb) |= IA_DLED;
skb              1736 drivers/atm/iphase.c                skb_queue_tail(&iavcc->txing_skb, skb);
skb              1738 drivers/atm/iphase.c             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
skb              2612 drivers/atm/iphase.c         struct sk_buff *skb = NULL;
skb              2631 drivers/atm/iphase.c            while((skb = skb_dequeue(&iadev->tx_backlog))) {
skb              2632 drivers/atm/iphase.c               if (ATM_SKB(skb)->vcc == vcc){ 
skb              2633 drivers/atm/iphase.c                  if (vcc->pop) vcc->pop(vcc, skb);
skb              2634 drivers/atm/iphase.c                  else dev_kfree_skb_any(skb);
skb              2637 drivers/atm/iphase.c                  skb_queue_tail(&tmp_tx_backlog, skb);
skb              2639 drivers/atm/iphase.c            while((skb = skb_dequeue(&tmp_tx_backlog))) 
skb              2640 drivers/atm/iphase.c              skb_queue_tail(&iadev->tx_backlog, skb);
skb              2897 drivers/atm/iphase.c static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
skb              2912 drivers/atm/iphase.c 		vcc->pop(vcc, skb);
skb              2914 drivers/atm/iphase.c 		dev_kfree_skb_any(skb);
skb              2918 drivers/atm/iphase.c         if (skb->len > iadev->tx_buf_sz - 8) {
skb              2921 drivers/atm/iphase.c                  vcc->pop(vcc, skb);
skb              2923 drivers/atm/iphase.c                  dev_kfree_skb_any(skb);
skb              2926 drivers/atm/iphase.c         if ((unsigned long)skb->data & 3) {
skb              2929 drivers/atm/iphase.c                  vcc->pop(vcc, skb);
skb              2931 drivers/atm/iphase.c                  dev_kfree_skb_any(skb);
skb              2950 drivers/atm/iphase.c 		    vcc->pop(vcc, skb);   
skb              2952 drivers/atm/iphase.c 		    dev_kfree_skb_any(skb);
skb              2965 drivers/atm/iphase.c         iadev->desc_tbl[desc-1].txskb = skb;
skb              2966 drivers/atm/iphase.c         IA_SKB_STATE(skb) = 0;
skb              2984 drivers/atm/iphase.c 	total_len = skb->len + sizeof(struct cpcs_trailer);  
skb              2986 drivers/atm/iphase.c 	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
skb              2991 drivers/atm/iphase.c                   skb, skb->data, skb->len, desc);)
skb              2994 drivers/atm/iphase.c 	trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
skb              2999 drivers/atm/iphase.c                                                         skb->len, tcnter++);  
skb              3000 drivers/atm/iphase.c         xdump(skb->data, skb->len, "TX: ");
skb              3018 drivers/atm/iphase.c 	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
skb              3019 drivers/atm/iphase.c 					      skb->len, DMA_TO_DEVICE);
skb              3023 drivers/atm/iphase.c 	wr_ptr->bytes = skb->len;  
skb              3050 drivers/atm/iphase.c         ATM_DESC(skb) = vcc->vci;
skb              3051 drivers/atm/iphase.c         skb_queue_tail(&iadev->tx_dma_q, skb);
skb              3077 drivers/atm/iphase.c static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb              3083 drivers/atm/iphase.c         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
skb              3085 drivers/atm/iphase.c             if (!skb)
skb              3087 drivers/atm/iphase.c             else dev_kfree_skb_any(skb);
skb              3092 drivers/atm/iphase.c             dev_kfree_skb_any(skb);
skb              3096 drivers/atm/iphase.c         ATM_SKB(skb)->vcc = vcc;
skb              3099 drivers/atm/iphase.c            skb_queue_tail(&iadev->tx_backlog, skb);
skb              3102 drivers/atm/iphase.c            if (ia_pkt_tx (vcc, skb)) {
skb              3103 drivers/atm/iphase.c               skb_queue_tail(&iadev->tx_backlog, skb);
skb               128 drivers/atm/iphase.h #define ATM_DESC(skb) (skb->protocol)
skb               129 drivers/atm/iphase.h #define IA_SKB_STATE(skb) (skb->protocol)
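iphase.c keeps a software TX backlog: ia_send() (iphase.c:3077) tries ia_pkt_tx() and parks the skb on tx_backlog when the hardware is busy, and the poll path (iphase.c:615) drains it in order, re-queueing at the head on failure so ordering is preserved. A toy FIFO model of that flow; try_tx() and its credit counter are invented stand-ins for the hardware:

#include <stdio.h>

#define QLEN 8
static void *backlog[QLEN];		/* models iadev->tx_backlog */
static unsigned int head, tail;

static void push_tail(void *skb) { backlog[tail++ % QLEN] = skb; }
static void push_head(void *skb) { backlog[--head % QLEN] = skb; }
static void *pop_head(void)
{
	return head == tail ? NULL : backlog[head++ % QLEN];
}

/* stand-in for ia_pkt_tx(): succeeds only while 'credits' last */
static int credits = 2;
static int try_tx(void *skb)
{
	if (!credits)
		return -1;
	credits--;
	printf("sent %p\n", skb);
	return 0;
}

static void send_pkt(void *skb)		/* models ia_send() */
{
	if (head != tail || try_tx(skb))
		push_tail(skb);		/* busy: park it, keep FIFO order */
}

static void tx_poll(void)		/* models ia_tx_poll() */
{
	void *skb;

	while ((skb = pop_head())) {
		if (try_tx(skb)) {	/* still busy: put it back */
			push_head(skb);
			break;
		}
	}
}

int main(void)
{
	char a, b, c;

	send_pkt(&a);
	send_pkt(&b);
	send_pkt(&c);			/* lands on the backlog */
	credits = 1;
	tx_poll();			/* drains it */
	return 0;
}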
skb               683 drivers/atm/lanai.c static inline void lanai_free_skb(struct atm_vcc *atmvcc, struct sk_buff *skb)
skb               686 drivers/atm/lanai.c 		atmvcc->pop(atmvcc, skb);
skb               688 drivers/atm/lanai.c 		dev_kfree_skb_any(skb);
skb               765 drivers/atm/lanai.c 	struct sk_buff *skb;
skb               773 drivers/atm/lanai.c 	while ((skb = skb_dequeue(&lvcc->tx.backlog)) != NULL)
skb               774 drivers/atm/lanai.c 		lanai_free_skb(lvcc->tx.atmvcc, skb);
skb              1279 drivers/atm/lanai.c 	struct lanai_vcc *lvcc, struct sk_buff *skb, int pdusize)
skb              1282 drivers/atm/lanai.c 	APRINTK(pdusize == aal5_size(skb->len),
skb              1284 drivers/atm/lanai.c 	    pdusize, aal5_size(skb->len));
skb              1286 drivers/atm/lanai.c 	pad = pdusize - skb->len - 8;
skb              1289 drivers/atm/lanai.c 	vcc_tx_memcpy(lvcc, skb->data, skb->len);
skb              1291 drivers/atm/lanai.c 	vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
skb              1293 drivers/atm/lanai.c 	lanai_free_skb(lvcc->tx.atmvcc, skb);
skb              1302 drivers/atm/lanai.c 	struct sk_buff *skb;
skb              1308 drivers/atm/lanai.c 		skb = skb_dequeue(&lvcc->tx.backlog);
skb              1309 drivers/atm/lanai.c 		if (skb == NULL)
skb              1311 drivers/atm/lanai.c 		n = aal5_size(skb->len);
skb              1314 drivers/atm/lanai.c 			skb_queue_head(&lvcc->tx.backlog, skb);
skb              1317 drivers/atm/lanai.c 		lanai_send_one_aal5(lanai, lvcc, skb, n);
skb              1328 drivers/atm/lanai.c 	struct sk_buff *skb)
skb              1335 drivers/atm/lanai.c 	n = aal5_size(skb->len);
skb              1340 drivers/atm/lanai.c 		skb_queue_tail(&lvcc->tx.backlog, skb);
skb              1343 drivers/atm/lanai.c 	lanai_send_one_aal5(lanai, lvcc, skb, n);
skb              1354 drivers/atm/lanai.c 	struct sk_buff *skb)
skb              1358 drivers/atm/lanai.c 	lanai_free_skb(lvcc->tx.atmvcc, skb);
skb              1381 drivers/atm/lanai.c 	struct sk_buff *skb;
skb              1407 drivers/atm/lanai.c 	skb = atm_alloc_charge(lvcc->rx.atmvcc, size, GFP_ATOMIC);
skb              1408 drivers/atm/lanai.c 	if (unlikely(skb == NULL)) {
skb              1412 drivers/atm/lanai.c 	skb_put(skb, size);
skb              1413 drivers/atm/lanai.c 	vcc_rx_memcpy(skb->data, lvcc, size);
skb              1414 drivers/atm/lanai.c 	ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
skb              1415 drivers/atm/lanai.c 	__net_timestamp(skb);
skb              1416 drivers/atm/lanai.c 	lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
skb              2389 drivers/atm/lanai.c static int lanai_send(struct atm_vcc *atmvcc, struct sk_buff *skb)
skb              2398 drivers/atm/lanai.c 	if (unlikely(skb == NULL)) {
skb              2407 drivers/atm/lanai.c 	ATM_SKB(skb)->vcc = atmvcc;
skb              2411 drivers/atm/lanai.c 			vcc_tx_aal5(lanai, lvcc, skb);
skb              2415 drivers/atm/lanai.c 			if (unlikely(skb->len != ATM_CELL_SIZE-1))
skb              2418 drivers/atm/lanai.c 			cpu_to_be32s((u32 *) skb->data);
skb              2420 drivers/atm/lanai.c 			vcc_tx_aal0(lanai, lvcc, skb);
skb              2427 drivers/atm/lanai.c 	lanai_free_skb(atmvcc, skb);
skb               132 drivers/atm/nicstar.c static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
skb               134 drivers/atm/nicstar.c 		     struct sk_buff *skb);
skb               139 drivers/atm/nicstar.c static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
skb               147 drivers/atm/nicstar.c static void which_list(ns_dev * card, struct sk_buff *skb);
skb               874 drivers/atm/nicstar.c 	scq->skb = kmalloc_array(size / NS_SCQE_SIZE,
skb               875 drivers/atm/nicstar.c 				 sizeof(*scq->skb),
skb               877 drivers/atm/nicstar.c 	if (!scq->skb) {
skb               896 drivers/atm/nicstar.c 		scq->skb[i] = NULL;
skb               908 drivers/atm/nicstar.c 			if (scq->skb[i] != NULL) {
skb               909 drivers/atm/nicstar.c 				vcc = ATM_SKB(scq->skb[i])->vcc;
skb               911 drivers/atm/nicstar.c 					vcc->pop(vcc, scq->skb[i]);
skb               913 drivers/atm/nicstar.c 					dev_kfree_skb_any(scq->skb[i]);
skb               921 drivers/atm/nicstar.c 				dev_kfree_skb_any(scq->skb[i]);
skb               924 drivers/atm/nicstar.c 				if (scq->skb[i] != NULL) {
skb               926 drivers/atm/nicstar.c 						vcc->pop(vcc, scq->skb[i]);
skb               928 drivers/atm/nicstar.c 						dev_kfree_skb_any(scq->skb[i]);
skb               932 drivers/atm/nicstar.c 	kfree(scq->skb);
skb               942 drivers/atm/nicstar.c static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
skb               953 drivers/atm/nicstar.c 	handle1 = skb;
skb               955 drivers/atm/nicstar.c 			       skb->data,
skb               956 drivers/atm/nicstar.c 			       (NS_PRV_BUFTYPE(skb) == BUF_SM
skb               959 drivers/atm/nicstar.c 	NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */
skb               970 drivers/atm/nicstar.c 	if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
skb              1000 drivers/atm/nicstar.c 		if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
skb              1035 drivers/atm/nicstar.c 		writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb),
skb              1041 drivers/atm/nicstar.c 			(NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"),
skb              1498 drivers/atm/nicstar.c 				scq->skb[index] = NULL;
skb              1533 drivers/atm/nicstar.c 			if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
skb              1534 drivers/atm/nicstar.c 				ATM_SKB(scq->skb[i])->vcc = NULL;
skb              1535 drivers/atm/nicstar.c 				atm_return(vcc, scq->skb[i]->truesize);
skb              1623 drivers/atm/nicstar.c static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb              1638 drivers/atm/nicstar.c 		dev_kfree_skb_any(skb);
skb              1646 drivers/atm/nicstar.c 		dev_kfree_skb_any(skb);
skb              1654 drivers/atm/nicstar.c 		dev_kfree_skb_any(skb);
skb              1658 drivers/atm/nicstar.c 	if (skb_shinfo(skb)->nr_frags != 0) {
skb              1661 drivers/atm/nicstar.c 		dev_kfree_skb_any(skb);
skb              1665 drivers/atm/nicstar.c 	ATM_SKB(skb)->vcc = vcc;
skb              1667 drivers/atm/nicstar.c 	NS_PRV_DMA(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb              1668 drivers/atm/nicstar.c 					 skb->len, DMA_TO_DEVICE);
skb              1671 drivers/atm/nicstar.c 		buflen = (skb->len + 47 + 8) / 48 * 48;	/* Multiple of 48 */
skb              1673 drivers/atm/nicstar.c 		scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb));
skb              1674 drivers/atm/nicstar.c 		scqe.word_3 = cpu_to_le32(skb->len);
skb              1677 drivers/atm/nicstar.c 				    ATM_SKB(skb)->
skb              1684 drivers/atm/nicstar.c 		scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER);
skb              1686 drivers/atm/nicstar.c 		if (*skb->data & 0x02)	/* Payload type 1 - end of pdu */
skb              1689 drivers/atm/nicstar.c 		    cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
skb              1707 drivers/atm/nicstar.c 	if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
skb              1709 drivers/atm/nicstar.c 		dev_kfree_skb_any(skb);
skb              1718 drivers/atm/nicstar.c 		     struct sk_buff *skb)
skb              1750 drivers/atm/nicstar.c 	scq->skb[index] = skb;
skb              1752 drivers/atm/nicstar.c 		card->index, skb, index);
skb              1805 drivers/atm/nicstar.c 			scq->skb[index] = NULL;
skb              1907 drivers/atm/nicstar.c 	struct sk_buff *skb;
skb              1923 drivers/atm/nicstar.c 		skb = scq->skb[i];
skb              1925 drivers/atm/nicstar.c 			card->index, skb, i);
skb              1926 drivers/atm/nicstar.c 		if (skb != NULL) {
skb              1928 drivers/atm/nicstar.c 					 NS_PRV_DMA(skb),
skb              1929 drivers/atm/nicstar.c 					 skb->len,
skb              1931 drivers/atm/nicstar.c 			vcc = ATM_SKB(skb)->vcc;
skb              1933 drivers/atm/nicstar.c 				vcc->pop(vcc, skb);
skb              1935 drivers/atm/nicstar.c 				dev_kfree_skb_irq(skb);
skb              1937 drivers/atm/nicstar.c 			scq->skb[i] = NULL;
skb              1971 drivers/atm/nicstar.c 	struct sk_buff *skb;
skb              1982 drivers/atm/nicstar.c 	skb = idr_remove(&card->idr, id);
skb              1983 drivers/atm/nicstar.c 	if (!skb) {
skb              1989 drivers/atm/nicstar.c 				NS_PRV_DMA(skb),
skb              1990 drivers/atm/nicstar.c 				(NS_PRV_BUFTYPE(skb) == BUF_SM
skb              1994 drivers/atm/nicstar.c 			 NS_PRV_DMA(skb),
skb              1995 drivers/atm/nicstar.c 			 (NS_PRV_BUFTYPE(skb) == BUF_SM
skb              2003 drivers/atm/nicstar.c 		recycle_rx_buf(card, skb);
skb              2011 drivers/atm/nicstar.c 		recycle_rx_buf(card, skb);
skb              2022 drivers/atm/nicstar.c 		cell = skb->data;
skb              2055 drivers/atm/nicstar.c 		recycle_rx_buf(card, skb);
skb              2069 drivers/atm/nicstar.c 				recycle_rx_buf(card, skb);
skb              2101 drivers/atm/nicstar.c 	iov->iov_base = (void *)skb;
skb              2107 drivers/atm/nicstar.c 		if (NS_PRV_BUFTYPE(skb) != BUF_SM) {
skb              2111 drivers/atm/nicstar.c 			which_list(card, skb);
skb              2113 drivers/atm/nicstar.c 			recycle_rx_buf(card, skb);
skb              2120 drivers/atm/nicstar.c 		if (NS_PRV_BUFTYPE(skb) != BUF_LG) {
skb              2124 drivers/atm/nicstar.c 			which_list(card, skb);
skb              2138 drivers/atm/nicstar.c 						(skb->data + iov->iov_len - 6);
skb              2160 drivers/atm/nicstar.c 			if (!atm_charge(vcc, skb->truesize)) {
skb              2161 drivers/atm/nicstar.c 				push_rxbufs(card, skb);
skb              2164 drivers/atm/nicstar.c 				skb_put(skb, len);
skb              2165 drivers/atm/nicstar.c 				dequeue_sm_buf(card, skb);
skb              2166 drivers/atm/nicstar.c 				ATM_SKB(skb)->vcc = vcc;
skb              2167 drivers/atm/nicstar.c 				__net_timestamp(skb);
skb              2168 drivers/atm/nicstar.c 				vcc->push(vcc, skb);
skb              2190 drivers/atm/nicstar.c 				push_rxbufs(card, skb);
skb              2194 drivers/atm/nicstar.c 				if (!atm_charge(vcc, skb->truesize)) {
skb              2195 drivers/atm/nicstar.c 					push_rxbufs(card, skb);
skb              2198 drivers/atm/nicstar.c 					dequeue_lg_buf(card, skb);
skb              2199 drivers/atm/nicstar.c 					skb_push(skb, NS_SMBUFSIZE);
skb              2200 drivers/atm/nicstar.c 					skb_copy_from_linear_data(sb, skb->data,
skb              2202 drivers/atm/nicstar.c 					skb_put(skb, len - NS_SMBUFSIZE);
skb              2203 drivers/atm/nicstar.c 					ATM_SKB(skb)->vcc = vcc;
skb              2204 drivers/atm/nicstar.c 					__net_timestamp(skb);
skb              2205 drivers/atm/nicstar.c 					vcc->push(vcc, skb);
skb              2322 drivers/atm/nicstar.c static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
skb              2324 drivers/atm/nicstar.c 	if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
skb              2327 drivers/atm/nicstar.c 		dev_kfree_skb_any(skb);
skb              2329 drivers/atm/nicstar.c 		push_rxbufs(card, skb);
skb              2677 drivers/atm/nicstar.c static void which_list(ns_dev * card, struct sk_buff *skb)
skb              2679 drivers/atm/nicstar.c 	printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb));
skb               647 drivers/atm/nicstar.h #define NS_PRV_BUFTYPE(skb)   \
skb               648 drivers/atm/nicstar.h         (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->buf_type)
skb               649 drivers/atm/nicstar.h #define NS_PRV_DMA(skb) \
skb               650 drivers/atm/nicstar.h         (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->dma)
skb               651 drivers/atm/nicstar.h #define NS_PRV_IOVCNT(skb) \
skb               652 drivers/atm/nicstar.h         (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->iovcnt)
skb               670 drivers/atm/nicstar.h 	struct sk_buff **skb;	/* Pointer to an array of pointers
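nicstar keeps scq->skb, an array of skb pointers parallel to the hardware segmentation-channel queue (nicstar.h:670): push_scqe() stores the skb at the entry's index, and the drain path uses the same index to unmap and pop or free it. A compact ring model of that shadow-array bookkeeping; the two-word entry layout is invented for illustration:

#include <stdio.h>

#define RING 8

struct ring_entry { unsigned int word_2, word_3; };	/* addr, len */

struct scq {
	struct ring_entry ring[RING];
	void *skb[RING];	/* shadow array, cf. nicstar.h:670 */
	unsigned int head, tail;
};

/* models push_scqe(): write the hardware words, remember the skb */
static int push(struct scq *q, void *skb, unsigned int addr, unsigned int len)
{
	unsigned int i = q->tail % RING;

	if (q->tail - q->head == RING)
		return -1;		/* ring full */
	q->ring[i].word_2 = addr;
	q->ring[i].word_3 = len;
	q->skb[i] = skb;
	q->tail++;
	return 0;
}

/* models the TX-complete side: the same index finds the owning skb;
 * the real driver then dma_unmap_single()s it and pops or frees it */
static void *complete(struct scq *q)
{
	unsigned int i;

	if (q->head == q->tail)
		return NULL;
	i = q->head++ % RING;
	return q->skb[i];
}

int main(void)
{
	struct scq q = { 0 };
	char pkt;

	push(&q, &pkt, 0x1000, 64);
	printf("completed skb %p\n", complete(&q));
	return 0;
}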
skb                97 drivers/atm/solos-pci.c #define SKB_CB(skb)		((struct solos_skb_cb *)skb->cb)
skb               162 drivers/atm/solos-pci.c static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
skb               173 drivers/atm/solos-pci.c static inline void solos_pop(struct atm_vcc *vcc, struct sk_buff *skb)
skb               176 drivers/atm/solos-pci.c                 vcc->pop(vcc, skb);
skb               178 drivers/atm/solos-pci.c                 dev_kfree_skb_any(skb);
skb               187 drivers/atm/solos-pci.c 	struct sk_buff *skb;
skb               193 drivers/atm/solos-pci.c 	skb = alloc_skb(sizeof(*header) + buflen, GFP_KERNEL);
skb               194 drivers/atm/solos-pci.c 	if (!skb) {
skb               199 drivers/atm/solos-pci.c 	header = skb_put(skb, sizeof(*header));
skb               203 drivers/atm/solos-pci.c 	skb_put(skb, buflen);
skb               218 drivers/atm/solos-pci.c 	fpga_queue(card, prm.port, skb, NULL);
skb               242 drivers/atm/solos-pci.c 	struct sk_buff *skb;
skb               249 drivers/atm/solos-pci.c 	skb = alloc_skb(sizeof(*header) + buflen, GFP_KERNEL);
skb               250 drivers/atm/solos-pci.c 	if (!skb) {
skb               255 drivers/atm/solos-pci.c 	header = skb_put(skb, sizeof(*header));
skb               260 drivers/atm/solos-pci.c 	skb_put(skb, buflen);
skb               274 drivers/atm/solos-pci.c 	fpga_queue(card, prm.port, skb, NULL);
skb               282 drivers/atm/solos-pci.c 	skb = prm.response;
skb               284 drivers/atm/solos-pci.c 	if (!skb)
skb               287 drivers/atm/solos-pci.c 	buflen = skb->len;
skb               290 drivers/atm/solos-pci.c 	if (skb->data[buflen - 1] == '\n')
skb               293 drivers/atm/solos-pci.c 	if (buflen == 2 && !strncmp(skb->data, "OK", 2))
skb               295 drivers/atm/solos-pci.c 	else if (buflen == 5 && !strncmp(skb->data, "ERROR", 5))
skb               300 drivers/atm/solos-pci.c 		skb->data[buflen] = 0;
skb               303 drivers/atm/solos-pci.c 			 skb->data);
skb               306 drivers/atm/solos-pci.c 	kfree_skb(skb);
skb               311 drivers/atm/solos-pci.c static char *next_string(struct sk_buff *skb)
skb               314 drivers/atm/solos-pci.c 	char *this = skb->data;
skb               316 drivers/atm/solos-pci.c 	for (i = 0; i < skb->len; i++) {
skb               319 drivers/atm/solos-pci.c 			skb_pull(skb, i + 1);
skb               339 drivers/atm/solos-pci.c static int process_status(struct solos_card *card, int port, struct sk_buff *skb)
skb               347 drivers/atm/solos-pci.c 	str = next_string(skb);
skb               362 drivers/atm/solos-pci.c 	str = next_string(skb);
skb               375 drivers/atm/solos-pci.c 	str = next_string(skb);
skb               382 drivers/atm/solos-pci.c 	state_str = next_string(skb);
skb               393 drivers/atm/solos-pci.c 	snr = next_string(skb);
skb               396 drivers/atm/solos-pci.c 	attn = next_string(skb);
skb               410 drivers/atm/solos-pci.c static int process_command(struct solos_card *card, int port, struct sk_buff *skb)
skb               417 drivers/atm/solos-pci.c 	if (skb->len < 7)
skb               420 drivers/atm/solos-pci.c 	if (skb->data[0] != 'L'    || !isdigit(skb->data[1]) ||
skb               421 drivers/atm/solos-pci.c 	    !isdigit(skb->data[2]) || !isdigit(skb->data[3]) ||
skb               422 drivers/atm/solos-pci.c 	    !isdigit(skb->data[4]) || !isdigit(skb->data[5]) ||
skb               423 drivers/atm/solos-pci.c 	    skb->data[6] != '\n')
skb               426 drivers/atm/solos-pci.c 	err = kstrtoint(&skb->data[1], 10, &cmdpid);
skb               433 drivers/atm/solos-pci.c 			prm->response = skb;
skb               434 drivers/atm/solos-pci.c 			skb_pull(skb, 7);
skb               449 drivers/atm/solos-pci.c 	struct sk_buff *skb;
skb               453 drivers/atm/solos-pci.c 	skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
skb               455 drivers/atm/solos-pci.c 	if(skb == NULL)
skb               458 drivers/atm/solos-pci.c 	len = skb->len;
skb               459 drivers/atm/solos-pci.c 	memcpy(buf, skb->data, len);
skb               461 drivers/atm/solos-pci.c 	kfree_skb(skb);
skb               467 drivers/atm/solos-pci.c 	struct sk_buff *skb;
skb               474 drivers/atm/solos-pci.c 	skb = alloc_skb(size + sizeof(*header), GFP_ATOMIC);
skb               475 drivers/atm/solos-pci.c 	if (!skb) {
skb               480 drivers/atm/solos-pci.c 	header = skb_put(skb, sizeof(*header));
skb               487 drivers/atm/solos-pci.c 	skb_put_data(skb, buf, size);
skb               489 drivers/atm/solos-pci.c 	fpga_queue(card, dev, skb, NULL);
skb               774 drivers/atm/solos-pci.c 			struct sk_buff *skb;
skb               779 drivers/atm/solos-pci.c 				skb = card->rx_skb[port];
skb               782 drivers/atm/solos-pci.c 				dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
skb               785 drivers/atm/solos-pci.c 				header = (void *)skb->data;
skb               787 drivers/atm/solos-pci.c 				skb_put(skb, size + sizeof(*header));
skb               788 drivers/atm/solos-pci.c 				skb_pull(skb, sizeof(*header));
skb               807 drivers/atm/solos-pci.c 				skb = netdev_alloc_skb_ip_align(NULL, size + 1);
skb               808 drivers/atm/solos-pci.c 				if (!skb) {
skb               814 drivers/atm/solos-pci.c 				memcpy_fromio(skb_put(skb, size),
skb               823 drivers/atm/solos-pci.c 				print_buffer(skb);
skb               835 drivers/atm/solos-pci.c 					dev_kfree_skb_any(skb);
skb               838 drivers/atm/solos-pci.c 				atm_charge(vcc, skb->truesize);
skb               839 drivers/atm/solos-pci.c 				vcc->push(vcc, skb);
skb               844 drivers/atm/solos-pci.c 				if (process_status(card, port, skb) &&
skb               846 drivers/atm/solos-pci.c 					dev_warn(&card->dev->dev, "Bad status packet of %d bytes on port %d:\n", skb->len, port);
skb               847 drivers/atm/solos-pci.c 					print_buffer(skb);
skb               849 drivers/atm/solos-pci.c 				dev_kfree_skb_any(skb);
skb               854 drivers/atm/solos-pci.c 				if (process_command(card, port, skb))
skb               861 drivers/atm/solos-pci.c 					dev_kfree_skb_any(skb);
skb               863 drivers/atm/solos-pci.c 					skb_queue_tail(&card->cli_queue[port], skb);
skb               874 drivers/atm/solos-pci.c 			struct sk_buff *skb = dev_alloc_skb(RX_DMA_SIZE);
skb               875 drivers/atm/solos-pci.c 			if (skb) {
skb               876 drivers/atm/solos-pci.c 				SKB_CB(skb)->dma_addr =
skb               877 drivers/atm/solos-pci.c 					dma_map_single(&card->dev->dev, skb->data,
skb               879 drivers/atm/solos-pci.c 				iowrite32(SKB_CB(skb)->dma_addr,
skb               881 drivers/atm/solos-pci.c 				card->rx_skb[port] = skb;
skb               921 drivers/atm/solos-pci.c 	struct sk_buff *skb;
skb               930 drivers/atm/solos-pci.c 	skb = alloc_skb(sizeof(*header), GFP_KERNEL);
skb               931 drivers/atm/solos-pci.c 	if (!skb) {
skb               936 drivers/atm/solos-pci.c 	header = skb_put(skb, sizeof(*header));
skb               943 drivers/atm/solos-pci.c 	fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL);
skb               955 drivers/atm/solos-pci.c 	struct sk_buff *skb, *tmpskb;
skb               960 drivers/atm/solos-pci.c 	skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
skb               961 drivers/atm/solos-pci.c 		if (SKB_CB(skb)->vcc == vcc) {
skb               962 drivers/atm/solos-pci.c 			skb_unlink(skb, &card->tx_queue[port]);
skb               963 drivers/atm/solos-pci.c 			solos_pop(vcc, skb);
skb               968 drivers/atm/solos-pci.c 	skb = alloc_skb(sizeof(*header), GFP_KERNEL);
skb               969 drivers/atm/solos-pci.c 	if (!skb) {
skb               973 drivers/atm/solos-pci.c 	header = skb_put(skb, sizeof(*header));
skb               980 drivers/atm/solos-pci.c 	skb_get(skb);
skb               981 drivers/atm/solos-pci.c 	fpga_queue(card, port, skb, NULL);
skb               983 drivers/atm/solos-pci.c 	if (!wait_event_timeout(card->param_wq, !skb_shared(skb), 5 * HZ))
skb               987 drivers/atm/solos-pci.c 	dev_kfree_skb(skb);
skb              1028 drivers/atm/solos-pci.c static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
skb              1034 drivers/atm/solos-pci.c 	SKB_CB(skb)->vcc = vcc;
skb              1038 drivers/atm/solos-pci.c 	skb_queue_tail(&card->tx_queue[port], skb);
skb              1053 drivers/atm/solos-pci.c 	struct sk_buff *skb;
skb              1079 drivers/atm/solos-pci.c 			skb = skb_dequeue(&card->tx_queue[port]);
skb              1080 drivers/atm/solos-pci.c 			if (!skb)
skb              1084 drivers/atm/solos-pci.c 			if (skb && !card->using_dma) {
skb              1085 drivers/atm/solos-pci.c 				memcpy_toio(TX_BUF(card, port), skb->data, skb->len);
skb              1087 drivers/atm/solos-pci.c 				oldskb = skb; /* We're done with this skb already */
skb              1088 drivers/atm/solos-pci.c 			} else if (skb && card->using_dma) {
skb              1089 drivers/atm/solos-pci.c 				unsigned char *data = skb->data;
skb              1092 drivers/atm/solos-pci.c 					memcpy(data, skb->data, skb->len);
skb              1094 drivers/atm/solos-pci.c 				SKB_CB(skb)->dma_addr = dma_map_single(&card->dev->dev, data,
skb              1095 drivers/atm/solos-pci.c 								       skb->len, DMA_TO_DEVICE);
skb              1096 drivers/atm/solos-pci.c 				card->tx_skb[port] = skb;
skb              1097 drivers/atm/solos-pci.c 				iowrite32(SKB_CB(skb)->dma_addr,
skb              1137 drivers/atm/solos-pci.c static int psend(struct atm_vcc *vcc, struct sk_buff *skb)
skb              1143 drivers/atm/solos-pci.c 	pktlen = skb->len;
skb              1146 drivers/atm/solos-pci.c 		solos_pop(vcc, skb);
skb              1150 drivers/atm/solos-pci.c 	if (!skb_clone_writable(skb, sizeof(*header))) {
skb              1154 drivers/atm/solos-pci.c 		if (skb_headroom(skb) < sizeof(*header))
skb              1155 drivers/atm/solos-pci.c 			expand_by = sizeof(*header) - skb_headroom(skb);
skb              1157 drivers/atm/solos-pci.c 		ret = pskb_expand_head(skb, expand_by, 0, GFP_ATOMIC);
skb              1160 drivers/atm/solos-pci.c 			solos_pop(vcc, skb);
skb              1165 drivers/atm/solos-pci.c 	header = skb_push(skb, sizeof(*header));
skb              1173 drivers/atm/solos-pci.c 	fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, vcc);
skb              1359 drivers/atm/solos-pci.c 		struct sk_buff *skb;
skb              1384 drivers/atm/solos-pci.c 		skb = alloc_skb(sizeof(*header), GFP_KERNEL);
skb              1385 drivers/atm/solos-pci.c 		if (!skb) {
skb              1390 drivers/atm/solos-pci.c 		header = skb_put(skb, sizeof(*header));
skb              1397 drivers/atm/solos-pci.c 		fpga_queue(card, i, skb, NULL);
skb              1408 drivers/atm/solos-pci.c 			struct sk_buff *skb;
skb              1415 drivers/atm/solos-pci.c 			skb = card->rx_skb[i];
skb              1416 drivers/atm/solos-pci.c 			if (skb) {
skb              1417 drivers/atm/solos-pci.c 				dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
skb              1419 drivers/atm/solos-pci.c 				dev_kfree_skb(skb);
skb              1421 drivers/atm/solos-pci.c 			skb = card->tx_skb[i];
skb              1422 drivers/atm/solos-pci.c 			if (skb) {
skb              1423 drivers/atm/solos-pci.c 				dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
skb              1424 drivers/atm/solos-pci.c 						 skb->len, DMA_TO_DEVICE);
skb              1425 drivers/atm/solos-pci.c 				dev_kfree_skb(skb);
skb              1427 drivers/atm/solos-pci.c 			while ((skb = skb_dequeue(&card->tx_queue[i])))
skb              1428 drivers/atm/solos-pci.c 				dev_kfree_skb(skb);
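psend() (solos-pci.c:1137) has to prepend the card's packet header to each PDU: if the skb is not clone-writable it grows headroom with pskb_expand_head(), then skb_push()es the header on. A malloc-based model of the same ensure-headroom-then-push dance; the pkt_hdr field names are assumed for illustration, not taken from the listing:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt_hdr { unsigned short size, vpi, vci, type; };	/* assumed */

struct fake_skb {
	unsigned char *mem, *data;	/* allocation start, payload start */
	size_t len, headroom;
};

static int push_header(struct fake_skb *skb, const struct pkt_hdr *h)
{
	if (skb->headroom < sizeof(*h)) {
		/* models pskb_expand_head(skb, expand_by, 0, ...) */
		size_t grow = sizeof(*h) - skb->headroom;
		unsigned char *m = malloc(skb->headroom + grow + skb->len);

		if (!m)
			return -1;	/* psend() pops the skb and bails */
		memcpy(m + skb->headroom + grow, skb->data, skb->len);
		free(skb->mem);
		skb->mem = m;
		skb->data = m + skb->headroom + grow;
		skb->headroom += grow;
	}
	skb->data -= sizeof(*h);	/* models skb_push() */
	skb->headroom -= sizeof(*h);
	skb->len += sizeof(*h);
	memcpy(skb->data, h, sizeof(*h));
	return 0;
}

int main(void)
{
	struct pkt_hdr h = { 64, 0, 32, 0 };
	unsigned char *m = malloc(64);		/* payload, no headroom */
	struct fake_skb skb = { m, m, 64, 0 };

	if (m && push_header(&skb, &h) == 0)
		printf("len %zu, headroom %zu\n", skb.len, skb.headroom);
	free(skb.mem);
	return 0;
}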
skb               176 drivers/atm/zatm.c 	struct sk_buff	*skb;	/* back pointer to skb (for poll_rx) */
skb               183 drivers/atm/zatm.c 	struct sk_buff *skb;
skb               216 drivers/atm/zatm.c 		skb = alloc_skb(size,GFP_ATOMIC);
skb               217 drivers/atm/zatm.c 		if (!skb) {
skb               222 drivers/atm/zatm.c 		skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+
skb               224 drivers/atm/zatm.c 		    skb->data);
skb               225 drivers/atm/zatm.c 		head = (struct rx_buffer_head *) skb->data;
skb               226 drivers/atm/zatm.c 		skb_reserve(skb,sizeof(struct rx_buffer_head));
skb               229 drivers/atm/zatm.c 		head->buffer = virt_to_bus(skb->data);
skb               231 drivers/atm/zatm.c 		head->skb = skb;
skb               232 drivers/atm/zatm.c 		EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb,
skb               238 drivers/atm/zatm.c 		zatm_dev->last_free[pool] = skb;
skb               239 drivers/atm/zatm.c 		skb_queue_tail(&zatm_dev->pool[pool],skb);
skb               373 drivers/atm/zatm.c 		struct sk_buff *skb;
skb               403 drivers/atm/zatm.c 		skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
skb               404 drivers/atm/zatm.c 		__net_timestamp(skb);
skb               406 drivers/atm/zatm.c printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
skb               407 drivers/atm/zatm.c   ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
skb               408 drivers/atm/zatm.c   ((unsigned *) skb->data)[0]);
skb               410 drivers/atm/zatm.c 		EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb,
skb               415 drivers/atm/zatm.c 		size = error ? 0 : ntohs(((__be16 *) skb->data)[cells*
skb               417 drivers/atm/zatm.c 		EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size);
skb               424 drivers/atm/zatm.c 			if (skb == zatm_dev->last_free[pos])
skb               426 drivers/atm/zatm.c 			skb_unlink(skb, zatm_dev->pool + pos);
skb               463 drivers/atm/zatm.c 			dev_kfree_skb_irq(skb);
skb               467 drivers/atm/zatm.c 		if (!atm_charge(vcc,skb->truesize)) {
skb               468 drivers/atm/zatm.c 			dev_kfree_skb_irq(skb);
skb               471 drivers/atm/zatm.c 		skb->len = size;
skb               472 drivers/atm/zatm.c 		ATM_SKB(skb)->vcc = vcc;
skb               473 drivers/atm/zatm.c 		vcc->push(vcc,skb);
skb               632 drivers/atm/zatm.c static int do_tx(struct sk_buff *skb)
skb               641 drivers/atm/zatm.c 	DPRINTK("sending skb %p\n",skb);
skb               642 drivers/atm/zatm.c 	vcc = ATM_SKB(skb)->vcc;
skb               645 drivers/atm/zatm.c 	EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0);
skb               647 drivers/atm/zatm.c 	if (!skb_shinfo(skb)->nr_frags) {
skb               657 drivers/atm/zatm.c 		dsc[2] = skb->len;
skb               658 drivers/atm/zatm.c 		dsc[3] = virt_to_bus(skb->data);
skb               662 drivers/atm/zatm.c 		    (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
skb               674 drivers/atm/zatm.c 			uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC);
skb               677 drivers/atm/zatm.c 				vcc->pop(vcc, skb);
skb               679 drivers/atm/zatm.c 				dev_kfree_skb_irq(skb);
skb               686 drivers/atm/zatm.c 		    (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
skb               689 drivers/atm/zatm.c 		dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE;
skb               691 drivers/atm/zatm.c 		for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) {
skb               692 drivers/atm/zatm.c 			*put++ = ((struct iovec *) skb->data)[i].iov_len;
skb               694 drivers/atm/zatm.c 			    skb->data)[i].iov_base);
skb               699 drivers/atm/zatm.c 	ZATM_PRV_DSC(skb) = dsc;
skb               700 drivers/atm/zatm.c 	skb_queue_tail(&zatm_vcc->tx_queue,skb);
skb               715 drivers/atm/zatm.c 	struct sk_buff *skb;
skb               719 drivers/atm/zatm.c 	skb = skb_dequeue(&zatm_vcc->tx_queue);
skb               720 drivers/atm/zatm.c 	if (!skb) {
skb               726 drivers/atm/zatm.c if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
skb               728 drivers/atm/zatm.c   *ZATM_PRV_DSC(skb));
skb               730 drivers/atm/zatm.c 	*ZATM_PRV_DSC(skb) = 0; /* mark as invalid */
skb               732 drivers/atm/zatm.c 	if (vcc->pop) vcc->pop(vcc,skb);
skb               733 drivers/atm/zatm.c 	else dev_kfree_skb_irq(skb);
skb               734 drivers/atm/zatm.c 	while ((skb = skb_dequeue(&zatm_vcc->backlog)))
skb               735 drivers/atm/zatm.c 		if (do_tx(skb) == RING_BUSY) {
skb               736 drivers/atm/zatm.c 			skb_queue_head(&zatm_vcc->backlog,skb);
skb              1532 drivers/atm/zatm.c static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb)
skb              1536 drivers/atm/zatm.c 	EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0);
skb              1538 drivers/atm/zatm.c 		if (vcc->pop) vcc->pop(vcc,skb);
skb              1539 drivers/atm/zatm.c 		else dev_kfree_skb(skb);
skb              1542 drivers/atm/zatm.c 	if (!skb) {
skb              1544 drivers/atm/zatm.c 		if (vcc->pop) vcc->pop(vcc,skb);
skb              1547 drivers/atm/zatm.c 	ATM_SKB(skb)->vcc = vcc;
skb              1548 drivers/atm/zatm.c 	error = do_tx(skb);
skb              1550 drivers/atm/zatm.c 	skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb);
skb               102 drivers/atm/zatm.h #define ZATM_PRV_DSC(skb) (((struct zatm_skb_prv *) (skb)->cb)->dsc)
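zatm.c prefixes every pooled rx buffer with a struct rx_buffer_head carrying a back pointer to the owning skb (zatm.c:231), so the rx handler can go from the buffer address the card reports straight back to the skb (the bus_to_virt() lookup at zatm.c:403). A sketch of the back-pointer layout; the real header also records the bus address of the data, omitted here:

#include <stdio.h>
#include <stdlib.h>

struct fake_skb { unsigned char *data; };

struct rx_buffer_head {
	struct fake_skb *skb;	/* back pointer, set at refill time */
};

static struct fake_skb *alloc_pool_buf(size_t size)
{
	struct fake_skb *skb = malloc(sizeof(*skb));
	unsigned char *buf = malloc(sizeof(struct rx_buffer_head) + size);
	struct rx_buffer_head *head = (struct rx_buffer_head *)buf;

	if (!skb || !buf) {
		free(skb);
		free(buf);
		return NULL;
	}
	head->skb = skb;			/* head->skb = skb */
	skb->data = buf + sizeof(*head);	/* reserve past the header */
	return skb;
}

int main(void)
{
	struct fake_skb *skb = alloc_pool_buf(1024);
	struct rx_buffer_head *head;

	if (!skb)
		return 1;
	/* rx side: step back from the reported data address to the
	 * header and recover the owning skb, as poll_rx does */
	head = (struct rx_buffer_head *)(skb->data - sizeof(*head));
	printf("recovered skb %p (orig %p)\n", (void *)head->skb, (void *)skb);
	free(head);
	free(skb);
	return 0;
}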
skb               125 drivers/block/aoe/aoe.h 	struct sk_buff *skb;		/* command skb freed on module exit */
skb                85 drivers/block/aoe/aoechr.c 	struct sk_buff *skb;
skb               106 drivers/block/aoe/aoechr.c 	skb = aoecmd_ata_id(d);
skb               111 drivers/block/aoe/aoechr.c 	if (!skb && !msleep_interruptible(250)) {
skb               116 drivers/block/aoe/aoechr.c 	if (skb) {
skb               119 drivers/block/aoe/aoechr.c 		__skb_queue_tail(&queue, skb);
skb                65 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb                67 drivers/block/aoe/aoecmd.c 	skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
skb                68 drivers/block/aoe/aoecmd.c 	if (skb) {
skb                69 drivers/block/aoe/aoecmd.c 		skb_reserve(skb, MAX_HEADER);
skb                70 drivers/block/aoe/aoecmd.c 		skb_reset_mac_header(skb);
skb                71 drivers/block/aoe/aoecmd.c 		skb_reset_network_header(skb);
skb                72 drivers/block/aoe/aoecmd.c 		skb->protocol = __constant_htons(ETH_P_AOE);
skb                73 drivers/block/aoe/aoecmd.c 		skb_checksum_none_assert(skb);
skb                75 drivers/block/aoe/aoecmd.c 	return skb;
skb               171 drivers/block/aoe/aoecmd.c skb_pool_put(struct aoedev *d, struct sk_buff *skb)
skb               173 drivers/block/aoe/aoecmd.c 	__skb_queue_tail(&d->skbpool, skb);
skb               179 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb = skb_peek(&d->skbpool);
skb               181 drivers/block/aoe/aoecmd.c 	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
skb               182 drivers/block/aoe/aoecmd.c 		__skb_unlink(skb, &d->skbpool);
skb               183 drivers/block/aoe/aoecmd.c 		return skb;
skb               186 drivers/block/aoe/aoecmd.c 	    (skb = new_skb(ETH_ZLEN)))
skb               187 drivers/block/aoe/aoecmd.c 		return skb;
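skb_pool_get() above reuses a pooled skb only when skb_shinfo(skb)->dataref has dropped back to 1 (aoecmd.c:181), i.e. no clone handed to the network stack still references its data, and otherwise falls back to new_skb(). A model of that refcount-gated reuse, with a plain int standing in for the dataref counter:

#include <stdio.h>
#include <stdlib.h>

struct buf { int dataref; };	/* stands in for skb_shinfo dataref */

/* models skb_pool_get(): reuse only a buffer nobody else references */
static struct buf *pool_get(struct buf **pool, int n)
{
	for (int i = 0; i < n; i++) {
		if (pool[i] && pool[i]->dataref == 1) {
			struct buf *b = pool[i];

			pool[i] = NULL;
			return b;	/* safe: no outstanding clone */
		}
	}
	/* nothing reusable: fall back to a fresh buffer (new_skb()) */
	struct buf *b = calloc(1, sizeof(*b));

	if (b)
		b->dataref = 1;
	return b;
}

int main(void)
{
	struct buf shared = { 2 }, idle = { 1 };  /* clone out / idle */
	struct buf *pool[2] = { &shared, &idle };

	printf("%s\n", pool_get(pool, 2) == &idle ?
	       "reused idle buf" : "allocated new");
	return 0;
}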
skb               209 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb               226 drivers/block/aoe/aoecmd.c 	skb = f->skb;
skb               227 drivers/block/aoe/aoecmd.c 	if (skb == NULL) {
skb               228 drivers/block/aoe/aoecmd.c 		f->skb = skb = new_skb(ETH_ZLEN);
skb               229 drivers/block/aoe/aoecmd.c 		if (!skb) {
skb               235 drivers/block/aoe/aoecmd.c 	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
skb               236 drivers/block/aoe/aoecmd.c 		skb = skb_pool_get(d);
skb               237 drivers/block/aoe/aoecmd.c 		if (skb == NULL)
skb               239 drivers/block/aoe/aoecmd.c 		skb_pool_put(d, f->skb);
skb               240 drivers/block/aoe/aoecmd.c 		f->skb = skb;
skb               243 drivers/block/aoe/aoecmd.c 	skb->truesize -= skb->data_len;
skb               244 drivers/block/aoe/aoecmd.c 	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
skb               245 drivers/block/aoe/aoecmd.c 	skb_trim(skb, 0);
skb               297 drivers/block/aoe/aoecmd.c skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
skb               303 drivers/block/aoe/aoecmd.c 		skb_fill_page_desc(skb, frag++, bv.bv_page,
skb               323 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb               326 drivers/block/aoe/aoecmd.c 	skb = f->skb;
skb               327 drivers/block/aoe/aoecmd.c 	h = (struct aoe_hdr *) skb_mac_header(skb);
skb               329 drivers/block/aoe/aoecmd.c 	skb_put(skb, sizeof(*h) + sizeof(*ah));
skb               330 drivers/block/aoe/aoecmd.c 	memset(h, 0, skb->len);
skb               353 drivers/block/aoe/aoecmd.c 		skb_fillup(skb, f->buf->bio, f->iter);
skb               355 drivers/block/aoe/aoecmd.c 		skb->len += f->iter.bi_size;
skb               356 drivers/block/aoe/aoecmd.c 		skb->data_len = f->iter.bi_size;
skb               357 drivers/block/aoe/aoecmd.c 		skb->truesize += f->iter.bi_size;
skb               365 drivers/block/aoe/aoecmd.c 	skb->dev = t->ifp->nd;
skb               373 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb               399 drivers/block/aoe/aoecmd.c 	skb = skb_clone(f->skb, GFP_ATOMIC);
skb               400 drivers/block/aoe/aoecmd.c 	if (skb) {
skb               403 drivers/block/aoe/aoecmd.c 		__skb_queue_tail(&queue, skb);
skb               417 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb               426 drivers/block/aoe/aoecmd.c 		skb = new_skb(sizeof *h + sizeof *ch);
skb               427 drivers/block/aoe/aoecmd.c 		if (skb == NULL) {
skb               431 drivers/block/aoe/aoecmd.c 		skb_put(skb, sizeof *h + sizeof *ch);
skb               432 drivers/block/aoe/aoecmd.c 		skb->dev = ifp;
skb               433 drivers/block/aoe/aoecmd.c 		__skb_queue_tail(queue, skb);
skb               434 drivers/block/aoe/aoecmd.c 		h = (struct aoe_hdr *) skb_mac_header(skb);
skb               454 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb               463 drivers/block/aoe/aoecmd.c 	skb = f->skb;
skb               470 drivers/block/aoe/aoecmd.c 	h = (struct aoe_hdr *) skb_mac_header(skb);
skb               487 drivers/block/aoe/aoecmd.c 	skb->dev = t->ifp->nd;
skb               488 drivers/block/aoe/aoecmd.c 	skb = skb_clone(skb, GFP_ATOMIC);
skb               489 drivers/block/aoe/aoecmd.c 	if (skb == NULL)
skb               493 drivers/block/aoe/aoecmd.c 	__skb_queue_tail(&queue, skb);
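
Note: aoecmd.c consistently transmits skb_clone(f->skb, GFP_ATOMIC) rather than the frame's own skb, so the frame keeps ownership of the original for retransmission. A sketch of the idiom; queue_clone is a hypothetical helper:

	#include <linux/skbuff.h>

	static void queue_clone(struct sk_buff_head *queue, struct sk_buff *skb)
	{
		struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

		if (clone)		/* on failure, simply skip this send */
			__skb_queue_tail(queue, clone);
	}
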
skb               557 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb               567 drivers/block/aoe/aoecmd.c 	skb = nf->skb;
skb               568 drivers/block/aoe/aoecmd.c 	nf->skb = f->skb;
skb               574 drivers/block/aoe/aoecmd.c 	f->skb = skb;
skb               584 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb               603 drivers/block/aoe/aoecmd.c 	skb = f->skb;
skb               609 drivers/block/aoe/aoecmd.c 		skb_fill_page_desc(skb, frag, empty_page, 0, m);
skb               611 drivers/block/aoe/aoecmd.c 	skb->len += f->iter.bi_size;
skb               612 drivers/block/aoe/aoecmd.c 	skb->data_len = f->iter.bi_size;
skb               613 drivers/block/aoe/aoecmd.c 	skb->truesize += f->iter.bi_size;
skb               615 drivers/block/aoe/aoecmd.c 	skb = skb_clone(f->skb, GFP_ATOMIC);
skb               616 drivers/block/aoe/aoecmd.c 	if (skb) {
skb               619 drivers/block/aoe/aoecmd.c 		__skb_queue_tail(&queue, skb);
skb               801 drivers/block/aoe/aoecmd.c 			ifp = getif(t, f->skb->dev);
skb              1023 drivers/block/aoe/aoecmd.c bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
skb              1032 drivers/block/aoe/aoecmd.c 		skb_copy_bits(skb, soff, p, bv.bv_len);
skb              1082 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb              1094 drivers/block/aoe/aoecmd.c 	skb = f->r_skb;
skb              1098 drivers/block/aoe/aoecmd.c 	if (!skb)		/* just fail the buf. */
skb              1101 drivers/block/aoe/aoecmd.c 	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
skb              1104 drivers/block/aoe/aoecmd.c 	hin = (struct aoe_hdr *) skb->data;
skb              1105 drivers/block/aoe/aoecmd.c 	skb_pull(skb, sizeof(*hin));
skb              1106 drivers/block/aoe/aoecmd.c 	ahin = (struct aoe_atahdr *) skb->data;
skb              1107 drivers/block/aoe/aoecmd.c 	skb_pull(skb, sizeof(*ahin));
skb              1121 drivers/block/aoe/aoecmd.c 		if (skb->len < n) {
skb              1125 drivers/block/aoe/aoecmd.c 			       skb->len, n);
skb              1137 drivers/block/aoe/aoecmd.c 		bvcpy(skb, f->buf->bio, f->iter, n);
skb              1142 drivers/block/aoe/aoecmd.c 		ifp = getif(t, skb->dev);
skb              1148 drivers/block/aoe/aoecmd.c 		if (skb->len < 512) {
skb              1152 drivers/block/aoe/aoecmd.c 				skb->len);
skb              1155 drivers/block/aoe/aoecmd.c 		if (skb_linearize(skb))
skb              1158 drivers/block/aoe/aoecmd.c 		ataid_complete(d, t, skb->data);
skb              1186 drivers/block/aoe/aoecmd.c 	dev_kfree_skb(skb);
skb              1279 drivers/block/aoe/aoecmd.c ktcomplete(struct frame *f, struct sk_buff *skb)
skb              1284 drivers/block/aoe/aoecmd.c 	f->r_skb = skb;
skb              1302 drivers/block/aoe/aoecmd.c aoecmd_ata_rsp(struct sk_buff *skb)
skb              1312 drivers/block/aoe/aoecmd.c 	h = (struct aoe_hdr *) skb->data;
skb              1320 drivers/block/aoe/aoecmd.c 		return skb;
skb              1350 drivers/block/aoe/aoecmd.c 			return skb;
skb              1357 drivers/block/aoe/aoecmd.c 	ktcomplete(f, skb);
skb              1382 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb              1392 drivers/block/aoe/aoecmd.c 	skb = f->skb;
skb              1393 drivers/block/aoe/aoecmd.c 	h = (struct aoe_hdr *) skb_mac_header(skb);
skb              1395 drivers/block/aoe/aoecmd.c 	skb_put(skb, sizeof *h + sizeof *ah);
skb              1396 drivers/block/aoe/aoecmd.c 	memset(h, 0, skb->len);
skb              1408 drivers/block/aoe/aoecmd.c 	skb->dev = t->ifp->nd;
skb              1414 drivers/block/aoe/aoecmd.c 	skb = skb_clone(skb, GFP_ATOMIC);
skb              1415 drivers/block/aoe/aoecmd.c 	if (skb)
skb              1418 drivers/block/aoe/aoecmd.c 	return skb;
skb              1525 drivers/block/aoe/aoecmd.c aoecmd_cfg_rsp(struct sk_buff *skb)
skb              1537 drivers/block/aoe/aoecmd.c 	h = (struct aoe_hdr *) skb_mac_header(skb);
skb              1583 drivers/block/aoe/aoecmd.c 	n = skb->dev->mtu;
skb              1589 drivers/block/aoe/aoecmd.c 	setifbcnt(t, skb->dev, n);
skb              1658 drivers/block/aoe/aoecmd.c 	struct sk_buff *skb;
skb              1669 drivers/block/aoe/aoecmd.c 		skb = f->r_skb;
skb              1677 drivers/block/aoe/aoecmd.c 		dev_kfree_skb(skb);
skb               405 drivers/block/aoe/aoedev.c skbfree(struct sk_buff *skb)
skb               410 drivers/block/aoe/aoedev.c 	if (skb == NULL)
skb               412 drivers/block/aoe/aoedev.c 	while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
skb               417 drivers/block/aoe/aoedev.c 			skb->dev ? skb->dev->name : "netif",
skb               421 drivers/block/aoe/aoedev.c 	skb->truesize -= skb->data_len;
skb               422 drivers/block/aoe/aoedev.c 	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
skb               423 drivers/block/aoe/aoedev.c 	skb_trim(skb, 0);
skb               424 drivers/block/aoe/aoedev.c 	dev_kfree_skb(skb);
skb               430 drivers/block/aoe/aoedev.c 	struct sk_buff *skb, *tmp;
skb               432 drivers/block/aoe/aoedev.c 	skb_queue_walk_safe(&d->skbpool, skb, tmp)
skb               433 drivers/block/aoe/aoedev.c 		skbfree(skb);
skb               515 drivers/block/aoe/aoedev.c 		skbfree(f->skb);
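
Note: skbfree() above waits for the stack to release the skb, then detaches its page frags before freeing, because the frags pointed at bio pages the block layer still owns. A sketch of the teardown step alone; strip_frags_and_free is a hypothetical name:

	#include <linux/skbuff.h>

	static void strip_frags_and_free(struct sk_buff *skb)
	{
		skb->truesize -= skb->data_len;	/* undo the frag accounting */
		skb_shinfo(skb)->nr_frags = 0;	/* frags were bio pages; don't put them */
		skb->data_len = 0;
		skb_trim(skb, 0);
		dev_kfree_skb(skb);
	}
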
skb                57 drivers/block/aoe/aoenet.c 	struct sk_buff *skb;
skb                60 drivers/block/aoe/aoenet.c 	while ((skb = skb_dequeue(&skbtxq))) {
skb                62 drivers/block/aoe/aoenet.c 		ifp = skb->dev;
skb                63 drivers/block/aoe/aoenet.c 		if (dev_queue_xmit(skb) == NET_XMIT_DROP && net_ratelimit())
skb               115 drivers/block/aoe/aoenet.c 	struct sk_buff *skb, *tmp;
skb               118 drivers/block/aoe/aoenet.c 	skb_queue_walk_safe(queue, skb, tmp) {
skb               119 drivers/block/aoe/aoenet.c 		__skb_unlink(skb, queue);
skb               121 drivers/block/aoe/aoenet.c 		skb_queue_tail(&skbtxq, skb);
skb               131 drivers/block/aoe/aoenet.c aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
skb               141 drivers/block/aoe/aoenet.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               142 drivers/block/aoe/aoenet.c 	if (skb == NULL)
skb               146 drivers/block/aoe/aoenet.c 	skb_push(skb, ETH_HLEN);	/* (1) */
skb               148 drivers/block/aoe/aoenet.c 	if (skb->len >= sn) {
skb               149 drivers/block/aoe/aoenet.c 		sn -= skb_headlen(skb);
skb               150 drivers/block/aoe/aoenet.c 		if (sn > 0 && !__pskb_pull_tail(skb, sn))
skb               153 drivers/block/aoe/aoenet.c 	h = (struct aoe_hdr *) skb->data;
skb               167 drivers/block/aoe/aoenet.c 				h->minor, skb->dev->name,
skb               175 drivers/block/aoe/aoenet.c 		skb = aoecmd_ata_rsp(skb);
skb               178 drivers/block/aoe/aoenet.c 		aoecmd_cfg_rsp(skb);
skb               187 drivers/block/aoe/aoenet.c 	if (!skb)
skb               190 drivers/block/aoe/aoenet.c 	dev_kfree_skb(skb);
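
Note: aoenet_rcv() above is a packet_type receive hook: unshare the skb before touching it, parse, and free on exit. A minimal sketch under those assumptions; my_rcv and my_pt are placeholders, and ETH_P_AOE is reused here only because it appears in the indexed driver:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/if_ether.h>

	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *orig_dev)
	{
		skb = skb_share_check(skb, GFP_ATOMIC);	/* copy if shared */
		if (!skb)
			return 0;
		/* parse protocol headers at skb->data here ... */
		dev_kfree_skb(skb);
		return 0;
	}

	static struct packet_type my_pt = {
		.type = __constant_htons(ETH_P_AOE),	/* placeholder ethertype */
		.func = my_rcv,
	};
	/* registered at init with dev_add_pack(&my_pt) */
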
skb                38 drivers/block/drbd/drbd_nl.c int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
skb                39 drivers/block/drbd/drbd_nl.c int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);
skb                41 drivers/block/drbd/drbd_nl.c int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
skb                42 drivers/block/drbd/drbd_nl.c int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
skb                43 drivers/block/drbd/drbd_nl.c int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
skb                45 drivers/block/drbd/drbd_nl.c int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
skb                46 drivers/block/drbd/drbd_nl.c int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
skb                47 drivers/block/drbd/drbd_nl.c int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
skb                48 drivers/block/drbd/drbd_nl.c int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
skb                49 drivers/block/drbd/drbd_nl.c int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
skb                50 drivers/block/drbd/drbd_nl.c int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
skb                51 drivers/block/drbd/drbd_nl.c int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
skb                52 drivers/block/drbd/drbd_nl.c int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
skb                53 drivers/block/drbd/drbd_nl.c int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
skb                54 drivers/block/drbd/drbd_nl.c int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
skb                55 drivers/block/drbd/drbd_nl.c int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
skb                56 drivers/block/drbd/drbd_nl.c int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
skb                57 drivers/block/drbd/drbd_nl.c int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
skb                58 drivers/block/drbd/drbd_nl.c int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
skb                59 drivers/block/drbd/drbd_nl.c int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
skb                60 drivers/block/drbd/drbd_nl.c int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
skb                61 drivers/block/drbd/drbd_nl.c int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
skb                62 drivers/block/drbd/drbd_nl.c int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
skb                63 drivers/block/drbd/drbd_nl.c int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
skb                64 drivers/block/drbd/drbd_nl.c int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
skb                66 drivers/block/drbd/drbd_nl.c int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
skb                67 drivers/block/drbd/drbd_nl.c int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
skb                68 drivers/block/drbd/drbd_nl.c int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
skb                70 drivers/block/drbd/drbd_nl.c int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
skb                72 drivers/block/drbd/drbd_nl.c int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
skb                74 drivers/block/drbd/drbd_nl.c int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);
skb                88 drivers/block/drbd/drbd_nl.c static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
skb                90 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
skb                91 drivers/block/drbd/drbd_nl.c 	if (genlmsg_reply(skb, info))
skb                97 drivers/block/drbd/drbd_nl.c static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
skb               105 drivers/block/drbd/drbd_nl.c 	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
skb               109 drivers/block/drbd/drbd_nl.c 	err = nla_put_string(skb, T_info_text, info);
skb               111 drivers/block/drbd/drbd_nl.c 		nla_nest_cancel(skb, nla);
skb               114 drivers/block/drbd/drbd_nl.c 		nla_nest_end(skb, nla);
skb               119 drivers/block/drbd/drbd_nl.c static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
skb               126 drivers/block/drbd/drbd_nl.c 	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
skb               130 drivers/block/drbd/drbd_nl.c 	txt = nla_reserve(skb, T_info_text, 256);
skb               132 drivers/block/drbd/drbd_nl.c 		nla_nest_cancel(skb, nla);
skb               141 drivers/block/drbd/drbd_nl.c 	nlmsg_trim(skb, (char*)txt + NLA_ALIGN(txt->nla_len));
skb               142 drivers/block/drbd/drbd_nl.c 	nla_nest_end(skb, nla);
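
Note: drbd_msg_put_info() above shows the netlink nesting discipline: open a nest, emit attributes, and cancel the whole nest on failure so the message is never left half-built. A sketch with hypothetical attribute ids standing in for DRBD_NLA_CFG_REPLY/T_info_text:

	#include <net/netlink.h>

	enum { MY_NLA_CFG_REPLY = 1, MY_T_INFO_TEXT = 2 };	/* placeholder ids */

	static int put_info_text(struct sk_buff *skb, const char *info)
	{
		struct nlattr *nla;

		nla = nla_nest_start_noflag(skb, MY_NLA_CFG_REPLY);
		if (!nla)
			return -EMSGSIZE;
		if (nla_put_string(skb, MY_T_INFO_TEXT, info)) {
			nla_nest_cancel(skb, nla);	/* roll back the partial nest */
			return -EMSGSIZE;
		}
		nla_nest_end(skb, nla);
		return 0;
	}
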
skb               160 drivers/block/drbd/drbd_nl.c 	struct sk_buff *skb, struct genl_info *info, unsigned flags)
skb               768 drivers/block/drbd/drbd_nl.c int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
skb               775 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              1572 drivers/block/drbd/drbd_nl.c int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
skb              1581 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              1790 drivers/block/drbd/drbd_nl.c int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
skb              1809 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              2228 drivers/block/drbd/drbd_nl.c int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
skb              2235 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              2426 drivers/block/drbd/drbd_nl.c int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
skb              2437 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
skb              2568 drivers/block/drbd/drbd_nl.c int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
skb              2583 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
skb              2783 drivers/block/drbd/drbd_nl.c int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
skb              2792 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
skb              2837 drivers/block/drbd/drbd_nl.c int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
skb              2850 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              2974 drivers/block/drbd/drbd_nl.c int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
skb              2981 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
skb              3012 drivers/block/drbd/drbd_nl.c int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
skb              3018 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              3060 drivers/block/drbd/drbd_nl.c static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
skb              3066 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              3089 drivers/block/drbd/drbd_nl.c int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
skb              3095 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              3140 drivers/block/drbd/drbd_nl.c int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
skb              3145 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              3160 drivers/block/drbd/drbd_nl.c int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
skb              3166 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              3188 drivers/block/drbd/drbd_nl.c int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
skb              3190 drivers/block/drbd/drbd_nl.c 	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
skb              3193 drivers/block/drbd/drbd_nl.c int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
skb              3199 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              3249 drivers/block/drbd/drbd_nl.c int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
skb              3251 drivers/block/drbd/drbd_nl.c 	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
skb              3254 drivers/block/drbd/drbd_nl.c static int nla_put_drbd_cfg_context(struct sk_buff *skb,
skb              3260 drivers/block/drbd/drbd_nl.c 	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
skb              3264 drivers/block/drbd/drbd_nl.c 	    nla_put_u32(skb, T_ctx_volume, device->vnr))
skb              3266 drivers/block/drbd/drbd_nl.c 	if (nla_put_string(skb, T_ctx_resource_name, resource->name))
skb              3270 drivers/block/drbd/drbd_nl.c 		    nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
skb              3273 drivers/block/drbd/drbd_nl.c 		    nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
skb              3276 drivers/block/drbd/drbd_nl.c 	nla_nest_end(skb, nla);
skb              3281 drivers/block/drbd/drbd_nl.c 		nla_nest_cancel(skb, nla);
skb              3305 drivers/block/drbd/drbd_nl.c int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
skb              3332 drivers/block/drbd/drbd_nl.c 	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
skb              3340 drivers/block/drbd/drbd_nl.c 	err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
skb              3343 drivers/block/drbd/drbd_nl.c 	err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
skb              3347 drivers/block/drbd/drbd_nl.c 	err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
skb              3351 drivers/block/drbd/drbd_nl.c 	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
skb              3355 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, dh);
skb              3362 drivers/block/drbd/drbd_nl.c 	return skb->len;
skb              3422 drivers/block/drbd/drbd_nl.c int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
skb              3461 drivers/block/drbd/drbd_nl.c 	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
skb              3471 drivers/block/drbd/drbd_nl.c 		err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
skb              3478 drivers/block/drbd/drbd_nl.c 			err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
skb              3484 drivers/block/drbd/drbd_nl.c 		err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
skb              3489 drivers/block/drbd/drbd_nl.c 		err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
skb              3494 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, dh);
skb              3501 drivers/block/drbd/drbd_nl.c 	return skb->len;
skb              3511 drivers/block/drbd/drbd_nl.c int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
skb              3587 drivers/block/drbd/drbd_nl.c 	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
skb              3598 drivers/block/drbd/drbd_nl.c 		err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
skb              3603 drivers/block/drbd/drbd_nl.c 			err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
skb              3608 drivers/block/drbd/drbd_nl.c 		err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
skb              3612 drivers/block/drbd/drbd_nl.c 		err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
skb              3617 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, dh);
skb              3626 drivers/block/drbd/drbd_nl.c 	return skb->len;
skb              3673 drivers/block/drbd/drbd_nl.c int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
skb              3729 drivers/block/drbd/drbd_nl.c 	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
skb              3742 drivers/block/drbd/drbd_nl.c 		err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
skb              3746 drivers/block/drbd/drbd_nl.c 		err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
skb              3750 drivers/block/drbd/drbd_nl.c 		err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
skb              3756 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, dh);
skb              3763 drivers/block/drbd/drbd_nl.c 	return skb->len;
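
Note: the drbd_adm_dump_* lines above share one skeleton: each object gets its own genlmsg header built with the requester's portid and the dump sequence number, and the callback returns skb->len so netlink re-invokes it for the next batch. A sketch; my_genl_family and MY_CMD are placeholders:

	#include <net/genetlink.h>

	static struct genl_family my_genl_family;	/* placeholder family */
	#define MY_CMD 1				/* placeholder command id */

	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		void *dh;

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, &my_genl_family,
				 NLM_F_MULTI, MY_CMD);
		if (!dh)
			return -EMSGSIZE;
		/* nla_put_*() one object's attributes here ... */
		genlmsg_end(skb, dh);
		return skb->len;	/* nonzero: netlink calls the dump again */
	}
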
skb              3777 drivers/block/drbd/drbd_nl.c static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
skb              3804 drivers/block/drbd/drbd_nl.c 	if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
skb              3807 drivers/block/drbd/drbd_nl.c 	if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
skb              3815 drivers/block/drbd/drbd_nl.c 		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
skb              3822 drivers/block/drbd/drbd_nl.c 			err = net_conf_to_skb(skb, nc, exclude_sensitive);
skb              3828 drivers/block/drbd/drbd_nl.c 	nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
skb              3831 drivers/block/drbd/drbd_nl.c 	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
skb              3832 drivers/block/drbd/drbd_nl.c 	    nla_put_u32(skb, T_current_state, device->state.i) ||
skb              3833 drivers/block/drbd/drbd_nl.c 	    nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
skb              3834 drivers/block/drbd/drbd_nl.c 	    nla_put_u64_0pad(skb, T_capacity,
skb              3836 drivers/block/drbd/drbd_nl.c 	    nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
skb              3837 drivers/block/drbd/drbd_nl.c 	    nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
skb              3838 drivers/block/drbd/drbd_nl.c 	    nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
skb              3839 drivers/block/drbd/drbd_nl.c 	    nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
skb              3840 drivers/block/drbd/drbd_nl.c 	    nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
skb              3841 drivers/block/drbd/drbd_nl.c 	    nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
skb              3842 drivers/block/drbd/drbd_nl.c 	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
skb              3843 drivers/block/drbd/drbd_nl.c 	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
skb              3844 drivers/block/drbd/drbd_nl.c 	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
skb              3851 drivers/block/drbd/drbd_nl.c 		err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
skb              3857 drivers/block/drbd/drbd_nl.c 		if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
skb              3858 drivers/block/drbd/drbd_nl.c 		    nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
skb              3859 drivers/block/drbd/drbd_nl.c 		    nla_put_u64_0pad(skb, T_bits_oos,
skb              3864 drivers/block/drbd/drbd_nl.c 			if (nla_put_u64_0pad(skb, T_bits_rs_total,
skb              3866 drivers/block/drbd/drbd_nl.c 			    nla_put_u64_0pad(skb, T_bits_rs_failed,
skb              3878 drivers/block/drbd/drbd_nl.c 			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
skb              3879 drivers/block/drbd/drbd_nl.c 			    nla_put_u32(skb, T_new_state, sib->ns.i))
skb              3883 drivers/block/drbd/drbd_nl.c 			if (nla_put_u32(skb, T_helper_exit_code,
skb              3888 drivers/block/drbd/drbd_nl.c 			if (nla_put_string(skb, T_helper, sib->helper_name))
skb              3893 drivers/block/drbd/drbd_nl.c 	nla_nest_end(skb, nla);
skb              3903 drivers/block/drbd/drbd_nl.c int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
skb              3909 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              3925 drivers/block/drbd/drbd_nl.c static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
skb              3991 drivers/block/drbd/drbd_nl.c 		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
skb              4006 drivers/block/drbd/drbd_nl.c 			if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
skb              4012 drivers/block/drbd/drbd_nl.c 				if (nc && net_conf_to_skb(skb, nc, 1) != 0)
skb              4024 drivers/block/drbd/drbd_nl.c 		if (nla_put_status_info(skb, device, NULL)) {
skb              4026 drivers/block/drbd/drbd_nl.c 			genlmsg_cancel(skb, dh);
skb              4030 drivers/block/drbd/drbd_nl.c 		genlmsg_end(skb, dh);
skb              4041 drivers/block/drbd/drbd_nl.c         return skb->len;
skb              4054 drivers/block/drbd/drbd_nl.c int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
skb              4103 drivers/block/drbd/drbd_nl.c 	return get_one_status(skb, cb);
skb              4106 drivers/block/drbd/drbd_nl.c int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
skb              4113 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              4134 drivers/block/drbd/drbd_nl.c int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
skb              4141 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              4180 drivers/block/drbd/drbd_nl.c int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
skb              4189 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              4283 drivers/block/drbd/drbd_nl.c int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
skb              4291 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
skb              4346 drivers/block/drbd/drbd_nl.c int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
skb              4352 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
skb              4453 drivers/block/drbd/drbd_nl.c int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
skb              4458 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
skb              4502 drivers/block/drbd/drbd_nl.c int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
skb              4511 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
skb              4565 drivers/block/drbd/drbd_nl.c int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
skb              4571 drivers/block/drbd/drbd_nl.c 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
skb              4633 drivers/block/drbd/drbd_nl.c void notify_resource_state(struct sk_buff *skb,
skb              4644 drivers/block/drbd/drbd_nl.c 	if (!skb) {
skb              4646 drivers/block/drbd/drbd_nl.c 		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
skb              4648 drivers/block/drbd/drbd_nl.c 		if (!skb)
skb              4654 drivers/block/drbd/drbd_nl.c 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
skb              4659 drivers/block/drbd/drbd_nl.c 	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
skb              4660 drivers/block/drbd/drbd_nl.c 	    nla_put_notification_header(skb, type) ||
skb              4662 drivers/block/drbd/drbd_nl.c 	     resource_info_to_skb(skb, resource_info, true)))
skb              4665 drivers/block/drbd/drbd_nl.c 	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
skb              4668 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, dh);
skb              4670 drivers/block/drbd/drbd_nl.c 		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
skb              4678 drivers/block/drbd/drbd_nl.c 	nlmsg_free(skb);
skb              4684 drivers/block/drbd/drbd_nl.c void notify_device_state(struct sk_buff *skb,
skb              4695 drivers/block/drbd/drbd_nl.c 	if (!skb) {
skb              4697 drivers/block/drbd/drbd_nl.c 		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
skb              4699 drivers/block/drbd/drbd_nl.c 		if (!skb)
skb              4705 drivers/block/drbd/drbd_nl.c 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
skb              4710 drivers/block/drbd/drbd_nl.c 	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
skb              4711 drivers/block/drbd/drbd_nl.c 	    nla_put_notification_header(skb, type) ||
skb              4713 drivers/block/drbd/drbd_nl.c 	     device_info_to_skb(skb, device_info, true)))
skb              4716 drivers/block/drbd/drbd_nl.c 	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
skb              4717 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, dh);
skb              4719 drivers/block/drbd/drbd_nl.c 		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
skb              4727 drivers/block/drbd/drbd_nl.c 	nlmsg_free(skb);
skb              4733 drivers/block/drbd/drbd_nl.c void notify_connection_state(struct sk_buff *skb,
skb              4744 drivers/block/drbd/drbd_nl.c 	if (!skb) {
skb              4746 drivers/block/drbd/drbd_nl.c 		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
skb              4748 drivers/block/drbd/drbd_nl.c 		if (!skb)
skb              4754 drivers/block/drbd/drbd_nl.c 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
skb              4759 drivers/block/drbd/drbd_nl.c 	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
skb              4760 drivers/block/drbd/drbd_nl.c 	    nla_put_notification_header(skb, type) ||
skb              4762 drivers/block/drbd/drbd_nl.c 	     connection_info_to_skb(skb, connection_info, true)))
skb              4765 drivers/block/drbd/drbd_nl.c 	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
skb              4766 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, dh);
skb              4768 drivers/block/drbd/drbd_nl.c 		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
skb              4776 drivers/block/drbd/drbd_nl.c 	nlmsg_free(skb);
skb              4782 drivers/block/drbd/drbd_nl.c void notify_peer_device_state(struct sk_buff *skb,
skb              4794 drivers/block/drbd/drbd_nl.c 	if (!skb) {
skb              4796 drivers/block/drbd/drbd_nl.c 		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
skb              4798 drivers/block/drbd/drbd_nl.c 		if (!skb)
skb              4804 drivers/block/drbd/drbd_nl.c 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
skb              4809 drivers/block/drbd/drbd_nl.c 	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
skb              4810 drivers/block/drbd/drbd_nl.c 	    nla_put_notification_header(skb, type) ||
skb              4812 drivers/block/drbd/drbd_nl.c 	     peer_device_info_to_skb(skb, peer_device_info, true)))
skb              4815 drivers/block/drbd/drbd_nl.c 	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
skb              4816 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, dh);
skb              4818 drivers/block/drbd/drbd_nl.c 		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
skb              4826 drivers/block/drbd/drbd_nl.c 	nlmsg_free(skb);
skb              4839 drivers/block/drbd/drbd_nl.c 	struct sk_buff *skb = NULL;
skb              4847 drivers/block/drbd/drbd_nl.c 	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
skb              4849 drivers/block/drbd/drbd_nl.c 	if (!skb)
skb              4853 drivers/block/drbd/drbd_nl.c 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
skb              4859 drivers/block/drbd/drbd_nl.c 	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
skb              4860 drivers/block/drbd/drbd_nl.c 	    nla_put_notification_header(skb, type) ||
skb              4861 drivers/block/drbd/drbd_nl.c 	    drbd_helper_info_to_skb(skb, &helper_info, true))
skb              4863 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, dh);
skb              4864 drivers/block/drbd/drbd_nl.c 	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
skb              4865 drivers/block/drbd/drbd_nl.c 	skb = NULL;
skb              4875 drivers/block/drbd/drbd_nl.c 	nlmsg_free(skb);
skb              4880 drivers/block/drbd/drbd_nl.c static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
skb              4886 drivers/block/drbd/drbd_nl.c 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
skb              4891 drivers/block/drbd/drbd_nl.c 	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
skb              4893 drivers/block/drbd/drbd_nl.c 	genlmsg_end(skb, dh);
skb              4897 drivers/block/drbd/drbd_nl.c 	nlmsg_free(skb);
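
Note: the notify_*_state() lines above all follow one broadcast shape: allocate a fresh message, build it, multicast it, and free only half-built messages. A generic sketch of that shape (drbd's own drbd_genl_multicast_events wrapper is replaced by plain genlmsg_multicast); my_genl_family and MY_EVENT_CMD are placeholders:

	#include <net/genetlink.h>

	static struct genl_family my_genl_family;	/* placeholder family */
	#define MY_EVENT_CMD 1				/* placeholder command id */

	static void my_notify(unsigned int seq)
	{
		struct sk_buff *skb;
		void *dh;

		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		if (!skb)
			return;
		dh = genlmsg_put(skb, 0, seq, &my_genl_family, 0, MY_EVENT_CMD);
		if (!dh) {
			nlmsg_free(skb);	/* never leak a half-built message */
			return;
		}
		/* nla_put_*() the event payload here ... */
		genlmsg_end(skb, dh);
		/* multicast consumes the skb, even on error */
		genlmsg_multicast(&my_genl_family, skb, 0, 0, GFP_NOWAIT);
	}
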
skb              4919 drivers/block/drbd/drbd_nl.c static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
skb              4933 drivers/block/drbd/drbd_nl.c 		notify_initial_state_done(skb, seq);
skb              4940 drivers/block/drbd/drbd_nl.c 		notify_resource_state_change(skb, seq, state_change->resource,
skb              4946 drivers/block/drbd/drbd_nl.c 		notify_connection_state_change(skb, seq, &state_change->connections[n],
skb              4952 drivers/block/drbd/drbd_nl.c 		notify_device_state_change(skb, seq, &state_change->devices[n],
skb              4958 drivers/block/drbd/drbd_nl.c 		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
skb              4973 drivers/block/drbd/drbd_nl.c 	return skb->len;
skb              4976 drivers/block/drbd/drbd_nl.c int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
skb              4983 drivers/block/drbd/drbd_nl.c 			return get_initial_state(skb, cb);
skb              5022 drivers/block/drbd/drbd_nl.c 	return get_initial_state(skb, cb);
skb              1540 drivers/block/drbd/drbd_state.c void notify_resource_state_change(struct sk_buff *skb,
skb              1553 drivers/block/drbd/drbd_state.c 	notify_resource_state(skb, seq, resource, &resource_info, type);
skb              1556 drivers/block/drbd/drbd_state.c void notify_connection_state_change(struct sk_buff *skb,
skb              1567 drivers/block/drbd/drbd_state.c 	notify_connection_state(skb, seq, connection, &connection_info, type);
skb              1570 drivers/block/drbd/drbd_state.c void notify_device_state_change(struct sk_buff *skb,
skb              1580 drivers/block/drbd/drbd_state.c 	notify_device_state(skb, seq, device, &device_info, type);
skb              1583 drivers/block/drbd/drbd_state.c void notify_peer_device_state_change(struct sk_buff *skb,
skb              1597 drivers/block/drbd/drbd_state.c 	notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
skb               156 drivers/block/nbd.c static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
skb              1809 drivers/block/nbd.c static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
skb              1818 drivers/block/nbd.c 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
skb              2000 drivers/block/nbd.c static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
skb              2005 drivers/block/nbd.c 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
skb              2038 drivers/block/nbd.c static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
skb              2046 drivers/block/nbd.c 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
skb              2240 drivers/block/nbd.c static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
skb              2296 drivers/block/nbd.c 	struct sk_buff *skb;
skb              2300 drivers/block/nbd.c 	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
skb              2301 drivers/block/nbd.c 	if (!skb)
skb              2303 drivers/block/nbd.c 	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
skb              2306 drivers/block/nbd.c 		nlmsg_free(skb);
skb              2309 drivers/block/nbd.c 	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
skb              2311 drivers/block/nbd.c 		nlmsg_free(skb);
skb              2314 drivers/block/nbd.c 	genlmsg_end(skb, msg_head);
skb              2315 drivers/block/nbd.c 	genlmsg_reply(skb, info);
skb              2320 drivers/block/nbd.c 	struct sk_buff *skb;
skb              2324 drivers/block/nbd.c 	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
skb              2325 drivers/block/nbd.c 	if (!skb)
skb              2327 drivers/block/nbd.c 	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
skb              2330 drivers/block/nbd.c 		nlmsg_free(skb);
skb              2333 drivers/block/nbd.c 	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
skb              2335 drivers/block/nbd.c 		nlmsg_free(skb);
skb              2338 drivers/block/nbd.c 	genlmsg_end(skb, msg_head);
skb              2339 drivers/block/nbd.c 	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
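
Note: the nbd.c lines above show the unicast-reply counterpart: size the message for exactly one u32 attribute, build it against the request's genl_info, and genlmsg_reply() it back to the sender. A sketch; my_genl_family, MY_ATTR_INDEX and MY_CMD stand in for the nbd identifiers:

	#include <net/genetlink.h>

	static struct genl_family my_genl_family;	/* placeholder family */
	enum { MY_ATTR_INDEX = 1 };			/* placeholder attribute */
	#define MY_CMD 1				/* placeholder command id */

	static void send_index_reply(struct genl_info *info, u32 index)
	{
		struct sk_buff *skb;
		void *hdr;

		/* room for exactly one u32 attribute */
		skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
		if (!skb)
			return;
		hdr = genlmsg_put_reply(skb, info, &my_genl_family, 0, MY_CMD);
		if (!hdr || nla_put_u32(skb, MY_ATTR_INDEX, index)) {
			nlmsg_free(skb);
			return;
		}
		genlmsg_end(skb, hdr);
		genlmsg_reply(skb, info);	/* consumes the skb */
	}
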
skb                80 drivers/bluetooth/bfusb.c 	struct sk_buff *skb;
skb                85 drivers/bluetooth/bfusb.c 	skb = skb_dequeue(&data->completed_q);
skb                86 drivers/bluetooth/bfusb.c 	if (skb) {
skb                87 drivers/bluetooth/bfusb.c 		urb = ((struct bfusb_data_scb *) skb->cb)->urb;
skb                88 drivers/bluetooth/bfusb.c 		kfree_skb(skb);
skb                96 drivers/bluetooth/bfusb.c 	struct sk_buff *skb;
skb               101 drivers/bluetooth/bfusb.c 	while ((skb = skb_dequeue(&data->pending_q))) {
skb               102 drivers/bluetooth/bfusb.c 		urb = ((struct bfusb_data_scb *) skb->cb)->urb;
skb               104 drivers/bluetooth/bfusb.c 		skb_queue_tail(&data->completed_q, skb);
skb               111 drivers/bluetooth/bfusb.c static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb)
skb               113 drivers/bluetooth/bfusb.c 	struct bfusb_data_scb *scb = (void *) skb->cb;
skb               117 drivers/bluetooth/bfusb.c 	BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len);
skb               127 drivers/bluetooth/bfusb.c 	usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len,
skb               128 drivers/bluetooth/bfusb.c 			bfusb_tx_complete, skb);
skb               132 drivers/bluetooth/bfusb.c 	skb_queue_tail(&data->pending_q, skb);
skb               138 drivers/bluetooth/bfusb.c 		skb_unlink(skb, &data->pending_q);
skb               148 drivers/bluetooth/bfusb.c 	struct sk_buff *skb;
skb               161 drivers/bluetooth/bfusb.c 				(skb = skb_dequeue(&data->transmit_q))) {
skb               162 drivers/bluetooth/bfusb.c 			if (bfusb_send_bulk(data, skb) < 0) {
skb               163 drivers/bluetooth/bfusb.c 				skb_queue_head(&data->transmit_q, skb);
skb               175 drivers/bluetooth/bfusb.c 	struct sk_buff *skb = (struct sk_buff *) urb->context;
skb               176 drivers/bluetooth/bfusb.c 	struct bfusb_data *data = (struct bfusb_data *) skb->dev;
skb               178 drivers/bluetooth/bfusb.c 	BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len);
skb               186 drivers/bluetooth/bfusb.c 		data->hdev->stat.byte_tx += skb->len;
skb               192 drivers/bluetooth/bfusb.c 	skb_unlink(skb, &data->pending_q);
skb               193 drivers/bluetooth/bfusb.c 	skb_queue_tail(&data->completed_q, skb);
skb               204 drivers/bluetooth/bfusb.c 	struct sk_buff *skb;
skb               215 drivers/bluetooth/bfusb.c 	skb = bt_skb_alloc(size, GFP_ATOMIC);
skb               216 drivers/bluetooth/bfusb.c 	if (!skb) {
skb               221 drivers/bluetooth/bfusb.c 	skb->dev = (void *) data;
skb               223 drivers/bluetooth/bfusb.c 	scb = (struct bfusb_data_scb *) skb->cb;
skb               228 drivers/bluetooth/bfusb.c 	usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, size,
skb               229 drivers/bluetooth/bfusb.c 			bfusb_rx_complete, skb);
skb               231 drivers/bluetooth/bfusb.c 	skb_queue_tail(&data->pending_q, skb);
skb               237 drivers/bluetooth/bfusb.c 		skb_unlink(skb, &data->pending_q);
skb               238 drivers/bluetooth/bfusb.c 		kfree_skb(skb);
skb               257 drivers/bluetooth/bfusb.c 		struct sk_buff *skb;
skb               306 drivers/bluetooth/bfusb.c 		skb = bt_skb_alloc(pkt_len, GFP_ATOMIC);
skb               307 drivers/bluetooth/bfusb.c 		if (!skb) {
skb               312 drivers/bluetooth/bfusb.c 		hci_skb_pkt_type(skb) = pkt_type;
skb               314 drivers/bluetooth/bfusb.c 		data->reassembly = skb;
skb               335 drivers/bluetooth/bfusb.c 	struct sk_buff *skb = (struct sk_buff *) urb->context;
skb               336 drivers/bluetooth/bfusb.c 	struct bfusb_data *data = (struct bfusb_data *) skb->dev;
skb               341 drivers/bluetooth/bfusb.c 	BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len);
skb               353 drivers/bluetooth/bfusb.c 	skb_put(skb, count);
skb               380 drivers/bluetooth/bfusb.c 	skb_unlink(skb, &data->pending_q);
skb               381 drivers/bluetooth/bfusb.c 	kfree_skb(skb);
skb               450 drivers/bluetooth/bfusb.c static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               457 drivers/bluetooth/bfusb.c 	BT_DBG("hdev %p skb %p type %d len %d", hdev, skb,
skb               458 drivers/bluetooth/bfusb.c 	       hci_skb_pkt_type(skb), skb->len);
skb               460 drivers/bluetooth/bfusb.c 	switch (hci_skb_pkt_type(skb)) {
skb               473 drivers/bluetooth/bfusb.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb               475 drivers/bluetooth/bfusb.c 	count = skb->len;
skb               494 drivers/bluetooth/bfusb.c 		skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);
skb               514 drivers/bluetooth/bfusb.c 	kfree_skb(skb);
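
Note: the bfusb lines above use the skb->cb idiom: per-packet driver state (here, the URB) lives inside the skb's 48-byte control buffer so completion handlers can recover it from queued skbs. A sketch of that idiom; my_scb and remember_urb are hypothetical names mirroring bfusb_data_scb's role:

	#include <linux/skbuff.h>
	#include <linux/usb.h>
	#include <linux/build_bug.h>

	struct my_scb {			/* per-packet state parked in skb->cb */
		struct urb *urb;
	};

	static void remember_urb(struct sk_buff *skb, struct urb *urb)
	{
		struct my_scb *scb = (struct my_scb *)skb->cb;

		BUILD_BUG_ON(sizeof(struct my_scb) > sizeof(skb->cb));
		scb->urb = urb;		/* completion handler reads it back */
	}
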
skb               237 drivers/bluetooth/bluecard_cs.c 		register struct sk_buff *skb;
skb               259 drivers/bluetooth/bluecard_cs.c 		skb = skb_dequeue(&(info->txq));
skb               260 drivers/bluetooth/bluecard_cs.c 		if (!skb)
skb               263 drivers/bluetooth/bluecard_cs.c 		if (hci_skb_pkt_type(skb) & 0x80) {
skb               273 drivers/bluetooth/bluecard_cs.c 		len = bluecard_write(iobase, offset, skb->data, skb->len);
skb               281 drivers/bluetooth/bluecard_cs.c 		if (hci_skb_pkt_type(skb) & 0x80) {
skb               287 drivers/bluetooth/bluecard_cs.c 			switch (hci_skb_pkt_type(skb)) {
skb               320 drivers/bluetooth/bluecard_cs.c 		if (len == skb->len) {
skb               321 drivers/bluetooth/bluecard_cs.c 			kfree_skb(skb);
skb               323 drivers/bluetooth/bluecard_cs.c 			skb_pull(skb, len);
skb               324 drivers/bluetooth/bluecard_cs.c 			skb_queue_head(&(info->txq), skb);
skb               563 drivers/bluetooth/bluecard_cs.c 	struct sk_buff *skb;
skb               568 drivers/bluetooth/bluecard_cs.c 	skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_KERNEL);
skb               569 drivers/bluetooth/bluecard_cs.c 	if (!skb) {
skb               577 drivers/bluetooth/bluecard_cs.c 		hci_skb_pkt_type(skb) = PKT_BAUD_RATE_460800;
skb               581 drivers/bluetooth/bluecard_cs.c 		hci_skb_pkt_type(skb) = PKT_BAUD_RATE_230400;
skb               585 drivers/bluetooth/bluecard_cs.c 		hci_skb_pkt_type(skb) = PKT_BAUD_RATE_115200;
skb               591 drivers/bluetooth/bluecard_cs.c 		hci_skb_pkt_type(skb) = PKT_BAUD_RATE_57600;
skb               595 drivers/bluetooth/bluecard_cs.c 	skb_put_data(skb, cmd, sizeof(cmd));
skb               597 drivers/bluetooth/bluecard_cs.c 	skb_queue_tail(&(info->txq), skb);
skb               652 drivers/bluetooth/bluecard_cs.c static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               656 drivers/bluetooth/bluecard_cs.c 	switch (hci_skb_pkt_type(skb)) {
skb               669 drivers/bluetooth/bluecard_cs.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb               670 drivers/bluetooth/bluecard_cs.c 	skb_queue_tail(&(info->txq), skb);
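
Note: bluecard_cs.c above handles partial hardware writes: if the FIFO accepted fewer bytes than skb->len, the sent prefix is pulled off and the skb is pushed back onto the head of the queue so the remainder goes out first on the next wakeup. A sketch; tx_advance is a hypothetical helper:

	#include <linux/skbuff.h>

	static void tx_advance(struct sk_buff_head *txq, struct sk_buff *skb,
			       unsigned int sent)
	{
		if (sent == skb->len) {
			kfree_skb(skb);			/* fully transmitted */
		} else {
			skb_pull(skb, sent);		/* drop the bytes already sent */
			skb_queue_head(txq, skb);	/* remainder goes out first */
		}
	}
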
skb                48 drivers/bluetooth/bpa10x.c 	struct sk_buff *skb = urb->context;
skb                49 drivers/bluetooth/bpa10x.c 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
skb                65 drivers/bluetooth/bpa10x.c 	kfree_skb(skb);
skb               248 drivers/bluetooth/bpa10x.c 	struct sk_buff *skb;
skb               253 drivers/bluetooth/bpa10x.c 	skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT);
skb               254 drivers/bluetooth/bpa10x.c 	if (IS_ERR(skb))
skb               255 drivers/bluetooth/bpa10x.c 		return PTR_ERR(skb);
skb               257 drivers/bluetooth/bpa10x.c 	bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
skb               259 drivers/bluetooth/bpa10x.c 	hci_set_fw_info(hdev, "%s", skb->data + 1);
skb               261 drivers/bluetooth/bpa10x.c 	kfree_skb(skb);
skb               265 drivers/bluetooth/bpa10x.c static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               275 drivers/bluetooth/bpa10x.c 	skb->dev = (void *) hdev;
skb               282 drivers/bluetooth/bpa10x.c 	*(u8 *)skb_push(skb, 1) = hci_skb_pkt_type(skb);
skb               284 drivers/bluetooth/bpa10x.c 	switch (hci_skb_pkt_type(skb)) {
skb               296 drivers/bluetooth/bpa10x.c 		dr->wLength      = __cpu_to_le16(skb->len);
skb               301 drivers/bluetooth/bpa10x.c 				skb->data, skb->len, bpa10x_tx_complete, skb);
skb               310 drivers/bluetooth/bpa10x.c 				skb->data, skb->len, bpa10x_tx_complete, skb);
skb               319 drivers/bluetooth/bpa10x.c 				skb->data, skb->len, bpa10x_tx_complete, skb);
skb               346 drivers/bluetooth/bpa10x.c 	struct sk_buff *skb;
skb               354 drivers/bluetooth/bpa10x.c 	skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT);
skb               355 drivers/bluetooth/bpa10x.c 	if (IS_ERR(skb))
skb               356 drivers/bluetooth/bpa10x.c 		return PTR_ERR(skb);
skb               358 drivers/bluetooth/bpa10x.c 	kfree_skb(skb);
skb               190 drivers/bluetooth/bt3c_cs.c 		register struct sk_buff *skb;
skb               196 drivers/bluetooth/bt3c_cs.c 		skb = skb_dequeue(&(info->txq));
skb               197 drivers/bluetooth/bt3c_cs.c 		if (!skb) {
skb               203 drivers/bluetooth/bt3c_cs.c 		len = bt3c_write(iobase, 256, skb->data, skb->len);
skb               205 drivers/bluetooth/bt3c_cs.c 		if (len != skb->len)
skb               208 drivers/bluetooth/bt3c_cs.c 		kfree_skb(skb);
skb               410 drivers/bluetooth/bt3c_cs.c static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               415 drivers/bluetooth/bt3c_cs.c 	switch (hci_skb_pkt_type(skb)) {
skb               428 drivers/bluetooth/bt3c_cs.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb               429 drivers/bluetooth/bt3c_cs.c 	skb_queue_tail(&(info->txq), skb);
skb                32 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb                34 drivers/bluetooth/btbcm.c 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
skb                36 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb)) {
skb                37 drivers/bluetooth/btbcm.c 		int err = PTR_ERR(skb);
skb                42 drivers/bluetooth/btbcm.c 	if (skb->len != sizeof(*bda)) {
skb                44 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb                48 drivers/bluetooth/btbcm.c 	bda = (struct hci_rp_read_bd_addr *)skb->data;
skb                85 drivers/bluetooth/btbcm.c 	kfree_skb(skb);
skb                93 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb                96 drivers/bluetooth/btbcm.c 	skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
skb                97 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb)) {
skb                98 drivers/bluetooth/btbcm.c 		err = PTR_ERR(skb);
skb               102 drivers/bluetooth/btbcm.c 	kfree_skb(skb);
skb               113 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               118 drivers/bluetooth/btbcm.c 	skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
skb               119 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb)) {
skb               120 drivers/bluetooth/btbcm.c 		err = PTR_ERR(skb);
skb               125 drivers/bluetooth/btbcm.c 	kfree_skb(skb);
skb               152 drivers/bluetooth/btbcm.c 		skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
skb               154 drivers/bluetooth/btbcm.c 		if (IS_ERR(skb)) {
skb               155 drivers/bluetooth/btbcm.c 			err = PTR_ERR(skb);
skb               160 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb               173 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               175 drivers/bluetooth/btbcm.c 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
skb               176 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb)) {
skb               177 drivers/bluetooth/btbcm.c 		int err = PTR_ERR(skb);
skb               181 drivers/bluetooth/btbcm.c 	kfree_skb(skb);
skb               191 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               193 drivers/bluetooth/btbcm.c 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
skb               195 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb)) {
skb               197 drivers/bluetooth/btbcm.c 			   PTR_ERR(skb));
skb               198 drivers/bluetooth/btbcm.c 		return skb;
skb               201 drivers/bluetooth/btbcm.c 	if (skb->len != sizeof(struct hci_rp_read_local_name)) {
skb               203 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb               207 drivers/bluetooth/btbcm.c 	return skb;
skb               212 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               214 drivers/bluetooth/btbcm.c 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
skb               216 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb)) {
skb               218 drivers/bluetooth/btbcm.c 			   PTR_ERR(skb));
skb               219 drivers/bluetooth/btbcm.c 		return skb;
skb               222 drivers/bluetooth/btbcm.c 	if (skb->len != sizeof(struct hci_rp_read_local_version)) {
skb               224 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb               228 drivers/bluetooth/btbcm.c 	return skb;
skb               233 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               235 drivers/bluetooth/btbcm.c 	skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT);
skb               236 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb)) {
skb               238 drivers/bluetooth/btbcm.c 			   PTR_ERR(skb));
skb               239 drivers/bluetooth/btbcm.c 		return skb;
skb               242 drivers/bluetooth/btbcm.c 	if (skb->len != 7) {
skb               244 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb               248 drivers/bluetooth/btbcm.c 	return skb;
skb               253 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               255 drivers/bluetooth/btbcm.c 	skb = __hci_cmd_sync(hdev, 0xfc6e, 0, NULL, HCI_INIT_TIMEOUT);
skb               256 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb)) {
skb               258 drivers/bluetooth/btbcm.c 			   PTR_ERR(skb));
skb               259 drivers/bluetooth/btbcm.c 		return skb;
skb               262 drivers/bluetooth/btbcm.c 	if (skb->len != 9) {
skb               264 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb               268 drivers/bluetooth/btbcm.c 	return skb;
skb               273 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               275 drivers/bluetooth/btbcm.c 	skb = __hci_cmd_sync(hdev, 0xfc5a, 0, NULL, HCI_INIT_TIMEOUT);
skb               276 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb)) {
skb               278 drivers/bluetooth/btbcm.c 			   PTR_ERR(skb));
skb               279 drivers/bluetooth/btbcm.c 		return skb;
skb               282 drivers/bluetooth/btbcm.c 	if (skb->len != 5) {
skb               284 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb               288 drivers/bluetooth/btbcm.c 	return skb;
skb               293 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               296 drivers/bluetooth/btbcm.c 	skb = btbcm_read_verbose_config(hdev);
skb               297 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb))
skb               298 drivers/bluetooth/btbcm.c 		return PTR_ERR(skb);
skb               300 drivers/bluetooth/btbcm.c 	bt_dev_info(hdev, "BCM: chip id %u", skb->data[1]);
skb               301 drivers/bluetooth/btbcm.c 	kfree_skb(skb);
skb               304 drivers/bluetooth/btbcm.c 	skb = btbcm_read_controller_features(hdev);
skb               305 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb))
skb               306 drivers/bluetooth/btbcm.c 		return PTR_ERR(skb);
skb               308 drivers/bluetooth/btbcm.c 	bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]);
skb               309 drivers/bluetooth/btbcm.c 	kfree_skb(skb);
skb               312 drivers/bluetooth/btbcm.c 	skb = btbcm_read_local_name(hdev);
skb               313 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb))
skb               314 drivers/bluetooth/btbcm.c 		return PTR_ERR(skb);
skb               316 drivers/bluetooth/btbcm.c 	bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
skb               317 drivers/bluetooth/btbcm.c 	kfree_skb(skb);
skb               364 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               375 drivers/bluetooth/btbcm.c 	skb = btbcm_read_local_version(hdev);
skb               376 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb))
skb               377 drivers/bluetooth/btbcm.c 		return PTR_ERR(skb);
skb               379 drivers/bluetooth/btbcm.c 	ver = (struct hci_rp_read_local_version *)skb->data;
skb               382 drivers/bluetooth/btbcm.c 	kfree_skb(skb);
skb               407 drivers/bluetooth/btbcm.c 		skb = btbcm_read_usb_product(hdev);
skb               408 drivers/bluetooth/btbcm.c 		if (IS_ERR(skb))
skb               409 drivers/bluetooth/btbcm.c 			return PTR_ERR(skb);
skb               411 drivers/bluetooth/btbcm.c 		vid = get_unaligned_le16(skb->data + 1);
skb               412 drivers/bluetooth/btbcm.c 		pid = get_unaligned_le16(skb->data + 3);
skb               413 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb               457 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               481 drivers/bluetooth/btbcm.c 	skb = btbcm_read_local_name(hdev);
skb               482 drivers/bluetooth/btbcm.c 	if (IS_ERR(skb))
skb               483 drivers/bluetooth/btbcm.c 		return PTR_ERR(skb);
skb               485 drivers/bluetooth/btbcm.c 	bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
skb               486 drivers/bluetooth/btbcm.c 	kfree_skb(skb);
skb               499 drivers/bluetooth/btbcm.c 	struct sk_buff *skb;
skb               508 drivers/bluetooth/btbcm.c 	skb = btbcm_read_verbose_config(hdev);
skb               509 drivers/bluetooth/btbcm.c 	if (!IS_ERR(skb)) {
skb               511 drivers/bluetooth/btbcm.c 			    skb->data[1], get_unaligned_le16(skb->data + 5));
skb               512 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb               516 drivers/bluetooth/btbcm.c 	skb = btbcm_read_usb_product(hdev);
skb               517 drivers/bluetooth/btbcm.c 	if (!IS_ERR(skb)) {
skb               519 drivers/bluetooth/btbcm.c 			    get_unaligned_le16(skb->data + 1),
skb               520 drivers/bluetooth/btbcm.c 			    get_unaligned_le16(skb->data + 3));
skb               521 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb               525 drivers/bluetooth/btbcm.c 	skb = btbcm_read_controller_features(hdev);
skb               526 drivers/bluetooth/btbcm.c 	if (!IS_ERR(skb)) {
skb               527 drivers/bluetooth/btbcm.c 		bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]);
skb               528 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
skb               532 drivers/bluetooth/btbcm.c 	skb = btbcm_read_local_name(hdev);
skb               533 drivers/bluetooth/btbcm.c 	if (!IS_ERR(skb)) {
skb               534 drivers/bluetooth/btbcm.c 		bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
skb               535 drivers/bluetooth/btbcm.c 		kfree_skb(skb);
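The btbcm excerpts above all follow one ownership contract around __hci_cmd_sync(): on failure the call returns an ERR_PTR and no skb was ever allocated, on success the caller owns the reply, must validate skb->len before touching the payload, and must kfree_skb() it when done. A minimal sketch of that contract, assuming <net/bluetooth/hci_core.h>; the 0xfc79 opcode and the helper name are invented purely for illustration:

    static int example_read_vendor_byte(struct hci_dev *hdev, u8 *out)
    {
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb))
            return PTR_ERR(skb);        /* no skb exists on error */

        /* data[0] is the status byte; reject unexpected sizes */
        if (skb->len < 2) {
            kfree_skb(skb);
            return -EIO;
        }

        *out = skb->data[1];
        kfree_skb(skb);                 /* caller owns the reply */
        return 0;
    }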
skb                26 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb                28 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
skb                30 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb                31 drivers/bluetooth/btintel.c 		int err = PTR_ERR(skb);
skb                37 drivers/bluetooth/btintel.c 	if (skb->len != sizeof(*bda)) {
skb                39 drivers/bluetooth/btintel.c 		kfree_skb(skb);
skb                43 drivers/bluetooth/btintel.c 	bda = (struct hci_rp_read_bd_addr *)skb->data;
skb                56 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb                65 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb                67 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
skb                68 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb                70 drivers/bluetooth/btintel.c 			   PTR_ERR(skb));
skb                71 drivers/bluetooth/btintel.c 		return PTR_ERR(skb);
skb                73 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb                82 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb                92 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
skb                93 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb                95 drivers/bluetooth/btintel.c 			   PTR_ERR(skb));
skb                96 drivers/bluetooth/btintel.c 		return PTR_ERR(skb);
skb                98 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb               106 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb               109 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
skb               110 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb               111 drivers/bluetooth/btintel.c 		err = PTR_ERR(skb);
skb               116 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb               124 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb               138 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT);
skb               139 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb               140 drivers/bluetooth/btintel.c 		err = PTR_ERR(skb);
skb               147 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb               175 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb               180 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
skb               181 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb               183 drivers/bluetooth/btintel.c 			   PTR_ERR(skb));
skb               186 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb               188 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
skb               189 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb               191 drivers/bluetooth/btintel.c 			   PTR_ERR(skb));
skb               195 drivers/bluetooth/btintel.c 	if (skb->len != 13) {
skb               197 drivers/bluetooth/btintel.c 		kfree_skb(skb);
skb               201 drivers/bluetooth/btintel.c 	bt_dev_err(hdev, "Exception info %s", (char *)(skb->data + 1));
skb               203 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb               233 drivers/bluetooth/btintel.c 		struct sk_buff *skb;
skb               239 drivers/bluetooth/btintel.c 		skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
skb               241 drivers/bluetooth/btintel.c 		if (IS_ERR(skb))
skb               242 drivers/bluetooth/btintel.c 			return PTR_ERR(skb);
skb               244 drivers/bluetooth/btintel.c 		kfree_skb(skb);
skb               257 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb               278 drivers/bluetooth/btintel.c 		skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
skb               280 drivers/bluetooth/btintel.c 		if (IS_ERR(skb)) {
skb               282 drivers/bluetooth/btintel.c 				   PTR_ERR(skb));
skb               284 drivers/bluetooth/btintel.c 			return PTR_ERR(skb);
skb               288 drivers/bluetooth/btintel.c 		kfree_skb(skb);
skb               302 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb               308 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT);
skb               309 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb               310 drivers/bluetooth/btintel.c 		err = PTR_ERR(skb);
skb               314 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb               340 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb               342 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
skb               343 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb               345 drivers/bluetooth/btintel.c 			   PTR_ERR(skb));
skb               346 drivers/bluetooth/btintel.c 		return PTR_ERR(skb);
skb               349 drivers/bluetooth/btintel.c 	if (skb->len != sizeof(*ver)) {
skb               351 drivers/bluetooth/btintel.c 		kfree_skb(skb);
skb               355 drivers/bluetooth/btintel.c 	memcpy(ver, skb->data, sizeof(*ver));
skb               357 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb               394 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb               420 drivers/bluetooth/btintel.c 	skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp,
skb               422 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb               423 drivers/bluetooth/btintel.c 		err = PTR_ERR(skb);
skb               429 drivers/bluetooth/btintel.c 	if (skb->len != sizeof(*rp) + val_size) {
skb               436 drivers/bluetooth/btintel.c 	rp = (struct ibt_rp_reg_access *)skb->data;
skb               448 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb               458 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb               492 drivers/bluetooth/btintel.c 	skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT);
skb               493 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb               494 drivers/bluetooth/btintel.c 		err = PTR_ERR(skb);
skb               499 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb               561 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb               565 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
skb               567 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb               569 drivers/bluetooth/btintel.c 		return PTR_ERR(skb);
skb               572 drivers/bluetooth/btintel.c 	kfree_skb(skb);
skb               581 drivers/bluetooth/btintel.c 	struct sk_buff *skb;
skb               583 drivers/bluetooth/btintel.c 	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
skb               584 drivers/bluetooth/btintel.c 	if (IS_ERR(skb)) {
skb               586 drivers/bluetooth/btintel.c 			   PTR_ERR(skb));
skb               587 drivers/bluetooth/btintel.c 		return PTR_ERR(skb);
skb               590 drivers/bluetooth/btintel.c 	if (skb->len != sizeof(*params)) {
skb               592 drivers/bluetooth/btintel.c 		kfree_skb(skb);
skb               596 drivers/bluetooth/btintel.c 	memcpy(params, skb->data, sizeof(*params));
skb               598 drivers/bluetooth/btintel.c 	kfree_skb(skb);
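The btintel download loop above (opcode 0xfc09, sending fragment_len + 1 bytes) moves firmware in pieces because an HCI command payload tops out at 255 bytes; the cap of 252 data bytes below leaves room for a one-byte fragment-type tag. A condensed sketch under those assumptions, with an invented helper name:

    static int example_send_fw_fragments(struct hci_dev *hdev, u8 type,
                                         const u8 *data, size_t len)
    {
        while (len > 0) {
            size_t frag = min_t(size_t, len, 252);
            struct sk_buff *skb;
            u8 buf[253];

            buf[0] = type;              /* fragment type tag */
            memcpy(buf + 1, data, frag);

            skb = __hci_cmd_sync(hdev, 0xfc09, frag + 1, buf,
                                 HCI_INIT_TIMEOUT);
            if (IS_ERR(skb))
                return PTR_ERR(skb);
            kfree_skb(skb);             /* only the status matters */

            data += frag;
            len -= frag;
        }
        return 0;
    }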
skb               174 drivers/bluetooth/btmrvl_drv.h bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
skb               175 drivers/bluetooth/btmrvl_drv.h int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
skb                54 drivers/bluetooth/btmrvl_main.c bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
skb                56 drivers/bluetooth/btmrvl_main.c 	struct hci_event_hdr *hdr = (void *) skb->data;
skb                62 drivers/bluetooth/btmrvl_main.c 		ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
skb                73 drivers/bluetooth/btmrvl_main.c 				kfree_skb(skb);
skb                83 drivers/bluetooth/btmrvl_main.c int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
skb                89 drivers/bluetooth/btmrvl_main.c 	event = (struct btmrvl_event *) skb->data;
skb               169 drivers/bluetooth/btmrvl_main.c 		kfree_skb(skb);
skb               178 drivers/bluetooth/btmrvl_main.c 	struct sk_buff *skb;
skb               186 drivers/bluetooth/btmrvl_main.c 	skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_KERNEL);
skb               187 drivers/bluetooth/btmrvl_main.c 	if (!skb) {
skb               192 drivers/bluetooth/btmrvl_main.c 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
skb               197 drivers/bluetooth/btmrvl_main.c 		skb_put_data(skb, param, len);
skb               199 drivers/bluetooth/btmrvl_main.c 	hci_skb_pkt_type(skb) = MRVL_VENDOR_PKT;
skb               201 drivers/bluetooth/btmrvl_main.c 	skb_queue_head(&priv->adapter->tx_queue, skb);
skb               361 drivers/bluetooth/btmrvl_main.c static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
skb               365 drivers/bluetooth/btmrvl_main.c 	if (!skb || !skb->data)
skb               368 drivers/bluetooth/btmrvl_main.c 	if (!skb->len || ((skb->len + BTM_HEADER_LEN) > BTM_UPLD_SIZE)) {
skb               370 drivers/bluetooth/btmrvl_main.c 						skb->len, BTM_UPLD_SIZE);
skb               374 drivers/bluetooth/btmrvl_main.c 	skb_push(skb, BTM_HEADER_LEN);
skb               381 drivers/bluetooth/btmrvl_main.c 	skb->data[0] = (skb->len & 0x0000ff);
skb               382 drivers/bluetooth/btmrvl_main.c 	skb->data[1] = (skb->len & 0x00ff00) >> 8;
skb               383 drivers/bluetooth/btmrvl_main.c 	skb->data[2] = (skb->len & 0xff0000) >> 16;
skb               384 drivers/bluetooth/btmrvl_main.c 	skb->data[3] = hci_skb_pkt_type(skb);
skb               387 drivers/bluetooth/btmrvl_main.c 		ret = priv->hw_host_to_card(priv, skb->data, skb->len);
skb               427 drivers/bluetooth/btmrvl_main.c static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               431 drivers/bluetooth/btmrvl_main.c 	BT_DBG("type=%d, len=%d", hci_skb_pkt_type(skb), skb->len);
skb               438 drivers/bluetooth/btmrvl_main.c 	switch (hci_skb_pkt_type(skb)) {
skb               452 drivers/bluetooth/btmrvl_main.c 	skb_queue_tail(&priv->adapter->tx_queue, skb);
skb               569 drivers/bluetooth/btmrvl_main.c 	struct sk_buff *skb;
skb               577 drivers/bluetooth/btmrvl_main.c 	skb = __hci_cmd_sync(hdev, BT_CMD_SET_BDADDR, sizeof(buf), buf,
skb               579 drivers/bluetooth/btmrvl_main.c 	if (IS_ERR(skb)) {
skb               580 drivers/bluetooth/btmrvl_main.c 		ret = PTR_ERR(skb);
skb               585 drivers/bluetooth/btmrvl_main.c 	kfree_skb(skb);
skb               600 drivers/bluetooth/btmrvl_main.c 	struct sk_buff *skb;
skb               655 drivers/bluetooth/btmrvl_main.c 		skb = skb_dequeue(&adapter->tx_queue);
skb               656 drivers/bluetooth/btmrvl_main.c 		if (skb) {
skb               657 drivers/bluetooth/btmrvl_main.c 			if (btmrvl_tx_pkt(priv, skb))
skb               660 drivers/bluetooth/btmrvl_main.c 				priv->btmrvl_dev.hcidev->stat.byte_tx += skb->len;
skb               662 drivers/bluetooth/btmrvl_main.c 			kfree_skb(skb);
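The btmrvl command builder above assembles a vendor command by hand and leaves delivery to the tx worker thread. A sketch of that enqueue path; note the driver tags the skb MRVL_VENDOR_PKT, while this sketch uses the generic HCI_COMMAND_PKT:

    static int example_queue_vendor_cmd(struct sk_buff_head *txq, u16 opcode,
                                        const void *param, u8 len)
    {
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_KERNEL);
        if (!skb)
            return -ENOMEM;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen = len;

        if (len)
            skb_put_data(skb, param, len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        skb_queue_head(txq, skb);       /* jump the queue, as above */
        return 0;
    }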
skb               761 drivers/bluetooth/btmrvl_sdio.c 	struct sk_buff *skb = NULL;
skb               792 drivers/bluetooth/btmrvl_sdio.c 	skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_KERNEL);
skb               793 drivers/bluetooth/btmrvl_sdio.c 	if (!skb) {
skb               799 drivers/bluetooth/btmrvl_sdio.c 	if ((unsigned long) skb->data & (BTSDIO_DMA_ALIGN - 1)) {
skb               800 drivers/bluetooth/btmrvl_sdio.c 		skb_put(skb, (unsigned long) skb->data &
skb               802 drivers/bluetooth/btmrvl_sdio.c 		skb_pull(skb, (unsigned long) skb->data &
skb               806 drivers/bluetooth/btmrvl_sdio.c 	payload = skb->data;
skb               837 drivers/bluetooth/btmrvl_sdio.c 		hci_skb_pkt_type(skb) = type;
skb               838 drivers/bluetooth/btmrvl_sdio.c 		skb_put(skb, buf_len);
skb               839 drivers/bluetooth/btmrvl_sdio.c 		skb_pull(skb, SDIO_HEADER_LEN);
skb               842 drivers/bluetooth/btmrvl_sdio.c 			if (btmrvl_check_evtpkt(priv, skb))
skb               843 drivers/bluetooth/btmrvl_sdio.c 				hci_recv_frame(hdev, skb);
skb               845 drivers/bluetooth/btmrvl_sdio.c 			hci_recv_frame(hdev, skb);
skb               852 drivers/bluetooth/btmrvl_sdio.c 		hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
skb               853 drivers/bluetooth/btmrvl_sdio.c 		skb_put(skb, buf_len);
skb               854 drivers/bluetooth/btmrvl_sdio.c 		skb_pull(skb, SDIO_HEADER_LEN);
skb               856 drivers/bluetooth/btmrvl_sdio.c 		if (btmrvl_process_event(priv, skb))
skb               857 drivers/bluetooth/btmrvl_sdio.c 			hci_recv_frame(hdev, skb);
skb               866 drivers/bluetooth/btmrvl_sdio.c 		kfree_skb(skb);
skb               867 drivers/bluetooth/btmrvl_sdio.c 		skb = NULL;
skb               874 drivers/bluetooth/btmrvl_sdio.c 		kfree_skb(skb);
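The btmrvl_sdio receive path above allocates a block-rounded buffer, pulls the card payload in one sdio_readsb(), then strips the transport header before handing the frame to the core. A simplified sketch; the register argument and the fixed 4-byte header (24-bit length plus type byte, matching the tx side at line 381) stand in for the driver's SDIO_HEADER_LEN and card registers, and the real code additionally rounds to the block size and realigns for DMA:

    static int example_sdio_rx(struct hci_dev *hdev, struct sdio_func *func,
                               unsigned int reg, unsigned int rx_len)
    {
        struct sk_buff *skb;
        int err;

        skb = bt_skb_alloc(rx_len, GFP_KERNEL);
        if (!skb)
            return -ENOMEM;

        skb_put(skb, rx_len);
        err = sdio_readsb(func, skb->data, reg, rx_len);
        if (err < 0) {
            kfree_skb(skb);
            return err;
        }

        hci_skb_pkt_type(skb) = skb->data[3];   /* type byte in header */
        skb_pull(skb, 4);               /* drop the transport header */
        return hci_recv_frame(hdev, skb);       /* core consumes the skb */
    }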
skb               260 drivers/bluetooth/btmtksdio.c 			       struct sk_buff *skb)
skb               266 drivers/bluetooth/btmtksdio.c 	if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) {
skb               267 drivers/bluetooth/btmtksdio.c 		err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0,
skb               274 drivers/bluetooth/btmtksdio.c 	skb_push(skb, sizeof(*sdio_hdr));
skb               276 drivers/bluetooth/btmtksdio.c 	sdio_hdr = (void *)skb->data;
skb               277 drivers/bluetooth/btmtksdio.c 	sdio_hdr->len = cpu_to_le16(skb->len);
skb               279 drivers/bluetooth/btmtksdio.c 	sdio_hdr->bt_type = hci_skb_pkt_type(skb);
skb               281 drivers/bluetooth/btmtksdio.c 	err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
skb               282 drivers/bluetooth/btmtksdio.c 			   round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
skb               286 drivers/bluetooth/btmtksdio.c 	bdev->hdev->stat.byte_tx += skb->len;
skb               288 drivers/bluetooth/btmtksdio.c 	kfree_skb(skb);
skb               293 drivers/bluetooth/btmtksdio.c 	skb_pull(skb, sizeof(*sdio_hdr));
skb               307 drivers/bluetooth/btmtksdio.c 	struct sk_buff *skb;
skb               314 drivers/bluetooth/btmtksdio.c 	while ((skb = skb_dequeue(&bdev->txq))) {
skb               315 drivers/bluetooth/btmtksdio.c 		err = btmtksdio_tx_packet(bdev, skb);
skb               318 drivers/bluetooth/btmtksdio.c 			skb_queue_head(&bdev->txq, skb);
skb               329 drivers/bluetooth/btmtksdio.c static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
skb               332 drivers/bluetooth/btmtksdio.c 	struct hci_event_hdr *hdr = (void *)skb->data;
skb               346 drivers/bluetooth/btmtksdio.c 		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
skb               353 drivers/bluetooth/btmtksdio.c 	err = hci_recv_frame(hdev, skb);
skb               388 drivers/bluetooth/btmtksdio.c 	struct sk_buff *skb;
skb               395 drivers/bluetooth/btmtksdio.c 	skb = bt_skb_alloc(rx_size, GFP_KERNEL);
skb               396 drivers/bluetooth/btmtksdio.c 	if (!skb)
skb               399 drivers/bluetooth/btmtksdio.c 	skb_put(skb, rx_size);
skb               401 drivers/bluetooth/btmtksdio.c 	err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size);
skb               405 drivers/bluetooth/btmtksdio.c 	sdio_hdr = (void *)skb->data;
skb               417 drivers/bluetooth/btmtksdio.c 	hci_skb_pkt_type(skb) = sdio_hdr->bt_type;
skb               420 drivers/bluetooth/btmtksdio.c 	skb_pull(skb, sizeof(*sdio_hdr));
skb               438 drivers/bluetooth/btmtksdio.c 	if (skb->len < (&pkts[i])->hlen) {
skb               445 drivers/bluetooth/btmtksdio.c 			dlen = skb->data[(&pkts[i])->loff];
skb               448 drivers/bluetooth/btmtksdio.c 			dlen = get_unaligned_le16(skb->data +
skb               455 drivers/bluetooth/btmtksdio.c 	pad_size = skb->len - (&pkts[i])->hlen -  dlen;
skb               464 drivers/bluetooth/btmtksdio.c 	skb_trim(skb, skb->len - pad_size);
skb               467 drivers/bluetooth/btmtksdio.c 	(&pkts[i])->recv(bdev->hdev, skb);
skb               474 drivers/bluetooth/btmtksdio.c 	kfree_skb(skb);
skb               763 drivers/bluetooth/btmtksdio.c 	struct sk_buff *skb;
skb               831 drivers/bluetooth/btmtksdio.c 	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
skb               833 drivers/bluetooth/btmtksdio.c 	if (IS_ERR(skb)) {
skb               834 drivers/bluetooth/btmtksdio.c 		err = PTR_ERR(skb);
skb               838 drivers/bluetooth/btmtksdio.c 	kfree_skb(skb);
skb               897 drivers/bluetooth/btmtksdio.c static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               901 drivers/bluetooth/btmtksdio.c 	switch (hci_skb_pkt_type(skb)) {
skb               918 drivers/bluetooth/btmtksdio.c 	skb_queue_tail(&bdev->txq, skb);
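btmtksdio's tx path above must prepend its own header in front of the H4 payload, so it first checks skb_headroom() and reallocates with pskb_expand_head() when the caller left no room. A sketch of that prepend, assuming a hypothetical 4-byte header of the same general shape:

    struct example_sdio_hdr {
        __le16 len;                     /* total length, header included */
        u8     reserved;
        u8     bt_type;                 /* H4 packet type */
    } __packed;

    static int example_push_dev_header(struct sk_buff *skb)
    {
        struct example_sdio_hdr *hdr;

        if (skb_headroom(skb) < sizeof(*hdr)) {
            int err = pskb_expand_head(skb, sizeof(*hdr), 0, GFP_ATOMIC);

            if (err)
                return err;
        }

        hdr = skb_push(skb, sizeof(*hdr));
        hdr->len = cpu_to_le16(skb->len);
        hdr->reserved = 0;
        hdr->bt_type = hci_skb_pkt_type(skb);
        return 0;
    }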
skb               315 drivers/bluetooth/btmtkuart.c static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
skb               318 drivers/bluetooth/btmtkuart.c 	struct hci_event_hdr *hdr = (void *)skb->data;
skb               332 drivers/bluetooth/btmtkuart.c 		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
skb               339 drivers/bluetooth/btmtkuart.c 	err = hci_recv_frame(hdev, skb);
skb               379 drivers/bluetooth/btmtkuart.c 			struct sk_buff *skb = skb_dequeue(&bdev->txq);
skb               382 drivers/bluetooth/btmtkuart.c 			if (!skb)
skb               385 drivers/bluetooth/btmtkuart.c 			len = serdev_device_write_buf(serdev, skb->data,
skb               386 drivers/bluetooth/btmtkuart.c 						      skb->len);
skb               389 drivers/bluetooth/btmtkuart.c 			skb_pull(skb, len);
skb               390 drivers/bluetooth/btmtkuart.c 			if (skb->len > 0) {
skb               391 drivers/bluetooth/btmtkuart.c 				skb_queue_head(&bdev->txq, skb);
skb               395 drivers/bluetooth/btmtkuart.c 			switch (hci_skb_pkt_type(skb)) {
skb               407 drivers/bluetooth/btmtkuart.c 			kfree_skb(skb);
skb               734 drivers/bluetooth/btmtkuart.c 	struct sk_buff *skb;
skb               824 drivers/bluetooth/btmtkuart.c 	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
skb               826 drivers/bluetooth/btmtkuart.c 	if (IS_ERR(skb)) {
skb               827 drivers/bluetooth/btmtkuart.c 		err = PTR_ERR(skb);
skb               831 drivers/bluetooth/btmtkuart.c 	kfree_skb(skb);
skb               864 drivers/bluetooth/btmtkuart.c static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               871 drivers/bluetooth/btmtkuart.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb               874 drivers/bluetooth/btmtkuart.c 	if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
skb               875 drivers/bluetooth/btmtkuart.c 	    (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
skb               876 drivers/bluetooth/btmtkuart.c 		err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
skb               883 drivers/bluetooth/btmtkuart.c 	dlen = skb->len;
skb               884 drivers/bluetooth/btmtkuart.c 	shdr = skb_push(skb, sizeof(*shdr));
skb               890 drivers/bluetooth/btmtkuart.c 	skb_put_zero(skb, MTK_STP_TLR_SIZE);
skb               892 drivers/bluetooth/btmtkuart.c 	skb_queue_tail(&bdev->txq, skb);
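The btmtkuart tx loop above handles short writes: serdev_device_write_buf() may accept only part of the skb, so the consumed bytes are pulled off and the remainder is requeued at the head to preserve ordering. A minimal sketch, with error handling reduced to "treat as nothing sent":

    static void example_tx_work(struct serdev_device *serdev,
                                struct sk_buff_head *txq)
    {
        struct sk_buff *skb;

        while ((skb = skb_dequeue(txq))) {
            int len = serdev_device_write_buf(serdev, skb->data,
                                              skb->len);
            if (len < 0)
                len = 0;

            skb_pull(skb, len);         /* drop what was accepted */
            if (skb->len > 0) {
                skb_queue_head(txq, skb);       /* finish later */
                break;
            }
            kfree_skb(skb);
        }
    }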
skb                19 drivers/bluetooth/btqca.c 	struct sk_buff *skb;
skb                28 drivers/bluetooth/btqca.c 	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
skb                30 drivers/bluetooth/btqca.c 	if (IS_ERR(skb)) {
skb                31 drivers/bluetooth/btqca.c 		err = PTR_ERR(skb);
skb                37 drivers/bluetooth/btqca.c 	if (skb->len != sizeof(*edl) + sizeof(*ver)) {
skb                38 drivers/bluetooth/btqca.c 		bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
skb                43 drivers/bluetooth/btqca.c 	edl = (struct edl_event_hdr *)(skb->data);
skb                75 drivers/bluetooth/btqca.c 	kfree_skb(skb);
skb                85 drivers/bluetooth/btqca.c 	struct sk_buff *skb;
skb                90 drivers/bluetooth/btqca.c 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
skb                91 drivers/bluetooth/btqca.c 	if (IS_ERR(skb)) {
skb                92 drivers/bluetooth/btqca.c 		err = PTR_ERR(skb);
skb                97 drivers/bluetooth/btqca.c 	kfree_skb(skb);
skb               104 drivers/bluetooth/btqca.c 	struct sk_buff *skb;
skb               109 drivers/bluetooth/btqca.c 	skb = __hci_cmd_sync_ev(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
skb               112 drivers/bluetooth/btqca.c 	if (IS_ERR(skb)) {
skb               113 drivers/bluetooth/btqca.c 		err = PTR_ERR(skb);
skb               118 drivers/bluetooth/btqca.c 	kfree_skb(skb);
skb               228 drivers/bluetooth/btqca.c 	struct sk_buff *skb;
skb               242 drivers/bluetooth/btqca.c 	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
skb               244 drivers/bluetooth/btqca.c 	if (IS_ERR(skb)) {
skb               245 drivers/bluetooth/btqca.c 		err = PTR_ERR(skb);
skb               250 drivers/bluetooth/btqca.c 	if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
skb               256 drivers/bluetooth/btqca.c 	edl = (struct edl_event_hdr *)(skb->data);
skb               273 drivers/bluetooth/btqca.c 	kfree_skb(skb);
skb               282 drivers/bluetooth/btqca.c 	struct sk_buff *skb;
skb               284 drivers/bluetooth/btqca.c 	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
skb               285 drivers/bluetooth/btqca.c 	if (!skb)
skb               288 drivers/bluetooth/btqca.c 	hdr = skb_put(skb, sizeof(*hdr));
skb               292 drivers/bluetooth/btqca.c 	evt = skb_put(skb, sizeof(*evt));
skb               296 drivers/bluetooth/btqca.c 	skb_put_u8(skb, QCA_HCI_CC_SUCCESS);
skb               298 drivers/bluetooth/btqca.c 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb               300 drivers/bluetooth/btqca.c 	return hci_recv_frame(hdev, skb);
skb               359 drivers/bluetooth/btqca.c 	struct sk_buff *skb;
skb               367 drivers/bluetooth/btqca.c 	skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
skb               369 drivers/bluetooth/btqca.c 	if (IS_ERR(skb)) {
skb               370 drivers/bluetooth/btqca.c 		err = PTR_ERR(skb);
skb               375 drivers/bluetooth/btqca.c 	kfree_skb(skb);
skb               450 drivers/bluetooth/btqca.c 	struct sk_buff *skb;
skb               453 drivers/bluetooth/btqca.c 	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
skb               455 drivers/bluetooth/btqca.c 	if (IS_ERR(skb)) {
skb               456 drivers/bluetooth/btqca.c 		err = PTR_ERR(skb);
skb               461 drivers/bluetooth/btqca.c 	kfree_skb(skb);
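The btqca lines above (btqca.c:282–300) fabricate a command-complete event, a trick typically used when the controller reboots into new firmware and never answers the last command itself; QCA_HCI_CC_SUCCESS is the trailing status byte, and btusb repeats the same pattern below (btusb.c:1970). A sketch of the injection, with the opcode left as a parameter:

    static int example_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
    {
        struct hci_event_hdr *hdr;
        struct hci_ev_cmd_complete *evt;
        struct sk_buff *skb;

        skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
        if (!skb)
            return -ENOMEM;

        hdr = skb_put(skb, sizeof(*hdr));
        hdr->evt = HCI_EV_CMD_COMPLETE;
        hdr->plen = sizeof(*evt) + 1;

        evt = skb_put(skb, sizeof(*evt));
        evt->ncmd = 1;
        evt->opcode = cpu_to_le16(opcode);

        skb_put_u8(skb, 0x00);          /* status: success */
        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

        return hci_recv_frame(hdev, skb);       /* core consumes it */
    }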
skb                30 drivers/bluetooth/btqcomsmd.c 	struct sk_buff *skb;
skb                33 drivers/bluetooth/btqcomsmd.c 	skb = bt_skb_alloc(count, GFP_ATOMIC);
skb                34 drivers/bluetooth/btqcomsmd.c 	if (!skb) {
skb                39 drivers/bluetooth/btqcomsmd.c 	hci_skb_pkt_type(skb) = type;
skb                40 drivers/bluetooth/btqcomsmd.c 	skb_put_data(skb, data, count);
skb                42 drivers/bluetooth/btqcomsmd.c 	return hci_recv_frame(hdev, skb);
skb                63 drivers/bluetooth/btqcomsmd.c static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
skb                68 drivers/bluetooth/btqcomsmd.c 	switch (hci_skb_pkt_type(skb)) {
skb                70 drivers/bluetooth/btqcomsmd.c 		ret = rpmsg_send(btq->acl_channel, skb->data, skb->len);
skb                76 drivers/bluetooth/btqcomsmd.c 		hdev->stat.byte_tx += skb->len;
skb                79 drivers/bluetooth/btqcomsmd.c 		ret = rpmsg_send(btq->cmd_channel, skb->data, skb->len);
skb                85 drivers/bluetooth/btqcomsmd.c 		hdev->stat.byte_tx += skb->len;
skb                93 drivers/bluetooth/btqcomsmd.c 		kfree_skb(skb);
skb               110 drivers/bluetooth/btqcomsmd.c 	struct sk_buff *skb;
skb               112 drivers/bluetooth/btqcomsmd.c 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
skb               113 drivers/bluetooth/btqcomsmd.c 	if (IS_ERR(skb))
skb               114 drivers/bluetooth/btqcomsmd.c 		return PTR_ERR(skb);
skb               115 drivers/bluetooth/btqcomsmd.c 	kfree_skb(skb);
skb                49 drivers/bluetooth/btrsi.c static int rsi_hci_send_pkt(struct hci_dev *hdev, struct sk_buff *skb)
skb                54 drivers/bluetooth/btrsi.c 	switch (hci_skb_pkt_type(skb)) {
skb                66 drivers/bluetooth/btrsi.c 	if (skb_headroom(skb) < RSI_HEADROOM_FOR_BT_HAL) {
skb                68 drivers/bluetooth/btrsi.c 		new_skb = skb_realloc_headroom(skb, RSI_HEADROOM_FOR_BT_HAL);
skb                71 drivers/bluetooth/btrsi.c 		bt_cb(new_skb)->pkt_type = hci_skb_pkt_type(skb);
skb                72 drivers/bluetooth/btrsi.c 		kfree_skb(skb);
skb                73 drivers/bluetooth/btrsi.c 		skb = new_skb;
skb                74 drivers/bluetooth/btrsi.c 		if (!IS_ALIGNED((unsigned long)skb->data, RSI_DMA_ALIGN)) {
skb                75 drivers/bluetooth/btrsi.c 			u8 *skb_data = skb->data;
skb                76 drivers/bluetooth/btrsi.c 			int skb_len = skb->len;
skb                78 drivers/bluetooth/btrsi.c 			skb_push(skb, RSI_DMA_ALIGN);
skb                79 drivers/bluetooth/btrsi.c 			skb_pull(skb, PTR_ALIGN(skb->data,
skb                80 drivers/bluetooth/btrsi.c 						RSI_DMA_ALIGN) - skb->data);
skb                81 drivers/bluetooth/btrsi.c 			memmove(skb->data, skb_data, skb_len);
skb                82 drivers/bluetooth/btrsi.c 			skb_trim(skb, skb_len);
skb                86 drivers/bluetooth/btrsi.c 	return h_adapter->proto_ops->coex_send_pkt(h_adapter->priv, skb,
skb                94 drivers/bluetooth/btrsi.c 	struct sk_buff *skb;
skb                97 drivers/bluetooth/btrsi.c 	skb = dev_alloc_skb(pkt_len);
skb                98 drivers/bluetooth/btrsi.c 	if (!skb)
skb               101 drivers/bluetooth/btrsi.c 	memcpy(skb->data, pkt + RSI_FRAME_DESC_SIZE, pkt_len);
skb               102 drivers/bluetooth/btrsi.c 	skb_put(skb, pkt_len);
skb               103 drivers/bluetooth/btrsi.c 	h_adapter->hdev->stat.byte_rx += skb->len;
skb               105 drivers/bluetooth/btrsi.c 	hci_skb_pkt_type(skb) = pkt[14];
skb               107 drivers/bluetooth/btrsi.c 	return hci_recv_frame(hdev, skb);
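rsi_hci_send_pkt above realigns skb->data for DMA without another allocation: it borrows headroom with skb_push(), pulls forward to the next boundary, then memmove()s the payload into place. A sketch, assuming at least "align" bytes of headroom (which the preceding skb_realloc_headroom() guarantees in the driver):

    static void example_dma_align(struct sk_buff *skb, unsigned long align)
    {
        u8 *data = skb->data;
        int len = skb->len;

        skb_push(skb, align);           /* borrow headroom */
        skb_pull(skb, PTR_ALIGN(skb->data, align) - skb->data);
        memmove(skb->data, data, len);  /* slide payload into place */
        skb_trim(skb, len);             /* restore the true length */
    }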
skb               183 drivers/bluetooth/btrtl.c 	struct sk_buff *skb;
skb               185 drivers/bluetooth/btrtl.c 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
skb               187 drivers/bluetooth/btrtl.c 	if (IS_ERR(skb)) {
skb               189 drivers/bluetooth/btrtl.c 			    PTR_ERR(skb));
skb               190 drivers/bluetooth/btrtl.c 		return skb;
skb               193 drivers/bluetooth/btrtl.c 	if (skb->len != sizeof(struct hci_rp_read_local_version)) {
skb               195 drivers/bluetooth/btrtl.c 		kfree_skb(skb);
skb               199 drivers/bluetooth/btrtl.c 	return skb;
skb               205 drivers/bluetooth/btrtl.c 	struct sk_buff *skb;
skb               208 drivers/bluetooth/btrtl.c 	skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
skb               209 drivers/bluetooth/btrtl.c 	if (IS_ERR(skb)) {
skb               211 drivers/bluetooth/btrtl.c 			    PTR_ERR(skb));
skb               212 drivers/bluetooth/btrtl.c 		return PTR_ERR(skb);
skb               215 drivers/bluetooth/btrtl.c 	if (skb->len != sizeof(*rom_version)) {
skb               217 drivers/bluetooth/btrtl.c 		kfree_skb(skb);
skb               221 drivers/bluetooth/btrtl.c 	rom_version = (struct rtl_rom_version_evt *)skb->data;
skb               227 drivers/bluetooth/btrtl.c 	kfree_skb(skb);
skb               392 drivers/bluetooth/btrtl.c 	struct sk_buff *skb;
skb               400 drivers/bluetooth/btrtl.c 		struct sk_buff *skb;
skb               416 drivers/bluetooth/btrtl.c 		skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
skb               418 drivers/bluetooth/btrtl.c 		if (IS_ERR(skb)) {
skb               420 drivers/bluetooth/btrtl.c 				    PTR_ERR(skb));
skb               421 drivers/bluetooth/btrtl.c 			ret = -PTR_ERR(skb);
skb               425 drivers/bluetooth/btrtl.c 		if (skb->len != sizeof(struct rtl_download_response)) {
skb               427 drivers/bluetooth/btrtl.c 			kfree_skb(skb);
skb               432 drivers/bluetooth/btrtl.c 		kfree_skb(skb);
skb               436 drivers/bluetooth/btrtl.c 	skb = btrtl_read_local_version(hdev);
skb               437 drivers/bluetooth/btrtl.c 	if (IS_ERR(skb)) {
skb               438 drivers/bluetooth/btrtl.c 		ret = PTR_ERR(skb);
skb               443 drivers/bluetooth/btrtl.c 	rp = (struct hci_rp_read_local_version *)skb->data;
skb               446 drivers/bluetooth/btrtl.c 	kfree_skb(skb);
skb               538 drivers/bluetooth/btrtl.c 	struct sk_buff *skb;
skb               551 drivers/bluetooth/btrtl.c 	skb = btrtl_read_local_version(hdev);
skb               552 drivers/bluetooth/btrtl.c 	if (IS_ERR(skb)) {
skb               553 drivers/bluetooth/btrtl.c 		ret = PTR_ERR(skb);
skb               557 drivers/bluetooth/btrtl.c 	resp = (struct hci_rp_read_local_version *)skb->data;
skb               565 drivers/bluetooth/btrtl.c 	kfree_skb(skb);
skb               673 drivers/bluetooth/btrtl.c 	struct sk_buff *skb;
skb               679 drivers/bluetooth/btrtl.c 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
skb               680 drivers/bluetooth/btrtl.c 	if (IS_ERR(skb)) {
skb               681 drivers/bluetooth/btrtl.c 		ret = PTR_ERR(skb);
skb               685 drivers/bluetooth/btrtl.c 	kfree_skb(skb);
skb                64 drivers/bluetooth/btsdio.c static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb)
skb                71 drivers/bluetooth/btsdio.c 	skb_push(skb, 4);
skb                72 drivers/bluetooth/btsdio.c 	skb->data[0] = (skb->len & 0x0000ff);
skb                73 drivers/bluetooth/btsdio.c 	skb->data[1] = (skb->len & 0x00ff00) >> 8;
skb                74 drivers/bluetooth/btsdio.c 	skb->data[2] = (skb->len & 0xff0000) >> 16;
skb                75 drivers/bluetooth/btsdio.c 	skb->data[3] = hci_skb_pkt_type(skb);
skb                77 drivers/bluetooth/btsdio.c 	err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len);
skb                79 drivers/bluetooth/btsdio.c 		skb_pull(skb, 4);
skb                84 drivers/bluetooth/btsdio.c 	data->hdev->stat.byte_tx += skb->len;
skb                86 drivers/bluetooth/btsdio.c 	kfree_skb(skb);
skb                94 drivers/bluetooth/btsdio.c 	struct sk_buff *skb;
skb               101 drivers/bluetooth/btsdio.c 	while ((skb = skb_dequeue(&data->txq))) {
skb               102 drivers/bluetooth/btsdio.c 		err = btsdio_tx_packet(data, skb);
skb               105 drivers/bluetooth/btsdio.c 			skb_queue_head(&data->txq, skb);
skb               116 drivers/bluetooth/btsdio.c 	struct sk_buff *skb;
skb               129 drivers/bluetooth/btsdio.c 	skb = bt_skb_alloc(len - 4, GFP_KERNEL);
skb               130 drivers/bluetooth/btsdio.c 	if (!skb) {
skb               138 drivers/bluetooth/btsdio.c 	skb_put(skb, len - 4);
skb               140 drivers/bluetooth/btsdio.c 	err = sdio_readsb(data->func, skb->data, REG_RDAT, len - 4);
skb               142 drivers/bluetooth/btsdio.c 		kfree_skb(skb);
skb               148 drivers/bluetooth/btsdio.c 	hci_skb_pkt_type(skb) = hdr[3];
skb               150 drivers/bluetooth/btsdio.c 	err = hci_recv_frame(data->hdev, skb);
skb               236 drivers/bluetooth/btsdio.c static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               242 drivers/bluetooth/btsdio.c 	switch (hci_skb_pkt_type(skb)) {
skb               259 drivers/bluetooth/btsdio.c 	skb_queue_tail(&data->txq, skb);
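btsdio_tx_packet above shows the rollback idiom for retryable writes: the 4-byte length-plus-type header is pushed before sdio_writesb(), and popped again on failure so the untouched skb can go back on the queue (btsdio_work requeues it at the head). A sketch with a placeholder register:

    static int example_tx_with_rollback(struct sdio_func *func,
                                        struct sk_buff_head *txq,
                                        struct sk_buff *skb,
                                        unsigned int reg)
    {
        int err;

        skb_push(skb, 4);
        skb->data[0] = skb->len & 0xff;         /* 24-bit LE length */
        skb->data[1] = (skb->len >> 8) & 0xff;
        skb->data[2] = (skb->len >> 16) & 0xff;
        skb->data[3] = hci_skb_pkt_type(skb);

        err = sdio_writesb(func, reg, skb->data, skb->len);
        if (err < 0) {
            skb_pull(skb, 4);           /* undo the header */
            skb_queue_head(txq, skb);   /* retry this skb first */
            return err;
        }

        kfree_skb(skb);
        return 0;
    }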
skb               489 drivers/bluetooth/btusb.c 	int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb);
skb               580 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb               585 drivers/bluetooth/btusb.c 	skb = data->evt_skb;
skb               590 drivers/bluetooth/btusb.c 		if (!skb) {
skb               591 drivers/bluetooth/btusb.c 			skb = bt_skb_alloc(HCI_MAX_EVENT_SIZE, GFP_ATOMIC);
skb               592 drivers/bluetooth/btusb.c 			if (!skb) {
skb               597 drivers/bluetooth/btusb.c 			hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb               598 drivers/bluetooth/btusb.c 			hci_skb_expect(skb) = HCI_EVENT_HDR_SIZE;
skb               601 drivers/bluetooth/btusb.c 		len = min_t(uint, hci_skb_expect(skb), count);
skb               602 drivers/bluetooth/btusb.c 		skb_put_data(skb, buffer, len);
skb               606 drivers/bluetooth/btusb.c 		hci_skb_expect(skb) -= len;
skb               608 drivers/bluetooth/btusb.c 		if (skb->len == HCI_EVENT_HDR_SIZE) {
skb               610 drivers/bluetooth/btusb.c 			hci_skb_expect(skb) = hci_event_hdr(skb)->plen;
skb               612 drivers/bluetooth/btusb.c 			if (skb_tailroom(skb) < hci_skb_expect(skb)) {
skb               613 drivers/bluetooth/btusb.c 				kfree_skb(skb);
skb               614 drivers/bluetooth/btusb.c 				skb = NULL;
skb               621 drivers/bluetooth/btusb.c 		if (!hci_skb_expect(skb)) {
skb               623 drivers/bluetooth/btusb.c 			data->recv_event(data->hdev, skb);
skb               624 drivers/bluetooth/btusb.c 			skb = NULL;
skb               628 drivers/bluetooth/btusb.c 	data->evt_skb = skb;
skb               636 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb               641 drivers/bluetooth/btusb.c 	skb = data->acl_skb;
skb               646 drivers/bluetooth/btusb.c 		if (!skb) {
skb               647 drivers/bluetooth/btusb.c 			skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
skb               648 drivers/bluetooth/btusb.c 			if (!skb) {
skb               653 drivers/bluetooth/btusb.c 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
skb               654 drivers/bluetooth/btusb.c 			hci_skb_expect(skb) = HCI_ACL_HDR_SIZE;
skb               657 drivers/bluetooth/btusb.c 		len = min_t(uint, hci_skb_expect(skb), count);
skb               658 drivers/bluetooth/btusb.c 		skb_put_data(skb, buffer, len);
skb               662 drivers/bluetooth/btusb.c 		hci_skb_expect(skb) -= len;
skb               664 drivers/bluetooth/btusb.c 		if (skb->len == HCI_ACL_HDR_SIZE) {
skb               665 drivers/bluetooth/btusb.c 			__le16 dlen = hci_acl_hdr(skb)->dlen;
skb               668 drivers/bluetooth/btusb.c 			hci_skb_expect(skb) = __le16_to_cpu(dlen);
skb               670 drivers/bluetooth/btusb.c 			if (skb_tailroom(skb) < hci_skb_expect(skb)) {
skb               671 drivers/bluetooth/btusb.c 				kfree_skb(skb);
skb               672 drivers/bluetooth/btusb.c 				skb = NULL;
skb               679 drivers/bluetooth/btusb.c 		if (!hci_skb_expect(skb)) {
skb               681 drivers/bluetooth/btusb.c 			hci_recv_frame(data->hdev, skb);
skb               682 drivers/bluetooth/btusb.c 			skb = NULL;
skb               686 drivers/bluetooth/btusb.c 	data->acl_skb = skb;
skb               694 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb               699 drivers/bluetooth/btusb.c 	skb = data->sco_skb;
skb               704 drivers/bluetooth/btusb.c 		if (!skb) {
skb               705 drivers/bluetooth/btusb.c 			skb = bt_skb_alloc(HCI_MAX_SCO_SIZE, GFP_ATOMIC);
skb               706 drivers/bluetooth/btusb.c 			if (!skb) {
skb               711 drivers/bluetooth/btusb.c 			hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
skb               712 drivers/bluetooth/btusb.c 			hci_skb_expect(skb) = HCI_SCO_HDR_SIZE;
skb               715 drivers/bluetooth/btusb.c 		len = min_t(uint, hci_skb_expect(skb), count);
skb               716 drivers/bluetooth/btusb.c 		skb_put_data(skb, buffer, len);
skb               720 drivers/bluetooth/btusb.c 		hci_skb_expect(skb) -= len;
skb               722 drivers/bluetooth/btusb.c 		if (skb->len == HCI_SCO_HDR_SIZE) {
skb               724 drivers/bluetooth/btusb.c 			hci_skb_expect(skb) = hci_sco_hdr(skb)->dlen;
skb               726 drivers/bluetooth/btusb.c 			if (skb_tailroom(skb) < hci_skb_expect(skb)) {
skb               727 drivers/bluetooth/btusb.c 				kfree_skb(skb);
skb               728 drivers/bluetooth/btusb.c 				skb = NULL;
skb               735 drivers/bluetooth/btusb.c 		if (!hci_skb_expect(skb)) {
skb               737 drivers/bluetooth/btusb.c 			hci_recv_frame(data->hdev, skb);
skb               738 drivers/bluetooth/btusb.c 			skb = NULL;
skb               742 drivers/bluetooth/btusb.c 	data->sco_skb = skb;
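The three btusb_recv_* helpers above share one reassembly scheme: URB payloads arrive in arbitrary-sized chunks, so each pending frame carries a "bytes still missing" counter in hci_skb_expect() that starts at the header size and is re-armed from the header's length field once the header is complete. A condensed sketch of the event case (the real code also checks tailroom before growing):

    static struct sk_buff *example_recv_evt(struct hci_dev *hdev,
                                            struct sk_buff *pending,
                                            const u8 *buf, int count)
    {
        while (count) {
            int len;

            if (!pending) {
                pending = bt_skb_alloc(HCI_MAX_EVENT_SIZE, GFP_ATOMIC);
                if (!pending)
                    return NULL;
                hci_skb_pkt_type(pending) = HCI_EVENT_PKT;
                hci_skb_expect(pending) = HCI_EVENT_HDR_SIZE;
            }

            len = min_t(int, hci_skb_expect(pending), count);
            skb_put_data(pending, buf, len);
            buf += len;
            count -= len;
            hci_skb_expect(pending) -= len;

            if (pending->len == HCI_EVENT_HDR_SIZE)
                /* header complete: now expect the payload */
                hci_skb_expect(pending) = hci_event_hdr(pending)->plen;

            if (!hci_skb_expect(pending)) {
                hci_recv_frame(hdev, pending);  /* frame complete */
                pending = NULL;
            }
        }
        return pending;         /* caller stashes any partial frame */
    }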
skb              1059 drivers/bluetooth/btusb.c 		struct sk_buff *skb;
skb              1061 drivers/bluetooth/btusb.c 		skb = bt_skb_alloc(urb->actual_length, GFP_ATOMIC);
skb              1062 drivers/bluetooth/btusb.c 		if (skb) {
skb              1063 drivers/bluetooth/btusb.c 			skb_put_data(skb, urb->transfer_buffer,
skb              1065 drivers/bluetooth/btusb.c 			hci_recv_diag(hdev, skb);
skb              1138 drivers/bluetooth/btusb.c 	struct sk_buff *skb = urb->context;
skb              1139 drivers/bluetooth/btusb.c 	struct hci_dev *hdev = (struct hci_dev *)skb->dev;
skb              1161 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              1166 drivers/bluetooth/btusb.c 	struct sk_buff *skb = urb->context;
skb              1167 drivers/bluetooth/btusb.c 	struct hci_dev *hdev = (struct hci_dev *)skb->dev;
skb              1183 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              1303 drivers/bluetooth/btusb.c static struct urb *alloc_ctrl_urb(struct hci_dev *hdev, struct sk_buff *skb)
skb              1324 drivers/bluetooth/btusb.c 	dr->wLength      = __cpu_to_le16(skb->len);
skb              1329 drivers/bluetooth/btusb.c 			     skb->data, skb->len, btusb_tx_complete, skb);
skb              1331 drivers/bluetooth/btusb.c 	skb->dev = (void *)hdev;
skb              1336 drivers/bluetooth/btusb.c static struct urb *alloc_bulk_urb(struct hci_dev *hdev, struct sk_buff *skb)
skb              1352 drivers/bluetooth/btusb.c 			  skb->data, skb->len, btusb_tx_complete, skb);
skb              1354 drivers/bluetooth/btusb.c 	skb->dev = (void *)hdev;
skb              1359 drivers/bluetooth/btusb.c static struct urb *alloc_isoc_urb(struct hci_dev *hdev, struct sk_buff *skb)
skb              1375 drivers/bluetooth/btusb.c 			 skb->data, skb->len, btusb_isoc_tx_complete,
skb              1376 drivers/bluetooth/btusb.c 			 skb, data->isoc_tx_ep->bInterval);
skb              1380 drivers/bluetooth/btusb.c 	__fill_isoc_descriptor(urb, skb->len,
skb              1383 drivers/bluetooth/btusb.c 	skb->dev = (void *)hdev;
skb              1432 drivers/bluetooth/btusb.c static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb              1438 drivers/bluetooth/btusb.c 	switch (hci_skb_pkt_type(skb)) {
skb              1440 drivers/bluetooth/btusb.c 		urb = alloc_ctrl_urb(hdev, skb);
skb              1448 drivers/bluetooth/btusb.c 		urb = alloc_bulk_urb(hdev, skb);
skb              1459 drivers/bluetooth/btusb.c 		urb = alloc_isoc_urb(hdev, skb);
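btusb_send_frame above maps each H4 packet type onto a USB transfer type: commands travel as control transfers, ACL data as bulk, SCO data as isochronous (with __fill_isoc_descriptor slicing the payload into per-interval frames). A sketch of just the dispatch, reusing the allocator names from the listing:

    static struct urb *example_pick_urb(struct hci_dev *hdev,
                                        struct sk_buff *skb)
    {
        switch (hci_skb_pkt_type(skb)) {
        case HCI_COMMAND_PKT:
            return alloc_ctrl_urb(hdev, skb);   /* EP0 control */
        case HCI_ACLDATA_PKT:
            return alloc_bulk_urb(hdev, skb);   /* bulk-out endpoint */
        case HCI_SCODATA_PKT:
            return alloc_isoc_urb(hdev, skb);   /* isoc-out endpoint */
        }
        return ERR_PTR(-EILSEQ);
    }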
skb              1606 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              1611 drivers/bluetooth/btusb.c 	skb = __hci_cmd_sync(hdev, 0xfc3b, 1, &val, HCI_INIT_TIMEOUT);
skb              1612 drivers/bluetooth/btusb.c 	if (IS_ERR(skb))
skb              1613 drivers/bluetooth/btusb.c 		bt_dev_err(hdev, "BCM92035 command failed (%ld)", PTR_ERR(skb));
skb              1615 drivers/bluetooth/btusb.c 		kfree_skb(skb);
skb              1623 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              1627 drivers/bluetooth/btusb.c 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
skb              1629 drivers/bluetooth/btusb.c 	if (IS_ERR(skb)) {
skb              1630 drivers/bluetooth/btusb.c 		int err = PTR_ERR(skb);
skb              1635 drivers/bluetooth/btusb.c 	if (skb->len != sizeof(struct hci_rp_read_local_version)) {
skb              1637 drivers/bluetooth/btusb.c 		kfree_skb(skb);
skb              1641 drivers/bluetooth/btusb.c 	rp = (struct hci_rp_read_local_version *)skb->data;
skb              1657 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              1707 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              1787 drivers/bluetooth/btusb.c 	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen,
skb              1789 drivers/bluetooth/btusb.c 	if (IS_ERR(skb)) {
skb              1791 drivers/bluetooth/btusb.c 			   cmd->opcode, PTR_ERR(skb));
skb              1792 drivers/bluetooth/btusb.c 		return PTR_ERR(skb);
skb              1799 drivers/bluetooth/btusb.c 	if (skb->len != evt->plen) {
skb              1802 drivers/bluetooth/btusb.c 		kfree_skb(skb);
skb              1806 drivers/bluetooth/btusb.c 	if (memcmp(skb->data, evt_param, evt->plen)) {
skb              1809 drivers/bluetooth/btusb.c 		kfree_skb(skb);
skb              1812 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              1819 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              1835 drivers/bluetooth/btusb.c 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
skb              1836 drivers/bluetooth/btusb.c 	if (IS_ERR(skb)) {
skb              1838 drivers/bluetooth/btusb.c 			   PTR_ERR(skb));
skb              1839 drivers/bluetooth/btusb.c 		return PTR_ERR(skb);
skb              1841 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              1970 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              1974 drivers/bluetooth/btusb.c 	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
skb              1975 drivers/bluetooth/btusb.c 	if (!skb)
skb              1978 drivers/bluetooth/btusb.c 	hdr = skb_put(skb, sizeof(*hdr));
skb              1982 drivers/bluetooth/btusb.c 	evt = skb_put(skb, sizeof(*evt));
skb              1986 drivers/bluetooth/btusb.c 	skb_put_u8(skb, 0x00);
skb              1988 drivers/bluetooth/btusb.c 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb              1990 drivers/bluetooth/btusb.c 	return hci_recv_frame(hdev, skb);
skb              2034 drivers/bluetooth/btusb.c static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
skb              2039 drivers/bluetooth/btusb.c 		struct hci_event_hdr *hdr = (void *)skb->data;
skb              2041 drivers/bluetooth/btusb.c 		if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
skb              2043 drivers/bluetooth/btusb.c 			const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
skb              2044 drivers/bluetooth/btusb.c 			unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
skb              2046 drivers/bluetooth/btusb.c 			switch (skb->data[2]) {
skb              2066 drivers/bluetooth/btusb.c 	return hci_recv_frame(hdev, skb);
skb              2069 drivers/bluetooth/btusb.c static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
skb              2076 drivers/bluetooth/btusb.c 	switch (hci_skb_pkt_type(skb)) {
skb              2079 drivers/bluetooth/btusb.c 			struct hci_command_hdr *cmd = (void *)skb->data;
skb              2087 drivers/bluetooth/btusb.c 				urb = alloc_bulk_urb(hdev, skb);
skb              2089 drivers/bluetooth/btusb.c 				urb = alloc_ctrl_urb(hdev, skb);
skb              2099 drivers/bluetooth/btusb.c 			urb = alloc_ctrl_urb(hdev, skb);
skb              2108 drivers/bluetooth/btusb.c 		urb = alloc_bulk_urb(hdev, skb);
skb              2119 drivers/bluetooth/btusb.c 		urb = alloc_isoc_urb(hdev, skb);
skb              2441 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              2452 drivers/bluetooth/btusb.c 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
skb              2453 drivers/bluetooth/btusb.c 	if (IS_ERR(skb)) {
skb              2454 drivers/bluetooth/btusb.c 		ret = PTR_ERR(skb);
skb              2458 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              2464 drivers/bluetooth/btusb.c 	skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT);
skb              2465 drivers/bluetooth/btusb.c 	if (IS_ERR(skb)) {
skb              2466 drivers/bluetooth/btusb.c 		ret = PTR_ERR(skb);
skb              2470 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              2477 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              2483 drivers/bluetooth/btusb.c 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
skb              2484 drivers/bluetooth/btusb.c 	if (IS_ERR(skb)) {
skb              2486 drivers/bluetooth/btusb.c 		return PTR_ERR(skb);
skb              2488 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              2559 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              2568 drivers/bluetooth/btusb.c 		skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
skb              2569 drivers/bluetooth/btusb.c 		if (!skb) {
skb              2574 drivers/bluetooth/btusb.c 		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb              2575 drivers/bluetooth/btusb.c 		skb_put_data(skb, urb->transfer_buffer, urb->actual_length);
skb              2577 drivers/bluetooth/btusb.c 		hdr = (void *)skb->data;
skb              2588 drivers/bluetooth/btusb.c 			data->evt_skb = skb_clone(skb, GFP_ATOMIC);
skb              2593 drivers/bluetooth/btusb.c 		err = hci_recv_frame(hdev, skb);
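The MediaTek paths (btmtksdio.c:346, btmtkuart.c:332, and the btusb lines just above) all clone vendor events with skb_clone(): one copy parks in evt_skb for the thread synchronously waiting on the vendor command, while the original still flows to the core. A sketch:

    static int example_stash_vendor_event(struct hci_dev *hdev,
                                          struct sk_buff **evt_skb,
                                          struct sk_buff *skb)
    {
        /* private copy for the synchronous waiter */
        *evt_skb = skb_clone(skb, GFP_ATOMIC);
        if (!*evt_skb)
            return -ENOMEM;

        return hci_recv_frame(hdev, skb);   /* core gets the original */
    }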
skb              2924 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              3013 drivers/bluetooth/btusb.c 	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
skb              3015 drivers/bluetooth/btusb.c 	if (IS_ERR(skb)) {
skb              3016 drivers/bluetooth/btusb.c 		err = PTR_ERR(skb);
skb              3020 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              3061 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              3081 drivers/bluetooth/btusb.c 	skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
skb              3082 drivers/bluetooth/btusb.c 	if (!skb) {
skb              3087 drivers/bluetooth/btusb.c 	skb_put_data(skb, cmd, sizeof(cmd));
skb              3088 drivers/bluetooth/btusb.c 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
skb              3090 drivers/bluetooth/btusb.c 	ret = btusb_send_frame(hdev, skb);
skb              3093 drivers/bluetooth/btusb.c 		kfree_skb(skb);
skb              3104 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              3112 drivers/bluetooth/btusb.c 	skb = __hci_cmd_sync(hdev, 0xfc22, sizeof(buf), buf, HCI_INIT_TIMEOUT);
skb              3113 drivers/bluetooth/btusb.c 	if (IS_ERR(skb)) {
skb              3114 drivers/bluetooth/btusb.c 		ret = PTR_ERR(skb);
skb              3119 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              3127 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              3137 drivers/bluetooth/btusb.c 	skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
skb              3138 drivers/bluetooth/btusb.c 	if (IS_ERR(skb)) {
skb              3139 drivers/bluetooth/btusb.c 		ret = PTR_ERR(skb);
skb              3143 drivers/bluetooth/btusb.c 	kfree_skb(skb);
skb              3455 drivers/bluetooth/btusb.c 	struct sk_buff *skb;
skb              3466 drivers/bluetooth/btusb.c 	skb = bt_skb_alloc(2, GFP_KERNEL);
skb              3467 drivers/bluetooth/btusb.c 	if (!skb) {
skb              3472 drivers/bluetooth/btusb.c 	skb_put_u8(skb, 0xf0);
skb              3473 drivers/bluetooth/btusb.c 	skb_put_u8(skb, enable);
skb              3478 drivers/bluetooth/btusb.c 			  skb->data, skb->len, btusb_tx_complete, skb);
skb              3480 drivers/bluetooth/btusb.c 	skb->dev = (void *)hdev;
skb                84 drivers/bluetooth/btwilink.c static long st_receive(void *priv_data, struct sk_buff *skb)
skb                89 drivers/bluetooth/btwilink.c 	if (!skb)
skb                93 drivers/bluetooth/btwilink.c 		kfree_skb(skb);
skb                98 drivers/bluetooth/btwilink.c 	err = hci_recv_frame(lhst->hdev, skb);
skb               104 drivers/bluetooth/btwilink.c 	lhst->hdev->stat.byte_rx += skb->len;
skb               231 drivers/bluetooth/btwilink.c static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               240 drivers/bluetooth/btwilink.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb               242 drivers/bluetooth/btwilink.c 	BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
skb               243 drivers/bluetooth/btwilink.c 	       skb->len);
skb               249 drivers/bluetooth/btwilink.c 	pkt_type = hci_skb_pkt_type(skb);
skb               250 drivers/bluetooth/btwilink.c 	len = hst->st_write(skb);
skb               148 drivers/bluetooth/dtl1_cs.c 		register struct sk_buff *skb;
skb               156 drivers/bluetooth/dtl1_cs.c 		skb = skb_dequeue(&(info->txq));
skb               157 drivers/bluetooth/dtl1_cs.c 		if (!skb)
skb               161 drivers/bluetooth/dtl1_cs.c 		len = dtl1_write(iobase, 32, skb->data, skb->len);
skb               163 drivers/bluetooth/dtl1_cs.c 		if (len == skb->len) {
skb               165 drivers/bluetooth/dtl1_cs.c 			kfree_skb(skb);
skb               167 drivers/bluetooth/dtl1_cs.c 			skb_pull(skb, len);
skb               168 drivers/bluetooth/dtl1_cs.c 			skb_queue_head(&(info->txq), skb);
skb               179 drivers/bluetooth/dtl1_cs.c static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb)
skb               181 drivers/bluetooth/dtl1_cs.c 	u8 flowmask = *(u8 *)skb->data;
skb               185 drivers/bluetooth/dtl1_cs.c 	for (i = 0; i < skb->len; i++)
skb               186 drivers/bluetooth/dtl1_cs.c 		printk(" %02x", skb->data[i]);
skb               198 drivers/bluetooth/dtl1_cs.c 	kfree_skb(skb);
skb               384 drivers/bluetooth/dtl1_cs.c static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               390 drivers/bluetooth/dtl1_cs.c 	switch (hci_skb_pkt_type(skb)) {
skb               408 drivers/bluetooth/dtl1_cs.c 	nsh.len = skb->len;
skb               410 drivers/bluetooth/dtl1_cs.c 	s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC);
skb               415 drivers/bluetooth/dtl1_cs.c 	skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len);
skb               416 drivers/bluetooth/dtl1_cs.c 	if (skb->len & 0x0001)
skb               425 drivers/bluetooth/dtl1_cs.c 	kfree_skb(skb);
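dtl1_hci_send_frame above cannot prepend in place; it re-frames every HCI packet inside a Nokia NSH header by copying into a fresh skb, padding to an even length, and freeing the original. A simplified sketch that treats the header as an opaque byte string:

    static struct sk_buff *example_reframe(struct sk_buff *skb,
                                           const u8 *nsh, int nshl)
    {
        struct sk_buff *s;

        s = bt_skb_alloc(nshl + skb->len + 1, GFP_ATOMIC);
        if (!s)
            return NULL;

        skb_put_data(s, nsh, nshl);     /* transport header first */
        skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len);
        if (skb->len & 0x0001)
            skb_put_u8(s, 0);           /* pad to 16-bit boundary */

        kfree_skb(skb);                 /* original is consumed */
        return s;
    }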
skb                17 drivers/bluetooth/h4_recv.h 	int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
skb                42 drivers/bluetooth/h4_recv.h 					  struct sk_buff *skb,
skb                49 drivers/bluetooth/h4_recv.h 	if (IS_ERR(skb))
skb                50 drivers/bluetooth/h4_recv.h 		skb = NULL;
skb                55 drivers/bluetooth/h4_recv.h 		if (!skb) {
skb                60 drivers/bluetooth/h4_recv.h 				skb = bt_skb_alloc((&pkts[i])->maxlen,
skb                62 drivers/bluetooth/h4_recv.h 				if (!skb)
skb                65 drivers/bluetooth/h4_recv.h 				hci_skb_pkt_type(skb) = (&pkts[i])->type;
skb                66 drivers/bluetooth/h4_recv.h 				hci_skb_expect(skb) = (&pkts[i])->hlen;
skb                71 drivers/bluetooth/h4_recv.h 			if (!skb)
skb                78 drivers/bluetooth/h4_recv.h 		len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
skb                79 drivers/bluetooth/h4_recv.h 		skb_put_data(skb, buffer, len);
skb                85 drivers/bluetooth/h4_recv.h 		if (skb->len < hci_skb_expect(skb))
skb                89 drivers/bluetooth/h4_recv.h 			if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
skb                94 drivers/bluetooth/h4_recv.h 			kfree_skb(skb);
skb                98 drivers/bluetooth/h4_recv.h 		if (skb->len == (&pkts[i])->hlen) {
skb               108 drivers/bluetooth/h4_recv.h 				dlen = skb->data[(&pkts[i])->loff];
skb               109 drivers/bluetooth/h4_recv.h 				hci_skb_expect(skb) += dlen;
skb               111 drivers/bluetooth/h4_recv.h 				if (skb_tailroom(skb) < dlen) {
skb               112 drivers/bluetooth/h4_recv.h 					kfree_skb(skb);
skb               118 drivers/bluetooth/h4_recv.h 				dlen = get_unaligned_le16(skb->data +
skb               120 drivers/bluetooth/h4_recv.h 				hci_skb_expect(skb) += dlen;
skb               122 drivers/bluetooth/h4_recv.h 				if (skb_tailroom(skb) < dlen) {
skb               123 drivers/bluetooth/h4_recv.h 					kfree_skb(skb);
skb               129 drivers/bluetooth/h4_recv.h 				kfree_skb(skb);
skb               135 drivers/bluetooth/h4_recv.h 				(&pkts[i])->recv(hdev, skb);
skb               136 drivers/bluetooth/h4_recv.h 				skb = NULL;
skb               140 drivers/bluetooth/h4_recv.h 			(&pkts[i])->recv(hdev, skb);
skb               141 drivers/bluetooth/h4_recv.h 			skb = NULL;
skb               145 drivers/bluetooth/h4_recv.h 	return skb;
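h4_recv_buf() above is table-driven: each h4_recv_pkt entry gives one packet type's header length, where its length field sits (loff/lsize), a maximum size, and a delivery callback, and the function returns the partially assembled skb (or an ERR_PTR) for the caller to keep between invocations. A sketch of how a UART driver feeds it, using the H4_RECV_* defaults from this header:

    static const struct h4_recv_pkt example_pkts[] = {
        { H4_RECV_ACL,   .recv = hci_recv_frame },
        { H4_RECV_SCO,   .recv = hci_recv_frame },
        { H4_RECV_EVENT, .recv = hci_recv_frame },
    };

    static void example_uart_rx(struct hci_dev *hdev,
                                struct sk_buff **pending,
                                const u8 *data, int count)
    {
        *pending = h4_recv_buf(hdev, *pending, data, count,
                               example_pkts, ARRAY_SIZE(example_pkts));
        if (IS_ERR(*pending))
            *pending = NULL;    /* unknown type or OOM: resync */
    }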
skb                76 drivers/bluetooth/hci_ag6xx.c 	struct sk_buff *skb;
skb                78 drivers/bluetooth/hci_ag6xx.c 	skb = skb_dequeue(&ag6xx->txq);
skb                79 drivers/bluetooth/hci_ag6xx.c 	if (!skb)
skb                80 drivers/bluetooth/hci_ag6xx.c 		return skb;
skb                83 drivers/bluetooth/hci_ag6xx.c 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
skb                84 drivers/bluetooth/hci_ag6xx.c 	return skb;
skb                87 drivers/bluetooth/hci_ag6xx.c static int ag6xx_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb                91 drivers/bluetooth/hci_ag6xx.c 	skb_queue_tail(&ag6xx->txq, skb);
skb               128 drivers/bluetooth/hci_ag6xx.c 		struct sk_buff *skb;
skb               137 drivers/bluetooth/hci_ag6xx.c 		skb = __hci_cmd_sync(hdev, 0xfc8e, fragment_len + 6, cmd_param,
skb               139 drivers/bluetooth/hci_ag6xx.c 		if (IS_ERR(skb))
skb               140 drivers/bluetooth/hci_ag6xx.c 			return PTR_ERR(skb);
skb               141 drivers/bluetooth/hci_ag6xx.c 		kfree_skb(skb);
skb               154 drivers/bluetooth/hci_ag6xx.c 	struct sk_buff *skb;
skb               206 drivers/bluetooth/hci_ag6xx.c 	skb = __hci_cmd_sync_ev(hdev, 0xfc2f, fw->size, fw->data,
skb               208 drivers/bluetooth/hci_ag6xx.c 	if (IS_ERR(skb)) {
skb               209 drivers/bluetooth/hci_ag6xx.c 		bt_dev_err(hdev, "Applying bddata failed (%ld)", PTR_ERR(skb));
skb               211 drivers/bluetooth/hci_ag6xx.c 		return PTR_ERR(skb);
skb               213 drivers/bluetooth/hci_ag6xx.c 	kfree_skb(skb);
skb               150 drivers/bluetooth/hci_ath.c 	struct sk_buff *skb;
skb               161 drivers/bluetooth/hci_ath.c 	skb = __hci_cmd_sync(hdev, 0xfc0b, dlen + 4, &cmd, HCI_INIT_TIMEOUT);
skb               162 drivers/bluetooth/hci_ath.c 	if (IS_ERR(skb))
skb               163 drivers/bluetooth/hci_ath.c 		return PTR_ERR(skb);
skb               164 drivers/bluetooth/hci_ath.c 	kfree_skb(skb);
skb               208 drivers/bluetooth/hci_ath.c static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb               212 drivers/bluetooth/hci_ath.c 	if (hci_skb_pkt_type(skb) == HCI_SCODATA_PKT) {
skb               213 drivers/bluetooth/hci_ath.c 		kfree_skb(skb);
skb               220 drivers/bluetooth/hci_ath.c 	if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
skb               221 drivers/bluetooth/hci_ath.c 		struct hci_command_hdr *hdr = (void *)skb->data;
skb               224 drivers/bluetooth/hci_ath.c 			ath->cur_sleep = skb->data[HCI_COMMAND_HDR_SIZE];
skb               227 drivers/bluetooth/hci_ath.c 	BT_DBG("hu %p skb %p", hu, skb);
skb               230 drivers/bluetooth/hci_ath.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb               232 drivers/bluetooth/hci_ath.c 	skb_queue_tail(&ath->txq, skb);
skb               144 drivers/bluetooth/hci_bcm.c 	struct sk_buff *skb;
skb               157 drivers/bluetooth/hci_bcm.c 		skb = __hci_cmd_sync(hdev, 0xfc45, 1, &clock, HCI_INIT_TIMEOUT);
skb               158 drivers/bluetooth/hci_bcm.c 		if (IS_ERR(skb)) {
skb               159 drivers/bluetooth/hci_bcm.c 			int err = PTR_ERR(skb);
skb               165 drivers/bluetooth/hci_bcm.c 		kfree_skb(skb);
skb               176 drivers/bluetooth/hci_bcm.c 	skb = __hci_cmd_sync(hdev, 0xfc18, sizeof(param), &param,
skb               178 drivers/bluetooth/hci_bcm.c 	if (IS_ERR(skb)) {
skb               179 drivers/bluetooth/hci_bcm.c 		int err = PTR_ERR(skb);
skb               185 drivers/bluetooth/hci_bcm.c 	kfree_skb(skb);
skb               355 drivers/bluetooth/hci_bcm.c 	struct sk_buff *skb;
skb               360 drivers/bluetooth/hci_bcm.c 	skb = __hci_cmd_sync(hu->hdev, 0xfc27, sizeof(sleep_params),
skb               362 drivers/bluetooth/hci_bcm.c 	if (IS_ERR(skb)) {
skb               363 drivers/bluetooth/hci_bcm.c 		int err = PTR_ERR(skb);
skb               367 drivers/bluetooth/hci_bcm.c 	kfree_skb(skb);
skb               382 drivers/bluetooth/hci_bcm.c 	struct sk_buff *skb;
skb               387 drivers/bluetooth/hci_bcm.c 	skb = bt_skb_alloc(3, GFP_KERNEL);
skb               388 drivers/bluetooth/hci_bcm.c 	if (!skb)
skb               391 drivers/bluetooth/hci_bcm.c 	skb_put_u8(skb, BCM_LM_DIAG_PKT);
skb               392 drivers/bluetooth/hci_bcm.c 	skb_put_u8(skb, 0xf0);
skb               393 drivers/bluetooth/hci_bcm.c 	skb_put_u8(skb, enable);
skb               395 drivers/bluetooth/hci_bcm.c 	skb_queue_tail(&bcm->txq, skb);
skb               656 drivers/bluetooth/hci_bcm.c static int bcm_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb               660 drivers/bluetooth/hci_bcm.c 	bt_dev_dbg(hu->hdev, "hu %p skb %p", hu, skb);
skb               663 drivers/bluetooth/hci_bcm.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb               664 drivers/bluetooth/hci_bcm.c 	skb_queue_tail(&bcm->txq, skb);
skb               672 drivers/bluetooth/hci_bcm.c 	struct sk_buff *skb = NULL;
skb               683 drivers/bluetooth/hci_bcm.c 	skb = skb_dequeue(&bcm->txq);
skb               692 drivers/bluetooth/hci_bcm.c 	return skb;
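The diagnostic-packet builder at hci_bcm.c lines 382-395 hand-assembles a three-byte link-management frame: bt_skb_alloc() for the buffer, one skb_put_u8() per byte, then skb_queue_tail() onto the driver txq for the dequeue path to pick up. A sketch of that construction, with the packet-type constant passed in since BCM_LM_DIAG_PKT is driver-private:

    /* Queue a 3-byte diagnostic toggle; sketch of the pattern above. */
    static int queue_diag_pkt(struct sk_buff_head *txq, u8 pkt_type,
                              bool enable)
    {
            struct sk_buff *skb;

            skb = bt_skb_alloc(3, GFP_KERNEL);
            if (!skb)
                    return -ENOMEM;

            skb_put_u8(skb, pkt_type);      /* e.g. BCM_LM_DIAG_PKT */
            skb_put_u8(skb, 0xf0);          /* subcode, from the excerpt */
            skb_put_u8(skb, enable);

            skb_queue_tail(txq, skb);
            return 0;
    }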
skb               110 drivers/bluetooth/hci_bcsp.c static void bcsp_slip_msgdelim(struct sk_buff *skb)
skb               114 drivers/bluetooth/hci_bcsp.c 	skb_put_data(skb, &pkt_delim, 1);
skb               117 drivers/bluetooth/hci_bcsp.c static void bcsp_slip_one_byte(struct sk_buff *skb, u8 c)
skb               124 drivers/bluetooth/hci_bcsp.c 		skb_put_data(skb, &esc_c0, 2);
skb               127 drivers/bluetooth/hci_bcsp.c 		skb_put_data(skb, &esc_db, 2);
skb               130 drivers/bluetooth/hci_bcsp.c 		skb_put_data(skb, &c, 1);
skb               134 drivers/bluetooth/hci_bcsp.c static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb               138 drivers/bluetooth/hci_bcsp.c 	if (skb->len > 0xFFF) {
skb               140 drivers/bluetooth/hci_bcsp.c 		kfree_skb(skb);
skb               144 drivers/bluetooth/hci_bcsp.c 	switch (hci_skb_pkt_type(skb)) {
skb               147 drivers/bluetooth/hci_bcsp.c 		skb_queue_tail(&bcsp->rel, skb);
skb               151 drivers/bluetooth/hci_bcsp.c 		skb_queue_tail(&bcsp->unrel, skb);
skb               156 drivers/bluetooth/hci_bcsp.c 		kfree_skb(skb);
skb               275 drivers/bluetooth/hci_bcsp.c 	struct sk_buff *skb;
skb               281 drivers/bluetooth/hci_bcsp.c 	skb = skb_dequeue(&bcsp->unrel);
skb               282 drivers/bluetooth/hci_bcsp.c 	if (skb != NULL) {
skb               285 drivers/bluetooth/hci_bcsp.c 		nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
skb               286 drivers/bluetooth/hci_bcsp.c 					hci_skb_pkt_type(skb));
skb               288 drivers/bluetooth/hci_bcsp.c 			kfree_skb(skb);
skb               291 drivers/bluetooth/hci_bcsp.c 			skb_queue_head(&bcsp->unrel, skb);
skb               304 drivers/bluetooth/hci_bcsp.c 		skb = skb_dequeue(&bcsp->rel);
skb               305 drivers/bluetooth/hci_bcsp.c 		if (skb != NULL) {
skb               308 drivers/bluetooth/hci_bcsp.c 			nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
skb               309 drivers/bluetooth/hci_bcsp.c 						hci_skb_pkt_type(skb));
skb               311 drivers/bluetooth/hci_bcsp.c 				__skb_queue_tail(&bcsp->unack, skb);
skb               316 drivers/bluetooth/hci_bcsp.c 				skb_queue_head(&bcsp->rel, skb);
skb               350 drivers/bluetooth/hci_bcsp.c 	struct sk_buff *skb, *tmp;
skb               375 drivers/bluetooth/hci_bcsp.c 	skb_queue_walk_safe(&bcsp->unack, skb, tmp) {
skb               380 drivers/bluetooth/hci_bcsp.c 		__skb_unlink(skb, &bcsp->unack);
skb               381 drivers/bluetooth/hci_bcsp.c 		kfree_skb(skb);
skb               693 drivers/bluetooth/hci_bcsp.c 	struct sk_buff *skb;
skb               700 drivers/bluetooth/hci_bcsp.c 	while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) {
skb               702 drivers/bluetooth/hci_bcsp.c 		skb_queue_head(&bcsp->rel, skb);
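bcsp_slip_one_byte() (hci_bcsp.c lines 117-130) performs SLIP byte-stuffing: the frame delimiter 0xc0 and the escape byte 0xdb may not appear inside a frame, so each is replaced by a two-byte escape sequence. A self-contained sketch using the escape pairs conventional for BCSP (0xdb 0xdc for 0xc0, 0xdb 0xdd for 0xdb):

    #include <linux/skbuff.h>

    /* SLIP-escape one payload byte into skb; sketch of the pattern above. */
    static void slip_one_byte(struct sk_buff *skb, u8 c)
    {
            static const u8 esc_c0[2] = { 0xdb, 0xdc }; /* escaped 0xc0 */
            static const u8 esc_db[2] = { 0xdb, 0xdd }; /* escaped 0xdb */

            switch (c) {
            case 0xc0:
                    skb_put_data(skb, esc_c0, 2);
                    break;
            case 0xdb:
                    skb_put_data(skb, esc_db, 2);
                    break;
            default:
                    skb_put_data(skb, &c, 1);
            }
    }

Together with bcsp_slip_msgdelim(), which writes the bare 0xc0 delimiter, this lets the receiver resynchronize on any unescaped delimiter byte.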
skb                89 drivers/bluetooth/hci_h4.c static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb                93 drivers/bluetooth/hci_h4.c 	BT_DBG("hu %p skb %p", hu, skb);
skb                96 drivers/bluetooth/hci_h4.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb                97 drivers/bluetooth/hci_h4.c 	skb_queue_tail(&h4->txq, skb);
skb               155 drivers/bluetooth/hci_h4.c struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
skb               163 drivers/bluetooth/hci_h4.c 	if (IS_ERR(skb))
skb               164 drivers/bluetooth/hci_h4.c 		skb = NULL;
skb               177 drivers/bluetooth/hci_h4.c 		if (!skb) {
skb               182 drivers/bluetooth/hci_h4.c 				skb = bt_skb_alloc((&pkts[i])->maxlen,
skb               184 drivers/bluetooth/hci_h4.c 				if (!skb)
skb               187 drivers/bluetooth/hci_h4.c 				hci_skb_pkt_type(skb) = (&pkts[i])->type;
skb               188 drivers/bluetooth/hci_h4.c 				hci_skb_expect(skb) = (&pkts[i])->hlen;
skb               193 drivers/bluetooth/hci_h4.c 			if (!skb)
skb               200 drivers/bluetooth/hci_h4.c 		len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
skb               201 drivers/bluetooth/hci_h4.c 		skb_put_data(skb, buffer, len);
skb               207 drivers/bluetooth/hci_h4.c 		if (skb->len < hci_skb_expect(skb))
skb               211 drivers/bluetooth/hci_h4.c 			if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
skb               216 drivers/bluetooth/hci_h4.c 			kfree_skb(skb);
skb               220 drivers/bluetooth/hci_h4.c 		if (skb->len == (&pkts[i])->hlen) {
skb               230 drivers/bluetooth/hci_h4.c 				dlen = skb->data[(&pkts[i])->loff];
skb               231 drivers/bluetooth/hci_h4.c 				hci_skb_expect(skb) += dlen;
skb               233 drivers/bluetooth/hci_h4.c 				if (skb_tailroom(skb) < dlen) {
skb               234 drivers/bluetooth/hci_h4.c 					kfree_skb(skb);
skb               240 drivers/bluetooth/hci_h4.c 				dlen = get_unaligned_le16(skb->data +
skb               242 drivers/bluetooth/hci_h4.c 				hci_skb_expect(skb) += dlen;
skb               244 drivers/bluetooth/hci_h4.c 				if (skb_tailroom(skb) < dlen) {
skb               245 drivers/bluetooth/hci_h4.c 					kfree_skb(skb);
skb               251 drivers/bluetooth/hci_h4.c 				kfree_skb(skb);
skb               256 drivers/bluetooth/hci_h4.c 				hu->padding = (skb->len - 1) % alignment;
skb               260 drivers/bluetooth/hci_h4.c 				(&pkts[i])->recv(hdev, skb);
skb               261 drivers/bluetooth/hci_h4.c 				skb = NULL;
skb               264 drivers/bluetooth/hci_h4.c 			hu->padding = (skb->len - 1) % alignment;
skb               268 drivers/bluetooth/hci_h4.c 			(&pkts[i])->recv(hdev, skb);
skb               269 drivers/bluetooth/hci_h4.c 			skb = NULL;
skb               273 drivers/bluetooth/hci_h4.c 	return skb;
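h4_recv_buf() above is an incremental stream reassembler: each pass consumes at most hci_skb_expect(skb) - skb->len bytes (the min_t() at line 200), returns the partial skb while a packet is incomplete, widens the expectation once the length field is readable, and hands finished packets to the matching ->recv() callback. A minimal sketch of the same consume-and-carry loop for a fixed 8-byte frame, isolating the mechanics from the H4 header parsing (the frame size and deliver() callback are illustrative):

    #include <linux/err.h>
    #include <linux/skbuff.h>

    /* Accumulate a byte stream into fixed 8-byte frames; sketch only. */
    static struct sk_buff *reassemble(struct sk_buff *skb, const u8 *buf,
                                      size_t count,
                                      void (*deliver)(struct sk_buff *skb))
    {
            while (count) {
                    size_t len;

                    if (!skb) {
                            skb = alloc_skb(8, GFP_ATOMIC);
                            if (!skb)
                                    return ERR_PTR(-ENOMEM);
                    }

                    /* Take no more than the frame still needs. */
                    len = min_t(size_t, 8 - skb->len, count);
                    skb_put_data(skb, buf, len);
                    buf += len;
                    count -= len;

                    if (skb->len < 8)
                            continue;       /* frame still incomplete */

                    deliver(skb);           /* callback consumes the skb */
                    skb = NULL;
            }

            return skb;     /* partial frame carried into the next call */
    }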
skb               138 drivers/bluetooth/hci_h5.c 	struct sk_buff *skb;
skb               165 drivers/bluetooth/hci_h5.c 	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
skb               167 drivers/bluetooth/hci_h5.c 		skb_queue_head(&h5->rel, skb);
skb               268 drivers/bluetooth/hci_h5.c 	struct sk_buff *skb, *tmp;
skb               293 drivers/bluetooth/hci_h5.c 	skb_queue_walk_safe(&h5->unack, skb, tmp) {
skb               297 drivers/bluetooth/hci_h5.c 		__skb_unlink(skb, &h5->unack);
skb               298 drivers/bluetooth/hci_h5.c 		kfree_skb(skb);
skb               574 drivers/bluetooth/hci_h5.c static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb               578 drivers/bluetooth/hci_h5.c 	if (skb->len > 0xfff) {
skb               579 drivers/bluetooth/hci_h5.c 		BT_ERR("Packet too long (%u bytes)", skb->len);
skb               580 drivers/bluetooth/hci_h5.c 		kfree_skb(skb);
skb               586 drivers/bluetooth/hci_h5.c 		kfree_skb(skb);
skb               590 drivers/bluetooth/hci_h5.c 	switch (hci_skb_pkt_type(skb)) {
skb               593 drivers/bluetooth/hci_h5.c 		skb_queue_tail(&h5->rel, skb);
skb               597 drivers/bluetooth/hci_h5.c 		skb_queue_tail(&h5->unrel, skb);
skb               601 drivers/bluetooth/hci_h5.c 		BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
skb               602 drivers/bluetooth/hci_h5.c 		kfree_skb(skb);
skb               609 drivers/bluetooth/hci_h5.c static void h5_slip_delim(struct sk_buff *skb)
skb               613 drivers/bluetooth/hci_h5.c 	skb_put_data(skb, &delim, 1);
skb               616 drivers/bluetooth/hci_h5.c static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
skb               623 drivers/bluetooth/hci_h5.c 		skb_put_data(skb, &esc_delim, 2);
skb               626 drivers/bluetooth/hci_h5.c 		skb_put_data(skb, &esc_esc, 2);
skb               629 drivers/bluetooth/hci_h5.c 		skb_put_data(skb, &c, 1);
skb               708 drivers/bluetooth/hci_h5.c 	struct sk_buff *skb, *nskb;
skb               723 drivers/bluetooth/hci_h5.c 	skb = skb_dequeue(&h5->unrel);
skb               724 drivers/bluetooth/hci_h5.c 	if (skb) {
skb               725 drivers/bluetooth/hci_h5.c 		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
skb               726 drivers/bluetooth/hci_h5.c 				      skb->data, skb->len);
skb               728 drivers/bluetooth/hci_h5.c 			kfree_skb(skb);
skb               732 drivers/bluetooth/hci_h5.c 		skb_queue_head(&h5->unrel, skb);
skb               741 drivers/bluetooth/hci_h5.c 	skb = skb_dequeue(&h5->rel);
skb               742 drivers/bluetooth/hci_h5.c 	if (skb) {
skb               743 drivers/bluetooth/hci_h5.c 		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
skb               744 drivers/bluetooth/hci_h5.c 				      skb->data, skb->len);
skb               746 drivers/bluetooth/hci_h5.c 			__skb_queue_tail(&h5->unack, skb);
skb               752 drivers/bluetooth/hci_h5.c 		skb_queue_head(&h5->rel, skb);
skb               857 drivers/bluetooth/hci_h5.c 	struct sk_buff *skb;
skb               875 drivers/bluetooth/hci_h5.c 	skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
skb               877 drivers/bluetooth/hci_h5.c 	if (IS_ERR(skb)) {
skb               879 drivers/bluetooth/hci_h5.c 		err = PTR_ERR(skb);
skb               882 drivers/bluetooth/hci_h5.c 		kfree_skb(skb);
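hci_h5.c keeps three queues: unrel for packets sent without acknowledgement, rel for reliable packets awaiting transmission, and unack for reliable packets on the wire but not yet acked. On a retransmission timeout (lines 165-167 above) the unack queue is drained from its tail back onto the head of rel. A sketch of that requeue under the same three-queue layout:

    /* Move every unacked packet back to the front of the reliable
     * queue for retransmission; sketch of the timeout path above.
     */
    static void requeue_unacked(struct sk_buff_head *unack,
                                struct sk_buff_head *rel)
    {
            struct sk_buff *skb;

            while ((skb = __skb_dequeue_tail(unack)) != NULL)
                    skb_queue_head(rel, skb);
    }

Draining tail-first matters: each skb_queue_head() lands in front of the previous one, so the oldest unacked packet ends up at the head of rel and goes out first, preserving the original order.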
skb               156 drivers/bluetooth/hci_intel.c 	struct sk_buff *skb;
skb               167 drivers/bluetooth/hci_intel.c 	skb = bt_skb_alloc(sizeof(suspend), GFP_KERNEL);
skb               168 drivers/bluetooth/hci_intel.c 	if (!skb) {
skb               173 drivers/bluetooth/hci_intel.c 	skb_put_data(skb, suspend, sizeof(suspend));
skb               174 drivers/bluetooth/hci_intel.c 	hci_skb_pkt_type(skb) = HCI_LPM_PKT;
skb               179 drivers/bluetooth/hci_intel.c 	skb_queue_head(&intel->txq, skb);
skb               202 drivers/bluetooth/hci_intel.c 	struct sk_buff *skb;
skb               212 drivers/bluetooth/hci_intel.c 	skb = bt_skb_alloc(0, GFP_KERNEL);
skb               213 drivers/bluetooth/hci_intel.c 	if (!skb) {
skb               218 drivers/bluetooth/hci_intel.c 	hci_skb_pkt_type(skb) = HCI_LPM_WAKE_PKT;
skb               223 drivers/bluetooth/hci_intel.c 	skb_queue_head(&intel->txq, skb);
skb               246 drivers/bluetooth/hci_intel.c 	struct sk_buff *skb;
skb               252 drivers/bluetooth/hci_intel.c 	skb = bt_skb_alloc(sizeof(lpm_resume_ack), GFP_KERNEL);
skb               253 drivers/bluetooth/hci_intel.c 	if (!skb) {
skb               258 drivers/bluetooth/hci_intel.c 	skb_put_data(skb, lpm_resume_ack, sizeof(lpm_resume_ack));
skb               259 drivers/bluetooth/hci_intel.c 	hci_skb_pkt_type(skb) = HCI_LPM_PKT;
skb               262 drivers/bluetooth/hci_intel.c 	skb_queue_head(&intel->txq, skb);
skb               445 drivers/bluetooth/hci_intel.c 	struct sk_buff *skb;
skb               449 drivers/bluetooth/hci_intel.c 	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
skb               450 drivers/bluetooth/hci_intel.c 	if (!skb)
skb               453 drivers/bluetooth/hci_intel.c 	hdr = skb_put(skb, sizeof(*hdr));
skb               457 drivers/bluetooth/hci_intel.c 	evt = skb_put(skb, sizeof(*evt));
skb               461 drivers/bluetooth/hci_intel.c 	skb_put_u8(skb, 0x00);
skb               463 drivers/bluetooth/hci_intel.c 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb               465 drivers/bluetooth/hci_intel.c 	return hci_recv_frame(hdev, skb);
skb               473 drivers/bluetooth/hci_intel.c 	struct sk_buff *skb;
skb               498 drivers/bluetooth/hci_intel.c 	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
skb               499 drivers/bluetooth/hci_intel.c 	if (IS_ERR(skb)) {
skb               501 drivers/bluetooth/hci_intel.c 			   PTR_ERR(skb));
skb               502 drivers/bluetooth/hci_intel.c 		return PTR_ERR(skb);
skb               504 drivers/bluetooth/hci_intel.c 	kfree_skb(skb);
skb               506 drivers/bluetooth/hci_intel.c 	skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL);
skb               507 drivers/bluetooth/hci_intel.c 	if (!skb) {
skb               512 drivers/bluetooth/hci_intel.c 	skb_put_data(skb, speed_cmd, sizeof(speed_cmd));
skb               513 drivers/bluetooth/hci_intel.c 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
skb               517 drivers/bluetooth/hci_intel.c 	skb_queue_tail(&intel->txq, skb);
skb               533 drivers/bluetooth/hci_intel.c 	struct sk_buff *skb;
skb               860 drivers/bluetooth/hci_intel.c 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
skb               861 drivers/bluetooth/hci_intel.c 	if (IS_ERR(skb))
skb               862 drivers/bluetooth/hci_intel.c 		return PTR_ERR(skb);
skb               863 drivers/bluetooth/hci_intel.c 	kfree_skb(skb);
skb               878 drivers/bluetooth/hci_intel.c static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
skb               888 drivers/bluetooth/hci_intel.c 	hdr = (void *)skb->data;
skb               894 drivers/bluetooth/hci_intel.c 	if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
skb               895 drivers/bluetooth/hci_intel.c 	    skb->data[2] == 0x06) {
skb               896 drivers/bluetooth/hci_intel.c 		if (skb->data[3] != 0x00)
skb               907 drivers/bluetooth/hci_intel.c 	} else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
skb               908 drivers/bluetooth/hci_intel.c 		   skb->data[2] == 0x02) {
skb               913 drivers/bluetooth/hci_intel.c 	return hci_recv_frame(hdev, skb);
skb               931 drivers/bluetooth/hci_intel.c static int intel_recv_lpm(struct hci_dev *hdev, struct sk_buff *skb)
skb               933 drivers/bluetooth/hci_intel.c 	struct hci_lpm_pkt *lpm = (void *)skb->data;
skb               960 drivers/bluetooth/hci_intel.c 	kfree_skb(skb);
skb               999 drivers/bluetooth/hci_intel.c static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb              1004 drivers/bluetooth/hci_intel.c 	BT_DBG("hu %p skb %p", hu, skb);
skb              1026 drivers/bluetooth/hci_intel.c 	skb_queue_tail(&intel->txq, skb);
skb              1034 drivers/bluetooth/hci_intel.c 	struct sk_buff *skb;
skb              1036 drivers/bluetooth/hci_intel.c 	skb = skb_dequeue(&intel->txq);
skb              1037 drivers/bluetooth/hci_intel.c 	if (!skb)
skb              1038 drivers/bluetooth/hci_intel.c 		return skb;
skb              1041 drivers/bluetooth/hci_intel.c 	    (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)) {
skb              1042 drivers/bluetooth/hci_intel.c 		struct hci_command_hdr *cmd = (void *)skb->data;
skb              1055 drivers/bluetooth/hci_intel.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb              1057 drivers/bluetooth/hci_intel.c 	return skb;
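The hci_intel.c helper at lines 445-465 fabricates an HCI event in place: skb_put() carves out the hci_event_hdr and body, the packet type is forced to HCI_EVENT_PKT, and hci_recv_frame() feeds it to the core as if it had arrived from the controller. A sketch of synthesizing a minimal one-byte vendor event (event code 0xff, as vendor events use):

    #include <net/bluetooth/bluetooth.h>
    #include <net/bluetooth/hci_core.h>

    /* Inject a one-byte vendor event into the HCI core; sketch only. */
    static int inject_vendor_event(struct hci_dev *hdev, u8 status)
    {
            struct hci_event_hdr *hdr;
            struct sk_buff *skb;

            skb = bt_skb_alloc(sizeof(*hdr) + 1, GFP_KERNEL);
            if (!skb)
                    return -ENOMEM;

            hdr = skb_put(skb, sizeof(*hdr));
            hdr->evt  = 0xff;       /* vendor-specific event code */
            hdr->plen = 1;
            skb_put_u8(skb, status);

            hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
            return hci_recv_frame(hdev, skb);   /* core owns skb now */
    }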
skb               100 drivers/bluetooth/hci_ldisc.c 	struct sk_buff *skb = hu->tx_skb;
skb               102 drivers/bluetooth/hci_ldisc.c 	if (!skb) {
skb               106 drivers/bluetooth/hci_ldisc.c 			skb = hu->proto->dequeue(hu);
skb               113 drivers/bluetooth/hci_ldisc.c 	return skb;
skb               151 drivers/bluetooth/hci_ldisc.c 	struct sk_buff *skb;
skb               160 drivers/bluetooth/hci_ldisc.c 	while ((skb = hci_uart_dequeue(hu))) {
skb               164 drivers/bluetooth/hci_ldisc.c 		len = tty->ops->write(tty, skb->data, skb->len);
skb               167 drivers/bluetooth/hci_ldisc.c 		skb_pull(skb, len);
skb               168 drivers/bluetooth/hci_ldisc.c 		if (skb->len) {
skb               169 drivers/bluetooth/hci_ldisc.c 			hu->tx_skb = skb;
skb               173 drivers/bluetooth/hci_ldisc.c 		hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
skb               174 drivers/bluetooth/hci_ldisc.c 		kfree_skb(skb);
skb               273 drivers/bluetooth/hci_ldisc.c static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               277 drivers/bluetooth/hci_ldisc.c 	BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
skb               278 drivers/bluetooth/hci_ldisc.c 	       skb->len);
skb               287 drivers/bluetooth/hci_ldisc.c 	hu->proto->enqueue(hu, skb);
skb               394 drivers/bluetooth/hci_ldisc.c 	struct sk_buff *skb;
skb               429 drivers/bluetooth/hci_ldisc.c 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
skb               431 drivers/bluetooth/hci_ldisc.c 	if (IS_ERR(skb)) {
skb               433 drivers/bluetooth/hci_ldisc.c 		       hdev->name, PTR_ERR(skb));
skb               437 drivers/bluetooth/hci_ldisc.c 	if (skb->len != sizeof(*ver)) {
skb               443 drivers/bluetooth/hci_ldisc.c 	ver = (struct hci_rp_read_local_version *)skb->data;
skb               463 drivers/bluetooth/hci_ldisc.c 	kfree_skb(skb);
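The hci_ldisc.c transmit loop (lines 151-174) copes with short writes: tty->ops->write() may accept fewer bytes than skb->len, so the accepted prefix is trimmed with skb_pull() and the remainder is parked in hu->tx_skb for the next wakeup. A sketch of that loop against a generic transmit callback (xmit stands in for tty->ops->write and is assumed to return the byte count accepted, never negative):

    /* Drain a queue through a transport that may take partial writes.
     * Returns the unfinished skb to park, or NULL when fully drained.
     */
    static struct sk_buff *drain_txq(struct sk_buff_head *txq,
                                     int (*xmit)(const u8 *data, size_t len))
    {
            struct sk_buff *skb;

            while ((skb = skb_dequeue(txq)) != NULL) {
                    int len = xmit(skb->data, skb->len);

                    skb_pull(skb, len);     /* drop the bytes sent */
                    if (skb->len)
                            return skb;     /* transport full: retry later */

                    kfree_skb(skb);
            }

            return NULL;
    }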
skb                88 drivers/bluetooth/hci_ll.c 	struct sk_buff *skb = NULL;
skb                94 drivers/bluetooth/hci_ll.c 	skb = bt_skb_alloc(1, GFP_ATOMIC);
skb                95 drivers/bluetooth/hci_ll.c 	if (!skb) {
skb               102 drivers/bluetooth/hci_ll.c 	skb_put_u8(skb, cmd);
skb               105 drivers/bluetooth/hci_ll.c 	skb_queue_tail(&ll->txq, skb);
skb               188 drivers/bluetooth/hci_ll.c 	struct sk_buff *skb = NULL;
skb               190 drivers/bluetooth/hci_ll.c 	while ((skb = skb_dequeue(&ll->tx_wait_q)))
skb               191 drivers/bluetooth/hci_ll.c 		skb_queue_tail(&ll->txq, skb);
skb               310 drivers/bluetooth/hci_ll.c static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb               315 drivers/bluetooth/hci_ll.c 	BT_DBG("hu %p skb %p", hu, skb);
skb               318 drivers/bluetooth/hci_ll.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb               327 drivers/bluetooth/hci_ll.c 		skb_queue_tail(&ll->txq, skb);
skb               332 drivers/bluetooth/hci_ll.c 		skb_queue_tail(&ll->tx_wait_q, skb);
skb               343 drivers/bluetooth/hci_ll.c 		skb_queue_tail(&ll->tx_wait_q, skb);
skb               348 drivers/bluetooth/hci_ll.c 		kfree_skb(skb);
skb               357 drivers/bluetooth/hci_ll.c static int ll_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               362 drivers/bluetooth/hci_ll.c 	switch (hci_skb_pkt_type(skb)) {
skb               382 drivers/bluetooth/hci_ll.c 	kfree_skb(skb);
skb               456 drivers/bluetooth/hci_ll.c 	struct sk_buff *skb;
skb               459 drivers/bluetooth/hci_ll.c 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
skb               461 drivers/bluetooth/hci_ll.c 	if (IS_ERR(skb)) {
skb               463 drivers/bluetooth/hci_ll.c 			   PTR_ERR(skb));
skb               464 drivers/bluetooth/hci_ll.c 		return PTR_ERR(skb);
skb               466 drivers/bluetooth/hci_ll.c 	if (skb->len != sizeof(*ver)) {
skb               471 drivers/bluetooth/hci_ll.c 	ver = (struct hci_rp_read_local_version *)skb->data;
skb               482 drivers/bluetooth/hci_ll.c 	kfree_skb(skb);
skb               489 drivers/bluetooth/hci_ll.c 	struct sk_buff *skb;
skb               502 drivers/bluetooth/hci_ll.c 	skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen,
skb               504 drivers/bluetooth/hci_ll.c 	if (IS_ERR(skb)) {
skb               506 drivers/bluetooth/hci_ll.c 		return PTR_ERR(skb);
skb               508 drivers/bluetooth/hci_ll.c 	kfree_skb(skb);
skb               593 drivers/bluetooth/hci_ll.c 	struct sk_buff *skb;
skb               600 drivers/bluetooth/hci_ll.c 	skb = __hci_cmd_sync(hdev, HCI_VS_WRITE_BD_ADDR, sizeof(bdaddr_t),
skb               602 drivers/bluetooth/hci_ll.c 	if (!IS_ERR(skb))
skb               603 drivers/bluetooth/hci_ll.c 		kfree_skb(skb);
skb               605 drivers/bluetooth/hci_ll.c 	return PTR_ERR_OR_ZERO(skb);
skb               668 drivers/bluetooth/hci_ll.c 		struct sk_buff *skb;
skb               670 drivers/bluetooth/hci_ll.c 		skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
skb               673 drivers/bluetooth/hci_ll.c 		if (!IS_ERR(skb)) {
skb               674 drivers/bluetooth/hci_ll.c 			kfree_skb(skb);
skb               122 drivers/bluetooth/hci_mrvl.c 	struct sk_buff *skb;
skb               124 drivers/bluetooth/hci_mrvl.c 	skb = skb_dequeue(&mrvl->txq);
skb               125 drivers/bluetooth/hci_mrvl.c 	if (!skb) {
skb               127 drivers/bluetooth/hci_mrvl.c 		skb = skb_dequeue(&mrvl->rawq);
skb               130 drivers/bluetooth/hci_mrvl.c 		memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
skb               133 drivers/bluetooth/hci_mrvl.c 	return skb;
skb               136 drivers/bluetooth/hci_mrvl.c static int mrvl_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb               140 drivers/bluetooth/hci_mrvl.c 	skb_queue_tail(&mrvl->txq, skb);
skb               147 drivers/bluetooth/hci_mrvl.c 	struct sk_buff *skb;
skb               150 drivers/bluetooth/hci_mrvl.c 	skb = bt_skb_alloc(0, GFP_ATOMIC);
skb               151 drivers/bluetooth/hci_mrvl.c 	if (!skb) {
skb               155 drivers/bluetooth/hci_mrvl.c 	hci_skb_pkt_type(skb) = type;
skb               157 drivers/bluetooth/hci_mrvl.c 	skb_queue_tail(&mrvl->txq, skb);
skb               161 drivers/bluetooth/hci_mrvl.c static int mrvl_recv_fw_req(struct hci_dev *hdev, struct sk_buff *skb)
skb               163 drivers/bluetooth/hci_mrvl.c 	struct hci_mrvl_pkt *pkt = (void *)skb->data;
skb               189 drivers/bluetooth/hci_mrvl.c 	kfree_skb(skb);
skb               193 drivers/bluetooth/hci_mrvl.c static int mrvl_recv_chip_ver(struct hci_dev *hdev, struct sk_buff *skb)
skb               195 drivers/bluetooth/hci_mrvl.c 	struct hci_mrvl_pkt *pkt = (void *)skb->data;
skb               224 drivers/bluetooth/hci_mrvl.c 	kfree_skb(skb);
skb               292 drivers/bluetooth/hci_mrvl.c 		struct sk_buff *skb;
skb               333 drivers/bluetooth/hci_mrvl.c 		skb = bt_skb_alloc(mrvl->tx_len, GFP_KERNEL);
skb               334 drivers/bluetooth/hci_mrvl.c 		if (!skb) {
skb               339 drivers/bluetooth/hci_mrvl.c 		bt_cb(skb)->pkt_type = MRVL_RAW_DATA;
skb               341 drivers/bluetooth/hci_mrvl.c 		skb_put_data(skb, fw_ptr, mrvl->tx_len);
skb               346 drivers/bluetooth/hci_mrvl.c 		skb_queue_tail(&mrvl->rawq, skb);
skb               149 drivers/bluetooth/hci_nokia.c static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb);
skb               227 drivers/bluetooth/hci_nokia.c 	struct sk_buff *skb;
skb               233 drivers/bluetooth/hci_nokia.c 	skb = bt_skb_alloc(len, GFP_KERNEL);
skb               234 drivers/bluetooth/hci_nokia.c 	if (!skb)
skb               237 drivers/bluetooth/hci_nokia.c 	hci_skb_pkt_type(skb) = HCI_NOKIA_ALIVE_PKT;
skb               238 drivers/bluetooth/hci_nokia.c 	memset(skb->data, 0x00, len);
skb               240 drivers/bluetooth/hci_nokia.c 	hdr = skb_put(skb, sizeof(*hdr));
skb               242 drivers/bluetooth/hci_nokia.c 	pkt = skb_put(skb, sizeof(*pkt));
skb               245 drivers/bluetooth/hci_nokia.c 	nokia_enqueue(hu, skb);
skb               267 drivers/bluetooth/hci_nokia.c 	struct sk_buff *skb;
skb               273 drivers/bluetooth/hci_nokia.c 	skb = bt_skb_alloc(len, GFP_KERNEL);
skb               274 drivers/bluetooth/hci_nokia.c 	if (!skb)
skb               277 drivers/bluetooth/hci_nokia.c 	hci_skb_pkt_type(skb) = HCI_NOKIA_NEG_PKT;
skb               279 drivers/bluetooth/hci_nokia.c 	neg_hdr = skb_put(skb, sizeof(*neg_hdr));
skb               282 drivers/bluetooth/hci_nokia.c 	neg_cmd = skb_put(skb, sizeof(*neg_cmd));
skb               293 drivers/bluetooth/hci_nokia.c 	nokia_enqueue(hu, skb);
skb               360 drivers/bluetooth/hci_nokia.c 		struct sk_buff *skb;
skb               367 drivers/bluetooth/hci_nokia.c 			skb = __hci_cmd_sync(hu->hdev, opcode, cmd->plen,
skb               370 drivers/bluetooth/hci_nokia.c 			if (IS_ERR(skb)) {
skb               371 drivers/bluetooth/hci_nokia.c 				err = PTR_ERR(skb);
skb               376 drivers/bluetooth/hci_nokia.c 			kfree_skb(skb);
skb               510 drivers/bluetooth/hci_nokia.c static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb               516 drivers/bluetooth/hci_nokia.c 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
skb               519 drivers/bluetooth/hci_nokia.c 	if (skb->len % 2) {
skb               520 drivers/bluetooth/hci_nokia.c 		err = skb_pad(skb, 1);
skb               523 drivers/bluetooth/hci_nokia.c 		skb_put_u8(skb, 0x00);
skb               526 drivers/bluetooth/hci_nokia.c 	skb_queue_tail(&btdev->txq, skb);
skb               532 drivers/bluetooth/hci_nokia.c 					 struct sk_buff *skb)
skb               541 drivers/bluetooth/hci_nokia.c 	hdr = (struct hci_nokia_neg_hdr *)skb->data;
skb               548 drivers/bluetooth/hci_nokia.c 	evt = skb_pull(skb, sizeof(*hdr));
skb               565 drivers/bluetooth/hci_nokia.c 	kfree_skb(skb);
skb               569 drivers/bluetooth/hci_nokia.c static int nokia_recv_alive_packet(struct hci_dev *hdev, struct sk_buff *skb)
skb               578 drivers/bluetooth/hci_nokia.c 	hdr = (struct hci_nokia_alive_hdr *)skb->data;
skb               586 drivers/bluetooth/hci_nokia.c 	pkt = skb_pull(skb, sizeof(*hdr));
skb               600 drivers/bluetooth/hci_nokia.c 	kfree_skb(skb);
skb               604 drivers/bluetooth/hci_nokia.c static int nokia_recv_radio(struct hci_dev *hdev, struct sk_buff *skb)
skb               609 drivers/bluetooth/hci_nokia.c 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb               610 drivers/bluetooth/hci_nokia.c 	return hci_recv_frame(hdev, skb);
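nokia_enqueue() (hci_nokia.c lines 510-526) pads odd-length frames to an even length: skb_pad() guarantees a byte of tailroom, reallocating if needed and freeing the skb on failure, and skb_put_u8() appends the 0x00 pad byte. A sketch generalized to any alignment (hypothetical helper):

    /* Pad skb with zero bytes up to the next multiple of align. */
    static int pad_to_alignment(struct sk_buff *skb, unsigned int align)
    {
            while (skb->len % align) {
                    int err = skb_pad(skb, 1);  /* ensure 1 byte tailroom */

                    if (err)
                            return err;     /* skb_pad() freed the skb */
                    skb_put_u8(skb, 0x00);
            }
            return 0;
    }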
skb               287 drivers/bluetooth/hci_qca.c 	struct sk_buff *skb = NULL;
skb               292 drivers/bluetooth/hci_qca.c 	skb = bt_skb_alloc(1, GFP_ATOMIC);
skb               293 drivers/bluetooth/hci_qca.c 	if (!skb) {
skb               299 drivers/bluetooth/hci_qca.c 	skb_put_u8(skb, cmd);
skb               301 drivers/bluetooth/hci_qca.c 	skb_queue_tail(&qca->txq, skb);
skb               724 drivers/bluetooth/hci_qca.c 	struct sk_buff *skb = NULL;
skb               741 drivers/bluetooth/hci_qca.c 		while ((skb = skb_dequeue(&qca->tx_wait_q)))
skb               742 drivers/bluetooth/hci_qca.c 			skb_queue_tail(&qca->txq, skb);
skb               769 drivers/bluetooth/hci_qca.c static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb               774 drivers/bluetooth/hci_qca.c 	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
skb               778 drivers/bluetooth/hci_qca.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb               786 drivers/bluetooth/hci_qca.c 		skb_queue_tail(&qca->txq, skb);
skb               795 drivers/bluetooth/hci_qca.c 		skb_queue_tail(&qca->txq, skb);
skb               803 drivers/bluetooth/hci_qca.c 		skb_queue_tail(&qca->tx_wait_q, skb);
skb               813 drivers/bluetooth/hci_qca.c 		skb_queue_tail(&qca->tx_wait_q, skb);
skb               819 drivers/bluetooth/hci_qca.c 		kfree_skb(skb);
skb               828 drivers/bluetooth/hci_qca.c static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
skb               836 drivers/bluetooth/hci_qca.c 	kfree_skb(skb);
skb               840 drivers/bluetooth/hci_qca.c static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
skb               848 drivers/bluetooth/hci_qca.c 	kfree_skb(skb);
skb               852 drivers/bluetooth/hci_qca.c static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
skb               860 drivers/bluetooth/hci_qca.c 	kfree_skb(skb);
skb               864 drivers/bluetooth/hci_qca.c static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
skb               871 drivers/bluetooth/hci_qca.c 	if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE)
skb               872 drivers/bluetooth/hci_qca.c 		return hci_recv_diag(hdev, skb);
skb               874 drivers/bluetooth/hci_qca.c 	return hci_recv_frame(hdev, skb);
skb               877 drivers/bluetooth/hci_qca.c static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
skb               883 drivers/bluetooth/hci_qca.c 		struct hci_event_hdr *hdr = (void *)skb->data;
skb               898 drivers/bluetooth/hci_qca.c 		kfree_skb(skb);
skb               903 drivers/bluetooth/hci_qca.c 	return hci_recv_frame(hdev, skb);
skb              1002 drivers/bluetooth/hci_qca.c 	struct sk_buff *skb;
skb              1010 drivers/bluetooth/hci_qca.c 	skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
skb              1011 drivers/bluetooth/hci_qca.c 	if (!skb) {
skb              1017 drivers/bluetooth/hci_qca.c 	skb_put_data(skb, cmd, sizeof(cmd));
skb              1018 drivers/bluetooth/hci_qca.c 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
skb              1020 drivers/bluetooth/hci_qca.c 	skb_queue_tail(&qca->txq, skb);
skb                48 drivers/bluetooth/hci_serdev.c 	struct sk_buff *skb = hu->tx_skb;
skb                50 drivers/bluetooth/hci_serdev.c 	if (!skb) {
skb                52 drivers/bluetooth/hci_serdev.c 			skb = hu->proto->dequeue(hu);
skb                56 drivers/bluetooth/hci_serdev.c 	return skb;
skb                64 drivers/bluetooth/hci_serdev.c 	struct sk_buff *skb;
skb                72 drivers/bluetooth/hci_serdev.c 		while ((skb = hci_uart_dequeue(hu))) {
skb                76 drivers/bluetooth/hci_serdev.c 						      skb->data, skb->len);
skb                79 drivers/bluetooth/hci_serdev.c 			skb_pull(skb, len);
skb                80 drivers/bluetooth/hci_serdev.c 			if (skb->len) {
skb                81 drivers/bluetooth/hci_serdev.c 				hu->tx_skb = skb;
skb                85 drivers/bluetooth/hci_serdev.c 			hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
skb                86 drivers/bluetooth/hci_serdev.c 			kfree_skb(skb);
skb               138 drivers/bluetooth/hci_serdev.c static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb               142 drivers/bluetooth/hci_serdev.c 	BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
skb               143 drivers/bluetooth/hci_serdev.c 	       skb->len);
skb               145 drivers/bluetooth/hci_serdev.c 	hu->proto->enqueue(hu, skb);
skb               156 drivers/bluetooth/hci_serdev.c 	struct sk_buff *skb;
skb               193 drivers/bluetooth/hci_serdev.c 	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
skb               195 drivers/bluetooth/hci_serdev.c 	if (IS_ERR(skb)) {
skb               197 drivers/bluetooth/hci_serdev.c 			   PTR_ERR(skb));
skb               201 drivers/bluetooth/hci_serdev.c 	if (skb->len != sizeof(*ver))
skb               204 drivers/bluetooth/hci_serdev.c 	kfree_skb(skb);
skb                60 drivers/bluetooth/hci_uart.h 	int (*enqueue)(struct hci_uart *hu, struct sk_buff *skb);
skb               122 drivers/bluetooth/hci_uart.h 	int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
skb               146 drivers/bluetooth/hci_uart.h struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
skb                65 drivers/bluetooth/hci_vhci.c static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb                69 drivers/bluetooth/hci_vhci.c 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
skb                70 drivers/bluetooth/hci_vhci.c 	skb_queue_tail(&data->readq, skb);
skb                79 drivers/bluetooth/hci_vhci.c 	struct sk_buff *skb;
skb                95 drivers/bluetooth/hci_vhci.c 	skb = bt_skb_alloc(4, GFP_KERNEL);
skb                96 drivers/bluetooth/hci_vhci.c 	if (!skb)
skb               101 drivers/bluetooth/hci_vhci.c 		kfree_skb(skb);
skb               128 drivers/bluetooth/hci_vhci.c 		kfree_skb(skb);
skb               132 drivers/bluetooth/hci_vhci.c 	hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
skb               134 drivers/bluetooth/hci_vhci.c 	skb_put_u8(skb, 0xff);
skb               135 drivers/bluetooth/hci_vhci.c 	skb_put_u8(skb, opcode);
skb               136 drivers/bluetooth/hci_vhci.c 	put_unaligned_le16(hdev->id, skb_put(skb, 2));
skb               137 drivers/bluetooth/hci_vhci.c 	skb_queue_tail(&data->readq, skb);
skb               158 drivers/bluetooth/hci_vhci.c 	struct sk_buff *skb;
skb               165 drivers/bluetooth/hci_vhci.c 	skb = bt_skb_alloc(len, GFP_KERNEL);
skb               166 drivers/bluetooth/hci_vhci.c 	if (!skb)
skb               169 drivers/bluetooth/hci_vhci.c 	if (!copy_from_iter_full(skb_put(skb, len), len, from)) {
skb               170 drivers/bluetooth/hci_vhci.c 		kfree_skb(skb);
skb               174 drivers/bluetooth/hci_vhci.c 	pkt_type = *((__u8 *) skb->data);
skb               175 drivers/bluetooth/hci_vhci.c 	skb_pull(skb, 1);
skb               182 drivers/bluetooth/hci_vhci.c 			kfree_skb(skb);
skb               186 drivers/bluetooth/hci_vhci.c 		hci_skb_pkt_type(skb) = pkt_type;
skb               188 drivers/bluetooth/hci_vhci.c 		ret = hci_recv_frame(data->hdev, skb);
skb               194 drivers/bluetooth/hci_vhci.c 		opcode = *((__u8 *) skb->data);
skb               195 drivers/bluetooth/hci_vhci.c 		skb_pull(skb, 1);
skb               197 drivers/bluetooth/hci_vhci.c 		if (skb->len > 0) {
skb               198 drivers/bluetooth/hci_vhci.c 			kfree_skb(skb);
skb               202 drivers/bluetooth/hci_vhci.c 		kfree_skb(skb);
skb               208 drivers/bluetooth/hci_vhci.c 		kfree_skb(skb);
skb               216 drivers/bluetooth/hci_vhci.c 				    struct sk_buff *skb,
skb               222 drivers/bluetooth/hci_vhci.c 	len = min_t(unsigned int, skb->len, count);
skb               224 drivers/bluetooth/hci_vhci.c 	if (copy_to_user(ptr, skb->data, len))
skb               232 drivers/bluetooth/hci_vhci.c 	switch (hci_skb_pkt_type(skb)) {
skb               251 drivers/bluetooth/hci_vhci.c 	struct sk_buff *skb;
skb               255 drivers/bluetooth/hci_vhci.c 		skb = skb_dequeue(&data->readq);
skb               256 drivers/bluetooth/hci_vhci.c 		if (skb) {
skb               257 drivers/bluetooth/hci_vhci.c 			ret = vhci_put_user(data, skb, buf, count);
skb               259 drivers/bluetooth/hci_vhci.c 				skb_queue_head(&data->readq, skb);
skb               261 drivers/bluetooth/hci_vhci.c 				kfree_skb(skb);
skb              3926 drivers/char/pcmcia/synclink_cs.c static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
skb              3939 drivers/char/pcmcia/synclink_cs.c 	skb_copy_from_linear_data(skb, info->tx_buf, skb->len);
skb              3941 drivers/char/pcmcia/synclink_cs.c 	info->tx_put = info->tx_count = skb->len;
skb              3945 drivers/char/pcmcia/synclink_cs.c 	dev->stats.tx_bytes += skb->len;
skb              3948 drivers/char/pcmcia/synclink_cs.c 	dev_kfree_skb(skb);
skb              4212 drivers/char/pcmcia/synclink_cs.c 	struct sk_buff *skb = dev_alloc_skb(size);
skb              4218 drivers/char/pcmcia/synclink_cs.c 	if (skb == NULL) {
skb              4224 drivers/char/pcmcia/synclink_cs.c 	skb_put_data(skb, buf, size);
skb              4226 drivers/char/pcmcia/synclink_cs.c 	skb->protocol = hdlc_type_trans(skb, dev);
skb              4231 drivers/char/pcmcia/synclink_cs.c 	netif_rx(skb);
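The synclink_cs.c receive path (lines 4212-4231) is the canonical char-device-to-netdev handoff: dev_alloc_skb() for the frame, skb_put_data() to copy the HDLC payload in, hdlc_type_trans() to set skb->protocol, then netif_rx() to queue the frame for the stack. A condensed sketch of that sequence:

    #include <linux/hdlc.h>
    #include <linux/skbuff.h>

    /* Deliver one received HDLC frame to the network stack; sketch. */
    static void hdlc_rx_frame(struct net_device *dev, const u8 *buf, int size)
    {
            struct sk_buff *skb = dev_alloc_skb(size);

            if (!skb) {
                    dev->stats.rx_dropped++;
                    return;
            }

            skb_put_data(skb, buf, size);
            skb->protocol = hdlc_type_trans(skb, dev);

            dev->stats.rx_packets++;
            dev->stats.rx_bytes += size;

            netif_rx(skb);      /* stack owns the skb from here */
    }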
skb                66 drivers/connector/connector.c 	struct sk_buff *skb;
skb                96 drivers/connector/connector.c 	skb = nlmsg_new(size, gfp_mask);
skb                97 drivers/connector/connector.c 	if (!skb)
skb               100 drivers/connector/connector.c 	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
skb               102 drivers/connector/connector.c 		kfree_skb(skb);
skb               110 drivers/connector/connector.c 	NETLINK_CB(skb).dst_group = group;
skb               113 drivers/connector/connector.c 		return netlink_broadcast(dev->nls, skb, portid, group,
skb               115 drivers/connector/connector.c 	return netlink_unicast(dev->nls, skb, portid,
skb               131 drivers/connector/connector.c static int cn_call_callback(struct sk_buff *skb)
skb               136 drivers/connector/connector.c 	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
skb               137 drivers/connector/connector.c 	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
skb               141 drivers/connector/connector.c 	nlh = nlmsg_hdr(skb);
skb               157 drivers/connector/connector.c 		kfree_skb(skb);
skb               170 drivers/connector/connector.c static void cn_rx_skb(struct sk_buff *skb)
skb               175 drivers/connector/connector.c 	if (skb->len >= NLMSG_HDRLEN) {
skb               176 drivers/connector/connector.c 		nlh = nlmsg_hdr(skb);
skb               180 drivers/connector/connector.c 		    skb->len < nlh->nlmsg_len ||
skb               184 drivers/connector/connector.c 		err = cn_call_callback(skb_get(skb));
skb               186 drivers/connector/connector.c 			kfree_skb(skb);
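The connector send path (connector.c lines 66-115) wraps the payload in a netlink message: nlmsg_new() sizes the skb for payload plus header, nlmsg_put() writes the header and fails cleanly when it does not fit, and the finished skb goes out via netlink_broadcast() or netlink_unicast(). A minimal sketch of the build step (NLMSG_DONE as in the excerpt; the helper is hypothetical):

    #include <net/netlink.h>

    /* Build an NLMSG_DONE skb with room for size payload bytes; sketch.
     * On success *payload points at the area the caller fills in.
     */
    static struct sk_buff *build_done_msg(u32 seq, size_t size, void **payload)
    {
            struct nlmsghdr *nlh;
            struct sk_buff *skb;

            skb = nlmsg_new(size, GFP_KERNEL);
            if (!skb)
                    return NULL;

            nlh = nlmsg_put(skb, 0, seq, NLMSG_DONE, size, 0);
            if (!nlh) {
                    kfree_skb(skb);
                    return NULL;
            }

            *payload = nlmsg_data(nlh);
            return skb;
    }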
skb               129 drivers/crypto/chelsio/chcr_algo.c static inline int is_ofld_imm(const struct sk_buff *skb)
skb               131 drivers/crypto/chelsio/chcr_algo.c 	return (skb->len <= SGE_MAX_WR_LEN);
skb               762 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb = NULL;
skb               787 drivers/crypto/chelsio/chcr_algo.c 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
skb               788 drivers/crypto/chelsio/chcr_algo.c 	if (!skb) {
skb               792 drivers/crypto/chelsio/chcr_algo.c 	chcr_req = __skb_put_zero(skb, transhdr_len);
skb               841 drivers/crypto/chelsio/chcr_algo.c 	reqctx->skb = skb;
skb               849 drivers/crypto/chelsio/chcr_algo.c 	return skb;
skb              1117 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb;
skb              1168 drivers/crypto/chelsio/chcr_algo.c 	skb = create_cipher_wr(&wrparam);
skb              1169 drivers/crypto/chelsio/chcr_algo.c 	if (IS_ERR(skb)) {
skb              1171 drivers/crypto/chelsio/chcr_algo.c 		err = PTR_ERR(skb);
skb              1174 drivers/crypto/chelsio/chcr_algo.c 	skb->dev = u_ctx->lldi.ports[0];
skb              1175 drivers/crypto/chelsio/chcr_algo.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
skb              1176 drivers/crypto/chelsio/chcr_algo.c 	chcr_send_wr(skb);
skb              1190 drivers/crypto/chelsio/chcr_algo.c 				  struct sk_buff **skb,
skb              1284 drivers/crypto/chelsio/chcr_algo.c 	*skb = create_cipher_wr(&wrparam);
skb              1285 drivers/crypto/chelsio/chcr_algo.c 	if (IS_ERR(*skb)) {
skb              1286 drivers/crypto/chelsio/chcr_algo.c 		err = PTR_ERR(*skb);
skb              1303 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb = NULL;
skb              1320 drivers/crypto/chelsio/chcr_algo.c 			     &skb, CHCR_ENCRYPT_OP);
skb              1321 drivers/crypto/chelsio/chcr_algo.c 	if (err || !skb)
skb              1323 drivers/crypto/chelsio/chcr_algo.c 	skb->dev = u_ctx->lldi.ports[0];
skb              1324 drivers/crypto/chelsio/chcr_algo.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
skb              1325 drivers/crypto/chelsio/chcr_algo.c 	chcr_send_wr(skb);
skb              1337 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb = NULL;
skb              1352 drivers/crypto/chelsio/chcr_algo.c 			     &skb, CHCR_DECRYPT_OP);
skb              1353 drivers/crypto/chelsio/chcr_algo.c 	if (err || !skb)
skb              1355 drivers/crypto/chelsio/chcr_algo.c 	skb->dev = u_ctx->lldi.ports[0];
skb              1356 drivers/crypto/chelsio/chcr_algo.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
skb              1357 drivers/crypto/chelsio/chcr_algo.c 	chcr_send_wr(skb);
skb              1497 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb = NULL;
skb              1518 drivers/crypto/chelsio/chcr_algo.c 	skb = alloc_skb(transhdr_len, flags);
skb              1519 drivers/crypto/chelsio/chcr_algo.c 	if (!skb)
skb              1521 drivers/crypto/chelsio/chcr_algo.c 	chcr_req = __skb_put_zero(skb, transhdr_len);
skb              1576 drivers/crypto/chelsio/chcr_algo.c 	req_ctx->hctx_wr.skb = skb;
skb              1577 drivers/crypto/chelsio/chcr_algo.c 	return skb;
skb              1579 drivers/crypto/chelsio/chcr_algo.c 	kfree_skb(skb);
skb              1589 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb;
skb              1645 drivers/crypto/chelsio/chcr_algo.c 	skb = create_hash_wr(req, &params);
skb              1646 drivers/crypto/chelsio/chcr_algo.c 	if (IS_ERR(skb)) {
skb              1647 drivers/crypto/chelsio/chcr_algo.c 		error = PTR_ERR(skb);
skb              1660 drivers/crypto/chelsio/chcr_algo.c 	skb->dev = u_ctx->lldi.ports[0];
skb              1661 drivers/crypto/chelsio/chcr_algo.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
skb              1662 drivers/crypto/chelsio/chcr_algo.c 	chcr_send_wr(skb);
skb              1688 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb;
skb              1731 drivers/crypto/chelsio/chcr_algo.c 	skb = create_hash_wr(req, &params);
skb              1732 drivers/crypto/chelsio/chcr_algo.c 	if (IS_ERR(skb)) {
skb              1733 drivers/crypto/chelsio/chcr_algo.c 		error = PTR_ERR(skb);
skb              1737 drivers/crypto/chelsio/chcr_algo.c 	skb->dev = u_ctx->lldi.ports[0];
skb              1738 drivers/crypto/chelsio/chcr_algo.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
skb              1739 drivers/crypto/chelsio/chcr_algo.c 	chcr_send_wr(skb);
skb              1752 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb;
skb              1819 drivers/crypto/chelsio/chcr_algo.c 	skb = create_hash_wr(req, &params);
skb              1820 drivers/crypto/chelsio/chcr_algo.c 	if (IS_ERR(skb)) {
skb              1821 drivers/crypto/chelsio/chcr_algo.c 		error = PTR_ERR(skb);
skb              1826 drivers/crypto/chelsio/chcr_algo.c 	skb->dev = u_ctx->lldi.ports[0];
skb              1827 drivers/crypto/chelsio/chcr_algo.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
skb              1828 drivers/crypto/chelsio/chcr_algo.c 	chcr_send_wr(skb);
skb              1844 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb;
skb              1911 drivers/crypto/chelsio/chcr_algo.c 	skb = create_hash_wr(req, &params);
skb              1912 drivers/crypto/chelsio/chcr_algo.c 	if (IS_ERR(skb)) {
skb              1913 drivers/crypto/chelsio/chcr_algo.c 		error = PTR_ERR(skb);
skb              1917 drivers/crypto/chelsio/chcr_algo.c 	skb->dev = u_ctx->lldi.ports[0];
skb              1918 drivers/crypto/chelsio/chcr_algo.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
skb              1919 drivers/crypto/chelsio/chcr_algo.c 	chcr_send_wr(skb);
skb              1934 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb;
skb              1973 drivers/crypto/chelsio/chcr_algo.c 	skb = create_hash_wr(req, &params);
skb              1974 drivers/crypto/chelsio/chcr_algo.c 	if (IS_ERR(skb)) {
skb              1975 drivers/crypto/chelsio/chcr_algo.c 		error = PTR_ERR(skb);
skb              1979 drivers/crypto/chelsio/chcr_algo.c 	skb->dev = u_ctx->lldi.ports[0];
skb              1980 drivers/crypto/chelsio/chcr_algo.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
skb              1981 drivers/crypto/chelsio/chcr_algo.c 	chcr_send_wr(skb);
skb              2330 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb = NULL;
skb              2379 drivers/crypto/chelsio/chcr_algo.c 	skb = alloc_skb(transhdr_len, flags);
skb              2380 drivers/crypto/chelsio/chcr_algo.c 	if (!skb) {
skb              2385 drivers/crypto/chelsio/chcr_algo.c 	chcr_req = __skb_put_zero(skb, transhdr_len);
skb              2451 drivers/crypto/chelsio/chcr_algo.c 	reqctx->skb = skb;
skb              2453 drivers/crypto/chelsio/chcr_algo.c 	return skb;
skb              2890 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb = NULL;
skb              2939 drivers/crypto/chelsio/chcr_algo.c 	skb = alloc_skb(transhdr_len,  flags);
skb              2941 drivers/crypto/chelsio/chcr_algo.c 	if (!skb) {
skb              2946 drivers/crypto/chelsio/chcr_algo.c 	chcr_req = __skb_put_zero(skb, transhdr_len);
skb              2970 drivers/crypto/chelsio/chcr_algo.c 	reqctx->skb = skb;
skb              2972 drivers/crypto/chelsio/chcr_algo.c 	return skb;
skb              2974 drivers/crypto/chelsio/chcr_algo.c 	kfree_skb(skb);
skb              2987 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb = NULL;
skb              3029 drivers/crypto/chelsio/chcr_algo.c 	skb = alloc_skb(transhdr_len, flags);
skb              3030 drivers/crypto/chelsio/chcr_algo.c 	if (!skb) {
skb              3035 drivers/crypto/chelsio/chcr_algo.c 	chcr_req = __skb_put_zero(skb, transhdr_len);
skb              3085 drivers/crypto/chelsio/chcr_algo.c 	reqctx->skb = skb;
skb              3086 drivers/crypto/chelsio/chcr_algo.c 	return skb;
skb              3610 drivers/crypto/chelsio/chcr_algo.c 	struct sk_buff *skb;
skb              3638 drivers/crypto/chelsio/chcr_algo.c 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
skb              3640 drivers/crypto/chelsio/chcr_algo.c 	if (IS_ERR_OR_NULL(skb)) {
skb              3642 drivers/crypto/chelsio/chcr_algo.c 		return PTR_ERR_OR_ZERO(skb);
skb              3645 drivers/crypto/chelsio/chcr_algo.c 	skb->dev = u_ctx->lldi.ports[0];
skb              3646 drivers/crypto/chelsio/chcr_algo.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
skb              3647 drivers/crypto/chelsio/chcr_algo.c 	chcr_send_wr(skb);
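Every request builder in chcr_algo.c above ends the same way: the work-request skb is aimed at an adapter port, bound to a Tx queue with set_wr_txq(), and pushed with chcr_send_wr(), the driver's wrapper around cxgb4_crypto_send() (see chcr_core.c below). A sketch of that common tail, with the port and queue index as parameters (u_ctx->lldi.ports[0] and tx_qidx in the excerpt):

    /* Dispatch a prepared crypto work-request skb; sketch only. */
    static void send_wr(struct sk_buff *skb, struct net_device *port,
                        unsigned int tx_qidx)
    {
            skb->dev = port;
            set_wr_txq(skb, CPL_PRIORITY_DATA, tx_qidx);
            chcr_send_wr(skb);
    }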
skb               254 drivers/crypto/chelsio/chcr_algo.h #define FILL_LEN_PKD(calc_tx_flits_ofld, skb) \
skb               256 drivers/crypto/chelsio/chcr_algo.h 					   calc_tx_flits_ofld(skb) * 8), 16)))
skb               185 drivers/crypto/chelsio/chcr_core.c int chcr_send_wr(struct sk_buff *skb)
skb               187 drivers/crypto/chelsio/chcr_core.c 	return cxgb4_crypto_send(skb->dev, skb);
skb               234 drivers/crypto/chelsio/chcr_core.c int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
skb               236 drivers/crypto/chelsio/chcr_core.c 	return chcr_ipsec_xmit(skb, dev);
skb               215 drivers/crypto/chelsio/chcr_core.h int chcr_send_wr(struct sk_buff *skb);
skb               220 drivers/crypto/chelsio/chcr_core.h int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
skb               223 drivers/crypto/chelsio/chcr_core.h int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
skb               183 drivers/crypto/chelsio/chcr_crypto.h 	struct	sk_buff	*skb;
skb               262 drivers/crypto/chelsio/chcr_crypto.h 	struct sk_buff *skb;
skb               289 drivers/crypto/chelsio/chcr_crypto.h 	struct sk_buff *skb;
skb                78 drivers/crypto/chelsio/chcr_ipsec.c static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
skb               288 drivers/crypto/chelsio/chcr_ipsec.c static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
skb               292 drivers/crypto/chelsio/chcr_ipsec.c 		if (ip_hdr(skb)->ihl > 5)
skb               296 drivers/crypto/chelsio/chcr_ipsec.c 		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
skb               300 drivers/crypto/chelsio/chcr_ipsec.c 	if (skb_shinfo(skb)->gso_size)
skb               312 drivers/crypto/chelsio/chcr_ipsec.c static inline int is_eth_imm(const struct sk_buff *skb,
skb               326 drivers/crypto/chelsio/chcr_ipsec.c 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
skb               331 drivers/crypto/chelsio/chcr_ipsec.c static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
skb               341 drivers/crypto/chelsio/chcr_ipsec.c 	hdrlen = is_eth_imm(skb, sa_entry);
skb               353 drivers/crypto/chelsio/chcr_ipsec.c 		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
skb               356 drivers/crypto/chelsio/chcr_ipsec.c 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
skb               374 drivers/crypto/chelsio/chcr_ipsec.c inline void *copy_esn_pktxt(struct sk_buff *skb,
skb               395 drivers/crypto/chelsio/chcr_ipsec.c 	qidx = skb->queue_mapping;
skb               406 drivers/crypto/chelsio/chcr_ipsec.c 	esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
skb               407 drivers/crypto/chelsio/chcr_ipsec.c 	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
skb               408 drivers/crypto/chelsio/chcr_ipsec.c 	xo = xfrm_offload(skb);
skb               414 drivers/crypto/chelsio/chcr_ipsec.c 	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
skb               417 drivers/crypto/chelsio/chcr_ipsec.c 	if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
skb               422 drivers/crypto/chelsio/chcr_ipsec.c 		sc_imm->len = cpu_to_be32(skb->len);
skb               428 drivers/crypto/chelsio/chcr_ipsec.c inline void *copy_cpltx_pktxt(struct sk_buff *skb,
skb               443 drivers/crypto/chelsio/chcr_ipsec.c 	qidx = skb->queue_mapping;
skb               455 drivers/crypto/chelsio/chcr_ipsec.c 	if (skb_vlan_tag_present(skb)) {
skb               457 drivers/crypto/chelsio/chcr_ipsec.c 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
skb               462 drivers/crypto/chelsio/chcr_ipsec.c 	cpl->len = htons(skb->len);
skb               468 drivers/crypto/chelsio/chcr_ipsec.c 		pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
skb               472 drivers/crypto/chelsio/chcr_ipsec.c inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
skb               486 drivers/crypto/chelsio/chcr_ipsec.c 	qidx = skb->queue_mapping;
skb               515 drivers/crypto/chelsio/chcr_ipsec.c 	pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);
skb               520 drivers/crypto/chelsio/chcr_ipsec.c inline void *chcr_crypto_wreq(struct sk_buff *skb,
skb               542 drivers/crypto/chelsio/chcr_ipsec.c 	int qidx = skb_get_queue_mapping(skb);
skb               549 drivers/crypto/chelsio/chcr_ipsec.c 	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
skb               555 drivers/crypto/chelsio/chcr_ipsec.c 		immdatalen = skb->len;
skb               559 drivers/crypto/chelsio/chcr_ipsec.c 		if (!skb_is_nonlinear(skb))
skb               592 drivers/crypto/chelsio/chcr_ipsec.c 				     (skb_transport_offset(skb) +
skb               601 drivers/crypto/chelsio/chcr_ipsec.c 	wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
skb               602 drivers/crypto/chelsio/chcr_ipsec.c 	aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
skb               604 drivers/crypto/chelsio/chcr_ipsec.c 				  (skb_transport_offset(skb) +
skb               606 drivers/crypto/chelsio/chcr_ipsec.c 	ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
skb               633 drivers/crypto/chelsio/chcr_ipsec.c 	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);
skb               673 drivers/crypto/chelsio/chcr_ipsec.c int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
skb               675 drivers/crypto/chelsio/chcr_ipsec.c 	struct xfrm_state *x = xfrm_input_state(skb);
skb               692 drivers/crypto/chelsio/chcr_ipsec.c 	sp = skb_sec_path(skb);
skb               694 drivers/crypto/chelsio/chcr_ipsec.c out_free:       dev_kfree_skb_any(skb);
skb               700 drivers/crypto/chelsio/chcr_ipsec.c 	qidx = skb->queue_mapping;
skb               705 drivers/crypto/chelsio/chcr_ipsec.c 	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
skb               719 drivers/crypto/chelsio/chcr_ipsec.c 	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
skb               728 drivers/crypto/chelsio/chcr_ipsec.c 	pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
skb               742 drivers/crypto/chelsio/chcr_ipsec.c 		cxgb4_inline_tx_skb(skb, &q->q, sgl);
skb               743 drivers/crypto/chelsio/chcr_ipsec.c 		dev_consume_skb_any(skb);
skb               747 drivers/crypto/chelsio/chcr_ipsec.c 		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
skb               749 drivers/crypto/chelsio/chcr_ipsec.c 		skb_orphan(skb);
skb               754 drivers/crypto/chelsio/chcr_ipsec.c 		q->q.sdesc[last_desc].skb = skb;
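is_eth_imm() and calc_tx_sec_flits() (chcr_ipsec.c lines 312-356) choose between inlining the whole packet into the work request, when skb->len fits under MAX_IMM_TX_PKT_LEN less the WR header, and describing it with a scatter-gather list sized by sgl_len() over nr_frags + 1 buffers; the resulting flit count drives the credit check before the ring write. A sketch of the decision with the threshold parameterized, since the real headroom depends on the SA:

    /* Choose immediate vs. SGL transmission for one skb; sketch.
     * imm_limit stands in for MAX_IMM_TX_PKT_LEN minus the WR header.
     */
    static bool use_immediate_data(const struct sk_buff *skb,
                                   unsigned int imm_limit)
    {
            /* Small linear packets are copied straight into the WR. */
            if (skb->len <= imm_limit && !skb_is_nonlinear(skb))
                    return true;

            /* Everything else is DMA-mapped and described by an SGL. */
            return false;
    }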
skb               320 drivers/crypto/chelsio/chtls/chtls.h 	void (*backlog_rcv)(struct sock *sk, struct sk_buff *skb);
skb               343 drivers/crypto/chelsio/chtls/chtls.h #define ULP_SKB_CB(skb) ((struct ulp_skb_cb *)&((skb)->cb[0]))
skb               344 drivers/crypto/chelsio/chtls/chtls.h #define BLOG_SKB_CB(skb) ((struct blog_skb_cb *)(skb)->cb)
skb               361 drivers/crypto/chelsio/chtls/chtls.h #define skb_ulp_mode(skb)  (ULP_SKB_CB(skb)->ulp_mode)
skb               402 drivers/crypto/chelsio/chtls/chtls.h static inline void *cplhdr(struct sk_buff *skb)
skb               404 drivers/crypto/chelsio/chtls/chtls.h 	return skb->data;
skb               416 drivers/crypto/chelsio/chtls/chtls.h 				   struct sk_buff *skb)
skb               418 drivers/crypto/chelsio/chtls/chtls.h 	skb_reset_mac_header(skb);
skb               419 drivers/crypto/chelsio/chtls/chtls.h 	skb_reset_network_header(skb);
skb               420 drivers/crypto/chelsio/chtls/chtls.h 	skb_reset_transport_header(skb);
skb               424 drivers/crypto/chelsio/chtls/chtls.h 		BLOG_SKB_CB(skb)->backlog_rcv = fn;
skb               425 drivers/crypto/chelsio/chtls/chtls.h 		__sk_add_backlog(sk, skb);
skb               427 drivers/crypto/chelsio/chtls/chtls.h 		fn(sk, skb);
skb               451 drivers/crypto/chelsio/chtls/chtls.h 				 struct sk_buff *skb, int through_l2t)
skb               457 drivers/crypto/chelsio/chtls/chtls.h 		cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
skb               460 drivers/crypto/chelsio/chtls/chtls.h 		cxgb4_ofld_send(csk->egress_dev, skb);
skb               485 drivers/crypto/chelsio/chtls/chtls.h void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
skb               132 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sk_buff *skb;
skb               134 drivers/crypto/chelsio/chtls/chtls_cm.c 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
skb               135 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb_dst_set(skb, (void *)NULL);
skb               136 drivers/crypto/chelsio/chtls/chtls_cm.c 		kfree_skb(skb);
skb               143 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sk_buff *skb;
skb               145 drivers/crypto/chelsio/chtls/chtls_cm.c 	while ((skb = __skb_dequeue(&csk->txq))) {
skb               146 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_wmem_queued -= skb->truesize;
skb               147 drivers/crypto/chelsio/chtls/chtls_cm.c 		__kfree_skb(skb);
skb               155 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sk_buff *skb;
skb               157 drivers/crypto/chelsio/chtls/chtls_cm.c 	while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
skb               158 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb_dst_set(skb, NULL);
skb               159 drivers/crypto/chelsio/chtls/chtls_cm.c 		kfree_skb(skb);
skb               163 drivers/crypto/chelsio/chtls/chtls_cm.c static void abort_arp_failure(void *handle, struct sk_buff *skb)
skb               165 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_req *req = cplhdr(skb);
skb               170 drivers/crypto/chelsio/chtls/chtls_cm.c 	cxgb4_ofld_send(cdev->lldi->ports[0], skb);
skb               173 drivers/crypto/chelsio/chtls/chtls_cm.c static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
skb               175 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
skb               176 drivers/crypto/chelsio/chtls/chtls_cm.c 		__skb_trim(skb, 0);
skb               177 drivers/crypto/chelsio/chtls/chtls_cm.c 		refcount_add(2, &skb->users);
skb               179 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
skb               181 drivers/crypto/chelsio/chtls/chtls_cm.c 	return skb;
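
alloc_ctrl_skb() shows a small-allocation dodge: if the per-connection cached control skb is neither shared nor cloned, it is trimmed to zero and given two extra references (one consumed by the transmit, one keeping the cache slot alive); otherwise a fresh skb is allocated with __GFP_NOFAIL. A rough userspace analogue with invented names, where plain calloc() stands in for the no-fail allocation and can therefore still fail:

#include <stdlib.h>

struct cbuf {		/* toy cached control buffer */
	int refcnt;	/* skb->users analogue */
	int shared;	/* nonzero if someone else still holds it */
	size_t len;
};

/*
 * Mirrors alloc_ctrl_skb(): reuse the cached buffer when nobody else
 * holds it, bumping the refcount by two; otherwise allocate fresh.
 */
static struct cbuf *get_ctrl_buf(struct cbuf *cache, size_t len)
{
	(void)len;	/* a real version would size the fresh buffer */
	if (cache && !cache->shared) {
		cache->len = 0;		/* __skb_trim(skb, 0) analogue */
		cache->refcnt += 2;	/* refcount_add(2, &skb->users) */
		return cache;
	}
	struct cbuf *b = calloc(1, sizeof(*b));
	if (b)
		b->refcnt = 1;
	return b;
}
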
skb               184 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
skb               193 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!skb)
skb               194 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));
skb               196 drivers/crypto/chelsio/chtls/chtls_cm.c 	req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
skb               198 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
skb               202 drivers/crypto/chelsio/chtls/chtls_cm.c 	t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
skb               203 drivers/crypto/chelsio/chtls/chtls_cm.c 	send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
skb               206 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
skb               230 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_send_abort(sk, mode, skb);
skb               236 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb               259 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sk_buff *skb;
skb               267 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
skb               268 drivers/crypto/chelsio/chtls/chtls_cm.c 	req = (struct cpl_close_con_req *)__skb_put(skb, len);
skb               279 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
skb               342 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct sk_buff *skb;
skb               344 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
skb               345 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (skb)
skb               346 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
skb               455 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sk_buff *skb;
skb               457 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb = alloc_ctrl_skb(csk->txdata_skb_cache,
skb               460 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
skb               677 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
skb               679 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
skb               705 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
skb               707 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
skb               732 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sk_buff *skb;
skb               734 drivers/crypto/chelsio/chtls/chtls_cm.c 	while ((skb = dequeue_wr(sk)) != NULL)
skb               735 drivers/crypto/chelsio/chtls/chtls_cm.c 		kfree_skb(skb);
skb               803 drivers/crypto/chelsio/chtls/chtls_cm.c 			    struct sk_buff *skb)
skb               806 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb               809 drivers/crypto/chelsio/chtls/chtls_cm.c static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
skb               811 drivers/crypto/chelsio/chtls/chtls_cm.c 	pass_open_abort(skb->sk, lsk, skb);
skb               815 drivers/crypto/chelsio/chtls/chtls_cm.c 					struct sk_buff *skb)
skb               832 drivers/crypto/chelsio/chtls/chtls_cm.c 		kfree_skb(skb);
skb               842 drivers/crypto/chelsio/chtls/chtls_cm.c 		pass_open_abort(sk, parent, skb);
skb               844 drivers/crypto/chelsio/chtls/chtls_cm.c 		BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
skb               845 drivers/crypto/chelsio/chtls/chtls_cm.c 		__sk_add_backlog(parent, skb);
skb               851 drivers/crypto/chelsio/chtls/chtls_cm.c 					 struct sk_buff *skb)
skb               856 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
skb               918 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_pass_accept_rpl(struct sk_buff *skb,
skb               933 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk = skb->sk;
skb               940 drivers/crypto/chelsio/chtls/chtls_cm.c 	rpl5 = __skb_put_zero(skb, len);
skb               979 drivers/crypto/chelsio/chtls/chtls_cm.c 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
skb               980 drivers/crypto/chelsio/chtls/chtls_cm.c 	t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
skb               981 drivers/crypto/chelsio/chtls/chtls_cm.c 	cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
skb               992 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
skb               994 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (skb->protocol) {
skb               995 drivers/crypto/chelsio/chtls/chtls_cm.c 		kfree_skb(skb);
skb               998 drivers/crypto/chelsio/chtls/chtls_cm.c 	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
skb              1122 drivers/crypto/chelsio/chtls/chtls_cm.c static  void mk_tid_release(struct sk_buff *skb,
skb              1129 drivers/crypto/chelsio/chtls/chtls_cm.c 	req = (struct cpl_tid_release *)__skb_put(skb, len);
skb              1131 drivers/crypto/chelsio/chtls/chtls_cm.c 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
skb              1146 drivers/crypto/chelsio/chtls/chtls_cm.c 				      struct sk_buff *skb)
skb              1169 drivers/crypto/chelsio/chtls/chtls_cm.c 	req = cplhdr(skb) + RSS_HDR;
skb              1171 drivers/crypto/chelsio/chtls/chtls_cm.c 	cdev = BLOG_SKB_CB(skb)->cdev;
skb              1183 drivers/crypto/chelsio/chtls/chtls_cm.c 		kfree_skb(skb);
skb              1220 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_set_network_header(skb, (void *)iph - (void *)req);
skb              1256 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              1264 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              1270 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
skb              1272 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
skb              1294 drivers/crypto/chelsio/chtls/chtls_cm.c 	BLOG_SKB_CB(skb)->cdev = cdev;
skb              1295 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_pass_accept_request, lsk, skb);
skb              1323 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
skb              1408 drivers/crypto/chelsio/chtls/chtls_cm.c static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
skb              1410 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *child = skb->sk;
skb              1412 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb->sk = NULL;
skb              1413 drivers/crypto/chelsio/chtls/chtls_cm.c 	add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
skb              1414 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              1417 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
skb              1419 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
skb              1431 drivers/crypto/chelsio/chtls/chtls_cm.c 		kfree_skb(skb);
skb              1453 drivers/crypto/chelsio/chtls/chtls_cm.c 			kfree_skb(skb);
skb              1458 drivers/crypto/chelsio/chtls/chtls_cm.c 			kfree_skb(skb);
skb              1461 drivers/crypto/chelsio/chtls/chtls_cm.c 			skb->sk = sk;
skb              1462 drivers/crypto/chelsio/chtls/chtls_cm.c 			BLOG_SKB_CB(skb)->cdev = cdev;
skb              1463 drivers/crypto/chelsio/chtls/chtls_cm.c 			BLOG_SKB_CB(skb)->backlog_rcv =
skb              1465 drivers/crypto/chelsio/chtls/chtls_cm.c 			__sk_add_backlog(lsk, skb);
skb              1489 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
skb              1492 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
skb              1493 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_free_skb(sk, skb);
skb              1513 drivers/crypto/chelsio/chtls/chtls_cm.c static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
skb              1516 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_abort_conn(sk, skb);
skb              1518 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              1521 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
skb              1523 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR;
skb              1531 drivers/crypto/chelsio/chtls/chtls_cm.c 		handle_excess_rx(sk, skb);
skb              1535 drivers/crypto/chelsio/chtls/chtls_cm.c 	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
skb              1536 drivers/crypto/chelsio/chtls/chtls_cm.c 	ULP_SKB_CB(skb)->psh = hdr->psh;
skb              1537 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_ulp_mode(skb) = ULP_MODE_NONE;
skb              1539 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_reset_transport_header(skb);
skb              1540 drivers/crypto/chelsio/chtls/chtls_cm.c 	__skb_pull(skb, sizeof(*hdr) + RSS_HDR);
skb              1541 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!skb->data_len)
skb              1542 drivers/crypto/chelsio/chtls/chtls_cm.c 		__skb_trim(skb, ntohs(hdr->len));
skb              1547 drivers/crypto/chelsio/chtls/chtls_cm.c 		     tp->urg_seq - tp->rcv_nxt < skb->len))
skb              1549 drivers/crypto/chelsio/chtls/chtls_cm.c 			       skb->data[tp->urg_seq - tp->rcv_nxt];
skb              1556 drivers/crypto/chelsio/chtls/chtls_cm.c 	tcp_hdr(skb)->fin = 0;
skb              1557 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp->rcv_nxt += skb->len;
skb              1559 drivers/crypto/chelsio/chtls/chtls_cm.c 	__skb_queue_tail(&sk->sk_receive_queue, skb);
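
chtls_recv_data() follows the usual offload receive recipe: stamp the TCP sequence from the CPL header into the skb's control block, pull the header off, trim to the hardware-advertised payload length, then queue to sk_receive_queue. A compact standalone sketch of those steps; rx_cpl, rxbuf, and rx_annotate are illustrative names, and the header layout here is a toy, not the packed wire format of the real CPL:

#include <string.h>
#include <arpa/inet.h>	/* ntohl()/ntohs() */

struct rx_cpl {			/* toy cpl_rx_data header */
	unsigned int seq;	/* big-endian on the wire */
	unsigned short len;	/* big-endian payload length */
};

struct rxbuf {
	unsigned char *data;
	size_t len;
	unsigned int seq;	/* ULP_SKB_CB(skb)->seq analogue */
};

/*
 * Mirrors chtls_recv_data(): record the sequence number, strip the CPL
 * header, and trim to the advertised payload length before the buffer
 * is queued for the application.
 */
static void rx_annotate(struct rxbuf *b)
{
	struct rx_cpl hdr;

	memcpy(&hdr, b->data, sizeof(hdr));
	b->seq = ntohl(hdr.seq);
	b->data += sizeof(hdr);		/* __skb_pull() analogue */
	b->len = ntohs(hdr.len);	/* __skb_trim() analogue */
}
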
skb              1567 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
skb              1569 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
skb              1578 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_dst_set(skb, NULL);
skb              1579 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_recv_data, sk, skb);
skb              1583 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
skb              1585 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_tls_data *hdr = cplhdr(skb);
skb              1595 drivers/crypto/chelsio/chtls/chtls_cm.c 		handle_excess_rx(sk, skb);
skb              1599 drivers/crypto/chelsio/chtls/chtls_cm.c 	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
skb              1600 drivers/crypto/chelsio/chtls/chtls_cm.c 	ULP_SKB_CB(skb)->flags = 0;
skb              1601 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_ulp_mode(skb) = ULP_MODE_TLS;
skb              1603 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_reset_transport_header(skb);
skb              1604 drivers/crypto/chelsio/chtls/chtls_cm.c 	__skb_pull(skb, sizeof(*hdr));
skb              1605 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!skb->data_len)
skb              1606 drivers/crypto/chelsio/chtls/chtls_cm.c 		__skb_trim(skb,
skb              1610 drivers/crypto/chelsio/chtls/chtls_cm.c 		     tp->rcv_nxt < skb->len))
skb              1612 drivers/crypto/chelsio/chtls/chtls_cm.c 			       skb->data[tp->urg_seq - tp->rcv_nxt];
skb              1614 drivers/crypto/chelsio/chtls/chtls_cm.c 	tcp_hdr(skb)->fin = 0;
skb              1616 drivers/crypto/chelsio/chtls/chtls_cm.c 	__skb_queue_tail(&tlsk->sk_recv_queue, skb);
skb              1619 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
skb              1621 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_tls_data *req = cplhdr(skb);
skb              1630 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_dst_set(skb, NULL);
skb              1631 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_recv_pdu, sk, skb);
skb              1635 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
skb              1637 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb);
skb              1639 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length);
skb              1643 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
skb              1652 drivers/crypto/chelsio/chtls/chtls_cm.c 	cmp_cpl = cplhdr(skb);
skb              1657 drivers/crypto/chelsio/chtls/chtls_cm.c 	ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq);
skb              1658 drivers/crypto/chelsio/chtls/chtls_cm.c 	ULP_SKB_CB(skb)->flags = 0;
skb              1660 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_reset_transport_header(skb);
skb              1661 drivers/crypto/chelsio/chtls/chtls_cm.c 	__skb_pull(skb, sizeof(*cmp_cpl));
skb              1662 drivers/crypto/chelsio/chtls/chtls_cm.c 	tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
skb              1665 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!skb->data_len)
skb              1666 drivers/crypto/chelsio/chtls/chtls_cm.c 		__skb_trim(skb, TLS_HEADER_LENGTH);
skb              1671 drivers/crypto/chelsio/chtls/chtls_cm.c 	ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
skb              1674 drivers/crypto/chelsio/chtls/chtls_cm.c 		__skb_queue_tail(&sk->sk_receive_queue, skb);
skb              1676 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_set_hdrlen(skb, tlsk->pldlen);
skb              1678 drivers/crypto/chelsio/chtls/chtls_cm.c 		__skb_queue_tail(&sk->sk_receive_queue, skb);
skb              1688 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
skb              1690 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_rx_tls_cmp *req = cplhdr(skb);
skb              1699 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_dst_set(skb, NULL);
skb              1700 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_rx_hdr, sk, skb);
skb              1715 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
skb              1751 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              1754 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
skb              1756 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR;
skb              1785 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_abort_conn(sk, skb);
skb              1790 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              1793 drivers/crypto/chelsio/chtls/chtls_cm.c static struct sk_buff *get_cpl_skb(struct sk_buff *skb,
skb              1796 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) {
skb              1797 drivers/crypto/chelsio/chtls/chtls_cm.c 		WARN_ONCE(skb->len < len, "skb alloc error");
skb              1798 drivers/crypto/chelsio/chtls/chtls_cm.c 		__skb_trim(skb, len);
skb              1799 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb_get(skb);
skb              1801 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb = alloc_skb(len, gfp);
skb              1802 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (skb)
skb              1803 drivers/crypto/chelsio/chtls/chtls_cm.c 			__skb_put(skb, len);
skb              1805 drivers/crypto/chelsio/chtls/chtls_cm.c 	return skb;
skb              1808 drivers/crypto/chelsio/chtls/chtls_cm.c static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
skb              1811 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_rpl *rpl = cplhdr(skb);
skb              1817 drivers/crypto/chelsio/chtls/chtls_cm.c static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
skb              1819 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_req_rss *req = cplhdr(skb);
skb              1829 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              1832 drivers/crypto/chelsio/chtls/chtls_cm.c static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
skb              1835 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_req_rss *req = cplhdr(skb);
skb              1846 drivers/crypto/chelsio/chtls/chtls_cm.c 		send_defer_abort_rpl(cdev, skb);
skb              1851 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              1868 drivers/crypto/chelsio/chtls/chtls_cm.c static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
skb              1871 drivers/crypto/chelsio/chtls/chtls_cm.c 	DEFERRED_SKB_CB(skb)->handler = handler;
skb              1873 drivers/crypto/chelsio/chtls/chtls_cm.c 	__skb_queue_tail(&cdev->deferq, skb);
skb              1879 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
skb              1883 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
skb              1891 drivers/crypto/chelsio/chtls/chtls_cm.c 	reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any());
skb              1894 drivers/crypto/chelsio/chtls/chtls_cm.c 		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
skb              1909 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              1916 drivers/crypto/chelsio/chtls/chtls_cm.c static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
skb              1922 drivers/crypto/chelsio/chtls/chtls_cm.c 	child = skb->sk;
skb              1926 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb->sk	= NULL;
skb              1928 drivers/crypto/chelsio/chtls/chtls_cm.c 	send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
skb              1932 drivers/crypto/chelsio/chtls/chtls_cm.c static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
skb              1960 drivers/crypto/chelsio/chtls/chtls_cm.c 		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
skb              1962 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb->sk = sk;
skb              1963 drivers/crypto/chelsio/chtls/chtls_cm.c 		BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
skb              1964 drivers/crypto/chelsio/chtls/chtls_cm.c 		__sk_add_backlog(psk, skb);
skb              1970 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
skb              1972 drivers/crypto/chelsio/chtls/chtls_cm.c 	const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
skb              1981 drivers/crypto/chelsio/chtls/chtls_cm.c 		kfree_skb(skb);
skb              2004 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
skb              2011 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
skb              2014 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
skb              2016 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
skb              2037 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              2040 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
skb              2042 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
skb              2043 drivers/crypto/chelsio/chtls/chtls_cm.c 	void (*fn)(struct sock *sk, struct sk_buff *skb);
skb              2048 drivers/crypto/chelsio/chtls/chtls_cm.c 	opcode = ((const struct rss_header *)cplhdr(skb))->opcode;
skb              2071 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(fn, sk, skb);
skb              2075 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              2079 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
skb              2081 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
skb              2113 drivers/crypto/chelsio/chtls/chtls_cm.c 			kfree_skb(skb);
skb              2136 drivers/crypto/chelsio/chtls/chtls_cm.c 	kfree_skb(skb);
skb              2139 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
skb              2141 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR;
skb              2150 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_rx_ack, sk, skb);
skb                80 drivers/crypto/chelsio/chtls/chtls_cm.h typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
skb                88 drivers/crypto/chelsio/chtls/chtls_cm.h #define DEFERRED_SKB_CB(skb) ((struct deferred_skb_cb *)(skb)->cb)
skb                90 drivers/crypto/chelsio/chtls/chtls_cm.h #define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)
skb               103 drivers/crypto/chelsio/chtls/chtls_cm.h #define skb_ulp_tls_inline(skb)      (ULP_SKB_CB(skb)->ulp.tls.ofld)
skb               104 drivers/crypto/chelsio/chtls/chtls_cm.h #define skb_ulp_tls_iv_imm(skb)      (ULP_SKB_CB(skb)->ulp.tls.iv)
skb               106 drivers/crypto/chelsio/chtls/chtls_cm.h void chtls_defer_reply(struct sk_buff *skb, struct chtls_dev *dev,
skb               174 drivers/crypto/chelsio/chtls/chtls_cm.h static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
skb               176 drivers/crypto/chelsio/chtls/chtls_cm.h 	skb_dst_set(skb, NULL);
skb               177 drivers/crypto/chelsio/chtls/chtls_cm.h 	__skb_unlink(skb, &sk->sk_receive_queue);
skb               178 drivers/crypto/chelsio/chtls/chtls_cm.h 	__kfree_skb(skb);
skb               181 drivers/crypto/chelsio/chtls/chtls_cm.h static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
skb               183 drivers/crypto/chelsio/chtls/chtls_cm.h 	skb_dst_set(skb, NULL);
skb               184 drivers/crypto/chelsio/chtls/chtls_cm.h 	__skb_unlink(skb, &sk->sk_receive_queue);
skb               185 drivers/crypto/chelsio/chtls/chtls_cm.h 	kfree_skb(skb);
skb               194 drivers/crypto/chelsio/chtls/chtls_cm.h static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
skb               196 drivers/crypto/chelsio/chtls/chtls_cm.h 	WR_SKB_CB(skb)->next_wr = NULL;
skb               198 drivers/crypto/chelsio/chtls/chtls_cm.h 	skb_get(skb);
skb               201 drivers/crypto/chelsio/chtls/chtls_cm.h 		csk->wr_skb_head = skb;
skb               203 drivers/crypto/chelsio/chtls/chtls_cm.h 		WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
skb               204 drivers/crypto/chelsio/chtls/chtls_cm.h 	csk->wr_skb_tail = skb;
skb               210 drivers/crypto/chelsio/chtls/chtls_cm.h 	struct sk_buff *skb = NULL;
skb               212 drivers/crypto/chelsio/chtls/chtls_cm.h 	skb = csk->wr_skb_head;
skb               214 drivers/crypto/chelsio/chtls/chtls_cm.h 	if (likely(skb)) {
skb               216 drivers/crypto/chelsio/chtls/chtls_cm.h 		csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
skb               217 drivers/crypto/chelsio/chtls/chtls_cm.h 		WR_SKB_CB(skb)->next_wr = NULL;
skb               219 drivers/crypto/chelsio/chtls/chtls_cm.h 	return skb;
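
enqueue_wr()/dequeue_wr() above implement an intrusive FIFO of in-flight work requests: the link pointer lives inside each skb's cb[] area (WR_SKB_CB(skb)->next_wr), so tracking costs no extra allocation. A self-contained sketch of the same structure under assumed toy types:

#include <stddef.h>

struct wrbuf {
	char cb[48];	/* skb->cb[] analogue: per-buffer scratch space */
};

struct wr_cb {		/* overlay, as WR_SKB_CB() casts skb->cb */
	struct wrbuf *next_wr;
};

#define WR_CB(b) ((struct wr_cb *)(void *)(b)->cb)

struct conn {		/* chtls_sock wr_skb_head/wr_skb_tail analogue */
	struct wrbuf *wr_head;
	struct wrbuf *wr_tail;
};

/*
 * Mirrors enqueue_wr()/dequeue_wr(): an intrusive FIFO whose link lives
 * in each buffer's control block.
 */
static void wr_enqueue(struct conn *c, struct wrbuf *b)
{
	WR_CB(b)->next_wr = NULL;
	if (!c->wr_head)
		c->wr_head = b;
	else
		WR_CB(c->wr_tail)->next_wr = b;
	c->wr_tail = b;
}

static struct wrbuf *wr_dequeue(struct conn *c)
{
	struct wrbuf *b = c->wr_head;

	if (b) {
		c->wr_head = WR_CB(b)->next_wr;
		WR_CB(b)->next_wr = NULL;
	}
	return b;
}
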
skb                41 drivers/crypto/chelsio/chtls/chtls_hw.c static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word,
skb                52 drivers/crypto/chelsio/chtls/chtls_hw.c 	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
skb                54 drivers/crypto/chelsio/chtls/chtls_hw.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
skb                67 drivers/crypto/chelsio/chtls/chtls_hw.c 	struct sk_buff *skb;
skb                73 drivers/crypto/chelsio/chtls/chtls_hw.c 	skb = alloc_skb(wrlen, GFP_ATOMIC);
skb                74 drivers/crypto/chelsio/chtls/chtls_hw.c 	if (!skb)
skb                80 drivers/crypto/chelsio/chtls/chtls_hw.c 	__set_tcb_field(sk, skb, word, mask, val, 0, 1);
skb                81 drivers/crypto/chelsio/chtls/chtls_hw.c 	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
skb                84 drivers/crypto/chelsio/chtls/chtls_hw.c 	enqueue_wr(csk, skb);
skb                85 drivers/crypto/chelsio/chtls/chtls_hw.c 	ret = cxgb4_ofld_send(csk->egress_dev, skb);
skb                87 drivers/crypto/chelsio/chtls/chtls_hw.c 		kfree_skb(skb);
skb               297 drivers/crypto/chelsio/chtls/chtls_hw.c 	struct sk_buff *skb;
skb               318 drivers/crypto/chelsio/chtls/chtls_hw.c 	skb = alloc_skb(len, GFP_KERNEL);
skb               319 drivers/crypto/chelsio/chtls/chtls_hw.c 	if (!skb)
skb               329 drivers/crypto/chelsio/chtls/chtls_hw.c 	kwr = (struct tls_key_req *)__skb_put_zero(skb, len);
skb               360 drivers/crypto/chelsio/chtls/chtls_hw.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
skb               363 drivers/crypto/chelsio/chtls/chtls_hw.c 	enqueue_wr(csk, skb);
skb               364 drivers/crypto/chelsio/chtls/chtls_hw.c 	cxgb4_ofld_send(csk->egress_dev, skb);
skb               398 drivers/crypto/chelsio/chtls/chtls_hw.c 	kfree_skb(skb);
skb                35 drivers/crypto/chelsio/chtls/chtls_io.c static int data_sgl_len(const struct sk_buff *skb)
skb                39 drivers/crypto/chelsio/chtls/chtls_io.c 	cnt = skb_shinfo(skb)->nr_frags;
skb                50 drivers/crypto/chelsio/chtls/chtls_io.c static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
skb                52 drivers/crypto/chelsio/chtls/chtls_io.c 	int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
skb                53 drivers/crypto/chelsio/chtls/chtls_io.c 	int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);
skb                57 drivers/crypto/chelsio/chtls/chtls_io.c 		ULP_SKB_CB(skb)->ulp.tls.iv = 1;
skb                60 drivers/crypto/chelsio/chtls/chtls_io.c 	ULP_SKB_CB(skb)->ulp.tls.iv = 0;
skb                69 drivers/crypto/chelsio/chtls/chtls_io.c static int ivs_size(struct sock *sk, const struct sk_buff *skb)
skb                71 drivers/crypto/chelsio/chtls/chtls_io.c 	return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
skb                94 drivers/crypto/chelsio/chtls/chtls_io.c 	struct sk_buff *skb;
skb                96 drivers/crypto/chelsio/chtls/chtls_io.c 	skb = alloc_skb(flowclen, GFP_ATOMIC);
skb                97 drivers/crypto/chelsio/chtls/chtls_io.c 	if (!skb)
skb               100 drivers/crypto/chelsio/chtls/chtls_io.c 	memcpy(__skb_put(skb, flowclen), flowc, flowclen);
skb               101 drivers/crypto/chelsio/chtls/chtls_io.c 	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
skb               103 drivers/crypto/chelsio/chtls/chtls_io.c 	return skb;
skb               111 drivers/crypto/chelsio/chtls/chtls_io.c 	struct sk_buff *skb;
skb               118 drivers/crypto/chelsio/chtls/chtls_io.c 		skb = create_flowc_wr_skb(sk, flowc, flowclen);
skb               119 drivers/crypto/chelsio/chtls/chtls_io.c 		if (!skb)
skb               122 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_entail(sk, skb,
skb               132 drivers/crypto/chelsio/chtls/chtls_io.c 	skb = create_flowc_wr_skb(sk, flowc, flowclen);
skb               133 drivers/crypto/chelsio/chtls/chtls_io.c 	if (!skb)
skb               135 drivers/crypto/chelsio/chtls/chtls_io.c 	send_or_defer(sk, tp, skb, 0);
skb               219 drivers/crypto/chelsio/chtls/chtls_io.c static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb)
skb               232 drivers/crypto/chelsio/chtls/chtls_io.c 	number_of_ivs = nos_ivs(sk, skb->len);
skb               245 drivers/crypto/chelsio/chtls/chtls_io.c 	if (skb_ulp_tls_iv_imm(skb)) {
skb               247 drivers/crypto/chelsio/chtls/chtls_io.c 		iv_loc = (unsigned char *)__skb_push(skb, number_of_ivs *
skb               256 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_shinfo(skb)->nr_frags--;
skb               266 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0,
skb               276 drivers/crypto/chelsio/chtls/chtls_io.c static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb)
skb               292 drivers/crypto/chelsio/chtls/chtls_io.c 	sc = (struct ulptx_idata *)__skb_push(skb, immdlen);
skb               310 drivers/crypto/chelsio/chtls/chtls_io.c static bool is_sg_request(const struct sk_buff *skb)
skb               312 drivers/crypto/chelsio/chtls/chtls_io.c 	return skb->peeked ||
skb               313 drivers/crypto/chelsio/chtls/chtls_io.c 		(skb->len > MAX_IMM_ULPTX_WR_LEN);
skb               319 drivers/crypto/chelsio/chtls/chtls_io.c static bool skb_urgent(struct sk_buff *skb)
skb               321 drivers/crypto/chelsio/chtls/chtls_io.c 	return ULP_SKB_CB(skb)->flags & ULPCB_FLAG_URG;
skb               340 drivers/crypto/chelsio/chtls/chtls_io.c static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
skb               360 drivers/crypto/chelsio/chtls/chtls_io.c 	iv_imm = skb_ulp_tls_iv_imm(skb);
skb               375 drivers/crypto/chelsio/chtls/chtls_io.c 	req = (unsigned char *)__skb_push(skb, sizeof(struct cpl_tx_tls_sfo));
skb               377 drivers/crypto/chelsio/chtls/chtls_io.c 	req = (unsigned char *)__skb_push(skb, (sizeof(struct
skb               390 drivers/crypto/chelsio/chtls/chtls_io.c 	if (is_sg_request(skb))
skb               397 drivers/crypto/chelsio/chtls/chtls_io.c 			      TX_URG_V(skb_urgent(skb)) |
skb               420 drivers/crypto/chelsio/chtls/chtls_io.c 	data_type = tls_content_type(ULP_SKB_CB(skb)->ulp.tls.type);
skb               479 drivers/crypto/chelsio/chtls/chtls_io.c static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb,
skb               501 drivers/crypto/chelsio/chtls/chtls_io.c 	if (tls_copy_ivs(sk, skb))
skb               503 drivers/crypto/chelsio/chtls/chtls_io.c 	tls_copy_tx_key(sk, skb);
skb               504 drivers/crypto/chelsio/chtls/chtls_io.c 	tls_tx_data_wr(sk, skb, tls_len, tls_tx_imm, credits, expn_sz, pdus);
skb               508 drivers/crypto/chelsio/chtls/chtls_io.c static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
skb               520 drivers/crypto/chelsio/chtls/chtls_io.c 	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
skb               528 drivers/crypto/chelsio/chtls/chtls_io.c 	if (is_sg_request(skb))
skb               534 drivers/crypto/chelsio/chtls/chtls_io.c 			TX_URG_V(skb_urgent(skb)) |
skb               540 drivers/crypto/chelsio/chtls/chtls_io.c static int chtls_wr_size(struct chtls_sock *csk, const struct sk_buff *skb,
skb               547 drivers/crypto/chelsio/chtls/chtls_io.c 	wr_size += ivs_size(csk->sk, skb);
skb               553 drivers/crypto/chelsio/chtls/chtls_io.c 	if (!skb_ulp_tls_iv_imm(skb))
skb               554 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_shinfo(skb)->nr_frags++;
skb               559 drivers/crypto/chelsio/chtls/chtls_io.c static bool is_ofld_imm(struct chtls_sock *csk, const struct sk_buff *skb)
skb               561 drivers/crypto/chelsio/chtls/chtls_io.c 	int length = skb->len;
skb               563 drivers/crypto/chelsio/chtls/chtls_io.c 	if (skb->peeked || skb->len > MAX_IMM_ULPTX_WR_LEN)
skb               566 drivers/crypto/chelsio/chtls/chtls_io.c 	if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
skb               569 drivers/crypto/chelsio/chtls/chtls_io.c 		    skb_ulp_tls_inline(skb))
skb               570 drivers/crypto/chelsio/chtls/chtls_io.c 			length += chtls_wr_size(csk, skb, true);
skb               579 drivers/crypto/chelsio/chtls/chtls_io.c static unsigned int calc_tx_flits(const struct sk_buff *skb,
skb               585 drivers/crypto/chelsio/chtls/chtls_io.c 	cnt = skb_shinfo(skb)->nr_frags;
skb               586 drivers/crypto/chelsio/chtls/chtls_io.c 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
skb               591 drivers/crypto/chelsio/chtls/chtls_io.c static void arp_failure_discard(void *handle, struct sk_buff *skb)
skb               593 drivers/crypto/chelsio/chtls/chtls_io.c 	kfree_skb(skb);
skb               600 drivers/crypto/chelsio/chtls/chtls_io.c 	struct sk_buff *skb;
skb               615 drivers/crypto/chelsio/chtls/chtls_io.c 	while (csk->wr_credits && (skb = skb_peek(&csk->txq)) &&
skb               616 drivers/crypto/chelsio/chtls/chtls_io.c 	       (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_HOLD) ||
skb               618 drivers/crypto/chelsio/chtls/chtls_io.c 		unsigned int credit_len = skb->len;
skb               621 drivers/crypto/chelsio/chtls/chtls_io.c 		int tls_len = skb->len;/* TLS data len before IV/key */
skb               623 drivers/crypto/chelsio/chtls/chtls_io.c 		int len = skb->len;    /* length [ulp bytes] inserted by hw */
skb               627 drivers/crypto/chelsio/chtls/chtls_io.c 		immdlen = skb->len;
skb               628 drivers/crypto/chelsio/chtls/chtls_io.c 		if (!is_ofld_imm(csk, skb)) {
skb               629 drivers/crypto/chelsio/chtls/chtls_io.c 			immdlen = skb_transport_offset(skb);
skb               630 drivers/crypto/chelsio/chtls/chtls_io.c 			if (skb_ulp_tls_inline(skb))
skb               631 drivers/crypto/chelsio/chtls/chtls_io.c 				wr_size = chtls_wr_size(csk, skb, false);
skb               632 drivers/crypto/chelsio/chtls/chtls_io.c 			credit_len = 8 * calc_tx_flits(skb, immdlen);
skb               634 drivers/crypto/chelsio/chtls/chtls_io.c 			if (skb_ulp_tls_inline(skb)) {
skb               635 drivers/crypto/chelsio/chtls/chtls_io.c 				wr_size = chtls_wr_size(csk, skb, false);
skb               639 drivers/crypto/chelsio/chtls/chtls_io.c 		if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR))
skb               654 drivers/crypto/chelsio/chtls/chtls_io.c 			if (skb_ulp_tls_inline(skb) &&
skb               655 drivers/crypto/chelsio/chtls/chtls_io.c 			    !skb_ulp_tls_iv_imm(skb))
skb               656 drivers/crypto/chelsio/chtls/chtls_io.c 				skb_shinfo(skb)->nr_frags--;
skb               660 drivers/crypto/chelsio/chtls/chtls_io.c 		__skb_unlink(skb, &csk->txq);
skb               661 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_set_queue_mapping(skb, (csk->txq_idx << 1) |
skb               664 drivers/crypto/chelsio/chtls/chtls_io.c 			hws->txqid = (skb->queue_mapping >> 1);
skb               665 drivers/crypto/chelsio/chtls/chtls_io.c 		skb->csum = (__force __wsum)(credits_needed + csk->wr_nondata);
skb               669 drivers/crypto/chelsio/chtls/chtls_io.c 		enqueue_wr(csk, skb);
skb               671 drivers/crypto/chelsio/chtls/chtls_io.c 		if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
skb               673 drivers/crypto/chelsio/chtls/chtls_io.c 			    (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) ||
skb               678 drivers/crypto/chelsio/chtls/chtls_io.c 			if (skb_ulp_tls_inline(skb))
skb               679 drivers/crypto/chelsio/chtls/chtls_io.c 				make_tlstx_data_wr(sk, skb, tls_tx_imm,
skb               682 drivers/crypto/chelsio/chtls/chtls_io.c 				make_tx_data_wr(sk, skb, immdlen, len,
skb               687 drivers/crypto/chelsio/chtls/chtls_io.c 				ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
skb               689 drivers/crypto/chelsio/chtls/chtls_io.c 			struct cpl_close_con_req *req = cplhdr(skb);
skb               697 drivers/crypto/chelsio/chtls/chtls_io.c 			if ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) &&
skb               703 drivers/crypto/chelsio/chtls/chtls_io.c 		total_size += skb->truesize;
skb               704 drivers/crypto/chelsio/chtls/chtls_io.c 		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_BARRIER)
skb               706 drivers/crypto/chelsio/chtls/chtls_io.c 		t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
skb               707 drivers/crypto/chelsio/chtls/chtls_io.c 		cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
skb               714 drivers/crypto/chelsio/chtls/chtls_io.c 		     struct sk_buff *skb)
skb               718 drivers/crypto/chelsio/chtls/chtls_io.c 		ULP_SKB_CB(skb)->flags = ULPCB_FLAG_URG |
skb               771 drivers/crypto/chelsio/chtls/chtls_io.c 		struct sk_buff *skb = skb_peek_tail(&csk->txq);
skb               774 drivers/crypto/chelsio/chtls/chtls_io.c 		mark_urg(tp, flags, skb);
skb               776 drivers/crypto/chelsio/chtls/chtls_io.c 		if (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) &&
skb               778 drivers/crypto/chelsio/chtls/chtls_io.c 			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_HOLD;
skb               782 drivers/crypto/chelsio/chtls/chtls_io.c 		ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_HOLD;
skb               784 drivers/crypto/chelsio/chtls/chtls_io.c 		    ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
skb               819 drivers/crypto/chelsio/chtls/chtls_io.c void skb_entail(struct sock *sk, struct sk_buff *skb, int flags)
skb               824 drivers/crypto/chelsio/chtls/chtls_io.c 	ULP_SKB_CB(skb)->seq = tp->write_seq;
skb               825 drivers/crypto/chelsio/chtls/chtls_io.c 	ULP_SKB_CB(skb)->flags = flags;
skb               826 drivers/crypto/chelsio/chtls/chtls_io.c 	__skb_queue_tail(&csk->txq, skb);
skb               827 drivers/crypto/chelsio/chtls/chtls_io.c 	sk->sk_wmem_queued += skb->truesize;
skb               838 drivers/crypto/chelsio/chtls/chtls_io.c 	struct sk_buff *skb;
skb               840 drivers/crypto/chelsio/chtls/chtls_io.c 	skb = alloc_skb(size + TX_HEADER_LEN, sk->sk_allocation);
skb               841 drivers/crypto/chelsio/chtls/chtls_io.c 	if (likely(skb)) {
skb               842 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_reserve(skb, TX_HEADER_LEN);
skb               843 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
skb               844 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_reset_transport_header(skb);
skb               846 drivers/crypto/chelsio/chtls/chtls_io.c 	return skb;
skb               852 drivers/crypto/chelsio/chtls/chtls_io.c 	struct sk_buff *skb;
skb               854 drivers/crypto/chelsio/chtls/chtls_io.c 	skb = alloc_skb(((zcopy ? 0 : size) + TX_TLSHDR_LEN +
skb               857 drivers/crypto/chelsio/chtls/chtls_io.c 	if (likely(skb)) {
skb               858 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_reserve(skb, (TX_TLSHDR_LEN +
skb               860 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
skb               861 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_reset_transport_header(skb);
skb               862 drivers/crypto/chelsio/chtls/chtls_io.c 		ULP_SKB_CB(skb)->ulp.tls.ofld = 1;
skb               863 drivers/crypto/chelsio/chtls/chtls_io.c 		ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type;
skb               865 drivers/crypto/chelsio/chtls/chtls_io.c 	return skb;
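
get_tx_skb() and get_record_skb() both reserve header room (TX_HEADER_LEN or TX_TLSHDR_LEN) at allocation time so that the send path can later prepend work-request headers with __skb_push() without copying the payload. A sketch of the reserve-then-push idiom; tbuf, TX_HDR_ROOM and the helpers are invented for illustration:

#include <stdlib.h>

#define TX_HDR_ROOM 64	/* assumed stand-in for TX_HEADER_LEN */

struct tbuf {		/* toy linear sk_buff */
	unsigned char *head;
	unsigned char *data;
};

/* Mirrors get_tx_skb(): allocate payload space plus headroom up front. */
static struct tbuf *tx_buf_alloc(size_t size)
{
	struct tbuf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->head = malloc(size + TX_HDR_ROOM);
	if (!b->head) {
		free(b);
		return NULL;
	}
	b->data = b->head + TX_HDR_ROOM;	/* skb_reserve() analogue */
	return b;
}

/*
 * ...so the send path (make_tx_data_wr()/tls_tx_data_wr() above) can
 * prepend headers in place.
 */
static void *tx_buf_push(struct tbuf *b, size_t len)
{
	b->data -= len;				/* __skb_push() analogue */
	return b->data;
}
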
skb               868 drivers/crypto/chelsio/chtls/chtls_io.c static void tx_skb_finalize(struct sk_buff *skb)
skb               870 drivers/crypto/chelsio/chtls/chtls_io.c 	struct ulp_skb_cb *cb = ULP_SKB_CB(skb);
skb               887 drivers/crypto/chelsio/chtls/chtls_io.c 					  struct sk_buff *skb,
skb               893 drivers/crypto/chelsio/chtls/chtls_io.c 	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) +
skb               894 drivers/crypto/chelsio/chtls/chtls_io.c 				       off, copy, skb->len);
skb               898 drivers/crypto/chelsio/chtls/chtls_io.c 	skb->len             += copy;
skb               899 drivers/crypto/chelsio/chtls/chtls_io.c 	skb->data_len        += copy;
skb               900 drivers/crypto/chelsio/chtls/chtls_io.c 	skb->truesize        += copy;
skb               989 drivers/crypto/chelsio/chtls/chtls_io.c 	struct sk_buff *skb;
skb              1016 drivers/crypto/chelsio/chtls/chtls_io.c 		skb = skb_peek_tail(&csk->txq);
skb              1017 drivers/crypto/chelsio/chtls/chtls_io.c 		if (skb) {
skb              1018 drivers/crypto/chelsio/chtls/chtls_io.c 			copy = mss - skb->len;
skb              1019 drivers/crypto/chelsio/chtls/chtls_io.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1032 drivers/crypto/chelsio/chtls/chtls_io.c 			if (skb)
skb              1033 drivers/crypto/chelsio/chtls/chtls_io.c 				ULP_SKB_CB(skb)->ulp.tls.type = hdr.type;
skb              1036 drivers/crypto/chelsio/chtls/chtls_io.c 		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
skb              1039 drivers/crypto/chelsio/chtls/chtls_io.c 			if (skb) {
skb              1040 drivers/crypto/chelsio/chtls/chtls_io.c 				tx_skb_finalize(skb);
skb              1045 drivers/crypto/chelsio/chtls/chtls_io.c 				skb = get_record_skb(sk,
skb              1052 drivers/crypto/chelsio/chtls/chtls_io.c 				skb = get_tx_skb(sk,
skb              1056 drivers/crypto/chelsio/chtls/chtls_io.c 			if (unlikely(!skb))
skb              1059 drivers/crypto/chelsio/chtls/chtls_io.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1065 drivers/crypto/chelsio/chtls/chtls_io.c 		if (skb_tailroom(skb) > 0) {
skb              1066 drivers/crypto/chelsio/chtls/chtls_io.c 			copy = min(copy, skb_tailroom(skb));
skb              1069 drivers/crypto/chelsio/chtls/chtls_io.c 			err = skb_add_data_nocache(sk, skb,
skb              1074 drivers/crypto/chelsio/chtls/chtls_io.c 			int i = skb_shinfo(skb)->nr_frags;
skb              1083 drivers/crypto/chelsio/chtls/chtls_io.c 			    skb_can_coalesce(skb, i, page, off)) {
skb              1125 drivers/crypto/chelsio/chtls/chtls_io.c 							     skb, page,
skb              1137 drivers/crypto/chelsio/chtls/chtls_io.c 						&skb_shinfo(skb)->frags[i - 1],
skb              1140 drivers/crypto/chelsio/chtls/chtls_io.c 				skb_fill_page_desc(skb, i, page, off, copy);
skb              1151 drivers/crypto/chelsio/chtls/chtls_io.c 		if (unlikely(skb->len == mss))
skb              1152 drivers/crypto/chelsio/chtls/chtls_io.c 			tx_skb_finalize(skb);
skb              1162 drivers/crypto/chelsio/chtls/chtls_io.c 			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
skb              1167 drivers/crypto/chelsio/chtls/chtls_io.c 		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)
skb              1185 drivers/crypto/chelsio/chtls/chtls_io.c 	if (!skb->len) {
skb              1186 drivers/crypto/chelsio/chtls/chtls_io.c 		__skb_unlink(skb, &csk->txq);
skb              1187 drivers/crypto/chelsio/chtls/chtls_io.c 		sk->sk_wmem_queued -= skb->truesize;
skb              1188 drivers/crypto/chelsio/chtls/chtls_io.c 		__kfree_skb(skb);
skb              1224 drivers/crypto/chelsio/chtls/chtls_io.c 		struct sk_buff *skb = skb_peek_tail(&csk->txq);
skb              1227 drivers/crypto/chelsio/chtls/chtls_io.c 		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
skb              1228 drivers/crypto/chelsio/chtls/chtls_io.c 		    (copy = mss - skb->len) <= 0) {
skb              1234 drivers/crypto/chelsio/chtls/chtls_io.c 				skb = get_record_skb(sk,
skb              1240 drivers/crypto/chelsio/chtls/chtls_io.c 				skb = get_tx_skb(sk, 0);
skb              1242 drivers/crypto/chelsio/chtls/chtls_io.c 			if (!skb)
skb              1249 drivers/crypto/chelsio/chtls/chtls_io.c 		i = skb_shinfo(skb)->nr_frags;
skb              1250 drivers/crypto/chelsio/chtls/chtls_io.c 		if (skb_can_coalesce(skb, i, page, offset)) {
skb              1251 drivers/crypto/chelsio/chtls/chtls_io.c 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb              1254 drivers/crypto/chelsio/chtls/chtls_io.c 			skb_fill_page_desc(skb, i, page, offset, copy);
skb              1256 drivers/crypto/chelsio/chtls/chtls_io.c 			tx_skb_finalize(skb);
skb              1261 drivers/crypto/chelsio/chtls/chtls_io.c 		skb->len += copy;
skb              1262 drivers/crypto/chelsio/chtls/chtls_io.c 		if (skb->len == mss)
skb              1263 drivers/crypto/chelsio/chtls/chtls_io.c 			tx_skb_finalize(skb);
skb              1264 drivers/crypto/chelsio/chtls/chtls_io.c 		skb->data_len += copy;
skb              1265 drivers/crypto/chelsio/chtls/chtls_io.c 		skb->truesize += copy;
skb              1274 drivers/crypto/chelsio/chtls/chtls_io.c 			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
skb              1279 drivers/crypto/chelsio/chtls/chtls_io.c 		if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
skb              1342 drivers/crypto/chelsio/chtls/chtls_io.c 	struct sk_buff *skb;
skb              1344 drivers/crypto/chelsio/chtls/chtls_io.c 	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
skb              1345 drivers/crypto/chelsio/chtls/chtls_io.c 	if (!skb)
skb              1347 drivers/crypto/chelsio/chtls/chtls_io.c 	__skb_put(skb, sizeof(*req));
skb              1348 drivers/crypto/chelsio/chtls/chtls_io.c 	req = (struct cpl_rx_data_ack *)skb->head;
skb              1350 drivers/crypto/chelsio/chtls/chtls_io.c 	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
skb              1356 drivers/crypto/chelsio/chtls/chtls_io.c 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
skb              1418 drivers/crypto/chelsio/chtls/chtls_io.c 		struct sk_buff *skb;
skb              1431 drivers/crypto/chelsio/chtls/chtls_io.c 		skb = skb_peek(&sk->sk_receive_queue);
skb              1432 drivers/crypto/chelsio/chtls/chtls_io.c 		if (skb)
skb              1486 drivers/crypto/chelsio/chtls/chtls_io.c 		if (!skb->len) {
skb              1487 drivers/crypto/chelsio/chtls/chtls_io.c 			skb_dst_set(skb, NULL);
skb              1488 drivers/crypto/chelsio/chtls/chtls_io.c 			__skb_unlink(skb, &sk->sk_receive_queue);
skb              1489 drivers/crypto/chelsio/chtls/chtls_io.c 			kfree_skb(skb);
skb              1504 drivers/crypto/chelsio/chtls/chtls_io.c 		avail = skb->len - offset;
skb              1524 drivers/crypto/chelsio/chtls/chtls_io.c 		if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
skb              1538 drivers/crypto/chelsio/chtls/chtls_io.c 		if ((avail + offset) >= skb->len) {
skb              1539 drivers/crypto/chelsio/chtls/chtls_io.c 			if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
skb              1540 drivers/crypto/chelsio/chtls/chtls_io.c 				tp->copied_seq += skb->len;
skb              1541 drivers/crypto/chelsio/chtls/chtls_io.c 				hws->rcvpld = skb->hdr_len;
skb              1545 drivers/crypto/chelsio/chtls/chtls_io.c 			chtls_free_skb(sk, skb);
skb              1568 drivers/crypto/chelsio/chtls/chtls_io.c 	struct sk_buff *skb;
skb              1588 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_queue_walk(&sk->sk_receive_queue, skb) {
skb              1589 drivers/crypto/chelsio/chtls/chtls_io.c 			offset = peek_seq - ULP_SKB_CB(skb)->seq;
skb              1590 drivers/crypto/chelsio/chtls/chtls_io.c 			if (offset < skb->len)
skb              1635 drivers/crypto/chelsio/chtls/chtls_io.c 		avail = skb->len - offset;
skb              1669 drivers/crypto/chelsio/chtls/chtls_io.c 			if (skb_copy_datagram_msg(skb, offset, msg, len)) {
skb              1723 drivers/crypto/chelsio/chtls/chtls_io.c 		struct sk_buff *skb;
skb              1736 drivers/crypto/chelsio/chtls/chtls_io.c 		skb = skb_peek(&sk->sk_receive_queue);
skb              1737 drivers/crypto/chelsio/chtls/chtls_io.c 		if (skb)
skb              1791 drivers/crypto/chelsio/chtls/chtls_io.c 		if (!skb->len) {
skb              1792 drivers/crypto/chelsio/chtls/chtls_io.c 			chtls_kfree_skb(sk, skb);
skb              1804 drivers/crypto/chelsio/chtls/chtls_io.c 		offset = tp->copied_seq - ULP_SKB_CB(skb)->seq;
skb              1805 drivers/crypto/chelsio/chtls/chtls_io.c 		avail = skb->len - offset;
skb              1826 drivers/crypto/chelsio/chtls/chtls_io.c 			if (skb_copy_datagram_msg(skb, offset,
skb              1843 drivers/crypto/chelsio/chtls/chtls_io.c 		if (avail + offset >= skb->len) {
skb              1844 drivers/crypto/chelsio/chtls/chtls_io.c 			if (likely(skb))
skb              1845 drivers/crypto/chelsio/chtls/chtls_io.c 				chtls_free_skb(sk, skb);
skb                76 drivers/crypto/chelsio/chtls/chtls_main.c static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
skb                78 drivers/crypto/chelsio/chtls/chtls_main.c 	if (likely(skb_transport_header(skb) != skb_network_header(skb)))
skb                79 drivers/crypto/chelsio/chtls/chtls_main.c 		return tcp_v4_do_rcv(sk, skb);
skb                80 drivers/crypto/chelsio/chtls/chtls_main.c 	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
skb               204 drivers/crypto/chelsio/chtls/chtls_main.c 	struct sk_buff *skb;
skb               207 drivers/crypto/chelsio/chtls/chtls_main.c 	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
skb               209 drivers/crypto/chelsio/chtls/chtls_main.c 		DEFERRED_SKB_CB(skb)->handler(cdev, skb);
skb               336 drivers/crypto/chelsio/chtls/chtls_main.c 	struct sk_buff *skb;
skb               342 drivers/crypto/chelsio/chtls/chtls_main.c 	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
skb               344 drivers/crypto/chelsio/chtls/chtls_main.c 	if (unlikely(!skb))
skb               346 drivers/crypto/chelsio/chtls/chtls_main.c 	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
skb               349 drivers/crypto/chelsio/chtls/chtls_main.c 	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
skb               350 drivers/crypto/chelsio/chtls/chtls_main.c 	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req)
skb               354 drivers/crypto/chelsio/chtls/chtls_main.c 	return skb;
skb               361 drivers/crypto/chelsio/chtls/chtls_main.c 	struct sk_buff *skb;
skb               364 drivers/crypto/chelsio/chtls/chtls_main.c 	skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
skb               365 drivers/crypto/chelsio/chtls/chtls_main.c 	if (!skb)
skb               368 drivers/crypto/chelsio/chtls/chtls_main.c 	ret = chtls_handlers[opcode](cdev, skb);
skb               370 drivers/crypto/chelsio/chtls/chtls_main.c 		kfree_skb(skb);
skb               379 drivers/crypto/chelsio/chtls/chtls_main.c 	struct sk_buff *skb;
skb               387 drivers/crypto/chelsio/chtls/chtls_main.c 	skb = cdev->rspq_skb_cache[rspq_bin];
skb               388 drivers/crypto/chelsio/chtls/chtls_main.c 	if (skb && !skb_is_nonlinear(skb) &&
skb               389 drivers/crypto/chelsio/chtls/chtls_main.c 	    !skb_shared(skb) && !skb_cloned(skb)) {
skb               390 drivers/crypto/chelsio/chtls/chtls_main.c 		refcount_inc(&skb->users);
skb               391 drivers/crypto/chelsio/chtls/chtls_main.c 		if (refcount_read(&skb->users) == 2) {
skb               392 drivers/crypto/chelsio/chtls/chtls_main.c 			__skb_trim(skb, 0);
skb               393 drivers/crypto/chelsio/chtls/chtls_main.c 			if (skb_tailroom(skb) >= len)
skb               396 drivers/crypto/chelsio/chtls/chtls_main.c 		refcount_dec(&skb->users);
skb               398 drivers/crypto/chelsio/chtls/chtls_main.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb               399 drivers/crypto/chelsio/chtls/chtls_main.c 	if (unlikely(!skb))
skb               403 drivers/crypto/chelsio/chtls/chtls_main.c 	__skb_put(skb, len);
skb               404 drivers/crypto/chelsio/chtls/chtls_main.c 	skb_copy_to_linear_data(skb, rsp, len);
skb               405 drivers/crypto/chelsio/chtls/chtls_main.c 	skb_reset_network_header(skb);
skb               406 drivers/crypto/chelsio/chtls/chtls_main.c 	skb_reset_transport_header(skb);
skb               407 drivers/crypto/chelsio/chtls/chtls_main.c 	ret = chtls_handlers[opcode](cdev, skb);
skb               410 drivers/crypto/chelsio/chtls/chtls_main.c 		kfree_skb(skb);
skb               417 drivers/crypto/chelsio/chtls/chtls_main.c 	struct sk_buff *skb = *skbs;
skb               423 drivers/crypto/chelsio/chtls/chtls_main.c 	__skb_push(skb, sizeof(struct rss_header));
skb               424 drivers/crypto/chelsio/chtls/chtls_main.c 	skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));
skb               426 drivers/crypto/chelsio/chtls/chtls_main.c 	ret = chtls_handlers[opcode](cdev, skb);
skb               428 drivers/crypto/chelsio/chtls/chtls_main.c 		kfree_skb(skb);
skb               436 drivers/crypto/chelsio/chtls/chtls_main.c 	struct sk_buff *skb;
skb               450 drivers/crypto/chelsio/chtls/chtls_main.c 	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
skb               451 drivers/crypto/chelsio/chtls/chtls_main.c 	if (unlikely(!skb))
skb               453 drivers/crypto/chelsio/chtls/chtls_main.c 	chtls_recv(cdev, &skb, rsp);
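
chtls_main.c dispatches every received CPL through an opcode-indexed table (chtls_handlers[opcode](cdev, skb)), freeing the skb itself only when the handler reports it was not consumed. A minimal sketch of that dispatch contract; the opcode value and all type names here are illustrative only:

#include <stdlib.h>

struct cdev_toy;			/* opaque device, as chtls_dev is here */
struct msg { unsigned char opcode; };	/* toy CPL message */

typedef int (*cpl_handler_t)(struct cdev_toy *dev, struct msg *m);

static int handle_example(struct cdev_toy *dev, struct msg *m)
{
	(void)dev;
	(void)m;
	return 0;			/* 0: message consumed */
}

/* Table indexed by opcode; 0x26 is purely illustrative. */
static cpl_handler_t handlers[256] = {
	[0x26] = handle_example,
};

/*
 * Mirrors the dispatch sites above: a missing handler or a nonzero
 * return means the message was not consumed, so the caller frees it
 * (the kfree_skb() calls after each chtls_handlers[opcode]() above).
 */
static void dispatch(struct cdev_toy *dev, struct msg *m)
{
	cpl_handler_t fn = handlers[m->opcode];

	if (!fn || fn(dev, m) != 0)
		free(m);		/* kfree_skb(skb) analogue */
}
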
skb               123 drivers/firewire/net.c 	struct sk_buff *skb;
skb               192 drivers/firewire/net.c 	struct sk_buff *skb;
skb               217 drivers/firewire/net.c static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
skb               223 drivers/firewire/net.c 	h = skb_push(skb, sizeof(*h));
skb               269 drivers/firewire/net.c static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
skb               271 drivers/firewire/net.c 	memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
skb               379 drivers/firewire/net.c 	new->skb = dev_alloc_skb(dg_size + LL_RESERVED_SPACE(net));
skb               380 drivers/firewire/net.c 	if (new->skb == NULL)
skb               383 drivers/firewire/net.c 	skb_reserve(new->skb, LL_RESERVED_SPACE(net));
skb               384 drivers/firewire/net.c 	new->pbuf = skb_put(new->skb, dg_size);
skb               419 drivers/firewire/net.c 	dev_kfree_skb_any(old->skb);
skb               488 drivers/firewire/net.c 					struct sk_buff *skb, u16 source_node_id,
skb               508 drivers/firewire/net.c 	skb->dev = net;
skb               509 drivers/firewire/net.c 	skb->ip_summed = CHECKSUM_NONE;
skb               516 drivers/firewire/net.c 	if (dev_hard_header(skb, net, ether_type,
skb               518 drivers/firewire/net.c 			   NULL, skb->len) >= 0) {
skb               523 drivers/firewire/net.c 		skb_reset_mac_header(skb);
skb               524 drivers/firewire/net.c 		skb_pull(skb, sizeof(*eth));
skb               525 drivers/firewire/net.c 		eth = (struct fwnet_header *)skb_mac_header(skb);
skb               529 drivers/firewire/net.c 				skb->pkt_type = PACKET_BROADCAST;
skb               532 drivers/firewire/net.c 				skb->pkt_type = PACKET_MULTICAST;
skb               536 drivers/firewire/net.c 				skb->pkt_type = PACKET_OTHERHOST;
skb               541 drivers/firewire/net.c 			rawp = (u16 *)skb->data;
skb               547 drivers/firewire/net.c 		skb->protocol = protocol;
skb               549 drivers/firewire/net.c 	status = netif_rx(skb);
skb               555 drivers/firewire/net.c 		net->stats.rx_bytes += skb->len;
skb               564 drivers/firewire/net.c 	dev_kfree_skb_any(skb);
skb               573 drivers/firewire/net.c 	struct sk_buff *skb;
skb               601 drivers/firewire/net.c 		skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
skb               602 drivers/firewire/net.c 		if (unlikely(!skb)) {
skb               607 drivers/firewire/net.c 		skb_reserve(skb, LL_RESERVED_SPACE(net));
skb               608 drivers/firewire/net.c 		skb_put_data(skb, buf, len);
skb               610 drivers/firewire/net.c 		return fwnet_finish_incoming_packet(net, skb, source_node_id,
skb               694 drivers/firewire/net.c 		skb = skb_get(pd->skb);
skb               699 drivers/firewire/net.c 		return fwnet_finish_incoming_packet(net, skb, source_node_id,
skb               818 drivers/firewire/net.c 	dev_kfree_skb_any(ptask->skb);
skb               834 drivers/firewire/net.c 	struct sk_buff *skb = ptask->skb;
skb               849 drivers/firewire/net.c 		dev->netdev->stats.tx_bytes += skb->len;
skb               888 drivers/firewire/net.c 			skb_pull(skb,
skb               891 drivers/firewire/net.c 			skb_pull(skb, ptask->max_payload);
skb               899 drivers/firewire/net.c 			ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
skb               969 drivers/firewire/net.c 		bufhdr = skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
skb               976 drivers/firewire/net.c 		bufhdr = skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
skb               995 drivers/firewire/net.c 		switch (ptask->skb->protocol) {
skb              1005 drivers/firewire/net.c 		p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
skb              1015 drivers/firewire/net.c 				generation, SCODE_100, 0ULL, ptask->skb->data,
skb              1035 drivers/firewire/net.c 			ptask->skb->data, tx_len, fwnet_write_complete, ptask);
skb              1236 drivers/firewire/net.c static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
skb              1262 drivers/firewire/net.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb              1263 drivers/firewire/net.c 	if (!skb)
skb              1270 drivers/firewire/net.c 	memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
skb              1284 drivers/firewire/net.c 	skb_pull(skb, sizeof(hdr_buf));
skb              1285 drivers/firewire/net.c 	dg_size = skb->len;
skb              1321 drivers/firewire/net.c 	ptask->skb = skb;
skb              1358 drivers/firewire/net.c 	if (skb != NULL)
skb              1359 drivers/firewire/net.c 		dev_kfree_skb(skb);
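
The firewire/net.c excerpt walks the canonical receive hand-off: allocate with LL_RESERVED_SPACE() headroom, copy the datagram in, fill in dev/protocol/checksum state, bump the stats, and pass ownership to the stack with netif_rx(). A condensed sketch of that shape, with the fwnet-specific header parsing and fragment reassembly elided:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int deliver_rx(struct net_device *net, const void *buf,
                          unsigned int len, __be16 protocol)
    {
            struct sk_buff *skb;

            skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
            if (unlikely(!skb)) {
                    net->stats.rx_dropped++;
                    return -ENOMEM;
            }
            skb_reserve(skb, LL_RESERVED_SPACE(net));   /* room for a pushed header */
            skb_put_data(skb, buf, len);                /* copy payload, advance tail */

            skb->dev = net;
            skb->protocol = protocol;
            skb->ip_summed = CHECKSUM_NONE;             /* no hardware checksum here */

            net->stats.rx_packets++;
            net->stats.rx_bytes += skb->len;
            return netif_rx(skb);                       /* stack now owns the skb */
    }
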
skb               170 drivers/hsi/clients/ssi_protocol.c static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
skb               176 drivers/hsi/clients/ssi_protocol.c 	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));
skb               179 drivers/hsi/clients/ssi_protocol.c 	sg_set_buf(sg, skb->data, skb_headlen(skb));
skb               180 drivers/hsi/clients/ssi_protocol.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               183 drivers/hsi/clients/ssi_protocol.c 		frag = &skb_shinfo(skb)->frags[i];
skb               191 drivers/hsi/clients/ssi_protocol.c 	struct sk_buff *skb;
skb               193 drivers/hsi/clients/ssi_protocol.c 	skb = msg->context;
skb               195 drivers/hsi/clients/ssi_protocol.c 								skb);
skb               197 drivers/hsi/clients/ssi_protocol.c 	dev_kfree_skb(skb);
skb               202 drivers/hsi/clients/ssi_protocol.c 					struct sk_buff *skb, gfp_t flags)
skb               206 drivers/hsi/clients/ssi_protocol.c 	msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
skb               209 drivers/hsi/clients/ssi_protocol.c 	ssip_skb_to_msg(skb, msg);
skb               212 drivers/hsi/clients/ssi_protocol.c 	msg->context = skb;
skb               577 drivers/hsi/clients/ssi_protocol.c 	struct sk_buff *skb;
skb               590 drivers/hsi/clients/ssi_protocol.c 	skb = dmsg->context;
skb               596 drivers/hsi/clients/ssi_protocol.c 	ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
skb               603 drivers/hsi/clients/ssi_protocol.c 						SSIP_BYTES_TO_FRAMES(skb->len));
skb               609 drivers/hsi/clients/ssi_protocol.c static void ssip_pn_rx(struct sk_buff *skb)
skb               611 drivers/hsi/clients/ssi_protocol.c 	struct net_device *dev = skb->dev;
skb               616 drivers/hsi/clients/ssi_protocol.c 		dev_kfree_skb(skb);
skb               619 drivers/hsi/clients/ssi_protocol.c 	if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
skb               623 drivers/hsi/clients/ssi_protocol.c 		dev_kfree_skb(skb);
skb               627 drivers/hsi/clients/ssi_protocol.c 	dev->stats.rx_bytes += skb->len;
skb               630 drivers/hsi/clients/ssi_protocol.c 	((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
skb               632 drivers/hsi/clients/ssi_protocol.c 			((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));
skb               634 drivers/hsi/clients/ssi_protocol.c 	skb->protocol = htons(ETH_P_PHONET);
skb               635 drivers/hsi/clients/ssi_protocol.c 	skb_reset_mac_header(skb);
skb               636 drivers/hsi/clients/ssi_protocol.c 	__skb_pull(skb, 1);
skb               637 drivers/hsi/clients/ssi_protocol.c 	netif_rx(skb);
skb               644 drivers/hsi/clients/ssi_protocol.c 	struct sk_buff *skb;
skb               653 drivers/hsi/clients/ssi_protocol.c 	skb = msg->context;
skb               654 drivers/hsi/clients/ssi_protocol.c 	ssip_pn_rx(skb);
skb               773 drivers/hsi/clients/ssi_protocol.c 	struct sk_buff *skb;
skb               794 drivers/hsi/clients/ssi_protocol.c 	skb = netdev_alloc_skb(ssi->netdev, len * 4);
skb               795 drivers/hsi/clients/ssi_protocol.c 	if (unlikely(!skb)) {
skb               799 drivers/hsi/clients/ssi_protocol.c 	skb->dev = ssi->netdev;
skb               800 drivers/hsi/clients/ssi_protocol.c 	skb_put(skb, len * 4);
skb               801 drivers/hsi/clients/ssi_protocol.c 	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
skb               811 drivers/hsi/clients/ssi_protocol.c 	dev_kfree_skb(skb);
skb               971 drivers/hsi/clients/ssi_protocol.c static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
skb               977 drivers/hsi/clients/ssi_protocol.c 	if ((skb->protocol != htons(ETH_P_PHONET)) ||
skb               978 drivers/hsi/clients/ssi_protocol.c 					(skb->len < SSIP_MIN_PN_HDR))
skb               981 drivers/hsi/clients/ssi_protocol.c 	if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
skb               988 drivers/hsi/clients/ssi_protocol.c 	if (skb_cow_head(skb, 0))
skb               992 drivers/hsi/clients/ssi_protocol.c 	((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);
skb               994 drivers/hsi/clients/ssi_protocol.c 	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
skb              1028 drivers/hsi/clients/ssi_protocol.c 	dev->stats.tx_bytes += skb->len;
skb              1034 drivers/hsi/clients/ssi_protocol.c 	dev_kfree_skb(skb);
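
ssip_skb_to_msg() maps an skb onto a scatterlist one entry at a time: sg_set_buf() for the linear head, then one sg_set_page() per page fragment. A sketch of that mapping, assuming a v5.4+ tree where skb_frag_off() exists (older code reads frag->page_offset directly); the stock helper skb_to_sgvec() covers the same ground and also walks chained fragments:

    #include <linux/skbuff.h>
    #include <linux/scatterlist.h>

    static int map_skb_to_sg(struct sk_buff *skb, struct scatterlist *sg,
                             unsigned int nents)
    {
            int i;

            if (nents < skb_shinfo(skb)->nr_frags + 1)
                    return -EINVAL;

            sg_init_table(sg, skb_shinfo(skb)->nr_frags + 1);
            sg_set_buf(&sg[0], skb->data, skb_headlen(skb));    /* linear part */

            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                    sg_set_page(&sg[i + 1], skb_frag_page(frag),
                                skb_frag_size(frag), skb_frag_off(frag));
            }
            return 0;
    }
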
skb               131 drivers/infiniband/core/addr.c int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
skb               136 drivers/infiniband/core/addr.c 	    !(NETLINK_CB(skb).sk))
skb               149 drivers/infiniband/core/addr.c 	struct sk_buff *skb = NULL;
skb               168 drivers/infiniband/core/addr.c 	skb = nlmsg_new(len, GFP_KERNEL);
skb               169 drivers/infiniband/core/addr.c 	if (!skb)
skb               172 drivers/infiniband/core/addr.c 	data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
skb               175 drivers/infiniband/core/addr.c 		nlmsg_free(skb);
skb               180 drivers/infiniband/core/addr.c 	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
skb               182 drivers/infiniband/core/addr.c 	nla_put(skb, attrtype, size, daddr);
skb               185 drivers/infiniband/core/addr.c 	nlmsg_end(skb, nlh);
skb               186 drivers/infiniband/core/addr.c 	rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
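
The addr.c excerpt sizes a netlink message, appends a request header with skb_put(), adds attributes, finalizes with nlmsg_end(), and multicasts it. The same construction works with the stock helpers; a sketch with a caller-supplied attribute type (ibnl_put_msg() is essentially a wrapper around nlmsg_put()):

    #include <net/netlink.h>
    #include <net/sock.h>

    static int send_request(struct sock *nlsock, int msgtype, u32 group,
                            const void *payload, int payload_len, int attrtype)
    {
            struct sk_buff *skb;
            struct nlmsghdr *nlh;

            skb = nlmsg_new(nla_total_size(payload_len), GFP_KERNEL);
            if (!skb)
                    return -ENOMEM;

            nlh = nlmsg_put(skb, 0, 0, msgtype, 0, NLM_F_REQUEST);
            if (!nlh) {
                    nlmsg_free(skb);        /* not yet handed to the stack */
                    return -EMSGSIZE;
            }
            if (nla_put(skb, attrtype, payload_len, payload)) {
                    nlmsg_free(skb);
                    return -EMSGSIZE;
            }
            nlmsg_end(skb, nlh);            /* patch nlmsg_len to the final size */

            /* nlmsg_multicast() consumes the skb on success and on failure */
            return nlmsg_multicast(nlsock, skb, 0, group, GFP_KERNEL);
    }
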
skb               105 drivers/infiniband/core/core_priv.h 			      struct sk_buff *skb,
skb               109 drivers/infiniband/core/core_priv.h int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
skb               206 drivers/infiniband/core/core_priv.h int ib_nl_handle_resolve_resp(struct sk_buff *skb,
skb               209 drivers/infiniband/core/core_priv.h int ib_nl_handle_set_timeout(struct sk_buff *skb,
skb               212 drivers/infiniband/core/core_priv.h int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
skb               402 drivers/infiniband/core/core_priv.h int ib_device_set_netns_put(struct sk_buff *skb,
skb              1642 drivers/infiniband/core/device.c int ib_device_set_netns_put(struct sk_buff *skb,
skb              1654 drivers/infiniband/core/device.c 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
skb              2315 drivers/infiniband/core/device.c int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
skb              2325 drivers/infiniband/core/device.c 		if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
skb              2328 drivers/infiniband/core/device.c 		ret = nldev_cb(dev, skb, cb, idx);
skb                65 drivers/infiniband/core/iwpm_msg.c 	struct sk_buff *skb = NULL;
skb                79 drivers/infiniband/core/iwpm_msg.c 	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
skb                80 drivers/infiniband/core/iwpm_msg.c 	if (!skb) {
skb                94 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_REG_PID_SEQ);
skb                97 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, IFNAMSIZ,
skb               101 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, IWPM_DEVNAME_SIZE,
skb               105 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, IWPM_ULIBNAME_SIZE,
skb               110 drivers/infiniband/core/iwpm_msg.c 	nlmsg_end(skb, nlh);
skb               115 drivers/infiniband/core/iwpm_msg.c 	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
skb               117 drivers/infiniband/core/iwpm_msg.c 		skb = NULL; /* skb is freed in the netlink send-op handling */
skb               127 drivers/infiniband/core/iwpm_msg.c 	dev_kfree_skb(skb);
skb               149 drivers/infiniband/core/iwpm_msg.c 	struct sk_buff *skb = NULL;
skb               166 drivers/infiniband/core/iwpm_msg.c 	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
skb               167 drivers/infiniband/core/iwpm_msg.c 	if (!skb) {
skb               180 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,
skb               184 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
skb               195 drivers/infiniband/core/iwpm_msg.c 		ret = ibnl_put_attr(skb, nlh, sizeof(u32), &pm_msg->flags,
skb               201 drivers/infiniband/core/iwpm_msg.c 	nlmsg_end(skb, nlh);
skb               204 drivers/infiniband/core/iwpm_msg.c 	ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
skb               206 drivers/infiniband/core/iwpm_msg.c 		skb = NULL; /* skb is freed in the netlink send-op handling */
skb               216 drivers/infiniband/core/iwpm_msg.c 	dev_kfree_skb(skb);
skb               236 drivers/infiniband/core/iwpm_msg.c 	struct sk_buff *skb = NULL;
skb               254 drivers/infiniband/core/iwpm_msg.c 	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
skb               255 drivers/infiniband/core/iwpm_msg.c 	if (!skb) {
skb               270 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,
skb               274 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
skb               278 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
skb               289 drivers/infiniband/core/iwpm_msg.c 		ret = ibnl_put_attr(skb, nlh, sizeof(u32), &pm_msg->flags,
skb               295 drivers/infiniband/core/iwpm_msg.c 	nlmsg_end(skb, nlh);
skb               298 drivers/infiniband/core/iwpm_msg.c 	ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
skb               300 drivers/infiniband/core/iwpm_msg.c 		skb = NULL; /* skb is freed in the netlink send-op handling */
skb               309 drivers/infiniband/core/iwpm_msg.c 	dev_kfree_skb(skb);
skb               328 drivers/infiniband/core/iwpm_msg.c 	struct sk_buff *skb = NULL;
skb               344 drivers/infiniband/core/iwpm_msg.c 	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
skb               345 drivers/infiniband/core/iwpm_msg.c 	if (!skb) {
skb               353 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,
skb               357 drivers/infiniband/core/iwpm_msg.c 	ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
skb               362 drivers/infiniband/core/iwpm_msg.c 	nlmsg_end(skb, nlh);
skb               364 drivers/infiniband/core/iwpm_msg.c 	ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
skb               366 drivers/infiniband/core/iwpm_msg.c 		skb = NULL; /* skb is freed in the netlink send-op handling */
skb               376 drivers/infiniband/core/iwpm_msg.c 	if (skb)
skb               377 drivers/infiniband/core/iwpm_msg.c 		dev_kfree_skb_any(skb);
skb               401 drivers/infiniband/core/iwpm_msg.c int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
skb               474 drivers/infiniband/core/iwpm_msg.c int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
skb               551 drivers/infiniband/core/iwpm_msg.c int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
skb               635 drivers/infiniband/core/iwpm_msg.c int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
skb               715 drivers/infiniband/core/iwpm_msg.c int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
skb               772 drivers/infiniband/core/iwpm_msg.c int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
skb               802 drivers/infiniband/core/iwpm_msg.c int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
skb               850 drivers/infiniband/core/iwpm_msg.c int iwpm_hello_cb(struct sk_buff *skb, struct netlink_callback *cb)
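
Every iwpm sender above follows one ownership rule: before the send the function frees the skb itself on error, but the moment rdma_nl_unicast()/rdma_nl_multicast() is called the netlink layer owns the buffer, so the local pointer is set to NULL to make the shared error path a no-op. A sketch of that discipline with the generic helpers (the attribute id is hypothetical):

    #include <net/netlink.h>

    static int build_and_send(struct sock *nlsock, u32 pid,
                              const void *data, int len)
    {
            struct sk_buff *skb;
            struct nlmsghdr *nlh;
            int ret = -ENOMEM;

            skb = nlmsg_new(nla_total_size(len), GFP_KERNEL);
            if (!skb)
                    goto out;
            nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, 0, 0);
            if (!nlh)
                    goto out;               /* skb still ours: freed below */
            ret = nla_put(skb, 1 /* hypothetical attr id */, len, data);
            if (ret)
                    goto out;
            nlmsg_end(skb, nlh);

            ret = netlink_unicast(nlsock, skb, pid, 0);
            skb = NULL;                     /* sent or freed by the netlink layer */
            if (ret < 0)
                    goto out;
            return 0;
    out:
            if (skb)
                    nlmsg_free(skb);        /* only failures before the send */
            return ret;
    }
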
skb               485 drivers/infiniband/core/iwpm_util.c 	struct sk_buff *skb = NULL;
skb               487 drivers/infiniband/core/iwpm_util.c 	skb = dev_alloc_skb(IWPM_MSG_SIZE);
skb               488 drivers/infiniband/core/iwpm_util.c 	if (!skb)
skb               491 drivers/infiniband/core/iwpm_util.c 	if (!(ibnl_put_msg(skb, nlh, 0, 0, nl_client, nl_op,
skb               494 drivers/infiniband/core/iwpm_util.c 		dev_kfree_skb(skb);
skb               495 drivers/infiniband/core/iwpm_util.c 		skb = NULL;
skb               498 drivers/infiniband/core/iwpm_util.c 	return skb;
skb               624 drivers/infiniband/core/iwpm_util.c 	struct sk_buff *skb = NULL;
skb               630 drivers/infiniband/core/iwpm_util.c 	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_MAPINFO_NUM, &nlh, nl_client);
skb               631 drivers/infiniband/core/iwpm_util.c 	if (!skb) {
skb               638 drivers/infiniband/core/iwpm_util.c 	ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_MAPINFO_SEQ);
skb               641 drivers/infiniband/core/iwpm_util.c 	ret = ibnl_put_attr(skb, nlh, sizeof(u32),
skb               646 drivers/infiniband/core/iwpm_util.c 	nlmsg_end(skb, nlh);
skb               648 drivers/infiniband/core/iwpm_util.c 	ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
skb               650 drivers/infiniband/core/iwpm_util.c 		skb = NULL;
skb               658 drivers/infiniband/core/iwpm_util.c 	dev_kfree_skb(skb);
skb               662 drivers/infiniband/core/iwpm_util.c static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
skb               667 drivers/infiniband/core/iwpm_util.c 	if (!skb)
skb               669 drivers/infiniband/core/iwpm_util.c 	if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
skb               672 drivers/infiniband/core/iwpm_util.c 		dev_kfree_skb(skb);
skb               676 drivers/infiniband/core/iwpm_util.c 	ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
skb               685 drivers/infiniband/core/iwpm_util.c 	struct sk_buff *skb = NULL;
skb               693 drivers/infiniband/core/iwpm_util.c 	skb = dev_alloc_skb(NLMSG_GOODSIZE);
skb               694 drivers/infiniband/core/iwpm_util.c 	if (!skb) {
skb               708 drivers/infiniband/core/iwpm_util.c 			if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
skb               715 drivers/infiniband/core/iwpm_util.c 			ret = ibnl_put_attr(skb, nlh,
skb               722 drivers/infiniband/core/iwpm_util.c 			ret = ibnl_put_attr(skb, nlh,
skb               730 drivers/infiniband/core/iwpm_util.c 				ret = ibnl_put_attr(skb, nlh, sizeof(u32),
skb               737 drivers/infiniband/core/iwpm_util.c 			nlmsg_end(skb, nlh);
skb               754 drivers/infiniband/core/iwpm_util.c 				ret = send_nlmsg_done(skb, nl_client, iwpm_pid);
skb               755 drivers/infiniband/core/iwpm_util.c 				skb = NULL;
skb               765 drivers/infiniband/core/iwpm_util.c 				skb = dev_alloc_skb(NLMSG_GOODSIZE);
skb               766 drivers/infiniband/core/iwpm_util.c 				if (!skb) {
skb               780 drivers/infiniband/core/iwpm_util.c 		dev_kfree_skb(skb);
skb               783 drivers/infiniband/core/iwpm_util.c 	send_nlmsg_done(skb, nl_client, iwpm_pid);
skb               807 drivers/infiniband/core/iwpm_util.c 	struct sk_buff *skb = NULL;
skb               812 drivers/infiniband/core/iwpm_util.c 	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_HELLO, &nlh, nl_client);
skb               813 drivers/infiniband/core/iwpm_util.c 	if (!skb) {
skb               819 drivers/infiniband/core/iwpm_util.c 	ret = ibnl_put_attr(skb, nlh, sizeof(u16), &abi_version,
skb               823 drivers/infiniband/core/iwpm_util.c 	nlmsg_end(skb, nlh);
skb               825 drivers/infiniband/core/iwpm_util.c 	ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
skb               827 drivers/infiniband/core/iwpm_util.c 		skb = NULL;
skb               835 drivers/infiniband/core/iwpm_util.c 	dev_kfree_skb(skb);
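
send_mapinfo() streams a table that may not fit in one buffer: it packs NLM_F_MULTI messages into an NLMSG_GOODSIZE skb, ships the chunk when the next record no longer fits, allocates a fresh skb, and retries that record. A sketch of the chunking loop under those assumptions (record layout and attribute id invented for illustration):

    #include <net/netlink.h>

    static int dump_records(struct sock *nlsock, u32 pid,
                            const u32 *recs, int nrecs)
    {
            struct sk_buff *skb;
            struct nlmsghdr *nlh;
            int i, ret;

            skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
            if (!skb)
                    return -ENOMEM;

            for (i = 0; i < nrecs; i++) {
                    nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, 0, NLM_F_MULTI);
                    if (!nlh || nla_put_u32(skb, 1 /* hypothetical attr */, recs[i])) {
                            /* Chunk is full: undo the partial message and ship it. */
                            if (nlh)
                                    nlmsg_cancel(skb, nlh);
                            ret = netlink_unicast(nlsock, skb, pid, 0);
                            if (ret < 0)
                                    return ret;     /* skb consumed by the send */
                            skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
                            if (!skb)
                                    return -ENOMEM;
                            i--;                    /* retry the record that did not fit */
                            continue;
                    }
                    nlmsg_end(skb, nlh);
            }
            ret = netlink_unicast(nlsock, skb, pid, 0);     /* final chunk */
            return ret < 0 ? ret : 0;
    }
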
skb                82 drivers/infiniband/core/netlink.c get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
skb                90 drivers/infiniband/core/netlink.c 	if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
skb               131 drivers/infiniband/core/netlink.c void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
skb               134 drivers/infiniband/core/netlink.c 	*nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags);
skb               141 drivers/infiniband/core/netlink.c int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               144 drivers/infiniband/core/netlink.c 	if (nla_put(skb, type, len, data)) {
skb               145 drivers/infiniband/core/netlink.c 		nlmsg_cancel(skb, nlh);
skb               152 drivers/infiniband/core/netlink.c static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               165 drivers/infiniband/core/netlink.c 	cb_table = get_cb_table(skb, index, op);
skb               170 drivers/infiniband/core/netlink.c 	    !netlink_capable(skb, CAP_NET_ADMIN)) {
skb               181 drivers/infiniband/core/netlink.c 			err = cb_table[op].doit(skb, nlh, extack);
skb               190 drivers/infiniband/core/netlink.c 			err = netlink_dump_start(skb->sk, skb, nlh, &c);
skb               195 drivers/infiniband/core/netlink.c 		err = cb_table[op].doit(skb, nlh, extack);
skb               207 drivers/infiniband/core/netlink.c static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
skb               215 drivers/infiniband/core/netlink.c 	while (skb->len >= nlmsg_total_size(0)) {
skb               218 drivers/infiniband/core/netlink.c 		nlh = nlmsg_hdr(skb);
skb               221 drivers/infiniband/core/netlink.c 		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
skb               239 drivers/infiniband/core/netlink.c 		err = cb(skb, nlh, &extack);
skb               245 drivers/infiniband/core/netlink.c 			netlink_ack(skb, nlh, err, &extack);
skb               249 drivers/infiniband/core/netlink.c 		if (msglen > skb->len)
skb               250 drivers/infiniband/core/netlink.c 			msglen = skb->len;
skb               251 drivers/infiniband/core/netlink.c 		skb_pull(skb, msglen);
skb               257 drivers/infiniband/core/netlink.c static void rdma_nl_rcv(struct sk_buff *skb)
skb               259 drivers/infiniband/core/netlink.c 	rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
skb               262 drivers/infiniband/core/netlink.c int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
skb               267 drivers/infiniband/core/netlink.c 	err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
skb               272 drivers/infiniband/core/netlink.c int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
skb               277 drivers/infiniband/core/netlink.c 	err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
skb               282 drivers/infiniband/core/netlink.c int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
skb               287 drivers/infiniband/core/netlink.c 	return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
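
rdma_nl_rcv_skb() is a local variant of the stock netlink_rcv_skb(): walk each nlmsghdr packed into the ingress skb, reject messages whose claimed length is shorter than a header or longer than what remains, hand the message to a callback, then skb_pull() to the next aligned message. A sketch of that walk (the callback type is a stand-in):

    #include <linux/netlink.h>
    #include <net/netlink.h>

    typedef int (*nl_msg_cb)(struct sk_buff *skb, struct nlmsghdr *nlh);

    static void rcv_loop(struct sk_buff *skb, nl_msg_cb cb)
    {
            while (skb->len >= nlmsg_total_size(0)) {
                    struct nlmsghdr *nlh = nlmsg_hdr(skb);
                    int msglen;

                    /* Claimed length must cover the header and fit the skb. */
                    if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
                            return;

                    if (cb(skb, nlh) < 0)
                            return;         /* caller acks/logs the failure */

                    msglen = NLMSG_ALIGN(nlh->nlmsg_len);
                    if (msglen > skb->len)
                            msglen = skb->len;
                    skb_pull(skb, msglen);  /* advance to the next message */
            }
    }
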
skb               797 drivers/infiniband/core/nldev.c static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               813 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb               823 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb               834 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
skb               843 drivers/infiniband/core/nldev.c static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               857 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb               878 drivers/infiniband/core/nldev.c 		err = ib_device_set_netns_put(skb, device, ns_fd);
skb               897 drivers/infiniband/core/nldev.c 			     struct sk_buff *skb,
skb               907 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb               911 drivers/infiniband/core/nldev.c 	if (fill_dev_info(skb, device)) {
skb               912 drivers/infiniband/core/nldev.c 		nlmsg_cancel(skb, nlh);
skb               916 drivers/infiniband/core/nldev.c 	nlmsg_end(skb, nlh);
skb               921 drivers/infiniband/core/nldev.c 	return skb->len;
skb               924 drivers/infiniband/core/nldev.c static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
skb               930 drivers/infiniband/core/nldev.c 	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
skb               933 drivers/infiniband/core/nldev.c static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               951 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb               967 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb               971 drivers/infiniband/core/nldev.c 	err = fill_port_info(msg, device, port, sock_net(skb->sk));
skb               978 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
skb               987 drivers/infiniband/core/nldev.c static int nldev_port_get_dumpit(struct sk_buff *skb,
skb              1005 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
skb              1025 drivers/infiniband/core/nldev.c 		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
skb              1031 drivers/infiniband/core/nldev.c 		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
skb              1032 drivers/infiniband/core/nldev.c 			nlmsg_cancel(skb, nlh);
skb              1036 drivers/infiniband/core/nldev.c 		nlmsg_end(skb, nlh);
skb              1042 drivers/infiniband/core/nldev.c 	return skb->len;
skb              1045 drivers/infiniband/core/nldev.c static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1060 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb              1070 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb              1080 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
skb              1090 drivers/infiniband/core/nldev.c 				 struct sk_buff *skb,
skb              1100 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              1104 drivers/infiniband/core/nldev.c 	if (fill_res_info(skb, device)) {
skb              1105 drivers/infiniband/core/nldev.c 		nlmsg_cancel(skb, nlh);
skb              1108 drivers/infiniband/core/nldev.c 	nlmsg_end(skb, nlh);
skb              1114 drivers/infiniband/core/nldev.c 	return skb->len;
skb              1117 drivers/infiniband/core/nldev.c static int nldev_res_get_dumpit(struct sk_buff *skb,
skb              1120 drivers/infiniband/core/nldev.c 	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
skb              1185 drivers/infiniband/core/nldev.c static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1204 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb              1240 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb              1249 drivers/infiniband/core/nldev.c 	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
skb              1257 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
skb              1268 drivers/infiniband/core/nldev.c static int res_get_common_dumpit(struct sk_buff *skb,
skb              1301 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb              1316 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              1320 drivers/infiniband/core/nldev.c 	if (fill_nldev_handle(skb, device)) {
skb              1325 drivers/infiniband/core/nldev.c 	table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
skb              1331 drivers/infiniband/core/nldev.c 	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);
skb              1351 drivers/infiniband/core/nldev.c 		entry_attr = nla_nest_start_noflag(skb, fe->entry);
skb              1358 drivers/infiniband/core/nldev.c 		ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
skb              1362 drivers/infiniband/core/nldev.c 			nla_nest_cancel(skb, entry_attr);
skb              1369 drivers/infiniband/core/nldev.c 		nla_nest_end(skb, entry_attr);
skb              1376 drivers/infiniband/core/nldev.c 	nla_nest_end(skb, table_attr);
skb              1377 drivers/infiniband/core/nldev.c 	nlmsg_end(skb, nlh);
skb              1388 drivers/infiniband/core/nldev.c 	return skb->len;
skb              1391 drivers/infiniband/core/nldev.c 	nla_nest_cancel(skb, table_attr);
skb              1394 drivers/infiniband/core/nldev.c 	nlmsg_cancel(skb, nlh);
skb              1402 drivers/infiniband/core/nldev.c 	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
skb              1405 drivers/infiniband/core/nldev.c 		return res_get_common_dumpit(skb, cb, type);                   \
skb              1407 drivers/infiniband/core/nldev.c 	static int nldev_res_get_##name##_doit(struct sk_buff *skb,            \
skb              1411 drivers/infiniband/core/nldev.c 		return res_get_common_doit(skb, nlh, extack, type);            \
skb              1456 drivers/infiniband/core/nldev.c static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1482 drivers/infiniband/core/nldev.c 	ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
skb              1503 drivers/infiniband/core/nldev.c static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1517 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb              1530 drivers/infiniband/core/nldev.c static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1551 drivers/infiniband/core/nldev.c 		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
skb              1573 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb              1602 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
skb              1614 drivers/infiniband/core/nldev.c static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1630 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb              1642 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
skb              1645 drivers/infiniband/core/nldev.c static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1666 drivers/infiniband/core/nldev.c static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1687 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb              1702 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb              1742 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
skb              1753 drivers/infiniband/core/nldev.c static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1774 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb              1789 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb              1810 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
skb              1819 drivers/infiniband/core/nldev.c static int stat_get_doit_default_counter(struct sk_buff *skb,
skb              1836 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb              1857 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb              1899 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
skb              1912 drivers/infiniband/core/nldev.c static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1924 drivers/infiniband/core/nldev.c 		return nldev_res_get_counter_doit(skb, nlh, extack);
skb              1931 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
skb              1947 drivers/infiniband/core/nldev.c 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb              1971 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
skb              1980 drivers/infiniband/core/nldev.c static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1992 drivers/infiniband/core/nldev.c 		return stat_get_doit_default_counter(skb, nlh, extack, tb);
skb              1996 drivers/infiniband/core/nldev.c 		ret = stat_get_doit_qp(skb, nlh, extack, tb);
skb              2007 drivers/infiniband/core/nldev.c static int nldev_stat_get_dumpit(struct sk_buff *skb,
skb              2020 drivers/infiniband/core/nldev.c 		ret = nldev_res_get_counter_dumpit(skb, cb);
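
Each nldev doit handler above shares a skeleton: resolve the device in the caller's network namespace, allocate a reply skb, stamp it with the requester's portid and the request's sequence number so user space can correlate it, fill attributes, and unicast it back. A sketch of the reply shape with the generic helpers (the fill step is elided):

    #include <linux/netlink.h>
    #include <net/netlink.h>

    static int reply_doit(struct sock *nlsock, struct sk_buff *req_skb,
                          struct nlmsghdr *req_nlh)
    {
            struct sk_buff *msg;
            struct nlmsghdr *nlh;
            int err;

            msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
            if (!msg)
                    return -ENOMEM;

            /* Echo portid and seq so the requester can match the reply. */
            nlh = nlmsg_put(msg, NETLINK_CB(req_skb).portid, req_nlh->nlmsg_seq,
                            NLMSG_DONE, 0, 0);
            if (!nlh) {
                    nlmsg_free(msg);
                    return -EMSGSIZE;
            }

            /* ... fill attributes into msg here ... */

            nlmsg_end(msg, nlh);
            err = netlink_unicast(nlsock, msg, NETLINK_CB(req_skb).portid,
                                  MSG_DONTWAIT);
            return err < 0 ? err : 0;
    }
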
skb               749 drivers/infiniband/core/sa_query.c static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
skb               762 drivers/infiniband/core/sa_query.c 	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
skb               777 drivers/infiniband/core/sa_query.c 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
skb               781 drivers/infiniband/core/sa_query.c 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
skb               784 drivers/infiniband/core/sa_query.c 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
skb               787 drivers/infiniband/core/sa_query.c 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
skb               792 drivers/infiniband/core/sa_query.c 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
skb               797 drivers/infiniband/core/sa_query.c 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
skb               834 drivers/infiniband/core/sa_query.c 	struct sk_buff *skb = NULL;
skb               845 drivers/infiniband/core/sa_query.c 	skb = nlmsg_new(len, gfp_mask);
skb               846 drivers/infiniband/core/sa_query.c 	if (!skb)
skb               850 drivers/infiniband/core/sa_query.c 	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
skb               853 drivers/infiniband/core/sa_query.c 		nlmsg_free(skb);
skb               858 drivers/infiniband/core/sa_query.c 	ib_nl_set_path_rec_attrs(skb, query);
skb               861 drivers/infiniband/core/sa_query.c 	nlmsg_end(skb, nlh);
skb               863 drivers/infiniband/core/sa_query.c 	return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
skb              1014 drivers/infiniband/core/sa_query.c int ib_nl_handle_set_timeout(struct sk_buff *skb,
skb              1027 drivers/infiniband/core/sa_query.c 	    !(NETLINK_CB(skb).sk))
skb              1090 drivers/infiniband/core/sa_query.c int ib_nl_handle_resolve_resp(struct sk_buff *skb,
skb              1102 drivers/infiniband/core/sa_query.c 	    !(NETLINK_CB(skb).sk))
skb               140 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
skb               141 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if (!skb) {
skb               145 drivers/infiniband/hw/cxgb3/cxio_hal.c 	wqe = skb_put_zero(skb, sizeof(*wqe));
skb               152 drivers/infiniband/hw/cxgb3/cxio_hal.c 	skb->priority = CPL_PRIORITY_CONTROL;
skb               153 drivers/infiniband/hw/cxgb3/cxio_hal.c 	return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
skb               498 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct sk_buff *skb;
skb               500 drivers/infiniband/hw/cxgb3/cxio_hal.c 	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
skb               501 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if (!skb) {
skb               541 drivers/infiniband/hw/cxgb3/cxio_hal.c 	wqe = skb_put_zero(skb, sizeof(*wqe));
skb               552 drivers/infiniband/hw/cxgb3/cxio_hal.c 	skb->priority = CPL_PRIORITY_CONTROL;
skb               553 drivers/infiniband/hw/cxgb3/cxio_hal.c 	return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
skb               555 drivers/infiniband/hw/cxgb3/cxio_hal.c 	kfree_skb(skb);
skb               812 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
skb               813 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if (!skb)
skb               816 drivers/infiniband/hw/cxgb3/cxio_hal.c 	wqe = __skb_put(skb, sizeof(*wqe));
skb               839 drivers/infiniband/hw/cxgb3/cxio_hal.c 	skb->priority = 0;	/* 0=>ToeQ; 1=>CtrlQ */
skb               840 drivers/infiniband/hw/cxgb3/cxio_hal.c 	return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
skb               853 drivers/infiniband/hw/cxgb3/cxio_hal.c static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
skb               857 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
skb               877 drivers/infiniband/hw/cxgb3/cxio_hal.c 		dev_kfree_skb_irq(skb);
skb               879 drivers/infiniband/hw/cxgb3/cxio_hal.c 		dev_kfree_skb_irq(skb);
skb               881 drivers/infiniband/hw/cxgb3/cxio_hal.c 		(*cxio_ev_cb) (rdev_p, skb);
skb               883 drivers/infiniband/hw/cxgb3/cxio_hal.c 		dev_kfree_skb_irq(skb);
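
The cxio_hal excerpts build firmware work requests directly in an skb: allocate just the WR, zero-fill it with skb_put_zero(), set skb->priority to steer the message onto the control queue, and pass it to the offload send path, which consumes the skb. A sketch of that shape; struct t3cdev and CPL_PRIORITY_CONTROL come from the cxgb3 headers, and the WR layout and send hook here are placeholders:

    #include <linux/skbuff.h>

    struct t3cdev;                  /* provided by the cxgb3 headers */

    struct ctrl_wr {                /* hypothetical work-request layout */
            __be32 op;
            __be32 flags;
    };

    static int send_ctrl_wr(struct t3cdev *tdev, __be32 op,
                            int (*ofld_send)(struct t3cdev *, struct sk_buff *))
    {
            struct ctrl_wr *wqe;
            struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);

            if (!skb)
                    return -ENOMEM;

            wqe = skb_put_zero(skb, sizeof(*wqe));  /* reserve and zero the WR */
            wqe->op = op;

            skb->priority = CPL_PRIORITY_CONTROL;   /* route via the control queue */
            return ofld_send(tdev, skb);            /* consumer frees the skb */
    }
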
skb               130 drivers/infiniband/hw/cxgb3/cxio_hal.h 					     struct sk_buff * skb);
skb               196 drivers/infiniband/hw/cxgb3/cxio_hal.h int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb);
skb               153 drivers/infiniband/hw/cxgb3/iwch.h extern void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb);
skb               109 drivers/infiniband/hw/cxgb3/iwch_cm.c static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
skb               137 drivers/infiniband/hw/cxgb3/iwch_cm.c static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
skb               144 drivers/infiniband/hw/cxgb3/iwch_cm.c 		kfree_skb(skb);
skb               147 drivers/infiniband/hw/cxgb3/iwch_cm.c 	error = l2t_send(tdev, skb, l2e);
skb               149 drivers/infiniband/hw/cxgb3/iwch_cm.c 		kfree_skb(skb);
skb               153 drivers/infiniband/hw/cxgb3/iwch_cm.c int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
skb               160 drivers/infiniband/hw/cxgb3/iwch_cm.c 		kfree_skb(skb);
skb               163 drivers/infiniband/hw/cxgb3/iwch_cm.c 	error = cxgb3_ofld_send(tdev, skb);
skb               165 drivers/infiniband/hw/cxgb3/iwch_cm.c 		kfree_skb(skb);
skb               169 drivers/infiniband/hw/cxgb3/iwch_cm.c static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
skb               173 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
skb               174 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb)
skb               176 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
skb               179 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_SETUP;
skb               180 drivers/infiniband/hw/cxgb3/iwch_cm.c 	iwch_cxgb3_ofld_send(tdev, skb);
skb               187 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
skb               189 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb)
skb               191 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
skb               201 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_DATA;
skb               202 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
skb               208 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
skb               210 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb)
skb               212 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
skb               222 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_DATA;
skb               223 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
skb               323 drivers/infiniband/hw/cxgb3/iwch_cm.c static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
skb               325 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
skb               326 drivers/infiniband/hw/cxgb3/iwch_cm.c 		skb_trim(skb, 0);
skb               327 drivers/infiniband/hw/cxgb3/iwch_cm.c 		skb_get(skb);
skb               329 drivers/infiniband/hw/cxgb3/iwch_cm.c 		skb = alloc_skb(len, gfp);
skb               331 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return skb;
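
get_skb() above is a reuse-or-allocate helper: a passed-in skb that is linear and unshared is trimmed to zero and gets an extra reference via skb_get(), so the reference the send path consumes is not the caller's last one; anything else triggers a fresh alloc_skb(). One caveat: the reuse branch keeps the original buffer size, which is why send_mpa_req() separately checks against skb_end_pointer() before writing a larger payload. A commented restatement of those rules:

    #include <linux/skbuff.h>

    static struct sk_buff *reuse_or_alloc(struct sk_buff *skb, int len, gfp_t gfp)
    {
            if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
                    skb_trim(skb, 0);       /* drop old payload, keep the buffer */
                    skb_get(skb);           /* extra ref: the send path drops one */
            } else {
                    skb = alloc_skb(len, gfp);      /* no safe reuse: allocate */
            }
            /* Note: a reused skb may have less than 'len' of room. */
            return skb;
    }
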
skb               358 drivers/infiniband/hw/cxgb3/iwch_cm.c static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
skb               361 drivers/infiniband/hw/cxgb3/iwch_cm.c 	kfree_skb(skb);
skb               367 drivers/infiniband/hw/cxgb3/iwch_cm.c static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
skb               370 drivers/infiniband/hw/cxgb3/iwch_cm.c 	kfree_skb(skb);
skb               377 drivers/infiniband/hw/cxgb3/iwch_cm.c static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
skb               379 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_abort_req *req = cplhdr(skb);
skb               383 drivers/infiniband/hw/cxgb3/iwch_cm.c 	iwch_cxgb3_ofld_send(dev, skb);
skb               389 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb;
skb               392 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), gfp);
skb               393 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb) {
skb               397 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_DATA;
skb               398 drivers/infiniband/hw/cxgb3/iwch_cm.c 	set_arp_failure_handler(skb, arp_failure_discard);
skb               399 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
skb               403 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
skb               406 drivers/infiniband/hw/cxgb3/iwch_cm.c static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
skb               411 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(skb, sizeof(*req), gfp);
skb               412 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb) {
skb               416 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_DATA;
skb               417 drivers/infiniband/hw/cxgb3/iwch_cm.c 	set_arp_failure_handler(skb, abort_arp_failure);
skb               418 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put_zero(skb, sizeof(*req));
skb               423 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
skb               429 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb;
skb               436 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
skb               437 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb) {
skb               453 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_SETUP;
skb               454 drivers/infiniband/hw/cxgb3/iwch_cm.c 	set_arp_failure_handler(skb, act_open_req_arp_failure);
skb               456 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
skb               467 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
skb               470 drivers/infiniband/hw/cxgb3/iwch_cm.c static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
skb               479 drivers/infiniband/hw/cxgb3/iwch_cm.c 	BUG_ON(skb_cloned(skb));
skb               482 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
skb               483 drivers/infiniband/hw/cxgb3/iwch_cm.c 		kfree_skb(skb);
skb               484 drivers/infiniband/hw/cxgb3/iwch_cm.c 		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);

skb               485 drivers/infiniband/hw/cxgb3/iwch_cm.c 		if (!skb) {
skb               490 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_trim(skb, 0);
skb               491 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_reserve(skb, sizeof(*req));
skb               492 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_put(skb, mpalen);
skb               493 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_DATA;
skb               494 drivers/infiniband/hw/cxgb3/iwch_cm.c 	mpa = (struct mpa_message *) skb->data;
skb               510 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_get(skb);
skb               511 drivers/infiniband/hw/cxgb3/iwch_cm.c 	set_arp_failure_handler(skb, arp_failure_discard);
skb               512 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_reset_transport_header(skb);
skb               513 drivers/infiniband/hw/cxgb3/iwch_cm.c 	len = skb->len;
skb               514 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_push(skb, sizeof(*req));
skb               523 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->mpa_skb = skb;
skb               524 drivers/infiniband/hw/cxgb3/iwch_cm.c 	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
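
send_mpa_req() also shows the transmit-side mirror of skb_reserve(): leave header-sized headroom, write the payload, then skb_push() the hardware header in front immediately before the send, so header and payload end up contiguous without a copy. A sketch of that reserve-then-push shape (the header layout is invented):

    #include <linux/skbuff.h>

    struct tx_hdr {                 /* hypothetical hardware header */
            __be32 op_len;
    };

    static struct sk_buff *build_tx(const void *payload, unsigned int len,
                                    gfp_t gfp)
    {
            struct tx_hdr *h;
            struct sk_buff *skb = alloc_skb(sizeof(*h) + len, gfp);

            if (!skb)
                    return NULL;

            skb_reserve(skb, sizeof(*h));           /* headroom for the header */
            skb_put_data(skb, payload, len);        /* payload goes in first */

            h = skb_push(skb, sizeof(*h));          /* header lands in the headroom */
            h->op_len = cpu_to_be32(len);
            return skb;
    }
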
skb               535 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb;
skb               541 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
skb               542 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb) {
skb               546 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_reserve(skb, sizeof(*req));
skb               547 drivers/infiniband/hw/cxgb3/iwch_cm.c 	mpa = skb_put(skb, mpalen);
skb               561 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_get(skb);
skb               562 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_DATA;
skb               563 drivers/infiniband/hw/cxgb3/iwch_cm.c 	set_arp_failure_handler(skb, arp_failure_discard);
skb               564 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_reset_transport_header(skb);
skb               565 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_push(skb, sizeof(*req));
skb               574 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->mpa_skb = skb;
skb               575 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
skb               584 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb;
skb               590 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
skb               591 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb) {
skb               595 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_DATA;
skb               596 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_reserve(skb, sizeof(*req));
skb               597 drivers/infiniband/hw/cxgb3/iwch_cm.c 	mpa = skb_put(skb, mpalen);
skb               612 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_get(skb);
skb               613 drivers/infiniband/hw/cxgb3/iwch_cm.c 	set_arp_failure_handler(skb, arp_failure_discard);
skb               614 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_reset_transport_header(skb);
skb               615 drivers/infiniband/hw/cxgb3/iwch_cm.c 	len = skb->len;
skb               616 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_push(skb, sizeof(*req));
skb               624 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->mpa_skb = skb;
skb               626 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
skb               629 drivers/infiniband/hw/cxgb3/iwch_cm.c static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb               632 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_act_establish *req = cplhdr(skb);
skb               652 drivers/infiniband/hw/cxgb3/iwch_cm.c 	send_mpa_req(ep, skb);
skb               657 drivers/infiniband/hw/cxgb3/iwch_cm.c static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
skb               661 drivers/infiniband/hw/cxgb3/iwch_cm.c 	send_abort(ep, skb, gfp);
skb               792 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb;
skb               795 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
skb               796 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb) {
skb               801 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
skb               805 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_ACK;
skb               806 drivers/infiniband/hw/cxgb3/iwch_cm.c 	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
skb               810 drivers/infiniband/hw/cxgb3/iwch_cm.c static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
skb               833 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
skb               841 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
skb               842 drivers/infiniband/hw/cxgb3/iwch_cm.c 				  skb->len);
skb               843 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->mpa_pkt_len += skb->len;
skb               932 drivers/infiniband/hw/cxgb3/iwch_cm.c 	abort_connection(ep, skb, GFP_KERNEL);
skb               938 drivers/infiniband/hw/cxgb3/iwch_cm.c static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
skb               958 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
skb               959 drivers/infiniband/hw/cxgb3/iwch_cm.c 		abort_connection(ep, skb, GFP_KERNEL);
skb               968 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
skb               969 drivers/infiniband/hw/cxgb3/iwch_cm.c 				  skb->len);
skb               970 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->mpa_pkt_len += skb->len;
skb               985 drivers/infiniband/hw/cxgb3/iwch_cm.c 		abort_connection(ep, skb, GFP_KERNEL);
skb               990 drivers/infiniband/hw/cxgb3/iwch_cm.c 		abort_connection(ep, skb, GFP_KERNEL);
skb              1000 drivers/infiniband/hw/cxgb3/iwch_cm.c 		abort_connection(ep, skb, GFP_KERNEL);
skb              1008 drivers/infiniband/hw/cxgb3/iwch_cm.c 		abort_connection(ep, skb, GFP_KERNEL);
skb              1040 drivers/infiniband/hw/cxgb3/iwch_cm.c static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1043 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_rx_data *hdr = cplhdr(skb);
skb              1048 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_pull(skb, sizeof(*hdr));
skb              1049 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_trim(skb, dlen);
skb              1056 drivers/infiniband/hw/cxgb3/iwch_cm.c 		process_mpa_reply(ep, skb);
skb              1059 drivers/infiniband/hw/cxgb3/iwch_cm.c 		process_mpa_request(ep, skb);
skb              1085 drivers/infiniband/hw/cxgb3/iwch_cm.c static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1088 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_wr_ack *hdr = cplhdr(skb);
skb              1132 drivers/infiniband/hw/cxgb3/iwch_cm.c static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1176 drivers/infiniband/hw/cxgb3/iwch_cm.c static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1179 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
skb              1196 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb;
skb              1200 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
skb              1201 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb) {
skb              1206 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
skb              1218 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = 1;
skb              1219 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
skb              1222 drivers/infiniband/hw/cxgb3/iwch_cm.c static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1225 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
skb              1238 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb;
skb              1242 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
skb              1243 drivers/infiniband/hw/cxgb3/iwch_cm.c 	if (!skb) {
skb              1247 drivers/infiniband/hw/cxgb3/iwch_cm.c 	req = skb_put(skb, sizeof(*req));
skb              1251 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = 1;
skb              1252 drivers/infiniband/hw/cxgb3/iwch_cm.c 	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
skb              1255 drivers/infiniband/hw/cxgb3/iwch_cm.c static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
skb              1259 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
skb              1268 drivers/infiniband/hw/cxgb3/iwch_cm.c static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
skb              1276 drivers/infiniband/hw/cxgb3/iwch_cm.c 	BUG_ON(skb_cloned(skb));
skb              1277 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_trim(skb, sizeof(*rpl));
skb              1278 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_get(skb);
skb              1292 drivers/infiniband/hw/cxgb3/iwch_cm.c 	rpl = cplhdr(skb);
skb              1300 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb->priority = CPL_PRIORITY_SETUP;
skb              1301 drivers/infiniband/hw/cxgb3/iwch_cm.c 	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
skb              1307 drivers/infiniband/hw/cxgb3/iwch_cm.c 		      struct sk_buff *skb)
skb              1311 drivers/infiniband/hw/cxgb3/iwch_cm.c 	BUG_ON(skb_cloned(skb));
skb              1312 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_trim(skb, sizeof(struct cpl_tid_release));
skb              1313 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_get(skb);
skb              1316 drivers/infiniband/hw/cxgb3/iwch_cm.c 		release_tid(tdev, hwtid, skb);
skb              1320 drivers/infiniband/hw/cxgb3/iwch_cm.c 		rpl = cplhdr(skb);
skb              1321 drivers/infiniband/hw/cxgb3/iwch_cm.c 		skb->priority = CPL_PRIORITY_SETUP;
skb              1330 drivers/infiniband/hw/cxgb3/iwch_cm.c 		iwch_cxgb3_ofld_send(tdev, skb);
skb              1334 drivers/infiniband/hw/cxgb3/iwch_cm.c static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1337 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_pass_accept_req *req = cplhdr(skb);
skb              1402 drivers/infiniband/hw/cxgb3/iwch_cm.c 	accept_cr(child_ep, req->peer_ip, skb);
skb              1405 drivers/infiniband/hw/cxgb3/iwch_cm.c 	reject_cr(tdev, hwtid, req->peer_ip, skb);
skb              1410 drivers/infiniband/hw/cxgb3/iwch_cm.c static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1413 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_pass_establish *req = cplhdr(skb);
skb              1428 drivers/infiniband/hw/cxgb3/iwch_cm.c static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1519 drivers/infiniband/hw/cxgb3/iwch_cm.c static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1521 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_abort_req_rss *req = cplhdr(skb);
skb              1608 drivers/infiniband/hw/cxgb3/iwch_cm.c 	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
skb              1627 drivers/infiniband/hw/cxgb3/iwch_cm.c static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1681 drivers/infiniband/hw/cxgb3/iwch_cm.c static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1689 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
skb              1690 drivers/infiniband/hw/cxgb3/iwch_cm.c 	pr_debug("%s saving %d bytes of term msg\n", __func__, skb->len);
skb              1691 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
skb              1692 drivers/infiniband/hw/cxgb3/iwch_cm.c 				  skb->len);
skb              1693 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->com.qp->attr.terminate_msg_len = skb->len;
skb              1698 drivers/infiniband/hw/cxgb3/iwch_cm.c static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              1700 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_rdma_ec_status *rep = cplhdr(skb);
skb              2168 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct sk_buff *skb = NULL;
skb              2173 drivers/infiniband/hw/cxgb3/iwch_cm.c 	while ((skb = skb_dequeue(&rxq))) {
skb              2174 drivers/infiniband/hw/cxgb3/iwch_cm.c 		ep = *((void **) (skb->cb));
skb              2175 drivers/infiniband/hw/cxgb3/iwch_cm.c 		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
skb              2176 drivers/infiniband/hw/cxgb3/iwch_cm.c 		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
skb              2178 drivers/infiniband/hw/cxgb3/iwch_cm.c 			kfree_skb(skb);
skb              2189 drivers/infiniband/hw/cxgb3/iwch_cm.c static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              2198 drivers/infiniband/hw/cxgb3/iwch_cm.c 	*((void **) skb->cb) = ctx;
skb              2199 drivers/infiniband/hw/cxgb3/iwch_cm.c 	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;
skb              2204 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb_queue_tail(&rxq, skb);
skb              2209 drivers/infiniband/hw/cxgb3/iwch_cm.c static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb              2211 drivers/infiniband/hw/cxgb3/iwch_cm.c 	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
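
The iwch_cm.c excerpt above ends with a stash-and-defer idiom: sched() stores the caller's context and the t3cdev pointer in the first two pointer slots of skb->cb, queues the skb on a private rxq, and a worker later dequeues it and dispatches on the CPL opcode recovered from skb->csum. A minimal sketch of that idiom, assuming the hypothetical names deferred_handlers[] and cpl_opcode():

#include <linux/skbuff.h>

struct t3cdev;					/* opaque here */

/* Hypothetical: dispatch table and opcode decoder (the driver derives
 * the opcode from skb->csum). */
extern int (*deferred_handlers[])(struct t3cdev *tdev,
				  struct sk_buff *skb, void *ctx);
extern unsigned int cpl_opcode(struct sk_buff *skb);

static struct sk_buff_head rxq;			/* drained by a workqueue */

static int defer_cpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	*((void **)skb->cb) = ctx;		/* slot 0: caller context */
	*((struct t3cdev **)(skb->cb + sizeof(void *))) = tdev; /* slot 1 */
	skb_queue_tail(&rxq, skb);		/* the queue now owns the skb */
	return 0;
}

static void process_deferred(void)		/* body of the work handler */
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&rxq))) {
		void *ctx = *((void **)skb->cb);
		struct t3cdev *tdev =
			*((struct t3cdev **)(skb->cb + sizeof(void *)));

		if (deferred_handlers[cpl_opcode(skb)](tdev, skb, ctx))
			kfree_skb(skb);		/* handler requested a free */
	}
}
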
skb               107 drivers/infiniband/hw/cxgb3/iwch_ev.c void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
skb               110 drivers/infiniband/hw/cxgb3/iwch_ev.c 	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
skb               231 drivers/infiniband/hw/cxgb3/iwch_ev.c 	dev_kfree_skb_irq(skb);
skb               664 drivers/infiniband/hw/cxgb3/iwch_qp.c 	struct sk_buff *skb;
skb               668 drivers/infiniband/hw/cxgb3/iwch_qp.c 	skb = alloc_skb(40, GFP_KERNEL);
skb               669 drivers/infiniband/hw/cxgb3/iwch_qp.c 	if (!skb) {
skb               673 drivers/infiniband/hw/cxgb3/iwch_qp.c 	wqe = skb_put_zero(skb, sizeof(struct t3_rdma_read_wr));
skb               685 drivers/infiniband/hw/cxgb3/iwch_qp.c 	skb->priority = CPL_PRIORITY_DATA;
skb               686 drivers/infiniband/hw/cxgb3/iwch_qp.c 	return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
skb               696 drivers/infiniband/hw/cxgb3/iwch_qp.c 	struct sk_buff *skb;
skb               699 drivers/infiniband/hw/cxgb3/iwch_qp.c 	skb = alloc_skb(40, GFP_ATOMIC);
skb               700 drivers/infiniband/hw/cxgb3/iwch_qp.c 	if (!skb) {
skb               704 drivers/infiniband/hw/cxgb3/iwch_qp.c 	wqe = skb_put_zero(skb, 40);
skb               716 drivers/infiniband/hw/cxgb3/iwch_qp.c 	skb->priority = CPL_PRIORITY_DATA;
skb               717 drivers/infiniband/hw/cxgb3/iwch_qp.c 	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
skb               142 drivers/infiniband/hw/cxgb4/cm.c static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
skb               145 drivers/infiniband/hw/cxgb4/cm.c static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
skb               202 drivers/infiniband/hw/cxgb4/cm.c static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
skb               208 drivers/infiniband/hw/cxgb4/cm.c 		kfree_skb(skb);
skb               212 drivers/infiniband/hw/cxgb4/cm.c 	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
skb               214 drivers/infiniband/hw/cxgb4/cm.c 		kfree_skb(skb);
skb               220 drivers/infiniband/hw/cxgb4/cm.c int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
skb               225 drivers/infiniband/hw/cxgb4/cm.c 		kfree_skb(skb);
skb               229 drivers/infiniband/hw/cxgb4/cm.c 	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
skb               231 drivers/infiniband/hw/cxgb4/cm.c 		kfree_skb(skb);
skb               235 drivers/infiniband/hw/cxgb4/cm.c static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
skb               239 drivers/infiniband/hw/cxgb4/cm.c 	skb = get_skb(skb, len, GFP_KERNEL);
skb               240 drivers/infiniband/hw/cxgb4/cm.c 	if (!skb)
skb               243 drivers/infiniband/hw/cxgb4/cm.c 	cxgb_mk_tid_release(skb, len, hwtid, 0);
skb               244 drivers/infiniband/hw/cxgb4/cm.c 	c4iw_ofld_send(rdev, skb);
skb               292 drivers/infiniband/hw/cxgb4/cm.c 	struct sk_buff *skb;
skb               298 drivers/infiniband/hw/cxgb4/cm.c 		skb = alloc_skb(len, GFP_KERNEL);
skb               299 drivers/infiniband/hw/cxgb4/cm.c 		if (!skb)
skb               301 drivers/infiniband/hw/cxgb4/cm.c 		skb_queue_tail(ep_skb_list, skb);
skb               455 drivers/infiniband/hw/cxgb4/cm.c static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
skb               457 drivers/infiniband/hw/cxgb4/cm.c 	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
skb               458 drivers/infiniband/hw/cxgb4/cm.c 		skb_trim(skb, 0);
skb               459 drivers/infiniband/hw/cxgb4/cm.c 		skb_get(skb);
skb               460 drivers/infiniband/hw/cxgb4/cm.c 		skb_reset_transport_header(skb);
skb               462 drivers/infiniband/hw/cxgb4/cm.c 		skb = alloc_skb(len, gfp);
skb               463 drivers/infiniband/hw/cxgb4/cm.c 		if (!skb)
skb               466 drivers/infiniband/hw/cxgb4/cm.c 	t4_set_arp_err_handler(skb, NULL, NULL);
skb               467 drivers/infiniband/hw/cxgb4/cm.c 	return skb;
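
get_skb() above is a reuse-or-allocate helper: a caller-supplied buffer is recycled only when it is linear and unshared, otherwise a fresh skb is allocated; the driver additionally clears its ARP error handler before returning. A sketch of the same check (reuse_or_alloc_skb is an illustrative name):

#include <linux/skbuff.h>

static struct sk_buff *reuse_or_alloc_skb(struct sk_buff *skb, int len,
					  gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);		/* drop any stale payload */
		skb_get(skb);			/* extra ref; caller's stays valid */
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (!skb)
			return NULL;
	}
	return skb;
}
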
skb               475 drivers/infiniband/hw/cxgb4/cm.c static void arp_failure_discard(void *handle, struct sk_buff *skb)
skb               478 drivers/infiniband/hw/cxgb4/cm.c 	kfree_skb(skb);
skb               481 drivers/infiniband/hw/cxgb4/cm.c static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
skb               492 drivers/infiniband/hw/cxgb4/cm.c static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
skb               496 drivers/infiniband/hw/cxgb4/cm.c 	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
skb               501 drivers/infiniband/hw/cxgb4/cm.c static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
skb               505 drivers/infiniband/hw/cxgb4/cm.c 	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
skb               517 drivers/infiniband/hw/cxgb4/cm.c static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
skb               520 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_act_establish *rpl = cplhdr(skb);
skb               529 drivers/infiniband/hw/cxgb4/cm.c 	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
skb               530 drivers/infiniband/hw/cxgb4/cm.c 	sched(ep->com.dev, skb);
skb               534 drivers/infiniband/hw/cxgb4/cm.c static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
skb               542 drivers/infiniband/hw/cxgb4/cm.c 	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
skb               548 drivers/infiniband/hw/cxgb4/cm.c static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
skb               563 drivers/infiniband/hw/cxgb4/cm.c 	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
skb               570 drivers/infiniband/hw/cxgb4/cm.c static void abort_arp_failure(void *handle, struct sk_buff *skb)
skb               575 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_abort_req *req = cplhdr(skb);
skb               579 drivers/infiniband/hw/cxgb4/cm.c 	skb_get(skb);
skb               580 drivers/infiniband/hw/cxgb4/cm.c 	ret = c4iw_ofld_send(rdev, skb);
skb               583 drivers/infiniband/hw/cxgb4/cm.c 		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
skb               585 drivers/infiniband/hw/cxgb4/cm.c 		kfree_skb(skb);
skb               591 drivers/infiniband/hw/cxgb4/cm.c 	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
skb               596 drivers/infiniband/hw/cxgb4/cm.c 	if (WARN_ON(!skb))
skb               608 drivers/infiniband/hw/cxgb4/cm.c 	flowc = __skb_put(skb, flowclen);
skb               642 drivers/infiniband/hw/cxgb4/cm.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
skb               643 drivers/infiniband/hw/cxgb4/cm.c 	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
skb               648 drivers/infiniband/hw/cxgb4/cm.c 	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
skb               652 drivers/infiniband/hw/cxgb4/cm.c 	if (WARN_ON(!skb))
skb               655 drivers/infiniband/hw/cxgb4/cm.c 	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
skb               658 drivers/infiniband/hw/cxgb4/cm.c 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
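
Several teardown paths above (the FLOWC and close-connection sends here, and the cxgb4 qp.c paths later in this listing) dequeue from a per-endpoint ep_skb_list that was filled with alloc_skb() at connection setup, so shutdown messages never depend on the allocator succeeding. A hedged sketch of that reservation idiom; the helper names and the reserve count are assumptions:

#include <linux/errno.h>
#include <linux/skbuff.h>

#define EP_SKB_RESERVE 4		/* assumption: per-endpoint count */

static int reserve_ep_skbs(struct sk_buff_head *ep_skb_list, int len)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < EP_SKB_RESERVE; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;	/* caller purges the partial list */
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
}

/* On a teardown path that must not fail: */
static struct sk_buff *take_reserved_skb(struct sk_buff_head *ep_skb_list)
{
	struct sk_buff *skb = skb_dequeue(ep_skb_list);

	WARN_ON(!skb);			/* an empty reserve is a driver bug */
	return skb;
}
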
skb               663 drivers/infiniband/hw/cxgb4/cm.c 	struct sk_buff *skb;
skb               667 drivers/infiniband/hw/cxgb4/cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
skb               668 drivers/infiniband/hw/cxgb4/cm.c 	if (WARN_ON(!skb))
skb               671 drivers/infiniband/hw/cxgb4/cm.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
skb               672 drivers/infiniband/hw/cxgb4/cm.c 	req = (struct cpl_get_tcb *) skb_put(skb, wrlen);
skb               683 drivers/infiniband/hw/cxgb4/cm.c 	if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
skb               721 drivers/infiniband/hw/cxgb4/cm.c 	struct sk_buff *skb;
skb               768 drivers/infiniband/hw/cxgb4/cm.c 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
skb               769 drivers/infiniband/hw/cxgb4/cm.c 	if (!skb) {
skb               773 drivers/infiniband/hw/cxgb4/cm.c 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
skb               823 drivers/infiniband/hw/cxgb4/cm.c 	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
skb               828 drivers/infiniband/hw/cxgb4/cm.c 			req = skb_put(skb, wrlen);
skb               832 drivers/infiniband/hw/cxgb4/cm.c 			t5req = skb_put(skb, wrlen);
skb               837 drivers/infiniband/hw/cxgb4/cm.c 			t6req = skb_put(skb, wrlen);
skb               878 drivers/infiniband/hw/cxgb4/cm.c 			req6 = skb_put(skb, wrlen);
skb               882 drivers/infiniband/hw/cxgb4/cm.c 			t5req6 = skb_put(skb, wrlen);
skb               887 drivers/infiniband/hw/cxgb4/cm.c 			t6req6 = skb_put(skb, wrlen);
skb               932 drivers/infiniband/hw/cxgb4/cm.c 	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
skb               940 drivers/infiniband/hw/cxgb4/cm.c static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
skb               955 drivers/infiniband/hw/cxgb4/cm.c 	skb = get_skb(skb, wrlen, GFP_KERNEL);
skb               956 drivers/infiniband/hw/cxgb4/cm.c 	if (!skb) {
skb               960 drivers/infiniband/hw/cxgb4/cm.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
skb               962 drivers/infiniband/hw/cxgb4/cm.c 	req = skb_put_zero(skb, wrlen);
skb              1032 drivers/infiniband/hw/cxgb4/cm.c 	skb_get(skb);
skb              1033 drivers/infiniband/hw/cxgb4/cm.c 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
skb              1034 drivers/infiniband/hw/cxgb4/cm.c 	ep->mpa_skb = skb;
skb              1035 drivers/infiniband/hw/cxgb4/cm.c 	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
skb              1050 drivers/infiniband/hw/cxgb4/cm.c 	struct sk_buff *skb;
skb              1061 drivers/infiniband/hw/cxgb4/cm.c 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
skb              1062 drivers/infiniband/hw/cxgb4/cm.c 	if (!skb) {
skb              1066 drivers/infiniband/hw/cxgb4/cm.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
skb              1068 drivers/infiniband/hw/cxgb4/cm.c 	req = skb_put_zero(skb, wrlen);
skb              1117 drivers/infiniband/hw/cxgb4/cm.c 	skb_get(skb);
skb              1118 drivers/infiniband/hw/cxgb4/cm.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
skb              1119 drivers/infiniband/hw/cxgb4/cm.c 	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
skb              1120 drivers/infiniband/hw/cxgb4/cm.c 	ep->mpa_skb = skb;
skb              1122 drivers/infiniband/hw/cxgb4/cm.c 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
skb              1130 drivers/infiniband/hw/cxgb4/cm.c 	struct sk_buff *skb;
skb              1141 drivers/infiniband/hw/cxgb4/cm.c 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
skb              1142 drivers/infiniband/hw/cxgb4/cm.c 	if (!skb) {
skb              1146 drivers/infiniband/hw/cxgb4/cm.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
skb              1148 drivers/infiniband/hw/cxgb4/cm.c 	req = skb_put_zero(skb, wrlen);
skb              1206 drivers/infiniband/hw/cxgb4/cm.c 	skb_get(skb);
skb              1207 drivers/infiniband/hw/cxgb4/cm.c 	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
skb              1208 drivers/infiniband/hw/cxgb4/cm.c 	ep->mpa_skb = skb;
skb              1211 drivers/infiniband/hw/cxgb4/cm.c 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
skb              1214 drivers/infiniband/hw/cxgb4/cm.c static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
skb              1217 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_act_establish *req = cplhdr(skb);
skb              1253 drivers/infiniband/hw/cxgb4/cm.c 		ret = send_mpa_req(ep, skb, 1);
skb              1255 drivers/infiniband/hw/cxgb4/cm.c 		ret = send_mpa_req(ep, skb, mpa_rev);
skb              1415 drivers/infiniband/hw/cxgb4/cm.c 	struct sk_buff *skb;
skb              1421 drivers/infiniband/hw/cxgb4/cm.c 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
skb              1422 drivers/infiniband/hw/cxgb4/cm.c 	if (!skb) {
skb              1438 drivers/infiniband/hw/cxgb4/cm.c 	cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
skb              1441 drivers/infiniband/hw/cxgb4/cm.c 	c4iw_ofld_send(&ep->com.dev->rdev, skb);
skb              1459 drivers/infiniband/hw/cxgb4/cm.c static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
skb              1477 drivers/infiniband/hw/cxgb4/cm.c 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
skb              1485 drivers/infiniband/hw/cxgb4/cm.c 	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
skb              1486 drivers/infiniband/hw/cxgb4/cm.c 				  skb->len);
skb              1487 drivers/infiniband/hw/cxgb4/cm.c 	ep->mpa_pkt_len += skb->len;
skb              1705 drivers/infiniband/hw/cxgb4/cm.c static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
skb              1717 drivers/infiniband/hw/cxgb4/cm.c 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
skb              1725 drivers/infiniband/hw/cxgb4/cm.c 	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
skb              1726 drivers/infiniband/hw/cxgb4/cm.c 				  skb->len);
skb              1727 drivers/infiniband/hw/cxgb4/cm.c 	ep->mpa_pkt_len += skb->len;
skb              1844 drivers/infiniband/hw/cxgb4/cm.c static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
skb              1847 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_rx_data *hdr = cplhdr(skb);
skb              1857 drivers/infiniband/hw/cxgb4/cm.c 	skb_pull(skb, sizeof(*hdr));
skb              1858 drivers/infiniband/hw/cxgb4/cm.c 	skb_trim(skb, dlen);
skb              1865 drivers/infiniband/hw/cxgb4/cm.c 		disconnect = process_mpa_reply(ep, skb);
skb              1870 drivers/infiniband/hw/cxgb4/cm.c 		disconnect = process_mpa_request(ep, skb);
skb              1917 drivers/infiniband/hw/cxgb4/cm.c static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
skb              1921 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
skb              1960 drivers/infiniband/hw/cxgb4/cm.c 	struct sk_buff *skb;
skb              1967 drivers/infiniband/hw/cxgb4/cm.c 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
skb              1968 drivers/infiniband/hw/cxgb4/cm.c 	req = __skb_put_zero(skb, sizeof(*req));
skb              2025 drivers/infiniband/hw/cxgb4/cm.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
skb              2027 drivers/infiniband/hw/cxgb4/cm.c 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
skb              2264 drivers/infiniband/hw/cxgb4/cm.c static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
skb              2267 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
skb              2377 drivers/infiniband/hw/cxgb4/cm.c static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
skb              2379 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
skb              2395 drivers/infiniband/hw/cxgb4/cm.c static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
skb              2397 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
skb              2412 drivers/infiniband/hw/cxgb4/cm.c static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
skb              2471 drivers/infiniband/hw/cxgb4/cm.c 	skb_get(skb);
skb              2472 drivers/infiniband/hw/cxgb4/cm.c 	rpl = cplhdr(skb);
skb              2474 drivers/infiniband/hw/cxgb4/cm.c 		skb_trim(skb, roundup(sizeof(*rpl5), 16));
skb              2478 drivers/infiniband/hw/cxgb4/cm.c 		skb_trim(skb, sizeof(*rpl));
skb              2499 drivers/infiniband/hw/cxgb4/cm.c 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
skb              2500 drivers/infiniband/hw/cxgb4/cm.c 	t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
skb              2502 drivers/infiniband/hw/cxgb4/cm.c 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
skb              2505 drivers/infiniband/hw/cxgb4/cm.c static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
skb              2508 drivers/infiniband/hw/cxgb4/cm.c 	skb_trim(skb, sizeof(struct cpl_tid_release));
skb              2509 drivers/infiniband/hw/cxgb4/cm.c 	release_tid(&dev->rdev, hwtid, skb);
skb              2513 drivers/infiniband/hw/cxgb4/cm.c static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
skb              2516 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_pass_accept_req *req = cplhdr(skb);
skb              2654 drivers/infiniband/hw/cxgb4/cm.c 	if (accept_cr(child_ep, skb, req)) {
skb              2669 drivers/infiniband/hw/cxgb4/cm.c 	reject_cr(dev, hwtid, skb);
skb              2676 drivers/infiniband/hw/cxgb4/cm.c static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
skb              2679 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_pass_establish *req = cplhdr(skb);
skb              2708 drivers/infiniband/hw/cxgb4/cm.c static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
skb              2710 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_peer_close *hdr = cplhdr(skb);
skb              2812 drivers/infiniband/hw/cxgb4/cm.c static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
skb              2814 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_abort_req_rss6 *req = cplhdr(skb);
skb              2972 drivers/infiniband/hw/cxgb4/cm.c static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
skb              2976 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_close_con_rpl *rpl = cplhdr(skb);
skb              3020 drivers/infiniband/hw/cxgb4/cm.c static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
skb              3022 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_rdma_terminate *rpl = cplhdr(skb);
skb              3054 drivers/infiniband/hw/cxgb4/cm.c static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
skb              3057 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_fw4_ack *hdr = cplhdr(skb);
skb              3744 drivers/infiniband/hw/cxgb4/cm.c static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
skb              3795 drivers/infiniband/hw/cxgb4/cm.c static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
skb              3844 drivers/infiniband/hw/cxgb4/cm.c static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
skb              3846 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_get_tcb_rpl *rpl = cplhdr(skb);
skb              3895 drivers/infiniband/hw/cxgb4/cm.c static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
skb              3897 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_fw6_msg *rpl = cplhdr(skb);
skb              3908 drivers/infiniband/hw/cxgb4/cm.c 			active_ofld_conn_reply(dev, skb, req);
skb              3911 drivers/infiniband/hw/cxgb4/cm.c 			passive_ofld_conn_reply(dev, skb, req);
skb              3923 drivers/infiniband/hw/cxgb4/cm.c static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
skb              3930 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_rx_pkt *cpl = cplhdr(skb);
skb              3936 drivers/infiniband/hw/cxgb4/cm.c 	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
skb              3944 drivers/infiniband/hw/cxgb4/cm.c 	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
skb              3952 drivers/infiniband/hw/cxgb4/cm.c 	tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);
skb              3954 drivers/infiniband/hw/cxgb4/cm.c 	req = __skb_push(skb, sizeof(*req));
skb              3992 drivers/infiniband/hw/cxgb4/cm.c static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
skb              4000 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_pass_accept_req *cpl = cplhdr(skb);
skb              4036 drivers/infiniband/hw/cxgb4/cm.c 	req->cookie = (uintptr_t)skb;
skb              4043 drivers/infiniband/hw/cxgb4/cm.c 		kfree_skb(skb);
skb              4055 drivers/infiniband/hw/cxgb4/cm.c static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
skb              4063 drivers/infiniband/hw/cxgb4/cm.c 	struct rss_header *rss = (void *)skb->data;
skb              4064 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_rx_pkt *cpl = (void *)skb->data;
skb              4121 drivers/infiniband/hw/cxgb4/cm.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
skb              4128 drivers/infiniband/hw/cxgb4/cm.c 	skb_set_network_header(skb, (void *)iph - (void *)rss);
skb              4129 drivers/infiniband/hw/cxgb4/cm.c 	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
skb              4130 drivers/infiniband/hw/cxgb4/cm.c 	skb_get(skb);
skb              4143 drivers/infiniband/hw/cxgb4/cm.c 	neigh = dst_neigh_lookup_skb(dst, skb);
skb              4183 drivers/infiniband/hw/cxgb4/cm.c 	build_cpl_pass_accept_req(skb, stid, iph->tos);
skb              4184 drivers/infiniband/hw/cxgb4/cm.c 	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
skb              4292 drivers/infiniband/hw/cxgb4/cm.c 	struct sk_buff *skb = NULL;
skb              4299 drivers/infiniband/hw/cxgb4/cm.c 	while ((skb = skb_dequeue(&rxq))) {
skb              4300 drivers/infiniband/hw/cxgb4/cm.c 		rpl = cplhdr(skb);
skb              4301 drivers/infiniband/hw/cxgb4/cm.c 		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
skb              4307 drivers/infiniband/hw/cxgb4/cm.c 			kfree_skb(skb);
skb              4309 drivers/infiniband/hw/cxgb4/cm.c 			ret = work_handlers[opcode](dev, skb);
skb              4311 drivers/infiniband/hw/cxgb4/cm.c 				kfree_skb(skb);
skb              4342 drivers/infiniband/hw/cxgb4/cm.c static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
skb              4348 drivers/infiniband/hw/cxgb4/cm.c 	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
skb              4353 drivers/infiniband/hw/cxgb4/cm.c 	skb_queue_tail(&rxq, skb);
skb              4358 drivers/infiniband/hw/cxgb4/cm.c static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
skb              4360 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
skb              4366 drivers/infiniband/hw/cxgb4/cm.c 	kfree_skb(skb);
skb              4370 drivers/infiniband/hw/cxgb4/cm.c static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
skb              4372 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_fw6_msg *rpl = cplhdr(skb);
skb              4385 drivers/infiniband/hw/cxgb4/cm.c 		kfree_skb(skb);
skb              4389 drivers/infiniband/hw/cxgb4/cm.c 		sched(dev, skb);
skb              4394 drivers/infiniband/hw/cxgb4/cm.c 		kfree_skb(skb);
skb              4400 drivers/infiniband/hw/cxgb4/cm.c static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
skb              4402 drivers/infiniband/hw/cxgb4/cm.c 	struct cpl_abort_req_rss *req = cplhdr(skb);
skb              4410 drivers/infiniband/hw/cxgb4/cm.c 		kfree_skb(skb);
skb              4423 drivers/infiniband/hw/cxgb4/cm.c 	sched(dev, skb);
skb                38 drivers/infiniband/hw/cxgb4/cq.c 		       struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
skb                46 drivers/infiniband/hw/cxgb4/cq.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
skb                48 drivers/infiniband/hw/cxgb4/cq.c 	res_wr = __skb_put_zero(skb, wr_len);
skb                61 drivers/infiniband/hw/cxgb4/cq.c 	c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
skb                79 drivers/infiniband/hw/cxgb4/cq.c 	struct sk_buff *skb;
skb               119 drivers/infiniband/hw/cxgb4/cq.c 	skb = alloc_skb(wr_len, GFP_KERNEL);
skb               120 drivers/infiniband/hw/cxgb4/cq.c 	if (!skb) {
skb               124 drivers/infiniband/hw/cxgb4/cq.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
skb               126 drivers/infiniband/hw/cxgb4/cq.c 	res_wr = __skb_put_zero(skb, wr_len);
skb               155 drivers/infiniband/hw/cxgb4/cq.c 	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
skb              1107 drivers/infiniband/hw/cxgb4/device.c 	struct sk_buff *skb;
skb              1116 drivers/infiniband/hw/cxgb4/device.c 	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
skb              1118 drivers/infiniband/hw/cxgb4/device.c 	if (unlikely(!skb))
skb              1121 drivers/infiniband/hw/cxgb4/device.c 	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
skb              1132 drivers/infiniband/hw/cxgb4/device.c 	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
skb              1134 drivers/infiniband/hw/cxgb4/device.c 	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
skb              1138 drivers/infiniband/hw/cxgb4/device.c 	return skb;
skb              1145 drivers/infiniband/hw/cxgb4/device.c 	struct sk_buff *skb;
skb              1150 drivers/infiniband/hw/cxgb4/device.c 	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
skb              1151 drivers/infiniband/hw/cxgb4/device.c 	if (skb == NULL)
skb              1156 drivers/infiniband/hw/cxgb4/device.c 		kfree_skb(skb);
skb              1159 drivers/infiniband/hw/cxgb4/device.c 	c4iw_handlers[opcode](dev, skb);
skb              1170 drivers/infiniband/hw/cxgb4/device.c 	struct sk_buff *skb;
skb              1177 drivers/infiniband/hw/cxgb4/device.c 		skb = alloc_skb(256, GFP_ATOMIC);
skb              1178 drivers/infiniband/hw/cxgb4/device.c 		if (!skb)
skb              1180 drivers/infiniband/hw/cxgb4/device.c 		__skb_put(skb, len);
skb              1181 drivers/infiniband/hw/cxgb4/device.c 		skb_copy_to_linear_data(skb, &rsp[1], len);
skb              1200 drivers/infiniband/hw/cxgb4/device.c 		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
skb              1201 drivers/infiniband/hw/cxgb4/device.c 		if (unlikely(!skb))
skb              1207 drivers/infiniband/hw/cxgb4/device.c 		c4iw_handlers[opcode](dev, skb);
skb              1210 drivers/infiniband/hw/cxgb4/device.c 		kfree_skb(skb);
skb                79 drivers/infiniband/hw/cxgb4/iw_cxgb4.h static inline void *cplhdr(struct sk_buff *skb)
skb                81 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 	return skb->data;
skb               286 drivers/infiniband/hw/cxgb4/iw_cxgb4.h int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
skb               289 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 				     struct sk_buff *skb,
skb               299 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 	ret = c4iw_ofld_send(rdev, skb);
skb               943 drivers/infiniband/hw/cxgb4/iw_cxgb4.h typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
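
iw_cxgb4.h above defines cplhdr() as a bare cast of skb->data plus a common handler signature taking (dev, skb). A sketch of a handler in that shape; struct my_cpl_rpl and its status field are invented for illustration:

#include <linux/printk.h>
#include <linux/skbuff.h>

struct c4iw_dev;			/* opaque here */

struct my_cpl_rpl {			/* invented CPL message layout */
	__be32 status;
};

static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;		/* CPL message starts at the data ptr */
}

static int my_cpl_handler(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct my_cpl_rpl *rpl = cplhdr(skb);

	pr_debug("cpl status 0x%x\n", be32_to_cpu(rpl->status));
	kfree_skb(skb);			/* this handler owns the skb */
	return 0;
}
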
skb                63 drivers/infiniband/hw/cxgb4/mem.c 				       struct sk_buff *skb,
skb                77 drivers/infiniband/hw/cxgb4/mem.c 	if (!skb) {
skb                78 drivers/infiniband/hw/cxgb4/mem.c 		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
skb                79 drivers/infiniband/hw/cxgb4/mem.c 		if (!skb)
skb                82 drivers/infiniband/hw/cxgb4/mem.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
skb                84 drivers/infiniband/hw/cxgb4/mem.c 	req = __skb_put_zero(skb, wr_len);
skb               104 drivers/infiniband/hw/cxgb4/mem.c 		ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
skb               106 drivers/infiniband/hw/cxgb4/mem.c 		ret = c4iw_ofld_send(rdev, skb);
skb               111 drivers/infiniband/hw/cxgb4/mem.c 				  void *data, struct sk_buff *skb,
skb               137 drivers/infiniband/hw/cxgb4/mem.c 		if (!skb) {
skb               138 drivers/infiniband/hw/cxgb4/mem.c 			skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
skb               139 drivers/infiniband/hw/cxgb4/mem.c 			if (!skb)
skb               142 drivers/infiniband/hw/cxgb4/mem.c 		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
skb               144 drivers/infiniband/hw/cxgb4/mem.c 		req = __skb_put_zero(skb, wr_len);
skb               177 drivers/infiniband/hw/cxgb4/mem.c 			ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
skb               180 drivers/infiniband/hw/cxgb4/mem.c 			ret = c4iw_ofld_send(rdev, skb);
skb               183 drivers/infiniband/hw/cxgb4/mem.c 		skb = NULL;
skb               191 drivers/infiniband/hw/cxgb4/mem.c 			       void *data, struct sk_buff *skb,
skb               215 drivers/infiniband/hw/cxgb4/mem.c 						 skb, remain ? NULL : wr_waitp);
skb               223 drivers/infiniband/hw/cxgb4/mem.c 		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
skb               235 drivers/infiniband/hw/cxgb4/mem.c 			     void *data, struct sk_buff *skb,
skb               241 drivers/infiniband/hw/cxgb4/mem.c 		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
skb               247 drivers/infiniband/hw/cxgb4/mem.c 		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
skb               252 drivers/infiniband/hw/cxgb4/mem.c 	ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
skb               256 drivers/infiniband/hw/cxgb4/mem.c 		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
skb               275 drivers/infiniband/hw/cxgb4/mem.c 			   struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
skb               334 drivers/infiniband/hw/cxgb4/mem.c 				sizeof(*tpt), tpt, skb, wr_waitp);
skb               361 drivers/infiniband/hw/cxgb4/mem.c 		     u32 pbl_addr, struct sk_buff *skb,
skb               365 drivers/infiniband/hw/cxgb4/mem.c 			       pbl_size, pbl_addr, skb, wr_waitp);
skb               377 drivers/infiniband/hw/cxgb4/mem.c 			     struct sk_buff *skb,
skb               381 drivers/infiniband/hw/cxgb4/mem.c 			       0, skb, wr_waitp);
skb               209 drivers/infiniband/hw/cxgb4/qp.c 	struct sk_buff *skb;
skb               308 drivers/infiniband/hw/cxgb4/qp.c 	skb = alloc_skb(wr_len, GFP_KERNEL);
skb               309 drivers/infiniband/hw/cxgb4/qp.c 	if (!skb) {
skb               313 drivers/infiniband/hw/cxgb4/qp.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
skb               315 drivers/infiniband/hw/cxgb4/qp.c 	res_wr = __skb_put_zero(skb, wr_len);
skb               381 drivers/infiniband/hw/cxgb4/qp.c 	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
skb              1563 drivers/infiniband/hw/cxgb4/qp.c 	struct sk_buff *skb;
skb              1569 drivers/infiniband/hw/cxgb4/qp.c 	skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
skb              1570 drivers/infiniband/hw/cxgb4/qp.c 	if (WARN_ON(!skb))
skb              1573 drivers/infiniband/hw/cxgb4/qp.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
skb              1575 drivers/infiniband/hw/cxgb4/qp.c 	wqe = __skb_put_zero(skb, sizeof(*wqe));
skb              1589 drivers/infiniband/hw/cxgb4/qp.c 	c4iw_ofld_send(&qhp->rhp->rdev, skb);
skb              1696 drivers/infiniband/hw/cxgb4/qp.c 	struct sk_buff *skb;
skb              1700 drivers/infiniband/hw/cxgb4/qp.c 	skb = skb_dequeue(&ep->com.ep_skb_list);
skb              1701 drivers/infiniband/hw/cxgb4/qp.c 	if (WARN_ON(!skb))
skb              1704 drivers/infiniband/hw/cxgb4/qp.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
skb              1706 drivers/infiniband/hw/cxgb4/qp.c 	wqe = __skb_put_zero(skb, sizeof(*wqe));
skb              1717 drivers/infiniband/hw/cxgb4/qp.c 	ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
skb              1752 drivers/infiniband/hw/cxgb4/qp.c 	struct sk_buff *skb;
skb              1757 drivers/infiniband/hw/cxgb4/qp.c 	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
skb              1758 drivers/infiniband/hw/cxgb4/qp.c 	if (!skb) {
skb              1765 drivers/infiniband/hw/cxgb4/qp.c 		kfree_skb(skb);
skb              1768 drivers/infiniband/hw/cxgb4/qp.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
skb              1770 drivers/infiniband/hw/cxgb4/qp.c 	wqe = __skb_put_zero(skb, sizeof(*wqe));
skb              1820 drivers/infiniband/hw/cxgb4/qp.c 	ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
skb              2484 drivers/infiniband/hw/cxgb4/qp.c 	struct sk_buff *skb = srq->destroy_skb;
skb              2491 drivers/infiniband/hw/cxgb4/qp.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
skb              2493 drivers/infiniband/hw/cxgb4/qp.c 	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
skb              2507 drivers/infiniband/hw/cxgb4/qp.c 	c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
skb              2525 drivers/infiniband/hw/cxgb4/qp.c 	struct sk_buff *skb;
skb              2578 drivers/infiniband/hw/cxgb4/qp.c 	skb = alloc_skb(wr_len, GFP_KERNEL);
skb              2579 drivers/infiniband/hw/cxgb4/qp.c 	if (!skb)
skb              2581 drivers/infiniband/hw/cxgb4/qp.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
skb              2583 drivers/infiniband/hw/cxgb4/qp.c 	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
skb              2623 drivers/infiniband/hw/cxgb4/qp.c 	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
skb              1357 drivers/infiniband/hw/hfi1/hfi.h 				     struct sk_buff *skb, u64 pbc, u8 plen);
skb               167 drivers/infiniband/hw/hfi1/vnic.h 		       struct sk_buff *skb, u64 pbc, u8 plen);
skb               270 drivers/infiniband/hw/hfi1/vnic_main.c 					 u8 q_idx, struct sk_buff *skb, int err)
skb               272 drivers/infiniband/hw/hfi1/vnic_main.c 	struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
skb               278 drivers/infiniband/hw/hfi1/vnic_main.c 	stats->netstats.tx_bytes += skb->len + ETH_FCS_LEN;
skb               280 drivers/infiniband/hw/hfi1/vnic_main.c 	update_len_counters(tx_grp, skb->len);
skb               291 drivers/infiniband/hw/hfi1/vnic_main.c 	if (!__vlan_get_tag(skb, &vlan_tci))
skb               299 drivers/infiniband/hw/hfi1/vnic_main.c 					 u8 q_idx, struct sk_buff *skb, int err)
skb               301 drivers/infiniband/hw/hfi1/vnic_main.c 	struct ethhdr *mac_hdr = (struct ethhdr *)skb->data;
skb               307 drivers/infiniband/hw/hfi1/vnic_main.c 	stats->netstats.rx_bytes += skb->len + ETH_FCS_LEN;
skb               309 drivers/infiniband/hw/hfi1/vnic_main.c 	update_len_counters(rx_grp, skb->len);
skb               320 drivers/infiniband/hw/hfi1/vnic_main.c 	if (!__vlan_get_tag(skb, &vlan_tci))
skb               360 drivers/infiniband/hw/hfi1/vnic_main.c static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
skb               364 drivers/infiniband/hw/hfi1/vnic_main.c 	u8 pad_len, q_idx = skb->queue_mapping;
skb               371 drivers/infiniband/hw/hfi1/vnic_main.c 	v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len);
skb               378 drivers/infiniband/hw/hfi1/vnic_main.c 	mdata = (struct opa_vnic_skb_mdata *)skb->data;
skb               379 drivers/infiniband/hw/hfi1/vnic_main.c 	skb_pull(skb, sizeof(*mdata));
skb               386 drivers/infiniband/hw/hfi1/vnic_main.c 	pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
skb               394 drivers/infiniband/hw/hfi1/vnic_main.c 	pkt_len = (skb->len + pad_len) >> 2;
skb               399 drivers/infiniband/hw/hfi1/vnic_main.c 	skb_get(skb);
skb               400 drivers/infiniband/hw/hfi1/vnic_main.c 	v_dbg("pbc 0x%016llX len %d pad_len %d\n", pbc, skb->len, pad_len);
skb               401 drivers/infiniband/hw/hfi1/vnic_main.c 	err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len);
skb               409 drivers/infiniband/hw/hfi1/vnic_main.c 	skb_pull(skb, OPA_VNIC_HDR_LEN);
skb               413 drivers/infiniband/hw/hfi1/vnic_main.c 		dev_kfree_skb_any(skb);
skb               419 drivers/infiniband/hw/hfi1/vnic_main.c 	hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err);
skb               420 drivers/infiniband/hw/hfi1/vnic_main.c 	dev_kfree_skb_any(skb);
skb               425 drivers/infiniband/hw/hfi1/vnic_main.c 				  struct sk_buff *skb,
skb               432 drivers/infiniband/hw/hfi1/vnic_main.c 	mdata = (struct opa_vnic_skb_mdata *)skb->data;
skb               439 drivers/infiniband/hw/hfi1/vnic_main.c 				      struct sk_buff *skb)
skb               445 drivers/infiniband/hw/hfi1/vnic_main.c 	skb_pull(skb, OPA_VNIC_HDR_LEN);
skb               448 drivers/infiniband/hw/hfi1/vnic_main.c 	if (unlikely(skb->len > max_len))
skb               450 drivers/infiniband/hw/hfi1/vnic_main.c 	else if (unlikely(skb->len < ETH_ZLEN))
skb               460 drivers/infiniband/hw/hfi1/vnic_main.c 	struct sk_buff *skb;
skb               462 drivers/infiniband/hw/hfi1/vnic_main.c 	skb = skb_dequeue(&rxq->skbq);
skb               463 drivers/infiniband/hw/hfi1/vnic_main.c 	if (unlikely(!skb))
skb               467 drivers/infiniband/hw/hfi1/vnic_main.c 	pad_info = skb->data + skb->len - 1;
skb               468 drivers/infiniband/hw/hfi1/vnic_main.c 	skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
skb               471 drivers/infiniband/hw/hfi1/vnic_main.c 	return skb;
skb               479 drivers/infiniband/hw/hfi1/vnic_main.c 	struct sk_buff *skb;
skb               486 drivers/infiniband/hw/hfi1/vnic_main.c 		skb = hfi1_vnic_get_skb(rxq);
skb               487 drivers/infiniband/hw/hfi1/vnic_main.c 		if (unlikely(!skb))
skb               490 drivers/infiniband/hw/hfi1/vnic_main.c 		rc = hfi1_vnic_decap_skb(rxq, skb);
skb               492 drivers/infiniband/hw/hfi1/vnic_main.c 		hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
skb               494 drivers/infiniband/hw/hfi1/vnic_main.c 			dev_kfree_skb_any(skb);
skb               498 drivers/infiniband/hw/hfi1/vnic_main.c 		skb_checksum_none_assert(skb);
skb               499 drivers/infiniband/hw/hfi1/vnic_main.c 		skb->protocol = eth_type_trans(skb, rxq->netdev);
skb               501 drivers/infiniband/hw/hfi1/vnic_main.c 		napi_gro_receive(&rxq->napi, skb);
skb               529 drivers/infiniband/hw/hfi1/vnic_main.c 	struct sk_buff *skb;
skb               575 drivers/infiniband/hw/hfi1/vnic_main.c 	skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
skb               576 drivers/infiniband/hw/hfi1/vnic_main.c 	if (unlikely(!skb)) {
skb               581 drivers/infiniband/hw/hfi1/vnic_main.c 	memcpy(skb->data, packet->ebuf, packet->tlen);
skb               582 drivers/infiniband/hw/hfi1/vnic_main.c 	skb_put(skb, packet->tlen);
skb               583 drivers/infiniband/hw/hfi1/vnic_main.c 	skb_queue_tail(&rxq->skbq, skb);
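
The hfi1 vnic receive path above copies each hardware packet into a fresh netdev skb and queues it; the napi side then dequeues, reads the pad length from the final byte, trims off the ICRC tail plus padding, and hands the frame to GRO. A minimal sketch, assuming a 4-byte ICRC tail and a 3-bit pad field (illustrative constants; skb_put_data stands in for the memcpy + skb_put pair in the excerpt):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define ICRC_TAIL_LEN 4			/* assumption: 4-byte trailing ICRC */

static void enqueue_rx_copy(struct sk_buff_head *skbq,
			    struct net_device *ndev,
			    const void *buf, int tlen)
{
	struct sk_buff *skb = netdev_alloc_skb(ndev, tlen);

	if (unlikely(!skb))
		return;				/* caller counts this as a drop */
	skb_put_data(skb, buf, tlen);		/* copy the payload in */
	skb_queue_tail(skbq, skb);
}

static struct sk_buff *dequeue_rx(struct sk_buff_head *skbq)
{
	struct sk_buff *skb = skb_dequeue(skbq);
	u8 pad_len;

	if (unlikely(!skb))
		return NULL;

	/* Low three bits of the last byte record the pad length. */
	pad_len = *(u8 *)(skb->data + skb->len - 1) & 0x7;
	skb_trim(skb, skb->len - ICRC_TAIL_LEN - pad_len);
	return skb;
}
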
skb                74 drivers/infiniband/hw/hfi1/vnic_sdma.c 	struct sk_buff         *skb;
skb                87 drivers/infiniband/hw/hfi1/vnic_sdma.c 	dev_kfree_skb_any(tx->skb);
skb                99 drivers/infiniband/hw/hfi1/vnic_sdma.c 		tx->skb->data,
skb               100 drivers/infiniband/hw/hfi1/vnic_sdma.c 		skb_headlen(tx->skb));
skb               104 drivers/infiniband/hw/hfi1/vnic_sdma.c 	for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
skb               105 drivers/infiniband/hw/hfi1/vnic_sdma.c 		skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];
skb               136 drivers/infiniband/hw/hfi1/vnic_sdma.c 		hdrbytes + tx->skb->len + tx->plen,
skb               169 drivers/infiniband/hw/hfi1/vnic_sdma.c 		       struct sk_buff *skb, u64 pbc, u8 plen)
skb               189 drivers/infiniband/hw/hfi1/vnic_sdma.c 	tx->skb = skb;
skb               213 drivers/infiniband/hw/hfi1/vnic_sdma.c 		dev_kfree_skb_any(skb);
skb                95 drivers/infiniband/sw/rxe/rxe.h void rxe_rcv(struct sk_buff *skb);
skb               149 drivers/infiniband/sw/rxe/rxe_comp.c void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
skb               153 drivers/infiniband/sw/rxe/rxe_comp.c 	skb_queue_tail(&qp->resp_pkts, skb);
skb               157 drivers/infiniband/sw/rxe/rxe_comp.c 		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
skb               539 drivers/infiniband/sw/rxe/rxe_comp.c 	struct sk_buff *skb;
skb               542 drivers/infiniband/sw/rxe/rxe_comp.c 	while ((skb = skb_dequeue(&qp->resp_pkts))) {
skb               544 drivers/infiniband/sw/rxe/rxe_comp.c 		kfree_skb(skb);
skb               562 drivers/infiniband/sw/rxe/rxe_comp.c 	struct sk_buff *skb = NULL;
skb               592 drivers/infiniband/sw/rxe/rxe_comp.c 			skb = skb_dequeue(&qp->resp_pkts);
skb               593 drivers/infiniband/sw/rxe/rxe_comp.c 			if (skb) {
skb               594 drivers/infiniband/sw/rxe/rxe_comp.c 				pkt = SKB_TO_PKT(skb);
skb               656 drivers/infiniband/sw/rxe/rxe_comp.c 				kfree_skb(skb);
skb               657 drivers/infiniband/sw/rxe/rxe_comp.c 				skb = NULL;
skb               704 drivers/infiniband/sw/rxe/rxe_comp.c 					kfree_skb(skb);
skb               705 drivers/infiniband/sw/rxe/rxe_comp.c 					skb = NULL;
skb               733 drivers/infiniband/sw/rxe/rxe_comp.c 					kfree_skb(skb);
skb               734 drivers/infiniband/sw/rxe/rxe_comp.c 					skb = NULL;
skb               758 drivers/infiniband/sw/rxe/rxe_comp.c 				kfree_skb(skb);
skb               759 drivers/infiniband/sw/rxe/rxe_comp.c 				skb = NULL;
skb               776 drivers/infiniband/sw/rxe/rxe_comp.c 				kfree_skb(skb);
skb               777 drivers/infiniband/sw/rxe/rxe_comp.c 				skb = NULL;
skb               788 drivers/infiniband/sw/rxe/rxe_comp.c 	WARN_ON_ONCE(skb);
skb               796 drivers/infiniband/sw/rxe/rxe_comp.c 	WARN_ON_ONCE(skb);
skb                56 drivers/infiniband/sw/rxe/rxe_hdr.h static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
skb                58 drivers/infiniband/sw/rxe/rxe_hdr.h 	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
skb                59 drivers/infiniband/sw/rxe/rxe_hdr.h 	return (void *)skb->cb;
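
SKB_TO_PKT() above overlays rxe's per-packet metadata on skb->cb, with a BUILD_BUG_ON guarding the 48-byte limit. The same idiom in miniature, with an invented struct pkt_meta:

#include <linux/skbuff.h>

struct pkt_meta {			/* invented per-packet metadata */
	void *qp;
	u32   psn;
	u8    opcode;
};

static inline struct pkt_meta *skb_to_meta(struct sk_buff *skb)
{
	/* skb->cb is 48 bytes; anything larger would corrupt the skb. */
	BUILD_BUG_ON(sizeof(struct pkt_meta) > sizeof(skb->cb));
	return (struct pkt_meta *)skb->cb;
}
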
skb                38 drivers/infiniband/sw/rxe/rxe_icrc.c u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
skb                48 drivers/infiniband/sw/rxe/rxe_icrc.c 		(skb->protocol == htons(ETH_P_IP) ?
skb                62 drivers/infiniband/sw/rxe/rxe_icrc.c 	if (skb->protocol == htons(ETH_P_IP)) { /* IPv4 */
skb                63 drivers/infiniband/sw/rxe/rxe_icrc.c 		memcpy(pshdr, ip_hdr(skb), hdr_size);
skb                71 drivers/infiniband/sw/rxe/rxe_icrc.c 		memcpy(pshdr, ipv6_hdr(skb), hdr_size);
skb               143 drivers/infiniband/sw/rxe/rxe_loc.h void rxe_loopback(struct sk_buff *skb);
skb               144 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
skb               147 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
skb               239 drivers/infiniband/sw/rxe/rxe_loc.h u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);
skb               241 drivers/infiniband/sw/rxe/rxe_loc.h void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
skb               243 drivers/infiniband/sw/rxe/rxe_loc.h void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
skb               251 drivers/infiniband/sw/rxe/rxe_loc.h 				  struct sk_buff *skb)
skb               264 drivers/infiniband/sw/rxe/rxe_loc.h 		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
skb               265 drivers/infiniband/sw/rxe/rxe_loc.h 		rxe_loopback(skb);
skb               268 drivers/infiniband/sw/rxe/rxe_loc.h 		err = rxe_send(pkt, skb);
skb               287 drivers/infiniband/sw/rxe/rxe_loc.h 	kfree_skb(skb);
skb               192 drivers/infiniband/sw/rxe/rxe_net.c static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb               195 drivers/infiniband/sw/rxe/rxe_net.c 	struct net_device *ndev = skb->dev;
skb               198 drivers/infiniband/sw/rxe/rxe_net.c 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
skb               207 drivers/infiniband/sw/rxe/rxe_net.c 	if (skb_linearize(skb)) {
skb               213 drivers/infiniband/sw/rxe/rxe_net.c 	udph = udp_hdr(skb);
skb               220 drivers/infiniband/sw/rxe/rxe_net.c 	rxe_rcv(skb);
skb               230 drivers/infiniband/sw/rxe/rxe_net.c 	kfree_skb(skb);
skb               274 drivers/infiniband/sw/rxe/rxe_net.c static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
skb               279 drivers/infiniband/sw/rxe/rxe_net.c 	__skb_push(skb, sizeof(*udph));
skb               280 drivers/infiniband/sw/rxe/rxe_net.c 	skb_reset_transport_header(skb);
skb               281 drivers/infiniband/sw/rxe/rxe_net.c 	udph = udp_hdr(skb);
skb               285 drivers/infiniband/sw/rxe/rxe_net.c 	udph->len = htons(skb->len);
skb               289 drivers/infiniband/sw/rxe/rxe_net.c static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
skb               295 drivers/infiniband/sw/rxe/rxe_net.c 	skb_scrub_packet(skb, xnet);
skb               297 drivers/infiniband/sw/rxe/rxe_net.c 	skb_clear_hash(skb);
skb               298 drivers/infiniband/sw/rxe/rxe_net.c 	skb_dst_set(skb, dst_clone(dst));
skb               299 drivers/infiniband/sw/rxe/rxe_net.c 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
skb               301 drivers/infiniband/sw/rxe/rxe_net.c 	skb_push(skb, sizeof(struct iphdr));
skb               302 drivers/infiniband/sw/rxe/rxe_net.c 	skb_reset_network_header(skb);
skb               304 drivers/infiniband/sw/rxe/rxe_net.c 	iph = ip_hdr(skb);
skb               315 drivers/infiniband/sw/rxe/rxe_net.c 			  skb_shinfo(skb)->gso_segs ?: 1);
skb               316 drivers/infiniband/sw/rxe/rxe_net.c 	iph->tot_len = htons(skb->len);
skb               320 drivers/infiniband/sw/rxe/rxe_net.c static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
skb               326 drivers/infiniband/sw/rxe/rxe_net.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb               327 drivers/infiniband/sw/rxe/rxe_net.c 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
skb               329 drivers/infiniband/sw/rxe/rxe_net.c 	skb_dst_set(skb, dst_clone(dst));
skb               331 drivers/infiniband/sw/rxe/rxe_net.c 	__skb_push(skb, sizeof(*ip6h));
skb               332 drivers/infiniband/sw/rxe/rxe_net.c 	skb_reset_network_header(skb);
skb               333 drivers/infiniband/sw/rxe/rxe_net.c 	ip6h		  = ipv6_hdr(skb);
skb               335 drivers/infiniband/sw/rxe/rxe_net.c 	ip6h->payload_len = htons(skb->len);
skb               340 drivers/infiniband/sw/rxe/rxe_net.c 	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
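
prepare_udp_hdr() above grows the packet headward with __skb_push(), resets the transport header so udp_hdr() points at the new space, and fills in the fields; the length covers everything currently in the skb. A sketch of that pattern (push_udp_hdr is an illustrative name):

#include <linux/skbuff.h>
#include <linux/udp.h>

static void push_udp_hdr(struct sk_buff *skb, __be16 src_port,
			 __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));	/* grow into the reserved headroom */
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);		/* now points at the new header */

	udph->dest   = dst_port;
	udph->source = src_port;
	udph->len    = htons(skb->len);	/* covers header + payload */
	udph->check  = 0;		/* checksum is filled in later */
}
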
skb               343 drivers/infiniband/sw/rxe/rxe_net.c static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
skb               353 drivers/infiniband/sw/rxe/rxe_net.c 	dst = rxe_find_route(skb->dev, qp, av);
skb               359 drivers/infiniband/sw/rxe/rxe_net.c 	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
skb               362 drivers/infiniband/sw/rxe/rxe_net.c 	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
skb               369 drivers/infiniband/sw/rxe/rxe_net.c static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
skb               377 drivers/infiniband/sw/rxe/rxe_net.c 	dst = rxe_find_route(skb->dev, qp, av);
skb               383 drivers/infiniband/sw/rxe/rxe_net.c 	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
skb               386 drivers/infiniband/sw/rxe/rxe_net.c 	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
skb               394 drivers/infiniband/sw/rxe/rxe_net.c int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
skb               398 drivers/infiniband/sw/rxe/rxe_net.c 	if (skb->protocol == htons(ETH_P_IP))
skb               399 drivers/infiniband/sw/rxe/rxe_net.c 		err = prepare4(pkt, skb);
skb               400 drivers/infiniband/sw/rxe/rxe_net.c 	else if (skb->protocol == htons(ETH_P_IPV6))
skb               401 drivers/infiniband/sw/rxe/rxe_net.c 		err = prepare6(pkt, skb);
skb               403 drivers/infiniband/sw/rxe/rxe_net.c 	*crc = rxe_icrc_hdr(pkt, skb);
skb               405 drivers/infiniband/sw/rxe/rxe_net.c 	if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
skb               411 drivers/infiniband/sw/rxe/rxe_net.c static void rxe_skb_tx_dtor(struct sk_buff *skb)
skb               413 drivers/infiniband/sw/rxe/rxe_net.c 	struct sock *sk = skb->sk;
skb               424 drivers/infiniband/sw/rxe/rxe_net.c int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
skb               428 drivers/infiniband/sw/rxe/rxe_net.c 	skb->destructor = rxe_skb_tx_dtor;
skb               429 drivers/infiniband/sw/rxe/rxe_net.c 	skb->sk = pkt->qp->sk->sk;
skb               434 drivers/infiniband/sw/rxe/rxe_net.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               435 drivers/infiniband/sw/rxe/rxe_net.c 		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
skb               436 drivers/infiniband/sw/rxe/rxe_net.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               437 drivers/infiniband/sw/rxe/rxe_net.c 		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
skb               439 drivers/infiniband/sw/rxe/rxe_net.c 	pr_err("Unknown layer 3 protocol: %d\n", ntohs(skb->protocol));
skb               442 drivers/infiniband/sw/rxe/rxe_net.c 		kfree_skb(skb);
skb               454 drivers/infiniband/sw/rxe/rxe_net.c void rxe_loopback(struct sk_buff *skb)
skb               456 drivers/infiniband/sw/rxe/rxe_net.c 	rxe_rcv(skb);
skb               463 drivers/infiniband/sw/rxe/rxe_net.c 	struct sk_buff *skb = NULL;
skb               485 drivers/infiniband/sw/rxe/rxe_net.c 	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
skb               488 drivers/infiniband/sw/rxe/rxe_net.c 	if (unlikely(!skb)) {
skb               493 drivers/infiniband/sw/rxe/rxe_net.c 	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
skb               496 drivers/infiniband/sw/rxe/rxe_net.c 	skb->dev	= ndev;
skb               500 drivers/infiniband/sw/rxe/rxe_net.c 		skb->protocol = htons(ETH_P_IP);
skb               502 drivers/infiniband/sw/rxe/rxe_net.c 		skb->protocol = htons(ETH_P_IPV6);
skb               506 drivers/infiniband/sw/rxe/rxe_net.c 	pkt->hdr	= skb_put_zero(skb, paylen);
skb               511 drivers/infiniband/sw/rxe/rxe_net.c 	return skb;
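
rxe_init_packet() above sizes the allocation for link-layer plus protocol headers and payload, then skb_reserve()s the header room so later pushes cannot underflow. A minimal sketch under the assumption of an IPv4 flow:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *init_tx_skb(struct net_device *ndev, int hdr_len,
				   int paylen)
{
	struct sk_buff *skb;

	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
			GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	/* Leave room so skb_push() for the headers cannot underflow. */
	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
	skb->dev = ndev;
	skb->protocol = htons(ETH_P_IP);	/* assumption: IPv4 flow */
	skb_put_zero(skb, paylen);		/* zeroed payload area */
	return skb;
}
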
skb               156 drivers/infiniband/sw/rxe/rxe_qp.c 		kfree_skb(res->atomic.skb);
skb               157 drivers/infiniband/sw/rxe/rxe_recv.c 	struct sk_buff *skb = PKT_TO_SKB(pkt);
skb               168 drivers/infiniband/sw/rxe/rxe_recv.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               174 drivers/infiniband/sw/rxe/rxe_recv.c 		if (ip_hdr(skb)->daddr != saddr->s_addr) {
skb               176 drivers/infiniband/sw/rxe/rxe_recv.c 					    &ip_hdr(skb)->daddr,
skb               181 drivers/infiniband/sw/rxe/rxe_recv.c 		if (ip_hdr(skb)->saddr != daddr->s_addr) {
skb               183 drivers/infiniband/sw/rxe/rxe_recv.c 					    &ip_hdr(skb)->saddr,
skb               188 drivers/infiniband/sw/rxe/rxe_recv.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               194 drivers/infiniband/sw/rxe/rxe_recv.c 		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
skb               196 drivers/infiniband/sw/rxe/rxe_recv.c 					    &ipv6_hdr(skb)->daddr, saddr);
skb               200 drivers/infiniband/sw/rxe/rxe_recv.c 		if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
skb               202 drivers/infiniband/sw/rxe/rxe_recv.c 					    &ipv6_hdr(skb)->saddr, daddr);
skb               269 drivers/infiniband/sw/rxe/rxe_recv.c static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
skb               272 drivers/infiniband/sw/rxe/rxe_recv.c 		rxe_resp_queue_pkt(pkt->qp, skb);
skb               274 drivers/infiniband/sw/rxe/rxe_recv.c 		rxe_comp_queue_pkt(pkt->qp, skb);
skb               277 drivers/infiniband/sw/rxe/rxe_recv.c static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
skb               279 drivers/infiniband/sw/rxe/rxe_recv.c 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
skb               286 drivers/infiniband/sw/rxe/rxe_recv.c 	if (skb->protocol == htons(ETH_P_IP))
skb               287 drivers/infiniband/sw/rxe/rxe_recv.c 		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
skb               289 drivers/infiniband/sw/rxe/rxe_recv.c 	else if (skb->protocol == htons(ETH_P_IPV6))
skb               290 drivers/infiniband/sw/rxe/rxe_recv.c 		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));
skb               301 drivers/infiniband/sw/rxe/rxe_recv.c 		pkt = SKB_TO_PKT(skb);
skb               316 drivers/infiniband/sw/rxe/rxe_recv.c 			skb_get(skb);
skb               320 drivers/infiniband/sw/rxe/rxe_recv.c 		rxe_rcv_pkt(pkt, skb);
skb               328 drivers/infiniband/sw/rxe/rxe_recv.c 	kfree_skb(skb);
skb               331 drivers/infiniband/sw/rxe/rxe_recv.c static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
skb               337 drivers/infiniband/sw/rxe/rxe_recv.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               338 drivers/infiniband/sw/rxe/rxe_recv.c 		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
skb               342 drivers/infiniband/sw/rxe/rxe_recv.c 		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
skb               347 drivers/infiniband/sw/rxe/rxe_recv.c 					 1, skb->dev);
skb               356 drivers/infiniband/sw/rxe/rxe_recv.c void rxe_rcv(struct sk_buff *skb)
skb               359 drivers/infiniband/sw/rxe/rxe_recv.c 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
skb               366 drivers/infiniband/sw/rxe/rxe_recv.c 	if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
skb               369 drivers/infiniband/sw/rxe/rxe_recv.c 	if (unlikely(rxe_match_dgid(rxe, skb) < 0)) {
skb               379 drivers/infiniband/sw/rxe/rxe_recv.c 	if (unlikely(skb->len < header_size(pkt)))
skb               390 drivers/infiniband/sw/rxe/rxe_recv.c 	calc_icrc = rxe_icrc_hdr(pkt, skb);
skb               395 drivers/infiniband/sw/rxe/rxe_recv.c 		if (skb->protocol == htons(ETH_P_IPV6))
skb               397 drivers/infiniband/sw/rxe/rxe_recv.c 					    &ipv6_hdr(skb)->saddr);
skb               398 drivers/infiniband/sw/rxe/rxe_recv.c 		else if (skb->protocol == htons(ETH_P_IP))
skb               400 drivers/infiniband/sw/rxe/rxe_recv.c 					    &ip_hdr(skb)->saddr);
skb               410 drivers/infiniband/sw/rxe/rxe_recv.c 		rxe_rcv_mcast_pkt(rxe, skb);
skb               412 drivers/infiniband/sw/rxe/rxe_recv.c 		rxe_rcv_pkt(pkt, skb);
skb               420 drivers/infiniband/sw/rxe/rxe_recv.c 	kfree_skb(skb);
skb               385 drivers/infiniband/sw/rxe/rxe_req.c 	struct sk_buff		*skb;
skb               411 drivers/infiniband/sw/rxe/rxe_req.c 	skb = rxe_init_packet(rxe, av, paylen, pkt);
skb               412 drivers/infiniband/sw/rxe/rxe_req.c 	if (unlikely(!skb))
skb               470 drivers/infiniband/sw/rxe/rxe_req.c 	return skb;
skb               474 drivers/infiniband/sw/rxe/rxe_req.c 		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
skb               482 drivers/infiniband/sw/rxe/rxe_req.c 	err = rxe_prepare(pkt, skb, &crc);
skb               593 drivers/infiniband/sw/rxe/rxe_req.c 	struct sk_buff *skb;
skb               717 drivers/infiniband/sw/rxe/rxe_req.c 	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
skb               718 drivers/infiniband/sw/rxe/rxe_req.c 	if (unlikely(!skb)) {
skb               723 drivers/infiniband/sw/rxe/rxe_req.c 	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
skb               725 drivers/infiniband/sw/rxe/rxe_req.c 		kfree_skb(skb);
skb               738 drivers/infiniband/sw/rxe/rxe_req.c 	ret = rxe_xmit_packet(qp, &pkt, skb);
skb               107 drivers/infiniband/sw/rxe/rxe_resp.c void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
skb               110 drivers/infiniband/sw/rxe/rxe_resp.c 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
skb               112 drivers/infiniband/sw/rxe/rxe_resp.c 	skb_queue_tail(&qp->req_pkts, skb);
skb               123 drivers/infiniband/sw/rxe/rxe_resp.c 	struct sk_buff *skb;
skb               126 drivers/infiniband/sw/rxe/rxe_resp.c 		while ((skb = skb_dequeue(&qp->req_pkts))) {
skb               128 drivers/infiniband/sw/rxe/rxe_resp.c 			kfree_skb(skb);
skb               135 drivers/infiniband/sw/rxe/rxe_resp.c 	skb = skb_peek(&qp->req_pkts);
skb               136 drivers/infiniband/sw/rxe/rxe_resp.c 	if (!skb)
skb               139 drivers/infiniband/sw/rxe/rxe_resp.c 	*pkt_p = SKB_TO_PKT(skb);
skb               595 drivers/infiniband/sw/rxe/rxe_resp.c 	struct sk_buff *skb;
skb               608 drivers/infiniband/sw/rxe/rxe_resp.c 	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
skb               609 drivers/infiniband/sw/rxe/rxe_resp.c 	if (!skb)
skb               637 drivers/infiniband/sw/rxe/rxe_resp.c 	err = rxe_prepare(ack, skb, &crc);
skb               639 drivers/infiniband/sw/rxe/rxe_resp.c 		kfree_skb(skb);
skb               651 drivers/infiniband/sw/rxe/rxe_resp.c 	return skb;
skb               661 drivers/infiniband/sw/rxe/rxe_resp.c 	struct sk_buff *skb;
skb               725 drivers/infiniband/sw/rxe/rxe_resp.c 	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
skb               727 drivers/infiniband/sw/rxe/rxe_resp.c 	if (!skb)
skb               745 drivers/infiniband/sw/rxe/rxe_resp.c 	err = rxe_xmit_packet(qp, &ack_pkt, skb);
skb               772 drivers/infiniband/sw/rxe/rxe_resp.c 	struct sk_buff *skb = PKT_TO_SKB(pkt);
skb               775 drivers/infiniband/sw/rxe/rxe_resp.c 	if (skb->protocol == htons(ETH_P_IP))
skb               776 drivers/infiniband/sw/rxe/rxe_resp.c 		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
skb               777 drivers/infiniband/sw/rxe/rxe_resp.c 	else if (skb->protocol == htons(ETH_P_IPV6))
skb               778 drivers/infiniband/sw/rxe/rxe_resp.c 		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
skb               894 drivers/infiniband/sw/rxe/rxe_resp.c 			struct sk_buff *skb = PKT_TO_SKB(pkt);
skb               897 drivers/infiniband/sw/rxe/rxe_resp.c 			if (skb->protocol == htons(ETH_P_IP))
skb               902 drivers/infiniband/sw/rxe/rxe_resp.c 			if (is_vlan_dev(skb->dev)) {
skb               904 drivers/infiniband/sw/rxe/rxe_resp.c 				wc->vlan_id = vlan_dev_vlan_id(skb->dev);
skb               963 drivers/infiniband/sw/rxe/rxe_resp.c 	struct sk_buff *skb;
skb               965 drivers/infiniband/sw/rxe/rxe_resp.c 	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
skb               967 drivers/infiniband/sw/rxe/rxe_resp.c 	if (!skb) {
skb               972 drivers/infiniband/sw/rxe/rxe_resp.c 	err = rxe_xmit_packet(qp, &ack_pkt, skb);
skb               985 drivers/infiniband/sw/rxe/rxe_resp.c 	struct sk_buff *skb;
skb               988 drivers/infiniband/sw/rxe/rxe_resp.c 	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
skb               991 drivers/infiniband/sw/rxe/rxe_resp.c 	if (!skb) {
skb              1002 drivers/infiniband/sw/rxe/rxe_resp.c 	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
skb              1003 drivers/infiniband/sw/rxe/rxe_resp.c 	memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
skb              1004 drivers/infiniband/sw/rxe/rxe_resp.c 	       sizeof(skb->cb) - sizeof(ack_pkt));
skb              1006 drivers/infiniband/sw/rxe/rxe_resp.c 	skb_get(skb);
skb              1008 drivers/infiniband/sw/rxe/rxe_resp.c 	res->atomic.skb = skb;
skb              1013 drivers/infiniband/sw/rxe/rxe_resp.c 	rc = rxe_xmit_packet(qp, &ack_pkt, skb);
skb              1041 drivers/infiniband/sw/rxe/rxe_resp.c 	struct sk_buff *skb;
skb              1044 drivers/infiniband/sw/rxe/rxe_resp.c 		skb = skb_dequeue(&qp->req_pkts);
skb              1046 drivers/infiniband/sw/rxe/rxe_resp.c 		kfree_skb(skb);
skb              1141 drivers/infiniband/sw/rxe/rxe_resp.c 			skb_get(res->atomic.skb);
skb              1143 drivers/infiniband/sw/rxe/rxe_resp.c 			rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
skb              1206 drivers/infiniband/sw/rxe/rxe_resp.c 	struct sk_buff *skb;
skb              1208 drivers/infiniband/sw/rxe/rxe_resp.c 	while ((skb = skb_dequeue(&qp->req_pkts))) {
skb              1210 drivers/infiniband/sw/rxe/rxe_resp.c 		kfree_skb(skb);
skb               183 drivers/infiniband/sw/rxe/rxe_verbs.h 			struct sk_buff	*skb;
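
The rxe_resp.c atomic path above caches its ACK: the pkt info is copied into skb->cb, skb_get() takes an extra reference before the transmit (which consumes one), and the skb is stashed in res->atomic.skb so a duplicate atomic request can be answered by re-referencing and resending the same buffer. The reference bracket, schematically (demo_xmit_packet() is a stand-in):

    /* cache + first send: one reference for xmit, one kept in the resource */
    skb_get(skb);
    res->atomic.skb = skb;
    rc = demo_xmit_packet(qp, skb);        /* consumes one reference */

    /* later, on a retried atomic: resend without rebuilding the ACK */
    skb_get(res->atomic.skb);
    rc = demo_xmit_packet(qp, res->atomic.skb);
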
skb               299 drivers/infiniband/sw/siw/siw.h 	struct sk_buff *skb;
skb               562 drivers/infiniband/sw/siw/siw.h int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
skb               712 drivers/infiniband/sw/siw/siw.h 	crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
skb                57 drivers/infiniband/sw/siw/siw_qp_rx.c 		rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
skb               110 drivers/infiniband/sw/siw/siw_qp_rx.c 	rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
skb               878 drivers/infiniband/sw/siw/siw_qp_rx.c 	struct sk_buff *skb = srx->skb;
skb               912 drivers/infiniband/sw/siw/siw_qp_rx.c 	skb_copy_bits(skb, srx->skb_offset, infop, to_copy);
skb               931 drivers/infiniband/sw/siw/siw_qp_rx.c 	skb_copy_bits(skb, srx->skb_offset, infop, to_copy);
skb               954 drivers/infiniband/sw/siw/siw_qp_rx.c 	struct sk_buff *skb = srx->skb;
skb               964 drivers/infiniband/sw/siw/siw_qp_rx.c 	skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem);
skb               999 drivers/infiniband/sw/siw/siw_qp_rx.c 	struct sk_buff *skb = srx->skb;
skb              1013 drivers/infiniband/sw/siw/siw_qp_rx.c 		skb_copy_bits(skb, srx->skb_offset,
skb              1082 drivers/infiniband/sw/siw/siw_qp_rx.c 		skb_copy_bits(skb, srx->skb_offset,
skb              1340 drivers/infiniband/sw/siw/siw_qp_rx.c int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
skb              1347 drivers/infiniband/sw/siw/siw_qp_rx.c 	srx->skb = skb;
skb              1348 drivers/infiniband/sw/siw/siw_qp_rx.c 	srx->skb_new = skb->len - off;
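
The siw entries use the tcp_read_sock() actor interface: siw_tcp_rx_data() receives each in-order skb plus an offset, records how many bytes are newly available, and copies data out with skb_copy_bits() without ever taking ownership of the skb. A minimal actor of the same shape (demo_rx and the 16-byte header are assumptions, not siw's layout):

    #include <linux/skbuff.h>
    #include <net/tcp.h>

    struct demo_rx {
            struct sk_buff *skb;
            unsigned int skb_offset;
            unsigned int skb_new;
    };

    static int demo_tcp_rx(read_descriptor_t *rd_desc, struct sk_buff *skb,
                           unsigned int off, size_t len)
    {
            struct demo_rx *rx = rd_desc->arg.data;
            char hdr[16];
            int n = min_t(size_t, len, sizeof(hdr));

            rx->skb = skb;                 /* borrowed: TCP still owns it */
            rx->skb_offset = off;
            rx->skb_new = skb->len - off;  /* bytes not yet consumed */

            /* works on non-linear skbs too; no skb_pull() on borrowed data */
            if (skb_copy_bits(skb, off, hdr, n) < 0)
                    return 0;              /* nothing consumed */
            return n;                      /* tell TCP how much we ate */
    }
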
skb               140 drivers/infiniband/ulp/ipoib/ipoib.h static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
skb               142 drivers/infiniband/ulp/ipoib/ipoib.h 	char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
skb               149 drivers/infiniband/ulp/ipoib/ipoib.h 	skb_reset_mac_header(skb);
skb               150 drivers/infiniband/ulp/ipoib/ipoib.h 	skb_pull(skb, IPOIB_HARD_LEN);
skb               185 drivers/infiniband/ulp/ipoib/ipoib.h 	struct sk_buff *skb;
skb               190 drivers/infiniband/ulp/ipoib/ipoib.h 	struct sk_buff *skb;
skb               260 drivers/infiniband/ulp/ipoib/ipoib.h 	struct sk_buff *skb;
skb               498 drivers/infiniband/ulp/ipoib/ipoib.h int ipoib_send(struct net_device *dev, struct sk_buff *skb,
skb               526 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
skb               545 drivers/infiniband/ulp/ipoib/ipoib.h 	struct sk_buff *skb = tx_req->skb;
skb               546 drivers/infiniband/ulp/ipoib/ipoib.h 	skb_frag_t *frags = skb_shinfo(skb)->frags;
skb               547 drivers/infiniband/ulp/ipoib/ipoib.h 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb               550 drivers/infiniband/ulp/ipoib/ipoib.h 	if (skb_headlen(skb)) {
skb               552 drivers/infiniband/ulp/ipoib/ipoib.h 		priv->tx_sge[0].length       = skb_headlen(skb);
skb               666 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx);
skb               675 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
skb               721 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
skb               769 drivers/infiniband/ulp/ipoib/ipoib.h static inline void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
skb               772 drivers/infiniband/ulp/ipoib/ipoib.h 	dev_kfree_skb_any(skb);
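
IPoIB's skb_add_pseudo_hdr() above exists because the wire format has no Ethernet-style hard header: a dummy pseudo header is pushed so the mac header pointer is valid for the stack, then the whole hard header is pulled back off before the packet goes up. The push/reset/pull bracket in isolation (lengths are hypothetical stand-ins for IPOIB_PSEUDO_LEN/IPOIB_HARD_LEN):

    #include <linux/skbuff.h>
    #include <linux/string.h>

    #define DEMO_PSEUDO_LEN 20
    #define DEMO_HARD_LEN   24

    static void demo_add_pseudo_hdr(struct sk_buff *skb)
    {
            char *data = skb_push(skb, DEMO_PSEUDO_LEN);   /* grow into headroom */

            memset(data, 0, DEMO_PSEUDO_LEN);              /* dummy L2 address */
            skb_reset_mac_header(skb);                     /* mac hdr = pseudo hdr */
            skb_pull(skb, DEMO_HARD_LEN);                  /* data past hard header */
    }
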
skb               109 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
skb               110 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		priv->cm.srq_ring[id].skb = NULL;
skb               134 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		dev_kfree_skb_any(rx->rx_ring[id].skb);
skb               135 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		rx->rx_ring[id].skb = NULL;
skb               148 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct sk_buff *skb;
skb               151 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
skb               152 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (unlikely(!skb))
skb               159 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb_reserve(skb, IPOIB_CM_RX_RESERVE);
skb               161 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
skb               164 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		dev_kfree_skb_any(skb);
skb               173 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
skb               181 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	rx_ring[id].skb = skb;
skb               182 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	return skb;
skb               191 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	dev_kfree_skb_any(skb);
skb               202 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (rx_ring[i].skb) {
skb               205 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			dev_kfree_skb_any(rx_ring[i].skb);
skb               527 drivers/infiniband/ulp/ipoib/ipoib_cm.c static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
skb               535 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb->tail += size;
skb               536 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb->len += size;
skb               539 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	num_frags = skb_shinfo(skb)->nr_frags;
skb               541 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               547 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			--skb_shinfo(skb)->nr_frags;
skb               552 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			skb->data_len += size;
skb               553 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			skb->truesize += size;
skb               554 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			skb->len += size;
skb               565 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct sk_buff *skb, *newskb;
skb               594 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb = rx_ring[wr_id].skb;
skb               634 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			skb_copy_from_linear_data(skb, small_skb->data, dlen);
skb               638 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			skb = small_skb;
skb               665 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
skb               668 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
skb               669 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb_add_pseudo_hdr(skb);
skb               672 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	dev->stats.rx_bytes += skb->len;
skb               674 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb->dev = dev;
skb               676 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb->pkt_type = PACKET_HOST;
skb               677 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	netif_receive_skb(skb);
skb               708 drivers/infiniband/ulp/ipoib/ipoib_cm.c void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
skb               713 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	unsigned int usable_sge = tx->max_send_sge - !!skb_headlen(skb);
skb               715 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (unlikely(skb->len > tx->mtu)) {
skb               717 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			   skb->len, tx->mtu);
skb               720 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
skb               723 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (skb_shinfo(skb)->nr_frags > usable_sge) {
skb               724 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (skb_linearize(skb) < 0) {
skb               728 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			dev_kfree_skb_any(skb);
skb               732 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (skb_shinfo(skb)->nr_frags > usable_sge) {
skb               736 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			dev_kfree_skb_any(skb);
skb               741 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		       tx->tx_head, skb->len, tx->qp->qp_num);
skb               751 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	tx_req->skb = skb;
skb               755 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		dev_kfree_skb_any(skb);
skb               766 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb_orphan(skb);
skb               767 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb_dst_drop(skb);
skb               783 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		dev_kfree_skb_any(skb);
skb               817 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	dev->stats.tx_bytes += tx_req->skb->len;
skb               819 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	dev_kfree_skb_any(tx_req->skb);
skb               994 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct sk_buff *skb;
skb              1036 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		while ((skb = __skb_dequeue(&p->neigh->queue)))
skb              1037 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			__skb_queue_tail(&skqueue, skb);
skb              1041 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	while ((skb = __skb_dequeue(&skqueue))) {
skb              1042 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		skb->dev = p->dev;
skb              1043 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ret = dev_queue_xmit(skb);
skb              1234 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		dev_kfree_skb_any(tx_req->skb);
skb              1431 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct sk_buff *skb;
skb              1438 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
skb              1442 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb              1443 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
skb              1444 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
skb              1447 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		else if (skb->protocol == htons(ETH_P_IPV6)) {
skb              1448 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
skb              1449 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb              1452 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		dev_kfree_skb_any(skb);
skb              1462 drivers/infiniband/ulp/ipoib/ipoib_cm.c void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
skb              1468 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb_dst_update_pmtu(skb, mtu);
skb              1470 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	skb_queue_tail(&priv->cm.skb_queue, skb);
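
ipoib_cm_send() above shows two defensive moves worth noting: oversized packets are handed to ipoib_cm_skb_too_long(), which queues them so a work item can emit ICMP FRAG_NEEDED / PKT_TOOBIG later, and packets with more page fragments than usable SGEs are flattened with skb_linearize(). The linearize fallback in isolation (max_send_sge is the device limit):

    /* one SGE is reserved for the linear head when it is non-empty */
    unsigned int usable_sge = max_send_sge - !!skb_headlen(skb);

    if (skb_shinfo(skb)->nr_frags > usable_sge) {
            if (skb_linearize(skb) < 0) {          /* can fail under pressure */
                    dev_kfree_skb_any(skb);
                    return;
            }
            /* mirror the driver's re-check after linearizing */
            if (skb_shinfo(skb)->nr_frags > usable_sge) {
                    dev_kfree_skb_any(skb);
                    return;
            }
    }
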
skb               115 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		dev_kfree_skb_any(priv->rx_ring[id].skb);
skb               116 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		priv->rx_ring[id].skb = NULL;
skb               125 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	struct sk_buff *skb;
skb               131 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
skb               132 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	if (unlikely(!skb))
skb               139 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
skb               142 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
skb               147 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	priv->rx_ring[id].skb = skb;
skb               148 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	return skb;
skb               150 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	dev_kfree_skb_any(skb);
skb               177 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	struct sk_buff *skb;
skb               191 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	skb  = priv->rx_ring[wr_id].skb;
skb               199 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		dev_kfree_skb_any(skb);
skb               200 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		priv->rx_ring[wr_id].skb = NULL;
skb               221 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	skb_put(skb, wc->byte_len);
skb               224 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	dgid = &((struct ib_grh *)skb->data)->dgid;
skb               227 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		skb->pkt_type = PACKET_HOST;
skb               229 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		skb->pkt_type = PACKET_BROADCAST;
skb               231 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		skb->pkt_type = PACKET_MULTICAST;
skb               233 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	sgid = &((struct ib_grh *)skb->data)->sgid;
skb               247 drivers/infiniband/ulp/ipoib/ipoib_ib.c 			dev_kfree_skb_any(skb);
skb               252 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	skb_pull(skb, IB_GRH_BYTES);
skb               254 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
skb               255 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	skb_add_pseudo_hdr(skb);
skb               258 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	dev->stats.rx_bytes += skb->len;
skb               259 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	if (skb->pkt_type == PACKET_MULTICAST)
skb               262 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	skb->dev = dev;
skb               265 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               267 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	napi_gro_receive(&priv->recv_napi, skb);
skb               277 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	struct sk_buff *skb = tx_req->skb;
skb               282 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	if (skb_headlen(skb)) {
skb               283 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
skb               292 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
skb               293 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               306 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
skb               312 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
skb               320 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	struct sk_buff *skb = tx_req->skb;
skb               325 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	if (skb_headlen(skb)) {
skb               326 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
skb               332 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
skb               333 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               405 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	dev->stats.tx_bytes += tx_req->skb->len;
skb               407 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	dev_kfree_skb_any(tx_req->skb);
skb               547 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	struct sk_buff *skb = tx_req->skb;
skb               556 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		priv->tx_wr.mss		= skb_shinfo(skb)->gso_size;
skb               566 drivers/infiniband/ulp/ipoib/ipoib_ib.c int ipoib_send(struct net_device *dev, struct sk_buff *skb,
skb               573 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
skb               575 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	if (skb_is_gso(skb)) {
skb               576 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               577 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		phead = skb->data;
skb               578 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		if (unlikely(!skb_pull(skb, hlen))) {
skb               582 drivers/infiniband/ulp/ipoib/ipoib_ib.c 			dev_kfree_skb_any(skb);
skb               586 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
skb               588 drivers/infiniband/ulp/ipoib/ipoib_ib.c 				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
skb               591 drivers/infiniband/ulp/ipoib/ipoib_ib.c 			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
skb               597 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	if (skb_shinfo(skb)->nr_frags > usable_sge) {
skb               598 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		if (skb_linearize(skb) < 0) {
skb               602 drivers/infiniband/ulp/ipoib/ipoib_ib.c 			dev_kfree_skb_any(skb);
skb               606 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		if (skb_shinfo(skb)->nr_frags > usable_sge) {
skb               610 drivers/infiniband/ulp/ipoib/ipoib_ib.c 			dev_kfree_skb_any(skb);
skb               617 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		       skb->len, address, dqpn);
skb               627 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	tx_req->skb = skb;
skb               630 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		dev_kfree_skb_any(skb);
skb               634 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               645 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	skb_orphan(skb);
skb               646 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	skb_dst_drop(skb);
skb               659 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		dev_kfree_skb_any(skb);
skb               730 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		if (priv->rx_ring[i].skb)
skb               812 drivers/infiniband/ulp/ipoib/ipoib_ib.c 				dev_kfree_skb_any(tx_req->skb);
skb               821 drivers/infiniband/ulp/ipoib/ipoib_ib.c 				if (!rx_req->skb)
skb               825 drivers/infiniband/ulp/ipoib/ipoib_ib.c 				dev_kfree_skb_any(rx_req->skb);
skb               826 drivers/infiniband/ulp/ipoib/ipoib_ib.c 				rx_req->skb = NULL;
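
The ipoib_ib.c receive-ring allocation above is the standard RX recipe: allocate with extra room for the headers, skb_reserve() so the eventual IP header lands aligned, DMA-map the linear data, and unwind with dev_kfree_skb_any() if the mapping fails. A sketch under assumed sizes and ring types:

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>
    #include <rdma/ib_verbs.h>

    #define DEMO_BUF_SIZE 2048                     /* hypothetical */
    #define DEMO_RESERVE  4                        /* hypothetical alignment pad */

    struct demo_rx_buf { struct sk_buff *skb; u64 mapping; };
    struct demo_priv   { struct ib_device *ca; struct demo_rx_buf rx_ring[256]; };

    static struct sk_buff *demo_alloc_rx_skb(struct demo_priv *priv, int id)
    {
            struct sk_buff *skb;
            u64 mapping;

            skb = dev_alloc_skb(DEMO_BUF_SIZE + DEMO_RESERVE);
            if (unlikely(!skb))
                    return NULL;

            skb_reserve(skb, DEMO_RESERVE);        /* align the payload */

            mapping = ib_dma_map_single(priv->ca, skb->data, DEMO_BUF_SIZE,
                                        DMA_FROM_DEVICE);
            if (unlikely(ib_dma_mapping_error(priv->ca, mapping))) {
                    dev_kfree_skb_any(skb);        /* unwind the allocation */
                    return NULL;
            }
            priv->rx_ring[id].skb = skb;           /* ring owns it until a CQE */
            priv->rx_ring[id].mapping = mapping;
            return skb;
    }
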
skb               611 drivers/infiniband/ulp/ipoib/ipoib_main.c 	struct sk_buff *skb;
skb               613 drivers/infiniband/ulp/ipoib/ipoib_main.c 	while ((skb = __skb_dequeue(&path->queue)))
skb               614 drivers/infiniband/ulp/ipoib/ipoib_main.c 		dev_kfree_skb_irq(skb);
skb               703 drivers/infiniband/ulp/ipoib/ipoib_main.c static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
skb               707 drivers/infiniband/ulp/ipoib/ipoib_main.c 	phdr = skb_push(skb, sizeof(*phdr));
skb               752 drivers/infiniband/ulp/ipoib/ipoib_main.c 	struct sk_buff *skb;
skb               803 drivers/infiniband/ulp/ipoib/ipoib_main.c 		while ((skb = __skb_dequeue(&path->queue)))
skb               804 drivers/infiniband/ulp/ipoib/ipoib_main.c 			__skb_queue_tail(&skqueue, skb);
skb               832 drivers/infiniband/ulp/ipoib/ipoib_main.c 			while ((skb = __skb_dequeue(&neigh->queue)))
skb               833 drivers/infiniband/ulp/ipoib/ipoib_main.c 				__skb_queue_tail(&skqueue, skb);
skb               849 drivers/infiniband/ulp/ipoib/ipoib_main.c 	while ((skb = __skb_dequeue(&skqueue))) {
skb               851 drivers/infiniband/ulp/ipoib/ipoib_main.c 		skb->dev = dev;
skb               852 drivers/infiniband/ulp/ipoib/ipoib_main.c 		ret = dev_queue_xmit(skb);
skb               946 drivers/infiniband/ulp/ipoib/ipoib_main.c static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
skb               960 drivers/infiniband/ulp/ipoib/ipoib_main.c 		dev_kfree_skb_any(skb);
skb               996 drivers/infiniband/ulp/ipoib/ipoib_main.c 				push_pseudo_header(skb, neigh->daddr);
skb               997 drivers/infiniband/ulp/ipoib/ipoib_main.c 				__skb_queue_tail(&neigh->queue, skb);
skb              1005 drivers/infiniband/ulp/ipoib/ipoib_main.c 			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
skb              1016 drivers/infiniband/ulp/ipoib/ipoib_main.c 			push_pseudo_header(skb, neigh->daddr);
skb              1017 drivers/infiniband/ulp/ipoib/ipoib_main.c 			__skb_queue_tail(&neigh->queue, skb);
skb              1031 drivers/infiniband/ulp/ipoib/ipoib_main.c 	dev_kfree_skb_any(skb);
skb              1039 drivers/infiniband/ulp/ipoib/ipoib_main.c static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
skb              1072 drivers/infiniband/ulp/ipoib/ipoib_main.c 			push_pseudo_header(skb, phdr->hwaddr);
skb              1073 drivers/infiniband/ulp/ipoib/ipoib_main.c 			__skb_queue_tail(&path->queue, skb);
skb              1083 drivers/infiniband/ulp/ipoib/ipoib_main.c 	path->ah->last_send = rn->send(dev, skb, path->ah->ah,
skb              1089 drivers/infiniband/ulp/ipoib/ipoib_main.c 	dev_kfree_skb_any(skb);
skb              1094 drivers/infiniband/ulp/ipoib/ipoib_main.c static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1103 drivers/infiniband/ulp/ipoib/ipoib_main.c 	phdr = (struct ipoib_pseudo_header *) skb->data;
skb              1104 drivers/infiniband/ulp/ipoib/ipoib_main.c 	skb_pull(skb, sizeof(*phdr));
skb              1105 drivers/infiniband/ulp/ipoib/ipoib_main.c 	header = (struct ipoib_header *) skb->data;
skb              1116 drivers/infiniband/ulp/ipoib/ipoib_main.c 			dev_kfree_skb_any(skb);
skb              1126 drivers/infiniband/ulp/ipoib/ipoib_main.c 		ipoib_mcast_send(dev, phdr->hwaddr, skb);
skb              1137 drivers/infiniband/ulp/ipoib/ipoib_main.c 			neigh = neigh_add_path(skb, phdr->hwaddr, dev);
skb              1145 drivers/infiniband/ulp/ipoib/ipoib_main.c 		unicast_arp_send(skb, dev, phdr);
skb              1150 drivers/infiniband/ulp/ipoib/ipoib_main.c 		dev_kfree_skb_any(skb);
skb              1158 drivers/infiniband/ulp/ipoib/ipoib_main.c 			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
skb              1162 drivers/infiniband/ulp/ipoib/ipoib_main.c 		neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
skb              1170 drivers/infiniband/ulp/ipoib/ipoib_main.c 		push_pseudo_header(skb, phdr->hwaddr);
skb              1172 drivers/infiniband/ulp/ipoib/ipoib_main.c 		__skb_queue_tail(&neigh->queue, skb);
skb              1176 drivers/infiniband/ulp/ipoib/ipoib_main.c 		dev_kfree_skb_any(skb);
skb              1199 drivers/infiniband/ulp/ipoib/ipoib_main.c static int ipoib_hard_header(struct sk_buff *skb,
skb              1208 drivers/infiniband/ulp/ipoib/ipoib_main.c 	header = skb_push(skb, sizeof(*header));
skb              1218 drivers/infiniband/ulp/ipoib/ipoib_main.c 	push_pseudo_header(skb, daddr);
skb              1445 drivers/infiniband/ulp/ipoib/ipoib_main.c 	struct sk_buff *skb;
skb              1448 drivers/infiniband/ulp/ipoib/ipoib_main.c 	while ((skb = __skb_dequeue(&neigh->queue))) {
skb              1450 drivers/infiniband/ulp/ipoib/ipoib_main.c 		dev_kfree_skb_any(skb);
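
ipoib_start_xmit() above pulls the pseudo header off immediately, so whenever the destination is still resolving (neigh_add_path(), unicast_arp_send()) the driver must push_pseudo_header() back on before parking the skb, and it bounds the parking queue to keep memory use finite. The park step, with a hypothetical cap standing in for the driver's limit:

    #define DEMO_MAX_QUEUE 3                       /* hypothetical cap */

    if (skb_queue_len(&path_queue) < DEMO_MAX_QUEUE) {
            demo_push_pseudo_header(skb, daddr);   /* undo the earlier skb_pull */
            __skb_queue_tail(&path_queue, skb);    /* flushed when path resolves */
    } else {
            dev_kfree_skb_any(skb);                /* queue full: drop */
    }
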
skb               309 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
skb               313 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 		skb->dev = dev;
skb               315 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 		ret = dev_queue_xmit(skb);
skb               760 drivers/infiniband/ulp/ipoib/ipoib_multicast.c void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
skb               774 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 		dev_kfree_skb_any(skb);
skb               790 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 				dev_kfree_skb_any(skb);
skb               802 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 			skb_push(skb, sizeof(struct ipoib_pseudo_header));
skb               803 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 			skb_queue_tail(&mcast->pkt_queue, skb);
skb               806 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 			dev_kfree_skb_any(skb);
skb               830 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 		mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah,
skb                45 drivers/infiniband/ulp/ipoib/ipoib_netlink.c static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb                50 drivers/infiniband/ulp/ipoib/ipoib_netlink.c 	if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey))
skb                54 drivers/infiniband/ulp/ipoib/ipoib_netlink.c 	if (nla_put_u16(skb, IFLA_IPOIB_MODE, val))
skb                58 drivers/infiniband/ulp/ipoib/ipoib_netlink.c 	if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val))
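
ipoib_fill_info() above is the usual rtnl_link_ops fill_info shape: every nla_put_u16() can fail when the message skb runs out of tailroom, so each call is checked and failure reports -EMSGSIZE so the caller can retry with a bigger buffer. Minimal version (attribute values are placeholders):

    #include <net/netlink.h>
    #include <linux/if_link.h>

    static int demo_fill_info(struct sk_buff *skb, const struct net_device *dev)
    {
            if (nla_put_u16(skb, IFLA_IPOIB_PKEY, 0xffff))
                    goto nla_put_failure;
            if (nla_put_u16(skb, IFLA_IPOIB_MODE, 0))
                    goto nla_put_failure;
            return 0;

    nla_put_failure:
            return -EMSGSIZE;                      /* caller resizes and retries */
    }
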
skb               334 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 				  struct sk_buff *skb, u8 def_port)
skb               337 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
skb               364 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 			  struct sk_buff *skb)
skb               366 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
skb               370 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	if (!__vlan_get_tag(skb, &vlan_tci)) {
skb               387 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
skb               389 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
skb               393 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	if (skb_vlan_tag_present(skb)) {
skb               394 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 		u8 pcp = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
skb               412 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 			  struct sk_buff *skb)
skb               416 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	switch (vlan_get_protocol(skb)) {
skb               418 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 		proto = ipv6_hdr(skb)->nexthdr;
skb               429 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 		proto = ip_hdr(skb)->protocol;
skb               447 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c u8 opa_vnic_calc_entropy(struct sk_buff *skb)
skb               449 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	u32 hash = skb_get_hash(skb);
skb               471 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c static inline int opa_vnic_wire_length(struct sk_buff *skb)
skb               476 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
skb               479 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	return (skb->len + pad_len) >> 3;
skb               483 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
skb               491 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	hdr = skb_push(skb, OPA_VNIC_HDR_LEN);
skb               493 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	entropy = opa_vnic_calc_entropy(skb);
skb               495 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	len = opa_vnic_wire_length(skb);
skb               496 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	dlid = opa_vnic_get_dlid(adapter, skb, def_port);
skb               497 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	sc = opa_vnic_get_sc(info, skb);
skb               498 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	rc = opa_vnic_get_rc(info, skb);
skb               501 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	mdata = skb_push(skb, sizeof(*mdata));
skb               502 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 	mdata->vl = opa_vnic_get_vl(adapter, skb);
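
opa_vnic_wire_length() above sizes the frame in 8-byte units; `-(x) & 0x7` is the two's-complement idiom for the padding needed to reach the next multiple of 8, i.e. (8 - x % 8) % 8. A quick check of just that idiom (the ICRC tail handling is elided in the index, so it is omitted here too):

    unsigned int len = 61;
    unsigned int pad = -len & 0x7;         /* (8 - 61 % 8) % 8 == 3 */
    /* len + pad == 64 bytes, i.e. 64 >> 3 == 8 eight-byte units */
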
skb               300 drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
skb               301 drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
skb               302 drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h u8 opa_vnic_calc_entropy(struct sk_buff *skb);
skb                79 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
skb                84 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 	v_dbg("xmit: queue %d skb len %d\n", skb->queue_mapping, skb->len);
skb                86 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 	if (unlikely(skb->len < ETH_ZLEN)) {
skb                87 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 		if (skb_padto(skb, ETH_ZLEN))
skb                90 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 		skb_put(skb, ETH_ZLEN - skb->len);
skb                93 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 	opa_vnic_encap_skb(adapter, skb);
skb                94 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 	return adapter->rn_ops->ndo_start_xmit(skb, netdev);
skb                97 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
skb               105 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 	mdata = skb_push(skb, sizeof(*mdata));
skb               106 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 	mdata->entropy = opa_vnic_calc_entropy(skb);
skb               107 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 	mdata->vl = opa_vnic_get_vl(adapter, skb);
skb               108 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 	rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
skb               109 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c 	skb_pull(skb, sizeof(*mdata));
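
opa_vnic_select_queue() above brackets the lower device's queue selection with a push/pull of its metadata: the metadata must be visible while the lower ndo_select_queue runs, but the skb has to leave unchanged for the real transmit. Simplified sketch (demo_mdata and lower_select_queue() are assumptions, not the opa_vnic layout, and the real ndo takes an extra sb_dev argument):

    struct demo_mdata { u8 vl; u8 entropy; } __packed;

    static u16 demo_select_queue(struct net_device *netdev, struct sk_buff *skb)
    {
            struct demo_mdata *mdata;
            u16 q;

            mdata = skb_push(skb, sizeof(*mdata)); /* visible to the lower dev */
            mdata->entropy = (u8)skb_get_hash(skb);
            mdata->vl = 0;

            q = lower_select_queue(netdev, skb);   /* hypothetical lower ndo */
            skb_pull(skb, sizeof(*mdata));         /* restore original layout */
            return q;
    }
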
skb               384 drivers/isdn/capi/capi.c gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
skb               389 drivers/isdn/capi/capi.c 		u16 datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4 + 4 + 2);
skb               402 drivers/isdn/capi/capi.c static int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb)
skb               404 drivers/isdn/capi/capi.c 	unsigned int datalen = skb->len - CAPIMSG_LEN(skb->data);
skb               421 drivers/isdn/capi/capi.c 		kfree_skb(skb);
skb               440 drivers/isdn/capi/capi.c 	nskb = gen_data_b3_resp_for(mp, skb);
skb               446 drivers/isdn/capi/capi.c 	datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4);
skb               451 drivers/isdn/capi/capi.c 		skb_pull(skb, CAPIMSG_LEN(skb->data));
skb               453 drivers/isdn/capi/capi.c 			 datahandle, skb->len);
skb               454 drivers/isdn/capi/capi.c 		ld->ops->receive_buf(tty, skb->data, NULL, skb->len);
skb               466 drivers/isdn/capi/capi.c 	kfree_skb(skb);
skb               478 drivers/isdn/capi/capi.c 	struct sk_buff *skb;
skb               480 drivers/isdn/capi/capi.c 	while ((skb = skb_dequeue(&mp->inqueue)) != NULL)
skb               481 drivers/isdn/capi/capi.c 		if (handle_recv_skb(mp, skb) < 0) {
skb               482 drivers/isdn/capi/capi.c 			skb_queue_head(&mp->inqueue, skb);
skb               490 drivers/isdn/capi/capi.c 	struct sk_buff *skb;
skb               507 drivers/isdn/capi/capi.c 		skb = __skb_dequeue(&mp->outqueue);
skb               508 drivers/isdn/capi/capi.c 		if (!skb) {
skb               512 drivers/isdn/capi/capi.c 		len = (u16)skb->len;
skb               517 drivers/isdn/capi/capi.c 		skb_push(skb, CAPI_DATA_B3_REQ_LEN);
skb               518 drivers/isdn/capi/capi.c 		memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
skb               519 drivers/isdn/capi/capi.c 		capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
skb               520 drivers/isdn/capi/capi.c 		capimsg_setu16(skb->data, 2, mp->ap->applid);
skb               521 drivers/isdn/capi/capi.c 		capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
skb               522 drivers/isdn/capi/capi.c 		capimsg_setu8 (skb->data, 5, CAPI_REQ);
skb               523 drivers/isdn/capi/capi.c 		capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
skb               524 drivers/isdn/capi/capi.c 		capimsg_setu32(skb->data, 8, mp->ncci);	/* NCCI */
skb               525 drivers/isdn/capi/capi.c 		capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
skb               526 drivers/isdn/capi/capi.c 		capimsg_setu16(skb->data, 16, len);	/* Data length */
skb               527 drivers/isdn/capi/capi.c 		capimsg_setu16(skb->data, 18, datahandle);
skb               528 drivers/isdn/capi/capi.c 		capimsg_setu16(skb->data, 20, 0);	/* Flags */
skb               531 drivers/isdn/capi/capi.c 			skb_pull(skb, CAPI_DATA_B3_REQ_LEN);
skb               534 drivers/isdn/capi/capi.c 			__skb_queue_head(&mp->outqueue, skb);
skb               540 drivers/isdn/capi/capi.c 		errcode = capi20_put_message(mp->ap, skb);
skb               549 drivers/isdn/capi/capi.c 			skb_pull(skb, CAPI_DATA_B3_REQ_LEN);
skb               552 drivers/isdn/capi/capi.c 			__skb_queue_head(&mp->outqueue, skb);
skb               561 drivers/isdn/capi/capi.c 		kfree_skb(skb);
skb               569 drivers/isdn/capi/capi.c static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
skb               580 drivers/isdn/capi/capi.c 	if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) {
skb               581 drivers/isdn/capi/capi.c 		u16 info = CAPIMSG_U16(skb->data, 12); // Info field
skb               583 drivers/isdn/capi/capi.c 			capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
skb               585 drivers/isdn/capi/capi.c 	if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_IND)
skb               586 drivers/isdn/capi/capi.c 		capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
skb               588 drivers/isdn/capi/capi.c 	if (CAPIMSG_COMMAND(skb->data) != CAPI_DATA_B3) {
skb               589 drivers/isdn/capi/capi.c 		skb_queue_tail(&cdev->recvqueue, skb);
skb               595 drivers/isdn/capi/capi.c 	skb_queue_tail(&cdev->recvqueue, skb);
skb               600 drivers/isdn/capi/capi.c 	np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data));
skb               603 drivers/isdn/capi/capi.c 		skb_queue_tail(&cdev->recvqueue, skb);
skb               610 drivers/isdn/capi/capi.c 		skb_queue_tail(&cdev->recvqueue, skb);
skb               614 drivers/isdn/capi/capi.c 	if (CAPIMSG_SUBCOMMAND(skb->data) == CAPI_IND) {
skb               615 drivers/isdn/capi/capi.c 		datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4 + 4 + 2);
skb               617 drivers/isdn/capi/capi.c 			 datahandle, skb->len-CAPIMSG_LEN(skb->data));
skb               618 drivers/isdn/capi/capi.c 		skb_queue_tail(&mp->inqueue, skb);
skb               622 drivers/isdn/capi/capi.c 	} else if (CAPIMSG_SUBCOMMAND(skb->data) == CAPI_CONF) {
skb               624 drivers/isdn/capi/capi.c 		datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4);
skb               627 drivers/isdn/capi/capi.c 			 CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4 + 2));
skb               628 drivers/isdn/capi/capi.c 		kfree_skb(skb);
skb               635 drivers/isdn/capi/capi.c 		skb_queue_tail(&cdev->recvqueue, skb);
skb               650 drivers/isdn/capi/capi.c 	struct sk_buff *skb;
skb               657 drivers/isdn/capi/capi.c 	skb = skb_dequeue(&cdev->recvqueue);
skb               658 drivers/isdn/capi/capi.c 	if (!skb) {
skb               662 drivers/isdn/capi/capi.c 					       (skb = skb_dequeue(&cdev->recvqueue)));
skb               666 drivers/isdn/capi/capi.c 	if (skb->len > count) {
skb               667 drivers/isdn/capi/capi.c 		skb_queue_head(&cdev->recvqueue, skb);
skb               670 drivers/isdn/capi/capi.c 	if (copy_to_user(buf, skb->data, skb->len)) {
skb               671 drivers/isdn/capi/capi.c 		skb_queue_head(&cdev->recvqueue, skb);
skb               674 drivers/isdn/capi/capi.c 	copied = skb->len;
skb               676 drivers/isdn/capi/capi.c 	kfree_skb(skb);
skb               685 drivers/isdn/capi/capi.c 	struct sk_buff *skb;
skb               694 drivers/isdn/capi/capi.c 	skb = alloc_skb(count, GFP_USER);
skb               695 drivers/isdn/capi/capi.c 	if (!skb)
skb               698 drivers/isdn/capi/capi.c 	if (copy_from_user(skb_put(skb, count), buf, count)) {
skb               699 drivers/isdn/capi/capi.c 		kfree_skb(skb);
skb               702 drivers/isdn/capi/capi.c 	mlen = CAPIMSG_LEN(skb->data);
skb               703 drivers/isdn/capi/capi.c 	if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
skb               705 drivers/isdn/capi/capi.c 		    (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
skb               706 drivers/isdn/capi/capi.c 			kfree_skb(skb);
skb               711 drivers/isdn/capi/capi.c 			kfree_skb(skb);
skb               715 drivers/isdn/capi/capi.c 	CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
skb               717 drivers/isdn/capi/capi.c 	if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
skb               719 drivers/isdn/capi/capi.c 			kfree_skb(skb);
skb               723 drivers/isdn/capi/capi.c 		capincci_free(cdev, CAPIMSG_NCCI(skb->data));
skb               727 drivers/isdn/capi/capi.c 	cdev->errcode = capi20_put_message(&cdev->ap, skb);
skb               730 drivers/isdn/capi/capi.c 		kfree_skb(skb);
skb              1050 drivers/isdn/capi/capi.c 	struct sk_buff *skb;
skb              1055 drivers/isdn/capi/capi.c 	skb = mp->outskb;
skb              1056 drivers/isdn/capi/capi.c 	if (skb) {
skb              1058 drivers/isdn/capi/capi.c 		__skb_queue_tail(&mp->outqueue, skb);
skb              1059 drivers/isdn/capi/capi.c 		mp->outbytes += skb->len;
skb              1062 drivers/isdn/capi/capi.c 	skb = alloc_skb(CAPI_DATA_B3_REQ_LEN + count, GFP_ATOMIC);
skb              1063 drivers/isdn/capi/capi.c 	if (!skb) {
skb              1069 drivers/isdn/capi/capi.c 	skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
skb              1070 drivers/isdn/capi/capi.c 	skb_put_data(skb, buf, count);
skb              1072 drivers/isdn/capi/capi.c 	__skb_queue_tail(&mp->outqueue, skb);
skb              1073 drivers/isdn/capi/capi.c 	mp->outbytes += skb->len;
skb              1085 drivers/isdn/capi/capi.c 	struct sk_buff *skb;
skb              1091 drivers/isdn/capi/capi.c 	skb = mp->outskb;
skb              1092 drivers/isdn/capi/capi.c 	if (skb) {
skb              1093 drivers/isdn/capi/capi.c 		if (skb_tailroom(skb) > 0) {
skb              1094 drivers/isdn/capi/capi.c 			skb_put_u8(skb, ch);
skb              1098 drivers/isdn/capi/capi.c 		__skb_queue_tail(&mp->outqueue, skb);
skb              1099 drivers/isdn/capi/capi.c 		mp->outbytes += skb->len;
skb              1103 drivers/isdn/capi/capi.c 	skb = alloc_skb(CAPI_DATA_B3_REQ_LEN + CAPI_MAX_BLKSIZE, GFP_ATOMIC);
skb              1104 drivers/isdn/capi/capi.c 	if (skb) {
skb              1105 drivers/isdn/capi/capi.c 		skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
skb              1106 drivers/isdn/capi/capi.c 		skb_put_u8(skb, ch);
skb              1107 drivers/isdn/capi/capi.c 		mp->outskb = skb;
skb              1125 drivers/isdn/capi/capi.c 	struct sk_buff *skb;
skb              1130 drivers/isdn/capi/capi.c 	skb = mp->outskb;
skb              1131 drivers/isdn/capi/capi.c 	if (skb) {
skb              1133 drivers/isdn/capi/capi.c 		__skb_queue_tail(&mp->outqueue, skb);
skb              1134 drivers/isdn/capi/capi.c 		mp->outbytes += skb->len;
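
The capi.c transmit loop above (the outqueue drain) builds each DATA_B3_REQ in place: CAPI_DATA_B3_REQ_LEN of headroom was reserved when the skb was allocated, so the loop skb_push()es it back on and fills the header with the capimsg_setu*() accessors; on a transient capi20_put_message() failure the header is skb_pull()ed off and the skb requeued at the head. The fill step, as in the indexed lines (applid, msgid, ncci, len, datahandle come from the surrounding context):

    skb_push(skb, CAPI_DATA_B3_REQ_LEN);
    memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
    capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);    /* message length */
    capimsg_setu16(skb->data, 2, applid);                  /* application id */
    capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);            /* command */
    capimsg_setu8 (skb->data, 5, CAPI_REQ);                /* subcommand */
    capimsg_setu16(skb->data, 6, msgid);                   /* message number */
    capimsg_setu32(skb->data, 8, ncci);                    /* NCCI */
    capimsg_setu16(skb->data, 16, len);                    /* data length */
    capimsg_setu16(skb->data, 18, datahandle);
    capimsg_setu16(skb->data, 20, 0);                      /* flags */
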
skb               315 drivers/isdn/capi/kcapi.c 	struct sk_buff *skb;
skb               323 drivers/isdn/capi/kcapi.c 	while ((skb = skb_dequeue(&ap->recv_queue))) {
skb               324 drivers/isdn/capi/kcapi.c 		if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND)
skb               329 drivers/isdn/capi/kcapi.c 		ap->recv_message(ap, skb);
skb               344 drivers/isdn/capi/kcapi.c 			     struct sk_buff *skb)
skb               352 drivers/isdn/capi/kcapi.c 		cdb = capi_message2str(skb->data);
skb               363 drivers/isdn/capi/kcapi.c 	cmd = CAPIMSG_COMMAND(skb->data);
skb               364 drivers/isdn/capi/kcapi.c 	subcmd = CAPIMSG_SUBCOMMAND(skb->data);
skb               378 drivers/isdn/capi/kcapi.c 			       ctr->cnr, CAPIMSG_APPID(skb->data),
skb               380 drivers/isdn/capi/kcapi.c 			       CAPIMSG_LEN(skb->data));
skb               382 drivers/isdn/capi/kcapi.c 			cdb = capi_message2str(skb->data);
skb               389 drivers/isdn/capi/kcapi.c 				       ctr->cnr, CAPIMSG_APPID(skb->data),
skb               391 drivers/isdn/capi/kcapi.c 				       CAPIMSG_LEN(skb->data));
skb               397 drivers/isdn/capi/kcapi.c 	ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data));
skb               400 drivers/isdn/capi/kcapi.c 		cdb = capi_message2str(skb->data);
skb               403 drivers/isdn/capi/kcapi.c 			       CAPIMSG_APPID(skb->data), cdb->buf);
skb               407 drivers/isdn/capi/kcapi.c 			       CAPIMSG_APPID(skb->data),
skb               411 drivers/isdn/capi/kcapi.c 	skb_queue_tail(&ap->recv_queue, skb);
skb               418 drivers/isdn/capi/kcapi.c 	kfree_skb(skb);
skb               769 drivers/isdn/capi/kcapi.c u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
skb               781 drivers/isdn/capi/kcapi.c 	if (skb->len < 12
skb               782 drivers/isdn/capi/kcapi.c 	    || !capi_cmd_valid(CAPIMSG_COMMAND(skb->data))
skb               783 drivers/isdn/capi/kcapi.c 	    || !capi_subcmd_valid(CAPIMSG_SUBCOMMAND(skb->data)))
skb               791 drivers/isdn/capi/kcapi.c 	ctr = get_capi_ctr_by_nr(CAPIMSG_CONTROLLER(skb->data));
skb               797 drivers/isdn/capi/kcapi.c 	cmd = CAPIMSG_COMMAND(skb->data);
skb               798 drivers/isdn/capi/kcapi.c 	subcmd = CAPIMSG_SUBCOMMAND(skb->data);
skb               815 drivers/isdn/capi/kcapi.c 			       CAPIMSG_CONTROLLER(skb->data),
skb               816 drivers/isdn/capi/kcapi.c 			       CAPIMSG_APPID(skb->data),
skb               818 drivers/isdn/capi/kcapi.c 			       CAPIMSG_LEN(skb->data));
skb               820 drivers/isdn/capi/kcapi.c 			_cdebbuf *cdb = capi_message2str(skb->data);
skb               823 drivers/isdn/capi/kcapi.c 				       CAPIMSG_CONTROLLER(skb->data),
skb               828 drivers/isdn/capi/kcapi.c 				       CAPIMSG_CONTROLLER(skb->data),
skb               829 drivers/isdn/capi/kcapi.c 				       CAPIMSG_APPID(skb->data),
skb               831 drivers/isdn/capi/kcapi.c 				       CAPIMSG_LEN(skb->data));
skb               834 drivers/isdn/capi/kcapi.c 	return ctr->send_message(ctr, skb);
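
capi20_put_message() above validates before dispatching: a message shorter than the 12-byte CAPI base header, or with an out-of-range command/subcommand, is rejected up front rather than passed to the controller. The guard, as the indexed lines show (the specific error constant returned is an assumption from the CAPI naming scheme):

    if (skb->len < 12 ||
        !capi_cmd_valid(CAPIMSG_COMMAND(skb->data)) ||
        !capi_subcmd_valid(CAPIMSG_SUBCOMMAND(skb->data)))
            return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;        /* reject early */
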
skb               680 drivers/isdn/hardware/mISDN/avmfritz.c avm_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
skb               685 drivers/isdn/hardware/mISDN/avmfritz.c 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
skb               691 drivers/isdn/hardware/mISDN/avmfritz.c 		ret = bchannel_senddata(bch, skb);
skb               720 drivers/isdn/hardware/mISDN/avmfritz.c 		dev_kfree_skb(skb);
skb               847 drivers/isdn/hardware/mISDN/hfcmulti.c 	struct sk_buff *skb;
skb               856 drivers/isdn/hardware/mISDN/hfcmulti.c 	skb = _alloc_mISDN_skb(PH_CONTROL_IND, HFC_VOL_CHANGE_TX,
skb               858 drivers/isdn/hardware/mISDN/hfcmulti.c 	if (skb)
skb               859 drivers/isdn/hardware/mISDN/hfcmulti.c 		recv_Bchannel_skb(bch, skb);
skb               879 drivers/isdn/hardware/mISDN/hfcmulti.c 	struct sk_buff *skb;
skb               889 drivers/isdn/hardware/mISDN/hfcmulti.c 	skb = _alloc_mISDN_skb(PH_CONTROL_IND, HFC_VOL_CHANGE_TX,
skb               891 drivers/isdn/hardware/mISDN/hfcmulti.c 	if (skb)
skb               892 drivers/isdn/hardware/mISDN/hfcmulti.c 		recv_Bchannel_skb(bch, skb);
skb              1835 drivers/isdn/hardware/mISDN/hfcmulti.c 	struct sk_buff	*skb;
skb              1906 drivers/isdn/hardware/mISDN/hfcmulti.c 			skb = mI_alloc_skb(512, GFP_ATOMIC);
skb              1907 drivers/isdn/hardware/mISDN/hfcmulti.c 			if (!skb) {
skb              1912 drivers/isdn/hardware/mISDN/hfcmulti.c 			hh = mISDN_HEAD_P(skb);
skb              1915 drivers/isdn/hardware/mISDN/hfcmulti.c 			skb_put_data(skb, hc->chan[ch].coeff, 512);
skb              1916 drivers/isdn/hardware/mISDN/hfcmulti.c 			recv_Bchannel_skb(bch, skb);
skb              2186 drivers/isdn/hardware/mISDN/hfcmulti.c 	struct sk_buff	*skb, **sp = NULL;
skb              2318 drivers/isdn/hardware/mISDN/hfcmulti.c 				skb = *sp;
skb              2319 drivers/isdn/hardware/mISDN/hfcmulti.c 				*sp = mI_alloc_skb(skb->len, GFP_ATOMIC);
skb              2321 drivers/isdn/hardware/mISDN/hfcmulti.c 					skb_put_data(*sp, skb->data, skb->len);
skb              2322 drivers/isdn/hardware/mISDN/hfcmulti.c 					skb_trim(skb, 0);
skb              2326 drivers/isdn/hardware/mISDN/hfcmulti.c 					*sp = skb;
skb              2327 drivers/isdn/hardware/mISDN/hfcmulti.c 					skb = NULL;
skb              2330 drivers/isdn/hardware/mISDN/hfcmulti.c 				skb = NULL;
skb              2344 drivers/isdn/hardware/mISDN/hfcmulti.c 			*sp = skb;
skb              2369 drivers/isdn/hardware/mISDN/hfcmulti.c 	struct sk_buff	*skb;
skb              2377 drivers/isdn/hardware/mISDN/hfcmulti.c 	skb = _alloc_mISDN_skb(MPH_INFORMATION_IND, id, sizeof(data), &data,
skb              2379 drivers/isdn/hardware/mISDN/hfcmulti.c 	if (!skb)
skb              2381 drivers/isdn/hardware/mISDN/hfcmulti.c 	recv_Dchannel_skb(dch, skb);
skb              3325 drivers/isdn/hardware/mISDN/hfcmulti.c handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
skb              3330 drivers/isdn/hardware/mISDN/hfcmulti.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb              3337 drivers/isdn/hardware/mISDN/hfcmulti.c 		if (skb->len < 1)
skb              3340 drivers/isdn/hardware/mISDN/hfcmulti.c 		ret = dchannel_senddata(dch, skb);
skb              3434 drivers/isdn/hardware/mISDN/hfcmulti.c 		dev_kfree_skb(skb);
skb              3454 drivers/isdn/hardware/mISDN/hfcmulti.c handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
skb              3459 drivers/isdn/hardware/mISDN/hfcmulti.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb              3464 drivers/isdn/hardware/mISDN/hfcmulti.c 		if (!skb->len)
skb              3467 drivers/isdn/hardware/mISDN/hfcmulti.c 		ret = bchannel_senddata(bch, skb);
skb              3518 drivers/isdn/hardware/mISDN/hfcmulti.c 				       __func__, skb->len);
skb              3543 drivers/isdn/hardware/mISDN/hfcmulti.c 		dev_kfree_skb(skb);
skb              1573 drivers/isdn/hardware/mISDN/hfcpci.c hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
skb              1579 drivers/isdn/hardware/mISDN/hfcpci.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb              1586 drivers/isdn/hardware/mISDN/hfcpci.c 		ret = dchannel_senddata(dch, skb);
skb              1649 drivers/isdn/hardware/mISDN/hfcpci.c 		dev_kfree_skb(skb);
skb              1657 drivers/isdn/hardware/mISDN/hfcpci.c hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
skb              1662 drivers/isdn/hardware/mISDN/hfcpci.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb              1668 drivers/isdn/hardware/mISDN/hfcpci.c 		ret = bchannel_senddata(bch, skb);
skb              1694 drivers/isdn/hardware/mISDN/hfcpci.c 		dev_kfree_skb(skb);
skb               196 drivers/isdn/hardware/mISDN/hfcsusb.c hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
skb               201 drivers/isdn/hardware/mISDN/hfcsusb.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb               210 drivers/isdn/hardware/mISDN/hfcsusb.c 		ret = bchannel_senddata(bch, skb);
skb               236 drivers/isdn/hardware/mISDN/hfcsusb.c 		dev_kfree_skb(skb);
skb               273 drivers/isdn/hardware/mISDN/hfcsusb.c hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
skb               277 drivers/isdn/hardware/mISDN/hfcsusb.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb               289 drivers/isdn/hardware/mISDN/hfcsusb.c 		ret = dchannel_senddata(dch, skb);
skb               552 drivers/isdn/hardware/mISDN/mISDNipac.c isac_l1hw(struct mISDNchannel *ch, struct sk_buff *skb)
skb               558 drivers/isdn/hardware/mISDN/mISDNipac.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb               565 drivers/isdn/hardware/mISDN/mISDNipac.c 		ret = dchannel_senddata(dch, skb);
skb               585 drivers/isdn/hardware/mISDN/mISDNipac.c 		dev_kfree_skb(skb);
skb              1331 drivers/isdn/hardware/mISDN/mISDNipac.c hscx_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
skb              1336 drivers/isdn/hardware/mISDN/mISDNipac.c 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
skb              1342 drivers/isdn/hardware/mISDN/mISDNipac.c 		ret = bchannel_senddata(bch, skb);
skb              1375 drivers/isdn/hardware/mISDN/mISDNipac.c 		dev_kfree_skb(skb);
skb              1467 drivers/isdn/hardware/mISDN/mISDNisar.c isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
skb              1472 drivers/isdn/hardware/mISDN/mISDNisar.c 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
skb              1479 drivers/isdn/hardware/mISDN/mISDNisar.c 		ret = bchannel_senddata(bch, skb);
skb              1507 drivers/isdn/hardware/mISDN/mISDNisar.c 		val = (u32 *)skb->data;
skb              1559 drivers/isdn/hardware/mISDN/mISDNisar.c 		dev_kfree_skb(skb);
skb               737 drivers/isdn/hardware/mISDN/netjet.c nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
skb               743 drivers/isdn/hardware/mISDN/netjet.c 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
skb               749 drivers/isdn/hardware/mISDN/netjet.c 		ret = bchannel_senddata(bch, skb);
skb               778 drivers/isdn/hardware/mISDN/netjet.c 		dev_kfree_skb(skb);
skb               529 drivers/isdn/hardware/mISDN/w6692.c setvolume(struct w6692_ch *wch, int mic, struct sk_buff *skb)
skb               532 drivers/isdn/hardware/mISDN/w6692.c 	u16 *vol = (u16 *)skb->data;
skb               538 drivers/isdn/hardware/mISDN/w6692.c 	if (skb->len < 2)
skb               931 drivers/isdn/hardware/mISDN/w6692.c w6692_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
skb               937 drivers/isdn/hardware/mISDN/w6692.c 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
skb               943 drivers/isdn/hardware/mISDN/w6692.c 		ret = bchannel_senddata(bch, skb);
skb               976 drivers/isdn/hardware/mISDN/w6692.c 		dev_kfree_skb(skb);
skb              1057 drivers/isdn/hardware/mISDN/w6692.c w6692_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
skb              1063 drivers/isdn/hardware/mISDN/w6692.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb              1070 drivers/isdn/hardware/mISDN/w6692.c 		ret = dchannel_senddata(dch, skb);
skb              1090 drivers/isdn/hardware/mISDN/w6692.c 		dev_kfree_skb(skb);
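
Every mISDN driver above implements the same l2l1 dispatch shape: read the primitive from the skb's control block via mISDN_HEAD_P(), hand PH_DATA_REQ payloads to {b,d}channel_senddata() (which takes ownership), and dev_kfree_skb() requests that were handled locally. Schematically (demo_senddata()/demo_activate() are hypothetical stand-ins):

    #include <linux/mISDNif.h>

    static int demo_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
    {
            struct mISDNhead *hh = mISDN_HEAD_P(skb);      /* prim/id in skb->cb */
            int ret = -EINVAL;

            switch (hh->prim) {
            case PH_DATA_REQ:
                    return demo_senddata(ch, skb);         /* senddata owns the skb */
            case PH_ACTIVATE_REQ:
                    demo_activate(ch);                     /* hypothetical */
                    ret = 0;
                    break;
            }
            if (!ret)
                    dev_kfree_skb(skb);    /* handled locally: free exactly once */
            return ret;
    }
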
skb               241 drivers/isdn/mISDN/dsp.h extern void dsp_change_volume(struct sk_buff *skb, int volume);
skb               248 drivers/isdn/mISDN/dsp.h extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
skb               249 drivers/isdn/mISDN/dsp.h extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
skb               251 drivers/isdn/mISDN/dsp.h extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
skb               391 drivers/isdn/mISDN/dsp_audio.c dsp_change_volume(struct sk_buff *skb, int volume)
skb               413 drivers/isdn/mISDN/dsp_audio.c 	ii = skb->len;
skb               414 drivers/isdn/mISDN/dsp_audio.c 	p = skb->data;
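
dsp_change_volume() above rescales audio samples in place: it walks skb->data for skb->len bytes and maps each byte through a precomputed gain table. In-place edits are safe here only because the DSP owns the skb exclusively (no clones share the data). The loop shape, with a hypothetical 256-entry table:

    u8 *p = skb->data;
    int ii = skb->len;

    while (ii--) {
            *p = volume_table[*p];         /* table lookup per sample */
            p++;
    }
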
skb              1182 drivers/isdn/mISDN/dsp_cmx.c dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
skb              1185 drivers/isdn/mISDN/dsp_cmx.c 	int len = skb->len;
skb              1186 drivers/isdn/mISDN/dsp_cmx.c 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
skb              1287 drivers/isdn/mISDN/dsp_cmx.c 	p = skb->data;
skb              1860 drivers/isdn/mISDN/dsp_cmx.c dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb)
skb              1873 drivers/isdn/mISDN/dsp_cmx.c 	d = skb->data;
skb              1876 drivers/isdn/mISDN/dsp_cmx.c 	if (space < skb->len) {
skb              1882 drivers/isdn/mISDN/dsp_cmx.c 			       skb->len, w, ww);
skb              1885 drivers/isdn/mISDN/dsp_cmx.c 		ww = (w + skb->len) & CMX_BUFF_MASK;
skb              1916 drivers/isdn/mISDN/dsp_cmx.c dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb)
skb              1927 drivers/isdn/mISDN/dsp_cmx.c 	if (skb->len < 1)
skb              1934 drivers/isdn/mISDN/dsp_cmx.c 			nskb = skb_clone(skb, GFP_ATOMIC);
skb              1950 drivers/isdn/mISDN/dsp_cmx.c 			nskb = skb_clone(skb, GFP_ATOMIC);
skb               281 drivers/isdn/mISDN/dsp_core.c dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
skb               289 drivers/isdn/mISDN/dsp_core.c 	if (skb->len < sizeof(int)) {
skb               293 drivers/isdn/mISDN/dsp_core.c 	cont = *((int *)skb->data);
skb               294 drivers/isdn/mISDN/dsp_core.c 	len = skb->len - sizeof(int);
skb               295 drivers/isdn/mISDN/dsp_core.c 	data = skb->data + sizeof(int);
skb               363 drivers/isdn/mISDN/dsp_core.c 			       __func__, *((int *)skb->data));
skb               664 drivers/isdn/mISDN/dsp_core.c dsp_function(struct mISDNchannel *ch,  struct sk_buff *skb)
skb               672 drivers/isdn/mISDN/dsp_core.c 	hh = mISDN_HEAD_P(skb);
skb               687 drivers/isdn/mISDN/dsp_core.c 		if (skb->len < 1) {
skb               700 drivers/isdn/mISDN/dsp_core.c 			dsp_cmx_hdlc(dsp, skb);
skb               708 drivers/isdn/mISDN/dsp_core.c 				return dsp->up->send(dsp->up, skb);
skb               716 drivers/isdn/mISDN/dsp_core.c 			dsp_bf_decrypt(dsp, skb->data, skb->len);
skb               719 drivers/isdn/mISDN/dsp_core.c 			dsp_pipeline_process_rx(&dsp->pipeline, skb->data,
skb               720 drivers/isdn/mISDN/dsp_core.c 						skb->len, hh->id);
skb               723 drivers/isdn/mISDN/dsp_core.c 			dsp_change_volume(skb, dsp->rx_volume);
skb               726 drivers/isdn/mISDN/dsp_core.c 			digits = dsp_dtmf_goertzel_decode(dsp, skb->data,
skb               727 drivers/isdn/mISDN/dsp_core.c 							  skb->len, (dsp_options & DSP_OPT_ULAW) ? 1 : 0);
skb               732 drivers/isdn/mISDN/dsp_core.c 			dsp_cmx_receive(dsp, skb);
skb               767 drivers/isdn/mISDN/dsp_core.c 			return dsp->up->send(dsp->up, skb);
skb               773 drivers/isdn/mISDN/dsp_core.c 			       hh->id, skb->len, dsp->name);
skb               783 drivers/isdn/mISDN/dsp_core.c 			digits = dsp_dtmf_goertzel_decode(dsp, skb->data,
skb               784 drivers/isdn/mISDN/dsp_core.c 							  skb->len, 2);
skb               808 drivers/isdn/mISDN/dsp_core.c 			if (skb->len != sizeof(int)) {
skb               813 drivers/isdn/mISDN/dsp_core.c 			dsp->tx_volume = *((int *)skb->data);
skb               854 drivers/isdn/mISDN/dsp_core.c 			return dsp->up->send(dsp->up, skb);
skb               870 drivers/isdn/mISDN/dsp_core.c 			return dsp->up->send(dsp->up, skb);
skb               875 drivers/isdn/mISDN/dsp_core.c 		if (skb->len < 1) {
skb               887 drivers/isdn/mISDN/dsp_core.c 			skb_queue_tail(&dsp->sendq, skb);
skb               895 drivers/isdn/mISDN/dsp_core.c 			dsp_cmx_transmit(dsp, skb);
skb               901 drivers/isdn/mISDN/dsp_core.c 		ret = dsp_control_req(dsp, hh, skb);
skb               918 drivers/isdn/mISDN/dsp_core.c 			return ch->recv(ch->peer, skb);
skb               938 drivers/isdn/mISDN/dsp_core.c 			return ch->recv(ch->peer, skb);
skb               947 drivers/isdn/mISDN/dsp_core.c 		dev_kfree_skb(skb);
skb              1008 drivers/isdn/mISDN/dsp_core.c 	struct sk_buff *skb;
skb              1015 drivers/isdn/mISDN/dsp_core.c 	while ((skb = skb_dequeue(&dsp->sendq))) {
skb              1022 drivers/isdn/mISDN/dsp_core.c 			dev_kfree_skb(skb);
skb              1025 drivers/isdn/mISDN/dsp_core.c 		hh = mISDN_HEAD_P(skb);
skb              1029 drivers/isdn/mISDN/dsp_core.c 				if (dsp->up->send(dsp->up, skb))
skb              1030 drivers/isdn/mISDN/dsp_core.c 					dev_kfree_skb(skb);
skb              1032 drivers/isdn/mISDN/dsp_core.c 				dev_kfree_skb(skb);
skb              1037 drivers/isdn/mISDN/dsp_core.c 				if (dsp->ch.recv(dsp->ch.peer, skb)) {
skb              1038 drivers/isdn/mISDN/dsp_core.c 					dev_kfree_skb(skb);
skb              1042 drivers/isdn/mISDN/dsp_core.c 				dev_kfree_skb(skb);
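
The tail of the dsp_core.c block is the sendq drain loop. A sketch of the pattern, with up_send() standing in for dsp->up->send(); mISDN send hooks consume the skb on success, so it is freed only on failure:

	#include <linux/skbuff.h>

	static void drain_sendq(struct sk_buff_head *sendq,
				int (*up_send)(struct sk_buff *))
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(sendq))) {
			if (up_send(skb))	/* non-zero: not consumed */
				dev_kfree_skb(skb);
		}
	}
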
skb                17 drivers/isdn/mISDN/hwchannel.c 	struct sk_buff	*skb;
skb                21 drivers/isdn/mISDN/hwchannel.c 		while ((skb = skb_dequeue(&dch->rqueue))) {
skb                23 drivers/isdn/mISDN/hwchannel.c 				err = dch->dev.D.recv(dch->dev.D.peer, skb);
skb                25 drivers/isdn/mISDN/hwchannel.c 					dev_kfree_skb(skb);
skb                27 drivers/isdn/mISDN/hwchannel.c 				dev_kfree_skb(skb);
skb                40 drivers/isdn/mISDN/hwchannel.c 	struct sk_buff	*skb;
skb                44 drivers/isdn/mISDN/hwchannel.c 		while ((skb = skb_dequeue(&bch->rqueue))) {
skb                47 drivers/isdn/mISDN/hwchannel.c 				err = bch->ch.recv(bch->ch.peer, skb);
skb                49 drivers/isdn/mISDN/hwchannel.c 					dev_kfree_skb(skb);
skb                51 drivers/isdn/mISDN/hwchannel.c 				dev_kfree_skb(skb);
skb               284 drivers/isdn/mISDN/hwchannel.c recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
skb               286 drivers/isdn/mISDN/hwchannel.c 	skb_queue_tail(&dch->rqueue, skb);
skb               292 drivers/isdn/mISDN/hwchannel.c recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
skb               301 drivers/isdn/mISDN/hwchannel.c 	skb_queue_tail(&bch->rqueue, skb);
skb               309 drivers/isdn/mISDN/hwchannel.c 	struct sk_buff	*skb;
skb               311 drivers/isdn/mISDN/hwchannel.c 	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
skb               313 drivers/isdn/mISDN/hwchannel.c 	if (!skb) {
skb               318 drivers/isdn/mISDN/hwchannel.c 	skb_queue_tail(&dch->rqueue, skb);
skb               340 drivers/isdn/mISDN/hwchannel.c 	struct sk_buff	*skb;
skb               348 drivers/isdn/mISDN/hwchannel.c 	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
skb               350 drivers/isdn/mISDN/hwchannel.c 	if (!skb) {
skb               356 drivers/isdn/mISDN/hwchannel.c 	skb_queue_tail(&bch->rqueue, skb);
skb               384 drivers/isdn/mISDN/hwchannel.c queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
skb               388 drivers/isdn/mISDN/hwchannel.c 	if (!skb) {
skb               392 drivers/isdn/mISDN/hwchannel.c 			hh = mISDN_HEAD_P(skb);
skb               395 drivers/isdn/mISDN/hwchannel.c 			if (!ch->recv(ch->peer, skb))
skb               398 drivers/isdn/mISDN/hwchannel.c 		dev_kfree_skb(skb);
skb               404 drivers/isdn/mISDN/hwchannel.c dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
skb               407 drivers/isdn/mISDN/hwchannel.c 	if (skb->len <= 0) {
skb               411 drivers/isdn/mISDN/hwchannel.c 	if (skb->len > ch->maxlen) {
skb               413 drivers/isdn/mISDN/hwchannel.c 		       __func__, skb->len, ch->maxlen);
skb               418 drivers/isdn/mISDN/hwchannel.c 		skb_queue_tail(&ch->squeue, skb);
skb               422 drivers/isdn/mISDN/hwchannel.c 		ch->tx_skb = skb;
skb               430 drivers/isdn/mISDN/hwchannel.c bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
skb               434 drivers/isdn/mISDN/hwchannel.c 	if (skb->len <= 0) {
skb               438 drivers/isdn/mISDN/hwchannel.c 	if (skb->len > ch->maxlen) {
skb               440 drivers/isdn/mISDN/hwchannel.c 		       __func__, skb->len, ch->maxlen);
skb               448 drivers/isdn/mISDN/hwchannel.c 		       __func__, skb->len, ch->next_skb->len);
skb               453 drivers/isdn/mISDN/hwchannel.c 		ch->next_skb = skb;
skb               457 drivers/isdn/mISDN/hwchannel.c 		ch->tx_skb = skb;
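
dchannel_senddata()/bchannel_senddata() above gate every outgoing frame the same way: reject empty or oversized frames, then either park the skb behind the in-flight one or make it the transmit buffer. A pared-down sketch; struct tx_chan is a hypothetical stand-in for struct dchannel/bchannel:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	struct tx_chan {			/* hypothetical mini-channel */
		struct sk_buff *tx_skb;		/* frame being transmitted */
		struct sk_buff_head squeue;	/* frames waiting their turn */
		unsigned int maxlen;
	};

	static int chan_senddata(struct tx_chan *ch, struct sk_buff *skb)
	{
		if (skb->len == 0)
			return -EINVAL;
		if (skb->len > ch->maxlen)
			return -EINVAL;

		if (ch->tx_skb)			/* busy: queue behind it */
			skb_queue_tail(&ch->squeue, skb);
		else				/* idle: transmit directly */
			ch->tx_skb = skb;
		return 0;
	}
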
skb               864 drivers/isdn/mISDN/l1oip_core.c handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
skb               869 drivers/isdn/mISDN/l1oip_core.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb               876 drivers/isdn/mISDN/l1oip_core.c 		if (skb->len < 1) {
skb               881 drivers/isdn/mISDN/l1oip_core.c 		if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) {
skb               887 drivers/isdn/mISDN/l1oip_core.c 		p = skb->data;
skb               888 drivers/isdn/mISDN/l1oip_core.c 		l = skb->len;
skb               900 drivers/isdn/mISDN/l1oip_core.c 		skb_trim(skb, 0);
skb               901 drivers/isdn/mISDN/l1oip_core.c 		queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
skb               907 drivers/isdn/mISDN/l1oip_core.c 		skb_trim(skb, 0);
skb               909 drivers/isdn/mISDN/l1oip_core.c 			queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
skb               911 drivers/isdn/mISDN/l1oip_core.c 			queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
skb               918 drivers/isdn/mISDN/l1oip_core.c 		skb_trim(skb, 0);
skb               920 drivers/isdn/mISDN/l1oip_core.c 			queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
skb               922 drivers/isdn/mISDN/l1oip_core.c 			queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
skb               926 drivers/isdn/mISDN/l1oip_core.c 		dev_kfree_skb(skb);
skb              1084 drivers/isdn/mISDN/l1oip_core.c handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
skb              1089 drivers/isdn/mISDN/l1oip_core.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb              1095 drivers/isdn/mISDN/l1oip_core.c 		if (skb->len <= 0) {
skb              1100 drivers/isdn/mISDN/l1oip_core.c 		if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) {
skb              1106 drivers/isdn/mISDN/l1oip_core.c 		l = skb->len;
skb              1107 drivers/isdn/mISDN/l1oip_core.c 		if (!memchr_inv(skb->data, 0xff, l)) {
skb              1112 drivers/isdn/mISDN/l1oip_core.c 			skb_trim(skb, 0);
skb              1113 drivers/isdn/mISDN/l1oip_core.c 			queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
skb              1117 drivers/isdn/mISDN/l1oip_core.c 		l = skb->len;
skb              1118 drivers/isdn/mISDN/l1oip_core.c 		if (!memchr_inv(skb->data, 0x2a, l)) {
skb              1123 drivers/isdn/mISDN/l1oip_core.c 			skb_trim(skb, 0);
skb              1124 drivers/isdn/mISDN/l1oip_core.c 			queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
skb              1129 drivers/isdn/mISDN/l1oip_core.c 		p = skb->data;
skb              1130 drivers/isdn/mISDN/l1oip_core.c 		l = skb->len;
skb              1143 drivers/isdn/mISDN/l1oip_core.c 		skb_trim(skb, 0);
skb              1144 drivers/isdn/mISDN/l1oip_core.c 		queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
skb              1152 drivers/isdn/mISDN/l1oip_core.c 		skb_trim(skb, 0);
skb              1153 drivers/isdn/mISDN/l1oip_core.c 		queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
skb              1161 drivers/isdn/mISDN/l1oip_core.c 		skb_trim(skb, 0);
skb              1162 drivers/isdn/mISDN/l1oip_core.c 		queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
skb              1166 drivers/isdn/mISDN/l1oip_core.c 		dev_kfree_skb(skb);
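
handle_dmsg()/handle_bmsg() answer most requests by reusing the request skb itself: skb_trim(skb, 0) empties it and queue_ch_frame() sends it back up as the confirm, avoiding a fresh allocation. A sketch of that reuse; queue_cnf() stands in for queue_ch_frame():

	#include <linux/skbuff.h>

	static void confirm_with_same_skb(struct sk_buff *skb,
					  void (*queue_cnf)(struct sk_buff *))
	{
		skb_trim(skb, 0);	/* drop payload, keep header room */
		queue_cnf(skb);		/* requeue as zero-length confirm */
	}
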
skb               138 drivers/isdn/mISDN/layer2.c l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
skb               144 drivers/isdn/mISDN/layer2.c 	mISDN_HEAD_PRIM(skb) = prim;
skb               145 drivers/isdn/mISDN/layer2.c 	mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
skb               146 drivers/isdn/mISDN/layer2.c 	err = l2->up->send(l2->up, skb);
skb               150 drivers/isdn/mISDN/layer2.c 		dev_kfree_skb(skb);
skb               157 drivers/isdn/mISDN/layer2.c 	struct sk_buff	*skb;
skb               163 drivers/isdn/mISDN/layer2.c 	skb = mI_alloc_skb(len, GFP_ATOMIC);
skb               164 drivers/isdn/mISDN/layer2.c 	if (!skb)
skb               166 drivers/isdn/mISDN/layer2.c 	hh = mISDN_HEAD_P(skb);
skb               170 drivers/isdn/mISDN/layer2.c 		skb_put_data(skb, arg, len);
skb               171 drivers/isdn/mISDN/layer2.c 	err = l2->up->send(l2->up, skb);
skb               175 drivers/isdn/mISDN/layer2.c 		dev_kfree_skb(skb);
skb               180 drivers/isdn/mISDN/layer2.c l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
skb               183 drivers/isdn/mISDN/layer2.c 	ret = l2->ch.recv(l2->ch.peer, skb);
skb               191 drivers/isdn/mISDN/layer2.c l2down_raw(struct layer2 *l2, struct sk_buff *skb)
skb               193 drivers/isdn/mISDN/layer2.c 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
skb               197 drivers/isdn/mISDN/layer2.c 			skb_queue_tail(&l2->down_queue, skb);
skb               200 drivers/isdn/mISDN/layer2.c 		l2->down_id = mISDN_HEAD_ID(skb);
skb               202 drivers/isdn/mISDN/layer2.c 	return l2down_skb(l2, skb);
skb               206 drivers/isdn/mISDN/layer2.c l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
skb               208 drivers/isdn/mISDN/layer2.c 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
skb               212 drivers/isdn/mISDN/layer2.c 	return l2down_raw(l2, skb);
skb               218 drivers/isdn/mISDN/layer2.c 	struct sk_buff	*skb;
skb               222 drivers/isdn/mISDN/layer2.c 	skb = mI_alloc_skb(len, GFP_ATOMIC);
skb               223 drivers/isdn/mISDN/layer2.c 	if (!skb)
skb               225 drivers/isdn/mISDN/layer2.c 	hh = mISDN_HEAD_P(skb);
skb               229 drivers/isdn/mISDN/layer2.c 		skb_put_data(skb, arg, len);
skb               230 drivers/isdn/mISDN/layer2.c 	err = l2down_raw(l2, skb);
skb               232 drivers/isdn/mISDN/layer2.c 		dev_kfree_skb(skb);
skb               237 drivers/isdn/mISDN/layer2.c ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
skb               238 drivers/isdn/mISDN/layer2.c 	struct sk_buff *nskb = skb;
skb               253 drivers/isdn/mISDN/layer2.c 				dev_kfree_skb(skb);
skb               281 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb;
skb               284 drivers/isdn/mISDN/layer2.c 	skb = mI_alloc_skb(0, GFP_ATOMIC);
skb               285 drivers/isdn/mISDN/layer2.c 	if (!skb) {
skb               291 drivers/isdn/mISDN/layer2.c 	hh = mISDN_HEAD_P(skb);
skb               299 drivers/isdn/mISDN/layer2.c 		l2->ch.st->own.recv(&l2->ch.st->own, skb);
skb               414 drivers/isdn/mISDN/layer2.c enqueue_super(struct layer2 *l2, struct sk_buff *skb)
skb               416 drivers/isdn/mISDN/layer2.c 	if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
skb               417 drivers/isdn/mISDN/layer2.c 		dev_kfree_skb(skb);
skb               421 drivers/isdn/mISDN/layer2.c enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
skb               425 drivers/isdn/mISDN/layer2.c 	if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
skb               426 drivers/isdn/mISDN/layer2.c 		dev_kfree_skb(skb);
skb               501 drivers/isdn/mISDN/layer2.c iframe_error(struct layer2 *l2, struct sk_buff *skb)
skb               504 drivers/isdn/mISDN/layer2.c 	int	rsp = *skb->data & 0x2;
skb               511 drivers/isdn/mISDN/layer2.c 	if (skb->len < i)
skb               513 drivers/isdn/mISDN/layer2.c 	if ((skb->len - i) > l2->maxlen)
skb               519 drivers/isdn/mISDN/layer2.c super_error(struct layer2 *l2, struct sk_buff *skb)
skb               521 drivers/isdn/mISDN/layer2.c 	if (skb->len != l2addrsize(l2) +
skb               528 drivers/isdn/mISDN/layer2.c unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
skb               530 drivers/isdn/mISDN/layer2.c 	int rsp = (*skb->data & 0x2) >> 1;
skb               535 drivers/isdn/mISDN/layer2.c 	if (skb->len != l2addrsize(l2) + 1)
skb               541 drivers/isdn/mISDN/layer2.c UI_error(struct layer2 *l2, struct sk_buff *skb)
skb               543 drivers/isdn/mISDN/layer2.c 	int rsp = *skb->data & 0x2;
skb               548 drivers/isdn/mISDN/layer2.c 	if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
skb               554 drivers/isdn/mISDN/layer2.c FRMR_error(struct layer2 *l2, struct sk_buff *skb)
skb               557 drivers/isdn/mISDN/layer2.c 	u_char	*datap = skb->data + headers;
skb               558 drivers/isdn/mISDN/layer2.c 	int	rsp = *skb->data & 0x2;
skb               565 drivers/isdn/mISDN/layer2.c 		if (skb->len < headers + 5)
skb               572 drivers/isdn/mISDN/layer2.c 		if (skb->len < headers + 3)
skb               594 drivers/isdn/mISDN/layer2.c 	struct sk_buff	*skb;
skb               609 drivers/isdn/mISDN/layer2.c 	skb = skb_dequeue(&l2->tmp_queue);
skb               610 drivers/isdn/mISDN/layer2.c 	while (skb) {
skb               611 drivers/isdn/mISDN/layer2.c 		dev_kfree_skb(skb);
skb               612 drivers/isdn/mISDN/layer2.c 		skb = skb_dequeue(&l2->tmp_queue);
skb               617 drivers/isdn/mISDN/layer2.c send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
skb               624 drivers/isdn/mISDN/layer2.c 	if (skb)
skb               625 drivers/isdn/mISDN/layer2.c 		skb_trim(skb, 0);
skb               627 drivers/isdn/mISDN/layer2.c 		skb = mI_alloc_skb(i, GFP_ATOMIC);
skb               628 drivers/isdn/mISDN/layer2.c 		if (!skb) {
skb               634 drivers/isdn/mISDN/layer2.c 	skb_put_data(skb, tmp, i);
skb               635 drivers/isdn/mISDN/layer2.c 	enqueue_super(l2, skb);
skb               640 drivers/isdn/mISDN/layer2.c get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
skb               642 drivers/isdn/mISDN/layer2.c 	return skb->data[l2addrsize(l2)] & 0x10;
skb               646 drivers/isdn/mISDN/layer2.c get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
skb               650 drivers/isdn/mISDN/layer2.c 	PF = get_PollFlag(l2, skb);
skb               651 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb               716 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               719 drivers/isdn/mISDN/layer2.c 	if (get_PollFlagFree(l2, skb))
skb               729 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               732 drivers/isdn/mISDN/layer2.c 	if (get_PollFlagFree(l2, skb))
skb               744 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               747 drivers/isdn/mISDN/layer2.c 	if (get_PollFlagFree(l2, skb))
skb               776 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               778 drivers/isdn/mISDN/layer2.c 	skb_queue_tail(&l2->ui_queue, skb);
skb               787 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               789 drivers/isdn/mISDN/layer2.c 	skb_queue_tail(&l2->ui_queue, skb);
skb               795 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb;
skb               803 drivers/isdn/mISDN/layer2.c 	while ((skb = skb_dequeue(&l2->ui_queue))) {
skb               804 drivers/isdn/mISDN/layer2.c 		memcpy(skb_push(skb, i), header, i);
skb               805 drivers/isdn/mISDN/layer2.c 		enqueue_ui(l2, skb);
skb               813 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               815 drivers/isdn/mISDN/layer2.c 	skb_queue_tail(&l2->ui_queue, skb);
skb               823 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               825 drivers/isdn/mISDN/layer2.c 	skb_pull(skb, l2headersize(l2, 1));
skb               832 drivers/isdn/mISDN/layer2.c 	l2up(l2, DL_UNITDATA_IND, skb);
skb               838 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               843 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb               849 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               855 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb               861 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               867 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb               874 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               876 drivers/isdn/mISDN/layer2.c 	skb_trim(skb, 0);
skb               877 drivers/isdn/mISDN/layer2.c 	l2up(l2, DL_RELEASE_CNF, skb);
skb               883 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               887 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb               894 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               903 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb               910 drivers/isdn/mISDN/layer2.c 	struct sk_buff	*skb = arg;
skb               917 drivers/isdn/mISDN/layer2.c 	send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
skb               920 drivers/isdn/mISDN/layer2.c 	skb_trim(skb, 0);
skb               921 drivers/isdn/mISDN/layer2.c 	l2up(l2, DL_ESTABLISH_IND, skb);
skb               930 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               932 drivers/isdn/mISDN/layer2.c 	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
skb               939 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb               941 drivers/isdn/mISDN/layer2.c 	send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
skb               948 drivers/isdn/mISDN/layer2.c 	struct sk_buff	*skb = arg;
skb               951 drivers/isdn/mISDN/layer2.c 	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
skb               983 drivers/isdn/mISDN/layer2.c 	struct sk_buff	*skb = arg;
skb               989 drivers/isdn/mISDN/layer2.c 	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
skb              1001 drivers/isdn/mISDN/layer2.c 	struct sk_buff	*skb = arg;
skb              1004 drivers/isdn/mISDN/layer2.c 	if (!get_PollFlag(l2, skb)) {
skb              1008 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb              1038 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1040 drivers/isdn/mISDN/layer2.c 	if (!get_PollFlag(l2, skb)) {
skb              1044 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb              1056 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1058 drivers/isdn/mISDN/layer2.c 	if (!get_PollFlagFree(l2, skb)) {
skb              1068 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1070 drivers/isdn/mISDN/layer2.c 	if (get_PollFlagFree(l2, skb)) {
skb              1088 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1090 drivers/isdn/mISDN/layer2.c 	if (get_PollFlagFree(l2, skb)) {
skb              1102 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb;
skb              1112 drivers/isdn/mISDN/layer2.c 	skb = mI_alloc_skb(i, GFP_ATOMIC);
skb              1113 drivers/isdn/mISDN/layer2.c 	if (!skb) {
skb              1118 drivers/isdn/mISDN/layer2.c 	skb_put_data(skb, tmp, i);
skb              1119 drivers/isdn/mISDN/layer2.c 	enqueue_super(l2, skb);
skb              1186 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1190 drivers/isdn/mISDN/layer2.c 	rsp = *skb->data & 0x2;
skb              1194 drivers/isdn/mISDN/layer2.c 	skb_pull(skb, l2addrsize(l2));
skb              1195 drivers/isdn/mISDN/layer2.c 	if (IsRNR(skb->data, l2)) {
skb              1200 drivers/isdn/mISDN/layer2.c 	if (IsREJ(skb->data, l2))
skb              1204 drivers/isdn/mISDN/layer2.c 		PollFlag = (skb->data[1] & 0x1) == 0x1;
skb              1205 drivers/isdn/mISDN/layer2.c 		nr = skb->data[1] >> 1;
skb              1207 drivers/isdn/mISDN/layer2.c 		PollFlag = (skb->data[0] & 0x10);
skb              1208 drivers/isdn/mISDN/layer2.c 		nr = (skb->data[0] >> 5) & 0x7;
skb              1210 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb              1247 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1250 drivers/isdn/mISDN/layer2.c 		skb_queue_tail(&l2->i_queue, skb);
skb              1252 drivers/isdn/mISDN/layer2.c 		dev_kfree_skb(skb);
skb              1259 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1261 drivers/isdn/mISDN/layer2.c 	skb_queue_tail(&l2->i_queue, skb);
skb              1269 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1271 drivers/isdn/mISDN/layer2.c 	skb_queue_tail(&l2->i_queue, skb);
skb              1278 drivers/isdn/mISDN/layer2.c 	struct sk_buff	*skb = arg;
skb              1284 drivers/isdn/mISDN/layer2.c 		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
skb              1285 drivers/isdn/mISDN/layer2.c 		ns = skb->data[i] >> 1;
skb              1286 drivers/isdn/mISDN/layer2.c 		nr = (skb->data[i + 1] >> 1) & 0x7f;
skb              1288 drivers/isdn/mISDN/layer2.c 		PollFlag = (skb->data[i] & 0x10);
skb              1289 drivers/isdn/mISDN/layer2.c 		ns = (skb->data[i] >> 1) & 0x7;
skb              1290 drivers/isdn/mISDN/layer2.c 		nr = (skb->data[i] >> 5) & 0x7;
skb              1293 drivers/isdn/mISDN/layer2.c 		dev_kfree_skb(skb);
skb              1308 drivers/isdn/mISDN/layer2.c 			skb_pull(skb, l2headersize(l2, 0));
skb              1309 drivers/isdn/mISDN/layer2.c 			l2up(l2, DL_DATA_IND, skb);
skb              1312 drivers/isdn/mISDN/layer2.c 			dev_kfree_skb(skb);
skb              1469 drivers/isdn/mISDN/layer2.c 	struct sk_buff	*skb, *nskb;
skb              1476 drivers/isdn/mISDN/layer2.c 	skb = skb_dequeue(&l2->i_queue);
skb              1477 drivers/isdn/mISDN/layer2.c 	if (!skb)
skb              1485 drivers/isdn/mISDN/layer2.c 	nskb = skb_realloc_headroom(skb, i);
skb              1489 drivers/isdn/mISDN/layer2.c 		skb_queue_head(&l2->i_queue, skb);
skb              1505 drivers/isdn/mISDN/layer2.c 	l2->windowar[p1] = skb;
skb              1519 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1523 drivers/isdn/mISDN/layer2.c 	rsp = *skb->data & 0x2;
skb              1527 drivers/isdn/mISDN/layer2.c 	skb_pull(skb, l2addrsize(l2));
skb              1529 drivers/isdn/mISDN/layer2.c 	if (IsRNR(skb->data, l2)) {
skb              1536 drivers/isdn/mISDN/layer2.c 		PollFlag = (skb->data[1] & 0x1) == 0x1;
skb              1537 drivers/isdn/mISDN/layer2.c 		nr = skb->data[1] >> 1;
skb              1539 drivers/isdn/mISDN/layer2.c 		PollFlag = (skb->data[0] & 0x10);
skb              1540 drivers/isdn/mISDN/layer2.c 		nr = (skb->data[0] >> 5) & 0x7;
skb              1542 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb              1573 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1575 drivers/isdn/mISDN/layer2.c 	skb_pull(skb, l2addrsize(l2) + 1);
skb              1577 drivers/isdn/mISDN/layer2.c 	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
skb              1578 drivers/isdn/mISDN/layer2.c 	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
skb              1583 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb              1656 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1661 drivers/isdn/mISDN/layer2.c 		l2up(l2, DL_RELEASE_IND, skb);
skb              1663 drivers/isdn/mISDN/layer2.c 		dev_kfree_skb(skb);
skb              1670 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1680 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb              1687 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1691 drivers/isdn/mISDN/layer2.c 	l2up(l2, DL_RELEASE_CNF, skb);
skb              1701 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1708 drivers/isdn/mISDN/layer2.c 	l2up(l2, DL_RELEASE_IND, skb);
skb              1718 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1724 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb              1731 drivers/isdn/mISDN/layer2.c 	struct sk_buff *skb = arg;
skb              1737 drivers/isdn/mISDN/layer2.c 	dev_kfree_skb(skb);
skb              1856 drivers/isdn/mISDN/layer2.c ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
skb              1858 drivers/isdn/mISDN/layer2.c 	u_char	*datap = skb->data;
skb              1865 drivers/isdn/mISDN/layer2.c 	if (skb->len <= l) {
skb              1886 drivers/isdn/mISDN/layer2.c 			dev_kfree_skb(skb);
skb              1894 drivers/isdn/mISDN/layer2.c 			dev_kfree_skb(skb);
skb              1900 drivers/isdn/mISDN/layer2.c 		c = iframe_error(l2, skb);
skb              1902 drivers/isdn/mISDN/layer2.c 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
skb              1904 drivers/isdn/mISDN/layer2.c 		c = super_error(l2, skb);
skb              1906 drivers/isdn/mISDN/layer2.c 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
skb              1908 drivers/isdn/mISDN/layer2.c 		c = UI_error(l2, skb);
skb              1910 drivers/isdn/mISDN/layer2.c 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
skb              1912 drivers/isdn/mISDN/layer2.c 		c = unnum_error(l2, skb, CMD);
skb              1914 drivers/isdn/mISDN/layer2.c 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
skb              1916 drivers/isdn/mISDN/layer2.c 		c = unnum_error(l2, skb, RSP);
skb              1918 drivers/isdn/mISDN/layer2.c 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
skb              1920 drivers/isdn/mISDN/layer2.c 		c = unnum_error(l2, skb, CMD);
skb              1922 drivers/isdn/mISDN/layer2.c 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
skb              1924 drivers/isdn/mISDN/layer2.c 		c = unnum_error(l2, skb, RSP);
skb              1926 drivers/isdn/mISDN/layer2.c 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
skb              1928 drivers/isdn/mISDN/layer2.c 		c = FRMR_error(l2, skb);
skb              1930 drivers/isdn/mISDN/layer2.c 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
skb              1942 drivers/isdn/mISDN/layer2.c l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
skb              1945 drivers/isdn/mISDN/layer2.c 	struct mISDNhead	*hh =  mISDN_HEAD_P(skb);
skb              1962 drivers/isdn/mISDN/layer2.c 		ret = ph_data_indication(l2, hh, skb);
skb              1965 drivers/isdn/mISDN/layer2.c 		ret = ph_data_confirm(l2, hh, skb);
skb              1972 drivers/isdn/mISDN/layer2.c 					     EV_L2_DL_ESTABLISH_REQ, skb);
skb              1977 drivers/isdn/mISDN/layer2.c 		ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
skb              1982 drivers/isdn/mISDN/layer2.c 		ret = l2->up->send(l2->up, skb);
skb              1985 drivers/isdn/mISDN/layer2.c 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
skb              1988 drivers/isdn/mISDN/layer2.c 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
skb              1997 drivers/isdn/mISDN/layer2.c 						     EV_L2_DL_ESTABLISH_REQ, skb);
skb              2005 drivers/isdn/mISDN/layer2.c 				     skb);
skb              2013 drivers/isdn/mISDN/layer2.c 				     skb);
skb              2027 drivers/isdn/mISDN/layer2.c 		dev_kfree_skb(skb);
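
Around the i_queue dequeue above, layer2.c ensures the frame has headroom for the L2 header before sending, while the original skb is kept for retransmission (windowar[]). A sketch of that step; skb_realloc_headroom() always returns a separate buffer (a copy or an expanded clone), and the 'keep' out-parameter models the retransmit slot:

	#include <linux/skbuff.h>

	static struct sk_buff *prep_tx_copy(struct sk_buff_head *q,
					    unsigned int hdrlen,
					    struct sk_buff **keep)
	{
		struct sk_buff *skb, *nskb;

		skb = skb_dequeue(q);
		if (!skb)
			return NULL;

		/* yields a distinct buffer with >= hdrlen of headroom */
		nskb = skb_realloc_headroom(skb, hdrlen);
		if (!nskb) {
			skb_queue_head(q, skb);	/* requeue, retry later */
			return NULL;
		}
		*keep = skb;	/* original kept for retransmission */
		return nskb;	/* copy gets the header and goes down */
	}
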
skb                37 drivers/isdn/mISDN/socket.c 	struct sk_buff  *skb;
skb                39 drivers/isdn/mISDN/socket.c 	skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask);
skb                40 drivers/isdn/mISDN/socket.c 	if (likely(skb))
skb                41 drivers/isdn/mISDN/socket.c 		skb_reserve(skb, L2_HEADER_LEN);
skb                42 drivers/isdn/mISDN/socket.c 	return skb;
skb                61 drivers/isdn/mISDN/socket.c mISDN_send(struct mISDNchannel *ch, struct sk_buff *skb)
skb                68 drivers/isdn/mISDN/socket.c 		printk(KERN_DEBUG "%s len %d %p\n", __func__, skb->len, skb);
skb                71 drivers/isdn/mISDN/socket.c 	__net_timestamp(skb);
skb                72 drivers/isdn/mISDN/socket.c 	err = sock_queue_rcv_skb(&msk->sk, skb);
skb                95 drivers/isdn/mISDN/socket.c mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
skb               100 drivers/isdn/mISDN/socket.c 		skb_get_timestamp(skb, &tv);
skb               109 drivers/isdn/mISDN/socket.c 	struct sk_buff		*skb;
skb               124 drivers/isdn/mISDN/socket.c 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
skb               125 drivers/isdn/mISDN/socket.c 	if (!skb)
skb               135 drivers/isdn/mISDN/socket.c 			maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff;
skb               136 drivers/isdn/mISDN/socket.c 			maddr->tei =  (mISDN_HEAD_ID(skb) >> 8) & 0xff;
skb               137 drivers/isdn/mISDN/socket.c 			maddr->sapi = mISDN_HEAD_ID(skb) & 0xff;
skb               146 drivers/isdn/mISDN/socket.c 	copied = skb->len + MISDN_HEADER_LEN;
skb               149 drivers/isdn/mISDN/socket.c 			refcount_dec(&skb->users);
skb               151 drivers/isdn/mISDN/socket.c 			skb_queue_head(&sk->sk_receive_queue, skb);
skb               154 drivers/isdn/mISDN/socket.c 	memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb),
skb               157 drivers/isdn/mISDN/socket.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               159 drivers/isdn/mISDN/socket.c 	mISDN_sock_cmsg(sk, msg, skb);
skb               161 drivers/isdn/mISDN/socket.c 	skb_free_datagram(sk, skb);
skb               170 drivers/isdn/mISDN/socket.c 	struct sk_buff		*skb;
skb               192 drivers/isdn/mISDN/socket.c 	skb = _l2_alloc_skb(len, GFP_KERNEL);
skb               193 drivers/isdn/mISDN/socket.c 	if (!skb)
skb               196 drivers/isdn/mISDN/socket.c 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
skb               201 drivers/isdn/mISDN/socket.c 	memcpy(mISDN_HEAD_P(skb), skb->data, MISDN_HEADER_LEN);
skb               202 drivers/isdn/mISDN/socket.c 	skb_pull(skb, MISDN_HEADER_LEN);
skb               207 drivers/isdn/mISDN/socket.c 		mISDN_HEAD_ID(skb) = maddr->channel;
skb               211 drivers/isdn/mISDN/socket.c 			mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr;
skb               216 drivers/isdn/mISDN/socket.c 		       __func__, mISDN_HEAD_ID(skb));
skb               221 drivers/isdn/mISDN/socket.c 	err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb);
skb               225 drivers/isdn/mISDN/socket.c 		skb = NULL;
skb               230 drivers/isdn/mISDN/socket.c 	kfree_skb(skb);
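
_l2_alloc_skb() at the top of the socket.c block is the classic headroom-reserving allocator: over-allocate by the worst-case lower-layer header and immediately skb_reserve() it, so later skb_push() calls never need a reallocation. A sketch, with HDR_ROOM as an illustrative constant standing in for L2_HEADER_LEN:

	#include <linux/skbuff.h>

	#define HDR_ROOM 4	/* hypothetical lower-layer header size */

	static struct sk_buff *alloc_with_headroom(unsigned int len, gfp_t gfp)
	{
		struct sk_buff *skb = alloc_skb(len + HDR_ROOM, gfp);

		if (likely(skb))
			skb_reserve(skb, HDR_ROOM); /* room for skb_push() */
		return skb;
	}
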
skb                21 drivers/isdn/mISDN/stack.c _queue_message(struct mISDNstack *st, struct sk_buff *skb)
skb                23 drivers/isdn/mISDN/stack.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb                27 drivers/isdn/mISDN/stack.c 		       __func__, hh->prim, hh->id, skb);
skb                28 drivers/isdn/mISDN/stack.c 	skb_queue_tail(&st->msgq, skb);
skb                36 drivers/isdn/mISDN/stack.c mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
skb                38 drivers/isdn/mISDN/stack.c 	_queue_message(ch->st, skb);
skb                59 drivers/isdn/mISDN/stack.c send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
skb                69 drivers/isdn/mISDN/stack.c 			cskb = skb_copy(skb, GFP_ATOMIC);
skb                82 drivers/isdn/mISDN/stack.c send_layer2(struct mISDNstack *st, struct sk_buff *skb)
skb                85 drivers/isdn/mISDN/stack.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb                95 drivers/isdn/mISDN/stack.c 				cskb = skb;
skb                96 drivers/isdn/mISDN/stack.c 				skb = NULL;
skb                98 drivers/isdn/mISDN/stack.c 				cskb = skb_copy(skb, GFP_KERNEL);
skb               120 drivers/isdn/mISDN/stack.c 				ret = ch->send(ch, skb);
skb               122 drivers/isdn/mISDN/stack.c 					skb = NULL;
skb               126 drivers/isdn/mISDN/stack.c 		ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
skb               128 drivers/isdn/mISDN/stack.c 			skb = NULL;
skb               136 drivers/isdn/mISDN/stack.c 	dev_kfree_skb(skb);
skb               140 drivers/isdn/mISDN/stack.c send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
skb               142 drivers/isdn/mISDN/stack.c 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
skb               149 drivers/isdn/mISDN/stack.c 		       __func__, hh->prim, hh->id, skb);
skb               152 drivers/isdn/mISDN/stack.c 			__net_timestamp(skb);
skb               153 drivers/isdn/mISDN/stack.c 			send_socklist(&st->l1sock, skb);
skb               155 drivers/isdn/mISDN/stack.c 		return st->layer1->send(st->layer1, skb);
skb               158 drivers/isdn/mISDN/stack.c 			send_socklist(&st->l1sock, skb);
skb               159 drivers/isdn/mISDN/stack.c 		send_layer2(st, skb);
skb               164 drivers/isdn/mISDN/stack.c 			return ch->send(ch, skb);
skb               174 drivers/isdn/mISDN/stack.c 			return ch->send(ch, skb);
skb               213 drivers/isdn/mISDN/stack.c 		struct sk_buff	*skb;
skb               221 drivers/isdn/mISDN/stack.c 			skb = skb_dequeue(&st->msgq);
skb               222 drivers/isdn/mISDN/stack.c 			if (!skb) {
skb               226 drivers/isdn/mISDN/stack.c 				skb = skb_dequeue(&st->msgq);
skb               227 drivers/isdn/mISDN/stack.c 				if (!skb)
skb               235 drivers/isdn/mISDN/stack.c 			err = send_msg_to_layer(st, skb);
skb               242 drivers/isdn/mISDN/stack.c 					       mISDN_HEAD_PRIM(skb),
skb               243 drivers/isdn/mISDN/stack.c 					       mISDN_HEAD_ID(skb), err);
skb               244 drivers/isdn/mISDN/stack.c 				dev_kfree_skb(skb);
skb               325 drivers/isdn/mISDN/stack.c l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
skb               329 drivers/isdn/mISDN/stack.c 	__net_timestamp(skb);
skb               330 drivers/isdn/mISDN/stack.c 	_queue_message(ch->st, skb);
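
send_socklist() above gives every bound socket a private skb_copy() (header and data duplicated, unlike skb_clone) and queues it with sock_queue_rcv_skb(), dropping the copy if the socket rejects it. A sketch for a single listener:

	#include <linux/skbuff.h>
	#include <net/sock.h>

	static void deliver_copy(struct sock *sk, struct sk_buff *skb)
	{
		struct sk_buff *cskb = skb_copy(skb, GFP_ATOMIC);

		if (!cskb)
			return;			/* listener misses this frame */
		if (sock_queue_rcv_skb(sk, cskb))
			kfree_skb(cskb);	/* queue full or filtered */
	}
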
skb               295 drivers/isdn/mISDN/tei.c 	struct sk_buff	*skb;
skb               299 drivers/isdn/mISDN/tei.c 	skb = mI_alloc_skb(len, GFP_ATOMIC);
skb               300 drivers/isdn/mISDN/tei.c 	if (!skb)
skb               302 drivers/isdn/mISDN/tei.c 	hh = mISDN_HEAD_P(skb);
skb               306 drivers/isdn/mISDN/tei.c 		skb_put_data(skb, arg, len);
skb               307 drivers/isdn/mISDN/tei.c 	err = mgr->up->send(mgr->up, skb);
skb               310 drivers/isdn/mISDN/tei.c 		dev_kfree_skb(skb);
skb               335 drivers/isdn/mISDN/tei.c 		struct sk_buff	*skb = skb_dequeue(&mgr->sendq);
skb               337 drivers/isdn/mISDN/tei.c 		if (!skb) {
skb               341 drivers/isdn/mISDN/tei.c 		mgr->lastid = mISDN_HEAD_ID(skb);
skb               343 drivers/isdn/mISDN/tei.c 		if (mgr->ch.recv(mgr->ch.peer, skb)) {
skb               344 drivers/isdn/mISDN/tei.c 			dev_kfree_skb(skb);
skb               357 drivers/isdn/mISDN/tei.c 				struct sk_buff	*skb;
skb               359 drivers/isdn/mISDN/tei.c 				skb = skb_dequeue(&mgr->sendq);
skb               360 drivers/isdn/mISDN/tei.c 				if (skb) {
skb               361 drivers/isdn/mISDN/tei.c 					mgr->lastid = mISDN_HEAD_ID(skb);
skb               362 drivers/isdn/mISDN/tei.c 					if (!mgr->ch.recv(mgr->ch.peer, skb))
skb               364 drivers/isdn/mISDN/tei.c 					dev_kfree_skb(skb);
skb               374 drivers/isdn/mISDN/tei.c mgr_send_down(struct manager *mgr, struct sk_buff *skb)
skb               376 drivers/isdn/mISDN/tei.c 	skb_queue_tail(&mgr->sendq, skb);
skb               386 drivers/isdn/mISDN/tei.c dl_unit_data(struct manager *mgr, struct sk_buff *skb)
skb               393 drivers/isdn/mISDN/tei.c 	skb_push(skb, 3);
skb               394 drivers/isdn/mISDN/tei.c 	skb->data[0] = 0x02; /* SAPI 0 C/R = 1 */
skb               395 drivers/isdn/mISDN/tei.c 	skb->data[1] = 0xff; /* TEI 127 */
skb               396 drivers/isdn/mISDN/tei.c 	skb->data[2] = UI;   /* UI frame */
skb               397 drivers/isdn/mISDN/tei.c 	mISDN_HEAD_PRIM(skb) = PH_DATA_REQ;
skb               398 drivers/isdn/mISDN/tei.c 	mISDN_HEAD_ID(skb) = new_id(mgr);
skb               399 drivers/isdn/mISDN/tei.c 	skb_queue_tail(&mgr->sendq, skb);
skb               434 drivers/isdn/mISDN/tei.c 	struct sk_buff *skb;
skb               447 drivers/isdn/mISDN/tei.c 	skb = _alloc_mISDN_skb(PH_DATA_REQ, new_id(mgr), 8, bp, GFP_ATOMIC);
skb               448 drivers/isdn/mISDN/tei.c 	if (!skb) {
skb               452 drivers/isdn/mISDN/tei.c 	mgr_send_down(mgr, skb);
skb               878 drivers/isdn/mISDN/tei.c ph_data_ind(struct manager *mgr, struct sk_buff *skb)
skb               884 drivers/isdn/mISDN/tei.c 	if (skb->len < 8) {
skb               887 drivers/isdn/mISDN/tei.c 			       __func__, skb->len);
skb               891 drivers/isdn/mISDN/tei.c 	if ((skb->data[0] >> 2) != TEI_SAPI) /* not for us */
skb               893 drivers/isdn/mISDN/tei.c 	if (skb->data[0] & 1) /* EA0 formal error */
skb               895 drivers/isdn/mISDN/tei.c 	if (!(skb->data[1] & 1)) /* EA1 formal error */
skb               897 drivers/isdn/mISDN/tei.c 	if ((skb->data[1] >> 1) != GROUP_TEI) /* not for us */
skb               899 drivers/isdn/mISDN/tei.c 	if ((skb->data[2] & 0xef) != UI) /* not UI */
skb               901 drivers/isdn/mISDN/tei.c 	if (skb->data[3] != TEI_ENTITY_ID) /* not tei entity */
skb               903 drivers/isdn/mISDN/tei.c 	mt = skb->data[6];
skb               923 drivers/isdn/mISDN/tei.c 		new_tei_req(mgr, &skb->data[4]);
skb               927 drivers/isdn/mISDN/tei.c 		tei_ph_data_ind(l2->tm, mt, &skb->data[4], skb->len - 4);
skb              1096 drivers/isdn/mISDN/tei.c mgr_send(struct mISDNchannel *ch, struct sk_buff *skb)
skb              1099 drivers/isdn/mISDN/tei.c 	struct mISDNhead	*hh =  mISDN_HEAD_P(skb);
skb              1109 drivers/isdn/mISDN/tei.c 		ret = ph_data_ind(mgr, skb);
skb              1131 drivers/isdn/mISDN/tei.c 		return dl_unit_data(mgr, skb);
skb              1134 drivers/isdn/mISDN/tei.c 		dev_kfree_skb(skb);
skb              1197 drivers/isdn/mISDN/tei.c check_data(struct manager *mgr, struct sk_buff *skb)
skb              1199 drivers/isdn/mISDN/tei.c 	struct mISDNhead	*hh =  mISDN_HEAD_P(skb);
skb              1210 drivers/isdn/mISDN/tei.c 	if (skb->len != 3)
skb              1212 drivers/isdn/mISDN/tei.c 	if (skb->data[0] & 3) /* EA0 and CR must be  0 */
skb              1214 drivers/isdn/mISDN/tei.c 	sapi = skb->data[0] >> 2;
skb              1215 drivers/isdn/mISDN/tei.c 	if (!(skb->data[1] & 1)) /* invalid EA1 */
skb              1217 drivers/isdn/mISDN/tei.c 	tei = skb->data[1] >> 1;
skb              1220 drivers/isdn/mISDN/tei.c 	if ((skb->data[2] & ~0x10) != SABME)
skb              1233 drivers/isdn/mISDN/tei.c 	ret = l2->ch.send(&l2->ch, skb);
skb              1284 drivers/isdn/mISDN/tei.c mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
skb              1287 drivers/isdn/mISDN/tei.c 	struct mISDNhead	*hhc, *hh = mISDN_HEAD_P(skb);
skb              1298 drivers/isdn/mISDN/tei.c 				cskb = skb;
skb              1299 drivers/isdn/mISDN/tei.c 				skb = NULL;
skb              1302 drivers/isdn/mISDN/tei.c 					cskb = skb_copy(skb, GFP_ATOMIC);
skb              1332 drivers/isdn/mISDN/tei.c 	dev_kfree_skb(skb);
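
dl_unit_data() above prepends the broadcast LAPD header with skb_push() before queueing the frame as PH_DATA_REQ. A sketch of that header build; the UI control value 0x03 is the standard Q.921 encoding but is an assumption here:

	#include <linux/skbuff.h>

	#define UI_CTRL 0x03	/* UI frame control field (assumed value) */

	static void build_bcast_header(struct sk_buff *skb)
	{
		u8 *hdr = skb_push(skb, 3); /* needs 3 bytes of headroom */

		hdr[0] = 0x02;		/* SAPI 0, C/R = 1, EA0 = 0 */
		hdr[1] = 0xff;		/* TEI 127 (group TEI), EA1 = 1 */
		hdr[2] = UI_CTRL;	/* unnumbered information frame */
	}
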
skb               128 drivers/media/dvb-core/dvb_net.c static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
skb               134 drivers/media/dvb-core/dvb_net.c 	skb_reset_mac_header(skb);
skb               135 drivers/media/dvb-core/dvb_net.c 	skb_pull(skb,dev->hard_header_len);
skb               136 drivers/media/dvb-core/dvb_net.c 	eth = eth_hdr(skb);
skb               140 drivers/media/dvb-core/dvb_net.c 			skb->pkt_type=PACKET_BROADCAST;
skb               142 drivers/media/dvb-core/dvb_net.c 			skb->pkt_type=PACKET_MULTICAST;
skb               148 drivers/media/dvb-core/dvb_net.c 	rawp = skb->data;
skb               750 drivers/media/dvb-core/dvb_net.c 	if (h->priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
skb               895 drivers/media/dvb-core/dvb_net.c 	struct sk_buff *skb;
skb               938 drivers/media/dvb-core/dvb_net.c 	if (!(skb = dev_alloc_skb(pkt_len - 4 - 12 + 14 + 2 - snap))) {
skb               943 drivers/media/dvb-core/dvb_net.c 	skb_reserve(skb, 2);    /* longword align L3 header */
skb               944 drivers/media/dvb-core/dvb_net.c 	skb->dev = dev;
skb               947 drivers/media/dvb-core/dvb_net.c 	eth = skb_put(skb, pkt_len - 12 - 4 + 14 - snap);
skb               976 drivers/media/dvb-core/dvb_net.c 	skb->protocol = dvb_net_eth_type_trans(skb, dev);
skb               979 drivers/media/dvb-core/dvb_net.c 	stats->rx_bytes+=skb->len;
skb               980 drivers/media/dvb-core/dvb_net.c 	netif_rx(skb);
skb               997 drivers/media/dvb-core/dvb_net.c static netdev_tx_t dvb_net_tx(struct sk_buff *skb, struct net_device *dev)
skb               999 drivers/media/dvb-core/dvb_net.c 	dev_kfree_skb(skb);
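
dvb_net_eth_type_trans() above (and mpt_lan_type_trans() further down) reimplement the eth_type_trans() shape: mark the MAC header, pull it out of the data area, and set pkt_type from the destination address before protocol dispatch. A minimal sketch of the classification step:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/if_packet.h>

	static void classify_rx(struct sk_buff *skb, struct net_device *dev)
	{
		struct ethhdr *eth;

		skb_reset_mac_header(skb);		/* header starts here */
		skb_pull(skb, dev->hard_header_len);	/* data -> payload */
		eth = eth_hdr(skb);

		if (is_multicast_ether_addr(eth->h_dest))
			skb->pkt_type = is_broadcast_ether_addr(eth->h_dest) ?
					PACKET_BROADCAST : PACKET_MULTICAST;
	}
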
skb               165 drivers/media/radio/wl128x/fmdrv_common.c static long (*g_st_write) (struct sk_buff *skb);
skb               188 drivers/media/radio/wl128x/fmdrv_common.c inline void dump_tx_skb_data(struct sk_buff *skb)
skb               194 drivers/media/radio/wl128x/fmdrv_common.c 	cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
skb               196 drivers/media/radio/wl128x/fmdrv_common.c 	       fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
skb               200 drivers/media/radio/wl128x/fmdrv_common.c 	len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
skb               206 drivers/media/radio/wl128x/fmdrv_common.c 			       skb->data[FM_CMD_MSG_HDR_SIZE + index]);
skb               213 drivers/media/radio/wl128x/fmdrv_common.c inline void dump_rx_skb_data(struct sk_buff *skb)
skb               219 drivers/media/radio/wl128x/fmdrv_common.c 	evt_hdr = (struct fm_event_msg_hdr *)skb->data;
skb               225 drivers/media/radio/wl128x/fmdrv_common.c 	len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
skb               231 drivers/media/radio/wl128x/fmdrv_common.c 			       skb->data[FM_EVT_MSG_HDR_SIZE + index]);
skb               252 drivers/media/radio/wl128x/fmdrv_common.c 	struct sk_buff *skb;
skb               259 drivers/media/radio/wl128x/fmdrv_common.c 	while ((skb = skb_dequeue(&fmdev->rx_q))) {
skb               260 drivers/media/radio/wl128x/fmdrv_common.c 		if (skb->len < sizeof(struct fm_event_msg_hdr)) {
skb               262 drivers/media/radio/wl128x/fmdrv_common.c 			      skb,
skb               263 drivers/media/radio/wl128x/fmdrv_common.c 			      skb->len, sizeof(struct fm_event_msg_hdr));
skb               264 drivers/media/radio/wl128x/fmdrv_common.c 			kfree_skb(skb);
skb               268 drivers/media/radio/wl128x/fmdrv_common.c 		evt_hdr = (void *)skb->data;
skb               289 drivers/media/radio/wl128x/fmdrv_common.c 			kfree_skb(skb);
skb               295 drivers/media/radio/wl128x/fmdrv_common.c 			fmdev->resp_skb = skb;
skb               308 drivers/media/radio/wl128x/fmdrv_common.c 			fmdev->resp_skb = skb;
skb               314 drivers/media/radio/wl128x/fmdrv_common.c 			kfree_skb(skb);
skb               317 drivers/media/radio/wl128x/fmdrv_common.c 			fmerr("Nobody claimed SKB(%p),purging\n", skb);
skb               334 drivers/media/radio/wl128x/fmdrv_common.c 	struct sk_buff *skb;
skb               349 drivers/media/radio/wl128x/fmdrv_common.c 	skb = skb_dequeue(&fmdev->tx_q);
skb               350 drivers/media/radio/wl128x/fmdrv_common.c 	if (!skb)
skb               354 drivers/media/radio/wl128x/fmdrv_common.c 	fmdev->pre_op = fm_cb(skb)->fm_op;
skb               359 drivers/media/radio/wl128x/fmdrv_common.c 	fmdev->resp_comp = fm_cb(skb)->completion;
skb               362 drivers/media/radio/wl128x/fmdrv_common.c 	len = g_st_write(skb);
skb               364 drivers/media/radio/wl128x/fmdrv_common.c 		kfree_skb(skb);
skb               366 drivers/media/radio/wl128x/fmdrv_common.c 		fmerr("TX tasklet failed to send skb(%p)\n", skb);
skb               380 drivers/media/radio/wl128x/fmdrv_common.c 	struct sk_buff *skb;
skb               398 drivers/media/radio/wl128x/fmdrv_common.c 	skb = alloc_skb(size, GFP_ATOMIC);
skb               399 drivers/media/radio/wl128x/fmdrv_common.c 	if (!skb) {
skb               410 drivers/media/radio/wl128x/fmdrv_common.c 		hdr = skb_put(skb, FM_CMD_MSG_HDR_SIZE);
skb               422 drivers/media/radio/wl128x/fmdrv_common.c 		fm_cb(skb)->fm_op = fm_op;
skb               433 drivers/media/radio/wl128x/fmdrv_common.c 		fm_cb(skb)->fm_op = *((u8 *)payload + 2);
skb               436 drivers/media/radio/wl128x/fmdrv_common.c 		skb_put_data(skb, payload, payload_len);
skb               438 drivers/media/radio/wl128x/fmdrv_common.c 	fm_cb(skb)->completion = wait_completion;
skb               439 drivers/media/radio/wl128x/fmdrv_common.c 	skb_queue_tail(&fmdev->tx_q, skb);
skb               449 drivers/media/radio/wl128x/fmdrv_common.c 	struct sk_buff *skb;
skb               471 drivers/media/radio/wl128x/fmdrv_common.c 	skb = fmdev->resp_skb;
skb               475 drivers/media/radio/wl128x/fmdrv_common.c 	evt_hdr = (void *)skb->data;
skb               479 drivers/media/radio/wl128x/fmdrv_common.c 		kfree_skb(skb);
skb               486 drivers/media/radio/wl128x/fmdrv_common.c 		skb_pull(skb, sizeof(struct fm_event_msg_hdr));
skb               487 drivers/media/radio/wl128x/fmdrv_common.c 		memcpy(response, skb->data, evt_hdr->dlen);
skb               492 drivers/media/radio/wl128x/fmdrv_common.c 	kfree_skb(skb);
skb               499 drivers/media/radio/wl128x/fmdrv_common.c 		struct sk_buff **skb)
skb               507 drivers/media/radio/wl128x/fmdrv_common.c 	*skb = fmdev->resp_skb;
skb               511 drivers/media/radio/wl128x/fmdrv_common.c 	fm_evt_hdr = (void *)(*skb)->data;
skb               525 drivers/media/radio/wl128x/fmdrv_common.c 	struct sk_buff *skb;
skb               527 drivers/media/radio/wl128x/fmdrv_common.c 	if (!check_cmdresp_status(fmdev, &skb))
skb               571 drivers/media/radio/wl128x/fmdrv_common.c 	struct sk_buff *skb;
skb               574 drivers/media/radio/wl128x/fmdrv_common.c 	if (check_cmdresp_status(fmdev, &skb))
skb               577 drivers/media/radio/wl128x/fmdrv_common.c 	fm_evt_hdr = (void *)skb->data;
skb               582 drivers/media/radio/wl128x/fmdrv_common.c 	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
skb               583 drivers/media/radio/wl128x/fmdrv_common.c 	memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);
skb               698 drivers/media/radio/wl128x/fmdrv_common.c 	struct sk_buff *skb;
skb               707 drivers/media/radio/wl128x/fmdrv_common.c 	if (check_cmdresp_status(fmdev, &skb))
skb               711 drivers/media/radio/wl128x/fmdrv_common.c 	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
skb               712 drivers/media/radio/wl128x/fmdrv_common.c 	rds_data = skb->data;
skb               713 drivers/media/radio/wl128x/fmdrv_common.c 	rds_len = skb->len;
skb               772 drivers/media/radio/wl128x/fmdrv_common.c 	rds_data = skb->data;
skb               773 drivers/media/radio/wl128x/fmdrv_common.c 	rds_len = skb->len;
skb               946 drivers/media/radio/wl128x/fmdrv_common.c 	struct sk_buff *skb;
skb               948 drivers/media/radio/wl128x/fmdrv_common.c 	if (check_cmdresp_status(fmdev, &skb))
skb               966 drivers/media/radio/wl128x/fmdrv_common.c 	struct sk_buff *skb;
skb               970 drivers/media/radio/wl128x/fmdrv_common.c 	if (check_cmdresp_status(fmdev, &skb))
skb               974 drivers/media/radio/wl128x/fmdrv_common.c 	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
skb               975 drivers/media/radio/wl128x/fmdrv_common.c 	memcpy(&read_freq, skb->data, sizeof(read_freq));
skb              1027 drivers/media/radio/wl128x/fmdrv_common.c 	struct sk_buff *skb;
skb              1029 drivers/media/radio/wl128x/fmdrv_common.c 	if (check_cmdresp_status(fmdev, &skb))
skb              1436 drivers/media/radio/wl128x/fmdrv_common.c static long fm_st_receive(void *arg, struct sk_buff *skb)
skb              1442 drivers/media/radio/wl128x/fmdrv_common.c 	if (skb == NULL) {
skb              1447 drivers/media/radio/wl128x/fmdrv_common.c 	if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
skb              1448 drivers/media/radio/wl128x/fmdrv_common.c 		fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
skb              1452 drivers/media/radio/wl128x/fmdrv_common.c 	memcpy(skb_push(skb, 1), &skb->cb[0], 1);
skb              1453 drivers/media/radio/wl128x/fmdrv_common.c 	skb_queue_tail(&fmdev->rx_q, skb);
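
The fm_cb() macro just below casts skb->cb, the 48-byte per-skb control buffer a driver may use for its own bookkeeping. A sketch of the idiom with a hypothetical struct modeled on fm_skb_cb (fm_op, completion); the BUILD_BUG_ON guards against outgrowing cb[]:

	#include <linux/skbuff.h>
	#include <linux/completion.h>
	#include <linux/bug.h>

	struct my_skb_cb {			/* stand-in for fm_skb_cb */
		u8 fm_op;
		struct completion *completion;
	};

	#define my_cb(skb) ((struct my_skb_cb *)(skb)->cb)

	static void tag_skb(struct sk_buff *skb, u8 op)
	{
		BUILD_BUG_ON(sizeof(struct my_skb_cb) > sizeof(skb->cb));
		my_cb(skb)->fm_op = op;
		my_cb(skb)->completion = NULL;	/* fire-and-forget command */
	}
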
skb               129 drivers/media/radio/wl128x/fmdrv_common.h #define fm_cb(skb) ((struct fm_skb_cb *)(skb->cb))
skb                83 drivers/message/fusion/mptlan.c 	struct sk_buff	*skb;
skb               145 drivers/message/fusion/mptlan.c static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
skb               519 drivers/message/fusion/mptlan.c 		if (priv->RcvCtl[i].skb != NULL) {
skb               525 drivers/message/fusion/mptlan.c 			dev_kfree_skb(priv->RcvCtl[i].skb);
skb               533 drivers/message/fusion/mptlan.c 		if (priv->SendCtl[i].skb != NULL) {
skb               537 drivers/message/fusion/mptlan.c 			dev_kfree_skb(priv->SendCtl[i].skb);
skb               578 drivers/message/fusion/mptlan.c 	sent = priv->SendCtl[ctx].skb;
skb               587 drivers/message/fusion/mptlan.c 	priv->SendCtl[ctx].skb = NULL;
skb               646 drivers/message/fusion/mptlan.c 		sent = priv->SendCtl[ctx].skb;
skb               653 drivers/message/fusion/mptlan.c 		priv->SendCtl[ctx].skb = NULL;
skb               675 drivers/message/fusion/mptlan.c mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
skb               690 drivers/message/fusion/mptlan.c 			__func__, skb));
skb               723 drivers/message/fusion/mptlan.c 	skb_reset_mac_header(skb);
skb               724 drivers/message/fusion/mptlan.c 	skb_pull(skb, 12);
skb               726 drivers/message/fusion/mptlan.c         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
skb               729 drivers/message/fusion/mptlan.c 	priv->SendCtl[ctx].skb = skb;
skb               731 drivers/message/fusion/mptlan.c 	priv->SendCtl[ctx].len = skb->len;
skb               754 drivers/message/fusion/mptlan.c 	mac = skb_mac_header(skb);
skb               777 drivers/message/fusion/mptlan.c 			skb->len);
skb               818 drivers/message/fusion/mptlan.c mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
skb               822 drivers/message/fusion/mptlan.c 	skb->protocol = mpt_lan_type_trans(skb, dev);
skb               826 drivers/message/fusion/mptlan.c 			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
skb               828 drivers/message/fusion/mptlan.c 	dev->stats.rx_bytes += skb->len;
skb               831 drivers/message/fusion/mptlan.c 	skb->dev = dev;
skb               832 drivers/message/fusion/mptlan.c 	netif_rx(skb);
skb               854 drivers/message/fusion/mptlan.c 	struct sk_buff *skb, *old_skb;
skb               859 drivers/message/fusion/mptlan.c 	skb = priv->RcvCtl[ctx].skb;
skb               864 drivers/message/fusion/mptlan.c 		old_skb = skb;
skb               866 drivers/message/fusion/mptlan.c 		skb = (struct sk_buff *)dev_alloc_skb(len);
skb               867 drivers/message/fusion/mptlan.c 		if (!skb) {
skb               877 drivers/message/fusion/mptlan.c 		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
skb               884 drivers/message/fusion/mptlan.c 	skb_put(skb, len);
skb               886 drivers/message/fusion/mptlan.c 	priv->RcvCtl[ctx].skb = NULL;
skb               899 drivers/message/fusion/mptlan.c 	return mpt_lan_receive_skb(dev, skb);
skb               910 drivers/message/fusion/mptlan.c 	struct sk_buff *skb;
skb               924 drivers/message/fusion/mptlan.c 		skb = priv->RcvCtl[ctx].skb;
skb               932 drivers/message/fusion/mptlan.c 		priv->RcvCtl[ctx].skb = NULL;
skb               935 drivers/message/fusion/mptlan.c 		dev_kfree_skb_any(skb);
skb               964 drivers/message/fusion/mptlan.c 	struct sk_buff *skb, *old_skb;
skb               991 drivers/message/fusion/mptlan.c 	skb    = priv->RcvCtl[ctx].skb;
skb              1012 drivers/message/fusion/mptlan.c 		skb = (struct sk_buff *)dev_alloc_skb(len);
skb              1013 drivers/message/fusion/mptlan.c 		if (!skb) {
skb              1024 drivers/message/fusion/mptlan.c 			old_skb = priv->RcvCtl[ctx].skb;
skb              1038 drivers/message/fusion/mptlan.c 			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
skb              1052 drivers/message/fusion/mptlan.c 		old_skb = skb;
skb              1054 drivers/message/fusion/mptlan.c 		skb = (struct sk_buff *)dev_alloc_skb(len);
skb              1055 drivers/message/fusion/mptlan.c 		if (!skb) {
skb              1067 drivers/message/fusion/mptlan.c 		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
skb              1081 drivers/message/fusion/mptlan.c 		priv->RcvCtl[ctx].skb = NULL;
skb              1090 drivers/message/fusion/mptlan.c 		skb_put(skb,len);
skb              1132 drivers/message/fusion/mptlan.c 	return mpt_lan_receive_skb(dev, skb);
skb              1147 drivers/message/fusion/mptlan.c 	struct sk_buff *skb;
skb              1203 drivers/message/fusion/mptlan.c 			skb = priv->RcvCtl[ctx].skb;
skb              1204 drivers/message/fusion/mptlan.c 			if (skb && (priv->RcvCtl[ctx].len != len)) {
skb              1209 drivers/message/fusion/mptlan.c 				dev_kfree_skb(priv->RcvCtl[ctx].skb);
skb              1210 drivers/message/fusion/mptlan.c 				skb = priv->RcvCtl[ctx].skb = NULL;
skb              1213 drivers/message/fusion/mptlan.c 			if (skb == NULL) {
skb              1214 drivers/message/fusion/mptlan.c 				skb = dev_alloc_skb(len);
skb              1215 drivers/message/fusion/mptlan.c 				if (skb == NULL) {
skb              1224 drivers/message/fusion/mptlan.c 				dma = pci_map_single(mpt_dev->pcidev, skb->data,
skb              1227 drivers/message/fusion/mptlan.c 				priv->RcvCtl[ctx].skb = skb;
skb              1487 drivers/message/fusion/mptlan.c mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
skb              1489 drivers/message/fusion/mptlan.c 	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
skb              1492 drivers/message/fusion/mptlan.c 	skb_reset_mac_header(skb);
skb              1493 drivers/message/fusion/mptlan.c 	skb_pull(skb, sizeof(struct mpt_lan_ohdr));
skb              1511 drivers/message/fusion/mptlan.c 			skb->pkt_type = PACKET_BROADCAST;
skb              1513 drivers/message/fusion/mptlan.c 			skb->pkt_type = PACKET_MULTICAST;
skb              1517 drivers/message/fusion/mptlan.c 			skb->pkt_type = PACKET_OTHERHOST;
skb              1519 drivers/message/fusion/mptlan.c 			skb->pkt_type = PACKET_HOST;
skb              1523 drivers/message/fusion/mptlan.c 	fcllc = (struct fcllc *)skb->data;
skb              1531 drivers/message/fusion/mptlan.c 		skb_pull(skb, sizeof(struct fcllc));
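
mpt_lan_post_receive_buckets() above recycles its RX skbs: a cached buffer is reused only while its posted length still matches, otherwise it is freed and dev_alloc_skb() supplies a fresh one. A sketch with struct rx_slot as a pared-down stand-in for the RcvCtl entry (DMA mapping omitted):

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	struct rx_slot {			/* hypothetical bucket entry */
		struct sk_buff *skb;
		unsigned int len;
	};

	static int repost_slot(struct rx_slot *slot, unsigned int len)
	{
		if (slot->skb && slot->len != len) {
			dev_kfree_skb(slot->skb); /* size changed: drop it */
			slot->skb = NULL;
		}
		if (!slot->skb) {
			slot->skb = dev_alloc_skb(len);
			if (!slot->skb)
				return -ENOMEM;
			slot->len = len;
		}
		return 0;
	}
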
skb                95 drivers/misc/sgi-xp/xpnet.c 	struct sk_buff *skb;
skb               151 drivers/misc/sgi-xp/xpnet.c 	struct sk_buff *skb;
skb               169 drivers/misc/sgi-xp/xpnet.c 	skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
skb               170 drivers/misc/sgi-xp/xpnet.c 	if (!skb) {
skb               186 drivers/misc/sgi-xp/xpnet.c 	skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
skb               194 drivers/misc/sgi-xp/xpnet.c 	skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));
skb               202 drivers/misc/sgi-xp/xpnet.c 			"%lu)\n", skb->data, &msg->data,
skb               205 drivers/misc/sgi-xp/xpnet.c 		skb_copy_to_linear_data(skb, &msg->data,
skb               208 drivers/misc/sgi-xp/xpnet.c 		dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
skb               233 drivers/misc/sgi-xp/xpnet.c 		"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
skb               234 drivers/misc/sgi-xp/xpnet.c 		(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
skb               235 drivers/misc/sgi-xp/xpnet.c 		skb->len);
skb               237 drivers/misc/sgi-xp/xpnet.c 	skb->protocol = eth_type_trans(skb, xpnet_device);
skb               238 drivers/misc/sgi-xp/xpnet.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               243 drivers/misc/sgi-xp/xpnet.c 		(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
skb               244 drivers/misc/sgi-xp/xpnet.c 		skb_end_pointer(skb), skb->len);
skb               247 drivers/misc/sgi-xp/xpnet.c 	xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN;
skb               249 drivers/misc/sgi-xp/xpnet.c 	netif_rx_ni(skb);
skb               354 drivers/misc/sgi-xp/xpnet.c 			(void *)queued_msg->skb->head);
skb               356 drivers/misc/sgi-xp/xpnet.c 		dev_kfree_skb_any(queued_msg->skb);
skb               362 drivers/misc/sgi-xp/xpnet.c xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
skb               374 drivers/misc/sgi-xp/xpnet.c 			&msg->data, skb->data, (size_t)embedded_bytes);
skb               375 drivers/misc/sgi-xp/xpnet.c 		skb_copy_from_linear_data(skb, &msg->data,
skb               383 drivers/misc/sgi-xp/xpnet.c 	msg->leadin_ignore = (u64)skb->data - start_addr;
skb               384 drivers/misc/sgi-xp/xpnet.c 	msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
skb               411 drivers/misc/sgi-xp/xpnet.c xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               419 drivers/misc/sgi-xp/xpnet.c 		"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
skb               420 drivers/misc/sgi-xp/xpnet.c 		(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
skb               421 drivers/misc/sgi-xp/xpnet.c 		skb->len);
skb               423 drivers/misc/sgi-xp/xpnet.c 	if (skb->data[0] == 0x33) {
skb               424 drivers/misc/sgi-xp/xpnet.c 		dev_kfree_skb(skb);
skb               439 drivers/misc/sgi-xp/xpnet.c 		dev_kfree_skb(skb);
skb               444 drivers/misc/sgi-xp/xpnet.c 	start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
skb               445 drivers/misc/sgi-xp/xpnet.c 	end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
skb               448 drivers/misc/sgi-xp/xpnet.c 	if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
skb               450 drivers/misc/sgi-xp/xpnet.c 		embedded_bytes = skb->len;
skb               461 drivers/misc/sgi-xp/xpnet.c 	queued_msg->skb = skb;
skb               463 drivers/misc/sgi-xp/xpnet.c 	if (skb->data[0] == 0xff) {
skb               468 drivers/misc/sgi-xp/xpnet.c 			xpnet_send(skb, queued_msg, start_addr, end_addr,
skb               472 drivers/misc/sgi-xp/xpnet.c 		dest_partid = (short)skb->data[XPNET_PARTID_OCTET + 1];
skb               473 drivers/misc/sgi-xp/xpnet.c 		dest_partid |= (short)skb->data[XPNET_PARTID_OCTET + 0] << 8;
skb               479 drivers/misc/sgi-xp/xpnet.c 			xpnet_send(skb, queued_msg, start_addr, end_addr,
skb               485 drivers/misc/sgi-xp/xpnet.c 	dev->stats.tx_bytes += skb->len;
skb               488 drivers/misc/sgi-xp/xpnet.c 		dev_kfree_skb(skb);
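
The xpnet.c entries show skbs being over-allocated by L1_CACHE_BYTES and then skb_reserve()d so that skb->data lands on a cache-line boundary, which the cross-partition transfer engine needs. A sketch of just that alignment trick, assuming L1_CACHE_BYTES is a power of two; note that an already-aligned buffer is advanced by a full cache line, exactly as in the driver.

	#include <linux/cache.h>
	#include <linux/skbuff.h>

	static struct sk_buff *alloc_aligned_skb(unsigned int size)
	{
		struct sk_buff *skb = dev_alloc_skb(size + L1_CACHE_BYTES);

		if (!skb)
			return NULL;

		/* round skb->data up to the next cache-line boundary */
		skb_reserve(skb, L1_CACHE_BYTES -
			    ((unsigned long)skb->data & (L1_CACHE_BYTES - 1)));
		return skb;
	}
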
skb               408 drivers/misc/ti-st/st_core.c static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
skb               418 drivers/misc/ti-st/st_core.c 		skb_queue_tail(&st_gdata->txq, skb);
skb               421 drivers/misc/ti-st/st_core.c 		skb_queue_tail(&st_gdata->tx_waitq, skb);
skb               426 drivers/misc/ti-st/st_core.c 		kfree_skb(skb);
skb               429 drivers/misc/ti-st/st_core.c 		skb_queue_tail(&st_gdata->tx_waitq, skb);
skb               435 drivers/misc/ti-st/st_core.c 		kfree_skb(skb);
skb               459 drivers/misc/ti-st/st_core.c 	struct sk_buff *skb;
skb               475 drivers/misc/ti-st/st_core.c 		while ((skb = st_int_dequeue(st_data))) {
skb               480 drivers/misc/ti-st/st_core.c 			len = st_int_write(st_data, skb->data, skb->len);
skb               481 drivers/misc/ti-st/st_core.c 			skb_pull(skb, len);
skb               483 drivers/misc/ti-st/st_core.c 			if (skb->len) {
skb               485 drivers/misc/ti-st/st_core.c 				st_data->tx_skb = skb;
skb               489 drivers/misc/ti-st/st_core.c 			kfree_skb(skb);
skb               678 drivers/misc/ti-st/st_core.c long st_write(struct sk_buff *skb)
skb               684 drivers/misc/ti-st/st_core.c 	if (unlikely(skb == NULL || st_gdata == NULL
skb               690 drivers/misc/ti-st/st_core.c 	pr_debug("%d to be written", skb->len);
skb               691 drivers/misc/ti-st/st_core.c 	len = skb->len;
skb               694 drivers/misc/ti-st/st_core.c 	st_int_enqueue(st_gdata, skb);
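
The st_core.c entries show a TX drain loop that tolerates partial writes: whatever the transport accepts is consumed with skb_pull(), and a partially sent skb is parked for the next wakeup. A sketch of that loop; hw_write() is a hypothetical stand-in for st_int_write() and is assumed to return the number of bytes accepted (0..skb->len).

	#include <linux/skbuff.h>

	static void tx_drain(struct sk_buff_head *txq, struct sk_buff **pending,
			     int (*hw_write)(const unsigned char *data, int len))
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(txq))) {
			int len = hw_write(skb->data, skb->len);

			skb_pull(skb, len);	/* consume what was accepted */
			if (skb->len) {		/* transport full: park the rest */
				*pending = skb;
				return;
			}
			kfree_skb(skb);		/* fully written */
		}
		*pending = NULL;
	}
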
skb                54 drivers/misc/ti-st/st_kim.c 	struct sk_buff *skb = kim_gdata->rx_skb;
skb                55 drivers/misc/ti-st/st_kim.c 	if (!skb)
skb                62 drivers/misc/ti-st/st_kim.c 	if (skb->data[2] == 0x01 && skb->data[3] == 0x01 &&
skb                63 drivers/misc/ti-st/st_kim.c 			skb->data[4] == 0x10 && skb->data[5] == 0x00) {
skb                71 drivers/misc/ti-st/st_kim.c 	} else if (unlikely(skb->data[5] != 0)) {
skb                73 drivers/misc/ti-st/st_kim.c 		pr_err("data6 %x", skb->data[5]);
skb                74 drivers/misc/ti-st/st_kim.c 		kfree_skb(skb);
skb                79 drivers/misc/ti-st/st_kim.c 	kfree_skb(skb);
skb               194 drivers/net/appletalk/cops.c static netdev_tx_t  cops_send_packet (struct sk_buff *skb,
skb               748 drivers/net/appletalk/cops.c         struct sk_buff *skb = NULL;
skb               786 drivers/net/appletalk/cops.c         skb = dev_alloc_skb(pkt_len);
skb               787 drivers/net/appletalk/cops.c         if(skb == NULL)
skb               797 drivers/net/appletalk/cops.c         skb->dev = dev;
skb               798 drivers/net/appletalk/cops.c         skb_put(skb, pkt_len);
skb               799 drivers/net/appletalk/cops.c         skb->protocol = htons(ETH_P_LOCALTALK);
skb               801 drivers/net/appletalk/cops.c         insb(ioaddr, skb->data, pkt_len);               /* Eat the Data */
skb               814 drivers/net/appletalk/cops.c                 dev_kfree_skb_any(skb);
skb               821 drivers/net/appletalk/cops.c                 lp->node_acquire = skb->data[0];
skb               822 drivers/net/appletalk/cops.c                 dev_kfree_skb_any(skb);
skb               831 drivers/net/appletalk/cops.c                 dev_kfree_skb_any(skb);
skb               835 drivers/net/appletalk/cops.c         skb_reset_mac_header(skb);    /* Point to entire packet. */
skb               836 drivers/net/appletalk/cops.c         skb_pull(skb,3);
skb               837 drivers/net/appletalk/cops.c         skb_reset_transport_header(skb);    /* Point to data (Skip header). */
skb               841 drivers/net/appletalk/cops.c         dev->stats.rx_bytes += skb->len;
skb               844 drivers/net/appletalk/cops.c         netif_rx(skb);
skb               869 drivers/net/appletalk/cops.c static netdev_tx_t cops_send_packet(struct sk_buff *skb,
skb               891 drivers/net/appletalk/cops.c 	outb(skb->len, ioaddr);
skb               892 drivers/net/appletalk/cops.c 	outb(skb->len >> 8, ioaddr);
skb               900 drivers/net/appletalk/cops.c 	outsb(ioaddr, skb->data, skb->len);	/* Send out the data. */
skb               909 drivers/net/appletalk/cops.c 	dev->stats.tx_bytes += skb->len;
skb               910 drivers/net/appletalk/cops.c 	dev_kfree_skb (skb);
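
The cops.c entries show a classic ISA programmed-I/O receive: allocate, skb_put() to size, insb() the payload straight into skb->data, then mark the 3-byte LLAP header as the mac header and pull past it so the transport header points at the DDP data. A sketch under those assumptions; insb() is the ISA PIO helper, and ioaddr/pkt_len are assumed to have been read from the card.

	#include <linux/if_ether.h>
	#include <linux/io.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void pio_rx(struct net_device *dev, unsigned int ioaddr, int pkt_len)
	{
		struct sk_buff *skb = dev_alloc_skb(pkt_len);

		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb->dev = dev;
		skb_put(skb, pkt_len);			/* size the data area... */
		insb(ioaddr, skb->data, pkt_len);	/* ...then PIO-copy into it */
		skb->protocol = htons(ETH_P_LOCALTALK);
		skb_reset_mac_header(skb);		/* mac header = whole packet */
		skb_pull(skb, 3);			/* skip the 3-byte LLAP header */
		skb_reset_transport_header(skb);	/* DDP data starts here */
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_rx(skb);
	}
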
skb                52 drivers/net/appletalk/ipddp.c static netdev_tx_t ipddp_xmit(struct sk_buff *skb,
skb               117 drivers/net/appletalk/ipddp.c static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
skb               119 drivers/net/appletalk/ipddp.c         struct rtable *rtable = skb_rtable(skb);
skb               151 drivers/net/appletalk/ipddp.c 		skb_pull(skb, 35-(sizeof(struct ddpehdr)+1));
skb               154 drivers/net/appletalk/ipddp.c 	ddp = (struct ddpehdr *)skb->data;
skb               155 drivers/net/appletalk/ipddp.c         ddp->deh_len_hops = htons(skb->len + (1<<10));
skb               179 drivers/net/appletalk/ipddp.c         skb->protocol = htons(ETH_P_ATALK);     /* Protocol has changed */
skb               182 drivers/net/appletalk/ipddp.c 	dev->stats.tx_bytes += skb->len;
skb               184 drivers/net/appletalk/ipddp.c 	aarp_send_ddp(rt->dev, skb, &rt->at, NULL);
skb               699 drivers/net/appletalk/ltpc.c static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev);
skb               725 drivers/net/appletalk/ltpc.c 	struct sk_buff *skb;
skb               752 drivers/net/appletalk/ltpc.c 	skb = dev_alloc_skb(3+sklen);
skb               753 drivers/net/appletalk/ltpc.c 	if (skb == NULL) 
skb               759 drivers/net/appletalk/ltpc.c 	skb->dev = dev;
skb               762 drivers/net/appletalk/ltpc.c 		skb_reserve(skb,8);
skb               763 drivers/net/appletalk/ltpc.c 	skb_put(skb,len+3);
skb               764 drivers/net/appletalk/ltpc.c 	skb->protocol = htons(ETH_P_LOCALTALK);
skb               766 drivers/net/appletalk/ltpc.c 	skb->data[0] = dnode;
skb               767 drivers/net/appletalk/ltpc.c 	skb->data[1] = snode;
skb               768 drivers/net/appletalk/ltpc.c 	skb->data[2] = llaptype;
skb               769 drivers/net/appletalk/ltpc.c 	skb_reset_mac_header(skb);	/* save pointer to llap header */
skb               770 drivers/net/appletalk/ltpc.c 	skb_pull(skb,3);
skb               773 drivers/net/appletalk/ltpc.c 	skb_copy_to_linear_data(skb, ltdmabuf, len);
skb               775 drivers/net/appletalk/ltpc.c 	skb_reset_transport_header(skb);
skb               778 drivers/net/appletalk/ltpc.c 	dev->stats.rx_bytes += skb->len;
skb               781 drivers/net/appletalk/ltpc.c 	netif_rx(skb);
skb               891 drivers/net/appletalk/ltpc.c static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
skb               901 drivers/net/appletalk/ltpc.c 	cbuf.dnode = skb->data[0];
skb               902 drivers/net/appletalk/ltpc.c 	cbuf.laptype = skb->data[2];
skb               903 drivers/net/appletalk/ltpc.c 	skb_pull(skb,3);	/* skip past LLAP header */
skb               904 drivers/net/appletalk/ltpc.c 	cbuf.length = skb->len;	/* this is host order */
skb               905 drivers/net/appletalk/ltpc.c 	skb_reset_transport_header(skb);
skb               914 drivers/net/appletalk/ltpc.c 	hdr = skb_transport_header(skb);
skb               915 drivers/net/appletalk/ltpc.c 	do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);
skb               918 drivers/net/appletalk/ltpc.c 		printk("sent %d ddp bytes\n",skb->len);
skb               919 drivers/net/appletalk/ltpc.c 		for (i = 0; i < skb->len; i++)
skb               925 drivers/net/appletalk/ltpc.c 	dev->stats.tx_bytes += skb->len;
skb               927 drivers/net/appletalk/ltpc.c 	dev_kfree_skb(skb);
skb                43 drivers/net/arcnet/arc-rawmode.c 	struct sk_buff *skb;
skb                54 drivers/net/arcnet/arc-rawmode.c 	skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
skb                55 drivers/net/arcnet/arc-rawmode.c 	if (!skb) {
skb                59 drivers/net/arcnet/arc-rawmode.c 	skb_put(skb, length + ARC_HDR_SIZE);
skb                60 drivers/net/arcnet/arc-rawmode.c 	skb->dev = dev;
skb                62 drivers/net/arcnet/arc-rawmode.c 	pkt = (struct archdr *)skb->data;
skb                64 drivers/net/arcnet/arc-rawmode.c 	skb_reset_mac_header(skb);
skb                65 drivers/net/arcnet/arc-rawmode.c 	skb_pull(skb, ARC_HDR_SIZE);
skb                75 drivers/net/arcnet/arc-rawmode.c 		arcnet_dump_skb(dev, skb, "rx");
skb                77 drivers/net/arcnet/arc-rawmode.c 	skb->protocol = cpu_to_be16(ETH_P_ARCNET);
skb                78 drivers/net/arcnet/arc-rawmode.c 	netif_rx(skb);
skb                84 drivers/net/arcnet/arc-rawmode.c static int build_header(struct sk_buff *skb, struct net_device *dev,
skb                88 drivers/net/arcnet/arc-rawmode.c 	struct archdr *pkt = skb_push(skb, hdr_size);
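
The build_header entries here and in arcdevice.h below show each ARCnet protocol module filling its hardware header by pushing into headroom reserved at allocation. A minimal sketch of that skb_push() idiom; struct archdr_sketch is a hypothetical stand-in for struct archdr.

	#include <linux/skbuff.h>
	#include <linux/types.h>

	struct archdr_sketch {		/* hypothetical stand-in for struct archdr */
		u8 source;
		u8 dest;
		u8 proto;
	};

	static int my_build_header(struct sk_buff *skb, u8 daddr, u8 proto)
	{
		struct archdr_sketch *pkt = skb_push(skb, sizeof(*pkt));

		pkt->source = 0;	/* ARCnet hardware fills the source in */
		pkt->dest = daddr;
		pkt->proto = proto;
		return sizeof(*pkt);	/* header bytes added, as build_header does */
	}
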
skb               198 drivers/net/arcnet/arcdevice.h 	int (*build_header)(struct sk_buff *skb, struct net_device *dev,
skb               216 drivers/net/arcnet/arcdevice.h 	struct sk_buff *skb;	/* packet data buffer             */
skb               227 drivers/net/arcnet/arcdevice.h 	struct sk_buff *skb;	/* buffer from upper levels */
skb               343 drivers/net/arcnet/arcdevice.h void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
skb               346 drivers/net/arcnet/arcdevice.h void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc)
skb               357 drivers/net/arcnet/arcdevice.h netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
skb                64 drivers/net/arcnet/arcnet.c static int null_build_header(struct sk_buff *skb, struct net_device *dev,
skb               104 drivers/net/arcnet/arcnet.c static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
skb               147 drivers/net/arcnet/arcnet.c 		     struct sk_buff *skb, char *desc)
skb               154 drivers/net/arcnet/arcnet.c 		       16, 1, skb->data, skb->len, true);
skb               400 drivers/net/arcnet/arcnet.c 	struct sk_buff *ackskb, *skb;
skb               406 drivers/net/arcnet/arcnet.c 	skb = lp->outgoing.skb;
skb               407 drivers/net/arcnet/arcnet.c 	if (!skb || !skb->sk) {
skb               412 drivers/net/arcnet/arcnet.c 	sock_hold(skb->sk);
skb               413 drivers/net/arcnet/arcnet.c 	sk = skb->sk;
skb               414 drivers/net/arcnet/arcnet.c 	ackskb = skb_clone_sk(skb);
skb               415 drivers/net/arcnet/arcnet.c 	sock_put(skb->sk);
skb               426 drivers/net/arcnet/arcnet.c 	serr->ee.ee_data = skb_shinfo(skb)->tskey;
skb               430 drivers/net/arcnet/arcnet.c 	dev_kfree_skb(lp->outgoing.skb);
skb               431 drivers/net/arcnet/arcnet.c 	lp->outgoing.skb = NULL;
skb               595 drivers/net/arcnet/arcnet.c static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
skb               609 drivers/net/arcnet/arcnet.c 	if (skb->len != 0 && len != skb->len)
skb               611 drivers/net/arcnet/arcnet.c 			   skb->len, len);
skb               625 drivers/net/arcnet/arcnet.c 		*(uint16_t *)skb_push(skb, 2) = type;
skb               627 drivers/net/arcnet/arcnet.c 		if (skb->network_header - skb->mac_header != 2)
skb               629 drivers/net/arcnet/arcnet.c 				   skb->network_header - skb->mac_header);
skb               644 drivers/net/arcnet/arcnet.c 	return proto->build_header(skb, dev, type, _daddr);
skb               648 drivers/net/arcnet/arcnet.c netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
skb               661 drivers/net/arcnet/arcnet.c 		   lp->hw.status(dev), lp->cur_tx, lp->next_tx, skb->len, skb->protocol);
skb               663 drivers/net/arcnet/arcnet.c 	pkt = (struct archdr *)skb->data;
skb               668 drivers/net/arcnet/arcnet.c 		   skb->len, pkt->hard.dest);
skb               670 drivers/net/arcnet/arcnet.c 		arcnet_dump_skb(dev, skb, "tx");
skb               673 drivers/net/arcnet/arcnet.c 	if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) {
skb               675 drivers/net/arcnet/arcnet.c 		dev_kfree_skb(skb);
skb               690 drivers/net/arcnet/arcnet.c 		lp->outgoing.skb = skb;
skb               691 drivers/net/arcnet/arcnet.c 		if (proto->prepare_tx(dev, pkt, skb->len, txbuf) &&
skb               696 drivers/net/arcnet/arcnet.c 			dev->stats.tx_bytes += skb->len;
skb               700 drivers/net/arcnet/arcnet.c 			lp->outgoing.skb = skb;
skb               954 drivers/net/arcnet/arcnet.c 						dev->stats.tx_bytes += lp->outgoing.skb->len;
skb               956 drivers/net/arcnet/arcnet.c 							dev_kfree_skb_irq(lp->outgoing.skb);
skb              1131 drivers/net/arcnet/arcnet.c static int null_build_header(struct sk_buff *skb, struct net_device *dev,
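
The arcnet.c ack_tx entries show TX acknowledgements being reported back to the sending socket: the outgoing skb is cloned against its socket with skb_clone_sk() and the status is returned via the socket error queue, keyed by skb_shinfo(skb)->tskey. A sketch of that pattern; the ee_* field usage follows the SO_TIMESTAMPING conventions and should be read as an outline, not the driver's literal code.

	#include <linux/errqueue.h>
	#include <linux/skbuff.h>
	#include <net/sock.h>

	static void ack_tx_to_socket(struct sk_buff *skb)
	{
		struct sock_exterr_skb *serr;
		struct sk_buff *ackskb;

		if (!skb->sk)			/* nobody asked for status */
			return;

		ackskb = skb_clone_sk(skb);	/* clone owns a socket reference */
		if (!ackskb)
			return;

		serr = SKB_EXT_ERR(ackskb);
		memset(serr, 0, sizeof(*serr));
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
		serr->ee.ee_data = skb_shinfo(skb)->tskey;

		if (sock_queue_err_skb(ackskb->sk, ackskb))
			kfree_skb(ackskb);	/* queue full or socket gone */
	}
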
skb                46 drivers/net/arcnet/capmode.c 	struct sk_buff *skb;
skb                59 drivers/net/arcnet/capmode.c 	skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC);
skb                60 drivers/net/arcnet/capmode.c 	if (!skb) {
skb                64 drivers/net/arcnet/capmode.c 	skb_put(skb, length + ARC_HDR_SIZE + sizeof(int));
skb                65 drivers/net/arcnet/capmode.c 	skb->dev = dev;
skb                66 drivers/net/arcnet/capmode.c 	skb_reset_mac_header(skb);
skb                67 drivers/net/arcnet/capmode.c 	pkt = (struct archdr *)skb_mac_header(skb);
skb                68 drivers/net/arcnet/capmode.c 	skb_pull(skb, ARC_HDR_SIZE);
skb                89 drivers/net/arcnet/capmode.c 		arcnet_dump_skb(dev, skb, "rx");
skb                91 drivers/net/arcnet/capmode.c 	skb->protocol = cpu_to_be16(ETH_P_ARCNET);
skb                92 drivers/net/arcnet/capmode.c 	netif_rx(skb);
skb                98 drivers/net/arcnet/capmode.c static int build_header(struct sk_buff *skb,
skb               104 drivers/net/arcnet/capmode.c 	struct archdr *pkt = skb_push(skb, hdr_size);
skb               193 drivers/net/arcnet/capmode.c 		   lp->outgoing.skb->protocol, acked);
skb               196 drivers/net/arcnet/capmode.c 		arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
skb               210 drivers/net/arcnet/capmode.c 	skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
skb               225 drivers/net/arcnet/capmode.c 	dev_kfree_skb_irq(lp->outgoing.skb);
skb                39 drivers/net/arcnet/rfc1051.c static __be16 type_trans(struct sk_buff *skb, struct net_device *dev);
skb                42 drivers/net/arcnet/rfc1051.c static int build_header(struct sk_buff *skb, struct net_device *dev,
skb                87 drivers/net/arcnet/rfc1051.c static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
skb                89 drivers/net/arcnet/rfc1051.c 	struct archdr *pkt = (struct archdr *)skb->data;
skb                94 drivers/net/arcnet/rfc1051.c 	skb_reset_mac_header(skb);
skb                95 drivers/net/arcnet/rfc1051.c 	skb_pull(skb, hdr_size);
skb                98 drivers/net/arcnet/rfc1051.c 		skb->pkt_type = PACKET_BROADCAST;
skb               102 drivers/net/arcnet/rfc1051.c 			skb->pkt_type = PACKET_OTHERHOST;
skb               125 drivers/net/arcnet/rfc1051.c 	struct sk_buff *skb;
skb               136 drivers/net/arcnet/rfc1051.c 	skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
skb               137 drivers/net/arcnet/rfc1051.c 	if (!skb) {
skb               141 drivers/net/arcnet/rfc1051.c 	skb_put(skb, length + ARC_HDR_SIZE);
skb               142 drivers/net/arcnet/rfc1051.c 	skb->dev = dev;
skb               144 drivers/net/arcnet/rfc1051.c 	pkt = (struct archdr *)skb->data;
skb               154 drivers/net/arcnet/rfc1051.c 		arcnet_dump_skb(dev, skb, "rx");
skb               156 drivers/net/arcnet/rfc1051.c 	skb->protocol = type_trans(skb, dev);
skb               157 drivers/net/arcnet/rfc1051.c 	netif_rx(skb);
skb               161 drivers/net/arcnet/rfc1051.c static int build_header(struct sk_buff *skb, struct net_device *dev,
skb               165 drivers/net/arcnet/rfc1051.c 	struct archdr *pkt = skb_push(skb, hdr_size);
skb                40 drivers/net/arcnet/rfc1201.c static __be16 type_trans(struct sk_buff *skb, struct net_device *dev);
skb                43 drivers/net/arcnet/rfc1201.c static int build_header(struct sk_buff *skb, struct net_device *dev,
skb                91 drivers/net/arcnet/rfc1201.c static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
skb                93 drivers/net/arcnet/rfc1201.c 	struct archdr *pkt = (struct archdr *)skb->data;
skb                98 drivers/net/arcnet/rfc1201.c 	skb_reset_mac_header(skb);
skb                99 drivers/net/arcnet/rfc1201.c 	skb_pull(skb, hdr_size);
skb               102 drivers/net/arcnet/rfc1201.c 		skb->pkt_type = PACKET_BROADCAST;
skb               106 drivers/net/arcnet/rfc1201.c 			skb->pkt_type = PACKET_OTHERHOST;
skb               136 drivers/net/arcnet/rfc1201.c 	struct sk_buff *skb;
skb               169 drivers/net/arcnet/rfc1201.c 		if (in->skb) {	/* already assembling one! */
skb               174 drivers/net/arcnet/rfc1201.c 			dev_kfree_skb_irq(in->skb);
skb               177 drivers/net/arcnet/rfc1201.c 			in->skb = NULL;
skb               181 drivers/net/arcnet/rfc1201.c 		skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
skb               182 drivers/net/arcnet/rfc1201.c 		if (!skb) {
skb               186 drivers/net/arcnet/rfc1201.c 		skb_put(skb, length + ARC_HDR_SIZE);
skb               187 drivers/net/arcnet/rfc1201.c 		skb->dev = dev;
skb               189 drivers/net/arcnet/rfc1201.c 		pkt = (struct archdr *)skb->data;
skb               232 drivers/net/arcnet/rfc1201.c 			arcnet_dump_skb(dev, skb, "rx");
skb               234 drivers/net/arcnet/rfc1201.c 		skb->protocol = type_trans(skb, dev);
skb               235 drivers/net/arcnet/rfc1201.c 		netif_rx(skb);
skb               258 drivers/net/arcnet/rfc1201.c 		if (in->skb && in->sequence != soft->sequence) {
skb               262 drivers/net/arcnet/rfc1201.c 			dev_kfree_skb_irq(in->skb);
skb               263 drivers/net/arcnet/rfc1201.c 			in->skb = NULL;
skb               271 drivers/net/arcnet/rfc1201.c 			if (in->skb) {	/* already assembling one! */
skb               277 drivers/net/arcnet/rfc1201.c 				dev_kfree_skb_irq(in->skb);
skb               291 drivers/net/arcnet/rfc1201.c 			in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE,
skb               293 drivers/net/arcnet/rfc1201.c 			if (!skb) {
skb               299 drivers/net/arcnet/rfc1201.c 			skb->dev = dev;
skb               300 drivers/net/arcnet/rfc1201.c 			pkt = (struct archdr *)skb->data;
skb               304 drivers/net/arcnet/rfc1201.c 			skb_put(skb, ARC_HDR_SIZE + RFC1201_HDR_SIZE);
skb               313 drivers/net/arcnet/rfc1201.c 			if (!in->skb) {
skb               340 drivers/net/arcnet/rfc1201.c 				dev_kfree_skb_irq(in->skb);
skb               341 drivers/net/arcnet/rfc1201.c 				in->skb = NULL;
skb               347 drivers/net/arcnet/rfc1201.c 			pkt = (struct archdr *)in->skb->data;
skb               351 drivers/net/arcnet/rfc1201.c 		skb = in->skb;
skb               354 drivers/net/arcnet/rfc1201.c 				      skb->data + skb->len,
skb               356 drivers/net/arcnet/rfc1201.c 		skb_put(skb, length - RFC1201_HDR_SIZE);
skb               360 drivers/net/arcnet/rfc1201.c 			in->skb = NULL;
skb               364 drivers/net/arcnet/rfc1201.c 				   skb->len, pkt->hard.source);
skb               366 drivers/net/arcnet/rfc1201.c 				   skb->len, pkt->hard.source);
skb               368 drivers/net/arcnet/rfc1201.c 				arcnet_dump_skb(dev, skb, "rx");
skb               370 drivers/net/arcnet/rfc1201.c 			skb->protocol = type_trans(skb, dev);
skb               371 drivers/net/arcnet/rfc1201.c 			netif_rx(skb);
skb               377 drivers/net/arcnet/rfc1201.c static int build_header(struct sk_buff *skb, struct net_device *dev,
skb               382 drivers/net/arcnet/rfc1201.c 	struct archdr *pkt = skb_push(skb, hdr_size);
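
The rfc1201.c entries show split-packet reassembly: when the first fragment arrives, one skb large enough for the whole datagram is allocated, and each later fragment is appended at the current tail until the sequence completes. A condensed sketch of that pattern; the tailroom check stands in for the driver's sequence/offset validation.

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	struct reasm {
		struct sk_buff *skb;	/* in-progress datagram, NULL when idle */
	};

	static int reasm_add(struct reasm *in, const void *frag, int fraglen,
			     int max_total)
	{
		if (!in->skb) {		/* first fragment: allocate full size */
			in->skb = alloc_skb(max_total, GFP_ATOMIC);
			if (!in->skb)
				return -ENOMEM;
		}
		if (fraglen > skb_tailroom(in->skb))
			return -EMSGSIZE;		/* stream is malformed */
		skb_put_data(in->skb, frag, fraglen);	/* append at the tail */
		return 0;
	}
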
skb               833 drivers/net/bonding/bond_3ad.c 	struct sk_buff *skb;
skb               837 drivers/net/bonding/bond_3ad.c 	skb = dev_alloc_skb(length);
skb               838 drivers/net/bonding/bond_3ad.c 	if (!skb)
skb               844 drivers/net/bonding/bond_3ad.c 	skb->dev = slave->dev;
skb               845 drivers/net/bonding/bond_3ad.c 	skb_reset_mac_header(skb);
skb               846 drivers/net/bonding/bond_3ad.c 	skb->network_header = skb->mac_header + ETH_HLEN;
skb               847 drivers/net/bonding/bond_3ad.c 	skb->protocol = PKT_TYPE_LACPDU;
skb               848 drivers/net/bonding/bond_3ad.c 	skb->priority = TC_PRIO_CONTROL;
skb               850 drivers/net/bonding/bond_3ad.c 	lacpdu_header = skb_put(skb, length);
skb               861 drivers/net/bonding/bond_3ad.c 	dev_queue_xmit(skb);
skb               877 drivers/net/bonding/bond_3ad.c 	struct sk_buff *skb;
skb               881 drivers/net/bonding/bond_3ad.c 	skb = dev_alloc_skb(length + 16);
skb               882 drivers/net/bonding/bond_3ad.c 	if (!skb)
skb               896 drivers/net/bonding/bond_3ad.c 	skb_reserve(skb, 16);
skb               898 drivers/net/bonding/bond_3ad.c 	skb->dev = slave->dev;
skb               899 drivers/net/bonding/bond_3ad.c 	skb_reset_mac_header(skb);
skb               900 drivers/net/bonding/bond_3ad.c 	skb->network_header = skb->mac_header + ETH_HLEN;
skb               901 drivers/net/bonding/bond_3ad.c 	skb->protocol = PKT_TYPE_LACPDU;
skb               903 drivers/net/bonding/bond_3ad.c 	marker_header = skb_put(skb, length);
skb               914 drivers/net/bonding/bond_3ad.c 	dev_queue_xmit(skb);
skb              2654 drivers/net/bonding/bond_3ad.c int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
skb              2659 drivers/net/bonding/bond_3ad.c 	if (skb->protocol != PKT_TYPE_LACPDU)
skb              2662 drivers/net/bonding/bond_3ad.c 	if (!MAC_ADDRESS_EQUAL(eth_hdr(skb)->h_dest, lacpdu_mcast_addr))
skb              2665 drivers/net/bonding/bond_3ad.c 	lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
skb              2718 drivers/net/bonding/bond_3ad.c int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats)
skb              2723 drivers/net/bonding/bond_3ad.c 	if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_RX, val,
skb              2727 drivers/net/bonding/bond_3ad.c 	if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_TX, val,
skb              2731 drivers/net/bonding/bond_3ad.c 	if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_UNKNOWN_RX, val,
skb              2735 drivers/net/bonding/bond_3ad.c 	if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_ILLEGAL_RX, val,
skb              2740 drivers/net/bonding/bond_3ad.c 	if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RX, val,
skb              2744 drivers/net/bonding/bond_3ad.c 	if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_TX, val,
skb              2748 drivers/net/bonding/bond_3ad.c 	if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_RX, val,
skb              2752 drivers/net/bonding/bond_3ad.c 	if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_TX, val,
skb              2756 drivers/net/bonding/bond_3ad.c 	if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_UNKNOWN_RX, val,
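
The bond_3ad_stats_fill() entries show the 64-bit netlink attribute idiom: nla_put_u64_64bit() takes an extra pad attribute type so the u64 payload stays 8-byte aligned on every architecture. A self-contained sketch with hypothetical MY_STAT_* attribute numbers.

	#include <net/netlink.h>

	enum {
		MY_STAT_UNSPEC,		/* hypothetical attribute numbers */
		MY_STAT_PKTS_RX,
		MY_STAT_PAD,		/* alignment-only attribute */
		__MY_STAT_MAX,
	};

	static int my_stats_fill(struct sk_buff *skb, u64 pkts_rx)
	{
		/* the pad type lets netlink insert padding before the u64 */
		if (nla_put_u64_64bit(skb, MY_STAT_PKTS_RX, pkts_rx, MY_STAT_PAD))
			return -EMSGSIZE;
		return 0;
	}
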
skb               271 drivers/net/bonding/bond_alb.c static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
skb               276 drivers/net/bonding/bond_alb.c 	if (skb->protocol != cpu_to_be16(ETH_P_ARP))
skb               279 drivers/net/bonding/bond_alb.c 	arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
skb               430 drivers/net/bonding/bond_alb.c 		struct sk_buff *skb;
skb               432 drivers/net/bonding/bond_alb.c 		skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
skb               439 drivers/net/bonding/bond_alb.c 		if (!skb) {
skb               446 drivers/net/bonding/bond_alb.c 		skb->dev = client_info->slave->dev;
skb               449 drivers/net/bonding/bond_alb.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               453 drivers/net/bonding/bond_alb.c 		arp_xmit(skb);
skb               551 drivers/net/bonding/bond_alb.c static struct slave *rlb_choose_channel(struct sk_buff *skb,
skb               626 drivers/net/bonding/bond_alb.c 		if (vlan_get_tag(skb, &client_info->vlan_id))
skb               650 drivers/net/bonding/bond_alb.c static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
skb               655 drivers/net/bonding/bond_alb.c 	if (!pskb_network_may_pull(skb, sizeof(*arp)))
skb               657 drivers/net/bonding/bond_alb.c 	arp = (struct arp_pkt *)skb_network_header(skb);
skb               667 drivers/net/bonding/bond_alb.c 		tx_slave = rlb_choose_channel(skb, bond, arp);
skb               679 drivers/net/bonding/bond_alb.c 		tx_slave = rlb_choose_channel(skb, bond, arp);
skb               909 drivers/net/bonding/bond_alb.c 	struct sk_buff *skb;
skb               917 drivers/net/bonding/bond_alb.c 	skb = dev_alloc_skb(size);
skb               918 drivers/net/bonding/bond_alb.c 	if (!skb)
skb               921 drivers/net/bonding/bond_alb.c 	skb_put_data(skb, &pkt, size);
skb               923 drivers/net/bonding/bond_alb.c 	skb_reset_mac_header(skb);
skb               924 drivers/net/bonding/bond_alb.c 	skb->network_header = skb->mac_header + ETH_HLEN;
skb               925 drivers/net/bonding/bond_alb.c 	skb->protocol = pkt.type;
skb               926 drivers/net/bonding/bond_alb.c 	skb->priority = TC_PRIO_CONTROL;
skb               927 drivers/net/bonding/bond_alb.c 	skb->dev = slave->dev;
skb               933 drivers/net/bonding/bond_alb.c 		__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
skb               935 drivers/net/bonding/bond_alb.c 	dev_queue_xmit(skb);
skb              1302 drivers/net/bonding/bond_alb.c static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
skb              1306 drivers/net/bonding/bond_alb.c 	struct ethhdr *eth_data = eth_hdr(skb);
skb              1312 drivers/net/bonding/bond_alb.c 			bond_info->unbalanced_load += skb->len;
skb              1321 drivers/net/bonding/bond_alb.c 		bond_dev_queue_xmit(bond, skb, tx_slave->dev);
skb              1332 drivers/net/bonding/bond_alb.c 	bond_tx_drop(bond->dev, skb);
skb              1337 drivers/net/bonding/bond_alb.c netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
skb              1344 drivers/net/bonding/bond_alb.c 	skb_reset_mac_header(skb);
skb              1345 drivers/net/bonding/bond_alb.c 	eth_data = eth_hdr(skb);
skb              1349 drivers/net/bonding/bond_alb.c 		switch (skb->protocol) {
skb              1354 drivers/net/bonding/bond_alb.c 			hash_index = bond_xmit_hash(bond, skb);
skb              1358 drivers/net/bonding/bond_alb.c 							      skb->len);
skb              1372 drivers/net/bonding/bond_alb.c 	return bond_do_alb_xmit(skb, bond, tx_slave);
skb              1375 drivers/net/bonding/bond_alb.c netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
skb              1387 drivers/net/bonding/bond_alb.c 	skb_reset_mac_header(skb);
skb              1388 drivers/net/bonding/bond_alb.c 	eth_data = eth_hdr(skb);
skb              1390 drivers/net/bonding/bond_alb.c 	switch (ntohs(skb->protocol)) {
skb              1395 drivers/net/bonding/bond_alb.c 		    !pskb_network_may_pull(skb, sizeof(*iph))) {
skb              1399 drivers/net/bonding/bond_alb.c 		iph = ip_hdr(skb);
skb              1427 drivers/net/bonding/bond_alb.c 		if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
skb              1435 drivers/net/bonding/bond_alb.c 		ip6hdr = ipv6_hdr(skb);
skb              1448 drivers/net/bonding/bond_alb.c 		if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
skb              1452 drivers/net/bonding/bond_alb.c 		ipxhdr = (struct ipxhdr *)skb_network_header(skb);
skb              1469 drivers/net/bonding/bond_alb.c 		eth_data = eth_hdr(skb);
skb              1477 drivers/net/bonding/bond_alb.c 			tx_slave = rlb_arp_xmit(skb, bond);
skb              1487 drivers/net/bonding/bond_alb.c 			tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
skb              1500 drivers/net/bonding/bond_alb.c 				tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
skb              1505 drivers/net/bonding/bond_alb.c 	return bond_do_alb_xmit(skb, bond, tx_slave);
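
The rlb_arp_recv() entries show the copy-safe header access idiom: skb_header_pointer() returns a pointer into the linear area when the bytes are already there and otherwise copies them into the caller's stack buffer, so the parse also works on non-linear skbs. A sketch of that check.

	#include <linux/if_arp.h>
	#include <linux/if_ether.h>
	#include <linux/skbuff.h>

	static bool is_arp_request(const struct sk_buff *skb)
	{
		const struct arphdr *arp;
		struct arphdr _arp;

		if (skb->protocol != htons(ETH_P_ARP))
			return false;

		/* points into the linear area, or copies into _arp if needed */
		arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
		return arp && arp->ar_op == htons(ARPOP_REQUEST);
	}
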
skb               243 drivers/net/bonding/bond_main.c void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
skb               246 drivers/net/bonding/bond_main.c 	skb->dev = slave_dev;
skb               248 drivers/net/bonding/bond_main.c 	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
skb               249 drivers/net/bonding/bond_main.c 		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
skb               250 drivers/net/bonding/bond_main.c 	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
skb               253 drivers/net/bonding/bond_main.c 		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
skb               255 drivers/net/bonding/bond_main.c 		dev_queue_xmit(skb);
skb              1160 drivers/net/bonding/bond_main.c static bool bond_should_deliver_exact_match(struct sk_buff *skb,
skb              1166 drivers/net/bonding/bond_main.c 		    skb->pkt_type != PACKET_BROADCAST &&
skb              1167 drivers/net/bonding/bond_main.c 		    skb->pkt_type != PACKET_MULTICAST)
skb              1176 drivers/net/bonding/bond_main.c 	struct sk_buff *skb = *pskb;
skb              1183 drivers/net/bonding/bond_main.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb              1184 drivers/net/bonding/bond_main.c 	if (unlikely(!skb))
skb              1187 drivers/net/bonding/bond_main.c 	*pskb = skb;
skb              1189 drivers/net/bonding/bond_main.c 	slave = bond_slave_get_rcu(skb->dev);
skb              1194 drivers/net/bonding/bond_main.c 		ret = recv_probe(skb, bond, slave);
skb              1196 drivers/net/bonding/bond_main.c 			consume_skb(skb);
skb              1212 drivers/net/bonding/bond_main.c 	if (bond_should_deliver_exact_match(skb, slave, bond)) {
skb              1213 drivers/net/bonding/bond_main.c 		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
skb              1218 drivers/net/bonding/bond_main.c 	skb->dev = bond->dev;
skb              1222 drivers/net/bonding/bond_main.c 	    skb->pkt_type == PACKET_HOST) {
skb              1224 drivers/net/bonding/bond_main.c 		if (unlikely(skb_cow_head(skb,
skb              1225 drivers/net/bonding/bond_main.c 					  skb->data - skb_mac_header(skb)))) {
skb              1226 drivers/net/bonding/bond_main.c 			kfree_skb(skb);
skb              1229 drivers/net/bonding/bond_main.c 		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
skb              2371 drivers/net/bonding/bond_main.c 	struct sk_buff *skb;
skb              2379 drivers/net/bonding/bond_main.c 	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
skb              2382 drivers/net/bonding/bond_main.c 	if (!skb) {
skb              2401 drivers/net/bonding/bond_main.c 		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
skb              2403 drivers/net/bonding/bond_main.c 		if (!skb) {
skb              2414 drivers/net/bonding/bond_main.c 		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
skb              2419 drivers/net/bonding/bond_main.c 	arp_xmit(skb);
skb              2536 drivers/net/bonding/bond_main.c int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
skb              2539 drivers/net/bonding/bond_main.c 	struct arphdr *arp = (struct arphdr *)skb->data;
skb              2543 drivers/net/bonding/bond_main.c 	int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
skb              2558 drivers/net/bonding/bond_main.c 		   __func__, skb->dev->name);
skb              2560 drivers/net/bonding/bond_main.c 	if (alen > skb_headlen(skb)) {
skb              2564 drivers/net/bonding/bond_main.c 		if (skb_copy_bits(skb, 0, arp, alen) < 0)
skb              2569 drivers/net/bonding/bond_main.c 	    skb->pkt_type == PACKET_OTHERHOST ||
skb              2570 drivers/net/bonding/bond_main.c 	    skb->pkt_type == PACKET_LOOPBACK ||
skb              2625 drivers/net/bonding/bond_main.c 	if (arp != (struct arphdr *)skb->data)
skb              3242 drivers/net/bonding/bond_main.c static inline u32 bond_eth_hash(struct sk_buff *skb)
skb              3246 drivers/net/bonding/bond_main.c 	ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
skb              3253 drivers/net/bonding/bond_main.c static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
skb              3261 drivers/net/bonding/bond_main.c 		return skb_flow_dissect_flow_keys(skb, fk, 0);
skb              3264 drivers/net/bonding/bond_main.c 	noff = skb_network_offset(skb);
skb              3265 drivers/net/bonding/bond_main.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb              3266 drivers/net/bonding/bond_main.c 		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
skb              3268 drivers/net/bonding/bond_main.c 		iph = ip_hdr(skb);
skb              3273 drivers/net/bonding/bond_main.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb              3274 drivers/net/bonding/bond_main.c 		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
skb              3276 drivers/net/bonding/bond_main.c 		iph6 = ipv6_hdr(skb);
skb              3284 drivers/net/bonding/bond_main.c 		fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
skb              3297 drivers/net/bonding/bond_main.c u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
skb              3303 drivers/net/bonding/bond_main.c 	    skb->l4_hash)
skb              3304 drivers/net/bonding/bond_main.c 		return skb->hash;
skb              3307 drivers/net/bonding/bond_main.c 	    !bond_flow_dissect(bond, skb, &flow))
skb              3308 drivers/net/bonding/bond_main.c 		return bond_eth_hash(skb);
skb              3312 drivers/net/bonding/bond_main.c 		hash = bond_eth_hash(skb);
skb              3845 drivers/net/bonding/bond_main.c static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
skb              3855 drivers/net/bonding/bond_main.c 				bond_dev_queue_xmit(bond, skb, slave->dev);
skb              3867 drivers/net/bonding/bond_main.c 			bond_dev_queue_xmit(bond, skb, slave->dev);
skb              3872 drivers/net/bonding/bond_main.c 	bond_tx_drop(bond->dev, skb);
skb              3908 drivers/net/bonding/bond_main.c static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
skb              3922 drivers/net/bonding/bond_main.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb              3923 drivers/net/bonding/bond_main.c 		int noff = skb_network_offset(skb);
skb              3926 drivers/net/bonding/bond_main.c 		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
skb              3929 drivers/net/bonding/bond_main.c 		iph = ip_hdr(skb);
skb              3933 drivers/net/bonding/bond_main.c 				bond_dev_queue_xmit(bond, skb, slave->dev);
skb              3935 drivers/net/bonding/bond_main.c 				bond_xmit_slave_id(bond, skb, 0);
skb              3944 drivers/net/bonding/bond_main.c 		bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
skb              3946 drivers/net/bonding/bond_main.c 		bond_tx_drop(bond_dev, skb);
skb              3954 drivers/net/bonding/bond_main.c static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
skb              3962 drivers/net/bonding/bond_main.c 		bond_dev_queue_xmit(bond, skb, slave->dev);
skb              3964 drivers/net/bonding/bond_main.c 		bond_tx_drop(bond_dev, skb);
skb              4097 drivers/net/bonding/bond_main.c static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
skb              4108 drivers/net/bonding/bond_main.c 		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
skb              4109 drivers/net/bonding/bond_main.c 		bond_dev_queue_xmit(bond, skb, slave->dev);
skb              4111 drivers/net/bonding/bond_main.c 		bond_tx_drop(dev, skb);
skb              4118 drivers/net/bonding/bond_main.c static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
skb              4129 drivers/net/bonding/bond_main.c 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
skb              4140 drivers/net/bonding/bond_main.c 		bond_dev_queue_xmit(bond, skb, slave->dev);
skb              4142 drivers/net/bonding/bond_main.c 		bond_tx_drop(bond_dev, skb);
skb              4151 drivers/net/bonding/bond_main.c 				      struct sk_buff *skb)
skb              4156 drivers/net/bonding/bond_main.c 	if (!skb_rx_queue_recorded(skb))
skb              4161 drivers/net/bonding/bond_main.c 		if (slave->queue_id == skb_get_queue_mapping(skb)) {
skb              4164 drivers/net/bonding/bond_main.c 				bond_dev_queue_xmit(bond, skb, slave->dev);
skb              4176 drivers/net/bonding/bond_main.c static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              4184 drivers/net/bonding/bond_main.c 	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
skb              4187 drivers/net/bonding/bond_main.c 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
skb              4197 drivers/net/bonding/bond_main.c static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              4202 drivers/net/bonding/bond_main.c 	    !bond_slave_override(bond, skb))
skb              4207 drivers/net/bonding/bond_main.c 		return bond_xmit_roundrobin(skb, dev);
skb              4209 drivers/net/bonding/bond_main.c 		return bond_xmit_activebackup(skb, dev);
skb              4212 drivers/net/bonding/bond_main.c 		return bond_3ad_xor_xmit(skb, dev);
skb              4214 drivers/net/bonding/bond_main.c 		return bond_xmit_broadcast(skb, dev);
skb              4216 drivers/net/bonding/bond_main.c 		return bond_alb_xmit(skb, dev);
skb              4218 drivers/net/bonding/bond_main.c 		return bond_tlb_xmit(skb, dev);
skb              4223 drivers/net/bonding/bond_main.c 		bond_tx_drop(dev, skb);
skb              4228 drivers/net/bonding/bond_main.c static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              4241 drivers/net/bonding/bond_main.c 		ret = __bond_start_xmit(skb, dev);
skb              4243 drivers/net/bonding/bond_main.c 		bond_tx_drop(dev, skb);
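
The bond_handle_frame() entries show the rx_handler contract: unshare the skb before touching it, write the possibly-new pointer back through *pskb, and return RX_HANDLER_ANOTHER after retargeting skb->dev so the stack re-runs receive as the master device. A skeleton under the assumption that the master device was registered as the handler's rx_handler_data.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
	{
		struct net_device *master;
		struct sk_buff *skb = *pskb;

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			return RX_HANDLER_CONSUMED;	/* unshare failed, skb gone */
		*pskb = skb;				/* pointer may have changed */

		/* assumed stashed via netdev_rx_handler_register(slave, fn, master) */
		master = rcu_dereference(skb->dev->rx_handler_data);
		skb->dev = master;			/* deliver as the master's */
		return RX_HANDLER_ANOTHER;		/* re-run RX on the new dev */
	}
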
skb                32 drivers/net/bonding/bond_netlink.c static int bond_fill_slave_info(struct sk_buff *skb,
skb                38 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
skb                41 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
skb                44 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
skb                48 drivers/net/bonding/bond_netlink.c 	if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
skb                52 drivers/net/bonding/bond_netlink.c 	if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
skb                62 drivers/net/bonding/bond_netlink.c 			if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
skb                65 drivers/net/bonding/bond_netlink.c 			if (nla_put_u8(skb,
skb                69 drivers/net/bonding/bond_netlink.c 			if (nla_put_u16(skb,
skb               522 drivers/net/bonding/bond_netlink.c static int bond_fill_info(struct sk_buff *skb,
skb               531 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
skb               535 drivers/net/bonding/bond_netlink.c 	if (ifindex && nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, ifindex))
skb               538 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
skb               541 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
skb               545 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
skb               549 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_PEER_NOTIF_DELAY,
skb               553 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
skb               556 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
skb               559 drivers/net/bonding/bond_netlink.c 	targets = nla_nest_start_noflag(skb, IFLA_BOND_ARP_IP_TARGET);
skb               566 drivers/net/bonding/bond_netlink.c 			if (nla_put_be32(skb, i, bond->params.arp_targets[i]))
skb               573 drivers/net/bonding/bond_netlink.c 		nla_nest_end(skb, targets);
skb               575 drivers/net/bonding/bond_netlink.c 		nla_nest_cancel(skb, targets);
skb               577 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
skb               580 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
skb               586 drivers/net/bonding/bond_netlink.c 	    nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex))
skb               589 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
skb               593 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
skb               597 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
skb               601 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
skb               605 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
skb               609 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
skb               613 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
skb               617 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
skb               622 drivers/net/bonding/bond_netlink.c 	if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
skb               626 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
skb               630 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
skb               634 drivers/net/bonding/bond_netlink.c 	if (nla_put_u8(skb, IFLA_BOND_TLB_DYNAMIC_LB,
skb               642 drivers/net/bonding/bond_netlink.c 			if (nla_put_u16(skb, IFLA_BOND_AD_ACTOR_SYS_PRIO,
skb               646 drivers/net/bonding/bond_netlink.c 			if (nla_put_u16(skb, IFLA_BOND_AD_USER_PORT_KEY,
skb               650 drivers/net/bonding/bond_netlink.c 			if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
skb               657 drivers/net/bonding/bond_netlink.c 			nest = nla_nest_start_noflag(skb, IFLA_BOND_AD_INFO);
skb               661 drivers/net/bonding/bond_netlink.c 			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
skb               664 drivers/net/bonding/bond_netlink.c 			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
skb               667 drivers/net/bonding/bond_netlink.c 			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
skb               670 drivers/net/bonding/bond_netlink.c 			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
skb               673 drivers/net/bonding/bond_netlink.c 			if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
skb               678 drivers/net/bonding/bond_netlink.c 			nla_nest_end(skb, nest);
skb               701 drivers/net/bonding/bond_netlink.c static int bond_fill_linkxstats(struct sk_buff *skb,
skb               724 drivers/net/bonding/bond_netlink.c 	nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BOND);
skb               735 drivers/net/bonding/bond_netlink.c 		nest2 = nla_nest_start_noflag(skb, BOND_XSTATS_3AD);
skb               737 drivers/net/bonding/bond_netlink.c 			nla_nest_end(skb, nest);
skb               741 drivers/net/bonding/bond_netlink.c 		if (bond_3ad_stats_fill(skb, stats)) {
skb               742 drivers/net/bonding/bond_netlink.c 			nla_nest_cancel(skb, nest2);
skb               743 drivers/net/bonding/bond_netlink.c 			nla_nest_end(skb, nest);
skb               746 drivers/net/bonding/bond_netlink.c 		nla_nest_end(skb, nest2);
skb               748 drivers/net/bonding/bond_netlink.c 	nla_nest_end(skb, nest);
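
The bond_fill_linkxstats() entries show the nest/cancel discipline for nested netlink attributes: a nest that cannot be completed must be cancelled so the partially written attributes are rolled back and the message stays parseable. A sketch with hypothetical MY_ATTR_* numbers.

	#include <net/netlink.h>

	enum {
		MY_ATTR_UNSPEC,		/* hypothetical attribute numbers */
		MY_ATTR_NEST,
		MY_ATTR_VALUE,
		__MY_ATTR_MAX,
	};

	static int fill_nested(struct sk_buff *skb, u32 value)
	{
		struct nlattr *nest = nla_nest_start_noflag(skb, MY_ATTR_NEST);

		if (!nest)
			return -EMSGSIZE;
		if (nla_put_u32(skb, MY_ATTR_VALUE, value)) {
			nla_nest_cancel(skb, nest);	/* roll back partial nest */
			return -EMSGSIZE;
		}
		nla_nest_end(skb, nest);		/* patch up the nest length */
		return 0;
	}
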
skb                82 drivers/net/caif/caif_hsi.c 					   const struct sk_buff *skb,
skb                88 drivers/net/caif/caif_hsi.c 	info = (struct caif_payload_info *)&skb->cb;
skb                90 drivers/net/caif/caif_hsi.c 	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
skb                91 drivers/net/caif/caif_hsi.c 	len = skb->len + hpad + tpad;
skb               120 drivers/net/caif/caif_hsi.c 	struct sk_buff *skb;
skb               124 drivers/net/caif/caif_hsi.c 		skb = skb_dequeue(&cfhsi->qhead[i]);
skb               125 drivers/net/caif/caif_hsi.c 		if (skb)
skb               129 drivers/net/caif/caif_hsi.c 	return skb;
skb               142 drivers/net/caif/caif_hsi.c 	struct sk_buff *skb;
skb               146 drivers/net/caif/caif_hsi.c 		skb = cfhsi_dequeue(cfhsi);
skb               147 drivers/net/caif/caif_hsi.c 		if (!skb)
skb               152 drivers/net/caif/caif_hsi.c 		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
skb               154 drivers/net/caif/caif_hsi.c 		kfree_skb(skb);
skb               221 drivers/net/caif/caif_hsi.c 	struct sk_buff *skb;
skb               224 drivers/net/caif/caif_hsi.c 	skb = cfhsi_dequeue(cfhsi);
skb               225 drivers/net/caif/caif_hsi.c 	if (!skb)
skb               232 drivers/net/caif/caif_hsi.c 	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
skb               238 drivers/net/caif/caif_hsi.c 		info = (struct caif_payload_info *)&skb->cb;
skb               241 drivers/net/caif/caif_hsi.c 		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
skb               244 drivers/net/caif/caif_hsi.c 		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
skb               253 drivers/net/caif/caif_hsi.c 			cfhsi->ndev->stats.tx_bytes += skb->len;
skb               254 drivers/net/caif/caif_hsi.c 			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
skb               258 drivers/net/caif/caif_hsi.c 			skb_copy_bits(skb, 0, pemb, skb->len);
skb               261 drivers/net/caif/caif_hsi.c 			consume_skb(skb);
skb               262 drivers/net/caif/caif_hsi.c 			skb = NULL;
skb               272 drivers/net/caif/caif_hsi.c 		if (!skb)
skb               273 drivers/net/caif/caif_hsi.c 			skb = cfhsi_dequeue(cfhsi);
skb               275 drivers/net/caif/caif_hsi.c 		if (!skb)
skb               279 drivers/net/caif/caif_hsi.c 		info = (struct caif_payload_info *)&skb->cb;
skb               282 drivers/net/caif/caif_hsi.c 		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
skb               285 drivers/net/caif/caif_hsi.c 		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
skb               294 drivers/net/caif/caif_hsi.c 		cfhsi->ndev->stats.tx_bytes += skb->len;
skb               295 drivers/net/caif/caif_hsi.c 		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
skb               299 drivers/net/caif/caif_hsi.c 		skb_copy_bits(skb, 0, pfrm, skb->len);
skb               305 drivers/net/caif/caif_hsi.c 		pfrm += skb->len + tpad;
skb               308 drivers/net/caif/caif_hsi.c 		consume_skb(skb);
skb               309 drivers/net/caif/caif_hsi.c 		skb = NULL;
skb               427 drivers/net/caif/caif_hsi.c 		struct sk_buff *skb;
skb               447 drivers/net/caif/caif_hsi.c 		skb = alloc_skb(len + 1, GFP_ATOMIC);
skb               448 drivers/net/caif/caif_hsi.c 		if (!skb) {
skb               453 drivers/net/caif/caif_hsi.c 		caif_assert(skb != NULL);
skb               455 drivers/net/caif/caif_hsi.c 		skb_put_data(skb, pfrm, len);
skb               457 drivers/net/caif/caif_hsi.c 		skb->protocol = htons(ETH_P_CAIF);
skb               458 drivers/net/caif/caif_hsi.c 		skb_reset_mac_header(skb);
skb               459 drivers/net/caif/caif_hsi.c 		skb->dev = cfhsi->ndev;
skb               467 drivers/net/caif/caif_hsi.c 			netif_rx(skb);
skb               469 drivers/net/caif/caif_hsi.c 			netif_rx_ni(skb);
skb               555 drivers/net/caif/caif_hsi.c 		struct sk_buff *skb;
skb               575 drivers/net/caif/caif_hsi.c 		skb = alloc_skb(len + 1, GFP_ATOMIC);
skb               576 drivers/net/caif/caif_hsi.c 		if (!skb) {
skb               582 drivers/net/caif/caif_hsi.c 		caif_assert(skb != NULL);
skb               584 drivers/net/caif/caif_hsi.c 		skb_put_data(skb, pcffrm, len);
skb               586 drivers/net/caif/caif_hsi.c 		skb->protocol = htons(ETH_P_CAIF);
skb               587 drivers/net/caif/caif_hsi.c 		skb_reset_mac_header(skb);
skb               588 drivers/net/caif/caif_hsi.c 		skb->dev = cfhsi->ndev;
skb               595 drivers/net/caif/caif_hsi.c 			netif_rx(skb);
skb               597 drivers/net/caif/caif_hsi.c 			netif_rx_ni(skb);
skb              1009 drivers/net/caif/caif_hsi.c static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1021 drivers/net/caif/caif_hsi.c 	switch (skb->priority) {
skb              1042 drivers/net/caif/caif_hsi.c 	cfhsi_update_aggregation_stats(cfhsi, skb, 1);
skb              1045 drivers/net/caif/caif_hsi.c 	skb_queue_tail(&cfhsi->qhead[prio], skb);
skb              1374 drivers/net/caif/caif_hsi.c static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              1378 drivers/net/caif/caif_hsi.c 	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
skb              1380 drivers/net/caif/caif_hsi.c 	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
skb              1382 drivers/net/caif/caif_hsi.c 	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
skb              1384 drivers/net/caif/caif_hsi.c 	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
skb              1386 drivers/net/caif/caif_hsi.c 	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
skb              1388 drivers/net/caif/caif_hsi.c 	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
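
The caif_hsi.c receive entries show the CAIF hand-off: build the skb from the descriptor payload, stamp ETH_P_CAIF, and choose netif_rx() in interrupt context versus netif_rx_ni() from process context (kernels of this era kept the two separate; later kernels fold both into netif_rx()). A sketch of that hand-off.

	#include <linux/hardirq.h>
	#include <linux/if_ether.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int caif_style_rx(struct net_device *ndev, const u8 *frm, size_t len)
	{
		struct sk_buff *skb = alloc_skb(len + 1, GFP_ATOMIC);

		if (!skb)
			return -ENOMEM;

		skb_put_data(skb, frm, len);
		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = ndev;

		if (in_interrupt())		/* deferred-from-IRQ delivery */
			return netif_rx(skb);
		return netif_rx_ni(skb);	/* process context */
	}
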
skb               165 drivers/net/caif/caif_serial.c 	struct sk_buff *skb = NULL;
skb               191 drivers/net/caif/caif_serial.c 	skb = netdev_alloc_skb(ser->dev, count+1);
skb               192 drivers/net/caif/caif_serial.c 	if (skb == NULL)
skb               194 drivers/net/caif/caif_serial.c 	skb_put_data(skb, data, count);
skb               196 drivers/net/caif/caif_serial.c 	skb->protocol = htons(ETH_P_CAIF);
skb               197 drivers/net/caif/caif_serial.c 	skb_reset_mac_header(skb);
skb               200 drivers/net/caif/caif_serial.c 	ret = netif_rx_ni(skb);
skb               212 drivers/net/caif/caif_serial.c 	struct sk_buff *skb;
skb               223 drivers/net/caif/caif_serial.c 	while ((skb = skb_peek(&ser->head)) != NULL) {
skb               226 drivers/net/caif/caif_serial.c 		len = skb->len;
skb               237 drivers/net/caif/caif_serial.c 			tty_wr = tty->ops->write(tty, skb->data, len);
skb               241 drivers/net/caif/caif_serial.c 			ldisc_receive(tty, skb->data, NULL, len);
skb               250 drivers/net/caif/caif_serial.c 		skb_pull(skb, tty_wr);
skb               251 drivers/net/caif/caif_serial.c 		if (skb->len == 0) {
skb               253 drivers/net/caif/caif_serial.c 			WARN_ON(tmp != skb);
skb               254 drivers/net/caif/caif_serial.c 			dev_consume_skb_any(skb);
skb               269 drivers/net/caif/caif_serial.c static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
skb               283 drivers/net/caif/caif_serial.c 	skb_queue_tail(&ser->head, skb);
skb               342 drivers/net/caif/caif_spi.c 		struct sk_buff *skb;
skb               347 drivers/net/caif/caif_spi.c 		skb = skb_dequeue(&cfspi->chead);
skb               348 drivers/net/caif/caif_spi.c 		if (!skb)
skb               355 drivers/net/caif/caif_spi.c 		info = (struct caif_payload_info *)&skb->cb;
skb               368 drivers/net/caif/caif_spi.c 		skb_copy_bits(skb, 0, dst, skb->len);
skb               369 drivers/net/caif/caif_spi.c 		dst += skb->len;
skb               371 drivers/net/caif/caif_spi.c 		cfspi->ndev->stats.tx_bytes += skb->len;
skb               377 drivers/net/caif/caif_spi.c 		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
skb               380 drivers/net/caif/caif_spi.c 		dev_kfree_skb(skb);
skb               389 drivers/net/caif/caif_spi.c 	struct sk_buff *skb = NULL;
skb               398 drivers/net/caif/caif_spi.c 		skb = skb_dequeue_tail(&cfspi->chead);
skb               399 drivers/net/caif/caif_spi.c 		skb_queue_head(&cfspi->qhead, skb);
skb               407 drivers/net/caif/caif_spi.c 		skb = skb_dequeue(&cfspi->qhead);
skb               408 drivers/net/caif/caif_spi.c 		if (!skb)
skb               415 drivers/net/caif/caif_spi.c 		info = (struct caif_payload_info *)&skb->cb;
skb               428 drivers/net/caif/caif_spi.c 		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
skb               430 drivers/net/caif/caif_spi.c 		if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
skb               431 drivers/net/caif/caif_spi.c 			skb_queue_tail(&cfspi->chead, skb);
skb               433 drivers/net/caif/caif_spi.c 			frm_len += skb->len + spad + epad;
skb               436 drivers/net/caif/caif_spi.c 			skb_queue_head(&cfspi->qhead, skb);
skb               491 drivers/net/caif/caif_spi.c static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
skb               500 drivers/net/caif/caif_spi.c 	skb_queue_tail(&cfspi->qhead, skb);
skb               528 drivers/net/caif/caif_spi.c 		struct sk_buff *skb = NULL;
skb               549 drivers/net/caif/caif_spi.c 		skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
skb               550 drivers/net/caif/caif_spi.c 		caif_assert(skb != NULL);
skb               552 drivers/net/caif/caif_spi.c 		skb_put_data(skb, src, pkt_len);
skb               555 drivers/net/caif/caif_spi.c 		skb->protocol = htons(ETH_P_CAIF);
skb               556 drivers/net/caif/caif_spi.c 		skb_reset_mac_header(skb);
skb               562 drivers/net/caif/caif_spi.c 			res = netif_rx_ni(skb);
skb               564 drivers/net/caif/caif_spi.c 			res = cfspi_xmit(skb, cfspi->ndev);
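The spad/epad arithmetic above (PAD_POW2) pads each queued frame so the next one starts on the power-of-two boundary the SPI transfer requires. The driver's actual macro is not reproduced in this listing; a helper with the usual semantics would look like:

	/* assumed semantics: bytes needed to round len up to align,
	 * where align is a power of two; 0 when already aligned */
	static inline size_t pad_pow2(size_t len, size_t align)
	{
		return (align - (len & (align - 1))) & (align - 1);
	}

With align = 4, len = 6 yields 2 pad bytes and len = 8 yields 0, matching how frm_len above grows by skb->len + spad + epad.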
skb               220 drivers/net/caif/caif_virtio.c 	struct sk_buff *skb;
skb               237 drivers/net/caif/caif_virtio.c 	skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
skb               238 drivers/net/caif/caif_virtio.c 	if (!skb) {
skb               243 drivers/net/caif/caif_virtio.c 	skb_reserve(skb, cfv->rx_hr + pad_len);
skb               245 drivers/net/caif/caif_virtio.c 	skb_put_data(skb, frm + cfv->rx_hr, cfpkt_len);
skb               246 drivers/net/caif/caif_virtio.c 	return skb;
skb               256 drivers/net/caif/caif_virtio.c 	struct sk_buff *skb;
skb               261 drivers/net/caif/caif_virtio.c 		skb = NULL;
skb               288 drivers/net/caif/caif_virtio.c 		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
skb               294 drivers/net/caif/caif_virtio.c 		skb_len = skb->len;
skb               295 drivers/net/caif/caif_virtio.c 		skb->protocol = htons(ETH_P_CAIF);
skb               296 drivers/net/caif/caif_virtio.c 		skb_reset_mac_header(skb);
skb               297 drivers/net/caif/caif_virtio.c 		skb->dev = cfv->ndev;
skb               298 drivers/net/caif/caif_virtio.c 		err = netif_receive_skb(skb);
skb               329 drivers/net/caif/caif_virtio.c 		dev_kfree_skb(skb);
skb               480 drivers/net/caif/caif_virtio.c 						       struct sk_buff *skb,
skb               483 drivers/net/caif/caif_virtio.c 	struct caif_payload_info *info = (void *)&skb->cb;
skb               490 drivers/net/caif/caif_virtio.c 	if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
skb               492 drivers/net/caif/caif_virtio.c 			    cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
skb               503 drivers/net/caif/caif_virtio.c 	buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;
skb               511 drivers/net/caif/caif_virtio.c 	skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
skb               513 drivers/net/caif/caif_virtio.c 		    skb->len + cfv->tx_hr + cfv->rx_hr);
skb               522 drivers/net/caif/caif_virtio.c static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
skb               548 drivers/net/caif/caif_virtio.c 	buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
skb               557 drivers/net/caif/caif_virtio.c 			buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
skb               585 drivers/net/caif/caif_virtio.c 	cfv->ndev->stats.tx_bytes += skb->len;
skb               591 drivers/net/caif/caif_virtio.c 	dev_kfree_skb(skb);
skb               597 drivers/net/caif/caif_virtio.c 	dev_kfree_skb(skb);
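caif_virtio transmits by flattening the skb into a shared-memory slot: check that header room plus payload fits, then skb_copy_bits() the whole thing, paged fragments included, past the reserved header area. A hedged sketch of that copy step, with hypothetical names and layout:

	#include <linux/skbuff.h>

	static int demo_copy_to_shm(struct sk_buff *skb, u8 *buf,
				    unsigned int buf_len, unsigned int tx_hr)
	{
		if (tx_hr + skb->len > buf_len)
			return -EMSGSIZE;	/* frame too big for slot */

		/* linearize the payload, fragments included */
		skb_copy_bits(skb, 0, buf + tx_hr, skb->len);
		return 0;
	}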
skb               449 drivers/net/can/at91_can.c static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               453 drivers/net/can/at91_can.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               457 drivers/net/can/at91_can.c 	if (can_dropped_invalid_skb(dev, skb))
skb               487 drivers/net/can/at91_can.c 	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
skb               542 drivers/net/can/at91_can.c 	struct sk_buff *skb;
skb               549 drivers/net/can/at91_can.c 	skb = alloc_can_err_skb(dev, &cf);
skb               550 drivers/net/can/at91_can.c 	if (unlikely(!skb))
skb               558 drivers/net/can/at91_can.c 	netif_receive_skb(skb);
skb               611 drivers/net/can/at91_can.c 	struct sk_buff *skb;
skb               613 drivers/net/can/at91_can.c 	skb = alloc_can_skb(dev, &cf);
skb               614 drivers/net/can/at91_can.c 	if (unlikely(!skb)) {
skb               623 drivers/net/can/at91_can.c 	netif_receive_skb(skb);
skb               770 drivers/net/can/at91_can.c 	struct sk_buff *skb;
skb               776 drivers/net/can/at91_can.c 	skb = alloc_can_err_skb(dev, &cf);
skb               777 drivers/net/can/at91_can.c 	if (unlikely(!skb))
skb               784 drivers/net/can/at91_can.c 	netif_receive_skb(skb);
skb              1011 drivers/net/can/at91_can.c 	struct sk_buff *skb;
skb              1043 drivers/net/can/at91_can.c 	skb = alloc_can_err_skb(dev, &cf);
skb              1044 drivers/net/can/at91_can.c 	if (unlikely(!skb))
skb              1051 drivers/net/can/at91_can.c 	netif_rx(skb);
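Every at91 error path above has the same shape: alloc_can_err_skb() hands back a pre-flagged error frame, the handler describes the condition in it, and the frame goes up the stack like ordinary traffic. A generic sketch; the error class chosen is just an example:

	#include <linux/can/dev.h>
	#include <linux/can/error.h>

	static void demo_report_bus_error(struct net_device *dev)
	{
		struct can_frame *cf;
		struct sk_buff *skb;

		skb = alloc_can_err_skb(dev, &cf);
		if (unlikely(!skb))
			return;		/* nothing to report with */

		cf->can_id |= CAN_ERR_BUSERROR;	/* example condition */
		netif_rx(skb);
	}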
skb               374 drivers/net/can/c_can/c_can.c 	struct sk_buff *skb;
skb               384 drivers/net/can/c_can/c_can.c 	skb = alloc_can_err_skb(dev, &frame);
skb               385 drivers/net/can/c_can/c_can.c 	if (unlikely(!skb))
skb               391 drivers/net/can/c_can/c_can.c 	netif_receive_skb(skb);
skb               400 drivers/net/can/c_can/c_can.c 	struct sk_buff *skb;
skb               403 drivers/net/can/c_can/c_can.c 	skb = alloc_can_skb(dev, &frame);
skb               404 drivers/net/can/c_can/c_can.c 	if (!skb) {
skb               443 drivers/net/can/c_can/c_can.c 	netif_receive_skb(skb);
skb               462 drivers/net/can/c_can/c_can.c static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
skb               465 drivers/net/can/c_can/c_can.c 	struct can_frame *frame = (struct can_frame *)skb->data;
skb               469 drivers/net/can/c_can/c_can.c 	if (can_dropped_invalid_skb(dev, skb))
skb               488 drivers/net/can/c_can/c_can.c 	can_put_echo_skb(skb, dev, idx);
skb               914 drivers/net/can/c_can/c_can.c 	struct sk_buff *skb;
skb               941 drivers/net/can/c_can/c_can.c 	skb = alloc_can_err_skb(dev, &cf);
skb               942 drivers/net/can/c_can/c_can.c 	if (unlikely(!skb))
skb               990 drivers/net/can/c_can/c_can.c 	netif_receive_skb(skb);
skb              1001 drivers/net/can/c_can/c_can.c 	struct sk_buff *skb;
skb              1019 drivers/net/can/c_can/c_can.c 	skb = alloc_can_err_skb(dev, &cf);
skb              1020 drivers/net/can/c_can/c_can.c 	if (unlikely(!skb))
skb              1060 drivers/net/can/c_can/c_can.c 	netif_receive_skb(skb);
skb               426 drivers/net/can/cc770/cc770.c static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               431 drivers/net/can/cc770/cc770.c 	if (can_dropped_invalid_skb(dev, skb))
skb               442 drivers/net/can/cc770/cc770.c 	priv->tx_skb = skb;
skb               453 drivers/net/can/cc770/cc770.c 	struct sk_buff *skb;
skb               458 drivers/net/can/cc770/cc770.c 	skb = alloc_can_skb(dev, &cf);
skb               459 drivers/net/can/cc770/cc770.c 	if (!skb)
skb               496 drivers/net/can/cc770/cc770.c 	netif_rx(skb);
skb               504 drivers/net/can/cc770/cc770.c 	struct sk_buff *skb;
skb               509 drivers/net/can/cc770/cc770.c 	skb = alloc_can_err_skb(dev, &cf);
skb               510 drivers/net/can/cc770/cc770.c 	if (!skb)
skb               576 drivers/net/can/cc770/cc770.c 	netif_rx(skb);
skb               437 drivers/net/can/dev.c void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
skb               445 drivers/net/can/dev.c 	if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
skb               446 drivers/net/can/dev.c 	    (skb->protocol != htons(ETH_P_CAN) &&
skb               447 drivers/net/can/dev.c 	     skb->protocol != htons(ETH_P_CANFD))) {
skb               448 drivers/net/can/dev.c 		kfree_skb(skb);
skb               453 drivers/net/can/dev.c 		skb = can_create_echo_skb(skb);
skb               454 drivers/net/can/dev.c 		if (!skb)
skb               458 drivers/net/can/dev.c 		skb->pkt_type = PACKET_BROADCAST;
skb               459 drivers/net/can/dev.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               460 drivers/net/can/dev.c 		skb->dev = dev;
skb               463 drivers/net/can/dev.c 		priv->echo_skb[idx] = skb;
skb               467 drivers/net/can/dev.c 		kfree_skb(skb);
skb               487 drivers/net/can/dev.c 		struct sk_buff *skb = priv->echo_skb[idx];
skb               488 drivers/net/can/dev.c 		struct canfd_frame *cf = (struct canfd_frame *)skb->data;
skb               494 drivers/net/can/dev.c 		return skb;
skb               508 drivers/net/can/dev.c 	struct sk_buff *skb;
skb               511 drivers/net/can/dev.c 	skb = __can_get_echo_skb(dev, idx, &len);
skb               512 drivers/net/can/dev.c 	if (!skb)
skb               515 drivers/net/can/dev.c 	netif_rx(skb);
skb               543 drivers/net/can/dev.c 	struct sk_buff *skb;
skb               555 drivers/net/can/dev.c 	skb = alloc_can_err_skb(dev, &cf);
skb               556 drivers/net/can/dev.c 	if (!skb) {
skb               562 drivers/net/can/dev.c 	netif_rx(skb);
skb               641 drivers/net/can/dev.c 	struct sk_buff *skb;
skb               643 drivers/net/can/dev.c 	skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
skb               645 drivers/net/can/dev.c 	if (unlikely(!skb))
skb               648 drivers/net/can/dev.c 	skb->protocol = htons(ETH_P_CAN);
skb               649 drivers/net/can/dev.c 	skb->pkt_type = PACKET_BROADCAST;
skb               650 drivers/net/can/dev.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               652 drivers/net/can/dev.c 	skb_reset_mac_header(skb);
skb               653 drivers/net/can/dev.c 	skb_reset_network_header(skb);
skb               654 drivers/net/can/dev.c 	skb_reset_transport_header(skb);
skb               656 drivers/net/can/dev.c 	can_skb_reserve(skb);
skb               657 drivers/net/can/dev.c 	can_skb_prv(skb)->ifindex = dev->ifindex;
skb               658 drivers/net/can/dev.c 	can_skb_prv(skb)->skbcnt = 0;
skb               660 drivers/net/can/dev.c 	*cf = skb_put_zero(skb, sizeof(struct can_frame));
skb               662 drivers/net/can/dev.c 	return skb;
skb               669 drivers/net/can/dev.c 	struct sk_buff *skb;
skb               671 drivers/net/can/dev.c 	skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
skb               673 drivers/net/can/dev.c 	if (unlikely(!skb))
skb               676 drivers/net/can/dev.c 	skb->protocol = htons(ETH_P_CANFD);
skb               677 drivers/net/can/dev.c 	skb->pkt_type = PACKET_BROADCAST;
skb               678 drivers/net/can/dev.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               680 drivers/net/can/dev.c 	skb_reset_mac_header(skb);
skb               681 drivers/net/can/dev.c 	skb_reset_network_header(skb);
skb               682 drivers/net/can/dev.c 	skb_reset_transport_header(skb);
skb               684 drivers/net/can/dev.c 	can_skb_reserve(skb);
skb               685 drivers/net/can/dev.c 	can_skb_prv(skb)->ifindex = dev->ifindex;
skb               686 drivers/net/can/dev.c 	can_skb_prv(skb)->skbcnt = 0;
skb               688 drivers/net/can/dev.c 	*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
skb               690 drivers/net/can/dev.c 	return skb;
skb               696 drivers/net/can/dev.c 	struct sk_buff *skb;
skb               698 drivers/net/can/dev.c 	skb = alloc_can_skb(dev, cf);
skb               699 drivers/net/can/dev.c 	if (unlikely(!skb))
skb               705 drivers/net/can/dev.c 	return skb;
skb              1125 drivers/net/can/dev.c static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              1136 drivers/net/can/dev.c 	     nla_put(skb, IFLA_CAN_BITTIMING,
skb              1140 drivers/net/can/dev.c 	     nla_put(skb, IFLA_CAN_BITTIMING_CONST,
skb              1143 drivers/net/can/dev.c 	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
skb              1144 drivers/net/can/dev.c 	    nla_put_u32(skb, IFLA_CAN_STATE, state) ||
skb              1145 drivers/net/can/dev.c 	    nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
skb              1146 drivers/net/can/dev.c 	    nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
skb              1150 drivers/net/can/dev.c 	     nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
skb              1153 drivers/net/can/dev.c 	     nla_put(skb, IFLA_CAN_DATA_BITTIMING,
skb              1157 drivers/net/can/dev.c 	     nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
skb              1162 drivers/net/can/dev.c 	     (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
skb              1163 drivers/net/can/dev.c 	      nla_put(skb, IFLA_CAN_TERMINATION_CONST,
skb              1169 drivers/net/can/dev.c 	     nla_put(skb, IFLA_CAN_BITRATE_CONST,
skb              1175 drivers/net/can/dev.c 	     nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
skb              1180 drivers/net/can/dev.c 	    (nla_put(skb, IFLA_CAN_BITRATE_MAX,
skb              1195 drivers/net/can/dev.c static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
skb              1199 drivers/net/can/dev.c 	if (nla_put(skb, IFLA_INFO_XSTATS,
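The dev.c lines above implement the echo-skb machinery the drivers below rely on: can_put_echo_skb() parks a loopback clone at xmit time, and the tx-complete handler later releases it with can_get_echo_skb() so the sender sees its own frame. A minimal start_xmit built on that contract; demo_hw_write() is a hypothetical register write, not a real API:

	#include <linux/can/dev.h>
	#include <linux/netdevice.h>

	static void demo_hw_write(struct net_device *dev,
				  struct can_frame *cf);	/* hypothetical */

	static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
	{
		struct can_frame *cf = (struct can_frame *)skb->data;

		if (can_dropped_invalid_skb(dev, skb))
			return NETDEV_TX_OK;

		can_put_echo_skb(skb, dev, 0);	/* park loopback copy */
		demo_hw_write(dev, cf);

		return NETDEV_TX_OK;
	}

	/* and in the tx-done handler: dev->stats.tx_packets++;
	 * dev->stats.tx_bytes += can_get_echo_skb(dev, 0); */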
skb               628 drivers/net/can/flexcan.c static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               631 drivers/net/can/flexcan.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               637 drivers/net/can/flexcan.c 	if (can_dropped_invalid_skb(dev, skb))
skb               657 drivers/net/can/flexcan.c 	can_put_echo_skb(skb, dev, 0);
skb               677 drivers/net/can/flexcan.c 	struct sk_buff *skb;
skb               685 drivers/net/can/flexcan.c 	skb = alloc_can_err_skb(dev, &cf);
skb               686 drivers/net/can/flexcan.c 	if (unlikely(!skb))
skb               730 drivers/net/can/flexcan.c 	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
skb               739 drivers/net/can/flexcan.c 	struct sk_buff *skb;
skb               768 drivers/net/can/flexcan.c 	skb = alloc_can_err_skb(dev, &cf);
skb               769 drivers/net/can/flexcan.c 	if (unlikely(!skb))
skb               777 drivers/net/can/flexcan.c 	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
skb               748 drivers/net/can/grcan.c 		struct sk_buff *skb = alloc_can_err_skb(dev, &skb_cf);
skb               750 drivers/net/can/grcan.c 		if (skb == NULL) {
skb               757 drivers/net/can/grcan.c 		netif_rx(skb);
skb              1170 drivers/net/can/grcan.c 	struct sk_buff *skb;
skb              1185 drivers/net/can/grcan.c 		skb = alloc_can_skb(dev, &cf);
skb              1186 drivers/net/can/grcan.c 		if (skb == NULL) {
skb              1219 drivers/net/can/grcan.c 		netif_receive_skb(skb);
skb              1280 drivers/net/can/grcan.c static int grcan_txbug_workaround(struct net_device *dev, struct sk_buff *skb,
skb              1322 drivers/net/can/grcan.c 		kfree_skb(skb);
skb              1348 drivers/net/can/grcan.c static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
skb              1354 drivers/net/can/grcan.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb              1363 drivers/net/can/grcan.c 	if (can_dropped_invalid_skb(dev, skb))
skb              1437 drivers/net/can/grcan.c 			err = grcan_txbug_workaround(dev, skb, txwr,
skb              1451 drivers/net/can/grcan.c 	can_put_echo_skb(skb, dev, slotindex);
skb               253 drivers/net/can/ifi_canfd/ifi_canfd.c 	struct sk_buff *skb;
skb               262 drivers/net/can/ifi_canfd/ifi_canfd.c 		skb = alloc_canfd_skb(ndev, &cf);
skb               264 drivers/net/can/ifi_canfd/ifi_canfd.c 		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
skb               266 drivers/net/can/ifi_canfd/ifi_canfd.c 	if (!skb) {
skb               321 drivers/net/can/ifi_canfd/ifi_canfd.c 	netif_receive_skb(skb);
skb               357 drivers/net/can/ifi_canfd/ifi_canfd.c 	struct sk_buff *skb;
skb               365 drivers/net/can/ifi_canfd/ifi_canfd.c 	skb = alloc_can_err_skb(ndev, &frame);
skb               366 drivers/net/can/ifi_canfd/ifi_canfd.c 	if (unlikely(!skb))
skb               372 drivers/net/can/ifi_canfd/ifi_canfd.c 	netif_receive_skb(skb);
skb               382 drivers/net/can/ifi_canfd/ifi_canfd.c 	struct sk_buff *skb;
skb               399 drivers/net/can/ifi_canfd/ifi_canfd.c 	skb = alloc_can_err_skb(ndev, &cf);
skb               400 drivers/net/can/ifi_canfd/ifi_canfd.c 	if (unlikely(!skb))
skb               435 drivers/net/can/ifi_canfd/ifi_canfd.c 	netif_receive_skb(skb);
skb               461 drivers/net/can/ifi_canfd/ifi_canfd.c 	struct sk_buff *skb;
skb               492 drivers/net/can/ifi_canfd/ifi_canfd.c 	skb = alloc_can_err_skb(ndev, &cf);
skb               493 drivers/net/can/ifi_canfd/ifi_canfd.c 	if (unlikely(!skb))
skb               527 drivers/net/can/ifi_canfd/ifi_canfd.c 	netif_receive_skb(skb);
skb               866 drivers/net/can/ifi_canfd/ifi_canfd.c static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
skb               870 drivers/net/can/ifi_canfd/ifi_canfd.c 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
skb               874 drivers/net/can/ifi_canfd/ifi_canfd.c 	if (can_dropped_invalid_skb(ndev, skb))
skb               904 drivers/net/can/ifi_canfd/ifi_canfd.c 	if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) {
skb               925 drivers/net/can/ifi_canfd/ifi_canfd.c 	can_put_echo_skb(skb, ndev, 0);
skb               995 drivers/net/can/janz-ican3.c 	struct sk_buff *skb;
skb              1015 drivers/net/can/janz-ican3.c 	skb = alloc_can_err_skb(dev, &cf);
skb              1016 drivers/net/can/janz-ican3.c 	if (skb) {
skb              1021 drivers/net/can/janz-ican3.c 		netif_rx(skb);
skb              1039 drivers/net/can/janz-ican3.c 	struct sk_buff *skb;
skb              1096 drivers/net/can/janz-ican3.c 	skb = alloc_can_err_skb(dev, &cf);
skb              1097 drivers/net/can/janz-ican3.c 	if (skb == NULL)
skb              1174 drivers/net/can/janz-ican3.c 	netif_rx(skb);
skb              1274 drivers/net/can/janz-ican3.c static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
skb              1276 drivers/net/can/janz-ican3.c 	skb = can_create_echo_skb(skb);
skb              1277 drivers/net/can/janz-ican3.c 	if (!skb)
skb              1281 drivers/net/can/janz-ican3.c 	skb_queue_tail(&mod->echoq, skb);
skb              1286 drivers/net/can/janz-ican3.c 	struct sk_buff *skb = skb_dequeue(&mod->echoq);
skb              1291 drivers/net/can/janz-ican3.c 	if (!skb) {
skb              1296 drivers/net/can/janz-ican3.c 	cf = (struct can_frame *)skb->data;
skb              1300 drivers/net/can/janz-ican3.c 	if (skb->pkt_type != PACKET_LOOPBACK) {
skb              1301 drivers/net/can/janz-ican3.c 		kfree_skb(skb);
skb              1305 drivers/net/can/janz-ican3.c 	skb->protocol = htons(ETH_P_CAN);
skb              1306 drivers/net/can/janz-ican3.c 	skb->pkt_type = PACKET_BROADCAST;
skb              1307 drivers/net/can/janz-ican3.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1308 drivers/net/can/janz-ican3.c 	skb->dev = mod->ndev;
skb              1309 drivers/net/can/janz-ican3.c 	netif_receive_skb(skb);
skb              1322 drivers/net/can/janz-ican3.c static bool ican3_echo_skb_matches(struct ican3_dev *mod, struct sk_buff *skb)
skb              1324 drivers/net/can/janz-ican3.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb              1379 drivers/net/can/janz-ican3.c 	struct sk_buff *skb;
skb              1396 drivers/net/can/janz-ican3.c 	skb = alloc_can_skb(ndev, &cf);
skb              1397 drivers/net/can/janz-ican3.c 	if (unlikely(skb == NULL)) {
skb              1415 drivers/net/can/janz-ican3.c 	if (ican3_echo_skb_matches(mod, skb)) {
skb              1418 drivers/net/can/janz-ican3.c 		kfree_skb(skb);
skb              1425 drivers/net/can/janz-ican3.c 	netif_receive_skb(skb);
skb              1683 drivers/net/can/janz-ican3.c static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
skb              1686 drivers/net/can/janz-ican3.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb              1691 drivers/net/can/janz-ican3.c 	if (can_dropped_invalid_skb(ndev, skb))
skb              1718 drivers/net/can/janz-ican3.c 	ican3_put_echo_skb(mod, skb);
skb               725 drivers/net/can/kvaser_pciefd.c 					   struct sk_buff *skb)
skb               727 drivers/net/can/kvaser_pciefd.c 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
skb               746 drivers/net/can/kvaser_pciefd.c 	if (can_is_canfd_skb(skb)) {
skb               762 drivers/net/can/kvaser_pciefd.c static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
skb               771 drivers/net/can/kvaser_pciefd.c 	if (can_dropped_invalid_skb(netdev, skb))
skb               774 drivers/net/can/kvaser_pciefd.c 	nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
skb               779 drivers/net/can/kvaser_pciefd.c 	can_put_echo_skb(skb, netdev, can->echo_idx);
skb              1140 drivers/net/can/kvaser_pciefd.c 	struct sk_buff *skb;
skb              1154 drivers/net/can/kvaser_pciefd.c 		skb = alloc_canfd_skb(priv->dev, &cf);
skb              1155 drivers/net/can/kvaser_pciefd.c 		if (!skb) {
skb              1166 drivers/net/can/kvaser_pciefd.c 		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
skb              1167 drivers/net/can/kvaser_pciefd.c 		if (!skb) {
skb              1184 drivers/net/can/kvaser_pciefd.c 	shhwtstamps = skb_hwtstamps(skb);
skb              1193 drivers/net/can/kvaser_pciefd.c 	return netif_rx(skb);
skb              1252 drivers/net/can/kvaser_pciefd.c 	struct sk_buff *skb;
skb              1265 drivers/net/can/kvaser_pciefd.c 	skb = alloc_can_err_skb(ndev, &cf);
skb              1275 drivers/net/can/kvaser_pciefd.c 			if (skb)
skb              1287 drivers/net/can/kvaser_pciefd.c 	if (!skb) {
skb              1292 drivers/net/can/kvaser_pciefd.c 	shhwtstamps = skb_hwtstamps(skb);
skb              1304 drivers/net/can/kvaser_pciefd.c 	netif_rx(skb);
skb              1344 drivers/net/can/kvaser_pciefd.c 		struct sk_buff *skb;
skb              1348 drivers/net/can/kvaser_pciefd.c 		skb = alloc_can_err_skb(ndev, &cf);
skb              1349 drivers/net/can/kvaser_pciefd.c 		if (!skb) {
skb              1366 drivers/net/can/kvaser_pciefd.c 		shhwtstamps = skb_hwtstamps(skb);
skb              1374 drivers/net/can/kvaser_pciefd.c 		netif_rx(skb);
skb              1484 drivers/net/can/kvaser_pciefd.c 	struct sk_buff *skb;
skb              1488 drivers/net/can/kvaser_pciefd.c 	skb = alloc_can_err_skb(can->can.dev, &cf);
skb              1492 drivers/net/can/kvaser_pciefd.c 		if (skb)
skb              1495 drivers/net/can/kvaser_pciefd.c 	} else if (skb) {
skb              1499 drivers/net/can/kvaser_pciefd.c 	if (skb) {
skb              1503 drivers/net/can/kvaser_pciefd.c 		netif_rx(skb);
skb               442 drivers/net/can/m_can/m_can.c 	struct sk_buff *skb;
skb               450 drivers/net/can/m_can/m_can.c 		skb = alloc_canfd_skb(dev, &cf);
skb               452 drivers/net/can/m_can/m_can.c 		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
skb               453 drivers/net/can/m_can/m_can.c 	if (!skb) {
skb               492 drivers/net/can/m_can/m_can.c 	netif_receive_skb(skb);
skb               527 drivers/net/can/m_can/m_can.c 	struct sk_buff *skb;
skb               535 drivers/net/can/m_can/m_can.c 	skb = alloc_can_err_skb(dev, &frame);
skb               536 drivers/net/can/m_can/m_can.c 	if (unlikely(!skb))
skb               542 drivers/net/can/m_can/m_can.c 	netif_receive_skb(skb);
skb               553 drivers/net/can/m_can/m_can.c 	struct sk_buff *skb;
skb               559 drivers/net/can/m_can/m_can.c 	skb = alloc_can_err_skb(dev, &cf);
skb               560 drivers/net/can/m_can/m_can.c 	if (unlikely(!skb))
skb               599 drivers/net/can/m_can/m_can.c 	netif_receive_skb(skb);
skb               662 drivers/net/can/m_can/m_can.c 	struct sk_buff *skb;
skb               689 drivers/net/can/m_can/m_can.c 	skb = alloc_can_err_skb(dev, &cf);
skb               690 drivers/net/can/m_can/m_can.c 	if (unlikely(!skb))
skb               726 drivers/net/can/m_can/m_can.c 	netif_receive_skb(skb);
skb              1416 drivers/net/can/m_can/m_can.c 	struct sk_buff *skb = cdev->tx_skb;
skb              1446 drivers/net/can/m_can/m_can.c 		can_put_echo_skb(skb, dev, 0);
skb              1451 drivers/net/can/m_can/m_can.c 			if (can_is_canfd_skb(skb)) {
skb              1477 drivers/net/can/m_can/m_can.c 				kfree_skb(skb);
skb              1493 drivers/net/can/m_can/m_can.c 		if (can_is_canfd_skb(skb)) {
skb              1517 drivers/net/can/m_can/m_can.c 		can_put_echo_skb(skb, dev, putidx);
skb              1540 drivers/net/can/m_can/m_can.c static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
skb              1545 drivers/net/can/m_can/m_can.c 	if (can_dropped_invalid_skb(dev, skb))
skb              1562 drivers/net/can/m_can/m_can.c 			cdev->tx_skb = skb;
skb              1567 drivers/net/can/m_can/m_can.c 		cdev->tx_skb = skb;
skb               186 drivers/net/can/mscan/mscan.c static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               188 drivers/net/can/mscan/mscan.c 	struct can_frame *frame = (struct can_frame *)skb->data;
skb               194 drivers/net/can/mscan/mscan.c 	if (can_dropped_invalid_skb(dev, skb))
skb               272 drivers/net/can/mscan/mscan.c 	can_put_echo_skb(skb, dev, buf_id);
skb               385 drivers/net/can/mscan/mscan.c 	struct sk_buff *skb;
skb               394 drivers/net/can/mscan/mscan.c 		skb = alloc_can_skb(dev, &frame);
skb               395 drivers/net/can/mscan/mscan.c 		if (!skb) {
skb               411 drivers/net/can/mscan/mscan.c 		netif_receive_skb(skb);
skb               481 drivers/net/can/pch_can.c 	struct sk_buff *skb;
skb               488 drivers/net/can/pch_can.c 	skb = alloc_can_err_skb(ndev, &cf);
skb               489 drivers/net/can/pch_can.c 	if (!skb)
skb               563 drivers/net/can/pch_can.c 	netif_receive_skb(skb);
skb               608 drivers/net/can/pch_can.c 	struct sk_buff *skb;
skb               618 drivers/net/can/pch_can.c 	skb = alloc_can_err_skb(ndev, &cf);
skb               619 drivers/net/can/pch_can.c 	if (!skb)
skb               627 drivers/net/can/pch_can.c 	netif_receive_skb(skb);
skb               635 drivers/net/can/pch_can.c 	struct sk_buff *skb;
skb               666 drivers/net/can/pch_can.c 		skb = alloc_can_skb(priv->ndev, &cf);
skb               667 drivers/net/can/pch_can.c 		if (!skb) {
skb               695 drivers/net/can/pch_can.c 		netif_receive_skb(skb);
skb               880 drivers/net/can/pch_can.c static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               883 drivers/net/can/pch_can.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               888 drivers/net/can/pch_can.c 	if (can_dropped_invalid_skb(ndev, skb))
skb               927 drivers/net/can/pch_can.c 	can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);
skb               241 drivers/net/can/peak_canfd/peak_canfd.c 	struct sk_buff *skb;
skb               272 drivers/net/can/peak_canfd/peak_canfd.c 		skb = alloc_canfd_skb(priv->ndev, &cf);
skb               273 drivers/net/can/peak_canfd/peak_canfd.c 		if (!skb)
skb               283 drivers/net/can/peak_canfd/peak_canfd.c 		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
skb               284 drivers/net/can/peak_canfd/peak_canfd.c 		if (!skb)
skb               302 drivers/net/can/peak_canfd/peak_canfd.c 	netif_rx(skb);
skb               324 drivers/net/can/peak_canfd/peak_canfd.c 	struct sk_buff *skb;
skb               342 drivers/net/can/peak_canfd/peak_canfd.c 	skb = alloc_can_err_skb(ndev, &cf);
skb               350 drivers/net/can/peak_canfd/peak_canfd.c 		if (skb)
skb               357 drivers/net/can/peak_canfd/peak_canfd.c 		if (skb) {
skb               370 drivers/net/can/peak_canfd/peak_canfd.c 		if (skb) {
skb               385 drivers/net/can/peak_canfd/peak_canfd.c 		dev_kfree_skb(skb);
skb               389 drivers/net/can/peak_canfd/peak_canfd.c 	if (!skb) {
skb               396 drivers/net/can/peak_canfd/peak_canfd.c 	netif_rx(skb);
skb               406 drivers/net/can/peak_canfd/peak_canfd.c 	struct sk_buff *skb;
skb               411 drivers/net/can/peak_canfd/peak_canfd.c 	skb = alloc_can_err_skb(priv->ndev, &cf);
skb               412 drivers/net/can/peak_canfd/peak_canfd.c 	if (!skb) {
skb               425 drivers/net/can/peak_canfd/peak_canfd.c 	netif_rx(skb);
skb               626 drivers/net/can/peak_canfd/peak_canfd.c static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
skb               631 drivers/net/can/peak_canfd/peak_canfd.c 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
skb               639 drivers/net/can/peak_canfd/peak_canfd.c 	if (can_dropped_invalid_skb(ndev, skb))
skb               665 drivers/net/can/peak_canfd/peak_canfd.c 	if (can_is_canfd_skb(skb)) {
skb               701 drivers/net/can/peak_canfd/peak_canfd.c 	can_put_echo_skb(skb, ndev, priv->echo_idx);
skb               228 drivers/net/can/rcar/rcar_can.c 	struct sk_buff *skb;
skb               232 drivers/net/can/rcar/rcar_can.c 	skb = alloc_can_err_skb(ndev, &cf);
skb               238 drivers/net/can/rcar/rcar_can.c 		if (skb) {
skb               249 drivers/net/can/rcar/rcar_can.c 		if (skb)
skb               257 drivers/net/can/rcar/rcar_can.c 			if (skb)
skb               264 drivers/net/can/rcar/rcar_can.c 			if (skb)
skb               271 drivers/net/can/rcar/rcar_can.c 			if (skb)
skb               278 drivers/net/can/rcar/rcar_can.c 			if (skb)
skb               285 drivers/net/can/rcar/rcar_can.c 			if (skb) {
skb               294 drivers/net/can/rcar/rcar_can.c 			if (skb)
skb               301 drivers/net/can/rcar/rcar_can.c 			if (skb)
skb               316 drivers/net/can/rcar/rcar_can.c 		if (skb)
skb               326 drivers/net/can/rcar/rcar_can.c 		if (skb)
skb               340 drivers/net/can/rcar/rcar_can.c 		if (skb)
skb               348 drivers/net/can/rcar/rcar_can.c 		if (skb) {
skb               359 drivers/net/can/rcar/rcar_can.c 		if (skb) {
skb               365 drivers/net/can/rcar/rcar_can.c 	if (skb) {
skb               368 drivers/net/can/rcar/rcar_can.c 		netif_rx(skb);
skb               592 drivers/net/can/rcar/rcar_can.c static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
skb               596 drivers/net/can/rcar/rcar_can.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               599 drivers/net/can/rcar/rcar_can.c 	if (can_dropped_invalid_skb(ndev, skb))
skb               620 drivers/net/can/rcar/rcar_can.c 	can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
skb               645 drivers/net/can/rcar/rcar_can.c 	struct sk_buff *skb;
skb               649 drivers/net/can/rcar/rcar_can.c 	skb = alloc_can_skb(priv->ndev, &cf);
skb               650 drivers/net/can/rcar/rcar_can.c 	if (!skb) {
skb               675 drivers/net/can/rcar/rcar_can.c 	netif_receive_skb(skb);
skb               922 drivers/net/can/rcar/rcar_canfd.c 	struct sk_buff *skb;
skb               928 drivers/net/can/rcar/rcar_canfd.c 	skb = alloc_can_err_skb(ndev, &cf);
skb               929 drivers/net/can/rcar/rcar_canfd.c 	if (!skb) {
skb              1029 drivers/net/can/rcar/rcar_canfd.c 	netif_rx(skb);
skb              1116 drivers/net/can/rcar/rcar_canfd.c 	struct sk_buff *skb;
skb              1127 drivers/net/can/rcar/rcar_canfd.c 		skb = alloc_can_err_skb(ndev, &cf);
skb              1128 drivers/net/can/rcar/rcar_canfd.c 		if (!skb) {
skb              1138 drivers/net/can/rcar/rcar_canfd.c 		netif_rx(skb);
skb              1338 drivers/net/can/rcar/rcar_canfd.c static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
skb              1342 drivers/net/can/rcar/rcar_canfd.c 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
skb              1347 drivers/net/can/rcar/rcar_canfd.c 	if (can_dropped_invalid_skb(ndev, skb))
skb              1368 drivers/net/can/rcar/rcar_canfd.c 		if (can_is_canfd_skb(skb)) {
skb              1393 drivers/net/can/rcar/rcar_canfd.c 	can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH);
skb              1416 drivers/net/can/rcar/rcar_canfd.c 	struct sk_buff *skb;
skb              1427 drivers/net/can/rcar/rcar_canfd.c 			skb = alloc_canfd_skb(priv->ndev, &cf);
skb              1429 drivers/net/can/rcar/rcar_canfd.c 			skb = alloc_can_skb(priv->ndev,
skb              1434 drivers/net/can/rcar/rcar_canfd.c 		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
skb              1437 drivers/net/can/rcar/rcar_canfd.c 	if (!skb) {
skb              1483 drivers/net/can/rcar/rcar_canfd.c 	netif_receive_skb(skb);
skb                14 drivers/net/can/rx-offload.c static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
skb                16 drivers/net/can/rx-offload.c 	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
skb                18 drivers/net/can/rx-offload.c 	return (struct can_rx_offload_cb *)skb->cb;
skb                42 drivers/net/can/rx-offload.c 	struct sk_buff *skb;
skb                46 drivers/net/can/rx-offload.c 	       (skb = skb_dequeue(&offload->skb_queue))) {
skb                47 drivers/net/can/rx-offload.c 		struct can_frame *cf = (struct can_frame *)skb->data;
skb                52 drivers/net/can/rx-offload.c 		netif_receive_skb(skb);
skb               134 drivers/net/can/rx-offload.c 	struct sk_buff *skb = NULL, *skb_error = NULL;
skb               141 drivers/net/can/rx-offload.c 		skb = alloc_can_skb(offload->dev, &cf);
skb               142 drivers/net/can/rx-offload.c 		if (unlikely(!skb))
skb               179 drivers/net/can/rx-offload.c 	cb = can_rx_offload_get_cb(skb);
skb               184 drivers/net/can/rx-offload.c 		kfree_skb(skb);
skb               190 drivers/net/can/rx-offload.c 		kfree_skb(skb);
skb               199 drivers/net/can/rx-offload.c 	return skb;
skb               212 drivers/net/can/rx-offload.c 		struct sk_buff *skb;
skb               217 drivers/net/can/rx-offload.c 		skb = can_rx_offload_offload_one(offload, i);
skb               218 drivers/net/can/rx-offload.c 		if (IS_ERR_OR_NULL(skb))
skb               221 drivers/net/can/rx-offload.c 		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
skb               246 drivers/net/can/rx-offload.c 	struct sk_buff *skb;
skb               250 drivers/net/can/rx-offload.c 		skb = can_rx_offload_offload_one(offload, 0);
skb               251 drivers/net/can/rx-offload.c 		if (IS_ERR(skb))
skb               253 drivers/net/can/rx-offload.c 		if (!skb)
skb               256 drivers/net/can/rx-offload.c 		skb_queue_tail(&offload->skb_queue, skb);
skb               268 drivers/net/can/rx-offload.c 				struct sk_buff *skb, u32 timestamp)
skb               275 drivers/net/can/rx-offload.c 		kfree_skb(skb);
skb               279 drivers/net/can/rx-offload.c 	cb = can_rx_offload_get_cb(skb);
skb               283 drivers/net/can/rx-offload.c 	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
skb               297 drivers/net/can/rx-offload.c 	struct sk_buff *skb;
skb               301 drivers/net/can/rx-offload.c 	skb = __can_get_echo_skb(dev, idx, &len);
skb               302 drivers/net/can/rx-offload.c 	if (!skb)
skb               305 drivers/net/can/rx-offload.c 	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
skb               316 drivers/net/can/rx-offload.c 			      struct sk_buff *skb)
skb               320 drivers/net/can/rx-offload.c 		kfree_skb(skb);
skb               324 drivers/net/can/rx-offload.c 	skb_queue_tail(&offload->skb_queue, skb);
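rx-offload above gives timestamp-capable controllers (flexcan, ti_hecc) a sorted staging queue so frames reach the stack in hardware order; note from the lines above that can_rx_offload_queue_sorted() frees the skb itself when the queue is full. A sketch of the error-frame variant those drivers use; the error class is illustrative:

	#include <linux/can/dev.h>
	#include <linux/can/rx-offload.h>

	static void demo_err_offload(struct net_device *dev,
				     struct can_rx_offload *offload,
				     u32 timestamp)
	{
		struct can_frame *cf;
		struct sk_buff *skb;

		skb = alloc_can_err_skb(dev, &cf);
		if (unlikely(!skb))
			return;

		cf->can_id |= CAN_ERR_CRTL;	/* example condition */

		/* sorted insert keyed on the hardware timestamp */
		if (can_rx_offload_queue_sorted(offload, skb, timestamp))
			dev->stats.rx_fifo_errors++;
	}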
skb               281 drivers/net/can/sja1000/sja1000.c static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
skb               285 drivers/net/can/sja1000/sja1000.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               293 drivers/net/can/sja1000/sja1000.c 	if (can_dropped_invalid_skb(dev, skb))
skb               322 drivers/net/can/sja1000/sja1000.c 	can_put_echo_skb(skb, dev, 0);
skb               342 drivers/net/can/sja1000/sja1000.c 	struct sk_buff *skb;
skb               349 drivers/net/can/sja1000/sja1000.c 	skb = alloc_can_skb(dev, &cf);
skb               350 drivers/net/can/sja1000/sja1000.c 	if (skb == NULL)
skb               385 drivers/net/can/sja1000/sja1000.c 	netif_rx(skb);
skb               395 drivers/net/can/sja1000/sja1000.c 	struct sk_buff *skb;
skb               401 drivers/net/can/sja1000/sja1000.c 	skb = alloc_can_err_skb(dev, &cf);
skb               402 drivers/net/can/sja1000/sja1000.c 	if (skb == NULL)
skb               494 drivers/net/can/sja1000/sja1000.c 	netif_rx(skb);
skb               145 drivers/net/can/slcan.c 	struct sk_buff *skb;
skb               204 drivers/net/can/slcan.c 	skb = dev_alloc_skb(sizeof(struct can_frame) +
skb               206 drivers/net/can/slcan.c 	if (!skb)
skb               209 drivers/net/can/slcan.c 	skb->dev = sl->dev;
skb               210 drivers/net/can/slcan.c 	skb->protocol = htons(ETH_P_CAN);
skb               211 drivers/net/can/slcan.c 	skb->pkt_type = PACKET_BROADCAST;
skb               212 drivers/net/can/slcan.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               214 drivers/net/can/slcan.c 	can_skb_reserve(skb);
skb               215 drivers/net/can/slcan.c 	can_skb_prv(skb)->ifindex = sl->dev->ifindex;
skb               216 drivers/net/can/slcan.c 	can_skb_prv(skb)->skbcnt = 0;
skb               218 drivers/net/can/slcan.c 	skb_put_data(skb, &cf, sizeof(struct can_frame));
skb               222 drivers/net/can/slcan.c 	netif_rx_ni(skb);
skb               358 drivers/net/can/slcan.c static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
skb               362 drivers/net/can/slcan.c 	if (skb->len != CAN_MTU)
skb               377 drivers/net/can/slcan.c 	slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
skb               381 drivers/net/can/slcan.c 	kfree_skb(skb);
skb                51 drivers/net/can/softing/softing_main.c static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
skb                59 drivers/net/can/softing/softing_main.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb                62 drivers/net/can/softing/softing_main.c 	if (can_dropped_invalid_skb(dev, skb))
skb               107 drivers/net/can/softing/softing_main.c 	can_put_echo_skb(skb, dev, priv->tx.echo_put);
skb               134 drivers/net/can/softing/softing_main.c 	struct sk_buff *skb;
skb               137 drivers/net/can/softing/softing_main.c 	skb = alloc_can_skb(netdev, &cf);
skb               138 drivers/net/can/softing/softing_main.c 	if (!skb)
skb               141 drivers/net/can/softing/softing_main.c 	skb->tstamp = ktime;
skb               142 drivers/net/can/softing/softing_main.c 	return netif_rx(skb);
skb               283 drivers/net/can/softing/softing_main.c 			struct sk_buff *skb;
skb               284 drivers/net/can/softing/softing_main.c 			skb = priv->can.echo_skb[priv->tx.echo_get];
skb               285 drivers/net/can/softing/softing_main.c 			if (skb)
skb               286 drivers/net/can/softing/softing_main.c 				skb->tstamp = ktime;
skb               316 drivers/net/can/spi/hi311x.c 	struct sk_buff *skb;
skb               320 drivers/net/can/spi/hi311x.c 	skb = alloc_can_skb(priv->net, &frame);
skb               321 drivers/net/can/spi/hi311x.c 	if (!skb) {
skb               357 drivers/net/can/spi/hi311x.c 	netif_rx_ni(skb);
skb               365 drivers/net/can/spi/hi311x.c static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb,
skb               376 drivers/net/can/spi/hi311x.c 	if (can_dropped_invalid_skb(net, skb))
skb               380 drivers/net/can/spi/hi311x.c 	priv->tx_skb = skb;
skb               663 drivers/net/can/spi/hi311x.c 			struct sk_buff *skb;
skb               667 drivers/net/can/spi/hi311x.c 			skb = alloc_can_err_skb(net, &cf);
skb               668 drivers/net/can/spi/hi311x.c 			if (!skb)
skb               678 drivers/net/can/spi/hi311x.c 			netif_rx_ni(skb);
skb               694 drivers/net/can/spi/hi311x.c 			struct sk_buff *skb;
skb               698 drivers/net/can/spi/hi311x.c 				skb = alloc_can_err_skb(net, &cf);
skb               699 drivers/net/can/spi/hi311x.c 				if (!skb)
skb               719 drivers/net/can/spi/hi311x.c 				netif_rx_ni(skb);
skb               409 drivers/net/can/spi/mcp251x.c 	struct sk_buff *skb;
skb               413 drivers/net/can/spi/mcp251x.c 	skb = alloc_can_skb(priv->net, &frame);
skb               414 drivers/net/can/spi/mcp251x.c 	if (!skb) {
skb               452 drivers/net/can/spi/mcp251x.c 	netif_rx_ni(skb);
skb               460 drivers/net/can/spi/mcp251x.c static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
skb               471 drivers/net/can/spi/mcp251x.c 	if (can_dropped_invalid_skb(net, skb))
skb               475 drivers/net/can/spi/mcp251x.c 	priv->tx_skb = skb;
skb               670 drivers/net/can/spi/mcp251x.c 	struct sk_buff *skb;
skb               673 drivers/net/can/spi/mcp251x.c 	skb = alloc_can_err_skb(net, &frame);
skb               674 drivers/net/can/spi/mcp251x.c 	if (skb) {
skb               677 drivers/net/can/spi/mcp251x.c 		netif_rx_ni(skb);
skb               412 drivers/net/can/sun4i_can.c static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               415 drivers/net/can/sun4i_can.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               421 drivers/net/can/sun4i_can.c 	if (can_dropped_invalid_skb(dev, skb))
skb               451 drivers/net/can/sun4i_can.c 	can_put_echo_skb(skb, dev, 0);
skb               466 drivers/net/can/sun4i_can.c 	struct sk_buff *skb;
skb               473 drivers/net/can/sun4i_can.c 	skb = alloc_can_skb(dev, &cf);
skb               474 drivers/net/can/sun4i_can.c 	if (!skb)
skb               505 drivers/net/can/sun4i_can.c 	netif_rx(skb);
skb               515 drivers/net/can/sun4i_can.c 	struct sk_buff *skb;
skb               522 drivers/net/can/sun4i_can.c 	skb = alloc_can_err_skb(dev, &cf);
skb               528 drivers/net/can/sun4i_can.c 	if (skb) {
skb               536 drivers/net/can/sun4i_can.c 		if (likely(skb)) {
skb               569 drivers/net/can/sun4i_can.c 		if (likely(skb)) {
skb               608 drivers/net/can/sun4i_can.c 		if (likely(skb)) {
skb               618 drivers/net/can/sun4i_can.c 		if (likely(skb))
skb               626 drivers/net/can/sun4i_can.c 	if (likely(skb)) {
skb               629 drivers/net/can/sun4i_can.c 		netif_rx(skb);
skb               475 drivers/net/can/ti_hecc.c static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               478 drivers/net/can/ti_hecc.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               482 drivers/net/can/ti_hecc.c 	if (can_dropped_invalid_skb(ndev, skb))
skb               516 drivers/net/can/ti_hecc.c 	can_put_echo_skb(skb, ndev, mbxno);
skb               593 drivers/net/can/ti_hecc.c 	struct sk_buff *skb;
skb               599 drivers/net/can/ti_hecc.c 		skb = alloc_can_err_skb(ndev, &cf);
skb               600 drivers/net/can/ti_hecc.c 		if (!skb) {
skb               622 drivers/net/can/ti_hecc.c 		err = can_rx_offload_queue_sorted(&priv->offload, skb,
skb               639 drivers/net/can/ti_hecc.c 	struct sk_buff *skb;
skb               643 drivers/net/can/ti_hecc.c 	skb = alloc_can_err_skb(priv->ndev, &cf);
skb               644 drivers/net/can/ti_hecc.c 	if (unlikely(!skb)) {
skb               657 drivers/net/can/ti_hecc.c 	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
skb               300 drivers/net/can/usb/ems_usb.c 	struct sk_buff *skb;
skb               304 drivers/net/can/usb/ems_usb.c 	skb = alloc_can_skb(dev->netdev, &cf);
skb               305 drivers/net/can/usb/ems_usb.c 	if (skb == NULL)
skb               325 drivers/net/can/usb/ems_usb.c 	netif_rx(skb);
skb               331 drivers/net/can/usb/ems_usb.c 	struct sk_buff *skb;
skb               334 drivers/net/can/usb/ems_usb.c 	skb = alloc_can_err_skb(dev->netdev, &cf);
skb               335 drivers/net/can/usb/ems_usb.c 	if (skb == NULL)
skb               400 drivers/net/can/usb/ems_usb.c 	netif_rx(skb);
skb               727 drivers/net/can/usb/ems_usb.c static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb               732 drivers/net/can/usb/ems_usb.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               740 drivers/net/can/usb/ems_usb.c 	if (can_dropped_invalid_skb(netdev, skb))
skb               804 drivers/net/can/usb/ems_usb.c 	can_put_echo_skb(skb, netdev, context->echo_index);
skb               814 drivers/net/can/usb/ems_usb.c 		dev_kfree_skb(skb);
skb               844 drivers/net/can/usb/ems_usb.c 	dev_kfree_skb(skb);
skb               219 drivers/net/can/usb/esd_usb2.c 	struct sk_buff *skb;
skb               228 drivers/net/can/usb/esd_usb2.c 		skb = alloc_can_err_skb(priv->netdev, &cf);
skb               229 drivers/net/can/usb/esd_usb2.c 		if (skb == NULL) {
skb               296 drivers/net/can/usb/esd_usb2.c 		netif_rx(skb);
skb               305 drivers/net/can/usb/esd_usb2.c 	struct sk_buff *skb;
skb               317 drivers/net/can/usb/esd_usb2.c 		skb = alloc_can_skb(priv->netdev, &cf);
skb               318 drivers/net/can/usb/esd_usb2.c 		if (skb == NULL) {
skb               338 drivers/net/can/usb/esd_usb2.c 		netif_rx(skb);
skb               700 drivers/net/can/usb/esd_usb2.c static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
skb               707 drivers/net/can/usb/esd_usb2.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               715 drivers/net/can/usb/esd_usb2.c 	if (can_dropped_invalid_skb(netdev, skb))
skb               722 drivers/net/can/usb/esd_usb2.c 		dev_kfree_skb(skb);
skb               731 drivers/net/can/usb/esd_usb2.c 		dev_kfree_skb(skb);
skb               785 drivers/net/can/usb/esd_usb2.c 	can_put_echo_skb(skb, netdev, context->echo_index);
skb               300 drivers/net/can/usb/gs_usb.c 	struct sk_buff *skb;
skb               328 drivers/net/can/usb/gs_usb.c 		skb = alloc_can_skb(dev->netdev, &cf);
skb               329 drivers/net/can/usb/gs_usb.c 		if (!skb)
skb               344 drivers/net/can/usb/gs_usb.c 		netif_rx(skb);
skb               376 drivers/net/can/usb/gs_usb.c 		skb = alloc_can_err_skb(netdev, &cf);
skb               377 drivers/net/can/usb/gs_usb.c 		if (!skb)
skb               385 drivers/net/can/usb/gs_usb.c 		netif_rx(skb);
skb               462 drivers/net/can/usb/gs_usb.c static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
skb               474 drivers/net/can/usb/gs_usb.c 	if (can_dropped_invalid_skb(netdev, skb))
skb               504 drivers/net/can/usb/gs_usb.c 	cf = (struct can_frame *)skb->data;
skb               520 drivers/net/can/usb/gs_usb.c 	can_put_echo_skb(skb, netdev, idx);
skb               564 drivers/net/can/usb/gs_usb.c 	dev_kfree_skb(skb);
skb               165 drivers/net/can/usb/kvaser_usb/kvaser_usb.h 				  const struct sk_buff *skb, int *frame_len,
skb               245 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c 	struct sk_buff *skb;
skb               250 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c 	skb = alloc_can_err_skb(netdev, &cf);
skb               251 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c 	if (!skb) {
skb               262 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c 	netif_rx(skb);
skb               501 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
skb               515 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c 	if (can_dropped_invalid_skb(netdev, skb))
skb               521 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c 		dev_kfree_skb(skb);
skb               548 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c 	buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
skb               552 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c 		dev_kfree_skb(skb);
skb               565 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c 	can_put_echo_skb(skb, netdev, context->echo_index);
skb               845 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	struct sk_buff *skb;
skb               864 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	skb = alloc_can_err_skb(netdev, &cf);
skb               865 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	if (skb) {
skb               883 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	if (!skb) {
skb               899 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	netif_rx(skb);
skb               993 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	struct sk_buff *skb;
skb              1010 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	skb = alloc_can_err_skb(netdev, &cf);
skb              1013 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 		if (skb) {
skb              1038 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	if (!skb) {
skb              1044 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	shhwtstamps = skb_hwtstamps(skb);
skb              1053 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	netif_rx(skb);
skb              1065 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	struct sk_buff *skb;
skb              1068 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	skb = alloc_can_err_skb(netdev, &cf);
skb              1069 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	if (!skb) {
skb              1088 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	netif_rx(skb);
skb              1141 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	struct sk_buff *skb;
skb              1162 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	skb = alloc_can_skb(priv->netdev, &cf);
skb              1163 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	if (!skb) {
skb              1168 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	shhwtstamps = skb_hwtstamps(skb);
skb              1192 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	netif_rx(skb);
skb              1201 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	struct sk_buff *skb;
skb              1229 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 		skb = alloc_canfd_skb(priv->netdev, &cf);
skb              1231 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 		skb = alloc_can_skb(priv->netdev, (struct can_frame **)&cf);
skb              1233 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	if (!skb) {
skb              1238 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	shhwtstamps = skb_hwtstamps(skb);
skb              1270 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	netif_rx(skb);
skb              1348 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 				  const struct sk_buff *skb, int *frame_len,
skb              1353 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
skb              1404 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	if (can_is_canfd_skb(skb)) {
skb              1428 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 				  const struct sk_buff *skb, int *frame_len,
skb              1433 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb              1978 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 			      const struct sk_buff *skb, int *frame_len,
skb              1984 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 		buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len,
skb              1987 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 		buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len,
skb               345 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 			     const struct sk_buff *skb, int *frame_len,
skb               351 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               571 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 		struct sk_buff *skb;
skb               574 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 		skb = alloc_can_err_skb(priv->netdev, &cf);
skb               575 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 		if (skb) {
skb               580 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 			netif_rx(skb);
skb               698 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	struct sk_buff *skb;
skb               726 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	skb = alloc_can_err_skb(priv->netdev, &cf);
skb               727 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	if (!skb) {
skb               782 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	netif_rx(skb);
skb               927 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	struct sk_buff *skb;
skb               967 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	skb = alloc_can_skb(priv->netdev, &cf);
skb               968 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	if (!skb) {
skb              1009 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 	netif_rx(skb);
skb               309 drivers/net/can/usb/mcba_usb.c static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
skb               313 drivers/net/can/usb/mcba_usb.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               322 drivers/net/can/usb/mcba_usb.c 	if (can_dropped_invalid_skb(netdev, skb))
skb               329 drivers/net/can/usb/mcba_usb.c 	can_put_echo_skb(skb, priv->netdev, ctx->ndx);
skb               369 drivers/net/can/usb/mcba_usb.c 	dev_kfree_skb(skb);
skb               422 drivers/net/can/usb/mcba_usb.c 	struct sk_buff *skb;
skb               426 drivers/net/can/usb/mcba_usb.c 	skb = alloc_can_skb(priv->netdev, &cf);
skb               427 drivers/net/can/usb/mcba_usb.c 	if (!skb)
skb               462 drivers/net/can/usb/mcba_usb.c 	netif_rx(skb);
skb               401 drivers/net/can/usb/peak_usb/pcan_usb.c 	struct sk_buff *skb;
skb               479 drivers/net/can/usb/peak_usb/pcan_usb.c 	skb = alloc_can_err_skb(mc->netdev, &cf);
skb               480 drivers/net/can/usb/peak_usb/pcan_usb.c 	if (!skb)
skb               523 drivers/net/can/usb/peak_usb/pcan_usb.c 		struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
skb               531 drivers/net/can/usb/peak_usb/pcan_usb.c 	netif_rx(skb);
skb               613 drivers/net/can/usb/peak_usb/pcan_usb.c 	struct sk_buff *skb;
skb               617 drivers/net/can/usb/peak_usb/pcan_usb.c 	skb = alloc_can_skb(mc->netdev, &cf);
skb               618 drivers/net/can/usb/peak_usb/pcan_usb.c 	if (!skb)
skb               665 drivers/net/can/usb/peak_usb/pcan_usb.c 	hwts = skb_hwtstamps(skb);
skb               672 drivers/net/can/usb/peak_usb/pcan_usb.c 	netif_rx(skb);
skb               677 drivers/net/can/usb/peak_usb/pcan_usb.c 	dev_kfree_skb(skb);
skb               733 drivers/net/can/usb/peak_usb/pcan_usb.c static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
skb               738 drivers/net/can/usb/peak_usb/pcan_usb.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               152 drivers/net/can/usb/peak_usb/pcan_usb_core.c int peak_usb_netif_rx(struct sk_buff *skb,
skb               155 drivers/net/can/usb/peak_usb/pcan_usb_core.c 	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
skb               159 drivers/net/can/usb/peak_usb/pcan_usb_core.c 	return netif_rx(skb);
skb               281 drivers/net/can/usb/peak_usb/pcan_usb_core.c static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
skb               287 drivers/net/can/usb/peak_usb/pcan_usb_core.c 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
skb               293 drivers/net/can/usb/peak_usb/pcan_usb_core.c 	if (can_dropped_invalid_skb(netdev, skb))
skb               310 drivers/net/can/usb/peak_usb/pcan_usb_core.c 	err = dev->adapter->dev_encode_msg(dev, skb, obuf, &size);
skb               314 drivers/net/can/usb/peak_usb/pcan_usb_core.c 		dev_kfree_skb(skb);
skb               326 drivers/net/can/usb/peak_usb/pcan_usb_core.c 	can_put_echo_skb(skb, netdev, context->echo_index);
skb                63 drivers/net/can/usb/peak_usb/pcan_usb_core.h 	int (*dev_encode_msg)(struct peak_usb_device *dev, struct sk_buff *skb,
skb               147 drivers/net/can/usb/peak_usb/pcan_usb_core.h int peak_usb_netif_rx(struct sk_buff *skb,
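
peak_usb_netif_rx() above is where the pcan drivers convert the adapter's tick timestamp into host time before delivery. Conceptually it reduces to filling the skb's shared hardware-timestamp area and then calling netif_rx(); a sketch, with hw_ns standing in for the nanosecond value the real driver interpolates from its time reference:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/ktime.h>

	static int my_rx_with_hwtstamp(struct sk_buff *skb, u64 hw_ns)
	{
		struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);

		hwts->hwtstamp = ns_to_ktime(hw_ns);	/* seen by SO_TIMESTAMPING users */
		return netif_rx(skb);
	}
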
skb               474 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	struct sk_buff *skb;
skb               479 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 		skb = alloc_canfd_skb(netdev, &cfd);
skb               480 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 		if (!skb)
skb               492 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 		skb = alloc_can_skb(netdev, (struct can_frame **)&cfd);
skb               493 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 		if (!skb)
skb               509 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
skb               529 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	struct sk_buff *skb;
skb               558 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	skb = alloc_can_err_skb(netdev, &cf);
skb               559 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	if (skb)
skb               566 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	if (!skb)
skb               569 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
skb               601 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	struct sk_buff *skb;
skb               604 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	skb = alloc_can_err_skb(netdev, &cf);
skb               605 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	if (!skb)
skb               611 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low));
skb               712 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 				  struct sk_buff *skb, u8 *obuf, size_t *size)
skb               715 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
skb               731 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	if (can_is_canfd_skb(skb)) {
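
The pcan_usb_fd receive path above picks the skb flavour from the record type: alloc_canfd_skb() for CAN FD frames, alloc_can_skb() cast through the same canfd_frame pointer for classic ones. Condensed into one hypothetical helper, with is_fd standing in for the driver's flag test:

	#include <linux/can/dev.h>

	static struct sk_buff *my_alloc_rx_skb(struct net_device *netdev, bool is_fd,
					       struct canfd_frame **cfd)
	{
		if (is_fd)
			return alloc_canfd_skb(netdev, cfd);

		/* classic frame: struct can_frame layout is a prefix of canfd_frame */
		return alloc_can_skb(netdev, (struct can_frame **)cfd);
	}
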
skb               527 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	struct sk_buff *skb;
skb               530 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	skb = alloc_can_skb(netdev, &can_frame);
skb               531 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	if (!skb)
skb               545 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	hwts = skb_hwtstamps(skb);
skb               551 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	netif_rx(skb);
skb               566 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	struct sk_buff *skb;
skb               614 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	skb = alloc_can_err_skb(netdev, &can_frame);
skb               615 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	if (!skb)
skb               662 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	hwts = skb_hwtstamps(skb);
skb               666 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	netif_rx(skb);
skb               762 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 				   struct sk_buff *skb, u8 *obuf, size_t *size)
skb               764 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               570 drivers/net/can/usb/ucan.c 	struct sk_buff *skb;
skb               609 drivers/net/can/usb/ucan.c 	skb = alloc_can_skb(up->netdev, &cf);
skb               610 drivers/net/can/usb/ucan.c 	if (!skb)
skb               628 drivers/net/can/usb/ucan.c 	netif_rx(skb);
skb              1111 drivers/net/can/usb/ucan.c static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
skb              1120 drivers/net/can/usb/ucan.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb              1123 drivers/net/can/usb/ucan.c 	if (can_dropped_invalid_skb(netdev, skb))
skb              1140 drivers/net/can/usb/ucan.c 	can_put_echo_skb(skb, up->netdev, echo_index);
skb              1185 drivers/net/can/usb/ucan.c 	dev_kfree_skb(skb);
skb               339 drivers/net/can/usb/usb_8dev.c 	struct sk_buff *skb;
skb               356 drivers/net/can/usb/usb_8dev.c 	skb = alloc_can_err_skb(priv->netdev, &cf);
skb               357 drivers/net/can/usb/usb_8dev.c 	if (!skb)
skb               453 drivers/net/can/usb/usb_8dev.c 	netif_rx(skb);
skb               461 drivers/net/can/usb/usb_8dev.c 	struct sk_buff *skb;
skb               468 drivers/net/can/usb/usb_8dev.c 		skb = alloc_can_skb(priv->netdev, &cf);
skb               469 drivers/net/can/usb/usb_8dev.c 		if (!skb)
skb               485 drivers/net/can/usb/usb_8dev.c 		netif_rx(skb);
skb               599 drivers/net/can/usb/usb_8dev.c static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
skb               604 drivers/net/can/usb/usb_8dev.c 	struct can_frame *cf = (struct can_frame *) skb->data;
skb               612 drivers/net/can/usb/usb_8dev.c 	if (can_dropped_invalid_skb(netdev, skb))
skb               667 drivers/net/can/usb/usb_8dev.c 	can_put_echo_skb(skb, netdev, context->echo_index);
skb               710 drivers/net/can/usb/usb_8dev.c 	dev_kfree_skb(skb);
skb                71 drivers/net/can/vcan.c static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
skb                73 drivers/net/can/vcan.c 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
skb                79 drivers/net/can/vcan.c 	skb->pkt_type  = PACKET_BROADCAST;
skb                80 drivers/net/can/vcan.c 	skb->dev       = dev;
skb                81 drivers/net/can/vcan.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb                83 drivers/net/can/vcan.c 	netif_rx_ni(skb);
skb                86 drivers/net/can/vcan.c static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
skb                88 drivers/net/can/vcan.c 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
skb                92 drivers/net/can/vcan.c 	if (can_dropped_invalid_skb(dev, skb))
skb                99 drivers/net/can/vcan.c 	loop = skb->pkt_type == PACKET_LOOPBACK;
skb               110 drivers/net/can/vcan.c 		consume_skb(skb);
skb               117 drivers/net/can/vcan.c 		skb = can_create_echo_skb(skb);
skb               118 drivers/net/can/vcan.c 		if (!skb)
skb               122 drivers/net/can/vcan.c 		vcan_rx(skb, dev);
skb               125 drivers/net/can/vcan.c 		consume_skb(skb);
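
vcan_tx() shows the loopback contract for virtual CAN devices: an skb marked PACKET_LOOPBACK must be echoed to its sender, so the driver takes private ownership with can_create_echo_skb() before re-injecting it through the receive path. A condensed sketch of that branch (statistics and the no-loopback fast path are left out):

	#include <linux/can/dev.h>
	#include <linux/can/skb.h>

	static netdev_tx_t my_vcan_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (can_dropped_invalid_skb(dev, skb))
			return NETDEV_TX_OK;

		skb = can_create_echo_skb(skb);	/* reuses or clones as needed */
		if (!skb)
			return NETDEV_TX_OK;

		skb->pkt_type  = PACKET_BROADCAST;
		skb->dev       = dev;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		netif_rx_ni(skb);		/* ndo_start_xmit runs in process context */
		return NETDEV_TX_OK;
	}
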
skb                36 drivers/net/can/vxcan.c static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb                40 drivers/net/can/vxcan.c 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
skb                43 drivers/net/can/vxcan.c 	if (can_dropped_invalid_skb(dev, skb))
skb                49 drivers/net/can/vxcan.c 		kfree_skb(skb);
skb                54 drivers/net/can/vxcan.c 	skb = can_create_echo_skb(skb);
skb                55 drivers/net/can/vxcan.c 	if (!skb)
skb                59 drivers/net/can/vxcan.c 	skb->csum_start = 0;
skb                60 drivers/net/can/vxcan.c 	skb->pkt_type   = PACKET_BROADCAST;
skb                61 drivers/net/can/vxcan.c 	skb->dev        = peer;
skb                62 drivers/net/can/vxcan.c 	skb->ip_summed  = CHECKSUM_UNNECESSARY;
skb                64 drivers/net/can/vxcan.c 	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
skb               546 drivers/net/can/xilinx_can.c static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
skb               550 drivers/net/can/xilinx_can.c 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
skb               581 drivers/net/can/xilinx_can.c 	if (can_is_canfd_skb(skb)) {
skb               628 drivers/net/can/xilinx_can.c static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
skb               638 drivers/net/can/xilinx_can.c 	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
skb               644 drivers/net/can/xilinx_can.c 	xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
skb               666 drivers/net/can/xilinx_can.c static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
skb               675 drivers/net/can/xilinx_can.c 	can_put_echo_skb(skb, ndev, 0);
skb               681 drivers/net/can/xilinx_can.c 	xcan_write_frame(priv, skb,
skb               703 drivers/net/can/xilinx_can.c static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               708 drivers/net/can/xilinx_can.c 	if (can_dropped_invalid_skb(ndev, skb))
skb               712 drivers/net/can/xilinx_can.c 		ret = xcan_start_xmit_mailbox(skb, ndev);
skb               714 drivers/net/can/xilinx_can.c 		ret = xcan_start_xmit_fifo(skb, ndev);
skb               741 drivers/net/can/xilinx_can.c 	struct sk_buff *skb;
skb               744 drivers/net/can/xilinx_can.c 	skb = alloc_can_skb(ndev, &cf);
skb               745 drivers/net/can/xilinx_can.c 	if (unlikely(!skb)) {
skb               789 drivers/net/can/xilinx_can.c 	netif_receive_skb(skb);
skb               810 drivers/net/can/xilinx_can.c 	struct sk_buff *skb;
skb               816 drivers/net/can/xilinx_can.c 		skb = alloc_canfd_skb(ndev, &cf);
skb               818 drivers/net/can/xilinx_can.c 		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
skb               820 drivers/net/can/xilinx_can.c 	if (unlikely(!skb)) {
skb               871 drivers/net/can/xilinx_can.c 	netif_receive_skb(skb);
skb               956 drivers/net/can/xilinx_can.c 		struct sk_buff *skb;
skb               959 drivers/net/can/xilinx_can.c 		skb = alloc_can_err_skb(ndev, &cf);
skb               961 drivers/net/can/xilinx_can.c 		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
skb               963 drivers/net/can/xilinx_can.c 		if (skb) {
skb               968 drivers/net/can/xilinx_can.c 			netif_rx(skb);
skb               987 drivers/net/can/xilinx_can.c 	struct sk_buff *skb;
skb               990 drivers/net/can/xilinx_can.c 	skb = alloc_can_err_skb(ndev, &cf);
skb              1001 drivers/net/can/xilinx_can.c 		if (skb)
skb              1007 drivers/net/can/xilinx_can.c 			xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
skb              1013 drivers/net/can/xilinx_can.c 		if (skb) {
skb              1023 drivers/net/can/xilinx_can.c 		if (skb) {
skb              1034 drivers/net/can/xilinx_can.c 		if (skb) {
skb              1042 drivers/net/can/xilinx_can.c 		if (skb)
skb              1048 drivers/net/can/xilinx_can.c 			if (skb) {
skb              1057 drivers/net/can/xilinx_can.c 			if (skb) {
skb              1066 drivers/net/can/xilinx_can.c 			if (skb) {
skb              1075 drivers/net/can/xilinx_can.c 			if (skb) {
skb              1084 drivers/net/can/xilinx_can.c 			if (skb) {
skb              1092 drivers/net/can/xilinx_can.c 	if (skb) {
skb              1095 drivers/net/can/xilinx_can.c 		netif_rx(skb);
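
Unlike the USB drivers above, xilinx_can receives inside a NAPI poll loop, so completed frames go up with netif_receive_skb() instead of netif_rx(). A hypothetical fragment of that poll-side path; the FIFO reads are placeholders:

	#include <linux/can/dev.h>

	static int my_napi_rx_one(struct net_device *ndev)
	{
		struct can_frame *cf;
		struct sk_buff *skb;

		skb = alloc_can_skb(ndev, &cf);
		if (unlikely(!skb)) {
			ndev->stats.rx_dropped++;
			return 0;
		}

		cf->can_id  = 0x123;	/* placeholder: decoded from the RX FIFO */
		cf->can_dlc = 0;	/* placeholder: ditto */

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);	/* NAPI context: skip the softirq bounce */
		return 1;		/* one unit of poll budget consumed */
	}
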
skb               215 drivers/net/dsa/mv88e6xxx/hwtstamp.c static u8 *parse_ptp_header(struct sk_buff *skb, unsigned int type)
skb               217 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	u8 *data = skb_mac_header(skb);
skb               238 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	if (skb->len + ETH_HLEN < offset + 34)
skb               248 drivers/net/dsa/mv88e6xxx/hwtstamp.c 				   struct sk_buff *skb, unsigned int type)
skb               256 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	hdr = parse_ptp_header(skb, type);
skb               275 drivers/net/dsa/mv88e6xxx/hwtstamp.c static int seq_match(struct sk_buff *skb, u16 ts_seqid)
skb               277 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	unsigned int type = SKB_PTP_TYPE(skb);
skb               278 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	u8 *hdr = parse_ptp_header(skb, type);
skb               288 drivers/net/dsa/mv88e6xxx/hwtstamp.c 			       struct sk_buff *skb, u16 reg,
skb               326 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	for ( ; skb; skb = __skb_dequeue(&received)) {
skb               327 drivers/net/dsa/mv88e6xxx/hwtstamp.c 		if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
skb               333 drivers/net/dsa/mv88e6xxx/hwtstamp.c 			shwt = skb_hwtstamps(skb);
skb               338 drivers/net/dsa/mv88e6xxx/hwtstamp.c 		netif_rx_ni(skb);
skb               346 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	struct sk_buff *skb;
skb               348 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	skb = skb_dequeue(&ps->rx_queue);
skb               350 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	if (skb)
skb               351 drivers/net/dsa/mv88e6xxx/hwtstamp.c 		mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr0_sts_reg,
skb               354 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	skb = skb_dequeue(&ps->rx_queue2);
skb               355 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	if (skb)
skb               356 drivers/net/dsa/mv88e6xxx/hwtstamp.c 		mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr1_sts_reg,
skb               366 drivers/net/dsa/mv88e6xxx/hwtstamp.c 			     struct sk_buff *skb, unsigned int type)
skb               378 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	hdr = mv88e6xxx_should_tstamp(chip, port, skb, type);
skb               382 drivers/net/dsa/mv88e6xxx/hwtstamp.c 	SKB_PTP_TYPE(skb) = type;
skb               385 drivers/net/dsa/mv88e6xxx/hwtstamp.c 		skb_queue_tail(&ps->rx_queue2, skb);
skb               387 drivers/net/dsa/mv88e6xxx/hwtstamp.c 		skb_queue_tail(&ps->rx_queue, skb);
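
The mv88e6xxx hwtstamp entries illustrate deferred receive timestamping: the PTP arrival time lives in switch registers, so port_rxtstamp() only queues the skb (skb_queue_tail) and a worker later matches it by PTP sequence id, fills skb_hwtstamps(), and finishes delivery with netif_rx_ni(). A sketch of the delivery half, with hw_ns standing in for the value read back from the ARR status registers:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/ktime.h>

	static void my_deliver_rxts(struct sk_buff_head *queue, u64 hw_ns)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(queue)) != NULL) {
			struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);

			shwt->hwtstamp = ns_to_ktime(hw_ns);
			netif_rx_ni(skb);	/* resume the rx path we paused */
		}
	}
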
skb              1758 drivers/net/dsa/sja1105/sja1105_main.c 			     struct sk_buff *skb, bool takets)
skb              1766 drivers/net/dsa/sja1105/sja1105_main.c 	hdr = eth_hdr(skb);
skb              1777 drivers/net/dsa/sja1105/sja1105_main.c 		kfree_skb(skb);
skb              1782 drivers/net/dsa/sja1105/sja1105_main.c 	dsa_enqueue_skb(skb, ds->ports[port].slave);
skb              1820 drivers/net/dsa/sja1105/sja1105_main.c 					      struct sk_buff *skb)
skb              1847 drivers/net/dsa/sja1105/sja1105_main.c 	clone = DSA_SKB_CB(skb)->clone;
skb              1849 drivers/net/dsa/sja1105/sja1105_main.c 	sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);
skb              2010 drivers/net/dsa/sja1105/sja1105_main.c 	struct sk_buff *skb;
skb              2015 drivers/net/dsa/sja1105/sja1105_main.c 	while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
skb              2016 drivers/net/dsa/sja1105/sja1105_main.c 		struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
skb              2023 drivers/net/dsa/sja1105/sja1105_main.c 		ts = SJA1105_SKB_CB(skb)->meta_tstamp;
skb              2028 drivers/net/dsa/sja1105/sja1105_main.c 		netif_rx_ni(skb);
skb              2036 drivers/net/dsa/sja1105/sja1105_main.c 				  struct sk_buff *skb, unsigned int type)
skb              2047 drivers/net/dsa/sja1105/sja1105_main.c 	skb_queue_tail(&data->skb_rxtstamp_queue, skb);
skb              2057 drivers/net/dsa/sja1105/sja1105_main.c 				  struct sk_buff *skb, unsigned int type)
skb                81 drivers/net/dummy.c static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
skb                87 drivers/net/dummy.c 	dstats->tx_bytes += skb->len;
skb                90 drivers/net/dummy.c 	skb_tx_timestamp(skb);
skb                91 drivers/net/dummy.c 	dev_kfree_skb(skb);
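
dummy_xmit is about the smallest valid ndo_start_xmit: account the packet, give the stack its software transmit-timestamp hook, free the skb. The same three steps, sketched with plain dev->stats instead of dummy's per-cpu counters:

	#include <linux/netdevice.h>

	static netdev_tx_t my_null_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;

		skb_tx_timestamp(skb);	/* must run before the skb is freed */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
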
skb               135 drivers/net/eql.c static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
skb               331 drivers/net/eql.c static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
skb               342 drivers/net/eql.c 		skb->dev = slave_dev;
skb               343 drivers/net/eql.c 		skb->priority = TC_PRIO_FILLER;
skb               344 drivers/net/eql.c 		slave->bytes_queued += skb->len;
skb               345 drivers/net/eql.c 		dev_queue_xmit(skb);
skb               349 drivers/net/eql.c 		dev_kfree_skb(skb);
skb               192 drivers/net/ethernet/3com/3c509.c static netdev_tx_t el3_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb               710 drivers/net/ethernet/3com/3c509.c el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               718 drivers/net/ethernet/3com/3c509.c 	dev->stats.tx_bytes += skb->len;
skb               722 drivers/net/ethernet/3com/3c509.c 			   dev->name, skb->len, inw(ioaddr + EL3_STATUS));
skb               738 drivers/net/ethernet/3com/3c509.c 	outw(skb->len, ioaddr + TX_FIFO);
skb               741 drivers/net/ethernet/3com/3c509.c 	outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
skb               751 drivers/net/ethernet/3com/3c509.c 	dev_consume_skb_any (skb);
skb               939 drivers/net/ethernet/3com/3c509.c 			struct sk_buff *skb;
skb               941 drivers/net/ethernet/3com/3c509.c 			skb = netdev_alloc_skb(dev, pkt_len + 5);
skb               945 drivers/net/ethernet/3com/3c509.c 			if (skb != NULL) {
skb               946 drivers/net/ethernet/3com/3c509.c 				skb_reserve(skb, 2);     /* Align IP on 16 byte */
skb               949 drivers/net/ethernet/3com/3c509.c 				insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len),
skb               953 drivers/net/ethernet/3com/3c509.c 				skb->protocol = eth_type_trans(skb,dev);
skb               954 drivers/net/ethernet/3com/3c509.c 				netif_rx(skb);
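
The 3c509 receive code is the classic programmed-I/O recipe that repeats through the 3com and 8390 entries below: over-allocate slightly, skb_reserve(skb, 2) so the 14-byte Ethernet header leaves the IP header word-aligned, read the FIFO straight into skb_put() space, then classify with eth_type_trans(). Generic sketch; ioaddr and pkt_len would come from the device:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/io.h>

	static void my_pio_rx(struct net_device *dev, long ioaddr, int pkt_len)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + 5);

		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);			/* align the IP header */
		insl(ioaddr, skb_put(skb, pkt_len),	/* FIFO -> skb, 32 bits at a time */
		     (pkt_len + 3) >> 2);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
	}
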
skb               371 drivers/net/ethernet/3com/3c515.c static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
skb               821 drivers/net/ethernet/3com/3c515.c 			struct sk_buff *skb;
skb               829 drivers/net/ethernet/3com/3c515.c 			skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
skb               830 drivers/net/ethernet/3com/3c515.c 			vp->rx_skbuff[i] = skb;
skb               831 drivers/net/ethernet/3com/3c515.c 			if (skb == NULL)
skb               833 drivers/net/ethernet/3com/3c515.c 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb               834 drivers/net/ethernet/3com/3c515.c 			vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
skb              1001 drivers/net/ethernet/3com/3c515.c static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
skb              1028 drivers/net/ethernet/3com/3c515.c 		vp->tx_skbuff[entry] = skb;
skb              1030 drivers/net/ethernet/3com/3c515.c 		vp->tx_ring[entry].addr = isa_virt_to_bus(skb->data);
skb              1031 drivers/net/ethernet/3com/3c515.c 		vp->tx_ring[entry].length = skb->len | 0x80000000;
skb              1032 drivers/net/ethernet/3com/3c515.c 		vp->tx_ring[entry].status = skb->len | 0x80000000;
skb              1061 drivers/net/ethernet/3com/3c515.c 	outl(skb->len, ioaddr + TX_FIFO);
skb              1062 drivers/net/ethernet/3com/3c515.c 	dev->stats.tx_bytes += skb->len;
skb              1066 drivers/net/ethernet/3com/3c515.c 		outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
skb              1067 drivers/net/ethernet/3com/3c515.c 		outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
skb              1068 drivers/net/ethernet/3com/3c515.c 		vp->tx_skb = skb;
skb              1073 drivers/net/ethernet/3com/3c515.c 		outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
skb              1074 drivers/net/ethernet/3com/3c515.c 		dev_kfree_skb(skb);
skb              1084 drivers/net/ethernet/3com/3c515.c 	outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
skb              1085 drivers/net/ethernet/3com/3c515.c 	dev_kfree_skb(skb);
skb              1295 drivers/net/ethernet/3com/3c515.c 			struct sk_buff *skb;
skb              1297 drivers/net/ethernet/3com/3c515.c 			skb = netdev_alloc_skb(dev, pkt_len + 5 + 2);
skb              1301 drivers/net/ethernet/3com/3c515.c 			if (skb != NULL) {
skb              1302 drivers/net/ethernet/3com/3c515.c 				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb              1305 drivers/net/ethernet/3com/3c515.c 				     skb_put(skb, pkt_len),
skb              1308 drivers/net/ethernet/3com/3c515.c 				skb->protocol = eth_type_trans(skb, dev);
skb              1309 drivers/net/ethernet/3com/3c515.c 				netif_rx(skb);
skb              1360 drivers/net/ethernet/3com/3c515.c 			struct sk_buff *skb;
skb              1370 drivers/net/ethernet/3com/3c515.c 			    (skb = netdev_alloc_skb(dev, pkt_len + 4)) != NULL) {
skb              1371 drivers/net/ethernet/3com/3c515.c 				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb              1373 drivers/net/ethernet/3com/3c515.c 				skb_put_data(skb,
skb              1380 drivers/net/ethernet/3com/3c515.c 				skb = vp->rx_skbuff[entry];
skb              1382 drivers/net/ethernet/3com/3c515.c 				temp = skb_put(skb, pkt_len);
skb              1388 drivers/net/ethernet/3com/3c515.c 						skb->head, temp);
skb              1391 drivers/net/ethernet/3com/3c515.c 			skb->protocol = eth_type_trans(skb, dev);
skb              1392 drivers/net/ethernet/3com/3c515.c 			netif_rx(skb);
skb              1399 drivers/net/ethernet/3com/3c515.c 		struct sk_buff *skb;
skb              1402 drivers/net/ethernet/3com/3c515.c 			skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
skb              1403 drivers/net/ethernet/3com/3c515.c 			if (skb == NULL)
skb              1405 drivers/net/ethernet/3com/3c515.c 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb              1406 drivers/net/ethernet/3com/3c515.c 			vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
skb              1407 drivers/net/ethernet/3com/3c515.c 			vp->rx_skbuff[entry] = skb;
skb               230 drivers/net/ethernet/3com/3c574_cs.c static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
skb               730 drivers/net/ethernet/3com/3c574_cs.c static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
skb               738 drivers/net/ethernet/3com/3c574_cs.c 		  "status %4.4x.\n", dev->name, (long)skb->len,
skb               743 drivers/net/ethernet/3com/3c574_cs.c 	dev->stats.tx_bytes += skb->len;
skb               746 drivers/net/ethernet/3com/3c574_cs.c 	outw(skb->len, ioaddr + TX_FIFO);
skb               749 drivers/net/ethernet/3com/3c574_cs.c 	outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
skb               761 drivers/net/ethernet/3com/3c574_cs.c 	dev_kfree_skb(skb);
skb              1008 drivers/net/ethernet/3com/3c574_cs.c 			struct sk_buff *skb;
skb              1010 drivers/net/ethernet/3com/3c574_cs.c 			skb = netdev_alloc_skb(dev, pkt_len + 5);
skb              1014 drivers/net/ethernet/3com/3c574_cs.c 			if (skb != NULL) {
skb              1015 drivers/net/ethernet/3com/3c574_cs.c 				skb_reserve(skb, 2);
skb              1016 drivers/net/ethernet/3com/3c574_cs.c 				insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
skb              1018 drivers/net/ethernet/3com/3c574_cs.c 				skb->protocol = eth_type_trans(skb, dev);
skb              1019 drivers/net/ethernet/3com/3c574_cs.c 				netif_rx(skb);
skb               169 drivers/net/ethernet/3com/3c589_cs.c static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
skb               565 drivers/net/ethernet/3com/3c589_cs.c static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
skb               573 drivers/net/ethernet/3com/3c589_cs.c 	       (long)skb->len, inw(ioaddr + EL3_STATUS));
skb               577 drivers/net/ethernet/3com/3c589_cs.c 	dev->stats.tx_bytes += skb->len;
skb               580 drivers/net/ethernet/3com/3c589_cs.c 	outw(skb->len, ioaddr + TX_FIFO);
skb               583 drivers/net/ethernet/3com/3c589_cs.c 	outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
skb               593 drivers/net/ethernet/3com/3c589_cs.c 	dev_kfree_skb(skb);
skb               855 drivers/net/ethernet/3com/3c589_cs.c 			struct sk_buff *skb;
skb               857 drivers/net/ethernet/3com/3c589_cs.c 			skb = netdev_alloc_skb(dev, pkt_len + 5);
skb               861 drivers/net/ethernet/3com/3c589_cs.c 			if (skb != NULL) {
skb               862 drivers/net/ethernet/3com/3c589_cs.c 				skb_reserve(skb, 2);
skb               863 drivers/net/ethernet/3com/3c589_cs.c 				insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
skb               865 drivers/net/ethernet/3com/3c589_cs.c 				skb->protocol = eth_type_trans(skb, dev);
skb               866 drivers/net/ethernet/3com/3c589_cs.c 				netif_rx(skb);
skb               762 drivers/net/ethernet/3com/3c59x.c static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
skb               764 drivers/net/ethernet/3com/3c59x.c static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
skb              1735 drivers/net/ethernet/3com/3c59x.c 			struct sk_buff *skb;
skb              1740 drivers/net/ethernet/3com/3c59x.c 			skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
skb              1742 drivers/net/ethernet/3com/3c59x.c 			vp->rx_skbuff[i] = skb;
skb              1743 drivers/net/ethernet/3com/3c59x.c 			if (skb == NULL)
skb              1746 drivers/net/ethernet/3com/3c59x.c 			skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
skb              1747 drivers/net/ethernet/3com/3c59x.c 			dma = dma_map_single(vp->gendev, skb->data,
skb              2041 drivers/net/ethernet/3com/3c59x.c vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              2045 drivers/net/ethernet/3com/3c59x.c 	int skblen = skb->len;
skb              2048 drivers/net/ethernet/3com/3c59x.c 	iowrite32(skb->len, ioaddr + TX_FIFO);
skb              2051 drivers/net/ethernet/3com/3c59x.c 		int len = (skb->len + 3) & ~3;
skb              2052 drivers/net/ethernet/3com/3c59x.c 		vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
skb              2055 drivers/net/ethernet/3com/3c59x.c 			dev_kfree_skb_any(skb);
skb              2065 drivers/net/ethernet/3com/3c59x.c 		vp->tx_skb = skb;
skb              2066 drivers/net/ethernet/3com/3c59x.c 		skb_tx_timestamp(skb);
skb              2071 drivers/net/ethernet/3com/3c59x.c 		skb_tx_timestamp(skb);
skb              2072 drivers/net/ethernet/3com/3c59x.c 		iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
skb              2073 drivers/net/ethernet/3com/3c59x.c 		dev_consume_skb_any (skb);
skb              2109 drivers/net/ethernet/3com/3c59x.c boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              2115 drivers/net/ethernet/3com/3c59x.c 	int skblen = skb->len;
skb              2143 drivers/net/ethernet/3com/3c59x.c 	vp->tx_skbuff[entry] = skb;
skb              2147 drivers/net/ethernet/3com/3c59x.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              2148 drivers/net/ethernet/3com/3c59x.c 			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
skb              2150 drivers/net/ethernet/3com/3c59x.c 			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
skb              2152 drivers/net/ethernet/3com/3c59x.c 	if (!skb_shinfo(skb)->nr_frags) {
skb              2153 drivers/net/ethernet/3com/3c59x.c 		dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
skb              2159 drivers/net/ethernet/3com/3c59x.c 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
skb              2163 drivers/net/ethernet/3com/3c59x.c 		dma_addr = dma_map_single(vp->gendev, skb->data,
skb              2164 drivers/net/ethernet/3com/3c59x.c 					  skb_headlen(skb), DMA_TO_DEVICE);
skb              2169 drivers/net/ethernet/3com/3c59x.c 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
skb              2171 drivers/net/ethernet/3com/3c59x.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2172 drivers/net/ethernet/3com/3c59x.c 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2196 drivers/net/ethernet/3com/3c59x.c 			if (i == skb_shinfo(skb)->nr_frags-1)
skb              2203 drivers/net/ethernet/3com/3c59x.c 	dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
skb              2207 drivers/net/ethernet/3com/3c59x.c 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
skb              2208 drivers/net/ethernet/3com/3c59x.c 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
skb              2233 drivers/net/ethernet/3com/3c59x.c 	skb_tx_timestamp(skb);
skb              2431 drivers/net/ethernet/3com/3c59x.c 					struct sk_buff *skb = vp->tx_skbuff[entry];
skb              2439 drivers/net/ethernet/3com/3c59x.c 					for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
skb              2446 drivers/net/ethernet/3com/3c59x.c 						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
skb              2449 drivers/net/ethernet/3com/3c59x.c 					bytes_compl += skb->len;
skb              2450 drivers/net/ethernet/3com/3c59x.c 					dev_consume_skb_irq(skb);
skb              2544 drivers/net/ethernet/3com/3c59x.c 			struct sk_buff *skb;
skb              2546 drivers/net/ethernet/3com/3c59x.c 			skb = netdev_alloc_skb(dev, pkt_len + 5);
skb              2550 drivers/net/ethernet/3com/3c59x.c 			if (skb != NULL) {
skb              2551 drivers/net/ethernet/3com/3c59x.c 				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb              2555 drivers/net/ethernet/3com/3c59x.c 					dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
skb              2558 drivers/net/ethernet/3com/3c59x.c 					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
skb              2565 drivers/net/ethernet/3com/3c59x.c 					             skb_put(skb, pkt_len),
skb              2569 drivers/net/ethernet/3com/3c59x.c 				skb->protocol = eth_type_trans(skb, dev);
skb              2570 drivers/net/ethernet/3com/3c59x.c 				netif_rx(skb);
skb              2616 drivers/net/ethernet/3com/3c59x.c 			struct sk_buff *skb, *newskb;
skb              2627 drivers/net/ethernet/3com/3c59x.c 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb              2628 drivers/net/ethernet/3com/3c59x.c 				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb              2631 drivers/net/ethernet/3com/3c59x.c 				skb_put_data(skb, vp->rx_skbuff[entry]->data,
skb              2654 drivers/net/ethernet/3com/3c59x.c 				skb = vp->rx_skbuff[entry];
skb              2657 drivers/net/ethernet/3com/3c59x.c 				skb_put(skb, pkt_len);
skb              2661 drivers/net/ethernet/3com/3c59x.c 			skb->protocol = eth_type_trans(skb, dev);
skb              2667 drivers/net/ethernet/3com/3c59x.c 					skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2671 drivers/net/ethernet/3com/3c59x.c 			netif_rx(skb);
skb              2764 drivers/net/ethernet/3com/3c59x.c 				struct sk_buff *skb = vp->tx_skbuff[i];
skb              2768 drivers/net/ethernet/3com/3c59x.c 				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
skb              2774 drivers/net/ethernet/3com/3c59x.c 				dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
skb              2776 drivers/net/ethernet/3com/3c59x.c 				dev_kfree_skb(skb);
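
boomerang_start_xmit maps the skb for DMA in pieces: one dma_map_single() for the linear head, one skb_frag_dma_map() per page fragment, each landing in its own descriptor slot. The recurring shape, with the descriptor writes and unwind-on-error left to the caller:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	static int my_map_skb_frags(struct device *d, struct sk_buff *skb,
				    dma_addr_t *addrs)
	{
		int i;

		addrs[0] = dma_map_single(d, skb->data, skb_headlen(skb),
					  DMA_TO_DEVICE);
		if (dma_mapping_error(d, addrs[0]))
			return -ENOMEM;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			addrs[i + 1] = skb_frag_dma_map(d, frag, 0,
							skb_frag_size(frag),
							DMA_TO_DEVICE);
			if (dma_mapping_error(d, addrs[i + 1]))
				return -ENOMEM;	/* caller unmaps slots 0..i */
		}
		return 0;
	}
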
skb               256 drivers/net/ethernet/3com/typhoon.c 	struct sk_buff *skb;
skb               691 drivers/net/ethernet/3com/typhoon.c typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
skb               704 drivers/net/ethernet/3com/typhoon.c 	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
skb               707 drivers/net/ethernet/3com/typhoon.c 	tcpd->bytesTx = cpu_to_le32(skb->len);
skb               712 drivers/net/ethernet/3com/typhoon.c typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
skb               739 drivers/net/ethernet/3com/typhoon.c 	numDesc = skb_shinfo(skb)->nr_frags + 1;
skb               740 drivers/net/ethernet/3com/typhoon.c 	if (skb_is_gso(skb))
skb               761 drivers/net/ethernet/3com/typhoon.c 	first_txd->tx_addr = (u64)((unsigned long) skb);
skb               764 drivers/net/ethernet/3com/typhoon.c 	if(skb->ip_summed == CHECKSUM_PARTIAL) {
skb               771 drivers/net/ethernet/3com/typhoon.c 	if (skb_vlan_tag_present(skb)) {
skb               775 drivers/net/ethernet/3com/typhoon.c 		    cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
skb               779 drivers/net/ethernet/3com/typhoon.c 	if (skb_is_gso(skb)) {
skb               783 drivers/net/ethernet/3com/typhoon.c 		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
skb               792 drivers/net/ethernet/3com/typhoon.c 	if(skb_shinfo(skb)->nr_frags == 0) {
skb               793 drivers/net/ethernet/3com/typhoon.c 		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
skb               796 drivers/net/ethernet/3com/typhoon.c 		txd->len = cpu_to_le16(skb->len);
skb               803 drivers/net/ethernet/3com/typhoon.c 		len = skb_headlen(skb);
skb               804 drivers/net/ethernet/3com/typhoon.c 		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
skb               812 drivers/net/ethernet/3com/typhoon.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               813 drivers/net/ethernet/3com/typhoon.c 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1524 drivers/net/ethernet/3com/typhoon.c 			struct sk_buff *skb = (struct sk_buff *) ptr;
skb              1525 drivers/net/ethernet/3com/typhoon.c 			dev_kfree_skb_irq(skb);
skb              1571 drivers/net/ethernet/3com/typhoon.c 		dev_kfree_skb_any(rxb->skb);
skb              1572 drivers/net/ethernet/3com/typhoon.c 		rxb->skb = NULL;
skb              1593 drivers/net/ethernet/3com/typhoon.c 	struct sk_buff *skb;
skb              1596 drivers/net/ethernet/3com/typhoon.c 	rxb->skb = NULL;
skb              1602 drivers/net/ethernet/3com/typhoon.c 	skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
skb              1603 drivers/net/ethernet/3com/typhoon.c 	if(!skb)
skb              1610 drivers/net/ethernet/3com/typhoon.c 	skb_reserve(skb, 2);
skb              1613 drivers/net/ethernet/3com/typhoon.c 	dma_addr = pci_map_single(tp->pdev, skb->data,
skb              1623 drivers/net/ethernet/3com/typhoon.c 	rxb->skb = skb;
skb              1637 drivers/net/ethernet/3com/typhoon.c 	struct sk_buff *skb, *new_skb;
skb              1654 drivers/net/ethernet/3com/typhoon.c 		skb = rxb->skb;
skb              1672 drivers/net/ethernet/3com/typhoon.c 			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
skb              1679 drivers/net/ethernet/3com/typhoon.c 			new_skb = skb;
skb              1716 drivers/net/ethernet/3com/typhoon.c 		if(rxb->skb)
skb              1795 drivers/net/ethernet/3com/typhoon.c 		if(rxb->skb) {
skb              1798 drivers/net/ethernet/3com/typhoon.c 			dev_kfree_skb(rxb->skb);
skb              1799 drivers/net/ethernet/3com/typhoon.c 			rxb->skb = NULL;
skb                21 drivers/net/ethernet/8390/8390.c netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb                23 drivers/net/ethernet/8390/8390.c 	return __ei_start_xmit(skb, dev);
skb                36 drivers/net/ethernet/8390/8390.h netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb                54 drivers/net/ethernet/8390/8390.h netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb                26 drivers/net/ethernet/8390/8390p.c netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb                28 drivers/net/ethernet/8390/8390p.c 	return __ei_start_xmit(skb, dev);
skb                85 drivers/net/ethernet/8390/apne.c 								struct sk_buff *skb, int ring_offset);
skb               435 drivers/net/ethernet/8390/apne.c apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
skb               438 drivers/net/ethernet/8390/apne.c     char *buf = skb->data;
skb               224 drivers/net/ethernet/8390/ax88796.c 			   struct sk_buff *skb, int ring_offset)
skb               228 drivers/net/ethernet/8390/ax88796.c 	char *buf = skb->data;
skb                82 drivers/net/ethernet/8390/axnet_cs.c static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
skb                97 drivers/net/ethernet/8390/axnet_cs.c 			struct sk_buff *skb, int ring_offset);
skb               646 drivers/net/ethernet/8390/axnet_cs.c 			struct sk_buff *skb, int ring_offset)
skb               651 drivers/net/ethernet/8390/axnet_cs.c     char *buf = skb->data;
skb               951 drivers/net/ethernet/8390/axnet_cs.c static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
skb               962 drivers/net/ethernet/8390/axnet_cs.c 	length = skb->len;
skb              1030 drivers/net/ethernet/8390/axnet_cs.c 	if (length == skb->len)
skb              1031 drivers/net/ethernet/8390/axnet_cs.c 		ei_block_output(dev, length, skb->data, output_page);
skb              1034 drivers/net/ethernet/8390/axnet_cs.c 		skb_copy_from_linear_data(skb, packet, skb->len);
skb              1067 drivers/net/ethernet/8390/axnet_cs.c 	dev_kfree_skb (skb);
skb              1387 drivers/net/ethernet/8390/axnet_cs.c 			struct sk_buff *skb;
skb              1389 drivers/net/ethernet/8390/axnet_cs.c 			skb = netdev_alloc_skb(dev, pkt_len + 2);
skb              1390 drivers/net/ethernet/8390/axnet_cs.c 			if (skb == NULL) 
skb              1400 drivers/net/ethernet/8390/axnet_cs.c 				skb_reserve(skb,2);	/* IP headers on 16 byte boundaries */
skb              1401 drivers/net/ethernet/8390/axnet_cs.c 				skb_put(skb, pkt_len);	/* Make room */
skb              1402 drivers/net/ethernet/8390/axnet_cs.c 				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
skb              1403 drivers/net/ethernet/8390/axnet_cs.c 				skb->protocol=eth_type_trans(skb,dev);
skb              1404 drivers/net/ethernet/8390/axnet_cs.c 				netif_rx(skb);
skb               370 drivers/net/ethernet/8390/etherh.c etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
skb               388 drivers/net/ethernet/8390/etherh.c 	buf = skb->data;
skb                65 drivers/net/ethernet/8390/hydra.c 			      struct sk_buff *skb, int ring_offset);
skb               218 drivers/net/ethernet/8390/hydra.c 			      struct sk_buff *skb, int ring_offset)
skb               230 drivers/net/ethernet/8390/hydra.c 	z_memcpy_fromio(skb->data,xfer_start,semi_count);
skb               232 drivers/net/ethernet/8390/hydra.c 	z_memcpy_fromio(skb->data+semi_count, mem_base, count);
skb               234 drivers/net/ethernet/8390/hydra.c 	z_memcpy_fromio(skb->data, xfer_start,count);
skb               300 drivers/net/ethernet/8390/lib8390.c static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
skb               305 drivers/net/ethernet/8390/lib8390.c 	int send_length = skb->len, output_page;
skb               308 drivers/net/ethernet/8390/lib8390.c 	char *data = skb->data;
skb               310 drivers/net/ethernet/8390/lib8390.c 	if (skb->len < ETH_ZLEN) {
skb               312 drivers/net/ethernet/8390/lib8390.c 		memcpy(buf, data, skb->len);
skb               406 drivers/net/ethernet/8390/lib8390.c 	skb_tx_timestamp(skb);
skb               407 drivers/net/ethernet/8390/lib8390.c 	dev_consume_skb_any(skb);
skb               721 drivers/net/ethernet/8390/lib8390.c 			struct sk_buff *skb;
skb               723 drivers/net/ethernet/8390/lib8390.c 			skb = netdev_alloc_skb(dev, pkt_len + 2);
skb               724 drivers/net/ethernet/8390/lib8390.c 			if (skb == NULL) {
skb               731 drivers/net/ethernet/8390/lib8390.c 				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
skb               732 drivers/net/ethernet/8390/lib8390.c 				skb_put(skb, pkt_len);	/* Make room */
skb               733 drivers/net/ethernet/8390/lib8390.c 				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
skb               734 drivers/net/ethernet/8390/lib8390.c 				skb->protocol = eth_type_trans(skb, dev);
skb               735 drivers/net/ethernet/8390/lib8390.c 				if (!skb_defer_rx_timestamp(skb))
skb               736 drivers/net/ethernet/8390/lib8390.c 					netif_rx(skb);
skb               138 drivers/net/ethernet/8390/mac8390.c 			     struct sk_buff *skb, int ring_offset);
skb               152 drivers/net/ethernet/8390/mac8390.c 			      struct sk_buff *skb, int ring_offset);
skb               160 drivers/net/ethernet/8390/mac8390.c 				  struct sk_buff *skb, int ring_offset);
skb               722 drivers/net/ethernet/8390/mac8390.c 			     struct sk_buff *skb, int ring_offset)
skb               730 drivers/net/ethernet/8390/mac8390.c 		memcpy_fromio(skb->data,
skb               734 drivers/net/ethernet/8390/mac8390.c 		memcpy_fromio(skb->data + semi_count,
skb               737 drivers/net/ethernet/8390/mac8390.c 		memcpy_fromio(skb->data,
skb               763 drivers/net/ethernet/8390/mac8390.c 			      struct sk_buff *skb, int ring_offset)
skb               774 drivers/net/ethernet/8390/mac8390.c 		dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
skb               776 drivers/net/ethernet/8390/mac8390.c 		dayna_memcpy_fromcard(dev, skb->data + semi_count,
skb               780 drivers/net/ethernet/8390/mac8390.c 		dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
skb               805 drivers/net/ethernet/8390/mac8390.c 				  struct sk_buff *skb, int ring_offset)
skb               813 drivers/net/ethernet/8390/mac8390.c 		word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
skb               816 drivers/net/ethernet/8390/mac8390.c 		word_memcpy_fromcard(skb->data + semi_count,
skb               819 drivers/net/ethernet/8390/mac8390.c 		word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
skb               227 drivers/net/ethernet/8390/mcf8390.c 				struct sk_buff *skb, int ring_offset)
skb               231 drivers/net/ethernet/8390/mcf8390.c 	char *buf = skb->data;
skb               182 drivers/net/ethernet/8390/ne.c 			  struct sk_buff *skb, int ring_offset);
skb               613 drivers/net/ethernet/8390/ne.c static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
skb               620 drivers/net/ethernet/8390/ne.c 	char *buf = skb->data;
skb               174 drivers/net/ethernet/8390/ne2k-pci.c 			  struct sk_buff *skb, int ring_offset);
skb               518 drivers/net/ethernet/8390/ne2k-pci.c 				 struct sk_buff *skb, int ring_offset)
skb               521 drivers/net/ethernet/8390/ne2k-pci.c 	char *buf = skb->data;
skb              1157 drivers/net/ethernet/8390/pcnet_cs.c 			    struct sk_buff *skb, int ring_offset)
skb              1161 drivers/net/ethernet/8390/pcnet_cs.c     char *buf = skb->data;
skb              1370 drivers/net/ethernet/8390/pcnet_cs.c 			      struct sk_buff *skb, int ring_offset)
skb              1375 drivers/net/ethernet/8390/pcnet_cs.c     char *buf = skb->data;
skb                92 drivers/net/ethernet/8390/smc-ultra.c 						  struct sk_buff *skb, int ring_offset);
skb                98 drivers/net/ethernet/8390/smc-ultra.c 						  struct sk_buff *skb, int ring_offset);
skb               462 drivers/net/ethernet/8390/smc-ultra.c ultra_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
skb               472 drivers/net/ethernet/8390/smc-ultra.c 		memcpy_fromio(skb->data, xfer_start, semi_count);
skb               474 drivers/net/ethernet/8390/smc-ultra.c 		memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
skb               476 drivers/net/ethernet/8390/smc-ultra.c 		memcpy_fromio(skb->data, xfer_start, count);
skb               514 drivers/net/ethernet/8390/smc-ultra.c 						  struct sk_buff *skb, int ring_offset)
skb               517 drivers/net/ethernet/8390/smc-ultra.c     char *buf = skb->data;
skb                66 drivers/net/ethernet/8390/stnic.c 			       struct sk_buff *skb , int ring_offset);
skb               222 drivers/net/ethernet/8390/stnic.c stnic_block_input (struct net_device *dev, int length, struct sk_buff *skb,
skb               225 drivers/net/ethernet/8390/stnic.c   char *buf = skb->data;
skb                58 drivers/net/ethernet/8390/wd.c 						  struct sk_buff *skb, int ring_offset);
skb               438 drivers/net/ethernet/8390/wd.c wd_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
skb               447 drivers/net/ethernet/8390/wd.c 		memcpy_fromio(skb->data, xfer_start, semi_count);
skb               449 drivers/net/ethernet/8390/wd.c 		memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
skb               452 drivers/net/ethernet/8390/wd.c 		memcpy_fromio(skb->data, xfer_start, count);
skb               160 drivers/net/ethernet/8390/xsurf100.c 			      struct sk_buff *skb, int ring_offset)
skb               164 drivers/net/ethernet/8390/xsurf100.c 	char *buf = skb->data;
skb               155 drivers/net/ethernet/8390/zorro8390.c 				  struct sk_buff *skb, int ring_offset)
skb               158 drivers/net/ethernet/8390/zorro8390.c 	char *buf = skb->data;
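
Every *_block_input helper in the 8390 group above does the same job: copy one received frame out of the adapter's ring buffer into skb->data, splitting into two copies when the frame wraps past the ring's end. The shared-memory variants reduce to this shape (the ring geometry parameters are hypothetical):

	#include <linux/skbuff.h>
	#include <linux/io.h>

	static void my_block_input(struct sk_buff *skb, int count,
				   void __iomem *mem, int ring_offset,
				   int ring_start, int ring_end)
	{
		char *buf = skb->data;

		if (ring_offset + count > ring_end) {
			int semi_count = ring_end - ring_offset;

			memcpy_fromio(buf, mem + ring_offset, semi_count);
			memcpy_fromio(buf + semi_count, mem + ring_start,
				      count - semi_count);	/* wrapped tail */
		} else {
			memcpy_fromio(buf, mem + ring_offset, count);
		}
	}
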
skb               161 drivers/net/ethernet/adaptec/starfire.c #define skb_first_frag_len(skb)	skb_headlen(skb)
skb               162 drivers/net/ethernet/adaptec/starfire.c #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
skb               521 drivers/net/ethernet/adaptec/starfire.c 	struct sk_buff *skb;
skb               525 drivers/net/ethernet/adaptec/starfire.c 	struct sk_buff *skb;
skb               581 drivers/net/ethernet/adaptec/starfire.c static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
skb              1150 drivers/net/ethernet/adaptec/starfire.c 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
skb              1151 drivers/net/ethernet/adaptec/starfire.c 		np->rx_info[i].skb = skb;
skb              1152 drivers/net/ethernet/adaptec/starfire.c 		if (skb == NULL)
skb              1154 drivers/net/ethernet/adaptec/starfire.c 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb              1157 drivers/net/ethernet/adaptec/starfire.c 			dev_kfree_skb(skb);
skb              1158 drivers/net/ethernet/adaptec/starfire.c 			np->rx_info[i].skb = NULL;
skb              1170 drivers/net/ethernet/adaptec/starfire.c 		np->rx_info[i].skb = NULL;
skb              1187 drivers/net/ethernet/adaptec/starfire.c static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
skb              1199 drivers/net/ethernet/adaptec/starfire.c 	if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
skb              1205 drivers/net/ethernet/adaptec/starfire.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1206 drivers/net/ethernet/adaptec/starfire.c 		if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
skb              1213 drivers/net/ethernet/adaptec/starfire.c 	for (i = 0; i < skb_num_frags(skb); i++) {
skb              1218 drivers/net/ethernet/adaptec/starfire.c 			np->tx_info[entry].skb = skb;
skb              1220 drivers/net/ethernet/adaptec/starfire.c 			if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
skb              1228 drivers/net/ethernet/adaptec/starfire.c 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1232 drivers/net/ethernet/adaptec/starfire.c 			status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
skb              1235 drivers/net/ethernet/adaptec/starfire.c 				pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
skb              1237 drivers/net/ethernet/adaptec/starfire.c 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
skb              1287 drivers/net/ethernet/adaptec/starfire.c 	np->tx_info[entry].skb = NULL;
skb              1291 drivers/net/ethernet/adaptec/starfire.c 				 skb_first_frag_len(skb),
skb              1299 drivers/net/ethernet/adaptec/starfire.c 						&skb_shinfo(skb)->frags[j-1]),
skb              1304 drivers/net/ethernet/adaptec/starfire.c 	dev_kfree_skb_any(skb);
skb              1372 drivers/net/ethernet/adaptec/starfire.c 				struct sk_buff *skb = np->tx_info[entry].skb;
skb              1373 drivers/net/ethernet/adaptec/starfire.c 				np->tx_info[entry].skb = NULL;
skb              1376 drivers/net/ethernet/adaptec/starfire.c 						 skb_first_frag_len(skb),
skb              1383 drivers/net/ethernet/adaptec/starfire.c 					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1386 drivers/net/ethernet/adaptec/starfire.c 								 skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb              1393 drivers/net/ethernet/adaptec/starfire.c 				dev_consume_skb_irq(skb);
skb              1446 drivers/net/ethernet/adaptec/starfire.c 		struct sk_buff *skb;
skb              1477 drivers/net/ethernet/adaptec/starfire.c 		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb              1478 drivers/net/ethernet/adaptec/starfire.c 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
skb              1482 drivers/net/ethernet/adaptec/starfire.c 			skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
skb              1486 drivers/net/ethernet/adaptec/starfire.c 			skb_put(skb, pkt_len);
skb              1489 drivers/net/ethernet/adaptec/starfire.c 			skb = np->rx_info[entry].skb;
skb              1490 drivers/net/ethernet/adaptec/starfire.c 			skb_put(skb, pkt_len);
skb              1491 drivers/net/ethernet/adaptec/starfire.c 			np->rx_info[entry].skb = NULL;
skb              1498 drivers/net/ethernet/adaptec/starfire.c 			       skb->data, skb->data + 6,
skb              1499 drivers/net/ethernet/adaptec/starfire.c 			       skb->data[12], skb->data[13]);
skb              1503 drivers/net/ethernet/adaptec/starfire.c 		skb->protocol = eth_type_trans(skb, dev);
skb              1509 drivers/net/ethernet/adaptec/starfire.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1522 drivers/net/ethernet/adaptec/starfire.c 			skb->ip_summed = CHECKSUM_COMPLETE;
skb              1523 drivers/net/ethernet/adaptec/starfire.c 			skb->csum = le16_to_cpu(desc->csum);
skb              1534 drivers/net/ethernet/adaptec/starfire.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
skb              1537 drivers/net/ethernet/adaptec/starfire.c 		netif_receive_skb(skb);
skb              1594 drivers/net/ethernet/adaptec/starfire.c 	struct sk_buff *skb;
skb              1600 drivers/net/ethernet/adaptec/starfire.c 		if (np->rx_info[entry].skb == NULL) {
skb              1601 drivers/net/ethernet/adaptec/starfire.c 			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
skb              1602 drivers/net/ethernet/adaptec/starfire.c 			np->rx_info[entry].skb = skb;
skb              1603 drivers/net/ethernet/adaptec/starfire.c 			if (skb == NULL)
skb              1606 drivers/net/ethernet/adaptec/starfire.c 				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb              1609 drivers/net/ethernet/adaptec/starfire.c 				dev_kfree_skb(skb);
skb              1610 drivers/net/ethernet/adaptec/starfire.c 				np->rx_info[entry].skb = NULL;
skb              1981 drivers/net/ethernet/adaptec/starfire.c 		if (np->rx_info[i].skb != NULL) {
skb              1983 drivers/net/ethernet/adaptec/starfire.c 			dev_kfree_skb(np->rx_info[i].skb);
skb              1985 drivers/net/ethernet/adaptec/starfire.c 		np->rx_info[i].skb = NULL;
skb              1989 drivers/net/ethernet/adaptec/starfire.c 		struct sk_buff *skb = np->tx_info[i].skb;
skb              1990 drivers/net/ethernet/adaptec/starfire.c 		if (skb == NULL)
skb              1994 drivers/net/ethernet/adaptec/starfire.c 				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
skb              1996 drivers/net/ethernet/adaptec/starfire.c 		dev_kfree_skb(skb);
skb              1997 drivers/net/ethernet/adaptec/starfire.c 		np->tx_info[i].skb = NULL;
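
starfire keeps every rx descriptor backed by a mapped skb; the init and refill paths above drop a slot on allocation or mapping failure so it can be retried on the next pass. One slot of that refill, condensed (struct my_rx_slot is a stand-in for np->rx_info[]):

	#include <linux/netdevice.h>
	#include <linux/pci.h>

	struct my_rx_slot {
		struct sk_buff *skb;
		dma_addr_t mapping;
	};

	static int my_refill_slot(struct pci_dev *pdev, struct net_device *dev,
				  struct my_rx_slot *slot, int buf_sz)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, buf_sz);

		if (!skb)
			return -ENOMEM;
		slot->mapping = pci_map_single(pdev, skb->data, buf_sz,
					       PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, slot->mapping)) {
			dev_kfree_skb(skb);
			return -EIO;
		}
		slot->skb = skb;
		return 0;
	}
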
skb                68 drivers/net/ethernet/aeroflex/greth.c static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
skb                70 drivers/net/ethernet/aeroflex/greth.c static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
skb                96 drivers/net/ethernet/aeroflex/greth.c static void greth_print_tx_packet(struct sk_buff *skb)
skb               101 drivers/net/ethernet/aeroflex/greth.c 	if (skb_shinfo(skb)->nr_frags == 0)
skb               102 drivers/net/ethernet/aeroflex/greth.c 		length = skb->len;
skb               104 drivers/net/ethernet/aeroflex/greth.c 		length = skb_headlen(skb);
skb               107 drivers/net/ethernet/aeroflex/greth.c 			skb->data, length, true);
skb               109 drivers/net/ethernet/aeroflex/greth.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               112 drivers/net/ethernet/aeroflex/greth.c 			       skb_frag_address(&skb_shinfo(skb)->frags[i]),
skb               113 drivers/net/ethernet/aeroflex/greth.c 			       skb_frag_size(&skb_shinfo(skb)->frags[i]), true);
skb               187 drivers/net/ethernet/aeroflex/greth.c 			struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
skb               188 drivers/net/ethernet/aeroflex/greth.c 			int nr_frags = skb_shinfo(skb)->nr_frags;
skb               194 drivers/net/ethernet/aeroflex/greth.c 					 skb_headlen(skb),
skb               198 drivers/net/ethernet/aeroflex/greth.c 				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               209 drivers/net/ethernet/aeroflex/greth.c 			dev_kfree_skb(skb);
skb               234 drivers/net/ethernet/aeroflex/greth.c 	struct sk_buff *skb;
skb               246 drivers/net/ethernet/aeroflex/greth.c 			skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
skb               247 drivers/net/ethernet/aeroflex/greth.c 			if (skb == NULL) {
skb               252 drivers/net/ethernet/aeroflex/greth.c 			skb_reserve(skb, NET_IP_ALIGN);
skb               254 drivers/net/ethernet/aeroflex/greth.c 						  skb->data,
skb               263 drivers/net/ethernet/aeroflex/greth.c 			greth->rx_skbuff[i] = skb;
skb               393 drivers/net/ethernet/aeroflex/greth.c greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               416 drivers/net/ethernet/aeroflex/greth.c 		greth_print_tx_packet(skb);
skb               419 drivers/net/ethernet/aeroflex/greth.c 	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
skb               427 drivers/net/ethernet/aeroflex/greth.c 	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
skb               429 drivers/net/ethernet/aeroflex/greth.c 	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
skb               431 drivers/net/ethernet/aeroflex/greth.c 	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
skb               432 drivers/net/ethernet/aeroflex/greth.c 	greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
skb               449 drivers/net/ethernet/aeroflex/greth.c 	dev_kfree_skb(skb);
skb               462 drivers/net/ethernet/aeroflex/greth.c greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
skb               471 drivers/net/ethernet/aeroflex/greth.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb               482 drivers/net/ethernet/aeroflex/greth.c 		greth_print_tx_packet(skb);
skb               484 drivers/net/ethernet/aeroflex/greth.c 	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
skb               490 drivers/net/ethernet/aeroflex/greth.c 	greth->tx_skbuff[greth->tx_next] = skb;
skb               498 drivers/net/ethernet/aeroflex/greth.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               500 drivers/net/ethernet/aeroflex/greth.c 	status |= skb_headlen(skb) & GRETH_BD_LEN;
skb               507 drivers/net/ethernet/aeroflex/greth.c 	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
skb               518 drivers/net/ethernet/aeroflex/greth.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               523 drivers/net/ethernet/aeroflex/greth.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               577 drivers/net/ethernet/aeroflex/greth.c 	dev_kfree_skb(skb);
skb               675 drivers/net/ethernet/aeroflex/greth.c 	struct sk_buff *skb = NULL;
skb               685 drivers/net/ethernet/aeroflex/greth.c 		skb = greth->tx_skbuff[tx_last];
skb               687 drivers/net/ethernet/aeroflex/greth.c 		nr_frags = skb_shinfo(skb)->nr_frags;
skb               702 drivers/net/ethernet/aeroflex/greth.c 		dev->stats.tx_bytes += skb->len;
skb               710 drivers/net/ethernet/aeroflex/greth.c 				 skb_headlen(skb),
skb               714 drivers/net/ethernet/aeroflex/greth.c 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               724 drivers/net/ethernet/aeroflex/greth.c 		dev_kfree_skb(skb);
skb               726 drivers/net/ethernet/aeroflex/greth.c 	if (skb) { /* skb is set only if the above while loop was entered */
skb               741 drivers/net/ethernet/aeroflex/greth.c 	struct sk_buff *skb;
skb               785 drivers/net/ethernet/aeroflex/greth.c 			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
skb               787 drivers/net/ethernet/aeroflex/greth.c 			if (unlikely(skb == NULL)) {
skb               795 drivers/net/ethernet/aeroflex/greth.c 				skb_reserve(skb, NET_IP_ALIGN);
skb               805 drivers/net/ethernet/aeroflex/greth.c 				skb_put_data(skb, phys_to_virt(dma_addr),
skb               808 drivers/net/ethernet/aeroflex/greth.c 				skb->protocol = eth_type_trans(skb, dev);
skb               811 drivers/net/ethernet/aeroflex/greth.c 				netif_receive_skb(skb);
skb               857 drivers/net/ethernet/aeroflex/greth.c 	struct sk_buff *skb, *newskb;
skb               868 drivers/net/ethernet/aeroflex/greth.c 		skb = greth->rx_skbuff[greth->rx_cur];
skb               915 drivers/net/ethernet/aeroflex/greth.c 				skb_put(skb, pkt_len);
skb               918 drivers/net/ethernet/aeroflex/greth.c 					skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               920 drivers/net/ethernet/aeroflex/greth.c 					skb_checksum_none_assert(skb);
skb               922 drivers/net/ethernet/aeroflex/greth.c 				skb->protocol = eth_type_trans(skb, dev);
skb               925 drivers/net/ethernet/aeroflex/greth.c 				netif_receive_skb(skb);
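
The greth.c hits above trace a descriptor-ring receive completion: the frame is already in the skb via DMA, so the handler only sets the length, translates the hardware checksum flag, and hands the buffer up. A minimal sketch of that shape, assuming a made-up status bit MY_BD_CSUM_OK and helper my_rx_one rather than greth's own names:

#include <linux/bits.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_BD_CSUM_OK	BIT(13)	/* hypothetical "hw verified csum" flag */

static void my_rx_one(struct net_device *dev, struct sk_buff *skb,
		      unsigned int pkt_len, u32 bd_status)
{
	skb_put(skb, pkt_len);			/* data was DMAed in place */

	if (bd_status & MY_BD_CSUM_OK)		/* hw vouches for L4 csum */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);	/* stack must verify */

	skb->protocol = eth_type_trans(skb, dev);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += pkt_len;
	netif_receive_skb(skb);
}
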
skb               353 drivers/net/ethernet/agere/et131x.c 	struct sk_buff *skb;	/* Network skb we are tied to */
skb               405 drivers/net/ethernet/agere/et131x.c 	struct sk_buff *skb;
skb              2024 drivers/net/ethernet/agere/et131x.c 		rfd->skb = NULL;
skb              2100 drivers/net/ethernet/agere/et131x.c 		rfd->skb = NULL;
skb              2199 drivers/net/ethernet/agere/et131x.c 	struct sk_buff *skb;
skb              2279 drivers/net/ethernet/agere/et131x.c 	skb = dev_alloc_skb(rfd->len + 2);
skb              2280 drivers/net/ethernet/agere/et131x.c 	if (!skb)
skb              2285 drivers/net/ethernet/agere/et131x.c 	skb_put_data(skb, fbr->virt[buff_index], rfd->len);
skb              2287 drivers/net/ethernet/agere/et131x.c 	skb->protocol = eth_type_trans(skb, adapter->netdev);
skb              2288 drivers/net/ethernet/agere/et131x.c 	skb->ip_summed = CHECKSUM_NONE;
skb              2289 drivers/net/ethernet/agere/et131x.c 	netif_receive_skb(skb);
skb              2427 drivers/net/ethernet/agere/et131x.c 	struct sk_buff *skb = tcb->skb;
skb              2428 drivers/net/ethernet/agere/et131x.c 	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
skb              2429 drivers/net/ethernet/agere/et131x.c 	skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
skb              2458 drivers/net/ethernet/agere/et131x.c 			if (skb_headlen(skb) <= 1514) {
skb              2462 drivers/net/ethernet/agere/et131x.c 				desc[frag].len_vlan = skb_headlen(skb);
skb              2464 drivers/net/ethernet/agere/et131x.c 							  skb->data,
skb              2465 drivers/net/ethernet/agere/et131x.c 							  skb_headlen(skb),
skb              2471 drivers/net/ethernet/agere/et131x.c 				desc[frag].len_vlan = skb_headlen(skb) / 2;
skb              2473 drivers/net/ethernet/agere/et131x.c 							  skb->data,
skb              2474 drivers/net/ethernet/agere/et131x.c 							  skb_headlen(skb) / 2,
skb              2480 drivers/net/ethernet/agere/et131x.c 				desc[frag].len_vlan = skb_headlen(skb) / 2;
skb              2482 drivers/net/ethernet/agere/et131x.c 							  skb->data +
skb              2483 drivers/net/ethernet/agere/et131x.c 							  skb_headlen(skb) / 2,
skb              2484 drivers/net/ethernet/agere/et131x.c 							  skb_headlen(skb) / 2,
skb              2588 drivers/net/ethernet/agere/et131x.c static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
skb              2596 drivers/net/ethernet/agere/et131x.c 	if (skb->len < ETH_HLEN)
skb              2615 drivers/net/ethernet/agere/et131x.c 	tcb->skb = skb;
skb              2647 drivers/net/ethernet/agere/et131x.c 	if (tcb->skb) {
skb              2648 drivers/net/ethernet/agere/et131x.c 		stats->tx_bytes += tcb->skb->len;
skb              2673 drivers/net/ethernet/agere/et131x.c 		dev_kfree_skb_any(tcb->skb);
skb              3777 drivers/net/ethernet/agere/et131x.c static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
skb              3797 drivers/net/ethernet/agere/et131x.c 	if (send_packet(skb, adapter))
skb              3803 drivers/net/ethernet/agere/et131x.c 	dev_kfree_skb_any(skb);
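
The et131x receive hits (2279-2289) show the copy-based alternative: the hardware fills driver-owned ring buffers, and each completed frame is copied into a freshly allocated skb. A sketch of that pattern; the names here are invented, not et131x's:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Copy a frame out of a driver-owned ring buffer into a new skb. */
static int my_copy_rx(struct net_device *dev, const void *ring_buf,
		      unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);	/* small slack, as above */

	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, ring_buf, len);	/* extends tail and copies */
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_NONE;		/* no hw checksum on this path */
	netif_receive_skb(skb);
	return 0;
}
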
skb               501 drivers/net/ethernet/alacritech/slic.h 	struct sk_buff *skb;
skb               524 drivers/net/ethernet/alacritech/slic.h 	struct sk_buff *skb;
skb               363 drivers/net/ethernet/alacritech/slicoss.c 		if (unlikely(!buff->skb)) {
skb               372 drivers/net/ethernet/alacritech/slicoss.c 		bytes += buff->skb->len;
skb               375 drivers/net/ethernet/alacritech/slicoss.c 		dev_kfree_skb_any(buff->skb);
skb               376 drivers/net/ethernet/alacritech/slicoss.c 		buff->skb = NULL;
skb               403 drivers/net/ethernet/alacritech/slicoss.c 	struct sk_buff *skb;
skb               407 drivers/net/ethernet/alacritech/slicoss.c 		skb = alloc_skb(maplen + ALIGN_MASK, gfp);
skb               408 drivers/net/ethernet/alacritech/slicoss.c 		if (!skb)
skb               411 drivers/net/ethernet/alacritech/slicoss.c 		paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
skb               416 drivers/net/ethernet/alacritech/slicoss.c 			dev_kfree_skb_any(skb);
skb               424 drivers/net/ethernet/alacritech/slicoss.c 			skb_reserve(skb, offset);
skb               427 drivers/net/ethernet/alacritech/slicoss.c 		desc = (struct slic_rx_desc *)skb->data;
skb               439 drivers/net/ethernet/alacritech/slicoss.c 		buff->skb = skb;
skb               452 drivers/net/ethernet/alacritech/slicoss.c 				    struct sk_buff *skb)
skb               461 drivers/net/ethernet/alacritech/slicoss.c 		info = (struct slic_rx_info_oasis *)skb->data;
skb               499 drivers/net/ethernet/alacritech/slicoss.c 		info = (struct slic_rx_info_mojave *)skb->data;
skb               556 drivers/net/ethernet/alacritech/slicoss.c 	struct sk_buff *skb;
skb               563 drivers/net/ethernet/alacritech/slicoss.c 		skb = buff->skb;
skb               564 drivers/net/ethernet/alacritech/slicoss.c 		if (!skb)
skb               567 drivers/net/ethernet/alacritech/slicoss.c 		desc = (struct slic_rx_desc *)skb->data;
skb               585 drivers/net/ethernet/alacritech/slicoss.c 		buff->skb = NULL;
skb               593 drivers/net/ethernet/alacritech/slicoss.c 		skb_reserve(skb, SLIC_RX_BUFF_HDR_SIZE);
skb               596 drivers/net/ethernet/alacritech/slicoss.c 			slic_handle_frame_error(sdev, skb);
skb               597 drivers/net/ethernet/alacritech/slicoss.c 			dev_kfree_skb_any(skb);
skb               599 drivers/net/ethernet/alacritech/slicoss.c 			struct ethhdr *eh = (struct ethhdr *)skb->data;
skb               605 drivers/net/ethernet/alacritech/slicoss.c 			skb_put(skb, len);
skb               606 drivers/net/ethernet/alacritech/slicoss.c 			skb->protocol = eth_type_trans(skb, dev);
skb               607 drivers/net/ethernet/alacritech/slicoss.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               609 drivers/net/ethernet/alacritech/slicoss.c 			napi_gro_receive(&sdev->napi, skb);
skb               904 drivers/net/ethernet/alacritech/slicoss.c 		if (!buff->skb)
skb               910 drivers/net/ethernet/alacritech/slicoss.c 		consume_skb(buff->skb);
skb               946 drivers/net/ethernet/alacritech/slicoss.c 		if (!buff->skb)
skb               953 drivers/net/ethernet/alacritech/slicoss.c 		consume_skb(buff->skb);
skb              1408 drivers/net/ethernet/alacritech/slicoss.c static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1424 drivers/net/ethernet/alacritech/slicoss.c 	maplen = skb_headlen(skb);
skb              1425 drivers/net/ethernet/alacritech/slicoss.c 	paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
skb              1433 drivers/net/ethernet/alacritech/slicoss.c 	buff->skb = skb;
skb              1456 drivers/net/ethernet/alacritech/slicoss.c 	dev_kfree_skb_any(skb);
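
The slicoss transmit hits (1408-1456) map only the linear head of the skb and keep the skb pointer in the buffer descriptor until completion. A sketch of the mapping step with the descriptor bookkeeping elided; dmadev stands in for &sdev->pdev->dev:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev,
			   struct device *dmadev)
{
	unsigned int maplen = skb_headlen(skb);
	dma_addr_t paddr;

	paddr = dma_map_single(dmadev, skb->data, maplen, DMA_TO_DEVICE);
	if (dma_mapping_error(dmadev, paddr)) {
		dev_kfree_skb_any(skb);		/* drop the frame cleanly */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;		/* do not ask for a requeue */
	}

	/* ... post paddr/maplen to a descriptor, stash skb for the
	 * completion handler, which unmaps and dev_kfree_skb_any()s it ... */
	return NETDEV_TX_OK;
}
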
skb               435 drivers/net/ethernet/allwinner/sun4i-emac.c static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               452 drivers/net/ethernet/allwinner/sun4i-emac.c 			skb->data, skb->len);
skb               453 drivers/net/ethernet/allwinner/sun4i-emac.c 	dev->stats.tx_bytes += skb->len;
skb               459 drivers/net/ethernet/allwinner/sun4i-emac.c 		writel(skb->len, db->membase + EMAC_TX_PL0_REG);
skb               468 drivers/net/ethernet/allwinner/sun4i-emac.c 		writel(skb->len, db->membase + EMAC_TX_PL1_REG);
skb               485 drivers/net/ethernet/allwinner/sun4i-emac.c 	dev_consume_skb_any(skb);
skb               514 drivers/net/ethernet/allwinner/sun4i-emac.c 	struct sk_buff *skb;
skb               631 drivers/net/ethernet/allwinner/sun4i-emac.c 			skb = netdev_alloc_skb(dev, rxlen + 4);
skb               632 drivers/net/ethernet/allwinner/sun4i-emac.c 			if (!skb)
skb               634 drivers/net/ethernet/allwinner/sun4i-emac.c 			skb_reserve(skb, 2);
skb               635 drivers/net/ethernet/allwinner/sun4i-emac.c 			rdptr = skb_put(skb, rxlen - 4);
skb               646 drivers/net/ethernet/allwinner/sun4i-emac.c 			skb->protocol = eth_type_trans(skb, dev);
skb               647 drivers/net/ethernet/allwinner/sun4i-emac.c 			netif_rx(skb);
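
sun4i-emac (435-485) is a pure MMIO-FIFO device: the frame is pushed through a data register, the length write arms the transmitter, and the skb can be consumed immediately since the hardware already holds a copy. A sketch with invented register offsets (the real ones live in the driver's header):

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_TX_DATA_REG	0x24	/* hypothetical FIFO window */
#define MY_TX_LEN_REG	0x28	/* hypothetical length/arm register */

static netdev_tx_t my_fifo_xmit(struct sk_buff *skb, struct net_device *dev,
				void __iomem *membase)
{
	/* whole words into the FIFO; rounding up is the usual idiom here */
	iowrite32_rep(membase + MY_TX_DATA_REG, skb->data,
		      DIV_ROUND_UP(skb->len, 4));
	writel(skb->len, membase + MY_TX_LEN_REG);

	dev->stats.tx_bytes += skb->len;
	dev_consume_skb_any(skb);	/* frame is in the FIFO already */
	return NETDEV_TX_OK;
}
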
skb               637 drivers/net/ethernet/alteon/acenic.c 		struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
skb               639 drivers/net/ethernet/alteon/acenic.c 		if (skb) {
skb               643 drivers/net/ethernet/alteon/acenic.c 			ringp = &ap->skb->rx_std_skbuff[i];
skb               650 drivers/net/ethernet/alteon/acenic.c 			ap->skb->rx_std_skbuff[i].skb = NULL;
skb               651 drivers/net/ethernet/alteon/acenic.c 			dev_kfree_skb(skb);
skb               657 drivers/net/ethernet/alteon/acenic.c 			struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
skb               659 drivers/net/ethernet/alteon/acenic.c 			if (skb) {
skb               663 drivers/net/ethernet/alteon/acenic.c 				ringp = &ap->skb->rx_mini_skbuff[i];
skb               670 drivers/net/ethernet/alteon/acenic.c 				ap->skb->rx_mini_skbuff[i].skb = NULL;
skb               671 drivers/net/ethernet/alteon/acenic.c 				dev_kfree_skb(skb);
skb               677 drivers/net/ethernet/alteon/acenic.c 		struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
skb               678 drivers/net/ethernet/alteon/acenic.c 		if (skb) {
skb               682 drivers/net/ethernet/alteon/acenic.c 			ringp = &ap->skb->rx_jumbo_skbuff[i];
skb               689 drivers/net/ethernet/alteon/acenic.c 			ap->skb->rx_jumbo_skbuff[i].skb = NULL;
skb               690 drivers/net/ethernet/alteon/acenic.c 			dev_kfree_skb(skb);
skb               835 drivers/net/ethernet/alteon/acenic.c 	kfree(ap->skb);
skb              1156 drivers/net/ethernet/alteon/acenic.c 	if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
skb              1178 drivers/net/ethernet/alteon/acenic.c 	memset(ap->skb, 0, sizeof(struct ace_skb));
skb              1641 drivers/net/ethernet/alteon/acenic.c 		struct sk_buff *skb;
skb              1645 drivers/net/ethernet/alteon/acenic.c 		skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
skb              1646 drivers/net/ethernet/alteon/acenic.c 		if (!skb)
skb              1649 drivers/net/ethernet/alteon/acenic.c 		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
skb              1650 drivers/net/ethernet/alteon/acenic.c 				       offset_in_page(skb->data),
skb              1653 drivers/net/ethernet/alteon/acenic.c 		ap->skb->rx_std_skbuff[idx].skb = skb;
skb              1654 drivers/net/ethernet/alteon/acenic.c 		dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
skb              1702 drivers/net/ethernet/alteon/acenic.c 		struct sk_buff *skb;
skb              1706 drivers/net/ethernet/alteon/acenic.c 		skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
skb              1707 drivers/net/ethernet/alteon/acenic.c 		if (!skb)
skb              1710 drivers/net/ethernet/alteon/acenic.c 		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
skb              1711 drivers/net/ethernet/alteon/acenic.c 				       offset_in_page(skb->data),
skb              1714 drivers/net/ethernet/alteon/acenic.c 		ap->skb->rx_mini_skbuff[idx].skb = skb;
skb              1715 drivers/net/ethernet/alteon/acenic.c 		dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
skb              1758 drivers/net/ethernet/alteon/acenic.c 		struct sk_buff *skb;
skb              1762 drivers/net/ethernet/alteon/acenic.c 		skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
skb              1763 drivers/net/ethernet/alteon/acenic.c 		if (!skb)
skb              1766 drivers/net/ethernet/alteon/acenic.c 		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
skb              1767 drivers/net/ethernet/alteon/acenic.c 				       offset_in_page(skb->data),
skb              1770 drivers/net/ethernet/alteon/acenic.c 		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
skb              1771 drivers/net/ethernet/alteon/acenic.c 		dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
skb              1882 drivers/net/ethernet/alteon/acenic.c 				if (ap->skb->rx_jumbo_skbuff[i].skb) {
skb              1885 drivers/net/ethernet/alteon/acenic.c 					dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
skb              1886 drivers/net/ethernet/alteon/acenic.c 					ap->skb->rx_jumbo_skbuff[i].skb = NULL;
skb              1932 drivers/net/ethernet/alteon/acenic.c 		struct sk_buff *skb;
skb              1957 drivers/net/ethernet/alteon/acenic.c 			rip = &ap->skb->rx_std_skbuff[skbidx];
skb              1962 drivers/net/ethernet/alteon/acenic.c 			rip = &ap->skb->rx_jumbo_skbuff[skbidx];
skb              1967 drivers/net/ethernet/alteon/acenic.c 			rip = &ap->skb->rx_mini_skbuff[skbidx];
skb              1978 drivers/net/ethernet/alteon/acenic.c 		skb = rip->skb;
skb              1979 drivers/net/ethernet/alteon/acenic.c 		rip->skb = NULL;
skb              1984 drivers/net/ethernet/alteon/acenic.c 		skb_put(skb, retdesc->size);
skb              1991 drivers/net/ethernet/alteon/acenic.c 		skb->protocol = eth_type_trans(skb, dev);
skb              1998 drivers/net/ethernet/alteon/acenic.c 			skb->csum = htons(csum);
skb              1999 drivers/net/ethernet/alteon/acenic.c 			skb->ip_summed = CHECKSUM_COMPLETE;
skb              2001 drivers/net/ethernet/alteon/acenic.c 			skb_checksum_none_assert(skb);
skb              2006 drivers/net/ethernet/alteon/acenic.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
skb              2007 drivers/net/ethernet/alteon/acenic.c 		netif_rx(skb);
skb              2042 drivers/net/ethernet/alteon/acenic.c 		struct sk_buff *skb;
skb              2045 drivers/net/ethernet/alteon/acenic.c 		info = ap->skb->tx_skbuff + idx;
skb              2046 drivers/net/ethernet/alteon/acenic.c 		skb = info->skb;
skb              2055 drivers/net/ethernet/alteon/acenic.c 		if (skb) {
skb              2057 drivers/net/ethernet/alteon/acenic.c 			dev->stats.tx_bytes += skb->len;
skb              2058 drivers/net/ethernet/alteon/acenic.c 			dev_consume_skb_irq(skb);
skb              2059 drivers/net/ethernet/alteon/acenic.c 			info->skb = NULL;
skb              2323 drivers/net/ethernet/alteon/acenic.c 		struct sk_buff *skb;
skb              2326 drivers/net/ethernet/alteon/acenic.c 		info = ap->skb->tx_skbuff + i;
skb              2327 drivers/net/ethernet/alteon/acenic.c 		skb = info->skb;
skb              2345 drivers/net/ethernet/alteon/acenic.c 		if (skb) {
skb              2346 drivers/net/ethernet/alteon/acenic.c 			dev_kfree_skb(skb);
skb              2347 drivers/net/ethernet/alteon/acenic.c 			info->skb = NULL;
skb              2366 drivers/net/ethernet/alteon/acenic.c ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
skb              2372 drivers/net/ethernet/alteon/acenic.c 	mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
skb              2373 drivers/net/ethernet/alteon/acenic.c 			       offset_in_page(skb->data),
skb              2374 drivers/net/ethernet/alteon/acenic.c 			       skb->len, PCI_DMA_TODEVICE);
skb              2376 drivers/net/ethernet/alteon/acenic.c 	info = ap->skb->tx_skbuff + idx;
skb              2377 drivers/net/ethernet/alteon/acenic.c 	info->skb = tail;
skb              2379 drivers/net/ethernet/alteon/acenic.c 	dma_unmap_len_set(info, maplen, skb->len);
skb              2407 drivers/net/ethernet/alteon/acenic.c static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
skb              2422 drivers/net/ethernet/alteon/acenic.c 	if (!skb_shinfo(skb)->nr_frags)	{
skb              2426 drivers/net/ethernet/alteon/acenic.c 		mapping = ace_map_tx_skb(ap, skb, skb, idx);
skb              2427 drivers/net/ethernet/alteon/acenic.c 		flagsize = (skb->len << 16) | (BD_FLG_END);
skb              2428 drivers/net/ethernet/alteon/acenic.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              2430 drivers/net/ethernet/alteon/acenic.c 		if (skb_vlan_tag_present(skb)) {
skb              2432 drivers/net/ethernet/alteon/acenic.c 			vlan_tag = skb_vlan_tag_get(skb);
skb              2447 drivers/net/ethernet/alteon/acenic.c 		mapping = ace_map_tx_skb(ap, skb, NULL, idx);
skb              2448 drivers/net/ethernet/alteon/acenic.c 		flagsize = (skb_headlen(skb) << 16);
skb              2449 drivers/net/ethernet/alteon/acenic.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              2451 drivers/net/ethernet/alteon/acenic.c 		if (skb_vlan_tag_present(skb)) {
skb              2453 drivers/net/ethernet/alteon/acenic.c 			vlan_tag = skb_vlan_tag_get(skb);
skb              2460 drivers/net/ethernet/alteon/acenic.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2461 drivers/net/ethernet/alteon/acenic.c 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2465 drivers/net/ethernet/alteon/acenic.c 			info = ap->skb->tx_skbuff + idx;
skb              2473 drivers/net/ethernet/alteon/acenic.c 			if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              2477 drivers/net/ethernet/alteon/acenic.c 			if (i == skb_shinfo(skb)->nr_frags - 1) {
skb              2486 drivers/net/ethernet/alteon/acenic.c 				info->skb = skb;
skb              2488 drivers/net/ethernet/alteon/acenic.c 				info->skb = NULL;
skb               593 drivers/net/ethernet/alteon/acenic.h 	struct sk_buff		*skb;
skb               604 drivers/net/ethernet/alteon/acenic.h 	struct sk_buff		*skb;
skb               638 drivers/net/ethernet/alteon/acenic.h 	struct ace_skb		*skb;
skb               776 drivers/net/ethernet/alteon/acenic.h static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
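
The acenic transmit path (2407-2488) is the scatter-gather case: the linear head gets one descriptor, every page fragment gets its own, and only the last descriptor carries the end flag plus the skb pointer for completion. A sketch of the mapping walk, with descriptor writes and unmap-on-error handling elided:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_sg_map(struct device *dmadev, struct sk_buff *skb,
		     dma_addr_t *addrs, unsigned int *lens)
{
	unsigned int n = 0;
	int i;

	addrs[n] = dma_map_single(dmadev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	lens[n++] = skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[n] = skb_frag_dma_map(dmadev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		lens[n++] = skb_frag_size(frag);
	}
	return n;	/* caller marks descriptor n-1 as end-of-packet */
}
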
skb               373 drivers/net/ethernet/altera/altera_tse.h 	struct sk_buff *skb;
skb               214 drivers/net/ethernet/altera/altera_tse_main.c 	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
skb               215 drivers/net/ethernet/altera/altera_tse_main.c 	if (!rxbuffer->skb)
skb               218 drivers/net/ethernet/altera/altera_tse_main.c 	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
skb               224 drivers/net/ethernet/altera/altera_tse_main.c 		dev_kfree_skb_any(rxbuffer->skb);
skb               235 drivers/net/ethernet/altera/altera_tse_main.c 	struct sk_buff *skb = rxbuffer->skb;
skb               238 drivers/net/ethernet/altera/altera_tse_main.c 	if (skb != NULL) {
skb               243 drivers/net/ethernet/altera/altera_tse_main.c 		dev_kfree_skb_any(skb);
skb               244 drivers/net/ethernet/altera/altera_tse_main.c 		rxbuffer->skb = NULL;
skb               263 drivers/net/ethernet/altera/altera_tse_main.c 	if (buffer->skb) {
skb               264 drivers/net/ethernet/altera/altera_tse_main.c 		dev_kfree_skb_any(buffer->skb);
skb               265 drivers/net/ethernet/altera/altera_tse_main.c 		buffer->skb = NULL;
skb               341 drivers/net/ethernet/altera/altera_tse_main.c 		if (likely(priv->rx_ring[entry].skb == NULL)) {
skb               353 drivers/net/ethernet/altera/altera_tse_main.c static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
skb               358 drivers/net/ethernet/altera/altera_tse_main.c 	    !__vlan_get_tag(skb, &vid)) {
skb               359 drivers/net/ethernet/altera/altera_tse_main.c 		eth_hdr = (struct ethhdr *)skb->data;
skb               360 drivers/net/ethernet/altera/altera_tse_main.c 		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
skb               361 drivers/net/ethernet/altera/altera_tse_main.c 		skb_pull(skb, VLAN_HLEN);
skb               362 drivers/net/ethernet/altera/altera_tse_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb               372 drivers/net/ethernet/altera/altera_tse_main.c 	struct sk_buff *skb;
skb               402 drivers/net/ethernet/altera/altera_tse_main.c 		skb = priv->rx_ring[entry].skb;
skb               403 drivers/net/ethernet/altera/altera_tse_main.c 		if (unlikely(!skb)) {
skb               410 drivers/net/ethernet/altera/altera_tse_main.c 		priv->rx_ring[entry].skb = NULL;
skb               412 drivers/net/ethernet/altera/altera_tse_main.c 		skb_put(skb, pktlength);
skb               421 drivers/net/ethernet/altera/altera_tse_main.c 				       16, 1, skb->data, pktlength, true);
skb               424 drivers/net/ethernet/altera/altera_tse_main.c 		tse_rx_vlan(priv->dev, skb);
skb               426 drivers/net/ethernet/altera/altera_tse_main.c 		skb->protocol = eth_type_trans(skb, priv->dev);
skb               427 drivers/net/ethernet/altera/altera_tse_main.c 		skb_checksum_none_assert(skb);
skb               429 drivers/net/ethernet/altera/altera_tse_main.c 		napi_gro_receive(&priv->napi, skb);
skb               465 drivers/net/ethernet/altera/altera_tse_main.c 		if (likely(tx_buff->skb))
skb               557 drivers/net/ethernet/altera/altera_tse_main.c static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               563 drivers/net/ethernet/altera/altera_tse_main.c 	int nfrags = skb_shinfo(skb)->nr_frags;
skb               564 drivers/net/ethernet/altera/altera_tse_main.c 	unsigned int nopaged_len = skb_headlen(skb);
skb               586 drivers/net/ethernet/altera/altera_tse_main.c 	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
skb               594 drivers/net/ethernet/altera/altera_tse_main.c 	buffer->skb = skb;
skb               600 drivers/net/ethernet/altera/altera_tse_main.c 	skb_tx_timestamp(skb);
skb               603 drivers/net/ethernet/altera/altera_tse_main.c 	dev->stats.tx_bytes += skb->len;
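
tse_rx_vlan() (353-362) strips an in-band VLAN tag by sliding the two MAC addresses over it, pulling the four tag bytes, and re-presenting the tag out of band. The same steps, restated as a self-contained sketch:

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void my_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    !__vlan_get_tag(skb, &vid)) {
		struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;

		/* close the 4-byte hole left by the tag ... */
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		/* ... and hand the tag to the stack out of band */
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}
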
skb               664 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (!tx_info->skb)
skb               680 drivers/net/ethernet/amazon/ena/ena_netdev.c 		dev_kfree_skb_any(tx_info->skb);
skb               732 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (likely(tx_info->skb))
skb               769 drivers/net/ethernet/amazon/ena/ena_netdev.c 		struct sk_buff *skb;
skb               781 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb = tx_info->skb;
skb               784 drivers/net/ethernet/amazon/ena/ena_netdev.c 		prefetch(&skb->end);
skb               786 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_info->skb = NULL;
skb               793 drivers/net/ethernet/amazon/ena/ena_netdev.c 			  skb);
skb               795 drivers/net/ethernet/amazon/ena/ena_netdev.c 		tx_bytes += skb->len;
skb               796 drivers/net/ethernet/amazon/ena/ena_netdev.c 		dev_kfree_skb(skb);
skb               842 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct sk_buff *skb;
skb               845 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb = napi_get_frags(rx_ring->napi);
skb               847 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
skb               850 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (unlikely(!skb)) {
skb               859 drivers/net/ethernet/amazon/ena/ena_netdev.c 	return skb;
skb               867 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct sk_buff *skb;
skb               891 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb = ena_alloc_skb(rx_ring, false);
skb               892 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (unlikely(!skb))
skb               897 drivers/net/ethernet/amazon/ena/ena_netdev.c 			  skb->len, skb->data_len);
skb               904 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb_copy_to_linear_data(skb, va, len);
skb               910 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb_put(skb, len);
skb               911 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
skb               915 drivers/net/ethernet/amazon/ena/ena_netdev.c 		return skb;
skb               918 drivers/net/ethernet/amazon/ena/ena_netdev.c 	skb = ena_alloc_skb(rx_ring, true);
skb               919 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (unlikely(!skb))
skb               927 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
skb               932 drivers/net/ethernet/amazon/ena/ena_netdev.c 			  skb->len, skb->data_len);
skb               949 drivers/net/ethernet/amazon/ena/ena_netdev.c 	return skb;
skb               959 drivers/net/ethernet/amazon/ena/ena_netdev.c 				   struct sk_buff *skb)
skb               963 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb->ip_summed = CHECKSUM_NONE;
skb               969 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb->ip_summed = CHECKSUM_NONE;
skb               977 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb->ip_summed = CHECKSUM_NONE;
skb               996 drivers/net/ethernet/amazon/ena/ena_netdev.c 			skb->ip_summed = CHECKSUM_NONE;
skb              1001 drivers/net/ethernet/amazon/ena/ena_netdev.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1009 drivers/net/ethernet/amazon/ena/ena_netdev.c 			skb->ip_summed = CHECKSUM_NONE;
skb              1012 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb->ip_summed = CHECKSUM_NONE;
skb              1020 drivers/net/ethernet/amazon/ena/ena_netdev.c 			    struct sk_buff *skb)
skb              1036 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
skb              1055 drivers/net/ethernet/amazon/ena/ena_netdev.c 	struct sk_buff *skb;
skb              1086 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
skb              1090 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (unlikely(!skb)) {
skb              1101 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
skb              1103 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
skb              1105 drivers/net/ethernet/amazon/ena/ena_netdev.c 		skb_record_rx_queue(skb, rx_ring->qid);
skb              1110 drivers/net/ethernet/amazon/ena/ena_netdev.c 			napi_gro_receive(napi, skb);
skb              1112 drivers/net/ethernet/amazon/ena/ena_netdev.c 			total_len += skb->len;
skb              2060 drivers/net/ethernet/amazon/ena/ena_netdev.c static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
skb              2062 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u32 mss = skb_shinfo(skb)->gso_size;
skb              2066 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
skb              2070 drivers/net/ethernet/amazon/ena/ena_netdev.c 			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
skb              2078 drivers/net/ethernet/amazon/ena/ena_netdev.c 		switch (ip_hdr(skb)->version) {
skb              2081 drivers/net/ethernet/amazon/ena/ena_netdev.c 			if (ip_hdr(skb)->frag_off & htons(IP_DF))
skb              2085 drivers/net/ethernet/amazon/ena/ena_netdev.c 			l4_protocol = ip_hdr(skb)->protocol;
skb              2089 drivers/net/ethernet/amazon/ena/ena_netdev.c 			l4_protocol = ipv6_hdr(skb)->nexthdr;
skb              2101 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_meta->l3_hdr_len = skb_network_header_len(skb);
skb              2102 drivers/net/ethernet/amazon/ena/ena_netdev.c 		ena_meta->l3_hdr_offset = skb_network_offset(skb);
skb              2111 drivers/net/ethernet/amazon/ena/ena_netdev.c 				       struct sk_buff *skb)
skb              2115 drivers/net/ethernet/amazon/ena/ena_netdev.c 	num_frags = skb_shinfo(skb)->nr_frags;
skb              2116 drivers/net/ethernet/amazon/ena/ena_netdev.c 	header_len = skb_headlen(skb);
skb              2129 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rc = skb_linearize(skb);
skb              2141 drivers/net/ethernet/amazon/ena/ena_netdev.c 			  struct sk_buff *skb,
skb              2153 drivers/net/ethernet/amazon/ena/ena_netdev.c 	skb_head_len = skb_headlen(skb);
skb              2154 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_info->skb = skb;
skb              2168 drivers/net/ethernet/amazon/ena/ena_netdev.c 		push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
skb              2169 drivers/net/ethernet/amazon/ena/ena_netdev.c 		*push_hdr = skb_header_pointer(skb, 0, push_len,
skb              2172 drivers/net/ethernet/amazon/ena/ena_netdev.c 		if (unlikely(skb->data != *push_hdr)) {
skb              2186 drivers/net/ethernet/amazon/ena/ena_netdev.c 		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
skb              2190 drivers/net/ethernet/amazon/ena/ena_netdev.c 		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
skb              2205 drivers/net/ethernet/amazon/ena/ena_netdev.c 	last_frag = skb_shinfo(skb)->nr_frags;
skb              2208 drivers/net/ethernet/amazon/ena/ena_netdev.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2237 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_info->skb = NULL;
skb              2246 drivers/net/ethernet/amazon/ena/ena_netdev.c static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              2257 drivers/net/ethernet/amazon/ena/ena_netdev.c 	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
skb              2259 drivers/net/ethernet/amazon/ena/ena_netdev.c 	qid = skb_get_queue_mapping(skb);
skb              2263 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rc = ena_check_and_linearize_skb(tx_ring, skb);
skb              2267 drivers/net/ethernet/amazon/ena/ena_netdev.c 	skb_tx_timestamp(skb);
skb              2274 drivers/net/ethernet/amazon/ena/ena_netdev.c 	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
skb              2276 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
skb              2288 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ena_tx_csum(&ena_tx_ctx, skb);
skb              2320 drivers/net/ethernet/amazon/ena/ena_netdev.c 	netdev_tx_sent_queue(txq, skb->len);
skb              2324 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->tx_stats.bytes += skb->len;
skb              2381 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_info->skb = NULL;
skb              2384 drivers/net/ethernet/amazon/ena/ena_netdev.c 	dev_kfree_skb(skb);
skb              2388 drivers/net/ethernet/amazon/ena/ena_netdev.c static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              2396 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (skb_rx_queue_recorded(skb))
skb              2397 drivers/net/ethernet/amazon/ena/ena_netdev.c 		qid = skb_get_rx_queue(skb);
skb              2399 drivers/net/ethernet/amazon/ena/ena_netdev.c 		qid = netdev_pick_tx(dev, skb, NULL);
skb               175 drivers/net/ethernet/amazon/ena/ena_netdev.h 	struct sk_buff *skb;
skb               202 drivers/net/ethernet/amazon/ena/ena_netdev.h 	struct sk_buff *skb;
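
ena_rx_checksum() (959-1012) is a decision tree over completion-descriptor flags: anything the hardware did not fully verify falls back to CHECKSUM_NONE so the stack re-checks it. A condensed sketch with a hypothetical descriptor type standing in for struct ena_com_rx_ctx:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_rx_cqe {		/* hypothetical hw completion flags */
	bool l3_csum_err;
	bool l4_csum_checked;
	bool l4_csum_err;
};

static void my_rx_csum(struct net_device *dev, const struct my_rx_cqe *cqe,
		       struct sk_buff *skb)
{
	if (!(dev->features & NETIF_F_RXCSUM) ||
	    cqe->l3_csum_err || !cqe->l4_csum_checked) {
		skb->ip_summed = CHECKSUM_NONE;	/* let the stack verify */
		return;
	}
	if (cqe->l4_csum_err) {
		skb->ip_summed = CHECKSUM_NONE;	/* bad csum: recheck + count */
		return;
	}
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hw vouched for L4 */
}
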
skb               323 drivers/net/ethernet/amd/7990.c 			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
skb               325 drivers/net/ethernet/amd/7990.c 			if (!skb) {
skb               333 drivers/net/ethernet/amd/7990.c 			skb_reserve(skb, 2);           /* 16 byte align */
skb               334 drivers/net/ethernet/amd/7990.c 			skb_put(skb, len);             /* make room */
skb               335 drivers/net/ethernet/amd/7990.c 			skb_copy_to_linear_data(skb,
skb               338 drivers/net/ethernet/amd/7990.c 			skb->protocol = eth_type_trans(skb, dev);
skb               339 drivers/net/ethernet/amd/7990.c 			netif_rx(skb);
skb               539 drivers/net/ethernet/amd/7990.c int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               550 drivers/net/ethernet/amd/7990.c 		dev_consume_skb_any(skb);
skb               554 drivers/net/ethernet/amd/7990.c 	skblen = skb->len;
skb               564 drivers/net/ethernet/amd/7990.c 			printk("%2.2x ", skb->data[i]);
skb               573 drivers/net/ethernet/amd/7990.c 	if (skb->len < ETH_ZLEN)
skb               575 drivers/net/ethernet/amd/7990.c 	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
skb               584 drivers/net/ethernet/amd/7990.c 	dev_consume_skb_any(skb);
skb               244 drivers/net/ethernet/amd/7990.h int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
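
7990.c (539-584) and the LANCE relatives that follow all transmit by copying the frame into a fixed ring buffer, padding short frames to the 60-byte Ethernet minimum, and freeing the skb on the spot. A sketch of that shared shape; tx_buf stands in for the chip's init-block buffer:

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_lance_xmit(struct sk_buff *skb, struct net_device *dev,
				 void *tx_buf)
{
	unsigned int len;

	if (skb_padto(skb, ETH_ZLEN))	/* zero-pads; frees skb on failure */
		return NETDEV_TX_OK;
	len = max_t(unsigned int, skb->len, ETH_ZLEN);

	skb_copy_from_linear_data(skb, tx_buf, len);
	/* ... store -len in the descriptor (two's complement, as the
	 * tx_ring length writes above do) and kick the chip ... */
	dev->stats.tx_bytes += skb->len;
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;
}
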
skb               295 drivers/net/ethernet/amd/a2065.c 			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
skb               297 drivers/net/ethernet/amd/a2065.c 			if (!skb) {
skb               305 drivers/net/ethernet/amd/a2065.c 			skb_reserve(skb, 2);		/* 16 byte align */
skb               306 drivers/net/ethernet/amd/a2065.c 			skb_put(skb, len);		/* make room */
skb               307 drivers/net/ethernet/amd/a2065.c 			skb_copy_to_linear_data(skb,
skb               310 drivers/net/ethernet/amd/a2065.c 			skb->protocol = eth_type_trans(skb, dev);
skb               311 drivers/net/ethernet/amd/a2065.c 			netif_rx(skb);
skb               535 drivers/net/ethernet/amd/a2065.c static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
skb               545 drivers/net/ethernet/amd/a2065.c 	if (skb_padto(skb, ETH_ZLEN))
skb               547 drivers/net/ethernet/amd/a2065.c 	skblen = max_t(unsigned, skb->len, ETH_ZLEN);
skb               557 drivers/net/ethernet/amd/a2065.c 		       16, 1, skb->data, 64, true);
skb               563 drivers/net/ethernet/amd/a2065.c 	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
skb               576 drivers/net/ethernet/amd/a2065.c 	dev_kfree_skb(skb);
skb               441 drivers/net/ethernet/amd/am79c961a.c am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
skb               455 drivers/net/ethernet/amd/am79c961a.c 	am_writebuffer (dev, bufaddr, skb->data, skb->len);
skb               456 drivers/net/ethernet/amd/am79c961a.c 	am_writeword (dev, hdraddr + 4, -skb->len);
skb               472 drivers/net/ethernet/amd/am79c961a.c 	dev_consume_skb_any(skb);
skb               484 drivers/net/ethernet/amd/am79c961a.c 		struct sk_buff *skb;
skb               515 drivers/net/ethernet/amd/am79c961a.c 		skb = netdev_alloc_skb(dev, len + 2);
skb               517 drivers/net/ethernet/amd/am79c961a.c 		if (skb) {
skb               518 drivers/net/ethernet/amd/am79c961a.c 			skb_reserve(skb, 2);
skb               520 drivers/net/ethernet/amd/am79c961a.c 			am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
skb               522 drivers/net/ethernet/amd/am79c961a.c 			skb->protocol = eth_type_trans(skb, dev);
skb               523 drivers/net/ethernet/amd/am79c961a.c 			netif_rx(skb);
skb               683 drivers/net/ethernet/amd/amd8111e.c 	struct sk_buff *skb, *new_skb;
skb               740 drivers/net/ethernet/amd/amd8111e.c 		skb = lp->rx_skbuff[rx_index];
skb               743 drivers/net/ethernet/amd/amd8111e.c 		skb_put(skb, pkt_len);
skb               750 drivers/net/ethernet/amd/amd8111e.c 		skb->protocol = eth_type_trans(skb, dev);
skb               755 drivers/net/ethernet/amd/amd8111e.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb               758 drivers/net/ethernet/amd/amd8111e.c 		napi_gro_receive(napi, skb);
skb              1248 drivers/net/ethernet/amd/amd8111e.c static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
skb              1259 drivers/net/ethernet/amd/amd8111e.c 	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
skb              1261 drivers/net/ethernet/amd/amd8111e.c 	lp->tx_skbuff[tx_index] = skb;
skb              1265 drivers/net/ethernet/amd/amd8111e.c 	if (skb_vlan_tag_present(skb)) {
skb              1269 drivers/net/ethernet/amd/amd8111e.c 				cpu_to_le16(skb_vlan_tag_get(skb));
skb              1274 drivers/net/ethernet/amd/amd8111e.c 	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
skb               193 drivers/net/ethernet/amd/ariadne.c 			struct sk_buff *skb;
skb               195 drivers/net/ethernet/amd/ariadne.c 			skb = netdev_alloc_skb(dev, pkt_len + 2);
skb               196 drivers/net/ethernet/amd/ariadne.c 			if (skb == NULL) {
skb               210 drivers/net/ethernet/amd/ariadne.c 			skb_reserve(skb, 2);	/* 16 byte align */
skb               211 drivers/net/ethernet/amd/ariadne.c 			skb_put(skb, pkt_len);	/* Make room */
skb               212 drivers/net/ethernet/amd/ariadne.c 			skb_copy_to_linear_data(skb,
skb               215 drivers/net/ethernet/amd/ariadne.c 			skb->protocol = eth_type_trans(skb, dev);
skb               217 drivers/net/ethernet/amd/ariadne.c 				   ((u_short *)skb->data)[6],
skb               218 drivers/net/ethernet/amd/ariadne.c 				   skb->data + 6, skb->data,
skb               219 drivers/net/ethernet/amd/ariadne.c 				   skb->data, skb->len);
skb               221 drivers/net/ethernet/amd/ariadne.c 			netif_rx(skb);
skb               543 drivers/net/ethernet/amd/ariadne.c static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
skb               550 drivers/net/ethernet/amd/ariadne.c 	int len = skb->len;
skb               561 drivers/net/ethernet/amd/ariadne.c 	if (skb->len < ETH_ZLEN) {
skb               562 drivers/net/ethernet/amd/ariadne.c 		if (skb_padto(skb, ETH_ZLEN))
skb               570 drivers/net/ethernet/amd/ariadne.c 		   ((u_short *)skb->data)[6],
skb               571 drivers/net/ethernet/amd/ariadne.c 		   skb->data + 6, skb->data,
skb               572 drivers/net/ethernet/amd/ariadne.c 		   skb->data, skb->len);
skb               581 drivers/net/ethernet/amd/ariadne.c 	priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
skb               583 drivers/net/ethernet/amd/ariadne.c 	memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);
skb               588 drivers/net/ethernet/amd/ariadne.c 		       skb->len > 64 ? 64 : skb->len, true);
skb               594 drivers/net/ethernet/amd/ariadne.c 	dev_kfree_skb(skb);
skb               342 drivers/net/ethernet/amd/atarilance.c static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
skb               774 drivers/net/ethernet/amd/atarilance.c lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               787 drivers/net/ethernet/amd/atarilance.c 	len = skb->len;
skb               794 drivers/net/ethernet/amd/atarilance.c 	if (len > skb->len) {
skb               795 drivers/net/ethernet/amd/atarilance.c 		if (skb_padto(skb, len))
skb               805 drivers/net/ethernet/amd/atarilance.c 				dev->name, ((u_short *)skb->data)[6],
skb               806 drivers/net/ethernet/amd/atarilance.c 				&skb->data[6], skb->data,
skb               807 drivers/net/ethernet/amd/atarilance.c 				(int)skb->data, (int)skb->len );
skb               825 drivers/net/ethernet/amd/atarilance.c 	lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
skb               827 drivers/net/ethernet/amd/atarilance.c 	dev->stats.tx_bytes += skb->len;
skb               828 drivers/net/ethernet/amd/atarilance.c 	dev_kfree_skb( skb );
skb               991 drivers/net/ethernet/amd/atarilance.c 			struct sk_buff *skb;
skb               998 drivers/net/ethernet/amd/atarilance.c 				skb = netdev_alloc_skb(dev, pkt_len + 2);
skb               999 drivers/net/ethernet/amd/atarilance.c 				if (skb == NULL) {
skb              1022 drivers/net/ethernet/amd/atarilance.c 				skb_reserve( skb, 2 );	/* 16 byte align */
skb              1023 drivers/net/ethernet/amd/atarilance.c 				skb_put( skb, pkt_len );	/* Make room */
skb              1024 drivers/net/ethernet/amd/atarilance.c 				lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
skb              1025 drivers/net/ethernet/amd/atarilance.c 				skb->protocol = eth_type_trans( skb, dev );
skb              1026 drivers/net/ethernet/amd/atarilance.c 				netif_rx( skb );
skb               776 drivers/net/ethernet/amd/au1000_eth.c 	struct sk_buff *skb;
skb               795 drivers/net/ethernet/amd/au1000_eth.c 			skb = netdev_alloc_skb(dev, frmlen + 2);
skb               796 drivers/net/ethernet/amd/au1000_eth.c 			if (skb == NULL) {
skb               800 drivers/net/ethernet/amd/au1000_eth.c 			skb_reserve(skb, 2);	/* 16 byte IP header align */
skb               801 drivers/net/ethernet/amd/au1000_eth.c 			skb_copy_to_linear_data(skb,
skb               803 drivers/net/ethernet/amd/au1000_eth.c 			skb_put(skb, frmlen);
skb               804 drivers/net/ethernet/amd/au1000_eth.c 			skb->protocol = eth_type_trans(skb, dev);
skb               805 drivers/net/ethernet/amd/au1000_eth.c 			netif_rx(skb);	/* pass the packet to upper layers */
skb               963 drivers/net/ethernet/amd/au1000_eth.c static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
skb               973 drivers/net/ethernet/amd/au1000_eth.c 				(unsigned)aup, skb->len,
skb               974 drivers/net/ethernet/amd/au1000_eth.c 				skb->data, aup->tx_head);
skb               994 drivers/net/ethernet/amd/au1000_eth.c 	skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
skb               995 drivers/net/ethernet/amd/au1000_eth.c 	if (skb->len < ETH_ZLEN) {
skb               996 drivers/net/ethernet/amd/au1000_eth.c 		for (i = skb->len; i < ETH_ZLEN; i++)
skb              1001 drivers/net/ethernet/amd/au1000_eth.c 		ptxd->len = skb->len;
skb              1008 drivers/net/ethernet/amd/au1000_eth.c 	dev_kfree_skb(skb);
skb               563 drivers/net/ethernet/amd/declance.c 	struct sk_buff *skb;
skb               609 drivers/net/ethernet/amd/declance.c 			skb = netdev_alloc_skb(dev, len + 2);
skb               611 drivers/net/ethernet/amd/declance.c 			if (!skb) {
skb               622 drivers/net/ethernet/amd/declance.c 			skb_reserve(skb, 2);	/* 16 byte align */
skb               623 drivers/net/ethernet/amd/declance.c 			skb_put(skb, len);	/* make room */
skb               625 drivers/net/ethernet/amd/declance.c 			cp_from_buf(lp->type, skb->data,
skb               628 drivers/net/ethernet/amd/declance.c 			skb->protocol = eth_type_trans(skb, dev);
skb               629 drivers/net/ethernet/amd/declance.c 			netif_rx(skb);
skb               898 drivers/net/ethernet/amd/declance.c static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               906 drivers/net/ethernet/amd/declance.c 	len = skb->len;
skb               909 drivers/net/ethernet/amd/declance.c 		if (skb_padto(skb, ETH_ZLEN))
skb               922 drivers/net/ethernet/amd/declance.c 	cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
skb               938 drivers/net/ethernet/amd/declance.c 	dev_kfree_skb(skb);
skb               302 drivers/net/ethernet/amd/lance.c static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
skb               844 drivers/net/ethernet/amd/lance.c 		struct sk_buff *skb = lp->rx_skbuff[i];
skb               847 drivers/net/ethernet/amd/lance.c 		if (skb)
skb               848 drivers/net/ethernet/amd/lance.c 			dev_kfree_skb_any(skb);
skb               870 drivers/net/ethernet/amd/lance.c 		struct sk_buff *skb;
skb               873 drivers/net/ethernet/amd/lance.c 		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
skb               874 drivers/net/ethernet/amd/lance.c 		lp->rx_skbuff[i] = skb;
skb               875 drivers/net/ethernet/amd/lance.c 		if (skb)
skb               876 drivers/net/ethernet/amd/lance.c 			rx_buff = skb->data;
skb               950 drivers/net/ethernet/amd/lance.c static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
skb               977 drivers/net/ethernet/amd/lance.c 		if (skb->len < ETH_ZLEN) {
skb               978 drivers/net/ethernet/amd/lance.c 			if (skb_padto(skb, ETH_ZLEN))
skb               983 drivers/net/ethernet/amd/lance.c 			lp->tx_ring[entry].length = -skb->len;
skb               985 drivers/net/ethernet/amd/lance.c 		lp->tx_ring[entry].length = -skb->len;
skb               989 drivers/net/ethernet/amd/lance.c 	dev->stats.tx_bytes += skb->len;
skb               993 drivers/net/ethernet/amd/lance.c 	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
skb               996 drivers/net/ethernet/amd/lance.c 				   dev->name, (u32)isa_virt_to_bus(skb->data));
skb               997 drivers/net/ethernet/amd/lance.c 		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
skb              1000 drivers/net/ethernet/amd/lance.c 		dev_kfree_skb(skb);
skb              1002 drivers/net/ethernet/amd/lance.c 		lp->tx_skbuff[entry] = skb;
skb              1003 drivers/net/ethernet/amd/lance.c 		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
skb              1175 drivers/net/ethernet/amd/lance.c 			struct sk_buff *skb;
skb              1184 drivers/net/ethernet/amd/lance.c 				skb = dev_alloc_skb(pkt_len + 2);
skb              1185 drivers/net/ethernet/amd/lance.c 				if (!skb)
skb              1200 drivers/net/ethernet/amd/lance.c 				skb_reserve(skb, 2);	/* 16 byte align */
skb              1201 drivers/net/ethernet/amd/lance.c 				skb_put(skb, pkt_len);	/* Make room */
skb              1202 drivers/net/ethernet/amd/lance.c 				skb_copy_to_linear_data(skb,
skb              1205 drivers/net/ethernet/amd/lance.c 				skb->protocol = eth_type_trans(skb, dev);
skb              1206 drivers/net/ethernet/amd/lance.c 				netif_rx(skb);
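
The lance.c hits at 993-1003 show the 24-bit ISA DMA constraint: the LANCE cannot address memory above 16MB, so high buffers are copied into a low bounce buffer first. A sketch of just that test (x86/ISA-specific; bounce_buf is a driver-allocated low-memory buffer):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <asm/io.h>

static void *my_tx_buf(struct sk_buff *skb, void *bounce_buf)
{
	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
		/* above 16MB: transmit from the bounce copy instead */
		skb_copy_from_linear_data(skb, bounce_buf, skb->len);
		return bounce_buf;
	}
	return skb->data;	/* already reachable by the chip */
}
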
skb               255 drivers/net/ethernet/amd/ni65.c static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
skb               605 drivers/net/ethernet/amd/ni65.c 	struct sk_buff *skb = NULL;
skb               610 drivers/net/ethernet/amd/ni65.c 		ret = skb = alloc_skb(2 + 16 + size, GFP_KERNEL | GFP_DMA);
skb               611 drivers/net/ethernet/amd/ni65.c 		if (!skb) {
skb               615 drivers/net/ethernet/amd/ni65.c 		skb_reserve(skb, 2 + 16);
skb               616 drivers/net/ethernet/amd/ni65.c 		skb_put(skb, R_BUF_SIZE);	 /* grab the whole space .. (not necessary) */
skb               617 drivers/net/ethernet/amd/ni65.c 		ptr = skb->data;
skb               627 drivers/net/ethernet/amd/ni65.c 			kfree_skb(skb);
skb              1083 drivers/net/ethernet/amd/ni65.c 			struct sk_buff *skb = alloc_skb(R_BUF_SIZE + 2 + 16, GFP_ATOMIC);
skb              1084 drivers/net/ethernet/amd/ni65.c 			if (skb)
skb              1085 drivers/net/ethernet/amd/ni65.c 				skb_reserve(skb, 16);
skb              1087 drivers/net/ethernet/amd/ni65.c 			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
skb              1089 drivers/net/ethernet/amd/ni65.c 			if (skb)
skb              1091 drivers/net/ethernet/amd/ni65.c 				skb_reserve(skb, 2);
skb              1093 drivers/net/ethernet/amd/ni65.c 				if ((unsigned long)(skb->data + R_BUF_SIZE) > 0x1000000) {
skb              1094 drivers/net/ethernet/amd/ni65.c 					skb_put(skb, len);
skb              1095 drivers/net/ethernet/amd/ni65.c 					skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data), len);
skb              1099 drivers/net/ethernet/amd/ni65.c 					skb_put(skb, R_BUF_SIZE);
skb              1100 drivers/net/ethernet/amd/ni65.c 					p->recv_skb[p->rmdnum] = skb;
skb              1101 drivers/net/ethernet/amd/ni65.c 					rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
skb              1102 drivers/net/ethernet/amd/ni65.c 					skb = skb1;
skb              1103 drivers/net/ethernet/amd/ni65.c 					skb_trim(skb, len);
skb              1106 drivers/net/ethernet/amd/ni65.c 				skb_put(skb, len);
skb              1107 drivers/net/ethernet/amd/ni65.c 				skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum], len);
skb              1111 drivers/net/ethernet/amd/ni65.c 				skb->protocol = eth_type_trans(skb, dev);
skb              1112 drivers/net/ethernet/amd/ni65.c 				netif_rx(skb);
skb              1154 drivers/net/ethernet/amd/ni65.c static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
skb              1167 drivers/net/ethernet/amd/ni65.c 		short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
skb              1172 drivers/net/ethernet/amd/ni65.c 		if ((unsigned long)(skb->data + skb->len) > 0x1000000) {
skb              1175 drivers/net/ethernet/amd/ni65.c 			skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
skb              1176 drivers/net/ethernet/amd/ni65.c 				      skb->len > T_BUF_SIZE ? T_BUF_SIZE :
skb              1177 drivers/net/ethernet/amd/ni65.c 							      skb->len);
skb              1178 drivers/net/ethernet/amd/ni65.c 			if (len > skb->len)
skb              1179 drivers/net/ethernet/amd/ni65.c 				memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
skb              1180 drivers/net/ethernet/amd/ni65.c 			dev_kfree_skb(skb);
skb              1193 drivers/net/ethernet/amd/ni65.c 			tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
skb              1194 drivers/net/ethernet/amd/ni65.c 			p->tmd_skb[p->tmdnum] = skb;
skb               408 drivers/net/ethernet/amd/nmclan_cs.c static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
skb               856 drivers/net/ethernet/amd/nmclan_cs.c static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
skb               865 drivers/net/ethernet/amd/nmclan_cs.c 	dev->name, (long)skb->len);
skb               881 drivers/net/ethernet/amd/nmclan_cs.c     dev->stats.tx_bytes += skb->len;
skb               886 drivers/net/ethernet/amd/nmclan_cs.c     outw(skb->len, ioaddr + AM2150_XMT);
skb               888 drivers/net/ethernet/amd/nmclan_cs.c     outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1);
skb               889 drivers/net/ethernet/amd/nmclan_cs.c     if (skb->len & 1) {
skb               891 drivers/net/ethernet/amd/nmclan_cs.c       outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
skb               906 drivers/net/ethernet/amd/nmclan_cs.c   dev_kfree_skb(skb);
skb              1095 drivers/net/ethernet/amd/nmclan_cs.c       struct sk_buff *skb;
skb              1105 drivers/net/ethernet/amd/nmclan_cs.c       skb = netdev_alloc_skb(dev, pkt_len + 2);
skb              1107 drivers/net/ethernet/amd/nmclan_cs.c       if (skb != NULL) {
skb              1108 drivers/net/ethernet/amd/nmclan_cs.c 	skb_reserve(skb, 2);
skb              1109 drivers/net/ethernet/amd/nmclan_cs.c 	insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
skb              1111 drivers/net/ethernet/amd/nmclan_cs.c 	    *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
skb              1112 drivers/net/ethernet/amd/nmclan_cs.c 	skb->protocol = eth_type_trans(skb, dev);
skb              1114 drivers/net/ethernet/amd/nmclan_cs.c 	netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
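
nmclan_cs (856-906, 1095-1114) moves frames through a 16-bit I/O port: announce the length, stream the payload a word at a time, and push any trailing odd byte separately. A sketch of the transmit side with an invented port offset:

#include <linux/skbuff.h>
#include <asm/io.h>

#define MY_XMT_PORT	0x04	/* hypothetical; the driver uses AM2150_XMT */

static void my_pio_tx(unsigned int ioaddr, struct sk_buff *skb)
{
	outw(skb->len, ioaddr + MY_XMT_PORT);	/* announce frame length */
	outsw(ioaddr + MY_XMT_PORT, skb->data, skb->len >> 1);
	if (skb->len & 1)	/* odd byte cannot go out as a word */
		outb(skb->data[skb->len - 1], ioaddr + MY_XMT_PORT);
}
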
skb               979 drivers/net/ethernet/amd/pcnet32.c 	struct sk_buff *skb;	/* sk buff */
skb              1016 drivers/net/ethernet/amd/pcnet32.c 		skb = netdev_alloc_skb(dev, size);
skb              1017 drivers/net/ethernet/amd/pcnet32.c 		if (!skb) {
skb              1023 drivers/net/ethernet/amd/pcnet32.c 		packet = skb->data;
skb              1024 drivers/net/ethernet/amd/pcnet32.c 		skb_put(skb, size);	/* create space for data */
skb              1025 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_skbuff[x] = skb;
skb              1026 drivers/net/ethernet/amd/pcnet32.c 		lp->tx_ring[x].length = cpu_to_le16(-skb->len);
skb              1044 drivers/net/ethernet/amd/pcnet32.c 			pci_map_single(lp->pci_dev, skb->data, skb->len,
skb              1091 drivers/net/ethernet/amd/pcnet32.c 			skb = lp->rx_skbuff[x];
skb              1093 drivers/net/ethernet/amd/pcnet32.c 				pr_cont(" %02x", *(skb->data + i));
skb              1101 drivers/net/ethernet/amd/pcnet32.c 		skb = lp->rx_skbuff[x];
skb              1104 drivers/net/ethernet/amd/pcnet32.c 			if (*(skb->data + i) != packet[i]) {
skb              1107 drivers/net/ethernet/amd/pcnet32.c 					     i, *(skb->data + i), packet[i]);
skb              1185 drivers/net/ethernet/amd/pcnet32.c 	struct sk_buff *skb;
skb              1242 drivers/net/ethernet/amd/pcnet32.c 				skb = NULL;
skb              1244 drivers/net/ethernet/amd/pcnet32.c 				skb = lp->rx_skbuff[entry];
skb              1249 drivers/net/ethernet/amd/pcnet32.c 				skb_put(skb, pkt_len);
skb              1256 drivers/net/ethernet/amd/pcnet32.c 			skb = NULL;
skb              1258 drivers/net/ethernet/amd/pcnet32.c 		skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
skb              1260 drivers/net/ethernet/amd/pcnet32.c 	if (skb == NULL) {
skb              1265 drivers/net/ethernet/amd/pcnet32.c 		skb_reserve(skb, NET_IP_ALIGN);
skb              1266 drivers/net/ethernet/amd/pcnet32.c 		skb_put(skb, pkt_len);	/* Make room */
skb              1271 drivers/net/ethernet/amd/pcnet32.c 		skb_copy_to_linear_data(skb,
skb              1279 drivers/net/ethernet/amd/pcnet32.c 	dev->stats.rx_bytes += skb->len;
skb              1280 drivers/net/ethernet/amd/pcnet32.c 	skb->protocol = eth_type_trans(skb, dev);
skb              1281 drivers/net/ethernet/amd/pcnet32.c 	netif_receive_skb(skb);
skb              2498 drivers/net/ethernet/amd/pcnet32.c static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
skb              2526 drivers/net/ethernet/amd/pcnet32.c 	lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
skb              2531 drivers/net/ethernet/amd/pcnet32.c 	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
skb              2533 drivers/net/ethernet/amd/pcnet32.c 		dev_kfree_skb_any(skb);
skb              2537 drivers/net/ethernet/amd/pcnet32.c 	lp->tx_skbuff[entry] = skb;
skb              2543 drivers/net/ethernet/amd/pcnet32.c 	dev->stats.tx_bytes += skb->len;
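
pcnet32 (1026, 2526) stores buffer lengths as cpu_to_le16(-skb->len): the chip's byte-count field wants the two's complement of the length, in little-endian. A sketch with a hypothetical descriptor layout:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct my_tx_desc {		/* hypothetical; not pcnet32's layout */
	__le32 base;
	__le16 length;		/* two's complement of byte count */
	__le16 status;
};

static void my_fill_len(struct my_tx_desc *d, const struct sk_buff *skb)
{
	d->length = cpu_to_le16((unsigned short)-skb->len);
}
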
skb               239 drivers/net/ethernet/amd/sun3lance.c static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
skb               516 drivers/net/ethernet/amd/sun3lance.c lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               603 drivers/net/ethernet/amd/sun3lance.c 			dev->name, lp->new_tx, ((u_short *)skb->data)[6],
skb               604 drivers/net/ethernet/amd/sun3lance.c 			DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data),
skb               605 drivers/net/ethernet/amd/sun3lance.c 			(int)skb->data, (int)skb->len );
skb               622 drivers/net/ethernet/amd/sun3lance.c 	len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
skb               628 drivers/net/ethernet/amd/sun3lance.c 	skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len);
skb               629 drivers/net/ethernet/amd/sun3lance.c 	if (len != skb->len)
skb               630 drivers/net/ethernet/amd/sun3lance.c 		memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len);
skb               634 drivers/net/ethernet/amd/sun3lance.c 	dev->stats.tx_bytes += skb->len;
skb               641 drivers/net/ethernet/amd/sun3lance.c 	dev_kfree_skb(skb);
skb               808 drivers/net/ethernet/amd/sun3lance.c 			struct sk_buff *skb;
skb               815 drivers/net/ethernet/amd/sun3lance.c 				skb = netdev_alloc_skb(dev, pkt_len + 2);
skb               816 drivers/net/ethernet/amd/sun3lance.c 				if (skb == NULL) {
skb               845 drivers/net/ethernet/amd/sun3lance.c 				skb_reserve( skb, 2 );	/* 16 byte align */
skb               846 drivers/net/ethernet/amd/sun3lance.c 				skb_put( skb, pkt_len );	/* Make room */
skb               847 drivers/net/ethernet/amd/sun3lance.c 				skb_copy_to_linear_data(skb,
skb               851 drivers/net/ethernet/amd/sun3lance.c 				skb->protocol = eth_type_trans( skb, dev );
skb               852 drivers/net/ethernet/amd/sun3lance.c 				netif_rx( skb );
skb               515 drivers/net/ethernet/amd/sunlance.c 	struct sk_buff *skb;
skb               536 drivers/net/ethernet/amd/sunlance.c 			skb = netdev_alloc_skb(dev, len + 2);
skb               538 drivers/net/ethernet/amd/sunlance.c 			if (skb == NULL) {
skb               548 drivers/net/ethernet/amd/sunlance.c 			skb_reserve(skb, 2);		/* 16 byte align */
skb               549 drivers/net/ethernet/amd/sunlance.c 			skb_put(skb, len);		/* make room */
skb               550 drivers/net/ethernet/amd/sunlance.c 			skb_copy_to_linear_data(skb,
skb               553 drivers/net/ethernet/amd/sunlance.c 			skb->protocol = eth_type_trans(skb, dev);
skb               554 drivers/net/ethernet/amd/sunlance.c 			netif_rx(skb);
skb               647 drivers/net/ethernet/amd/sunlance.c static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len)
skb               649 drivers/net/ethernet/amd/sunlance.c 	u16 *p16 = (u16 *) skb->data;
skb               684 drivers/net/ethernet/amd/sunlance.c 	struct sk_buff *skb;
skb               706 drivers/net/ethernet/amd/sunlance.c 			skb = netdev_alloc_skb(dev, len + 2);
skb               708 drivers/net/ethernet/amd/sunlance.c 			if (skb == NULL) {
skb               718 drivers/net/ethernet/amd/sunlance.c 			skb_reserve (skb, 2);		/* 16 byte align */
skb               719 drivers/net/ethernet/amd/sunlance.c 			skb_put(skb, len);		/* make room */
skb               720 drivers/net/ethernet/amd/sunlance.c 			lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
skb               721 drivers/net/ethernet/amd/sunlance.c 			skb->protocol = eth_type_trans(skb, dev);
skb               722 drivers/net/ethernet/amd/sunlance.c 			netif_rx(skb);
skb              1110 drivers/net/ethernet/amd/sunlance.c static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1115 drivers/net/ethernet/amd/sunlance.c 	skblen = skb->len;
skb              1128 drivers/net/ethernet/amd/sunlance.c 		lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen);
skb              1136 drivers/net/ethernet/amd/sunlance.c 		skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen);
skb              1158 drivers/net/ethernet/amd/sunlance.c 	dev_kfree_skb(skb);
skb               485 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (rdata->skb) {
skb               486 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		dev_kfree_skb_any(rdata->skb);
skb               487 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		rdata->skb = NULL;
skb               517 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		rdata->state.skb = NULL;
skb               523 drivers/net/ethernet/amd/xgbe/xgbe-desc.c static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
skb               558 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		skb_dma = dma_map_single(pdata->dev, skb->data,
skb               579 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	for (datalen = skb_headlen(skb) - offset; datalen; ) {
skb               582 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
skb               603 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               607 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		frag = &skb_shinfo(skb)->frags[i];
skb               643 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	rdata->skb = skb;
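Editor's note: xgbe_map_tx_skb above performs the standard two-phase mapping of a scatter-gather skb: dma_map_single() over the linear area (which xgbe may split into pieces, as its loop over skb_headlen() shows), then skb_frag_dma_map() over each entry of skb_shinfo(skb)->frags[]. A condensed sketch; foo_fill_desc() is a hypothetical descriptor writer and error unwinding is elided:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static void foo_fill_desc(dma_addr_t dma, unsigned int len);   /* hypothetical */

static int foo_map_tx_skb(struct device *dev, struct sk_buff *skb)
{
        unsigned int len = skb_headlen(skb);
        dma_addr_t dma;
        int i;

        /* Linear part first ... */
        dma = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;
        foo_fill_desc(dma, len);

        /* ... then each paged fragment. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
                                       DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma))
                        return -ENOMEM; /* real code unmaps prior buffers */
                foo_fill_desc(dma, skb_frag_size(frag));
        }

        return 0;
}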
skb              1711 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 				struct sk_buff *skb,
skb              1723 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			pdata->tx_tstamp_skb = skb_get(skb);
skb              1724 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              1729 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	skb_tx_timestamp(skb);
skb              1732 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
skb              1734 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (skb_vlan_tag_present(skb))
skb              1735 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		packet->vlan_ctag = skb_vlan_tag_get(skb);
skb              1738 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
skb              1746 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	ret = skb_cow_head(skb, 0);
skb              1751 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		packet->header_len = skb_inner_transport_offset(skb) +
skb              1752 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 				     inner_tcp_hdrlen(skb);
skb              1753 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		packet->tcp_header_len = inner_tcp_hdrlen(skb);
skb              1755 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		packet->header_len = skb_transport_offset(skb) +
skb              1756 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 				     tcp_hdrlen(skb);
skb              1757 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		packet->tcp_header_len = tcp_hdrlen(skb);
skb              1759 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	packet->tcp_payload_len = skb->len - packet->header_len;
skb              1760 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	packet->mss = skb_shinfo(skb)->gso_size;
skb              1770 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	packet->tx_packets = skb_shinfo(skb)->gso_segs;
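Editor's note: xgbe_prep_tso above computes the three numbers every TSO engine needs, the header length up to and including TCP, the MSS, and the remaining payload, after skb_cow_head() guarantees the headers are writable; the inner_* variants on its lines 1751-1753 handle VXLAN-encapsulated frames. A sketch of the non-encapsulated case, with struct foo_pkt as a hypothetical metadata holder:

#include <linux/skbuff.h>
#include <linux/tcp.h>

struct foo_pkt {                                /* hypothetical per-packet info */
        unsigned int header_len;
        unsigned int tcp_header_len;
        unsigned int tcp_payload_len;
        unsigned int mss;
};

static int foo_prep_tso(struct sk_buff *skb, struct foo_pkt *pkt)
{
        int ret = skb_cow_head(skb, 0);         /* make headers writable */

        if (ret)
                return ret;

        pkt->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        pkt->tcp_header_len = tcp_hdrlen(skb);
        pkt->tcp_payload_len = skb->len - pkt->header_len;
        pkt->mss = skb_shinfo(skb)->gso_size;

        return 0;
}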
skb              1776 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb)
skb              1783 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (!skb->encapsulation)
skb              1786 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1789 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	switch (skb->protocol) {
skb              1791 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (ip_hdr(skb)->protocol != IPPROTO_UDP)
skb              1796 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
skb              1806 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if ((skb->protocol == htons(ETH_P_IP)) &&
skb              1808 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		    (vdata->port == udp_hdr(skb)->dest))
skb              1810 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		else if ((skb->protocol == htons(ETH_P_IPV6)) &&
skb              1812 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			 (vdata->port == udp_hdr(skb)->dest))
skb              1819 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static int xgbe_is_tso(struct sk_buff *skb)
skb              1821 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1824 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (!skb_is_gso(skb))
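Editor's note: xgbe_is_tso is the canonical two-part eligibility test, checksum offload must have been requested by the stack and the skb must carry GSO state. Sketched:

#include <linux/skbuff.h>

static bool foo_is_tso(const struct sk_buff *skb)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return false;                   /* no offload requested */

        return skb_is_gso(skb);                 /* i.e. gso_size != 0 */
}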
skb              1833 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			     struct xgbe_ring *ring, struct sk_buff *skb,
skb              1841 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	packet->skb = skb;
skb              1847 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	packet->tx_bytes = skb->len;
skb              1849 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (xgbe_is_tso(skb)) {
skb              1851 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
skb              1863 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1867 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (xgbe_is_vxlan(pdata, skb))
skb              1871 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (skb_vlan_tag_present(skb)) {
skb              1873 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
skb              1884 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              1889 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	for (len = skb_headlen(skb); len;) {
skb              1894 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1895 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		frag = &skb_shinfo(skb)->frags[i];
skb              2011 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              2022 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
skb              2024 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	channel = pdata->channel[skb->queue_mapping];
skb              2031 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (skb->len == 0) {
skb              2034 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		dev_kfree_skb_any(skb);
skb              2040 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	xgbe_packet_info(pdata, ring, skb, packet);
skb              2047 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	ret = xgbe_prep_tso(skb, packet);
skb              2051 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		dev_kfree_skb_any(skb);
skb              2054 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	xgbe_prep_vlan(skb, packet);
skb              2056 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (!desc_if->map_tx_skb(channel, skb)) {
skb              2057 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		dev_kfree_skb_any(skb);
skb              2061 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	xgbe_prep_tx_tstamp(pdata, skb, packet);
skb              2070 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		xgbe_print_pkt(netdev, skb, true);
skb              2477 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static netdev_features_t xgbe_features_check(struct sk_buff *skb,
skb              2481 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	features = vlan_features_check(skb, features);
skb              2482 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	features = vxlan_features_check(skb, features);
skb              2553 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct sk_buff *skb;
skb              2556 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
skb              2557 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (!skb)
skb              2569 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	skb_copy_to_linear_data(skb, packet, len);
skb              2570 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	skb_put(skb, len);
skb              2572 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	return skb;
skb              2692 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct sk_buff *skb;
skb              2717 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			skb = rdata->state.skb;
skb              2722 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			skb = NULL;
skb              2756 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			dev_kfree_skb(skb);
skb              2767 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			if (!skb) {
skb              2768 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 				skb = xgbe_create_skb(pdata, napi, rdata,
skb              2770 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 				if (!skb) {
skb              2783 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb              2796 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (!skb)
skb              2802 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		    (skb->protocol == htons(ETH_P_8021Q)))
skb              2805 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (skb->len > max_len) {
skb              2808 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			dev_kfree_skb(skb);
skb              2813 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			xgbe_print_pkt(netdev, skb, false);
skb              2815 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		skb_checksum_none_assert(skb);
skb              2818 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2822 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			skb->encapsulation = 1;
skb              2826 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 				skb->csum_level = 1;
skb              2831 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              2840 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			hwtstamps = skb_hwtstamps(skb);
skb              2846 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			skb_set_hash(skb, packet->rss_hash,
skb              2849 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		skb->dev = netdev;
skb              2850 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		skb->protocol = eth_type_trans(skb, netdev);
skb              2851 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		skb_record_rx_queue(skb, channel->queue_index);
skb              2853 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		napi_gro_receive(napi, skb);
skb              2863 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		rdata->state.skb = skb;
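Editor's note: the xgbe receive poll above finishes each frame with the usual completion steps, checksum verdict, VLAN tag injection, RSS hash, queue record, then GRO, and when a frame spans poll boundaries it parks the partial skb in rdata->state.skb (line 2863) to resume next time. A sketch of the completion steps, with struct foo_cqe as a hypothetical summary of the hardware flags:

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_cqe {                                /* hypothetical completion info */
        bool csum_ok;
        bool vlan_present;
        u16 vlan_tci;
        u32 rss_hash;
};

static void foo_rx_complete(struct napi_struct *napi, struct net_device *ndev,
                            struct sk_buff *skb, const struct foo_cqe *cqe,
                            unsigned int queue)
{
        skb_checksum_none_assert(skb);
        if (cqe->csum_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (cqe->vlan_present)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       cqe->vlan_tci);

        skb_set_hash(skb, cqe->rss_hash, PKT_HASH_TYPE_L4);

        skb->protocol = eth_type_trans(skb, ndev);
        skb_record_rx_queue(skb, queue);
        napi_gro_receive(napi, skb);
}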
skb              2976 drivers/net/ethernet/amd/xgbe/xgbe-drv.c void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
skb              2978 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct ethhdr *eth = (struct ethhdr *)skb->data;
skb              2985 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		   (tx_rx ? "TX" : "RX"), skb->len);
skb              2991 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	for (i = 0; i < skb->len; i += 32) {
skb              2992 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		unsigned int len = min(skb->len - i, 32U);
skb              2994 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
skb               351 drivers/net/ethernet/amd/xgbe/xgbe.h 	struct sk_buff *skb;
skb               426 drivers/net/ethernet/amd/xgbe/xgbe.h 	struct sk_buff *skb;		/* Virtual address of SKB */
skb               442 drivers/net/ethernet/amd/xgbe/xgbe.h 		struct sk_buff *skb;
skb                73 drivers/net/ethernet/apm/xgene-v2/main.c 	struct sk_buff *skb;
skb                82 drivers/net/ethernet/apm/xgene-v2/main.c 		skb = netdev_alloc_skb(ndev, len);
skb                83 drivers/net/ethernet/apm/xgene-v2/main.c 		if (unlikely(!skb))
skb                86 drivers/net/ethernet/apm/xgene-v2/main.c 		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
skb                89 drivers/net/ethernet/apm/xgene-v2/main.c 			dev_kfree_skb_any(skb);
skb                93 drivers/net/ethernet/apm/xgene-v2/main.c 		ring->pkt_info[tail].skb = skb;
skb               172 drivers/net/ethernet/apm/xgene-v2/main.c static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               186 drivers/net/ethernet/apm/xgene-v2/main.c 	len = skb_headlen(skb);
skb               198 drivers/net/ethernet/apm/xgene-v2/main.c 		dev_kfree_skb_any(skb);
skb               201 drivers/net/ethernet/apm/xgene-v2/main.c 	memcpy(pkt_buf, skb->data, len);
skb               210 drivers/net/ethernet/apm/xgene-v2/main.c 	tx_ring->pkt_info[tail].skb = skb;
skb               219 drivers/net/ethernet/apm/xgene-v2/main.c 	skb_tx_timestamp(skb);
skb               243 drivers/net/ethernet/apm/xgene-v2/main.c 	struct sk_buff *skb;
skb               263 drivers/net/ethernet/apm/xgene-v2/main.c 		skb = tx_ring->pkt_info[head].skb;
skb               267 drivers/net/ethernet/apm/xgene-v2/main.c 		pdata->stats.tx_bytes += skb->len;
skb               269 drivers/net/ethernet/apm/xgene-v2/main.c 		dev_kfree_skb_any(skb);
skb               291 drivers/net/ethernet/apm/xgene-v2/main.c 	struct sk_buff *skb;
skb               314 drivers/net/ethernet/apm/xgene-v2/main.c 		skb = rx_ring->pkt_info[head].skb;
skb               315 drivers/net/ethernet/apm/xgene-v2/main.c 		rx_ring->pkt_info[head].skb = NULL;
skb               324 drivers/net/ethernet/apm/xgene-v2/main.c 			dev_kfree_skb_any(skb);
skb               328 drivers/net/ethernet/apm/xgene-v2/main.c 		skb_put(skb, len);
skb               329 drivers/net/ethernet/apm/xgene-v2/main.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               333 drivers/net/ethernet/apm/xgene-v2/main.c 		napi_gro_receive(&pdata->napi, skb);
skb               374 drivers/net/ethernet/apm/xgene-v2/main.c 	struct sk_buff *skb;
skb               379 drivers/net/ethernet/apm/xgene-v2/main.c 		skb = ring->pkt_info[i].skb;
skb               382 drivers/net/ethernet/apm/xgene-v2/main.c 		if (!skb)
skb               387 drivers/net/ethernet/apm/xgene-v2/main.c 		dev_kfree_skb_any(skb);
skb               558 drivers/net/ethernet/apm/xgene-v2/main.c 	struct sk_buff *skb;
skb               570 drivers/net/ethernet/apm/xgene-v2/main.c 		skb = tx_ring->pkt_info[i].skb;
skb               574 drivers/net/ethernet/apm/xgene-v2/main.c 		dev_kfree_skb_any(skb);
skb                67 drivers/net/ethernet/apm/xgene-v2/ring.h 	struct sk_buff *skb;
skb               121 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct sk_buff *skb;
skb               142 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		skb = netdev_alloc_skb_ip_align(ndev, len);
skb               143 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (unlikely(!skb))
skb               146 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
skb               149 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			dev_kfree_skb_any(skb);
skb               153 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		buf_pool->rx_skb[tail] = skb;
skb               229 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct sk_buff *skb;
skb               239 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	skb = cp_ring->cp_skb[skb_index];
skb               244 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			 skb_headlen(skb),
skb               247 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               248 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		frag = &skb_shinfo(skb)->frags[i];
skb               267 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (likely(skb)) {
skb               268 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		dev_kfree_skb_any(skb);
skb               307 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
skb               309 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct net_device *ndev = skb->dev;
skb               317 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ethhdr = xgene_enet_hdr_len(skb->data);
skb               319 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
skb               320 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
skb               323 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
skb               326 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	iph = ip_hdr(skb);
skb               331 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		l4hlen = tcp_hdrlen(skb) >> 2;
skb               335 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
skb               336 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			mss = skb_shinfo(skb)->gso_size;
skb               338 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			if (skb_is_nonlinear(skb)) {
skb               339 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				len = skb_headlen(skb);
skb               340 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				nr_frags = skb_shinfo(skb)->nr_frags;
skb               344 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 						&skb_shinfo(skb)->frags[i]);
skb               348 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 					if (skb_linearize(skb))
skb               353 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			if (!mss || ((skb->len - hdr_len) <= mss))
skb               367 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	l3hlen = ip_hdrlen(skb) >> 2;
skb               407 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				    struct sk_buff *skb)
skb               428 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ret = xgene_enet_work_msg(skb, &hopinfo);
skb               435 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	len = skb_headlen(skb);
skb               438 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
skb               449 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (!skb_is_nonlinear(skb))
skb               458 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb               466 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			frag = &skb_shinfo(skb)->frags[fidx];
skb               529 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			dev_kfree_skb_any(skb);
skb               542 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
skb               549 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
skb               554 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	int index = skb->queue_mapping;
skb               567 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
skb               570 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	count = xgene_enet_setup_tx_desc(tx_ring, skb);
skb               575 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		dev_kfree_skb_any(skb);
skb               579 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	skb_tx_timestamp(skb);
skb               582 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	tx_ring->tx_bytes += skb->len;
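Editor's note: xgene_enet_start_xmit shows two ordering rules worth calling out. First, skb_padto() frees the skb itself on failure, so the caller must return NETDEV_TX_OK without touching it again. Second, skb_tx_timestamp() runs only once the frame is committed to hardware. A sketch; foo_push_desc() is a hypothetical ring-post helper and ETH_ZLEN stands in for the driver's XGENE_MIN_ENET_FRAME_SIZE:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_push_desc(struct sk_buff *skb);  /* hypothetical */

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        if (skb_padto(skb, ETH_ZLEN))           /* pad short frames */
                return NETDEV_TX_OK;            /* skb already freed */

        if (foo_push_desc(skb) < 0) {           /* unrecoverable error; a full
                                                 * ring would instead stop the
                                                 * queue and return
                                                 * NETDEV_TX_BUSY */
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        skb_tx_timestamp(skb);                  /* only after commit */
        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += skb->len;

        return NETDEV_TX_OK;
}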
skb               588 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static void xgene_enet_rx_csum(struct sk_buff *skb)
skb               590 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct net_device *ndev = skb->dev;
skb               591 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct iphdr *iph = ip_hdr(skb);
skb               596 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (skb->protocol != htons(ETH_P_IP))
skb               605 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
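Editor's note: xgene_enet_rx_csum gates the hardware's RX checksum verdict in software: only unfragmented IPv4 TCP/UDP is trusted, and only while the device advertises NETIF_F_RXCSUM. A sketch of that gate:

#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/ip.h>

static void foo_rx_csum(struct sk_buff *skb)
{
        struct net_device *ndev = skb->dev;
        const struct iphdr *iph = ip_hdr(skb);

        if (!(ndev->features & NETIF_F_RXCSUM))
                return;
        if (skb->protocol != htons(ETH_P_IP))
                return;
        if (ip_is_fragment(iph))
                return;                         /* no L4 header to trust */
        if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
                return;

        skb->ip_summed = CHECKSUM_UNNECESSARY;  /* hardware verified it */
}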
skb               646 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
skb               651 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	    skb->protocol == htons(ETH_P_8021Q))
skb               658 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
skb               661 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (ntohs(eth_hdr(skb)->h_proto) < 46)
skb               677 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct sk_buff *skb;
skb               695 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	skb = buf_pool->rx_skb[skb_index];
skb               699 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	skb_put(skb, datalen);
skb               700 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	prefetch(skb->data - NET_IP_ALIGN);
skb               701 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	skb->protocol = eth_type_trans(skb, ndev);
skb               707 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
skb               709 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		} else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
skb               712 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			dev_kfree_skb_any(skb);
skb               740 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
skb               750 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;
skb               753 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	skb_checksum_none_assert(skb);
skb               754 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	xgene_enet_rx_csum(skb);
skb               758 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	napi_gro_receive(&rx_ring->napi, skb);
skb               145 drivers/net/ethernet/apple/bmac.c static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
skb               158 drivers/net/ethernet/apple/bmac.c static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
skb               561 drivers/net/ethernet/apple/bmac.c bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
skb               567 drivers/net/ethernet/apple/bmac.c 	len = skb->len;
skb               568 drivers/net/ethernet/apple/bmac.c 	vaddr = skb->data;
skb               575 drivers/net/ethernet/apple/bmac.c bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
skb               577 drivers/net/ethernet/apple/bmac.c 	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
skb               610 drivers/net/ethernet/apple/bmac.c 	struct sk_buff *skb;
skb               616 drivers/net/ethernet/apple/bmac.c 		if ((skb = bp->rx_bufs[i]) == NULL) {
skb               617 drivers/net/ethernet/apple/bmac.c 			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
skb               618 drivers/net/ethernet/apple/bmac.c 			if (skb != NULL)
skb               619 drivers/net/ethernet/apple/bmac.c 				skb_reserve(skb, 2);
skb               621 drivers/net/ethernet/apple/bmac.c 		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
skb               639 drivers/net/ethernet/apple/bmac.c static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
skb               660 drivers/net/ethernet/apple/bmac.c 	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
skb               662 drivers/net/ethernet/apple/bmac.c 	bp->tx_bufs[bp->tx_fill] = skb;
skb               665 drivers/net/ethernet/apple/bmac.c 	dev->stats.tx_bytes += skb->len;
skb               681 drivers/net/ethernet/apple/bmac.c 	struct sk_buff *skb;
skb               703 drivers/net/ethernet/apple/bmac.c 			skb = NULL;
skb               707 drivers/net/ethernet/apple/bmac.c 			skb = bp->rx_bufs[i];
skb               710 drivers/net/ethernet/apple/bmac.c 		if (skb != NULL) {
skb               712 drivers/net/ethernet/apple/bmac.c 			skb_put(skb, nb);
skb               713 drivers/net/ethernet/apple/bmac.c 			skb->protocol = eth_type_trans(skb, dev);
skb               714 drivers/net/ethernet/apple/bmac.c 			netif_rx(skb);
skb               720 drivers/net/ethernet/apple/bmac.c 		if ((skb = bp->rx_bufs[i]) == NULL) {
skb               721 drivers/net/ethernet/apple/bmac.c 			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
skb               722 drivers/net/ethernet/apple/bmac.c 			if (skb != NULL)
skb               725 drivers/net/ethernet/apple/bmac.c 		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
skb              1198 drivers/net/ethernet/apple/bmac.c 	struct sk_buff *skb;
skb              1214 drivers/net/ethernet/apple/bmac.c 	skb = netdev_alloc_skb(dev, ETHERMINPACKET);
skb              1215 drivers/net/ethernet/apple/bmac.c 	if (skb != NULL) {
skb              1216 drivers/net/ethernet/apple/bmac.c 		data = skb_put_zero(skb, ETHERMINPACKET);
skb              1219 drivers/net/ethernet/apple/bmac.c 		bmac_transmit_packet(skb, dev);
skb              1439 drivers/net/ethernet/apple/bmac.c 	struct sk_buff *skb;
skb              1452 drivers/net/ethernet/apple/bmac.c 		skb = skb_dequeue(bp->queue);
skb              1453 drivers/net/ethernet/apple/bmac.c 		if (skb == NULL)
skb              1455 drivers/net/ethernet/apple/bmac.c 		bmac_transmit_packet(skb, dev);
skb              1461 drivers/net/ethernet/apple/bmac.c bmac_output(struct sk_buff *skb, struct net_device *dev)
skb              1464 drivers/net/ethernet/apple/bmac.c 	skb_queue_tail(bp->queue, skb);
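Editor's note: bmac is unusual among the drivers listed here in that bmac_output does not program the hardware at all; it only appends the skb to a driver-private sk_buff_head, and the interrupt-side start routine (lines 1439-1455) drains that queue. A minimal sketch of the scheme, assuming a single queue initialized elsewhere with skb_queue_head_init() and a hypothetical foo_hw_transmit():

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff_head foo_txq;     /* skb_queue_head_init() at probe */

static void foo_hw_transmit(struct sk_buff *skb,
                            struct net_device *dev);    /* hypothetical */

static netdev_tx_t foo_output(struct sk_buff *skb, struct net_device *dev)
{
        skb_queue_tail(&foo_txq, skb);          /* defer to the drain path */
        return NETDEV_TX_OK;
}

static void foo_drain_txq(struct net_device *dev)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&foo_txq)) != NULL)
                foo_hw_transmit(skb, dev);
}

skb_queue_tail() and skb_dequeue() take the queue's own lock with interrupts disabled, which is why the pattern is safe from both process and interrupt context.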
skb                82 drivers/net/ethernet/apple/mace.c static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
skb               436 drivers/net/ethernet/apple/mace.c     struct sk_buff *skb;
skb               447 drivers/net/ethernet/apple/mace.c 	skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
skb               448 drivers/net/ethernet/apple/mace.c 	if (!skb) {
skb               451 drivers/net/ethernet/apple/mace.c 	    skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
skb               452 drivers/net/ethernet/apple/mace.c 	    data = skb->data;
skb               454 drivers/net/ethernet/apple/mace.c 	mp->rx_bufs[i] = skb;
skb               529 drivers/net/ethernet/apple/mace.c static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
skb               552 drivers/net/ethernet/apple/mace.c     len = skb->len;
skb               557 drivers/net/ethernet/apple/mace.c     mp->tx_bufs[fill] = skb;
skb               560 drivers/net/ethernet/apple/mace.c     cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data));
skb               881 drivers/net/ethernet/apple/mace.c     struct sk_buff *skb;
skb               906 drivers/net/ethernet/apple/mace.c 	skb = mp->rx_bufs[i];
skb               907 drivers/net/ethernet/apple/mace.c 	if (!skb) {
skb               910 drivers/net/ethernet/apple/mace.c 	    data = skb->data;
skb               929 drivers/net/ethernet/apple/mace.c 		skb_put(skb, nb);
skb               930 drivers/net/ethernet/apple/mace.c 		skb->protocol = eth_type_trans(skb, dev);
skb               931 drivers/net/ethernet/apple/mace.c 		dev->stats.rx_bytes += skb->len;
skb               932 drivers/net/ethernet/apple/mace.c 		netif_rx(skb);
skb               955 drivers/net/ethernet/apple/mace.c 	skb = mp->rx_bufs[i];
skb               956 drivers/net/ethernet/apple/mace.c 	if (!skb) {
skb               957 drivers/net/ethernet/apple/mace.c 	    skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
skb               958 drivers/net/ethernet/apple/mace.c 	    if (skb) {
skb               959 drivers/net/ethernet/apple/mace.c 		skb_reserve(skb, 2);
skb               960 drivers/net/ethernet/apple/mace.c 		mp->rx_bufs[i] = skb;
skb               964 drivers/net/ethernet/apple/mace.c 	data = skb? skb->data: dummy_buf;
skb                88 drivers/net/ethernet/apple/macmace.c static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
skb               443 drivers/net/ethernet/apple/macmace.c static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
skb               461 drivers/net/ethernet/apple/macmace.c 	dev->stats.tx_bytes += skb->len;
skb               464 drivers/net/ethernet/apple/macmace.c 	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);
skb               469 drivers/net/ethernet/apple/macmace.c 	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
skb               474 drivers/net/ethernet/apple/macmace.c 	dev_kfree_skb(skb);
skb               637 drivers/net/ethernet/apple/macmace.c 	struct sk_buff *skb;
skb               653 drivers/net/ethernet/apple/macmace.c 		skb = netdev_alloc_skb(dev, frame_length + 2);
skb               654 drivers/net/ethernet/apple/macmace.c 		if (!skb) {
skb               658 drivers/net/ethernet/apple/macmace.c 		skb_reserve(skb, 2);
skb               659 drivers/net/ethernet/apple/macmace.c 		skb_put_data(skb, mf->data, frame_length);
skb               661 drivers/net/ethernet/apple/macmace.c 		skb->protocol = eth_type_trans(skb, dev);
skb               662 drivers/net/ethernet/apple/macmace.c 		netif_rx(skb);
skb                92 drivers/net/ethernet/aquantia/atlantic/aq_main.c static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb                96 drivers/net/ethernet/aquantia/atlantic/aq_main.c 	return aq_nic_xmit(aq_nic, skb);
skb               424 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 				   struct sk_buff *skb,
skb               428 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
skb               437 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	if (unlikely(skb_is_gso(skb))) {
skb               438 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dx_buff->mss = skb_shinfo(skb)->gso_size;
skb               440 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dx_buff->len_pkt = skb->len;
skb               442 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dx_buff->len_l3 = ip_hdrlen(skb);
skb               443 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dx_buff->len_l4 = tcp_hdrlen(skb);
skb               446 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 			(ip_hdr(skb)->version == 6) ? 1U : 0U;
skb               450 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
skb               451 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
skb               452 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dx_buff->len_pkt = skb->len;
skb               464 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	dx_buff->len = skb_headlen(skb);
skb               466 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 				     skb->data,
skb               476 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	dx_buff->len_pkt = skb->len;
skb               481 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               482 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
skb               485 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		if (ip_hdr(skb)->version == 4) {
skb               487 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
skb               490 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
skb               492 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		} else if (ip_hdr(skb)->version == 6) {
skb               494 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
skb               497 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
skb               507 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
skb               545 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	dx_buff->skb = skb;
skb               573 drivers/net/ethernet/aquantia/atlantic/aq_nic.c int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
skb               577 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
skb               581 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	frags = skb_shinfo(skb)->nr_frags + 1;
skb               586 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dev_kfree_skb_any(skb);
skb               598 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	frags = aq_nic_map_skb(self, skb, ring);
skb               129 drivers/net/ethernet/aquantia/atlantic/aq_nic.h int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
skb               248 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			self->stats.tx.bytes += buff->skb->len;
skb               250 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			dev_kfree_skb_any(buff->skb);
skb               262 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			   struct sk_buff *skb)
skb               269 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		skb->ip_summed = CHECKSUM_NONE;
skb               273 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		__skb_incr_checksum_unnecessary(skb);
skb               275 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		skb->ip_summed = CHECKSUM_NONE;
skb               279 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		__skb_incr_checksum_unnecessary(skb);
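Editor's note: aq_rx_checksum reports checksum status in layers: each level the hardware verified (outer IP, then TCP/UDP) bumps the CHECKSUM_UNNECESSARY count via __skb_incr_checksum_unnecessary(), and any reported error falls back to CHECKSUM_NONE so the stack re-verifies in software. A sketch with the hardware status abstracted into three booleans:

#include <linux/skbuff.h>

static void foo_rx_checksum(struct sk_buff *skb, bool ip_err,
                            bool l4_checked, bool l4_err)
{
        if (ip_err) {
                skb->ip_summed = CHECKSUM_NONE; /* let the stack verify */
                return;
        }
        __skb_incr_checksum_unnecessary(skb);   /* IP header verified */

        if (!l4_checked)
                return;                         /* hardware skipped L4 */

        if (l4_err)
                skb->ip_summed = CHECKSUM_NONE;
        else
                __skb_incr_checksum_unnecessary(skb); /* TCP/UDP verified */
}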
skb               297 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		struct sk_buff *skb = NULL;
skb               354 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
skb               356 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			if (unlikely(!skb)) {
skb               360 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			skb_put(skb, buff->len);
skb               363 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
skb               364 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			if (unlikely(!skb)) {
skb               371 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 				hdr_len = eth_get_headlen(skb->dev,
skb               375 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
skb               379 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 				skb_add_rx_frag(skb, 0, buff->rxdata.page,
skb               399 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 					skb_add_rx_frag(skb, i++,
skb               417 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               420 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               422 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		aq_rx_checksum(self, buff, skb);
skb               424 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		skb_set_hash(skb, buff->rss_hash,
skb               428 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		skb_record_rx_queue(skb, self->idx);
skb               431 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		self->stats.rx.bytes += skb->len;
skb               433 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		napi_gro_receive(napi, skb);
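Editor's note: the aq_ring receive loop above picks between two skb-construction strategies: build_skb() wraps the DMA buffer in place when the frame fits one buffer; otherwise a small skb is allocated with napi_alloc_skb(), only the protocol headers (sized by eth_get_headlen()) are copied, and the payload is attached as page fragments with skb_add_rx_frag(). A condensed sketch under the assumption of one page-backed buffer per frame; the 256-byte header budget stands in for AQ_CFG_RX_HDR_SIZE and buffer recycling is elided:

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *foo_build_rx_skb(struct napi_struct *napi,
                                        struct net_device *ndev,
                                        void *buf, struct page *page,
                                        unsigned int off, unsigned int len,
                                        unsigned int truesize, bool fits)
{
        struct sk_buff *skb;
        unsigned int hdr_len;

        if (fits) {                             /* zero-copy: wrap the buffer */
                skb = build_skb(buf, truesize);
                if (skb)
                        skb_put(skb, len);
                return skb;
        }

        skb = napi_alloc_skb(napi, 256);        /* header-sized skb */
        if (!skb)
                return NULL;

        hdr_len = eth_get_headlen(ndev, buf, min(len, 256U));
        skb_put_data(skb, buf, hdr_len);        /* copy headers only */
        skb_add_rx_frag(skb, 0, page, off + hdr_len, len - hdr_len,
                        truesize);              /* payload stays in the page */

        return skb;
}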
skb                52 drivers/net/ethernet/aquantia/atlantic/aq_ring.h 			struct sk_buff *skb;
skb               101 drivers/net/ethernet/arc/emac.h 	struct sk_buff *skb;
skb               121 drivers/net/ethernet/arc/emac_main.c 		struct sk_buff *skb = tx_buff->skb;
skb               124 drivers/net/ethernet/arc/emac_main.c 		if ((info & FOR_EMAC) || !txbd->data || !skb)
skb               141 drivers/net/ethernet/arc/emac_main.c 			stats->tx_bytes += skb->len;
skb               148 drivers/net/ethernet/arc/emac_main.c 		dev_consume_skb_irq(skb);
skb               152 drivers/net/ethernet/arc/emac_main.c 		tx_buff->skb = NULL;
skb               186 drivers/net/ethernet/arc/emac_main.c 		struct sk_buff *skb;
skb               216 drivers/net/ethernet/arc/emac_main.c 		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
skb               217 drivers/net/ethernet/arc/emac_main.c 		if (unlikely(!skb)) {
skb               227 drivers/net/ethernet/arc/emac_main.c 		addr = dma_map_single(&ndev->dev, (void *)skb->data,
skb               232 drivers/net/ethernet/arc/emac_main.c 			dev_kfree_skb(skb);
skb               247 drivers/net/ethernet/arc/emac_main.c 		skb_put(rx_buff->skb, pktlen);
skb               248 drivers/net/ethernet/arc/emac_main.c 		rx_buff->skb->dev = ndev;
skb               249 drivers/net/ethernet/arc/emac_main.c 		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
skb               251 drivers/net/ethernet/arc/emac_main.c 		netif_receive_skb(rx_buff->skb);
skb               253 drivers/net/ethernet/arc/emac_main.c 		rx_buff->skb = skb;
skb               444 drivers/net/ethernet/arc/emac_main.c 		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
skb               446 drivers/net/ethernet/arc/emac_main.c 		if (unlikely(!rx_buff->skb))
skb               449 drivers/net/ethernet/arc/emac_main.c 		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
skb               453 drivers/net/ethernet/arc/emac_main.c 			dev_kfree_skb(rx_buff->skb);
skb               558 drivers/net/ethernet/arc/emac_main.c 		if (tx_buff->skb) {
skb               565 drivers/net/ethernet/arc/emac_main.c 			dev_kfree_skb_irq(tx_buff->skb);
skb               570 drivers/net/ethernet/arc/emac_main.c 		tx_buff->skb = NULL;
skb               589 drivers/net/ethernet/arc/emac_main.c 		if (rx_buff->skb) {
skb               596 drivers/net/ethernet/arc/emac_main.c 			dev_kfree_skb_irq(rx_buff->skb);
skb               601 drivers/net/ethernet/arc/emac_main.c 		rx_buff->skb = NULL;
skb               677 drivers/net/ethernet/arc/emac_main.c static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
skb               685 drivers/net/ethernet/arc/emac_main.c 	if (skb_padto(skb, ETH_ZLEN))
skb               688 drivers/net/ethernet/arc/emac_main.c 	len = max_t(unsigned int, ETH_ZLEN, skb->len);
skb               696 drivers/net/ethernet/arc/emac_main.c 	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
skb               702 drivers/net/ethernet/arc/emac_main.c 		dev_kfree_skb_any(skb);
skb               713 drivers/net/ethernet/arc/emac_main.c 	skb_tx_timestamp(skb);
skb               720 drivers/net/ethernet/arc/emac_main.c 	priv->tx_buff[*txbd_curr].skb = skb;
skb               253 drivers/net/ethernet/atheros/ag71xx.c 			struct sk_buff *skb;
skb               644 drivers/net/ethernet/atheros/ag71xx.c 		struct sk_buff *skb;
skb               649 drivers/net/ethernet/atheros/ag71xx.c 		skb = ring->buf[i].tx.skb;
skb               665 drivers/net/ethernet/atheros/ag71xx.c 		if (!skb)
skb               668 drivers/net/ethernet/atheros/ag71xx.c 		dev_kfree_skb_any(skb);
skb               669 drivers/net/ethernet/atheros/ag71xx.c 		ring->buf[i].tx.skb = NULL;
skb               972 drivers/net/ethernet/atheros/ag71xx.c 		if (ring->buf[i].tx.skb) {
skb               975 drivers/net/ethernet/atheros/ag71xx.c 			dev_kfree_skb_any(ring->buf[i].tx.skb);
skb               977 drivers/net/ethernet/atheros/ag71xx.c 		ring->buf[i].tx.skb = NULL;
skb              1001 drivers/net/ethernet/atheros/ag71xx.c 		ring->buf[i].tx.skb = NULL;
skb              1327 drivers/net/ethernet/atheros/ag71xx.c static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
skb              1340 drivers/net/ethernet/atheros/ag71xx.c 	if (skb->len <= 4) {
skb              1345 drivers/net/ethernet/atheros/ag71xx.c 	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
skb              1353 drivers/net/ethernet/atheros/ag71xx.c 				 skb->len & ag->dcfg->desc_pktlen_mask);
skb              1358 drivers/net/ethernet/atheros/ag71xx.c 	ring->buf[i].tx.len = skb->len;
skb              1359 drivers/net/ethernet/atheros/ag71xx.c 	ring->buf[i].tx.skb = skb;
skb              1361 drivers/net/ethernet/atheros/ag71xx.c 	netdev_sent_queue(ndev, skb->len);
skb              1363 drivers/net/ethernet/atheros/ag71xx.c 	skb_tx_timestamp(skb);
skb              1388 drivers/net/ethernet/atheros/ag71xx.c 	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
skb              1393 drivers/net/ethernet/atheros/ag71xx.c 	dev_kfree_skb(skb);
skb              1440 drivers/net/ethernet/atheros/ag71xx.c 	struct sk_buff *next, *skb;
skb              1480 drivers/net/ethernet/atheros/ag71xx.c 		skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
skb              1481 drivers/net/ethernet/atheros/ag71xx.c 		if (!skb) {
skb              1486 drivers/net/ethernet/atheros/ag71xx.c 		skb_reserve(skb, offset);
skb              1487 drivers/net/ethernet/atheros/ag71xx.c 		skb_put(skb, pktlen);
skb              1491 drivers/net/ethernet/atheros/ag71xx.c 			kfree_skb(skb);
skb              1493 drivers/net/ethernet/atheros/ag71xx.c 			skb->dev = ndev;
skb              1494 drivers/net/ethernet/atheros/ag71xx.c 			skb->ip_summed = CHECKSUM_NONE;
skb              1495 drivers/net/ethernet/atheros/ag71xx.c 			list_add_tail(&skb->list, &rx_list);
skb              1507 drivers/net/ethernet/atheros/ag71xx.c 	list_for_each_entry_safe(skb, next, &rx_list, list)
skb              1508 drivers/net/ethernet/atheros/ag71xx.c 		skb->protocol = eth_type_trans(skb, ndev);
skb                47 drivers/net/ethernet/atheros/alx/alx.h 	struct sk_buff *skb;
skb                66 drivers/net/ethernet/atheros/alx/main.c 	if (txb->skb) {
skb                67 drivers/net/ethernet/atheros/alx/main.c 		dev_kfree_skb_any(txb->skb);
skb                68 drivers/net/ethernet/atheros/alx/main.c 		txb->skb = NULL;
skb                75 drivers/net/ethernet/atheros/alx/main.c 	struct sk_buff *skb;
skb                85 drivers/net/ethernet/atheros/alx/main.c 	while (!cur_buf->skb && next != rxq->read_idx) {
skb                97 drivers/net/ethernet/atheros/alx/main.c 		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
skb                98 drivers/net/ethernet/atheros/alx/main.c 		if (!skb)
skb               101 drivers/net/ethernet/atheros/alx/main.c 		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
skb               102 drivers/net/ethernet/atheros/alx/main.c 			skb_reserve(skb, 64);
skb               105 drivers/net/ethernet/atheros/alx/main.c 				     skb->data, alx->rxbuf_size,
skb               108 drivers/net/ethernet/atheros/alx/main.c 			dev_kfree_skb(skb);
skb               116 drivers/net/ethernet/atheros/alx/main.c 			dev_kfree_skb(skb);
skb               120 drivers/net/ethernet/atheros/alx/main.c 		cur_buf->skb = skb;
skb               143 drivers/net/ethernet/atheros/alx/main.c 						 struct sk_buff *skb)
skb               145 drivers/net/ethernet/atheros/alx/main.c 	unsigned int r_idx = skb->queue_mapping;
skb               181 drivers/net/ethernet/atheros/alx/main.c 			struct sk_buff *skb;
skb               183 drivers/net/ethernet/atheros/alx/main.c 			skb = txq->bufs[sw_read_idx].skb;
skb               184 drivers/net/ethernet/atheros/alx/main.c 			if (skb) {
skb               185 drivers/net/ethernet/atheros/alx/main.c 				total_bytes += skb->len;
skb               222 drivers/net/ethernet/atheros/alx/main.c 	struct sk_buff *skb;
skb               248 drivers/net/ethernet/atheros/alx/main.c 		skb = rxb->skb;
skb               249 drivers/net/ethernet/atheros/alx/main.c 		rxb->skb = NULL;
skb               254 drivers/net/ethernet/atheros/alx/main.c 			dev_kfree_skb_any(skb);
skb               260 drivers/net/ethernet/atheros/alx/main.c 		skb_put(skb, length);
skb               261 drivers/net/ethernet/atheros/alx/main.c 		skb->protocol = eth_type_trans(skb, rxq->netdev);
skb               263 drivers/net/ethernet/atheros/alx/main.c 		skb_checksum_none_assert(skb);
skb               273 drivers/net/ethernet/atheros/alx/main.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               278 drivers/net/ethernet/atheros/alx/main.c 		napi_gro_receive(&rxq->np->napi, skb);
skb               517 drivers/net/ethernet/atheros/alx/main.c 		if (cur_buf->skb) {
skb               522 drivers/net/ethernet/atheros/alx/main.c 			dev_kfree_skb(cur_buf->skb);
skb               523 drivers/net/ethernet/atheros/alx/main.c 			cur_buf->skb = NULL;
skb              1366 drivers/net/ethernet/atheros/alx/main.c static int alx_tpd_req(struct sk_buff *skb)
skb              1370 drivers/net/ethernet/atheros/alx/main.c 	num = skb_shinfo(skb)->nr_frags + 1;
skb              1372 drivers/net/ethernet/atheros/alx/main.c 	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
skb              1378 drivers/net/ethernet/atheros/alx/main.c static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
skb              1382 drivers/net/ethernet/atheros/alx/main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1385 drivers/net/ethernet/atheros/alx/main.c 	cso = skb_checksum_start_offset(skb);
skb              1389 drivers/net/ethernet/atheros/alx/main.c 	css = cso + skb->csum_offset;
skb              1397 drivers/net/ethernet/atheros/alx/main.c static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
skb              1401 drivers/net/ethernet/atheros/alx/main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1404 drivers/net/ethernet/atheros/alx/main.c 	if (!skb_is_gso(skb))
skb              1407 drivers/net/ethernet/atheros/alx/main.c 	err = skb_cow_head(skb, 0);
skb              1411 drivers/net/ethernet/atheros/alx/main.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb              1412 drivers/net/ethernet/atheros/alx/main.c 		struct iphdr *iph = ip_hdr(skb);
skb              1415 drivers/net/ethernet/atheros/alx/main.c 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skb              1418 drivers/net/ethernet/atheros/alx/main.c 	} else if (skb_is_gso_v6(skb)) {
skb              1419 drivers/net/ethernet/atheros/alx/main.c 		ipv6_hdr(skb)->payload_len = 0;
skb              1420 drivers/net/ethernet/atheros/alx/main.c 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb              1421 drivers/net/ethernet/atheros/alx/main.c 						       &ipv6_hdr(skb)->daddr,
skb              1424 drivers/net/ethernet/atheros/alx/main.c 		first->adrl.l.pkt_len = skb->len;
skb              1429 drivers/net/ethernet/atheros/alx/main.c 	first->word1 |= (skb_transport_offset(skb) &
skb              1431 drivers/net/ethernet/atheros/alx/main.c 	first->word1 |= (skb_shinfo(skb)->gso_size &
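Editor's note: alx_tso ends with a pseudo-header rewrite that many TSO engines of this class expect: the stack's checksum over the whole packet is replaced by the one's-complement pseudo-header sum with a zero length field, which the NIC then completes per generated segment. A sketch of just that fixup:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

static int foo_tso_fixup(struct sk_buff *skb)
{
        int err = skb_cow_head(skb, 0);         /* headers must be writable */

        if (err)
                return err;

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);

                iph->check = 0;
                tcp_hdr(skb)->check =
                        ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
                                           IPPROTO_TCP, 0);
        } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check =
                        ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                         &ipv6_hdr(skb)->daddr, 0,
                                         IPPROTO_TCP, 0);
        }

        return 0;
}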
skb              1436 drivers/net/ethernet/atheros/alx/main.c static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
skb              1455 drivers/net/ethernet/atheros/alx/main.c 	maplen = skb_headlen(skb);
skb              1456 drivers/net/ethernet/atheros/alx/main.c 	dma = dma_map_single(txq->dev, skb->data, maplen,
skb              1467 drivers/net/ethernet/atheros/alx/main.c 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
skb              1468 drivers/net/ethernet/atheros/alx/main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              1490 drivers/net/ethernet/atheros/alx/main.c 	txq->bufs[txq->write_idx].skb = skb;
skb              1507 drivers/net/ethernet/atheros/alx/main.c static netdev_tx_t alx_start_xmit_ring(struct sk_buff *skb,
skb              1516 drivers/net/ethernet/atheros/alx/main.c 	if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
skb              1524 drivers/net/ethernet/atheros/alx/main.c 	tso = alx_tso(skb, first);
skb              1527 drivers/net/ethernet/atheros/alx/main.c 	else if (!tso && alx_tx_csum(skb, first))
skb              1530 drivers/net/ethernet/atheros/alx/main.c 	if (alx_map_tx_skb(txq, skb) < 0)
skb              1533 drivers/net/ethernet/atheros/alx/main.c 	netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);
skb              1545 drivers/net/ethernet/atheros/alx/main.c 	dev_kfree_skb_any(skb);
skb              1549 drivers/net/ethernet/atheros/alx/main.c static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
skb              1553 drivers/net/ethernet/atheros/alx/main.c 	return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
skb               443 drivers/net/ethernet/atheros/atl1c/atl1c.h 	struct sk_buff *skb;	/* socket buffer */
skb               843 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	if (buffer_info->skb)
skb               844 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		dev_consume_skb_any(buffer_info->skb);
skb               846 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	buffer_info->skb = NULL;
skb              1553 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		if (buffer_info->skb) {
skb              1554 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			total_bytes += buffer_info->skb->len;
skb              1646 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		  struct sk_buff *skb, struct atl1c_recv_ret_status *prrs)
skb              1653 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	skb_checksum_none_assert(skb);
skb              1658 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	struct sk_buff *skb;
skb              1673 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	skb = build_skb(page_address(page) + adapter->rx_page_offset,
skb              1675 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	if (likely(skb)) {
skb              1676 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		skb_reserve(skb, NET_SKB_PAD);
skb              1683 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	return skb;
skb              1691 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	struct sk_buff *skb;
skb              1707 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		skb = atl1c_alloc_skb(adapter);
skb              1708 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		if (unlikely(!skb)) {
skb              1719 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		vir_addr = skb->data;
skb              1721 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		buffer_info->skb = skb;
skb              1727 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			dev_kfree_skb(skb);
skb              1728 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			buffer_info->skb = NULL;
skb              1780 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		buffer_info[rfd_index].skb = NULL;
skb              1799 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	struct sk_buff *skb;
skb              1839 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			skb = buffer_info->skb;
skb              1848 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		skb_put(skb, length - ETH_FCS_LEN);
skb              1849 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		skb->protocol = eth_type_trans(skb, netdev);
skb              1850 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		atl1c_rx_checksum(adapter, skb, rrs);
skb              1856 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
skb              1858 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		netif_receive_skb(skb);
skb              1952 drivers/net/ethernet/atheros/atl1c/atl1c_main.c static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
skb              1957 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	tpd_req = skb_shinfo(skb)->nr_frags + 1;
skb              1959 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	if (skb_is_gso(skb)) {
skb              1960 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1961 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		if (proto_hdr_len < skb_headlen(skb))
skb              1963 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
skb              1970 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			  struct sk_buff *skb,
skb              1979 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	if (skb_is_gso(skb)) {
skb              1982 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		err = skb_cow_head(skb, 0);
skb              1986 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		offload_type = skb_shinfo(skb)->gso_type;
skb              1989 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
skb              1990 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 					+ ntohs(ip_hdr(skb)->tot_len));
skb              1992 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			if (real_len < skb->len)
skb              1993 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 				pskb_trim(skb, real_len);
skb              1995 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
skb              1996 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			if (unlikely(skb->len == hdr_len)) {
skb              2003 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 				ip_hdr(skb)->check = 0;
skb              2004 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 				tcp_hdr(skb)->check = ~csum_tcpudp_magic(
skb              2005 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 							ip_hdr(skb)->saddr,
skb              2006 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 							ip_hdr(skb)->daddr,
skb              2018 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			ipv6_hdr(skb)->payload_len = 0;
skb              2020 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
skb              2021 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			if (unlikely(skb->len == hdr_len)) {
skb              2028 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 				tcp_hdr(skb)->check = ~csum_ipv6_magic(
skb              2029 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 						&ipv6_hdr(skb)->saddr,
skb              2030 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 						&ipv6_hdr(skb)->daddr,
skb              2034 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			etpd->pkt_len = cpu_to_le32(skb->len);
skb              2039 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		(*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) <<
skb              2041 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		(*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) <<
skb              2047 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              2049 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		cso = skb_checksum_start_offset(skb);
skb              2057 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			css = cso + skb->csum_offset;
skb              2092 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		      struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
skb              2097 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	u16 buf_len = skb_headlen(skb);
skb              2105 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              2109 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              2115 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 					skb->data, hdr_len, PCI_DMA_TODEVICE);
skb              2139 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 			pci_map_single(adapter->pdev, skb->data + mapped_len,
skb              2153 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              2178 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	buffer_info->skb = skb;
skb              2188 drivers/net/ethernet/atheros/atl1c/atl1c_main.c static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
skb              2198 drivers/net/ethernet/atheros/atl1c/atl1c_main.c static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
skb              2207 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		dev_kfree_skb_any(skb);
skb              2211 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	tpd_req = atl1c_cal_tpd_req(skb);
skb              2222 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
skb              2223 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		dev_kfree_skb_any(skb);
skb              2227 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	if (unlikely(skb_vlan_tag_present(skb))) {
skb              2228 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		u16 vlan = skb_vlan_tag_get(skb);
skb              2237 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	if (skb_network_offset(skb) != ETH_HLEN)
skb              2240 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
skb              2245 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		dev_kfree_skb_any(skb);
skb              2247 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		netdev_sent_queue(adapter->netdev, skb->len);
skb              2248 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 		atl1c_tx_queue(adapter, skb, tpd, type);
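
The atl1c_main.c entries above index the driver's TSO preparation: trim any stack padding back to ip_hdr->tot_len, then seed tcp->check with a zero-length pseudo-header checksum so the NIC can finish the per-segment sums itself. A minimal sketch of that IPv4 pattern; the helper name tso_prime_ipv4() is hypothetical:

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

/* Hypothetical helper: prime an IPv4 TSO skb the way the indexed
 * drivers do before posting descriptors. */
static int tso_prime_ipv4(struct sk_buff *skb)
{
	unsigned int real_len, hdr_len;
	int err;

	err = skb_cow_head(skb, 0);	/* headers get rewritten below */
	if (err < 0)
		return err;

	/* Drop any padding the stack appended past the IP datagram. */
	real_len = ((unsigned char *)ip_hdr(skb) - skb->data) +
		   ntohs(ip_hdr(skb)->tot_len);
	if (real_len < skb->len)
		pskb_trim(skb, real_len);

	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	if (unlikely(skb->len == hdr_len))
		return 1;	/* headers only: use plain csum offload */

	/* Zero-length pseudo-header sum: the NIC adds the payload
	 * checksum for every segment it cuts. */
	ip_hdr(skb)->check = 0;
	tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						 ip_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
	return 0;
}

The IPv6 branch indexed above is the same idea with csum_ipv6_magic() over the saddr/daddr pair and payload_len forced to 0.
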
skb               365 drivers/net/ethernet/atheros/atl1e/atl1e.h 	struct sk_buff *skb;
skb               672 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		if (tx_buffer->skb) {
skb               673 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			dev_kfree_skb_any(tx_buffer->skb);
skb               674 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			tx_buffer->skb = NULL;
skb              1248 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		if (tx_buffer->skb) {
skb              1249 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			dev_consume_skb_irq(tx_buffer->skb);
skb              1250 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			tx_buffer->skb = NULL;
skb              1352 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		  struct sk_buff *skb, struct atl1e_recv_ret_status *prrs)
skb              1360 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	skb_checksum_none_assert(skb);
skb              1373 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1399 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct sk_buff *skb = NULL;
skb              1447 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
skb              1448 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			if (skb == NULL)
skb              1451 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
skb              1452 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			skb_put(skb, packet_size);
skb              1453 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			skb->protocol = eth_type_trans(skb, netdev);
skb              1454 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			atl1e_rx_checksum(adapter, skb, prrs);
skb              1463 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb              1465 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			napi_gro_receive(&adapter->napi, skb);
skb              1593 drivers/net/ethernet/atheros/atl1e/atl1e_main.c static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
skb              1600 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1601 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              1605 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (skb_is_gso(skb)) {
skb              1606 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		if (skb->protocol == htons(ETH_P_IP) ||
skb              1607 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		   (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
skb              1608 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			proto_hdr_len = skb_transport_offset(skb) +
skb              1609 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 					tcp_hdrlen(skb);
skb              1610 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			if (proto_hdr_len < skb_headlen(skb)) {
skb              1611 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				tpd_req += ((skb_headlen(skb) - proto_hdr_len +
skb              1622 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		       struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
skb              1628 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (skb_is_gso(skb)) {
skb              1631 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		err = skb_cow_head(skb, 0);
skb              1635 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		offload_type = skb_shinfo(skb)->gso_type;
skb              1638 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
skb              1639 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 					+ ntohs(ip_hdr(skb)->tot_len));
skb              1641 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			if (real_len < skb->len)
skb              1642 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				pskb_trim(skb, real_len);
skb              1644 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
skb              1645 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			if (unlikely(skb->len == hdr_len)) {
skb              1651 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				ip_hdr(skb)->check = 0;
skb              1652 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				ip_hdr(skb)->tot_len = 0;
skb              1653 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				tcp_hdr(skb)->check = ~csum_tcpudp_magic(
skb              1654 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 							ip_hdr(skb)->saddr,
skb              1655 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 							ip_hdr(skb)->daddr,
skb              1657 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				tpd->word3 |= (ip_hdr(skb)->ihl &
skb              1660 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				tpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
skb              1663 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 				tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
skb              1672 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              1675 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		cso = skb_checksum_start_offset(skb);
skb              1681 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			css = cso + skb->csum_offset;
skb              1694 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
skb              1698 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	u16 buf_len = skb_headlen(skb);
skb              1708 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              1712 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1718 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 					skb->data, hdr_len, PCI_DMA_TODEVICE);
skb              1740 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		tx_buffer->skb = NULL;
skb              1746 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			pci_map_single(adapter->pdev, skb->data + mapped_len,
skb              1773 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              1785 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			BUG_ON(tx_buffer->skb);
skb              1787 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 			tx_buffer->skb = NULL;
skb              1831 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	tx_buffer->skb = skb;
skb              1847 drivers/net/ethernet/atheros/atl1e/atl1e_main.c static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
skb              1855 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		dev_kfree_skb_any(skb);
skb              1859 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (unlikely(skb->len <= 0)) {
skb              1860 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		dev_kfree_skb_any(skb);
skb              1863 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	tpd_req = atl1e_cal_tdp_req(skb);
skb              1873 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (skb_vlan_tag_present(skb)) {
skb              1874 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		u16 vlan_tag = skb_vlan_tag_get(skb);
skb              1883 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (skb->protocol == htons(ETH_P_8021Q))
skb              1886 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (skb_network_offset(skb) != ETH_HLEN)
skb              1890 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
skb              1891 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		dev_kfree_skb_any(skb);
skb              1895 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	if (atl1e_tx_map(adapter, skb, tpd)) {
skb              1896 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 		dev_kfree_skb_any(skb);
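
Both atl1c_xmit_frame() and atl1e_xmit_frame() above follow the same ndo_start_xmit contract: once the driver accepts the skb, every failure path must free it (dev_kfree_skb_any()) and still return NETDEV_TX_OK; only a full ring may return NETDEV_TX_BUSY and leave the skb with the stack. A condensed sketch under that contract; ring_has_room() and ring_queue_frame() are hypothetical placeholders for the driver's own ring logic:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical ring helpers standing in for a driver's own logic. */
static bool ring_has_room(struct net_device *dev, u16 descs)
{
	return true;	/* placeholder: consult the real TX ring here */
}

static int ring_queue_frame(struct net_device *dev, struct sk_buff *skb)
{
	return 0;	/* placeholder: map and post descriptors here */
}

static netdev_tx_t sketch_xmit_frame(struct sk_buff *skb,
				     struct net_device *dev)
{
	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);	/* bad frame: consume, report OK */
		return NETDEV_TX_OK;
	}

	if (!ring_has_room(dev, skb_shinfo(skb)->nr_frags + 1)) {
		/* Only ring exhaustion may push back; the skb stays
		 * with the stack for a later retry. */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (ring_queue_frame(dev, skb) < 0) {
		/* Offload/mapping failure: drop, never requeue. */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	return NETDEV_TX_OK;
}
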
skb              1145 drivers/net/ethernet/atheros/atlx/atl1.c 		if (buffer_info->skb) {
skb              1146 drivers/net/ethernet/atheros/atlx/atl1.c 			dev_kfree_skb(buffer_info->skb);
skb              1147 drivers/net/ethernet/atheros/atlx/atl1.c 			buffer_info->skb = NULL;
skb              1188 drivers/net/ethernet/atheros/atlx/atl1.c 		if (buffer_info->skb) {
skb              1189 drivers/net/ethernet/atheros/atlx/atl1.c 			dev_kfree_skb_any(buffer_info->skb);
skb              1190 drivers/net/ethernet/atheros/atlx/atl1.c 			buffer_info->skb = NULL;
skb              1786 drivers/net/ethernet/atheros/atlx/atl1.c 	struct rx_return_desc *rrd, struct sk_buff *skb)
skb              1801 drivers/net/ethernet/atheros/atlx/atl1.c 	skb_checksum_none_assert(skb);
skb              1822 drivers/net/ethernet/atheros/atlx/atl1.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1839 drivers/net/ethernet/atheros/atlx/atl1.c 	struct sk_buff *skb;
skb              1851 drivers/net/ethernet/atheros/atlx/atl1.c 		if (buffer_info->skb) {
skb              1858 drivers/net/ethernet/atheros/atlx/atl1.c 		skb = netdev_alloc_skb_ip_align(adapter->netdev,
skb              1860 drivers/net/ethernet/atheros/atlx/atl1.c 		if (unlikely(!skb)) {
skb              1867 drivers/net/ethernet/atheros/atlx/atl1.c 		buffer_info->skb = skb;
skb              1869 drivers/net/ethernet/atheros/atlx/atl1.c 		page = virt_to_page(skb->data);
skb              1870 drivers/net/ethernet/atheros/atlx/atl1.c 		offset = offset_in_page(skb->data);
skb              1911 drivers/net/ethernet/atheros/atlx/atl1.c 	struct sk_buff *skb;
skb              2000 drivers/net/ethernet/atheros/atlx/atl1.c 		skb = buffer_info->skb;
skb              2003 drivers/net/ethernet/atheros/atlx/atl1.c 		skb_put(skb, length - ETH_FCS_LEN);
skb              2006 drivers/net/ethernet/atheros/atlx/atl1.c 		atl1_rx_checksum(adapter, rrd, skb);
skb              2007 drivers/net/ethernet/atheros/atlx/atl1.c 		skb->protocol = eth_type_trans(skb, adapter->netdev);
skb              2014 drivers/net/ethernet/atheros/atlx/atl1.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb              2016 drivers/net/ethernet/atheros/atlx/atl1.c 		netif_receive_skb(skb);
skb              2019 drivers/net/ethernet/atheros/atlx/atl1.c 		buffer_info->skb = NULL;
skb              2072 drivers/net/ethernet/atheros/atlx/atl1.c 		if (buffer_info->skb) {
skb              2073 drivers/net/ethernet/atheros/atlx/atl1.c 			dev_consume_skb_irq(buffer_info->skb);
skb              2074 drivers/net/ethernet/atheros/atlx/atl1.c 			buffer_info->skb = NULL;
skb              2100 drivers/net/ethernet/atheros/atlx/atl1.c static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
skb              2106 drivers/net/ethernet/atheros/atlx/atl1.c 	if (skb_shinfo(skb)->gso_size) {
skb              2109 drivers/net/ethernet/atheros/atlx/atl1.c 		err = skb_cow_head(skb, 0);
skb              2113 drivers/net/ethernet/atheros/atlx/atl1.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb              2114 drivers/net/ethernet/atheros/atlx/atl1.c 			struct iphdr *iph = ip_hdr(skb);
skb              2116 drivers/net/ethernet/atheros/atlx/atl1.c 			real_len = (((unsigned char *)iph - skb->data) +
skb              2118 drivers/net/ethernet/atheros/atlx/atl1.c 			if (real_len < skb->len)
skb              2119 drivers/net/ethernet/atheros/atlx/atl1.c 				pskb_trim(skb, real_len);
skb              2120 drivers/net/ethernet/atheros/atlx/atl1.c 			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
skb              2121 drivers/net/ethernet/atheros/atlx/atl1.c 			if (skb->len == hdr_len) {
skb              2123 drivers/net/ethernet/atheros/atlx/atl1.c 				tcp_hdr(skb)->check =
skb              2125 drivers/net/ethernet/atheros/atlx/atl1.c 					iph->daddr, tcp_hdrlen(skb),
skb              2129 drivers/net/ethernet/atheros/atlx/atl1.c 				ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
skb              2138 drivers/net/ethernet/atheros/atlx/atl1.c 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
skb              2141 drivers/net/ethernet/atheros/atlx/atl1.c 				(unsigned char *) skb_network_header(skb);
skb              2149 drivers/net/ethernet/atheros/atlx/atl1.c 			ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
skb              2151 drivers/net/ethernet/atheros/atlx/atl1.c 			ptpd->word3 |= (skb_shinfo(skb)->gso_size &
skb              2160 drivers/net/ethernet/atheros/atlx/atl1.c static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
skb              2165 drivers/net/ethernet/atheros/atlx/atl1.c 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              2166 drivers/net/ethernet/atheros/atlx/atl1.c 		css = skb_checksum_start_offset(skb);
skb              2167 drivers/net/ethernet/atheros/atlx/atl1.c 		cso = css + (u8) skb->csum_offset;
skb              2185 drivers/net/ethernet/atheros/atlx/atl1.c static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
skb              2190 drivers/net/ethernet/atheros/atlx/atl1.c 	u16 buf_len = skb->len;
skb              2200 drivers/net/ethernet/atheros/atlx/atl1.c 	buf_len -= skb->data_len;
skb              2201 drivers/net/ethernet/atheros/atlx/atl1.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              2204 drivers/net/ethernet/atheros/atlx/atl1.c 	BUG_ON(buffer_info->skb);
skb              2206 drivers/net/ethernet/atheros/atlx/atl1.c 	buffer_info->skb = NULL;
skb              2211 drivers/net/ethernet/atheros/atlx/atl1.c 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              2213 drivers/net/ethernet/atheros/atlx/atl1.c 		page = virt_to_page(skb->data);
skb              2214 drivers/net/ethernet/atheros/atlx/atl1.c 		offset = offset_in_page(skb->data);
skb              2231 drivers/net/ethernet/atheros/atlx/atl1.c 				buffer_info->skb = NULL;
skb              2236 drivers/net/ethernet/atheros/atlx/atl1.c 				page = virt_to_page(skb->data +
skb              2238 drivers/net/ethernet/atheros/atlx/atl1.c 				offset = offset_in_page(skb->data +
skb              2250 drivers/net/ethernet/atheros/atlx/atl1.c 		page = virt_to_page(skb->data);
skb              2251 drivers/net/ethernet/atheros/atlx/atl1.c 		offset = offset_in_page(skb->data);
skb              2259 drivers/net/ethernet/atheros/atlx/atl1.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              2268 drivers/net/ethernet/atheros/atlx/atl1.c 			BUG_ON(buffer_info->skb);
skb              2270 drivers/net/ethernet/atheros/atlx/atl1.c 			buffer_info->skb = NULL;
skb              2284 drivers/net/ethernet/atheros/atlx/atl1.c 	buffer_info->skb = skb;
skb              2337 drivers/net/ethernet/atheros/atlx/atl1.c static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
skb              2353 drivers/net/ethernet/atheros/atlx/atl1.c 	len = skb_headlen(skb);
skb              2355 drivers/net/ethernet/atheros/atlx/atl1.c 	if (unlikely(skb->len <= 0)) {
skb              2356 drivers/net/ethernet/atheros/atlx/atl1.c 		dev_kfree_skb_any(skb);
skb              2360 drivers/net/ethernet/atheros/atlx/atl1.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              2362 drivers/net/ethernet/atheros/atlx/atl1.c 		unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
skb              2367 drivers/net/ethernet/atheros/atlx/atl1.c 	mss = skb_shinfo(skb)->gso_size;
skb              2369 drivers/net/ethernet/atheros/atlx/atl1.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb              2370 drivers/net/ethernet/atheros/atlx/atl1.c 			proto_hdr_len = (skb_transport_offset(skb) +
skb              2371 drivers/net/ethernet/atheros/atlx/atl1.c 					 tcp_hdrlen(skb));
skb              2373 drivers/net/ethernet/atheros/atlx/atl1.c 				dev_kfree_skb_any(skb);
skb              2397 drivers/net/ethernet/atheros/atlx/atl1.c 	if (skb_vlan_tag_present(skb)) {
skb              2398 drivers/net/ethernet/atheros/atlx/atl1.c 		vlan_tag = skb_vlan_tag_get(skb);
skb              2406 drivers/net/ethernet/atheros/atlx/atl1.c 	tso = atl1_tso(adapter, skb, ptpd);
skb              2408 drivers/net/ethernet/atheros/atlx/atl1.c 		dev_kfree_skb_any(skb);
skb              2413 drivers/net/ethernet/atheros/atlx/atl1.c 		ret_val = atl1_tx_csum(adapter, skb, ptpd);
skb              2415 drivers/net/ethernet/atheros/atlx/atl1.c 			dev_kfree_skb_any(skb);
skb              2420 drivers/net/ethernet/atheros/atlx/atl1.c 	atl1_tx_map(adapter, skb, ptpd);
skb               594 drivers/net/ethernet/atheros/atlx/atl1.h 	struct sk_buff *skb;	/* socket buffer */
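
The atl1_rx_checksum() lines show the usual RX checksum-offload shape: assert CHECKSUM_NONE first, then upgrade to CHECKSUM_UNNECESSARY only when the hardware reported no IP/L4 errors. A minimal sketch, with hypothetical status bits standing in for the RRD error flags:

#include <linux/bits.h>
#include <linux/skbuff.h>

/* Hypothetical status bits standing in for the RRD error flags. */
#define RX_ERR_IP_CSUM	BIT(0)
#define RX_ERR_L4_CSUM	BIT(1)

static void sketch_rx_checksum(struct sk_buff *skb, u32 status)
{
	/* Default to "not verified": the stack checksums in software
	 * unless the hardware is proven to have done it. */
	skb_checksum_none_assert(skb);

	if (status & (RX_ERR_IP_CSUM | RX_ERR_L4_CSUM))
		return;		/* leave CHECKSUM_NONE */

	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
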
skb               410 drivers/net/ethernet/atheros/atlx/atl2.c 	struct sk_buff *skb;
skb               423 drivers/net/ethernet/atheros/atlx/atl2.c 			skb = netdev_alloc_skb_ip_align(netdev, rx_size);
skb               424 drivers/net/ethernet/atheros/atlx/atl2.c 			if (NULL == skb) {
skb               432 drivers/net/ethernet/atheros/atlx/atl2.c 			memcpy(skb->data, rxd->packet, rx_size);
skb               433 drivers/net/ethernet/atheros/atlx/atl2.c 			skb_put(skb, rx_size);
skb               434 drivers/net/ethernet/atheros/atlx/atl2.c 			skb->protocol = eth_type_trans(skb, netdev);
skb               440 drivers/net/ethernet/atheros/atlx/atl2.c 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb               442 drivers/net/ethernet/atheros/atlx/atl2.c 			netif_rx(skb);
skb               826 drivers/net/ethernet/atheros/atlx/atl2.c static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
skb               836 drivers/net/ethernet/atheros/atlx/atl2.c 		dev_kfree_skb_any(skb);
skb               840 drivers/net/ethernet/atheros/atlx/atl2.c 	if (unlikely(skb->len <= 0)) {
skb               841 drivers/net/ethernet/atheros/atlx/atl2.c 		dev_kfree_skb_any(skb);
skb               848 drivers/net/ethernet/atheros/atlx/atl2.c 	if (skb->len + sizeof(struct tx_pkt_header) + 4  > txbuf_unused ||
skb               860 drivers/net/ethernet/atheros/atlx/atl2.c 	txph->pkt_size = skb->len;
skb               866 drivers/net/ethernet/atheros/atlx/atl2.c 	if (copy_len >= skb->len) {
skb               867 drivers/net/ethernet/atheros/atlx/atl2.c 		memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
skb               868 drivers/net/ethernet/atheros/atlx/atl2.c 		offset += ((u32)(skb->len + 3) & ~3);
skb               870 drivers/net/ethernet/atheros/atlx/atl2.c 		memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
skb               871 drivers/net/ethernet/atheros/atlx/atl2.c 		memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
skb               872 drivers/net/ethernet/atheros/atlx/atl2.c 			skb->len-copy_len);
skb               873 drivers/net/ethernet/atheros/atlx/atl2.c 		offset = ((u32)(skb->len-copy_len + 3) & ~3);
skb               876 drivers/net/ethernet/atheros/atlx/atl2.c 	if (skb_vlan_tag_present(skb)) {
skb               877 drivers/net/ethernet/atheros/atlx/atl2.c 		u16 vlan_tag = skb_vlan_tag_get(skb);
skb               897 drivers/net/ethernet/atheros/atlx/atl2.c 	dev_consume_skb_any(skb);
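
atl2_xmit_frame() above is a copy-mode transmitter: instead of DMA-mapping the skb, it memcpy()s the frame into a circular transmit buffer, wrapping at the end and keeping each chunk 4-byte aligned, then frees the skb immediately with dev_consume_skb_any(). A sketch of just the wraparound copy; ring, ring_size, and offset are hypothetical stand-ins for the driver's txd ring state:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Copy one frame into a circular TX buffer, wrapping at the end and
 * keeping chunks 4-byte aligned; returns the next write offset. */
static u32 copy_into_tx_ring(u8 *ring, u32 ring_size, u32 offset,
			     const struct sk_buff *skb)
{
	u32 room = ring_size - offset;

	if (room >= skb->len) {
		memcpy(ring + offset, skb->data, skb->len);
		offset += ((u32)(skb->len + 3) & ~3u);
	} else {
		memcpy(ring + offset, skb->data, room);
		memcpy(ring, skb->data + room, skb->len - room);
		offset = ((u32)(skb->len - room + 3) & ~3u);
	}
	if (offset >= ring_size)
		offset -= ring_size;
	return offset;
}
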
skb               229 drivers/net/ethernet/aurora/nb8800.c 	struct sk_buff *skb;
skb               235 drivers/net/ethernet/aurora/nb8800.c 	skb = napi_alloc_skb(&priv->napi, size);
skb               236 drivers/net/ethernet/aurora/nb8800.c 	if (!skb) {
skb               244 drivers/net/ethernet/aurora/nb8800.c 		skb_put_data(skb, data, len);
skb               252 drivers/net/ethernet/aurora/nb8800.c 			dev_kfree_skb(skb);
skb               257 drivers/net/ethernet/aurora/nb8800.c 		skb_put_data(skb, data, RX_COPYHDR);
skb               258 drivers/net/ethernet/aurora/nb8800.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
skb               263 drivers/net/ethernet/aurora/nb8800.c 	skb->protocol = eth_type_trans(skb, dev);
skb               264 drivers/net/ethernet/aurora/nb8800.c 	napi_gro_receive(&priv->napi, skb);
skb               387 drivers/net/ethernet/aurora/nb8800.c static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
skb               404 drivers/net/ethernet/aurora/nb8800.c 	align = (8 - (uintptr_t)skb->data) & 7;
skb               406 drivers/net/ethernet/aurora/nb8800.c 	dma_len = skb->len - align;
skb               407 drivers/net/ethernet/aurora/nb8800.c 	dma_addr = dma_map_single(&dev->dev, skb->data + align,
skb               412 drivers/net/ethernet/aurora/nb8800.c 		kfree_skb(skb);
skb               431 drivers/net/ethernet/aurora/nb8800.c 		memcpy(txd->buf, skb->data, align);
skb               448 drivers/net/ethernet/aurora/nb8800.c 	txb->skb = skb;
skb               459 drivers/net/ethernet/aurora/nb8800.c 	netdev_sent_queue(dev, skb->len);
skb               498 drivers/net/ethernet/aurora/nb8800.c 		struct sk_buff *skb;
skb               503 drivers/net/ethernet/aurora/nb8800.c 		skb = txb->skb;
skb               504 drivers/net/ethernet/aurora/nb8800.c 		len += skb->len;
skb               511 drivers/net/ethernet/aurora/nb8800.c 			kfree_skb(skb);
skb               513 drivers/net/ethernet/aurora/nb8800.c 			consume_skb(skb);
skb               520 drivers/net/ethernet/aurora/nb8800.c 		txb->skb = NULL;
skb               762 drivers/net/ethernet/aurora/nb8800.c 			kfree_skb(priv->tx_bufs[i].skb);
skb               219 drivers/net/ethernet/aurora/nb8800.h 	struct sk_buff			*skb;
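
The nb8800 TX-completion entries above draw the standard distinction between kfree_skb() for frames that died with a transmit error and consume_skb() for frames that left the wire cleanly; drop monitors count only the former. A minimal sketch:

#include <linux/skbuff.h>

/* One reclaimed TX slot: errored frames count as drops, clean ones
 * end their lifecycle normally. */
static void tx_complete_one(struct sk_buff *skb, bool tx_error)
{
	if (tx_error)
		kfree_skb(skb);		/* visible to drop monitors */
	else
		consume_skb(skb);	/* not a drop */
}

In hard-irq or unknown context the indexed variants dev_kfree_skb_any() and dev_consume_skb_irq() make the same distinction safely.
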
skb               628 drivers/net/ethernet/broadcom/b44.c 		struct sk_buff *skb = rp->skb;
skb               630 drivers/net/ethernet/broadcom/b44.c 		BUG_ON(skb == NULL);
skb               634 drivers/net/ethernet/broadcom/b44.c 				 skb->len,
skb               636 drivers/net/ethernet/broadcom/b44.c 		rp->skb = NULL;
skb               638 drivers/net/ethernet/broadcom/b44.c 		bytes_compl += skb->len;
skb               641 drivers/net/ethernet/broadcom/b44.c 		dev_consume_skb_irq(skb);
skb               663 drivers/net/ethernet/broadcom/b44.c 	struct sk_buff *skb;
skb               673 drivers/net/ethernet/broadcom/b44.c 	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
skb               674 drivers/net/ethernet/broadcom/b44.c 	if (skb == NULL)
skb               677 drivers/net/ethernet/broadcom/b44.c 	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
skb               689 drivers/net/ethernet/broadcom/b44.c 		dev_kfree_skb_any(skb);
skb               690 drivers/net/ethernet/broadcom/b44.c 		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
skb               691 drivers/net/ethernet/broadcom/b44.c 		if (skb == NULL)
skb               693 drivers/net/ethernet/broadcom/b44.c 		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
skb               700 drivers/net/ethernet/broadcom/b44.c 			dev_kfree_skb_any(skb);
skb               706 drivers/net/ethernet/broadcom/b44.c 	rh = (struct rx_header *) skb->data;
skb               711 drivers/net/ethernet/broadcom/b44.c 	map->skb = skb;
skb               715 drivers/net/ethernet/broadcom/b44.c 		src_map->skb = NULL;
skb               747 drivers/net/ethernet/broadcom/b44.c 	dest_map->skb = src_map->skb;
skb               748 drivers/net/ethernet/broadcom/b44.c 	rh = (struct rx_header *) src_map->skb->data;
skb               767 drivers/net/ethernet/broadcom/b44.c 	src_map->skb = NULL;
skb               791 drivers/net/ethernet/broadcom/b44.c 		struct sk_buff *skb = rp->skb;
skb               799 drivers/net/ethernet/broadcom/b44.c 		rh = (struct rx_header *) skb->data;
skb               833 drivers/net/ethernet/broadcom/b44.c 			skb_put(skb, len + RX_PKT_OFFSET);
skb               834 drivers/net/ethernet/broadcom/b44.c 			skb_pull(skb, RX_PKT_OFFSET);
skb               845 drivers/net/ethernet/broadcom/b44.c 			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
skb               847 drivers/net/ethernet/broadcom/b44.c 			skb = copy_skb;
skb               849 drivers/net/ethernet/broadcom/b44.c 		skb_checksum_none_assert(skb);
skb               850 drivers/net/ethernet/broadcom/b44.c 		skb->protocol = eth_type_trans(skb, bp->dev);
skb               851 drivers/net/ethernet/broadcom/b44.c 		netif_receive_skb(skb);
skb               973 drivers/net/ethernet/broadcom/b44.c static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               981 drivers/net/ethernet/broadcom/b44.c 	len = skb->len;
skb               991 drivers/net/ethernet/broadcom/b44.c 	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
skb              1014 drivers/net/ethernet/broadcom/b44.c 		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
skb              1015 drivers/net/ethernet/broadcom/b44.c 		dev_consume_skb_any(skb);
skb              1016 drivers/net/ethernet/broadcom/b44.c 		skb = bounce_skb;
skb              1020 drivers/net/ethernet/broadcom/b44.c 	bp->tx_buffers[entry].skb = skb;
skb              1048 drivers/net/ethernet/broadcom/b44.c 	netdev_sent_queue(dev, skb->len);
skb              1102 drivers/net/ethernet/broadcom/b44.c 		if (rp->skb == NULL)
skb              1106 drivers/net/ethernet/broadcom/b44.c 		dev_kfree_skb_any(rp->skb);
skb              1107 drivers/net/ethernet/broadcom/b44.c 		rp->skb = NULL;
skb              1114 drivers/net/ethernet/broadcom/b44.c 		if (rp->skb == NULL)
skb              1116 drivers/net/ethernet/broadcom/b44.c 		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
skb              1118 drivers/net/ethernet/broadcom/b44.c 		dev_kfree_skb_any(rp->skb);
skb              1119 drivers/net/ethernet/broadcom/b44.c 		rp->skb = NULL;
skb               282 drivers/net/ethernet/broadcom/b44.h 	struct sk_buff		*skb;
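
b44_alloc_rx_skb() above works around the chip's limited DMA addressing: if the freshly mapped buffer lands above what the engine can reach, the skb is thrown away and reallocated from ZONE_DMA. A sketch of that bounce, assuming a hypothetical DMA_LIMIT mask in place of the driver's real constant:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

#define DMA_LIMIT	0x3fffffffULL	/* hypothetical device limit */

/* Allocate and map an RX skb, bouncing to ZONE_DMA when the first
 * mapping lands above what the DMA engine can address. */
static struct sk_buff *alloc_rx_skb_low(struct device *dmadev,
					unsigned int size,
					dma_addr_t *mapping)
{
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return NULL;

	*mapping = dma_map_single(dmadev, skb->data, size,
				  DMA_FROM_DEVICE);
	if (!dma_mapping_error(dmadev, *mapping) && *mapping <= DMA_LIMIT)
		return skb;

	/* Out of reach: free, retry from the DMA zone. */
	if (!dma_mapping_error(dmadev, *mapping))
		dma_unmap_single(dmadev, *mapping, size, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);

	skb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
	if (!skb)
		return NULL;
	*mapping = dma_map_single(dmadev, skb->data, size,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(dmadev, *mapping) || *mapping > DMA_LIMIT) {
		if (!dma_mapping_error(dmadev, *mapping))
			dma_unmap_single(dmadev, *mapping, size,
					 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return NULL;
	}
	return skb;
}
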
skb               232 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		struct sk_buff *skb;
skb               241 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
skb               242 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			if (!skb)
skb               244 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			priv->rx_skb[desc_idx] = skb;
skb               245 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			p = dma_map_single(&priv->pdev->dev, skb->data,
skb               315 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		struct sk_buff *skb;
skb               364 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		skb = priv->rx_skb[desc_idx];
skb               381 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			memcpy(nskb->data, skb->data, len);
skb               384 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			skb = nskb;
skb               391 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		skb_put(skb, len);
skb               392 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		skb->protocol = eth_type_trans(skb, dev);
skb               395 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		netif_receive_skb(skb);
skb               424 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		struct sk_buff *skb;
skb               441 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		skb = priv->tx_skb[priv->tx_dirty_desc];
skb               443 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
skb               456 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		dev_kfree_skb(skb);
skb               559 drivers/net/ethernet/broadcom/bcm63xx_enet.c bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               582 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	if (priv->enet_is_sw && skb->len < 64) {
skb               583 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		int needed = 64 - skb->len;
skb               586 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		if (unlikely(skb_tailroom(skb) < needed)) {
skb               589 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
skb               594 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			dev_kfree_skb(skb);
skb               595 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			skb = nskb;
skb               597 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		data = skb_put_zero(skb, needed);
skb               602 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	priv->tx_skb[priv->tx_curr_desc] = skb;
skb               605 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
skb               608 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
skb               634 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	dev->stats.tx_bytes += skb->len;
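
bcm_enet_start_xmit() above pads runt frames in software because the switch hardware will not: grow tailroom if needed via skb_copy_expand(), then zero-fill up to the 64-byte minimum with skb_put_zero() so no stale memory leaks onto the wire. A minimal sketch of that step:

#include <linux/skbuff.h>

/* Grow a runt frame to the 64-byte minimum the hardware expects;
 * returns the (possibly replaced) skb, or NULL after freeing it. */
static struct sk_buff *pad_runt_frame(struct sk_buff *skb)
{
	int needed;

	if (skb->len >= 64)
		return skb;
	needed = 64 - skb->len;

	if (unlikely(skb_tailroom(skb) < needed)) {
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
		if (!nskb) {
			dev_kfree_skb(skb);
			return NULL;
		}
		dev_kfree_skb(skb);
		skb = nskb;
	}
	/* Zero the pad so stale memory never reaches the wire. */
	skb_put_zero(skb, needed);
	return skb;
}

The core's eth_skb_pad()/skb_put_padto() helpers wrap the same idea for drivers that pad to the standard minimum.
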
skb               655 drivers/net/ethernet/broadcom/bcmsysport.c 	dev_consume_skb_any(cb->skb);
skb               656 drivers/net/ethernet/broadcom/bcmsysport.c 	cb->skb = NULL;
skb               665 drivers/net/ethernet/broadcom/bcmsysport.c 	struct sk_buff *skb, *rx_skb;
skb               669 drivers/net/ethernet/broadcom/bcmsysport.c 	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
skb               671 drivers/net/ethernet/broadcom/bcmsysport.c 	if (!skb) {
skb               677 drivers/net/ethernet/broadcom/bcmsysport.c 	mapping = dma_map_single(kdev, skb->data,
skb               681 drivers/net/ethernet/broadcom/bcmsysport.c 		dev_kfree_skb_any(skb);
skb               687 drivers/net/ethernet/broadcom/bcmsysport.c 	rx_skb = cb->skb;
skb               693 drivers/net/ethernet/broadcom/bcmsysport.c 	cb->skb = skb;
skb               706 drivers/net/ethernet/broadcom/bcmsysport.c 	struct sk_buff *skb;
skb               711 drivers/net/ethernet/broadcom/bcmsysport.c 		skb = bcm_sysport_rx_refill(priv, cb);
skb               712 drivers/net/ethernet/broadcom/bcmsysport.c 		dev_kfree_skb(skb);
skb               713 drivers/net/ethernet/broadcom/bcmsysport.c 		if (!cb->skb)
skb               729 drivers/net/ethernet/broadcom/bcmsysport.c 	struct sk_buff *skb;
skb               755 drivers/net/ethernet/broadcom/bcmsysport.c 		skb = bcm_sysport_rx_refill(priv, cb);
skb               763 drivers/net/ethernet/broadcom/bcmsysport.c 		if (unlikely(!skb)) {
skb               771 drivers/net/ethernet/broadcom/bcmsysport.c 		rsb = (struct bcm_rsb *)skb->data;
skb               785 drivers/net/ethernet/broadcom/bcmsysport.c 			dev_kfree_skb_any(skb);
skb               793 drivers/net/ethernet/broadcom/bcmsysport.c 			dev_kfree_skb_any(skb);
skb               803 drivers/net/ethernet/broadcom/bcmsysport.c 			dev_kfree_skb_any(skb);
skb               807 drivers/net/ethernet/broadcom/bcmsysport.c 		skb_put(skb, len);
skb               811 drivers/net/ethernet/broadcom/bcmsysport.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               817 drivers/net/ethernet/broadcom/bcmsysport.c 		skb_pull(skb, sizeof(*rsb) + 2);
skb               823 drivers/net/ethernet/broadcom/bcmsysport.c 			skb_trim(skb, len - ETH_FCS_LEN);
skb               827 drivers/net/ethernet/broadcom/bcmsysport.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               835 drivers/net/ethernet/broadcom/bcmsysport.c 		napi_gro_receive(&priv->napi, skb);
skb               858 drivers/net/ethernet/broadcom/bcmsysport.c 	if (cb->skb) {
skb               859 drivers/net/ethernet/broadcom/bcmsysport.c 		*bytes_compl += cb->skb->len;
skb              1211 drivers/net/ethernet/broadcom/bcmsysport.c static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
skb              1223 drivers/net/ethernet/broadcom/bcmsysport.c 	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
skb              1224 drivers/net/ethernet/broadcom/bcmsysport.c 		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
skb              1226 drivers/net/ethernet/broadcom/bcmsysport.c 			dev_kfree_skb_any(skb);
skb              1232 drivers/net/ethernet/broadcom/bcmsysport.c 		dev_consume_skb_any(skb);
skb              1233 drivers/net/ethernet/broadcom/bcmsysport.c 		skb = nskb;
skb              1237 drivers/net/ethernet/broadcom/bcmsysport.c 	tsb = skb_push(skb, sizeof(*tsb));
skb              1241 drivers/net/ethernet/broadcom/bcmsysport.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1242 drivers/net/ethernet/broadcom/bcmsysport.c 		ip_ver = skb->protocol;
skb              1245 drivers/net/ethernet/broadcom/bcmsysport.c 			ip_proto = ip_hdr(skb)->protocol;
skb              1248 drivers/net/ethernet/broadcom/bcmsysport.c 			ip_proto = ipv6_hdr(skb)->nexthdr;
skb              1251 drivers/net/ethernet/broadcom/bcmsysport.c 			return skb;
skb              1255 drivers/net/ethernet/broadcom/bcmsysport.c 		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
skb              1256 drivers/net/ethernet/broadcom/bcmsysport.c 		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
skb              1271 drivers/net/ethernet/broadcom/bcmsysport.c 	return skb;
skb              1274 drivers/net/ethernet/broadcom/bcmsysport.c static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
skb              1289 drivers/net/ethernet/broadcom/bcmsysport.c 	queue = skb_get_queue_mapping(skb);
skb              1304 drivers/net/ethernet/broadcom/bcmsysport.c 		skb = bcm_sysport_insert_tsb(skb, dev);
skb              1305 drivers/net/ethernet/broadcom/bcmsysport.c 		if (!skb) {
skb              1311 drivers/net/ethernet/broadcom/bcmsysport.c 	skb_len = skb->len;
skb              1313 drivers/net/ethernet/broadcom/bcmsysport.c 	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
skb              1317 drivers/net/ethernet/broadcom/bcmsysport.c 			  skb->data, skb_len);
skb              1324 drivers/net/ethernet/broadcom/bcmsysport.c 	cb->skb = skb;
skb              1333 drivers/net/ethernet/broadcom/bcmsysport.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              2231 drivers/net/ethernet/broadcom/bcmsysport.c static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              2235 drivers/net/ethernet/broadcom/bcmsysport.c 	u16 queue = skb_get_queue_mapping(skb);
skb              2240 drivers/net/ethernet/broadcom/bcmsysport.c 		return netdev_pick_tx(dev, skb, NULL);
skb              2248 drivers/net/ethernet/broadcom/bcmsysport.c 		return netdev_pick_tx(dev, skb, NULL);
skb               683 drivers/net/ethernet/broadcom/bcmsysport.h 	struct sk_buff	*skb;		/* SKB for RX packets */
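
bcm_sysport_insert_tsb() above prepends an in-band transmit status block: if headroom is short it reallocates with skb_realloc_headroom(), freeing the old skb with dev_consume_skb_any() rather than the error-path dev_kfree_skb_any(), then claims the space with skb_push(). A sketch of the same move; struct tx_status_block is a hypothetical stand-in for the real TSB layout:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical stand-in for the hardware's in-band status block. */
struct tx_status_block {
	__le32 word[2];
};

static struct sk_buff *prepend_tsb(struct sk_buff *skb)
{
	struct tx_status_block *tsb;

	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		struct sk_buff *nskb;

		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);	/* hard failure: a drop */
			return NULL;
		}
		dev_consume_skb_any(skb);	/* replaced, not dropped */
		skb = nskb;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	memset(tsb, 0, sizeof(*tsb));	/* real drivers fill csum hints */
	return skb;
}
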
skb               133 drivers/net/ethernet/broadcom/bgmac.c 				    struct sk_buff *skb)
skb               143 drivers/net/ethernet/broadcom/bgmac.c 	if (skb->len > BGMAC_DESC_CTL1_LEN) {
skb               144 drivers/net/ethernet/broadcom/bgmac.c 		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
skb               148 drivers/net/ethernet/broadcom/bgmac.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               149 drivers/net/ethernet/broadcom/bgmac.c 		skb_checksum_help(skb);
skb               151 drivers/net/ethernet/broadcom/bgmac.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb               162 drivers/net/ethernet/broadcom/bgmac.c 	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
skb               171 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
skb               175 drivers/net/ethernet/broadcom/bgmac.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               191 drivers/net/ethernet/broadcom/bgmac.c 	slot->skb = skb;
skb               193 drivers/net/ethernet/broadcom/bgmac.c 	netdev_sent_queue(net_dev, skb->len);
skb               211 drivers/net/ethernet/broadcom/bgmac.c 	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
skb               228 drivers/net/ethernet/broadcom/bgmac.c 	dev_kfree_skb(skb);
skb               268 drivers/net/ethernet/broadcom/bgmac.c 		if (slot->skb) {
skb               269 drivers/net/ethernet/broadcom/bgmac.c 			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
skb               271 drivers/net/ethernet/broadcom/bgmac.c 			bytes_compl += slot->skb->len;
skb               275 drivers/net/ethernet/broadcom/bgmac.c 			dev_kfree_skb(slot->skb);
skb               276 drivers/net/ethernet/broadcom/bgmac.c 			slot->skb = NULL;
skb               428 drivers/net/ethernet/broadcom/bgmac.c 		struct sk_buff *skb;
skb               469 drivers/net/ethernet/broadcom/bgmac.c 			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
skb               470 drivers/net/ethernet/broadcom/bgmac.c 			if (unlikely(!skb)) {
skb               476 drivers/net/ethernet/broadcom/bgmac.c 			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
skb               478 drivers/net/ethernet/broadcom/bgmac.c 			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
skb               481 drivers/net/ethernet/broadcom/bgmac.c 			skb_checksum_none_assert(skb);
skb               482 drivers/net/ethernet/broadcom/bgmac.c 			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
skb               485 drivers/net/ethernet/broadcom/bgmac.c 			napi_gro_receive(&bgmac->napi, skb);
skb               538 drivers/net/ethernet/broadcom/bgmac.c 		dev_kfree_skb(slot->skb);
skb               543 drivers/net/ethernet/broadcom/bgmac.c 		if (slot->skb)
skb              1223 drivers/net/ethernet/broadcom/bgmac.c static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
skb              1231 drivers/net/ethernet/broadcom/bgmac.c 	return bgmac_dma_tx_add(bgmac, ring, skb);
skb               433 drivers/net/ethernet/broadcom/bgmac.h 		struct sk_buff *skb;
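
bgmac_dma_tx_add() above is the canonical scatter-gather TX mapping: the linear head goes through dma_map_single(), each paged fragment through skb_frag_dma_map(), and a failure part-way unwinds every mapping made so far. A minimal sketch of that map/unwind pairing, assuming the caller provides an addrs[] array with nr_frags + 1 slots:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int map_skb_for_tx(struct device *dmadev, struct sk_buff *skb,
			  dma_addr_t *addrs)
{
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	/* Linear head first. */
	addrs[0] = dma_map_single(dmadev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dmadev, addrs[0]))
		return -ENOMEM;

	/* Then each page fragment. */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i + 1] = skb_frag_dma_map(dmadev, frag, 0,
						skb_frag_size(frag),
						DMA_TO_DEVICE);
		if (dma_mapping_error(dmadev, addrs[i + 1]))
			goto unwind;
	}
	return 0;

unwind:
	/* Undo fragments i-1..0, then the head. */
	while (--i >= 0)
		dma_unmap_page(dmadev, addrs[i + 1],
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	dma_unmap_single(dmadev, addrs[0], skb_headlen(skb),
			 DMA_TO_DEVICE);
	return -ENOMEM;
}
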
skb              2864 drivers/net/ethernet/broadcom/bnx2.c 		struct sk_buff *skb;
skb              2870 drivers/net/ethernet/broadcom/bnx2.c 		skb = tx_buf->skb;
skb              2873 drivers/net/ethernet/broadcom/bnx2.c 		prefetch(&skb->end);
skb              2890 drivers/net/ethernet/broadcom/bnx2.c 			skb_headlen(skb), PCI_DMA_TODEVICE);
skb              2892 drivers/net/ethernet/broadcom/bnx2.c 		tx_buf->skb = NULL;
skb              2903 drivers/net/ethernet/broadcom/bnx2.c 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb              2909 drivers/net/ethernet/broadcom/bnx2.c 		tx_bytes += skb->len;
skb              2910 drivers/net/ethernet/broadcom/bnx2.c 		dev_kfree_skb_any(skb);
skb              2944 drivers/net/ethernet/broadcom/bnx2.c 			struct sk_buff *skb, int count)
skb              2958 drivers/net/ethernet/broadcom/bnx2.c 	if (skb) {
skb              2962 drivers/net/ethernet/broadcom/bnx2.c 		shinfo = skb_shinfo(skb);
skb              2968 drivers/net/ethernet/broadcom/bnx2.c 		dev_kfree_skb(skb);
skb              3037 drivers/net/ethernet/broadcom/bnx2.c 	struct sk_buff *skb;
skb              3054 drivers/net/ethernet/broadcom/bnx2.c 	skb = build_skb(data, 0);
skb              3055 drivers/net/ethernet/broadcom/bnx2.c 	if (!skb) {
skb              3059 drivers/net/ethernet/broadcom/bnx2.c 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
skb              3061 drivers/net/ethernet/broadcom/bnx2.c 		skb_put(skb, len);
skb              3062 drivers/net/ethernet/broadcom/bnx2.c 		return skb;
skb              3071 drivers/net/ethernet/broadcom/bnx2.c 		skb_put(skb, hdr_len);
skb              3084 drivers/net/ethernet/broadcom/bnx2.c 				skb->len -= tail;
skb              3086 drivers/net/ethernet/broadcom/bnx2.c 					skb->tail -= tail;
skb              3089 drivers/net/ethernet/broadcom/bnx2.c 						&skb_shinfo(skb)->frags[i - 1];
skb              3091 drivers/net/ethernet/broadcom/bnx2.c 					skb->data_len -= tail;
skb              3093 drivers/net/ethernet/broadcom/bnx2.c 				return skb;
skb              3104 drivers/net/ethernet/broadcom/bnx2.c 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
skb              3113 drivers/net/ethernet/broadcom/bnx2.c 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
skb              3122 drivers/net/ethernet/broadcom/bnx2.c 			skb->data_len += frag_len;
skb              3123 drivers/net/ethernet/broadcom/bnx2.c 			skb->truesize += PAGE_SIZE;
skb              3124 drivers/net/ethernet/broadcom/bnx2.c 			skb->len += frag_len;
skb              3132 drivers/net/ethernet/broadcom/bnx2.c 	return skb;
skb              3170 drivers/net/ethernet/broadcom/bnx2.c 		struct sk_buff *skb;
skb              3228 drivers/net/ethernet/broadcom/bnx2.c 			skb = netdev_alloc_skb(bp->dev, len + 6);
skb              3229 drivers/net/ethernet/broadcom/bnx2.c 			if (!skb) {
skb              3236 drivers/net/ethernet/broadcom/bnx2.c 			memcpy(skb->data,
skb              3239 drivers/net/ethernet/broadcom/bnx2.c 			skb_reserve(skb, 6);
skb              3240 drivers/net/ethernet/broadcom/bnx2.c 			skb_put(skb, len);
skb              3246 drivers/net/ethernet/broadcom/bnx2.c 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
skb              3248 drivers/net/ethernet/broadcom/bnx2.c 			if (!skb)
skb              3253 drivers/net/ethernet/broadcom/bnx2.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
skb              3255 drivers/net/ethernet/broadcom/bnx2.c 		skb->protocol = eth_type_trans(skb, bp->dev);
skb              3258 drivers/net/ethernet/broadcom/bnx2.c 		    skb->protocol != htons(0x8100) &&
skb              3259 drivers/net/ethernet/broadcom/bnx2.c 		    skb->protocol != htons(ETH_P_8021AD)) {
skb              3261 drivers/net/ethernet/broadcom/bnx2.c 			dev_kfree_skb(skb);
skb              3266 drivers/net/ethernet/broadcom/bnx2.c 		skb_checksum_none_assert(skb);
skb              3273 drivers/net/ethernet/broadcom/bnx2.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3278 drivers/net/ethernet/broadcom/bnx2.c 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
skb              3281 drivers/net/ethernet/broadcom/bnx2.c 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
skb              3282 drivers/net/ethernet/broadcom/bnx2.c 		napi_gro_receive(&bnapi->napi, skb);
skb              5447 drivers/net/ethernet/broadcom/bnx2.c 			struct sk_buff *skb = tx_buf->skb;
skb              5450 drivers/net/ethernet/broadcom/bnx2.c 			if (!skb) {
skb              5457 drivers/net/ethernet/broadcom/bnx2.c 					 skb_headlen(skb),
skb              5460 drivers/net/ethernet/broadcom/bnx2.c 			tx_buf->skb = NULL;
skb              5468 drivers/net/ethernet/broadcom/bnx2.c 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
skb              5471 drivers/net/ethernet/broadcom/bnx2.c 			dev_kfree_skb(skb);
skb              5810 drivers/net/ethernet/broadcom/bnx2.c 	struct sk_buff *skb;
skb              5842 drivers/net/ethernet/broadcom/bnx2.c 	skb = netdev_alloc_skb(bp->dev, pkt_size);
skb              5843 drivers/net/ethernet/broadcom/bnx2.c 	if (!skb)
skb              5845 drivers/net/ethernet/broadcom/bnx2.c 	packet = skb_put(skb, pkt_size);
skb              5851 drivers/net/ethernet/broadcom/bnx2.c 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
skb              5854 drivers/net/ethernet/broadcom/bnx2.c 		dev_kfree_skb(skb);
skb              5892 drivers/net/ethernet/broadcom/bnx2.c 	dev_kfree_skb(skb);
skb              6595 drivers/net/ethernet/broadcom/bnx2.c bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              6609 drivers/net/ethernet/broadcom/bnx2.c 	i = skb_get_queue_mapping(skb);
skb              6615 drivers/net/ethernet/broadcom/bnx2.c 	    (skb_shinfo(skb)->nr_frags + 1))) {
skb              6621 drivers/net/ethernet/broadcom/bnx2.c 	len = skb_headlen(skb);
skb              6626 drivers/net/ethernet/broadcom/bnx2.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              6630 drivers/net/ethernet/broadcom/bnx2.c 	if (skb_vlan_tag_present(skb)) {
skb              6632 drivers/net/ethernet/broadcom/bnx2.c 			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
skb              6635 drivers/net/ethernet/broadcom/bnx2.c 	if ((mss = skb_shinfo(skb)->gso_size)) {
skb              6641 drivers/net/ethernet/broadcom/bnx2.c 		tcp_opt_len = tcp_optlen(skb);
skb              6643 drivers/net/ethernet/broadcom/bnx2.c 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
skb              6644 drivers/net/ethernet/broadcom/bnx2.c 			u32 tcp_off = skb_transport_offset(skb) -
skb              6660 drivers/net/ethernet/broadcom/bnx2.c 			iph = ip_hdr(skb);
skb              6669 drivers/net/ethernet/broadcom/bnx2.c 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
skb              6671 drivers/net/ethernet/broadcom/bnx2.c 		dev_kfree_skb_any(skb);
skb              6676 drivers/net/ethernet/broadcom/bnx2.c 	tx_buf->skb = skb;
skb              6686 drivers/net/ethernet/broadcom/bnx2.c 	last_frag = skb_shinfo(skb)->nr_frags;
skb              6688 drivers/net/ethernet/broadcom/bnx2.c 	tx_buf->is_gso = skb_is_gso(skb);
skb              6691 drivers/net/ethernet/broadcom/bnx2.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              6716 drivers/net/ethernet/broadcom/bnx2.c 	netdev_tx_sent_queue(txq, skb->len);
skb              6719 drivers/net/ethernet/broadcom/bnx2.c 	txr->tx_prod_bseq += skb->len;
skb              6748 drivers/net/ethernet/broadcom/bnx2.c 	tx_buf->skb = NULL;
skb              6750 drivers/net/ethernet/broadcom/bnx2.c 			 skb_headlen(skb), PCI_DMA_TODEVICE);
skb              6758 drivers/net/ethernet/broadcom/bnx2.c 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb              6762 drivers/net/ethernet/broadcom/bnx2.c 	dev_kfree_skb_any(skb);
skb              6643 drivers/net/ethernet/broadcom/bnx2.h 	struct sk_buff		*skb;
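
bnx2_rx_skb() above uses build_skb() to wrap a buffer the device already DMA'd into, rather than copying: reserve past the hardware-written header, skb_put() the payload length, resolve the protocol, and feed GRO. A minimal sketch of that receive shape, assuming a page_frag-backed buffer and a hypothetical pad for the device's frame offset:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void rx_one_buffer(struct napi_struct *napi,
			  struct net_device *dev, void *data,
			  unsigned int frag_size, unsigned int pad,
			  unsigned int len)
{
	struct sk_buff *skb = build_skb(data, frag_size);

	if (unlikely(!skb)) {
		skb_free_frag(data);	/* assumed page_frag buffer */
		return;
	}

	skb_reserve(skb, pad);		/* step over the HW header */
	skb_put(skb, len);		/* expose the payload */
	skb->protocol = eth_type_trans(skb, dev);
	skb_checksum_none_assert(skb);
	napi_gro_receive(napi, skb);
}
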
skb               357 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h 	struct sk_buff	*skb;
skb               771 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h #define SKB_CS_OFF(skb)		(offsetof(struct tcphdr, check) - \
skb               772 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h 				 skb->csum_offset)
skb               773 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h #define SKB_CS(skb)		(*(u16 *)(skb_transport_header(skb) + \
skb               774 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h 					  skb->csum_offset))
skb              2523 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
skb               201 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	struct sk_buff *skb = tx_buf->skb;
skb               207 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	prefetch(&skb->end);
skb               210 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	   txdata->txq_index, idx, tx_buf, skb);
skb               260 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	WARN_ON(!skb);
skb               261 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (likely(skb)) {
skb               263 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		(*bytes_compl) += skb->len;
skb               264 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		dev_kfree_skb_any(skb);
skb               268 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	tx_buf->skb = NULL;
skb               512 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
skb               524 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb               527 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
skb               538 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
skb               543 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
skb               588 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			       struct sk_buff *skb,
skb               607 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
skb               648 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			skb_fill_page_desc(skb, j, old_rx_pg.page,
skb               655 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				skb_fill_page_desc(skb, frag_id++,
skb               665 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb->data_len += frag_len;
skb               666 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb->truesize += SGE_PAGES;
skb               667 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb->len += frag_len;
skb               697 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
skb               699 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	const struct iphdr *iph = ip_hdr(skb);
skb               702 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	skb_set_transport_header(skb, sizeof(struct iphdr));
skb               703 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	th = tcp_hdr(skb);
skb               705 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
skb               709 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
skb               711 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	struct ipv6hdr *iph = ipv6_hdr(skb);
skb               714 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
skb               715 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	th = tcp_hdr(skb);
skb               717 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
skb               721 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
skb               724 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	skb_reset_network_header(skb);
skb               725 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	gro_func(bp, skb);
skb               726 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	tcp_gro_complete(skb);
skb               731 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			       struct sk_buff *skb)
skb               734 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (skb_shinfo(skb)->gso_size) {
skb               735 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		switch (be16_to_cpu(skb->protocol)) {
skb               737 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
skb               740 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
skb               745 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					 be16_to_cpu(skb->protocol));
skb               749 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	skb_record_rx_queue(skb, fp->rx_queue);
skb               750 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	napi_gro_receive(&fp->napi, skb);
skb               762 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	struct sk_buff *skb = NULL;
skb               782 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb = build_skb(data, fp->rx_frag_size);
skb               784 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (likely(skb)) {
skb               794 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb_reserve(skb, pad + NET_SKB_PAD);
skb               795 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb_put(skb, len);
skb               796 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
skb               798 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb->protocol = eth_type_trans(skb, bp->dev);
skb               799 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               802 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					 skb, cqe, cqe_idx)) {
skb               804 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
skb               805 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bnx2x_gro_receive(bp, fp, skb);
skb               809 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			dev_kfree_skb_any(skb);
skb               857 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
skb               877 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               911 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		struct sk_buff *skb;
skb              1032 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			skb = napi_alloc_skb(&fp->napi, len);
skb              1033 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			if (skb == NULL) {
skb              1039 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			memcpy(skb->data, data + pad, len);
skb              1048 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				skb = build_skb(data, fp->rx_frag_size);
skb              1049 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				if (unlikely(!skb)) {
skb              1055 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				skb_reserve(skb, pad);
skb              1066 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb_put(skb, len);
skb              1067 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb->protocol = eth_type_trans(skb, bp->dev);
skb              1071 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb_set_hash(skb, rxhash, rxhash_type);
skb              1073 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb_checksum_none_assert(skb);
skb              1076 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bnx2x_csum_validate(skb, cqe, fp,
skb              1079 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb_record_rx_queue(skb, fp->rx_queue);
skb              1084 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bnx2x_set_rx_ts(bp, skb);
skb              1088 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              1091 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		napi_gro_receive(&fp->napi, skb);
skb              1914 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              1920 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
skb              1926 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				(struct vlan_ethhdr *)skb->data;
skb              1937 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
skb              3337 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
skb              3343 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              3346 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	protocol = vlan_get_protocol(skb);
skb              3349 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		prot = ipv6_hdr(skb)->nexthdr;
skb              3352 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		prot = ip_hdr(skb)->protocol;
skb              3355 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
skb              3356 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		if (inner_ip_hdr(skb)->version == 6) {
skb              3358 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
skb              3362 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
skb              3369 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (skb_is_gso(skb)) {
skb              3370 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		if (skb_is_gso_v6(skb)) {
skb              3394 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
skb              3403 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
skb              3405 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
skb              3408 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
skb              3415 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				hlen = (int)(skb_inner_transport_header(skb) -
skb              3416 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					     skb->data) +
skb              3417 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					     inner_tcp_hdrlen(skb);
skb              3419 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				hlen = (int)(skb_transport_header(skb) -
skb              3420 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					     skb->data) + tcp_hdrlen(skb);
skb              3423 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			first_bd_sz = skb_headlen(skb) - hlen;
skb              3430 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
skb              3446 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			  skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
skb              3453 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
skb              3467 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
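The bnx2x_pkt_req_lin() references compute the TSO header length as the span from the start of linear data to the end of the (possibly inner) TCP header, then window over the frags to decide whether skb_linearize() is needed. A hedged sketch of just the header-length step:

	#include <linux/skbuff.h>
	#include <linux/tcp.h>

	static int my_tso_hdr_len(const struct sk_buff *skb)
	{
		/* L2+L3 headers preceding the transport header, plus the TCP
		 * header itself; everything after this offset is payload */
		return (int)(skb_transport_header(skb) - skb->data) + tcp_hdrlen(skb);
	}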
skb              3480 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static void bnx2x_set_pbd_gso(struct sk_buff *skb,
skb              3484 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
skb              3485 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
skb              3486 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
skb              3489 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		pbd->ip_id = bswab16(ip_hdr(skb)->id);
skb              3491 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
skb              3492 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 						   ip_hdr(skb)->daddr,
skb              3496 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb              3497 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 						 &ipv6_hdr(skb)->daddr,
skb              3515 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
skb              3519 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
skb              3524 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
skb              3528 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		return skb_inner_transport_header(skb) +
skb              3529 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			inner_tcp_hdrlen(skb) - skb->data;
skb              3535 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	return skb_inner_transport_header(skb) +
skb              3536 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		sizeof(struct udphdr) - skb->data;
skb              3549 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
skb              3553 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
skb              3558 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
skb              3562 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
skb              3567 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
skb              3571 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
skb              3592 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
skb              3596 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
skb              3601 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
skb              3604 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	pbd->ip_hlen_w = (skb_transport_header(skb) -
skb              3605 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			skb_network_header(skb)) >> 1;
skb              3611 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		hlen += tcp_hdrlen(skb) / 2;
skb              3619 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
skb              3622 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		s8 fix = SKB_CS_OFF(skb); /* signed! */
skb              3626 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
skb              3630 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bnx2x_csum_fix(skb_transport_header(skb),
skb              3631 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				       SKB_CS(skb), fix);
skb              3640 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
skb              3650 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	hlen_w = (skb_inner_transport_header(skb) -
skb              3651 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		  skb_network_header(skb)) >> 1;
skb              3654 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	hlen_w += inner_tcp_hdrlen(skb) >> 1;
skb              3660 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		struct iphdr *iph = ip_hdr(skb);
skb              3676 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
skb              3678 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
skb              3682 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
skb              3686 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					inner_ip_hdr(skb)->saddr,
skb              3687 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					inner_ip_hdr(skb)->daddr,
skb              3692 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					&inner_ipv6_hdr(skb)->saddr,
skb              3693 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					&inner_ipv6_hdr(skb)->daddr,
skb              3697 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	outerip_off = (skb_network_header(skb) - skb->data) >> 1;
skb              3703 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
skb              3706 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
skb              3708 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
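Several parsing-BD fields in the references above hold offsets in 16-bit words, hence the recurring ">> 1". A hedged helper showing that conversion for the encapsulated case:

	#include <linux/skbuff.h>

	static u16 my_inner_hdr_words(const struct sk_buff *skb)
	{
		/* inner transport header offset from the outer network header,
		 * expressed in 16-bit words as the hardware expects */
		return (skb_inner_transport_header(skb) -
			skb_network_header(skb)) >> 1;
	}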
skb              3712 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
skb              3721 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		ipv6 = inner_ipv6_hdr(skb);
skb              3723 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		ipv6 = ipv6_hdr(skb);
skb              3733 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              3749 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
skb              3761 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	txq_index = skb_get_queue_mapping(skb);
skb              3778 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			skb_shinfo(skb)->nr_frags +
skb              3786 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			dev_kfree_skb(skb);
skb              3798 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
skb              3799 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
skb              3800 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	   skb->len);
skb              3802 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	eth = (struct ethhdr *)skb->data;
skb              3816 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
skb              3819 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		if (skb_linearize(skb) != 0) {
skb              3822 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			dev_kfree_skb_any(skb);
skb              3828 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
skb              3829 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				 skb_headlen(skb), DMA_TO_DEVICE);
skb              3833 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		dev_kfree_skb_any(skb);
skb              3861 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb              3870 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              3872 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bp->ptp_tx_skb = skb_get(skb);
skb              3883 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	tx_buf->skb = skb;
skb              3890 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (skb_vlan_tag_present(skb)) {
skb              3892 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		    cpu_to_le16(skb_vlan_tag_get(skb));
skb              3904 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			if (__vlan_get_tag(skb, &vlan_tci)) {
skb              3928 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
skb              3938 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
skb              3950 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				(skb_inner_network_header(skb) -
skb              3951 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				 skb->data) >> 1;
skb              3954 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
skb              3972 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
skb              3977 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
skb              4018 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
skb              4028 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
skb              4042 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		   skb->len, hlen, skb_headlen(skb),
skb              4043 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		   skb_shinfo(skb)->gso_size);
skb              4047 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		if (unlikely(skb_headlen(skb) > hlen)) {
skb              4055 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				(skb_shinfo(skb)->gso_size <<
skb              4059 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
skb              4071 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              4072 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              4154 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	netdev_tx_sent_queue(txq, skb->len);
skb              4156 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	skb_tx_timestamp(skb);
skb               487 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb               502 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
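The bnx2x_start_xmit references map the linear part of the skb with dma_map_single() and drop the packet if the mapping fails. A minimal sketch of that map-or-drop step, assuming kernel context; my_map_head() is illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	static int my_map_head(struct device *dev, struct sk_buff *skb,
			       dma_addr_t *mapping)
	{
		*mapping = dma_map_single(dev, skb->data, skb_headlen(skb),
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, *mapping))) {
			dev_kfree_skb_any(skb);	/* safe in any context */
			return -ENOMEM;
		}
		return 0;	/* caller records the mapping in its tx ring */
	}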
skb              2495 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	struct sk_buff *skb;
skb              2553 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
skb              2554 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	if (!skb) {
skb              2559 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	packet = skb_put(skb, pkt_size);
skb              2565 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
skb              2566 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 				 skb_headlen(skb), DMA_TO_DEVICE);
skb              2569 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 		dev_kfree_skb(skb);
skb              2579 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	netdev_tx_sent_queue(txq, skb->len);
skb              2584 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	tx_buf->skb = skb;
skb              2592 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
skb              1160 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 					  i, cos, j, sw_bd->skb,
skb              12970 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
skb              12987 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	if (unlikely(skb_is_gso(skb) &&
skb              12988 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		     (skb_shinfo(skb)->gso_size > 9000) &&
skb              12989 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		     !skb_gso_validate_mac_len(skb, 9700)))
skb              12992 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	features = vlan_features_check(skb, features);
skb              12993 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	return vxlan_features_check(skb, features);
skb              15278 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
skb              15295 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
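bnx2x_features_check() above disables GSO for frames whose MAC-level length the hardware cannot validate, then applies the generic VLAN and VXLAN checks. A hedged sketch of that ndo_features_check shape, mirroring the references:

	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>
	#include <net/vxlan.h>

	static netdev_features_t my_features_check(struct sk_buff *skb,
						   struct net_device *dev,
						   netdev_features_t features)
	{
		/* oversized GSO the HW cannot segment: fall back to software */
		if (unlikely(skb_is_gso(skb) &&
			     skb_shinfo(skb)->gso_size > 9000 &&
			     !skb_gso_validate_mac_len(skb, 9700)))
			features &= ~NETIF_F_GSO_MASK;

		features = vlan_features_check(skb, features);
		return vxlan_features_check(skb, features);
	}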
skb               335 drivers/net/ethernet/broadcom/bnxt/bnxt.c static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
skb               337 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
skb               345 drivers/net/ethernet/broadcom/bnxt/bnxt.c static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               360 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	i = skb_get_queue_mapping(skb);
skb               362 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		dev_kfree_skb_any(skb);
skb               371 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
skb               376 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	length = skb->len;
skb               377 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	len = skb_headlen(skb);
skb               378 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	last_frag = skb_shinfo(skb)->nr_frags;
skb               385 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	tx_buf->skb = skb;
skb               389 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	cfa_action = bnxt_xmit_get_cfa_action(skb);
skb               390 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (skb_vlan_tag_present(skb)) {
skb               392 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				 skb_vlan_tag_get(skb);
skb               396 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (skb->vlan_proto == htons(ETH_P_8021Q))
skb               418 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               432 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_copy_from_linear_data(skb, pdata, len);
skb               435 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
skb               457 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		netdev_tx_sent_queue(txq, skb->len);
skb               475 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (skb_pad(skb, pad)) {
skb               477 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			tx_buf->skb = NULL;
skb               483 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
skb               486 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		dev_kfree_skb_any(skb);
skb               487 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		tx_buf->skb = NULL;
skb               502 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (skb_is_gso(skb)) {
skb               505 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (skb->encapsulation)
skb               506 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			hdr_len = skb_inner_network_offset(skb) +
skb               507 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				skb_inner_network_header_len(skb) +
skb               508 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				inner_tcp_hdrlen(skb);
skb               510 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			hdr_len = skb_transport_offset(skb) +
skb               511 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				tcp_hdrlen(skb);
skb               516 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		length = skb_shinfo(skb)->gso_size;
skb               519 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               528 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				     skb->len);
skb               539 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               565 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	netdev_tx_sent_queue(txq, skb->len);
skb               601 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	tx_buf->skb = NULL;
skb               603 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			 skb_headlen(skb), PCI_DMA_TODEVICE);
skb               611 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb               615 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	dev_kfree_skb_any(skb);
skb               630 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct sk_buff *skb;
skb               635 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb = tx_buf->skb;
skb               636 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		tx_buf->skb = NULL;
skb               644 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				 skb_headlen(skb), PCI_DMA_TODEVICE);
skb               653 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				skb_frag_size(&skb_shinfo(skb)->frags[j]),
skb               660 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		tx_bytes += skb->len;
skb               661 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		dev_kfree_skb_any(skb);
skb               934 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct sk_buff *skb;
skb               950 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
skb               951 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (!skb) {
skb               957 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
skb               958 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
skb               961 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	frag = &skb_shinfo(skb)->frags[0];
skb               964 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb->data_len -= payload;
skb               965 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb->tail += payload;
skb               967 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return skb;
skb               977 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct sk_buff *skb;
skb               986 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb = build_skb(data, 0);
skb               989 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (!skb) {
skb               994 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_reserve(skb, bp->rx_offset);
skb               995 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_put(skb, offset_and_len & 0xffff);
skb               996 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return skb;
skb              1001 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				     struct sk_buff *skb, u16 idx,
skb              1030 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_fill_page_desc(skb, i, cons_rx_buf->page,
skb              1046 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			shinfo = skb_shinfo(skb);
skb              1050 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			dev_kfree_skb(skb);
skb              1066 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb->data_len += frag_len;
skb              1067 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb->len += frag_len;
skb              1068 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb->truesize += PAGE_SIZE;
skb              1073 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return skb;
skb              1095 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct sk_buff *skb;
skb              1097 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb = napi_alloc_skb(&bnapi->napi, len);
skb              1098 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (!skb)
skb              1104 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
skb              1110 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_put(skb, len);
skb              1111 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return skb;
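bnxt_copy_skb() above is a classic copy-break: a small frame is copied into a fresh NAPI-allocated skb so the original DMA buffer can be recycled in place. A hedged sketch (the NET_IP_ALIGN prefix copy in the driver is elided here):

	#include <linux/skbuff.h>

	static struct sk_buff *my_copy_break(struct napi_struct *napi,
					     const void *data, unsigned int len)
	{
		struct sk_buff *skb = napi_alloc_skb(napi, len);

		if (!skb)
			return NULL;	/* caller recycles the rx buffer anyway */
		skb_put_data(skb, data, len);	/* copy payload, advance tail */
		return skb;
	}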
skb              1295 drivers/net/ethernet/broadcom/bnxt/bnxt.c static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
skb              1300 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct iphdr *iph = (struct iphdr *)skb->data;
skb              1305 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
skb              1312 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
skb              1314 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
skb              1321 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					   struct sk_buff *skb)
skb              1340 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
skb              1358 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_set_network_header(skb, nw_off);
skb              1360 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct ipv6hdr *iph = ipv6_hdr(skb);
skb              1362 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
skb              1363 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		len = skb->len - skb_transport_offset(skb);
skb              1364 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		th = tcp_hdr(skb);
skb              1367 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct iphdr *iph = ip_hdr(skb);
skb              1369 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
skb              1370 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		len = skb->len - skb_transport_offset(skb);
skb              1371 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		th = tcp_hdr(skb);
skb              1376 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
skb              1379 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_gro_tunnel(skb, proto);
skb              1382 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return skb;
skb              1387 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					   struct sk_buff *skb)
skb              1399 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_set_network_header(skb, nw_off);
skb              1402 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_set_transport_header(skb, nw_off + iphdr_len);
skb              1405 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
skb              1408 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_gro_tunnel(skb, proto);
skb              1411 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return skb;
skb              1419 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					   struct sk_buff *skb)
skb              1433 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_set_network_header(skb, nw_off);
skb              1434 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		iph = ip_hdr(skb);
skb              1435 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
skb              1436 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		len = skb->len - skb_transport_offset(skb);
skb              1437 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		th = tcp_hdr(skb);
skb              1444 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_set_network_header(skb, nw_off);
skb              1445 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		iph = ipv6_hdr(skb);
skb              1446 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
skb              1447 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		len = skb->len - skb_transport_offset(skb);
skb              1448 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		th = tcp_hdr(skb);
skb              1451 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		dev_kfree_skb_any(skb);
skb              1456 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_gro_tunnel(skb, skb->protocol);
skb              1458 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return skb;
skb              1465 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					   struct sk_buff *skb)
skb              1473 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		return skb;
skb              1475 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	NAPI_GRO_CB(skb)->count = segs;
skb              1476 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_shinfo(skb)->gso_size =
skb              1478 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
skb              1483 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
skb              1484 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (likely(skb))
skb              1485 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		tcp_gro_complete(skb);
skb              1487 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return skb;
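For hardware-aggregated (TPA) frames, the bnxt_gro_skb references stamp the GSO metadata that the stack would normally derive itself, then finish with tcp_gro_complete(). A hedged restatement of that tail step:

	#include <linux/netdevice.h>
	#include <net/tcp.h>

	static void my_finish_hw_gro(struct sk_buff *skb, u16 segs, u16 mss,
				     unsigned int gso_type)
	{
		NAPI_GRO_CB(skb)->count = segs;		/* aggregated segments */
		skb_shinfo(skb)->gso_size = mss;
		skb_shinfo(skb)->gso_type = gso_type;
		tcp_gro_complete(skb);	/* patch up the merged TCP header */
	}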
skb              1514 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct sk_buff *skb;
skb              1571 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
skb              1572 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (!skb) {
skb              1590 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb = build_skb(data, 0);
skb              1595 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (!skb) {
skb              1600 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_reserve(skb, bp->rx_offset);
skb              1601 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_put(skb, len);
skb              1605 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
skb              1606 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (!skb) {
skb              1612 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb->protocol =
skb              1613 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
skb              1616 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
skb              1619 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
skb              1624 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
skb              1627 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_checksum_none_assert(skb);
skb              1629 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1630 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb->csum_level =
skb              1635 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
skb              1637 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return skb;
skb              1653 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			     struct sk_buff *skb)
skb              1655 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (skb->dev != bp->dev) {
skb              1657 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_vf_rep_rx(bp, skb);
skb              1660 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_record_rx_queue(skb, bnapi->index);
skb              1661 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	napi_gro_receive(&bnapi->napi, skb);
skb              1685 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct sk_buff *skb;
skb              1718 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
skb              1722 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (IS_ERR(skb))
skb              1726 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (likely(skb)) {
skb              1727 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			bnxt_deliver_skb(bp, bnapi, skb);
skb              1790 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
skb              1792 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (!skb) {
skb              1806 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
skb              1808 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (!skb) {
skb              1815 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
skb              1816 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (!skb) {
skb              1829 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
skb              1833 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
skb              1837 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
skb              1842 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
skb              1845 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	skb_checksum_none_assert(skb);
skb              1848 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1849 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
skb              1858 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_deliver_skb(bp, bnapi, skb);
skb              2488 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			struct sk_buff *skb;
skb              2504 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			skb = tx_buf->skb;
skb              2505 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			if (!skb) {
skb              2510 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			tx_buf->skb = NULL;
skb              2513 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				dev_kfree_skb(skb);
skb              2520 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					 skb_headlen(skb),
skb              2527 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
skb              2535 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			dev_kfree_skb(skb);
skb              11034 drivers/net/ethernet/broadcom/bnxt/bnxt.c static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb              11040 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
skb              11065 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
skb              11091 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
skb              11263 drivers/net/ethernet/broadcom/bnxt/bnxt.c static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
skb              11269 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
skb               684 drivers/net/ethernet/broadcom/bnxt/bnxt.h 		struct sk_buff		*skb;
skb              2818 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	struct sk_buff *skb;
skb              2827 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	skb = netdev_alloc_skb(bp->dev, pkt_size);
skb              2828 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	if (!skb)
skb              2830 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	data = skb_put(skb, pkt_size);
skb              2838 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
skb              2841 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		dev_kfree_skb(skb);
skb              2853 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	dev_kfree_skb(skb);
skb               113 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb,
skb               117 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	int rc, len = skb->len;
skb               119 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	skb_dst_drop(skb);
skb               121 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	skb_dst_set(skb, (struct dst_entry *)vf_rep->dst);
skb               122 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	skb->dev = vf_rep->dst->u.port_info.lower_dev;
skb               124 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	rc = dev_queue_xmit(skb);
skb               194 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
skb               196 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev);
skb               198 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	vf_rep->rx_stats.bytes += skb->len;
skb               201 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 	netif_receive_skb(skb);
skb                20 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb);
skb                46 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h static inline void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
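The bnxt_vf_rep_xmit references re-target an skb at the uplink by swapping in the representor's metadata dst, then requeue it with dev_queue_xmit(). A hedged sketch of that redirect idiom:

	#include <linux/netdevice.h>
	#include <net/dst.h>
	#include <net/dst_metadata.h>

	static int my_redirect(struct sk_buff *skb, struct metadata_dst *md_dst,
			       struct net_device *lower)
	{
		skb_dst_drop(skb);			/* drop any stale dst */
		dst_hold((struct dst_entry *)md_dst);
		skb_dst_set(skb, (struct dst_entry *)md_dst);
		skb->dev = lower;			/* hand to the real uplink */
		return dev_queue_xmit(skb);		/* requeue on that device */
	}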
skb              1326 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct sk_buff *skb;
skb              1328 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	skb = cb->skb;
skb              1330 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (skb) {
skb              1331 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		cb->skb = NULL;
skb              1332 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (cb == GENET_CB(skb)->first_cb)
skb              1342 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (cb == GENET_CB(skb)->last_cb)
skb              1343 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			return skb;
skb              1360 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct sk_buff *skb;
skb              1362 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	skb = cb->skb;
skb              1363 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	cb->skb = NULL;
skb              1371 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	return skb;
skb              1384 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct sk_buff *skb;
skb              1405 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
skb              1407 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (skb) {
skb              1409 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			bytes_compl += GENET_CB(skb)->bytes_sent;
skb              1410 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			dev_consume_skb_any(skb);
skb              1486 drivers/net/ethernet/broadcom/genet/bcmgenet.c 					    struct sk_buff *skb)
skb              1495 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
skb              1499 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		new_skb = skb_realloc_headroom(skb, sizeof(*status));
skb              1500 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		dev_kfree_skb(skb);
skb              1505 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		skb = new_skb;
skb              1508 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	skb_push(skb, sizeof(*status));
skb              1509 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	status = (struct status_64 *)skb->data;
skb              1511 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (skb->ip_summed  == CHECKSUM_PARTIAL) {
skb              1512 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ip_ver = skb->protocol;
skb              1515 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			ip_proto = ip_hdr(skb)->protocol;
skb              1518 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			ip_proto = ipv6_hdr(skb)->nexthdr;
skb              1521 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			return skb;
skb              1524 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		offset = skb_checksum_start_offset(skb) - sizeof(*status);
skb              1526 drivers/net/ethernet/broadcom/genet/bcmgenet.c 				(offset + skb->csum_offset);
skb              1543 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	return skb;
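bcmgenet_put_tx_csum() above prepends a 64-byte status block and must grow headroom when the skb lacks it. A hedged sketch of that grow-then-push idiom; note the original skb is freed on both outcomes of the realloc, matching the references:

	#include <linux/skbuff.h>

	static struct sk_buff *my_push_hdr(struct sk_buff *skb, unsigned int hlen)
	{
		if (unlikely(skb_headroom(skb) < hlen)) {
			struct sk_buff *nskb = skb_realloc_headroom(skb, hlen);

			dev_kfree_skb(skb);	/* realloc copies; old skb goes */
			if (!nskb)
				return NULL;
			skb = nskb;
		}
		skb_push(skb, hlen);	/* header now starts at skb->data */
		return skb;
	}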
skb              1546 drivers/net/ethernet/broadcom/genet/bcmgenet.c static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1561 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	index = skb_get_queue_mapping(skb);
skb              1577 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              1591 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (skb_padto(skb, ETH_ZLEN)) {
skb              1599 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	GENET_CB(skb)->bytes_sent = skb->len;
skb              1603 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		skb = bcmgenet_put_tx_csum(dev, skb);
skb              1604 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (!skb) {
skb              1617 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			GENET_CB(skb)->first_cb = tx_cb_ptr;
skb              1618 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			size = skb_headlen(skb);
skb              1619 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			mapping = dma_map_single(kdev, skb->data, size,
skb              1623 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			frag = &skb_shinfo(skb)->frags[i - 1];
skb              1639 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		tx_cb_ptr->skb = skb;
skb              1646 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1655 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	GENET_CB(skb)->last_cb = tx_cb_ptr;
skb              1656 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	skb_tx_timestamp(skb);
skb              1663 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
skb              1687 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	dev_kfree_skb(skb);
skb              1695 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct sk_buff *skb;
skb              1700 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
skb              1702 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (!skb) {
skb              1710 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
skb              1714 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		dev_kfree_skb_any(skb);
skb              1724 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	cb->skb = skb;
skb              1742 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct sk_buff *skb;
skb              1789 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		skb = bcmgenet_rx_refill(priv, cb);
skb              1791 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (unlikely(!skb)) {
skb              1802 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			status = (struct status_64 *)skb->data;
skb              1821 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			dev_kfree_skb_any(skb);
skb              1842 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			dev_kfree_skb_any(skb);
skb              1849 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		skb_put(skb, len);
skb              1851 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			skb_pull(skb, 64);
skb              1856 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1859 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		skb_pull(skb, 2);
skb              1863 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			skb_trim(skb, len - ETH_FCS_LEN);
skb              1870 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		skb->protocol = eth_type_trans(skb, priv->dev);
skb              1877 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		napi_gro_receive(&ring->napi, skb);
skb              1940 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct sk_buff *skb;
skb              1948 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		skb = bcmgenet_rx_refill(priv, cb);
skb              1949 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (skb)
skb              1950 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			dev_consume_skb_any(skb);
skb              1951 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (!cb->skb)
skb              1960 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct sk_buff *skb;
skb              1967 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
skb              1968 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (skb)
skb              1969 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			dev_consume_skb_any(skb);
skb               488 drivers/net/ethernet/broadcom/genet/bcmgenet.h 	struct sk_buff      *skb;
skb               554 drivers/net/ethernet/broadcom/genet/bcmgenet.h #define GENET_CB(skb)	((struct bcmgenet_skb_cb *)((skb)->cb))
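The GENET_CB() macro above keeps per-packet driver state in the 48-byte skb->cb scratch area. A hedged sketch of the same pattern with illustrative fields:

	#include <linux/skbuff.h>

	struct my_skb_cb {
		void *first_cb;		/* illustrative: ring control blocks */
		void *last_cb;
		unsigned int bytes_sent;
	};
	#define MY_CB(skb)	((struct my_skb_cb *)((skb)->cb))
	/* must hold: sizeof(struct my_skb_cb) <= sizeof(((struct sk_buff *)0)->cb) */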
skb               289 drivers/net/ethernet/broadcom/sb1250-mac.c static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
skb               744 drivers/net/ethernet/broadcom/sb1250-mac.c static inline void sbdma_align_skb(struct sk_buff *skb,
skb               747 drivers/net/ethernet/broadcom/sb1250-mac.c 	unsigned char *addr = skb->data;
skb               750 drivers/net/ethernet/broadcom/sb1250-mac.c 	skb_reserve(skb, newaddr - addr + offset);
skb              2018 drivers/net/ethernet/broadcom/sb1250-mac.c static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
skb              2031 drivers/net/ethernet/broadcom/sb1250-mac.c 	if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
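sbdma_align_skb() above aligns skb->data to a power-of-two boundary using nothing but skb_reserve(). A hedged restatement:

	#include <linux/kernel.h>
	#include <linux/skbuff.h>

	static void my_align_skb(struct sk_buff *skb, int power2, int offset)
	{
		unsigned char *addr = skb->data;
		unsigned char *aligned = PTR_ALIGN(addr, power2);

		/* consume just enough headroom to land on the boundary */
		skb_reserve(skb, aligned - addr + offset);
	}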
skb              6561 drivers/net/ethernet/broadcom/tg3.c 		struct sk_buff *skb = ri->skb;
skb              6564 drivers/net/ethernet/broadcom/tg3.c 		if (unlikely(skb == NULL)) {
skb              6576 drivers/net/ethernet/broadcom/tg3.c 			skb_tstamp_tx(skb, &timestamp);
skb              6581 drivers/net/ethernet/broadcom/tg3.c 				 skb_headlen(skb),
skb              6584 drivers/net/ethernet/broadcom/tg3.c 		ri->skb = NULL;
skb              6594 drivers/net/ethernet/broadcom/tg3.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              6596 drivers/net/ethernet/broadcom/tg3.c 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
skb              6601 drivers/net/ethernet/broadcom/tg3.c 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb              6614 drivers/net/ethernet/broadcom/tg3.c 		bytes_compl += skb->len;
skb              6616 drivers/net/ethernet/broadcom/tg3.c 		dev_consume_skb_any(skb);
skb              6842 drivers/net/ethernet/broadcom/tg3.c 		struct sk_buff *skb;
skb              6907 drivers/net/ethernet/broadcom/tg3.c 			skb = build_skb(data, frag_size);
skb              6908 drivers/net/ethernet/broadcom/tg3.c 			if (!skb) {
skb              6912 drivers/net/ethernet/broadcom/tg3.c 			skb_reserve(skb, TG3_RX_OFFSET(tp));
skb              6917 drivers/net/ethernet/broadcom/tg3.c 			skb = netdev_alloc_skb(tp->dev,
skb              6919 drivers/net/ethernet/broadcom/tg3.c 			if (skb == NULL)
skb              6922 drivers/net/ethernet/broadcom/tg3.c 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
skb              6924 drivers/net/ethernet/broadcom/tg3.c 			memcpy(skb->data,
skb              6930 drivers/net/ethernet/broadcom/tg3.c 		skb_put(skb, len);
skb              6933 drivers/net/ethernet/broadcom/tg3.c 						 skb_hwtstamps(skb));
skb              6939 drivers/net/ethernet/broadcom/tg3.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              6941 drivers/net/ethernet/broadcom/tg3.c 			skb_checksum_none_assert(skb);
skb              6943 drivers/net/ethernet/broadcom/tg3.c 		skb->protocol = eth_type_trans(skb, tp->dev);
skb              6946 drivers/net/ethernet/broadcom/tg3.c 		    skb->protocol != htons(ETH_P_8021Q) &&
skb              6947 drivers/net/ethernet/broadcom/tg3.c 		    skb->protocol != htons(ETH_P_8021AD)) {
skb              6948 drivers/net/ethernet/broadcom/tg3.c 			dev_kfree_skb_any(skb);
skb              6954 drivers/net/ethernet/broadcom/tg3.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              6957 drivers/net/ethernet/broadcom/tg3.c 		napi_gro_receive(&tnapi->napi, skb);
skb              7771 drivers/net/ethernet/broadcom/tg3.c 	struct sk_buff *skb;
skb              7774 drivers/net/ethernet/broadcom/tg3.c 	skb = txb->skb;
skb              7775 drivers/net/ethernet/broadcom/tg3.c 	txb->skb = NULL;
skb              7779 drivers/net/ethernet/broadcom/tg3.c 			 skb_headlen(skb),
skb              7789 drivers/net/ethernet/broadcom/tg3.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              7813 drivers/net/ethernet/broadcom/tg3.c 	struct sk_buff *new_skb, *skb = *pskb;
skb              7818 drivers/net/ethernet/broadcom/tg3.c 		new_skb = skb_copy(skb, GFP_ATOMIC);
skb              7820 drivers/net/ethernet/broadcom/tg3.c 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
skb              7822 drivers/net/ethernet/broadcom/tg3.c 		new_skb = skb_copy_expand(skb,
skb              7823 drivers/net/ethernet/broadcom/tg3.c 					  skb_headroom(skb) + more_headroom,
skb              7824 drivers/net/ethernet/broadcom/tg3.c 					  skb_tailroom(skb), GFP_ATOMIC);
skb              7842 drivers/net/ethernet/broadcom/tg3.c 			tnapi->tx_buffers[*entry].skb = new_skb;
skb              7856 drivers/net/ethernet/broadcom/tg3.c 	dev_consume_skb_any(skb);
skb              7861 drivers/net/ethernet/broadcom/tg3.c static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
skb              7866 drivers/net/ethernet/broadcom/tg3.c 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
skb              7875 drivers/net/ethernet/broadcom/tg3.c 		       struct netdev_queue *txq, struct sk_buff *skb)
skb              7878 drivers/net/ethernet/broadcom/tg3.c 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
skb              7896 drivers/net/ethernet/broadcom/tg3.c 	segs = skb_gso_segment(skb, tp->dev->features &
skb              7909 drivers/net/ethernet/broadcom/tg3.c 	dev_consume_skb_any(skb);
skb              7915 drivers/net/ethernet/broadcom/tg3.c static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              7930 drivers/net/ethernet/broadcom/tg3.c 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
skb              7931 drivers/net/ethernet/broadcom/tg3.c 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
skb              7942 drivers/net/ethernet/broadcom/tg3.c 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
skb              7956 drivers/net/ethernet/broadcom/tg3.c 	mss = skb_shinfo(skb)->gso_size;
skb              7960 drivers/net/ethernet/broadcom/tg3.c 		if (skb_cow_head(skb, 0))
skb              7963 drivers/net/ethernet/broadcom/tg3.c 		iph = ip_hdr(skb);
skb              7964 drivers/net/ethernet/broadcom/tg3.c 		tcp_opt_len = tcp_optlen(skb);
skb              7966 drivers/net/ethernet/broadcom/tg3.c 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
skb              7971 drivers/net/ethernet/broadcom/tg3.c 		if (skb->protocol == htons(ETH_P_8021Q) ||
skb              7972 drivers/net/ethernet/broadcom/tg3.c 		    skb->protocol == htons(ETH_P_8021AD)) {
skb              7973 drivers/net/ethernet/broadcom/tg3.c 			if (tg3_tso_bug_gso_check(tnapi, skb))
skb              7974 drivers/net/ethernet/broadcom/tg3.c 				return tg3_tso_bug(tp, tnapi, txq, skb);
skb              7978 drivers/net/ethernet/broadcom/tg3.c 		if (!skb_is_gso_v6(skb)) {
skb              7981 drivers/net/ethernet/broadcom/tg3.c 				if (tg3_tso_bug_gso_check(tnapi, skb))
skb              7982 drivers/net/ethernet/broadcom/tg3.c 					return tg3_tso_bug(tp, tnapi, txq, skb);
skb              7994 drivers/net/ethernet/broadcom/tg3.c 		tcph = tcp_hdr(skb);
skb              8030 drivers/net/ethernet/broadcom/tg3.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              8034 drivers/net/ethernet/broadcom/tg3.c 		if (skb->protocol == htons(ETH_P_8021Q) ||
skb              8035 drivers/net/ethernet/broadcom/tg3.c 		    skb->protocol == htons(ETH_P_8021AD)) {
skb              8036 drivers/net/ethernet/broadcom/tg3.c 			if (skb_checksum_help(skb))
skb              8044 drivers/net/ethernet/broadcom/tg3.c 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
skb              8047 drivers/net/ethernet/broadcom/tg3.c 	if (skb_vlan_tag_present(skb)) {
skb              8049 drivers/net/ethernet/broadcom/tg3.c 		vlan = skb_vlan_tag_get(skb);
skb              8052 drivers/net/ethernet/broadcom/tg3.c 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
skb              8054 drivers/net/ethernet/broadcom/tg3.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              8058 drivers/net/ethernet/broadcom/tg3.c 	len = skb_headlen(skb);
skb              8060 drivers/net/ethernet/broadcom/tg3.c 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
skb              8065 drivers/net/ethernet/broadcom/tg3.c 	tnapi->tx_buffers[entry].skb = skb;
skb              8074 drivers/net/ethernet/broadcom/tg3.c 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
skb              8077 drivers/net/ethernet/broadcom/tg3.c 	} else if (skb_shinfo(skb)->nr_frags > 0) {
skb              8088 drivers/net/ethernet/broadcom/tg3.c 		last = skb_shinfo(skb)->nr_frags - 1;
skb              8090 drivers/net/ethernet/broadcom/tg3.c 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              8096 drivers/net/ethernet/broadcom/tg3.c 			tnapi->tx_buffers[entry].skb = NULL;
skb              8116 drivers/net/ethernet/broadcom/tg3.c 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
skb              8125 drivers/net/ethernet/broadcom/tg3.c 			return tg3_tso_bug(tp, tnapi, txq, skb);
skb              8133 drivers/net/ethernet/broadcom/tg3.c 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
skb              8138 drivers/net/ethernet/broadcom/tg3.c 	skb_tx_timestamp(skb);
skb              8139 drivers/net/ethernet/broadcom/tg3.c 	netdev_tx_sent_queue(txq, skb->len);
skb              8167 drivers/net/ethernet/broadcom/tg3.c 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
skb              8169 drivers/net/ethernet/broadcom/tg3.c 	dev_kfree_skb_any(skb);
skb              8561 drivers/net/ethernet/broadcom/tg3.c 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
skb              8563 drivers/net/ethernet/broadcom/tg3.c 			if (!skb)
skb              8567 drivers/net/ethernet/broadcom/tg3.c 					 skb_shinfo(skb)->nr_frags - 1);
skb              8569 drivers/net/ethernet/broadcom/tg3.c 			dev_consume_skb_any(skb);
skb              13431 drivers/net/ethernet/broadcom/tg3.c 	struct sk_buff *skb;
skb              13452 drivers/net/ethernet/broadcom/tg3.c 	skb = netdev_alloc_skb(tp->dev, tx_len);
skb              13453 drivers/net/ethernet/broadcom/tg3.c 	if (!skb)
skb              13456 drivers/net/ethernet/broadcom/tg3.c 	tx_data = skb_put(skb, tx_len);
skb              13518 drivers/net/ethernet/broadcom/tg3.c 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
skb              13520 drivers/net/ethernet/broadcom/tg3.c 		dev_kfree_skb(skb);
skb              13525 drivers/net/ethernet/broadcom/tg3.c 	tnapi->tx_buffers[val].skb = skb;
skb              13538 drivers/net/ethernet/broadcom/tg3.c 		tnapi->tx_buffers[val].skb = NULL;
skb              13539 drivers/net/ethernet/broadcom/tg3.c 		dev_kfree_skb(skb);
skb              13568 drivers/net/ethernet/broadcom/tg3.c 	dev_kfree_skb(skb);
skb              2868 drivers/net/ethernet/broadcom/tg3.h 	struct sk_buff			*skb;
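The tg3_tso_bug references fall back to software segmentation via skb_gso_segment() when the chip cannot TSO a frame, then transmit each resulting segment. A hedged sketch of that fallback loop; my_xmit_one() stands in for the driver's own xmit entry:

	#include <linux/err.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void my_xmit_one(struct sk_buff *seg);	/* driver-specific */

	static void my_sw_gso_fallback(struct sk_buff *skb,
				       netdev_features_t features)
	{
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features & ~(NETIF_F_TSO | NETIF_F_TSO6));
		if (IS_ERR_OR_NULL(segs))
			return;			/* caller drops the original */

		while (segs) {			/* walk the segment list */
			struct sk_buff *next = segs->next;

			segs->next = NULL;
			my_xmit_one(segs);
			segs = next;
		}
		dev_consume_skb_any(skb);	/* original fully consumed */
	}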
skb                94 drivers/net/ethernet/brocade/bna/bnad.c 	struct sk_buff *skb;
skb               100 drivers/net/ethernet/brocade/bna/bnad.c 	skb = unmap->skb;
skb               101 drivers/net/ethernet/brocade/bna/bnad.c 	unmap->skb = NULL;
skb               105 drivers/net/ethernet/brocade/bna/bnad.c 		skb_headlen(skb), DMA_TO_DEVICE);
skb               140 drivers/net/ethernet/brocade/bna/bnad.c 	struct sk_buff *skb;
skb               144 drivers/net/ethernet/brocade/bna/bnad.c 		skb = unmap_q[i].skb;
skb               145 drivers/net/ethernet/brocade/bna/bnad.c 		if (!skb)
skb               149 drivers/net/ethernet/brocade/bna/bnad.c 		dev_kfree_skb_any(skb);
skb               165 drivers/net/ethernet/brocade/bna/bnad.c 	struct sk_buff *skb;
skb               182 drivers/net/ethernet/brocade/bna/bnad.c 		skb = unmap->skb;
skb               185 drivers/net/ethernet/brocade/bna/bnad.c 		sent_bytes += skb->len;
skb               191 drivers/net/ethernet/brocade/bna/bnad.c 		dev_kfree_skb_any(skb);
skb               309 drivers/net/ethernet/brocade/bna/bnad.c 	if (!unmap->skb)
skb               315 drivers/net/ethernet/brocade/bna/bnad.c 	dev_kfree_skb_any(unmap->skb);
skb               316 drivers/net/ethernet/brocade/bna/bnad.c 	unmap->skb = NULL;
skb               419 drivers/net/ethernet/brocade/bna/bnad.c 	struct sk_buff *skb;
skb               430 drivers/net/ethernet/brocade/bna/bnad.c 		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
skb               432 drivers/net/ethernet/brocade/bna/bnad.c 		if (unlikely(!skb)) {
skb               438 drivers/net/ethernet/brocade/bna/bnad.c 		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
skb               441 drivers/net/ethernet/brocade/bna/bnad.c 			dev_kfree_skb_any(skb);
skb               447 drivers/net/ethernet/brocade/bna/bnad.c 		unmap->skb = skb;
skb               519 drivers/net/ethernet/brocade/bna/bnad.c bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
skb               552 drivers/net/ethernet/brocade/bna/bnad.c 		skb->truesize += unmap->vector.len;
skb               555 drivers/net/ethernet/brocade/bna/bnad.c 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
skb               565 drivers/net/ethernet/brocade/bna/bnad.c 	skb->len += totlen;
skb               566 drivers/net/ethernet/brocade/bna/bnad.c 	skb->data_len += totlen;
skb               570 drivers/net/ethernet/brocade/bna/bnad.c bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
skb               573 drivers/net/ethernet/brocade/bna/bnad.c 	prefetch(skb->data);
skb               579 drivers/net/ethernet/brocade/bna/bnad.c 	skb_put(skb, len);
skb               580 drivers/net/ethernet/brocade/bna/bnad.c 	skb->protocol = eth_type_trans(skb, bnad->netdev);
skb               582 drivers/net/ethernet/brocade/bna/bnad.c 	unmap->skb = NULL;
skb               593 drivers/net/ethernet/brocade/bna/bnad.c 	struct sk_buff *skb = NULL;
skb               631 drivers/net/ethernet/brocade/bna/bnad.c 			skb = unmap->skb;
skb               633 drivers/net/ethernet/brocade/bna/bnad.c 			skb = napi_get_frags(&rx_ctrl->napi);
skb               634 drivers/net/ethernet/brocade/bna/bnad.c 			if (unlikely(!skb))
skb               637 drivers/net/ethernet/brocade/bna/bnad.c 		prefetch(skb);
skb               690 drivers/net/ethernet/brocade/bna/bnad.c 			bnad_cq_setup_skb(bnad, skb, unmap, len);
skb               692 drivers/net/ethernet/brocade/bna/bnad.c 			bnad_cq_setup_skb_frags(ccb, skb, nvecs);
skb               706 drivers/net/ethernet/brocade/bna/bnad.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               708 drivers/net/ethernet/brocade/bna/bnad.c 			skb_checksum_none_assert(skb);
skb               712 drivers/net/ethernet/brocade/bna/bnad.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
skb               715 drivers/net/ethernet/brocade/bna/bnad.c 			netif_receive_skb(skb);
skb              2481 drivers/net/ethernet/brocade/bna/bnad.c bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
skb              2485 drivers/net/ethernet/brocade/bna/bnad.c 	err = skb_cow_head(skb, 0);
skb              2495 drivers/net/ethernet/brocade/bna/bnad.c 	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
skb              2496 drivers/net/ethernet/brocade/bna/bnad.c 		struct iphdr *iph = ip_hdr(skb);
skb              2502 drivers/net/ethernet/brocade/bna/bnad.c 		tcp_hdr(skb)->check =
skb              2507 drivers/net/ethernet/brocade/bna/bnad.c 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb              2510 drivers/net/ethernet/brocade/bna/bnad.c 		tcp_hdr(skb)->check =
skb              2809 drivers/net/ethernet/brocade/bna/bnad.c 		    struct sk_buff *skb, struct bna_txq_entry *txqent)
skb              2815 drivers/net/ethernet/brocade/bna/bnad.c 	if (skb_vlan_tag_present(skb)) {
skb              2816 drivers/net/ethernet/brocade/bna/bnad.c 		vlan_tag = (u16)skb_vlan_tag_get(skb);
skb              2826 drivers/net/ethernet/brocade/bna/bnad.c 	if (skb_is_gso(skb)) {
skb              2827 drivers/net/ethernet/brocade/bna/bnad.c 		gso_size = skb_shinfo(skb)->gso_size;
skb              2832 drivers/net/ethernet/brocade/bna/bnad.c 		if (unlikely((gso_size + skb_transport_offset(skb) +
skb              2833 drivers/net/ethernet/brocade/bna/bnad.c 			      tcp_hdrlen(skb)) >= skb->len)) {
skb              2842 drivers/net/ethernet/brocade/bna/bnad.c 		if (bnad_tso_prepare(bnad, skb)) {
skb              2850 drivers/net/ethernet/brocade/bna/bnad.c 			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
skb              2855 drivers/net/ethernet/brocade/bna/bnad.c 		if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
skb              2860 drivers/net/ethernet/brocade/bna/bnad.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              2861 drivers/net/ethernet/brocade/bna/bnad.c 			__be16 net_proto = vlan_get_protocol(skb);
skb              2865 drivers/net/ethernet/brocade/bna/bnad.c 				proto = ip_hdr(skb)->protocol;
skb              2869 drivers/net/ethernet/brocade/bna/bnad.c 				proto = ipv6_hdr(skb)->nexthdr;
skb              2876 drivers/net/ethernet/brocade/bna/bnad.c 					      (0, skb_transport_offset(skb)));
skb              2880 drivers/net/ethernet/brocade/bna/bnad.c 				if (unlikely(skb_headlen(skb) <
skb              2881 drivers/net/ethernet/brocade/bna/bnad.c 					    skb_transport_offset(skb) +
skb              2882 drivers/net/ethernet/brocade/bna/bnad.c 				    tcp_hdrlen(skb))) {
skb              2890 drivers/net/ethernet/brocade/bna/bnad.c 					      (0, skb_transport_offset(skb)));
skb              2893 drivers/net/ethernet/brocade/bna/bnad.c 				if (unlikely(skb_headlen(skb) <
skb              2894 drivers/net/ethernet/brocade/bna/bnad.c 					    skb_transport_offset(skb) +
skb              2909 drivers/net/ethernet/brocade/bna/bnad.c 	txqent->hdr.wi.frame_length = htonl(skb->len);
skb              2919 drivers/net/ethernet/brocade/bna/bnad.c bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              2931 drivers/net/ethernet/brocade/bna/bnad.c 	len = skb_headlen(skb);
skb              2935 drivers/net/ethernet/brocade/bna/bnad.c 	if (unlikely(skb->len <= ETH_HLEN)) {
skb              2936 drivers/net/ethernet/brocade/bna/bnad.c 		dev_kfree_skb_any(skb);
skb              2941 drivers/net/ethernet/brocade/bna/bnad.c 		dev_kfree_skb_any(skb);
skb              2946 drivers/net/ethernet/brocade/bna/bnad.c 		dev_kfree_skb_any(skb);
skb              2958 drivers/net/ethernet/brocade/bna/bnad.c 		dev_kfree_skb_any(skb);
skb              2967 drivers/net/ethernet/brocade/bna/bnad.c 	vectors = 1 + skb_shinfo(skb)->nr_frags;
skb              2971 drivers/net/ethernet/brocade/bna/bnad.c 		dev_kfree_skb_any(skb);
skb              3010 drivers/net/ethernet/brocade/bna/bnad.c 	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
skb              3011 drivers/net/ethernet/brocade/bna/bnad.c 		dev_kfree_skb_any(skb);
skb              3017 drivers/net/ethernet/brocade/bna/bnad.c 	head_unmap->skb = skb;
skb              3022 drivers/net/ethernet/brocade/bna/bnad.c 	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
skb              3025 drivers/net/ethernet/brocade/bna/bnad.c 		dev_kfree_skb_any(skb);
skb              3035 drivers/net/ethernet/brocade/bna/bnad.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              3042 drivers/net/ethernet/brocade/bna/bnad.c 			dev_kfree_skb_any(skb);
skb              3064 drivers/net/ethernet/brocade/bna/bnad.c 			dev_kfree_skb_any(skb);
skb              3077 drivers/net/ethernet/brocade/bna/bnad.c 	if (unlikely(len != skb->len)) {
skb              3080 drivers/net/ethernet/brocade/bna/bnad.c 		dev_kfree_skb_any(skb);
skb              3093 drivers/net/ethernet/brocade/bna/bnad.c 	skb_tx_timestamp(skb);
skb               224 drivers/net/ethernet/brocade/bna/bnad.h 	struct sk_buff		*skb;
skb               236 drivers/net/ethernet/brocade/bna/bnad.h 	struct sk_buff		*skb;
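Note: the bnad entries index the canonical RX refill sequence: allocate an IP-aligned skb, DMA-map its data, and only publish it to the ring once the mapping succeeds (the unmap slot keeps the skb so the cleanup paths above can dma_unmap and dev_kfree_skb_any later). A hedged sketch, with the driver-specific slot replaced by caller-provided pointers:

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/dma-mapping.h>

    static int rx_refill_one(struct net_device *ndev, struct device *dmadev,
                             struct sk_buff **slot, dma_addr_t *daddr,
                             u32 buff_sz)
    {
            struct sk_buff *skb = netdev_alloc_skb_ip_align(ndev, buff_sz);
            dma_addr_t dma;

            if (unlikely(!skb))
                    return -ENOMEM;

            dma = dma_map_single(dmadev, skb->data, buff_sz, DMA_FROM_DEVICE);
            if (dma_mapping_error(dmadev, dma)) {
                    dev_kfree_skb_any(skb);
                    return -EIO;
            }
            *slot = skb;    /* publish only after a good mapping */
            *daddr = dma;
            return 0;
    }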
skb               747 drivers/net/ethernet/cadence/macb.h 	struct sk_buff *skb;
skb               853 drivers/net/ethernet/cadence/macb.h 	struct sk_buff		*skb;
skb              1199 drivers/net/ethernet/cadence/macb.h 	struct sk_buff *skb;			/* holds skb until xmit interrupt completes */
skb              1251 drivers/net/ethernet/cadence/macb.h int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des);
skb              1252 drivers/net/ethernet/cadence/macb.h void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
skb              1253 drivers/net/ethernet/cadence/macb.h static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
skb              1258 drivers/net/ethernet/cadence/macb.h 	return gem_ptp_txstamp(queue, skb, desc);
skb              1261 drivers/net/ethernet/cadence/macb.h static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
skb              1266 drivers/net/ethernet/cadence/macb.h 	gem_ptp_rxstamp(bp, skb, desc);
skb              1274 drivers/net/ethernet/cadence/macb.h static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
skb              1279 drivers/net/ethernet/cadence/macb.h static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
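Note: the macb.h lines 1253-1279 indexed above show the usual compile-time stub pattern: when hardware timestamping is configured out, the gem_ptp_do_*stamp() helpers collapse to no-ops, so the hot-path callers in macb_main.c need no #ifdefs. Schematically (condition and return value as in the listing; gating details elided):

    #ifdef CONFIG_MACB_USE_HWSTAMP
    static inline int gem_ptp_do_txstamp(struct macb_queue *queue,
                                         struct sk_buff *skb,
                                         struct macb_dma_desc *desc)
    {
            /* check per-skb/per-device PTP state, then stamp */
            return gem_ptp_txstamp(queue, skb, desc);
    }
    #else
    static inline int gem_ptp_do_txstamp(struct macb_queue *queue,
                                         struct sk_buff *skb,
                                         struct macb_dma_desc *desc)
    {
            return -1;      /* feature compiled out: caller takes normal path */
    }
    #endif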
skb               686 drivers/net/ethernet/cadence/macb_main.c 	if (tx_skb->skb) {
skb               687 drivers/net/ethernet/cadence/macb_main.c 		dev_kfree_skb_any(tx_skb->skb);
skb               688 drivers/net/ethernet/cadence/macb_main.c 		tx_skb->skb = NULL;
skb               732 drivers/net/ethernet/cadence/macb_main.c 	struct sk_buff		*skb;
skb               768 drivers/net/ethernet/cadence/macb_main.c 		skb = tx_skb->skb;
skb               772 drivers/net/ethernet/cadence/macb_main.c 			while (!skb) {
skb               776 drivers/net/ethernet/cadence/macb_main.c 				skb = tx_skb->skb;
skb               785 drivers/net/ethernet/cadence/macb_main.c 					    skb->data);
skb               788 drivers/net/ethernet/cadence/macb_main.c 				bp->dev->stats.tx_bytes += skb->len;
skb               789 drivers/net/ethernet/cadence/macb_main.c 				queue->stats.tx_bytes += skb->len;
skb               855 drivers/net/ethernet/cadence/macb_main.c 		struct sk_buff		*skb;
skb               875 drivers/net/ethernet/cadence/macb_main.c 			skb = tx_skb->skb;
skb               878 drivers/net/ethernet/cadence/macb_main.c 			if (skb) {
skb               879 drivers/net/ethernet/cadence/macb_main.c 				if (unlikely(skb_shinfo(skb)->tx_flags &
skb               881 drivers/net/ethernet/cadence/macb_main.c 				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
skb               885 drivers/net/ethernet/cadence/macb_main.c 					tx_skb->skb = NULL;
skb               889 drivers/net/ethernet/cadence/macb_main.c 					    skb->data);
skb               892 drivers/net/ethernet/cadence/macb_main.c 				bp->dev->stats.tx_bytes += skb->len;
skb               893 drivers/net/ethernet/cadence/macb_main.c 				queue->stats.tx_bytes += skb->len;
skb               903 drivers/net/ethernet/cadence/macb_main.c 			if (skb)
skb               918 drivers/net/ethernet/cadence/macb_main.c 	struct sk_buff		*skb;
skb               935 drivers/net/ethernet/cadence/macb_main.c 			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
skb               936 drivers/net/ethernet/cadence/macb_main.c 			if (unlikely(!skb)) {
skb               943 drivers/net/ethernet/cadence/macb_main.c 			paddr = dma_map_single(&bp->pdev->dev, skb->data,
skb               947 drivers/net/ethernet/cadence/macb_main.c 				dev_kfree_skb(skb);
skb               951 drivers/net/ethernet/cadence/macb_main.c 			queue->rx_skbuff[entry] = skb;
skb               963 drivers/net/ethernet/cadence/macb_main.c 			skb_reserve(skb, NET_IP_ALIGN);
skb              1005 drivers/net/ethernet/cadence/macb_main.c 	struct sk_buff		*skb;
skb              1041 drivers/net/ethernet/cadence/macb_main.c 		skb = queue->rx_skbuff[entry];
skb              1042 drivers/net/ethernet/cadence/macb_main.c 		if (unlikely(!skb)) {
skb              1055 drivers/net/ethernet/cadence/macb_main.c 		skb_put(skb, len);
skb              1059 drivers/net/ethernet/cadence/macb_main.c 		skb->protocol = eth_type_trans(skb, bp->dev);
skb              1060 drivers/net/ethernet/cadence/macb_main.c 		skb_checksum_none_assert(skb);
skb              1064 drivers/net/ethernet/cadence/macb_main.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1068 drivers/net/ethernet/cadence/macb_main.c 		bp->dev->stats.rx_bytes += skb->len;
skb              1069 drivers/net/ethernet/cadence/macb_main.c 		queue->stats.rx_bytes += skb->len;
skb              1071 drivers/net/ethernet/cadence/macb_main.c 		gem_ptp_do_rxstamp(bp, skb, desc);
skb              1075 drivers/net/ethernet/cadence/macb_main.c 			    skb->len, skb->csum);
skb              1077 drivers/net/ethernet/cadence/macb_main.c 			       skb_mac_header(skb), 16, true);
skb              1079 drivers/net/ethernet/cadence/macb_main.c 			       skb->data, 32, true);
skb              1082 drivers/net/ethernet/cadence/macb_main.c 		napi_gro_receive(napi, skb);
skb              1096 drivers/net/ethernet/cadence/macb_main.c 	struct sk_buff *skb;
skb              1115 drivers/net/ethernet/cadence/macb_main.c 	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
skb              1116 drivers/net/ethernet/cadence/macb_main.c 	if (!skb) {
skb              1133 drivers/net/ethernet/cadence/macb_main.c 	skb_checksum_none_assert(skb);
skb              1134 drivers/net/ethernet/cadence/macb_main.c 	skb_put(skb, len);
skb              1141 drivers/net/ethernet/cadence/macb_main.c 				dev_kfree_skb_any(skb);
skb              1146 drivers/net/ethernet/cadence/macb_main.c 		skb_copy_to_linear_data_offset(skb, offset,
skb              1160 drivers/net/ethernet/cadence/macb_main.c 	__skb_pull(skb, NET_IP_ALIGN);
skb              1161 drivers/net/ethernet/cadence/macb_main.c 	skb->protocol = eth_type_trans(skb, bp->dev);
skb              1164 drivers/net/ethernet/cadence/macb_main.c 	bp->dev->stats.rx_bytes += skb->len;
skb              1166 drivers/net/ethernet/cadence/macb_main.c 		    skb->len, skb->csum);
skb              1167 drivers/net/ethernet/cadence/macb_main.c 	napi_gro_receive(napi, skb);
skb              1495 drivers/net/ethernet/cadence/macb_main.c 				struct sk_buff *skb,
skb              1503 drivers/net/ethernet/cadence/macb_main.c 	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
skb              1508 drivers/net/ethernet/cadence/macb_main.c 	if (skb_shinfo(skb)->gso_size != 0) {
skb              1509 drivers/net/ethernet/cadence/macb_main.c 		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
skb              1518 drivers/net/ethernet/cadence/macb_main.c 	len = skb_headlen(skb);
skb              1529 drivers/net/ethernet/cadence/macb_main.c 					 skb->data + offset,
skb              1535 drivers/net/ethernet/cadence/macb_main.c 		tx_skb->skb = NULL;
skb              1550 drivers/net/ethernet/cadence/macb_main.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              1565 drivers/net/ethernet/cadence/macb_main.c 			tx_skb->skb = NULL;
skb              1584 drivers/net/ethernet/cadence/macb_main.c 	tx_skb->skb = skb;
skb              1602 drivers/net/ethernet/cadence/macb_main.c 			mss_mfs = skb_shinfo(skb)->gso_size +
skb              1603 drivers/net/ethernet/cadence/macb_main.c 					skb_transport_offset(skb) +
skb              1606 drivers/net/ethernet/cadence/macb_main.c 			mss_mfs = skb_shinfo(skb)->gso_size;
skb              1633 drivers/net/ethernet/cadence/macb_main.c 			    skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
skb              1666 drivers/net/ethernet/cadence/macb_main.c static netdev_features_t macb_features_check(struct sk_buff *skb,
skb              1676 drivers/net/ethernet/cadence/macb_main.c 	if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
skb              1680 drivers/net/ethernet/cadence/macb_main.c 	hdrlen = skb_transport_offset(skb);
skb              1686 drivers/net/ethernet/cadence/macb_main.c 	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
skb              1689 drivers/net/ethernet/cadence/macb_main.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              1693 drivers/net/ethernet/cadence/macb_main.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              1701 drivers/net/ethernet/cadence/macb_main.c static inline int macb_clear_csum(struct sk_buff *skb)
skb              1704 drivers/net/ethernet/cadence/macb_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1708 drivers/net/ethernet/cadence/macb_main.c 	if (unlikely(skb_cow_head(skb, 0)))
skb              1715 drivers/net/ethernet/cadence/macb_main.c 	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
skb              1719 drivers/net/ethernet/cadence/macb_main.c static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
skb              1721 drivers/net/ethernet/cadence/macb_main.c 	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
skb              1722 drivers/net/ethernet/cadence/macb_main.c 	int padlen = ETH_ZLEN - (*skb)->len;
skb              1723 drivers/net/ethernet/cadence/macb_main.c 	int headroom = skb_headroom(*skb);
skb              1724 drivers/net/ethernet/cadence/macb_main.c 	int tailroom = skb_tailroom(*skb);
skb              1729 drivers/net/ethernet/cadence/macb_main.c 	    !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
skb              1730 drivers/net/ethernet/cadence/macb_main.c 	    skb_shinfo(*skb)->gso_size)	/* Not available for GSO */
skb              1749 drivers/net/ethernet/cadence/macb_main.c 		(*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
skb              1750 drivers/net/ethernet/cadence/macb_main.c 		skb_set_tail_pointer(*skb, (*skb)->len);
skb              1752 drivers/net/ethernet/cadence/macb_main.c 		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
skb              1756 drivers/net/ethernet/cadence/macb_main.c 		dev_consume_skb_any(*skb);
skb              1757 drivers/net/ethernet/cadence/macb_main.c 		*skb = nskb;
skb              1761 drivers/net/ethernet/cadence/macb_main.c 		skb_put_zero(*skb, padlen - ETH_FCS_LEN);
skb              1765 drivers/net/ethernet/cadence/macb_main.c 	fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
skb              1768 drivers/net/ethernet/cadence/macb_main.c 	skb_put_u8(*skb, fcs		& 0xff);
skb              1769 drivers/net/ethernet/cadence/macb_main.c 	skb_put_u8(*skb, (fcs >> 8)	& 0xff);
skb              1770 drivers/net/ethernet/cadence/macb_main.c 	skb_put_u8(*skb, (fcs >> 16)	& 0xff);
skb              1771 drivers/net/ethernet/cadence/macb_main.c 	skb_put_u8(*skb, (fcs >> 24)	& 0xff);
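Note: macb_pad_and_fcs() (source lines 1719-1771 above) pads short frames to the Ethernet minimum and appends a software-computed FCS, least-significant byte first, as the four skb_put_u8() calls show. The arithmetic in isolation, as a minimal sketch that assumes tailroom was already ensured (the real function reallocates or shifts data when it is not, and skips GSO and checksum-offloaded skbs):

    #include <linux/crc32.h>
    #include <linux/skbuff.h>
    #include <linux/if_ether.h>

    static void pad_and_append_fcs(struct sk_buff *skb)
    {
            u32 fcs;

            if (skb->len < ETH_ZLEN)        /* pad payload to 60 bytes */
                    skb_put_zero(skb, ETH_ZLEN - skb->len);

            fcs = ~crc32_le(~0, skb->data, skb->len);   /* Ethernet CRC32 */
            skb_put_u8(skb, fcs & 0xff);                /* LSB first on the wire */
            skb_put_u8(skb, (fcs >> 8) & 0xff);
            skb_put_u8(skb, (fcs >> 16) & 0xff);
            skb_put_u8(skb, (fcs >> 24) & 0xff);
    }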
skb              1776 drivers/net/ethernet/cadence/macb_main.c static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1778 drivers/net/ethernet/cadence/macb_main.c 	u16 queue_index = skb_get_queue_mapping(skb);
skb              1787 drivers/net/ethernet/cadence/macb_main.c 	if (macb_clear_csum(skb)) {
skb              1788 drivers/net/ethernet/cadence/macb_main.c 		dev_kfree_skb_any(skb);
skb              1792 drivers/net/ethernet/cadence/macb_main.c 	if (macb_pad_and_fcs(&skb, dev)) {
skb              1793 drivers/net/ethernet/cadence/macb_main.c 		dev_kfree_skb_any(skb);
skb              1797 drivers/net/ethernet/cadence/macb_main.c 	is_lso = (skb_shinfo(skb)->gso_size != 0);
skb              1800 drivers/net/ethernet/cadence/macb_main.c 		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
skb              1805 drivers/net/ethernet/cadence/macb_main.c 			hdrlen = skb_transport_offset(skb);
skb              1807 drivers/net/ethernet/cadence/macb_main.c 			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1808 drivers/net/ethernet/cadence/macb_main.c 		if (skb_headlen(skb) < hdrlen) {
skb              1814 drivers/net/ethernet/cadence/macb_main.c 		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
skb              1819 drivers/net/ethernet/cadence/macb_main.c 		    queue_index, skb->len, skb->head, skb->data,
skb              1820 drivers/net/ethernet/cadence/macb_main.c 		    skb_tail_pointer(skb), skb_end_pointer(skb));
skb              1822 drivers/net/ethernet/cadence/macb_main.c 		       skb->data, 16, true);
skb              1829 drivers/net/ethernet/cadence/macb_main.c 	if (is_lso && (skb_headlen(skb) > hdrlen))
skb              1831 drivers/net/ethernet/cadence/macb_main.c 		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
skb              1833 drivers/net/ethernet/cadence/macb_main.c 		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
skb              1834 drivers/net/ethernet/cadence/macb_main.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              1836 drivers/net/ethernet/cadence/macb_main.c 		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
skb              1853 drivers/net/ethernet/cadence/macb_main.c 	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
skb              1854 drivers/net/ethernet/cadence/macb_main.c 		dev_kfree_skb_any(skb);
skb              1860 drivers/net/ethernet/cadence/macb_main.c 	skb_tx_timestamp(skb);
skb              1895 drivers/net/ethernet/cadence/macb_main.c 	struct sk_buff		*skb;
skb              1907 drivers/net/ethernet/cadence/macb_main.c 			skb = queue->rx_skbuff[i];
skb              1909 drivers/net/ethernet/cadence/macb_main.c 			if (!skb)
skb              1917 drivers/net/ethernet/cadence/macb_main.c 			dev_kfree_skb_any(skb);
skb              1918 drivers/net/ethernet/cadence/macb_main.c 			skb = NULL;
skb              3767 drivers/net/ethernet/cadence/macb_main.c static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
skb              3776 drivers/net/ethernet/cadence/macb_main.c 		lp->skb = skb;
skb              3777 drivers/net/ethernet/cadence/macb_main.c 		lp->skb_length = skb->len;
skb              3778 drivers/net/ethernet/cadence/macb_main.c 		lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
skb              3779 drivers/net/ethernet/cadence/macb_main.c 						  skb->len, DMA_TO_DEVICE);
skb              3781 drivers/net/ethernet/cadence/macb_main.c 			dev_kfree_skb_any(skb);
skb              3790 drivers/net/ethernet/cadence/macb_main.c 		macb_writel(lp, TCR, skb->len);
skb              3809 drivers/net/ethernet/cadence/macb_main.c 	struct sk_buff *skb;
skb              3816 drivers/net/ethernet/cadence/macb_main.c 		skb = netdev_alloc_skb(dev, pktlen + 2);
skb              3817 drivers/net/ethernet/cadence/macb_main.c 		if (skb) {
skb              3818 drivers/net/ethernet/cadence/macb_main.c 			skb_reserve(skb, 2);
skb              3819 drivers/net/ethernet/cadence/macb_main.c 			skb_put_data(skb, p_recv, pktlen);
skb              3821 drivers/net/ethernet/cadence/macb_main.c 			skb->protocol = eth_type_trans(skb, dev);
skb              3824 drivers/net/ethernet/cadence/macb_main.c 			netif_rx(skb);
skb              3867 drivers/net/ethernet/cadence/macb_main.c 		if (lp->skb) {
skb              3868 drivers/net/ethernet/cadence/macb_main.c 			dev_consume_skb_irq(lp->skb);
skb              3869 drivers/net/ethernet/cadence/macb_main.c 			lp->skb = NULL;
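Note: the at91ether entries show the simplest TX scheme in this section: one packet in flight, the skb parked in the private struct at xmit time, and the buffer consumed from the TX-done interrupt with dev_consume_skb_irq() (consumed, not dropped, so drop monitors stay quiet). A hedged sketch of the completion half, with a hypothetical single-slot state struct mirroring the lp fields above:

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    struct one_slot_tx {                    /* illustrative */
            struct device *dev;
            struct sk_buff *skb;
            unsigned int skb_length;
            dma_addr_t skb_physaddr;
    };

    static void one_slot_tx_done(struct one_slot_tx *lp)  /* from TX IRQ */
    {
            if (!lp->skb)
                    return;
            dma_unmap_single(lp->dev, lp->skb_physaddr,
                             lp->skb_length, DMA_TO_DEVICE);
            dev_consume_skb_irq(lp->skb);
            lp->skb = NULL;
    }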
skb               269 drivers/net/ethernet/cadence/macb_ptp.c void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
skb               272 drivers/net/ethernet/cadence/macb_ptp.c 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
skb               284 drivers/net/ethernet/cadence/macb_ptp.c static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
skb               293 drivers/net/ethernet/cadence/macb_ptp.c 	skb_tstamp_tx(skb, &shhwtstamps);
skb               296 drivers/net/ethernet/cadence/macb_ptp.c int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
skb               310 drivers/net/ethernet/cadence/macb_ptp.c 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb               313 drivers/net/ethernet/cadence/macb_ptp.c 	tx_timestamp->skb = skb;
skb               339 drivers/net/ethernet/cadence/macb_ptp.c 		gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
skb               341 drivers/net/ethernet/cadence/macb_ptp.c 		dev_kfree_skb_any(tx_ts->skb);
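Note: macb_ptp.c completes the PTP flow: gem_ptp_txstamp() marks the skb SKBTX_IN_PROGRESS and queues it next to its descriptor, and the timestamp worker later converts the captured hardware time and hands it back through skb_tstamp_tx() before dropping its reference. The delivery step in isolation, as a sketch where ns stands in for the value read from the hardware:

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <linux/ktime.h>

    static void deliver_tx_tstamp(struct sk_buff *skb, u64 ns)
    {
            struct skb_shared_hwtstamps shhwtstamps;

            memset(&shhwtstamps, 0, sizeof(shhwtstamps));
            shhwtstamps.hwtstamp = ns_to_ktime(ns);
            skb_tstamp_tx(skb, &shhwtstamps); /* clones to socket error queue */
            dev_kfree_skb_any(skb);           /* our reference is done */
    }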
skb               685 drivers/net/ethernet/calxeda/xgmac.c 		struct sk_buff *skb;
skb               690 drivers/net/ethernet/calxeda/xgmac.c 			skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
skb               691 drivers/net/ethernet/calxeda/xgmac.c 			if (unlikely(skb == NULL))
skb               694 drivers/net/ethernet/calxeda/xgmac.c 			paddr = dma_map_single(priv->device, skb->data,
skb               698 drivers/net/ethernet/calxeda/xgmac.c 				dev_kfree_skb_any(skb);
skb               701 drivers/net/ethernet/calxeda/xgmac.c 			priv->rx_skbuff[entry] = skb;
skb               798 drivers/net/ethernet/calxeda/xgmac.c 		struct sk_buff *skb = priv->rx_skbuff[i];
skb               799 drivers/net/ethernet/calxeda/xgmac.c 		if (skb == NULL)
skb               805 drivers/net/ethernet/calxeda/xgmac.c 		dev_kfree_skb_any(skb);
skb               870 drivers/net/ethernet/calxeda/xgmac.c 		struct sk_buff *skb = priv->tx_skbuff[entry];
skb               890 drivers/net/ethernet/calxeda/xgmac.c 			dev_consume_skb_any(skb);
skb              1074 drivers/net/ethernet/calxeda/xgmac.c static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1080 drivers/net/ethernet/calxeda/xgmac.c 	int nfrags = skb_shinfo(skb)->nr_frags;
skb              1089 drivers/net/ethernet/calxeda/xgmac.c 	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
skb              1095 drivers/net/ethernet/calxeda/xgmac.c 	len = skb_headlen(skb);
skb              1096 drivers/net/ethernet/calxeda/xgmac.c 	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
skb              1098 drivers/net/ethernet/calxeda/xgmac.c 		dev_kfree_skb_any(skb);
skb              1101 drivers/net/ethernet/calxeda/xgmac.c 	priv->tx_skbuff[entry] = skb;
skb              1105 drivers/net/ethernet/calxeda/xgmac.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1116 drivers/net/ethernet/calxeda/xgmac.c 		priv->tx_skbuff[entry] = skb;
skb              1162 drivers/net/ethernet/calxeda/xgmac.c 	dev_kfree_skb_any(skb);
skb              1174 drivers/net/ethernet/calxeda/xgmac.c 		struct sk_buff *skb;
skb              1193 drivers/net/ethernet/calxeda/xgmac.c 		skb = priv->rx_skbuff[entry];
skb              1194 drivers/net/ethernet/calxeda/xgmac.c 		if (unlikely(!skb)) {
skb              1204 drivers/net/ethernet/calxeda/xgmac.c 		skb_put(skb, frame_len);
skb              1208 drivers/net/ethernet/calxeda/xgmac.c 		skb->protocol = eth_type_trans(skb, priv->dev);
skb              1209 drivers/net/ethernet/calxeda/xgmac.c 		skb->ip_summed = ip_checksum;
skb              1211 drivers/net/ethernet/calxeda/xgmac.c 			netif_receive_skb(skb);
skb              1213 drivers/net/ethernet/calxeda/xgmac.c 			napi_gro_receive(&priv->napi, skb);
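Note: the xgmac RX entries end with the usual completion hand-off: set the final length with skb_put(), derive the protocol with eth_type_trans(), record the hardware checksum verdict, then push the skb up, preferring GRO when the checksum was verified (lines 1204-1213 above). Condensed into a sketch:

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    static void rx_complete_one(struct napi_struct *napi,
                                struct net_device *ndev,
                                struct sk_buff *skb,
                                unsigned int frame_len, bool csum_ok)
    {
            skb_put(skb, frame_len);
            skb->protocol = eth_type_trans(skb, ndev);
            skb->ip_summed = csum_ok ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

            if (csum_ok)
                    napi_gro_receive(napi, skb);    /* GRO can coalesce */
            else
                    netif_receive_skb(skb);         /* nothing to aggregate */
    }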
skb               195 drivers/net/ethernet/cavium/liquidio/lio_core.c 	struct sk_buff *skb = NULL;
skb               202 drivers/net/ethernet/cavium/liquidio/lio_core.c 		skb = finfo->skb;
skb               208 drivers/net/ethernet/cavium/liquidio/lio_core.c 		skb = sc->callback_arg;
skb               216 drivers/net/ethernet/cavium/liquidio/lio_core.c 	*bytes_compl += skb->len;
skb               222 drivers/net/ethernet/cavium/liquidio/lio_core.c 	struct sk_buff *skb;
skb               230 drivers/net/ethernet/cavium/liquidio/lio_core.c 		skb = finfo->skb;
skb               236 drivers/net/ethernet/cavium/liquidio/lio_core.c 		skb = sc->callback_arg;
skb               243 drivers/net/ethernet/cavium/liquidio/lio_core.c 	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
skb               244 drivers/net/ethernet/cavium/liquidio/lio_core.c 	netdev_tx_sent_queue(txq, skb->len);
skb               577 drivers/net/ethernet/cavium/liquidio/lio_core.c 	struct sk_buff *skb = (struct sk_buff *)skbuff;
skb               590 drivers/net/ethernet/cavium/liquidio/lio_core.c 			recv_buffer_free(skb);
skb               595 drivers/net/ethernet/cavium/liquidio/lio_core.c 		skb->dev = netdev;
skb               597 drivers/net/ethernet/cavium/liquidio/lio_core.c 		skb_record_rx_queue(skb, droq->q_no);
skb               602 drivers/net/ethernet/cavium/liquidio/lio_core.c 			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
skb               607 drivers/net/ethernet/cavium/liquidio/lio_core.c 				memcpy(skb->data, va, MIN_SKB_SIZE);
skb               608 drivers/net/ethernet/cavium/liquidio/lio_core.c 				skb_put(skb, MIN_SKB_SIZE);
skb               609 drivers/net/ethernet/cavium/liquidio/lio_core.c 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb               618 drivers/net/ethernet/cavium/liquidio/lio_core.c 				((struct octeon_skb_page_info *)(skb->cb));
skb               619 drivers/net/ethernet/cavium/liquidio/lio_core.c 			skb_copy_to_linear_data(skb, page_address(pg_info->page)
skb               621 drivers/net/ethernet/cavium/liquidio/lio_core.c 			skb_put(skb, len);
skb               638 drivers/net/ethernet/cavium/liquidio/lio_core.c 					memcpy(&ns, (skb->data + r_dh_off),
skb               641 drivers/net/ethernet/cavium/liquidio/lio_core.c 					shhwtstamps = skb_hwtstamps(skb);
skb               650 drivers/net/ethernet/cavium/liquidio/lio_core.c 			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
skb               653 drivers/net/ethernet/cavium/liquidio/lio_core.c 			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
skb               657 drivers/net/ethernet/cavium/liquidio/lio_core.c 		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
skb               658 drivers/net/ethernet/cavium/liquidio/lio_core.c 		skb->protocol = eth_type_trans(skb, skb->dev);
skb               667 drivers/net/ethernet/cavium/liquidio/lio_core.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               669 drivers/net/ethernet/cavium/liquidio/lio_core.c 			skb->ip_summed = CHECKSUM_NONE;
skb               675 drivers/net/ethernet/cavium/liquidio/lio_core.c 			skb->encapsulation = 1;
skb               676 drivers/net/ethernet/cavium/liquidio/lio_core.c 			skb->csum_level = 1;
skb               687 drivers/net/ethernet/cavium/liquidio/lio_core.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
skb               690 drivers/net/ethernet/cavium/liquidio/lio_core.c 		napi_gro_receive(napi, skb);
skb               696 drivers/net/ethernet/cavium/liquidio/lio_core.c 		recv_buffer_free(skb);
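Note: liquidio's RX path builds skbs from receive pages with a copy-break split: up to MIN_SKB_SIZE bytes are copied into the linear area so header parsers have direct access, and the remainder is attached zero-copy as a page fragment via skb_add_rx_frag(). A hedged sketch of that split, with COPY_BREAK standing in for the driver's constant and room in the linear area assumed:

    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/skbuff.h>

    #define COPY_BREAK 64   /* illustrative; liquidio uses MIN_SKB_SIZE */

    static void attach_rx_page(struct sk_buff *skb, struct page *page,
                               unsigned int offset, unsigned int len,
                               unsigned int truesize)
    {
            unsigned int copy = min_t(unsigned int, len, COPY_BREAK);

            memcpy(skb->data, page_address(page) + offset, copy);
            skb_put(skb, copy);             /* headers in the linear area */

            if (len > copy)                 /* payload stays in the page */
                    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                                    offset + copy, len - copy, truesize);
    }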
skb              1457 drivers/net/ethernet/cavium/liquidio/lio_main.c 	struct sk_buff *skb;
skb              1462 drivers/net/ethernet/cavium/liquidio/lio_main.c 	skb = finfo->skb;
skb              1465 drivers/net/ethernet/cavium/liquidio/lio_main.c 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
skb              1468 drivers/net/ethernet/cavium/liquidio/lio_main.c 	tx_buffer_free(skb);
skb              1478 drivers/net/ethernet/cavium/liquidio/lio_main.c 	struct sk_buff *skb;
skb              1484 drivers/net/ethernet/cavium/liquidio/lio_main.c 	skb = finfo->skb;
skb              1487 drivers/net/ethernet/cavium/liquidio/lio_main.c 	frags = skb_shinfo(skb)->nr_frags;
skb              1490 drivers/net/ethernet/cavium/liquidio/lio_main.c 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
skb              1495 drivers/net/ethernet/cavium/liquidio/lio_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
skb              1503 drivers/net/ethernet/cavium/liquidio/lio_main.c 	iq = skb_iq(lio->oct_dev, skb);
skb              1508 drivers/net/ethernet/cavium/liquidio/lio_main.c 	tx_buffer_free(skb);
skb              1519 drivers/net/ethernet/cavium/liquidio/lio_main.c 	struct sk_buff *skb;
skb              1525 drivers/net/ethernet/cavium/liquidio/lio_main.c 	skb = (struct sk_buff *)sc->callback_arg;
skb              1526 drivers/net/ethernet/cavium/liquidio/lio_main.c 	finfo = (struct octnet_buf_free_info *)&skb->cb;
skb              1530 drivers/net/ethernet/cavium/liquidio/lio_main.c 	frags = skb_shinfo(skb)->nr_frags;
skb              1533 drivers/net/ethernet/cavium/liquidio/lio_main.c 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
skb              1538 drivers/net/ethernet/cavium/liquidio/lio_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
skb              1546 drivers/net/ethernet/cavium/liquidio/lio_main.c 	iq = skb_iq(lio->oct_dev, skb);
skb              2224 drivers/net/ethernet/cavium/liquidio/lio_main.c 	struct sk_buff *skb = (struct sk_buff *)buf;
skb              2226 drivers/net/ethernet/cavium/liquidio/lio_main.c 	finfo = (struct octnet_buf_free_info *)skb->cb;
skb              2240 drivers/net/ethernet/cavium/liquidio/lio_main.c 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
skb              2246 drivers/net/ethernet/cavium/liquidio/lio_main.c 			   skb, (unsigned long long)ns);
skb              2248 drivers/net/ethernet/cavium/liquidio/lio_main.c 		skb_tstamp_tx(skb, &ts);
skb              2252 drivers/net/ethernet/cavium/liquidio/lio_main.c 	tx_buffer_free(skb);
skb              2288 drivers/net/ethernet/cavium/liquidio/lio_main.c 	sc->callback_arg = finfo->skb;
skb              2320 drivers/net/ethernet/cavium/liquidio/lio_main.c static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              2339 drivers/net/ethernet/cavium/liquidio/lio_main.c 	q_idx = skb_iq(oct, skb);
skb              2350 drivers/net/ethernet/cavium/liquidio/lio_main.c 	    (skb->len <= 0)) {
skb              2360 drivers/net/ethernet/cavium/liquidio/lio_main.c 	finfo = (struct octnet_buf_free_info *)skb->cb;
skb              2362 drivers/net/ethernet/cavium/liquidio/lio_main.c 	finfo->skb = skb;
skb              2384 drivers/net/ethernet/cavium/liquidio/lio_main.c 	ndata.datasize = skb->len;
skb              2389 drivers/net/ethernet/cavium/liquidio/lio_main.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              2390 drivers/net/ethernet/cavium/liquidio/lio_main.c 		if (skb->encapsulation) {
skb              2397 drivers/net/ethernet/cavium/liquidio/lio_main.c 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb              2398 drivers/net/ethernet/cavium/liquidio/lio_main.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              2402 drivers/net/ethernet/cavium/liquidio/lio_main.c 	if (skb_shinfo(skb)->nr_frags == 0) {
skb              2403 drivers/net/ethernet/cavium/liquidio/lio_main.c 		cmdsetup.s.u.datasize = skb->len;
skb              2408 drivers/net/ethernet/cavium/liquidio/lio_main.c 				      skb->data,
skb              2409 drivers/net/ethernet/cavium/liquidio/lio_main.c 				      skb->len,
skb              2442 drivers/net/ethernet/cavium/liquidio/lio_main.c 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
skb              2448 drivers/net/ethernet/cavium/liquidio/lio_main.c 						 skb->data,
skb              2449 drivers/net/ethernet/cavium/liquidio/lio_main.c 						 (skb->len - skb->data_len),
skb              2457 drivers/net/ethernet/cavium/liquidio/lio_main.c 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
skb              2459 drivers/net/ethernet/cavium/liquidio/lio_main.c 		frags = skb_shinfo(skb)->nr_frags;
skb              2462 drivers/net/ethernet/cavium/liquidio/lio_main.c 			frag = &skb_shinfo(skb)->frags[i - 1];
skb              2473 drivers/net/ethernet/cavium/liquidio/lio_main.c 						 skb->len - skb->data_len,
skb              2476 drivers/net/ethernet/cavium/liquidio/lio_main.c 					frag = &skb_shinfo(skb)->frags[j - 1];
skb              2512 drivers/net/ethernet/cavium/liquidio/lio_main.c 	if (skb_shinfo(skb)->gso_size) {
skb              2513 drivers/net/ethernet/cavium/liquidio/lio_main.c 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
skb              2514 drivers/net/ethernet/cavium/liquidio/lio_main.c 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
skb              2519 drivers/net/ethernet/cavium/liquidio/lio_main.c 	if (skb_vlan_tag_present(skb)) {
skb              2520 drivers/net/ethernet/cavium/liquidio/lio_main.c 		irh->priority = skb_vlan_tag_get(skb) >> 13;
skb              2521 drivers/net/ethernet/cavium/liquidio/lio_main.c 		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
skb              2558 drivers/net/ethernet/cavium/liquidio/lio_main.c 	tx_buffer_free(skb);
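Note: liquidio_xmit() branches on skb_shinfo(skb)->nr_frags: a linear skb is a single dma_map_single() of skb->data, while a fragmented one maps the head for (skb->len - skb->data_len) bytes, i.e. skb_headlen(), then each page fragment. The mapping loop in isolation, as a sketch (error unwinding left to the caller):

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    static int map_skb_for_tx(struct device *dev, struct sk_buff *skb,
                              dma_addr_t *addrs)
    {
            int i, frags = skb_shinfo(skb)->nr_frags;

            /* head: everything not living in page fragments */
            addrs[0] = dma_map_single(dev, skb->data,
                                      skb_headlen(skb), DMA_TO_DEVICE);
            if (dma_mapping_error(dev, addrs[0]))
                    return -EIO;

            for (i = 0; i < frags; i++) {
                    const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                    addrs[i + 1] = skb_frag_dma_map(dev, frag, 0,
                                                    skb_frag_size(frag),
                                                    DMA_TO_DEVICE);
                    if (dma_mapping_error(dev, addrs[i + 1]))
                            return -EIO;  /* caller unmaps what succeeded */
            }
            return frags + 1;
    }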
skb               803 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	struct sk_buff *skb;
skb               807 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	skb = finfo->skb;
skb               810 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
skb               813 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	tx_buffer_free(skb);
skb               824 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	struct sk_buff *skb;
skb               829 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	skb = finfo->skb;
skb               832 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	frags = skb_shinfo(skb)->nr_frags;
skb               835 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
skb               840 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
skb               848 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	iq = skb_iq(lio->oct_dev, skb);
skb               854 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	tx_buffer_free(skb);
skb               866 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	struct sk_buff *skb;
skb               871 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	skb = (struct sk_buff *)sc->callback_arg;
skb               872 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	finfo = (struct octnet_buf_free_info *)&skb->cb;
skb               876 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	frags = skb_shinfo(skb)->nr_frags;
skb               879 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
skb               884 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
skb               892 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	iq = skb_iq(lio->oct_dev, skb);
skb              1309 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	struct sk_buff *skb = (struct sk_buff *)buf;
skb              1315 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	finfo = (struct octnet_buf_free_info *)skb->cb;
skb              1329 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
skb              1335 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 			   skb, (unsigned long long)ns);
skb              1337 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		skb_tstamp_tx(skb, &ts);
skb              1341 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	tx_buffer_free(skb);
skb              1377 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	sc->callback_arg = finfo->skb;
skb              1404 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              1424 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	q_idx = skb_iq(lio->oct_dev, skb);
skb              1434 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	    (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
skb              1443 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	finfo = (struct octnet_buf_free_info *)skb->cb;
skb              1445 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	finfo->skb = skb;
skb              1463 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	ndata.datasize = skb->len;
skb              1468 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1469 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		if (skb->encapsulation) {
skb              1476 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb              1477 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              1481 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	if (!skb_shinfo(skb)->nr_frags) {
skb              1482 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		cmdsetup.s.u.datasize = skb->len;
skb              1486 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 				      skb->data,
skb              1487 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 				      skb->len,
skb              1516 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
skb              1522 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 						 skb->data,
skb              1523 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 						 (skb->len - skb->data_len),
skb              1530 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
skb              1532 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		frags = skb_shinfo(skb)->nr_frags;
skb              1535 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 			frag = &skb_shinfo(skb)->frags[i - 1];
skb              1545 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 						 skb->len - skb->data_len,
skb              1548 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 					frag = &skb_shinfo(skb)->frags[j - 1];
skb              1576 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	if (skb_shinfo(skb)->gso_size) {
skb              1577 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
skb              1578 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
skb              1582 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	if (skb_vlan_tag_present(skb)) {
skb              1583 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
skb              1584 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 		irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
skb              1624 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	tx_buffer_free(skb);
skb                32 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
skb               262 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 		       struct sk_buff *skb,
skb               269 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 		pg_info = ((struct octeon_skb_page_info *)(skb->cb));
skb               273 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 			memcpy(skb->data, va, MIN_SKB_SIZE);
skb               274 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 			skb_put(skb, MIN_SKB_SIZE);
skb               277 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb               284 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 			((struct octeon_skb_page_info *)(skb->cb));
skb               286 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 		skb_copy_to_linear_data(skb, page_address(pg_info->page) +
skb               288 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 		skb_put(skb, len);
skb               301 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	struct sk_buff *skb;
skb               308 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	skb = recv_pkt->buffer_ptr[0];
skb               321 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	skb->dev = vf_ndev;
skb               326 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);
skb               328 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
skb               329 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	skb->protocol = eth_type_trans(skb, skb->dev);
skb               330 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	skb->ip_summed = CHECKSUM_NONE;
skb               332 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	netif_rx(skb);
skb               352 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	struct sk_buff *skb = sc->ctxptr;
skb               353 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	struct net_device *ndev = skb->dev;
skb               358 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	dev_kfree_skb_any(skb);
skb               370 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               383 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	    skb->len <= 0)
skb               400 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	if (skb_shinfo(skb)->nr_frags != 0) {
skb               407 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 				     skb->data, skb->len, DMA_TO_DEVICE);
skb               414 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	sc->virtdptr = skb->data;
skb               415 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	sc->datasize = skb->len;
skb               416 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	sc->ctxptr = skb;
skb               443 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c 	dev_kfree_skb_any(skb);
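Note: the representor xmit above (line 400) simply refuses nonlinear skbs, since its soft-command path carries one buffer. A more forgiving variant, not what this driver does, would linearize first; minimal sketch:

    if (skb_shinfo(skb)->nr_frags != 0) {
            if (skb_linearize(skb)) {       /* copies frags into the head */
                    dev_kfree_skb_any(skb);
                    return NETDEV_TX_OK;
            }
    }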
skb                53 drivers/net/ethernet/cavium/liquidio/octeon_main.h 	struct sk_buff *skb;
skb               264 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	struct sk_buff *skb;
skb               271 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
skb               272 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if (unlikely(!skb)) {
skb               278 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
skb               279 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
skb               281 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		skb_reserve(skb, r);
skb               284 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
skb               292 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		dev_kfree_skb_any((struct sk_buff *)skb);
skb               303 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	return (void *)skb;
skb               309 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	struct sk_buff *skb;
skb               312 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	skb = dev_alloc_skb(size + SKB_ADJ);
skb               313 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if (unlikely(!skb))
skb               316 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
skb               317 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
skb               319 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		skb_reserve(skb, r);
skb               322 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
skb               327 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	return skb;
skb               366 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	struct sk_buff *skb;
skb               368 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
skb               369 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if (unlikely(!skb)) {
skb               376 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
skb               377 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
skb               379 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		skb_reserve(skb, r);
skb               382 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
skb               387 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	return skb;
skb               393 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	struct sk_buff *skb = (struct sk_buff *)buffer;
skb               400 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	if (skb)
skb               401 drivers/net/ethernet/cavium/liquidio/octeon_network.h 		dev_kfree_skb_any(skb);
skb               406 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	struct sk_buff *skb = (struct sk_buff *)buffer;
skb               409 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
skb               438 drivers/net/ethernet/cavium/liquidio/octeon_network.h void *get_rbd(struct sk_buff *skb)
skb               443 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
skb               454 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	struct sk_buff *skb = (struct sk_buff *)buf;
skb               457 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
skb               602 drivers/net/ethernet/cavium/liquidio/octeon_network.h static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
skb               604 drivers/net/ethernet/cavium/liquidio/octeon_network.h 	return skb->queue_mapping % oct->num_iqs;
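Note: octeon_network.h leans on two generic facts: skb->cb is a 48-byte per-skb scratch area a driver may use while it owns the skb (here it holds an octeon_skb_page_info), and skb_reserve() can realign skb->data after allocation, which is what the SKB_ADJ/SKB_ADJ_MASK dance above does. The alignment idiom as a sketch, with a stand-in power-of-two alignment:

    #include <linux/skbuff.h>

    #define ALIGN_BYTES 8u                  /* illustrative */
    #define ALIGN_MASK  (ALIGN_BYTES - 1)

    static struct sk_buff *alloc_aligned_skb(unsigned int size)
    {
            struct sk_buff *skb = dev_alloc_skb(size + ALIGN_BYTES);

            if (unlikely(!skb))
                    return NULL;

            if ((unsigned long)skb->data & ALIGN_MASK)
                    skb_reserve(skb, ALIGN_BYTES -
                                ((unsigned long)skb->data & ALIGN_MASK));
            return skb;
    }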
skb               219 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		struct sk_buff *skb;
skb               224 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		skb = netdev_alloc_skb(netdev, size);
skb               225 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		if (!skb)
skb               227 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		skb_reserve(skb, NET_IP_ALIGN);
skb               228 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		__skb_queue_tail(&p->rx_list, skb);
skb               232 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		re.s.addr = dma_map_single(p->dev, skb->data,
skb               253 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct sk_buff *skb;
skb               275 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		skb = __skb_dequeue(&p->tx_list);
skb               301 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			skb_tstamp_tx(skb, &ts);
skb               304 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dev_kfree_skb_any(skb);
skb               392 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct sk_buff *skb;
skb               399 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
skb               402 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		skb_put(skb, re.s.len);
skb               407 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			u64 ns = *(u64 *)skb->data;
skb               409 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			ts = skb_hwtstamps(skb);
skb               411 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			__skb_pull(skb, 8);
skb               413 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		skb->protocol = eth_type_trans(skb, netdev);
skb               415 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		netdev->stats.rx_bytes += skb->len;
skb               416 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		netif_receive_skb(skb);
skb               426 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		skb_put(skb, re.s.len);
skb               433 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			skb_new = skb_copy_expand(skb, 0, skb2->len,
skb               441 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			dev_kfree_skb_any(skb);
skb               443 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			skb = skb_new;
skb               448 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dev_kfree_skb_any(skb);
skb               456 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	dev_kfree_skb_any(skb);
skb              1275 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              1283 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
skb              1284 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.s.len = skb->len;
skb              1285 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.s.addr = dma_map_single(p->dev, skb->data,
skb              1286 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 				   skb->len,
skb              1305 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	__skb_queue_tail(&p->tx_list, skb);
skb              1319 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	netdev->stats.tx_bytes += skb->len;
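Note: octeon_mgmt receives the hardware RX timestamp as an 8-byte prefix on the packet itself: read it from skb->data, stash it in skb_hwtstamps(), then __skb_pull() it off before eth_type_trans() sees the frame (lines 407-411 above). As a sketch (endianness/alignment of the prefix assumed to match the CPU here):

    #include <linux/skbuff.h>
    #include <linux/ktime.h>

    static void strip_rx_tstamp(struct sk_buff *skb)
    {
            u64 ns = *(u64 *)skb->data;     /* hardware-prepended time */

            skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
            __skb_pull(skb, 8);             /* timestamp is not packet data */
    }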
skb               532 drivers/net/ethernet/cavium/thunder/nicvf_main.c 				struct rcv_queue *rq, struct sk_buff **skb)
skb               585 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		*skb = build_skb(xdp.data,
skb               587 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		if (!*skb)
skb               590 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			skb_put(*skb, len);
skb               663 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct sk_buff *skb = NULL;
skb               696 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
skb               697 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	if (skb) {
skb               711 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		prefetch(skb);
skb               713 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		*tx_bytes += skb->len;
skb               715 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
skb               717 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			nic->pnicvf->ptp_skb = skb;
skb               719 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			napi_consume_skb(skb, budget);
skb               732 drivers/net/ethernet/cavium/thunder/nicvf_main.c 				    struct sk_buff *skb)
skb               755 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	skb_set_hash(skb, hash, hash_type);
skb               758 drivers/net/ethernet/cavium/thunder/nicvf_main.c static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
skb               767 drivers/net/ethernet/cavium/thunder/nicvf_main.c 				    be64_to_cpu(*(__be64 *)skb->data));
skb               768 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
skb               770 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	__skb_pull(skb, 8);
skb               778 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct sk_buff *skb = NULL;
skb               802 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
skb               805 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		skb = nicvf_get_rcv_skb(snic, cqe_rx,
skb               809 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	if (!skb)
skb               813 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		netdev_info(nic->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
skb               815 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			       skb->data, skb->len, true);
skb               820 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		dev_kfree_skb_any(skb);
skb               824 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	nicvf_set_rxtstamp(nic, skb);
skb               825 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	nicvf_set_rxhash(netdev, cqe_rx, skb);
skb               827 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	skb_record_rx_queue(skb, rq_idx);
skb               830 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               832 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		skb_checksum_none_assert(skb);
skb               835 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	skb->protocol = eth_type_trans(skb, netdev);
skb               839 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               843 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		napi_gro_receive(napi, skb);
skb               845 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		netif_receive_skb(skb);
skb              1254 drivers/net/ethernet/cavium/thunder/nicvf_main.c static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              1257 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	int qid = skb_get_queue_mapping(skb);
skb              1264 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	if (skb->len <= ETH_HLEN) {
skb              1265 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		dev_kfree_skb(skb);
skb              1285 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			dev_kfree_skb(skb);
skb              1293 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	    !nicvf_sq_append_skb(snic, sq, skb, qid)) {
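Note: nicvf's RX completion decorates the skb with everything the stack can exploit: a queue record for RPS, an RSS hash via skb_set_hash(), the checksum verdict, and a stripped VLAN tag via __vlan_hwaccel_put_tag(), before the GRO/netif_receive_skb hand-off seen at lines 843-845. Condensed sketch:

    #include <linux/skbuff.h>
    #include <linux/if_vlan.h>
    #include <linux/if_ether.h>

    static void fill_rx_metadata(struct sk_buff *skb, u32 hash, bool l4_hash,
                                 bool csum_ok, bool vlan_stripped,
                                 u16 vlan_tci, u16 rxq)
    {
            skb_record_rx_queue(skb, rxq);
            skb_set_hash(skb, hash,
                         l4_hash ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);

            if (csum_ok)
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
            else
                    skb_checksum_none_assert(skb); /* stays CHECKSUM_NONE */

            if (vlan_stripped)
                    __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
    }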
skb               236 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct sk_buff *skb;
skb               241 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	skb = build_skb(data, RCV_FRAG_LEN);
skb               242 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!skb) {
skb               247 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	prefetch(skb->data);
skb               248 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	return skb;
skb               573 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct sk_buff *skb;
skb               593 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		skb = (struct sk_buff *)sq->skbuff[sq->head];
skb               594 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (!skb || !sq->xdp_page)
skb               615 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (skb)
skb               616 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			dev_kfree_skb_any(skb);
skb              1183 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct sk_buff *skb;
skb              1195 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		skb = (struct sk_buff *)sq->skbuff[sq->head];
skb              1196 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (skb)
skb              1197 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			dev_kfree_skb_any(skb);
skb              1261 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static int nicvf_tso_count_subdescs(struct sk_buff *skb)
skb              1263 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct skb_shared_info *sh = skb_shinfo(skb);
skb              1264 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1265 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	unsigned int data_len = skb->len - sh_len;
skb              1268 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
skb              1307 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
skb              1311 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
skb              1312 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		subdesc_cnt = nicvf_tso_count_subdescs(skb);
skb              1317 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
skb              1320 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (skb_shinfo(skb)->nr_frags)
skb              1321 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		subdesc_cnt += skb_shinfo(skb)->nr_frags;
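nicvf_sq_subdesc_required() above is the usual descriptor arithmetic: a fixed minimum per packet, extra subdescriptors for software TSO, and one gather entry per page fragment. A hedged sketch of the non-TSO case; MIN_DESCS_PER_PKT is a hypothetical stand-in for the driver's own minimum:

	#define MIN_DESCS_PER_PKT 2	/* header subdesc + linear-area gather */

	static int tx_descs_needed(const struct sk_buff *skb)
	{
		/* one more gather subdescriptor per paged fragment */
		return MIN_DESCS_PER_PKT + skb_shinfo(skb)->nr_frags;
	}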
skb              1331 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			 int subdesc_cnt, struct sk_buff *skb, int len)
skb              1341 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	ip.hdr = skb_network_header(skb);
skb              1346 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
skb              1352 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		sq->skbuff[qentry] = (u64)skb;
skb              1361 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1364 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		hdr->l3_offset = skb_network_offset(skb);
skb              1365 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		hdr->l4_offset = skb_transport_offset(skb);
skb              1383 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
skb              1385 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1386 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
skb              1388 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
skb              1393 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb              1394 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		skb_tx_timestamp(skb);
skb              1399 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (skb_shinfo(skb)->gso_size)
skb              1407 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              1439 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 					    int tso_sqe, struct sk_buff *skb)
skb              1444 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	sq->skbuff[qentry] = (u64)skb;
skb              1465 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
skb              1471 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 				  skb_get_queue_mapping(skb));
skb              1473 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	netdev_tx_sent_queue(txq, skb->len);
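nicvf_sq_doorbell() above pairs byte-queue-limit accounting with the hardware kick. A minimal sketch, assuming the completion path later reports the same byte count back with netdev_tx_completed_queue():

	static void tx_kick(struct netdev_queue *txq, struct sk_buff *skb)
	{
		netdev_tx_sent_queue(txq, skb->len);	/* BQL: bytes in flight */
		dma_wmb();	/* descriptors visible before the doorbell */
		/* ... write the producer index to the device doorbell ... */
	}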
skb              1487 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			       int sq_num, int qentry, struct sk_buff *skb)
skb              1493 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1495 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	tso_start(skb, &tso);
skb              1496 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	total_len = skb->len - hdr_len;
skb              1503 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
skb              1509 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
skb              1530 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			tso_build_data(skb, &tso, size);
skb              1533 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 					 seg_subdescs - 1, skb, seg_len);
skb              1540 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	sq->skbuff[hdr_qentry] = (u64)skb;
skb              1542 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
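nicvf_sq_append_tso() above is an instance of the generic software-TSO loop from net/core/tso.c: replicate the headers for each MSS-sized slice with tso_build_hdr(), emit gather entries for the payload, and advance the cursor with tso_build_data(). A minimal sketch of that loop; write_hdr_desc() and write_data_desc() are hypothetical stand-ins for the driver's subdescriptor writers:

	#include <net/tso.h>

	static void sw_tso(struct sk_buff *skb, char *hdr_buf)
	{
		int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		int total = skb->len - hdr_len;
		struct tso_t tso;

		tso_start(skb, &tso);
		while (total > 0) {
			int seg = min_t(int, skb_shinfo(skb)->gso_size, total);

			total -= seg;
			tso_build_hdr(skb, hdr_buf, &tso, seg, total == 0);
			write_hdr_desc(hdr_buf, hdr_len);	/* hypothetical */
			while (seg > 0) {
				int size = min_t(int, tso.size, seg);

				write_data_desc(tso.data, size); /* hypothetical */
				seg -= size;
				tso_build_data(skb, &tso, size);
			}
		}
	}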
skb              1550 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			struct sk_buff *skb, u8 sq_num)
skb              1557 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
skb              1564 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
skb              1565 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
skb              1569 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 				 skb, skb->len);
skb              1574 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
skb              1576 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
skb              1577 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 				      offset_in_page(skb->data), size,
skb              1587 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!skb_is_nonlinear(skb))
skb              1590 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1591 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1612 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (nic->t88 && skb_shinfo(skb)->gso_size) {
skb              1614 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
skb              1617 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
skb              1667 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct sk_buff *skb = NULL;
skb              1691 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			if (skb)
skb              1692 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 				dev_kfree_skb_any(skb);
skb              1701 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			skb = nicvf_rb_ptr_to_skb(nic,
skb              1704 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			if (!skb)
skb              1706 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			skb_reserve(skb, cqe_rx->align_pad);
skb              1707 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			skb_put(skb, payload_len);
skb              1713 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
skb              1719 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	return skb;
skb               344 drivers/net/ethernet/cavium/thunder/nicvf_queues.h 			struct sk_buff *skb, u8 sq_num);
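The nicvf_get_rcv_skb() fragment above (nicvf_queues.c:1667-1719) assembles a frame that spans several receive buffers: the first buffer becomes the linear part of the skb, and every later buffer is attached as a page fragment so nothing is copied. A minimal sketch of the per-buffer step; truesize is the full allocation size of the buffer, which keeps socket memory accounting honest:

	static void attach_rx_buffer(struct sk_buff *skb, bool first, int pad,
				     struct page *page, int offset, int len,
				     unsigned int truesize)
	{
		if (first) {
			skb_reserve(skb, pad);	/* hw alignment padding */
			skb_put(skb, len);	/* payload is in the head */
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, offset, len, truesize);
		}
	}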
skb               163 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sk_buff *skb;
skb               169 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sk_buff *skb;
skb               431 drivers/net/ethernet/chelsio/cxgb/sge.c static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
skb               438 drivers/net/ethernet/chelsio/cxgb/sge.c 	pr_debug("sched_skb %p\n", skb);
skb               439 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (!skb) {
skb               443 drivers/net/ethernet/chelsio/cxgb/sge.c 		skbq = &s->p[skb->dev->if_port].skbq;
skb               444 drivers/net/ethernet/chelsio/cxgb/sge.c 		__skb_queue_tail(skbq, skb);
skb               446 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb = NULL;
skb               457 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb = skb_peek(skbq);
skb               459 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (!skb)
skb               462 drivers/net/ethernet/chelsio/cxgb/sge.c 		len = skb->len;
skb               466 drivers/net/ethernet/chelsio/cxgb/sge.c 			__skb_unlink(skb, skbq);
skb               469 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb = NULL;
skb               479 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (s->num && !skb) {
skb               487 drivers/net/ethernet/chelsio/cxgb/sge.c 	pr_debug("sched_skb ret %p\n", skb);
skb               489 drivers/net/ethernet/chelsio/cxgb/sge.c 	return skb;
skb               515 drivers/net/ethernet/chelsio/cxgb/sge.c 		dev_kfree_skb(ce->skb);
skb               516 drivers/net/ethernet/chelsio/cxgb/sge.c 		ce->skb = NULL;
skb               633 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (ce->skb) {
skb               634 drivers/net/ethernet/chelsio/cxgb/sge.c 			dev_kfree_skb_any(ce->skb);
skb               832 drivers/net/ethernet/chelsio/cxgb/sge.c 		struct sk_buff *skb;
skb               835 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb = dev_alloc_skb(q->rx_buffer_size);
skb               836 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (!skb)
skb               839 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb_reserve(skb, q->dma_offset);
skb               840 drivers/net/ethernet/chelsio/cxgb/sge.c 		mapping = pci_map_single(pdev, skb->data, dma_len,
skb               842 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb_reserve(skb, sge->rx_pkt_pad);
skb               844 drivers/net/ethernet/chelsio/cxgb/sge.c 		ce->skb = skb;
skb              1044 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sk_buff *skb;
skb              1047 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb = napi_alloc_skb(&adapter->napi, len);
skb              1048 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (!skb)
skb              1051 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb_put(skb, len);
skb              1056 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb_copy_from_linear_data(ce->skb, skb->data, len);
skb              1062 drivers/net/ethernet/chelsio/cxgb/sge.c 		return skb;
skb              1073 drivers/net/ethernet/chelsio/cxgb/sge.c 	skb = ce->skb;
skb              1074 drivers/net/ethernet/chelsio/cxgb/sge.c 	prefetch(skb->data);
skb              1076 drivers/net/ethernet/chelsio/cxgb/sge.c 	skb_put(skb, len);
skb              1077 drivers/net/ethernet/chelsio/cxgb/sge.c 	return skb;
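get_packet() above is the classic copybreak: frames under a threshold are copied into a fresh skb so the mapped DMA buffer can stay on the free list, while larger frames take ownership of the buffer. A minimal sketch of the decision, with DMA sync/unmap elided; copy_thres stands in for the driver's copy threshold:

	static struct sk_buff *rx_copybreak(struct napi_struct *napi,
					    struct sk_buff *buf, int len,
					    unsigned int copy_thres)
	{
		if (len < copy_thres) {
			struct sk_buff *skb = napi_alloc_skb(napi, len);

			if (!skb)
				return NULL;
			skb_put(skb, len);
			skb_copy_from_linear_data(buf, skb->data, len);
			return skb;	/* 'buf' is recycled on the ring */
		}
		skb_put(buf, len);	/* large frame: hand the buffer up */
		return buf;
	}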
skb              1092 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sk_buff *skb = ce->skb;
skb              1097 drivers/net/ethernet/chelsio/cxgb/sge.c 	       adapter->name, *skb->data);
skb              1111 drivers/net/ethernet/chelsio/cxgb/sge.c static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
skb              1116 drivers/net/ethernet/chelsio/cxgb/sge.c 		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
skb              1117 drivers/net/ethernet/chelsio/cxgb/sge.c 		unsigned int i, len = skb_headlen(skb);
skb              1123 drivers/net/ethernet/chelsio/cxgb/sge.c 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1175 drivers/net/ethernet/chelsio/cxgb/sge.c 			ce1->skb = NULL;
skb              1199 drivers/net/ethernet/chelsio/cxgb/sge.c static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
skb              1207 drivers/net/ethernet/chelsio/cxgb/sge.c 	    nfrags = skb_shinfo(skb)->nr_frags;
skb              1212 drivers/net/ethernet/chelsio/cxgb/sge.c 	mapping = pci_map_single(adapter->pdev, skb->data,
skb              1213 drivers/net/ethernet/chelsio/cxgb/sge.c 				 skb_headlen(skb), PCI_DMA_TODEVICE);
skb              1216 drivers/net/ethernet/chelsio/cxgb/sge.c 	desc_len = skb_headlen(skb);
skb              1226 drivers/net/ethernet/chelsio/cxgb/sge.c 	ce->skb = NULL;
skb              1250 drivers/net/ethernet/chelsio/cxgb/sge.c 	ce->skb = NULL;
skb              1252 drivers/net/ethernet/chelsio/cxgb/sge.c 	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
skb              1255 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1276 drivers/net/ethernet/chelsio/cxgb/sge.c 		ce->skb = NULL;
skb              1280 drivers/net/ethernet/chelsio/cxgb/sge.c 	ce->skb = skb;
skb              1309 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sk_buff *skb;
skb              1317 drivers/net/ethernet/chelsio/cxgb/sge.c 	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
skb              1319 drivers/net/ethernet/chelsio/cxgb/sge.c 	        count = 1 + skb_shinfo(skb)->nr_frags;
skb              1320 drivers/net/ethernet/chelsio/cxgb/sge.c 		count += compute_large_page_tx_descs(skb);
skb              1329 drivers/net/ethernet/chelsio/cxgb/sge.c 		write_tx_descs(adapter, skb, pidx, genbit, q);
skb              1354 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sk_buff *skb;
skb              1360 drivers/net/ethernet/chelsio/cxgb/sge.c 	skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
skb              1361 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (unlikely(!skb)) {
skb              1366 drivers/net/ethernet/chelsio/cxgb/sge.c 	p = (const struct cpl_rx_pkt *) skb->data;
skb              1368 drivers/net/ethernet/chelsio/cxgb/sge.c 		kfree_skb(skb);
skb              1371 drivers/net/ethernet/chelsio/cxgb/sge.c 	__skb_pull(skb, sizeof(*p));
skb              1376 drivers/net/ethernet/chelsio/cxgb/sge.c 	skb->protocol = eth_type_trans(skb, dev);
skb              1378 drivers/net/ethernet/chelsio/cxgb/sge.c 	    skb->protocol == htons(ETH_P_IP) &&
skb              1379 drivers/net/ethernet/chelsio/cxgb/sge.c 	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
skb              1381 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1383 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb_checksum_none_assert(skb);
skb              1387 drivers/net/ethernet/chelsio/cxgb/sge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
skb              1389 drivers/net/ethernet/chelsio/cxgb/sge.c 	netif_receive_skb(skb);
skb              1512 drivers/net/ethernet/chelsio/cxgb/sge.c 			prefetch(fl->centries[fl->cidx].skb);
skb              1565 drivers/net/ethernet/chelsio/cxgb/sge.c 	prefetch(fl->centries[fl->cidx].skb);
skb              1659 drivers/net/ethernet/chelsio/cxgb/sge.c static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
skb              1672 drivers/net/ethernet/chelsio/cxgb/sge.c 	count = 1 + skb_shinfo(skb)->nr_frags;
skb              1673 drivers/net/ethernet/chelsio/cxgb/sge.c 	count += compute_large_page_tx_descs(skb);
skb              1697 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (sge->tx_sched && !qid && skb->dev) {
skb              1703 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb = sched_skb(sge, skb, credits);
skb              1704 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (!skb) {
skb              1709 drivers/net/ethernet/chelsio/cxgb/sge.c 		count = 1 + skb_shinfo(skb)->nr_frags;
skb              1710 drivers/net/ethernet/chelsio/cxgb/sge.c 		count += compute_large_page_tx_descs(skb);
skb              1723 drivers/net/ethernet/chelsio/cxgb/sge.c 	write_tx_descs(adapter, skb, pidx, genbit, q);
skb              1745 drivers/net/ethernet/chelsio/cxgb/sge.c 			skb = NULL;
skb              1770 drivers/net/ethernet/chelsio/cxgb/sge.c netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1776 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sk_buff *orig_skb = skb;
skb              1779 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (skb->protocol == htons(ETH_P_CPL5))
skb              1786 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
skb              1787 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
skb              1790 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (!skb)
skb              1794 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (skb_shinfo(skb)->gso_size) {
skb              1800 drivers/net/ethernet/chelsio/cxgb/sge.c 		eth_type = skb_network_offset(skb) == ETH_HLEN ?
skb              1803 drivers/net/ethernet/chelsio/cxgb/sge.c 		hdr = skb_push(skb, sizeof(*hdr));
skb              1806 drivers/net/ethernet/chelsio/cxgb/sge.c 		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
skb              1807 drivers/net/ethernet/chelsio/cxgb/sge.c 		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
skb              1809 drivers/net/ethernet/chelsio/cxgb/sge.c 							  skb_shinfo(skb)->gso_size));
skb              1810 drivers/net/ethernet/chelsio/cxgb/sge.c 		hdr->len = htonl(skb->len - sizeof(*hdr));
skb              1819 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (unlikely(skb->len < ETH_HLEN ||
skb              1820 drivers/net/ethernet/chelsio/cxgb/sge.c 			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
skb              1822 drivers/net/ethernet/chelsio/cxgb/sge.c 				   skb->len, eth_hdr_len(skb->data), dev->mtu);
skb              1823 drivers/net/ethernet/chelsio/cxgb/sge.c 			dev_kfree_skb_any(skb);
skb              1827 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb              1828 drivers/net/ethernet/chelsio/cxgb/sge.c 		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
skb              1829 drivers/net/ethernet/chelsio/cxgb/sge.c 			if (unlikely(skb_checksum_help(skb))) {
skb              1831 drivers/net/ethernet/chelsio/cxgb/sge.c 				dev_kfree_skb_any(skb);
skb              1840 drivers/net/ethernet/chelsio/cxgb/sge.c 			if (skb->protocol == htons(ETH_P_ARP) &&
skb              1841 drivers/net/ethernet/chelsio/cxgb/sge.c 			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
skb              1842 drivers/net/ethernet/chelsio/cxgb/sge.c 				adapter->sge->espibug_skb[dev->if_port] = skb;
skb              1847 drivers/net/ethernet/chelsio/cxgb/sge.c 				skb = skb_get(skb);
skb              1851 drivers/net/ethernet/chelsio/cxgb/sge.c 		cpl = __skb_push(skb, sizeof(*cpl));
skb              1854 drivers/net/ethernet/chelsio/cxgb/sge.c 		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
skb              1857 drivers/net/ethernet/chelsio/cxgb/sge.c 		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
skb              1861 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (skb_vlan_tag_present(skb)) {
skb              1863 drivers/net/ethernet/chelsio/cxgb/sge.c 		cpl->vlan = htons(skb_vlan_tag_get(skb));
skb              1869 drivers/net/ethernet/chelsio/cxgb/sge.c 	ret = t1_sge_tx(skb, adapter, 0, dev);
skb              1874 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
skb              1875 drivers/net/ethernet/chelsio/cxgb/sge.c 		dev_kfree_skb_any(skb);
skb              1994 drivers/net/ethernet/chelsio/cxgb/sge.c 			struct sk_buff *skb = sge->espibug_skb[i];
skb              1998 drivers/net/ethernet/chelsio/cxgb/sge.c 			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
skb              2001 drivers/net/ethernet/chelsio/cxgb/sge.c 			if (!skb->cb[0]) {
skb              2002 drivers/net/ethernet/chelsio/cxgb/sge.c 				skb_copy_to_linear_data_offset(skb,
skb              2006 drivers/net/ethernet/chelsio/cxgb/sge.c 				skb_copy_to_linear_data_offset(skb,
skb              2007 drivers/net/ethernet/chelsio/cxgb/sge.c 							       skb->len - 10,
skb              2010 drivers/net/ethernet/chelsio/cxgb/sge.c 				skb->cb[0] = 0xff;
skb              2016 drivers/net/ethernet/chelsio/cxgb/sge.c 			skb = skb_get(skb);
skb              2017 drivers/net/ethernet/chelsio/cxgb/sge.c 			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
skb              2029 drivers/net/ethernet/chelsio/cxgb/sge.c 	        struct sk_buff *skb = sge->espibug_skb[0];
skb              2032 drivers/net/ethernet/chelsio/cxgb/sge.c 	        if ((seop & 0xfff0fff) == 0xfff && skb) {
skb              2033 drivers/net/ethernet/chelsio/cxgb/sge.c 	                if (!skb->cb[0]) {
skb              2034 drivers/net/ethernet/chelsio/cxgb/sge.c 	                        skb_copy_to_linear_data_offset(skb,
skb              2038 drivers/net/ethernet/chelsio/cxgb/sge.c 	                        skb_copy_to_linear_data_offset(skb,
skb              2039 drivers/net/ethernet/chelsio/cxgb/sge.c 							       skb->len - 10,
skb              2042 drivers/net/ethernet/chelsio/cxgb/sge.c 	                        skb->cb[0] = 0xff;
skb              2048 drivers/net/ethernet/chelsio/cxgb/sge.c 	                skb = skb_get(skb);
skb              2049 drivers/net/ethernet/chelsio/cxgb/sge.c 	                t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
skb                80 drivers/net/ethernet/chelsio/cxgb/sge.h netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
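t1_start_xmit() above (sge.c:1794-1810) handles LSO by pushing a cpl_tx_pkt_lso header in front of the Ethernet frame. A minimal sketch of that shape; struct my_lso_hdr is a hypothetical stand-in for the CPL layout, and the caller is assumed to have guaranteed the headroom (t1_start_xmit reallocates it with skb_realloc_headroom() when short):

	struct my_lso_hdr {
		u8 ip_hdr_words, tcp_hdr_words;
		__be16 mss;
		__be32 len;
	};

	static void push_lso_hdr(struct sk_buff *skb)
	{
		struct my_lso_hdr *hdr = skb_push(skb, sizeof(*hdr));

		/* header offsets survive skb_push(), so ip_hdr()/tcp_hdr()
		 * still point at the inner headers */
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->mss = htons(skb_shinfo(skb)->gso_size);
		hdr->len = htonl(skb->len - sizeof(*hdr));
	}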
skb                62 drivers/net/ethernet/chelsio/cxgb3/adapter.h 	int (*send)(struct port_info *pi, struct sk_buff **skb);
skb                63 drivers/net/ethernet/chelsio/cxgb3/adapter.h 	int (*recv)(struct port_info *pi, struct sk_buff *skb);
skb               306 drivers/net/ethernet/chelsio/cxgb3/adapter.h int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
skb               322 drivers/net/ethernet/chelsio/cxgb3/adapter.h netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
skb               323 drivers/net/ethernet/chelsio/cxgb3/adapter.h int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
skb               459 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct sk_buff *skb;
skb               468 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
skb               469 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (!skb)
skb               470 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			skb = adap->nofail_skb;
skb               471 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (!skb)
skb               474 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req = __skb_put_zero(skb, sizeof(*req));
skb               479 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t3_mgmt_tx(adap, skb);
skb               480 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (skb == adap->nofail_skb) {
skb               491 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
skb               492 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (!skb)
skb               493 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			skb = adap->nofail_skb;
skb               494 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (!skb)
skb               497 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req = __skb_put_zero(skb, sizeof(*req));
skb               501 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t3_mgmt_tx(adap, skb);
skb               502 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (skb == adap->nofail_skb) {
skb               513 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
skb               514 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (!skb)
skb               515 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			skb = adap->nofail_skb;
skb               516 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (!skb)
skb               519 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		req = __skb_put_zero(skb, sizeof(*req));
skb               523 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t3_mgmt_tx(adap, skb);
skb               524 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (skb == adap->nofail_skb) {
skb               532 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
skb               533 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (!skb)
skb               534 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		skb = adap->nofail_skb;
skb               535 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (!skb)
skb               538 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	greq = __skb_put_zero(skb, sizeof(*greq));
skb               542 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	t3_mgmt_tx(adap, skb);
skb               545 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (skb == adap->nofail_skb) {
skb               893 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
skb               898 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	ret = t3_offload_tx(tdev, skb);
skb               907 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
skb               909 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (!skb)
skb               912 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req = __skb_put(skb, sizeof(*req));
skb               919 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	skb->priority = 1;
skb               920 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	offload_tx(&adapter->tdev, skb);
skb               945 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct sk_buff *skb;
skb               949 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
skb               950 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (!skb)
skb               951 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		skb = adap->nofail_skb;
skb               952 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (!skb)
skb               955 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	req = skb_put(skb, sizeof(*req));
skb               963 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	ret = t3_mgmt_tx(adap, skb);
skb               964 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (skb == adap->nofail_skb) {
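The cxgb3_main.c blocks above repeat one pattern: a control message that must not fail allocates normally, falls back to a preallocated nofail_skb, and replenishes the reserve after the send. A minimal sketch of the pattern, assuming the same adap->nofail_skb field:

	static int send_nofail_req(struct adapter *adap, size_t len)
	{
		struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

		if (!skb)
			skb = adap->nofail_skb;	/* last-ditch reserve */
		if (!skb)
			return -ENOMEM;

		__skb_put_zero(skb, len);	/* ... then fill the request ... */
		t3_mgmt_tx(adap, skb);

		if (skb == adap->nofail_skb)	/* used the reserve: refill it */
			adap->nofail_skb = alloc_skb(len, GFP_KERNEL);
		return 0;
	}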
skb               550 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
skb               554 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	skb->priority = CPL_PRIORITY_SETUP;
skb               555 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req = __skb_put(skb, sizeof(*req));
skb               564 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct sk_buff *skb;
skb               575 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		skb = alloc_skb(sizeof(struct cpl_tid_release),
skb               577 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		if (!skb)
skb               578 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			skb = td->nofail_skb;
skb               579 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		if (!skb) {
skb               585 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		mk_tid_release(skb, p - td->tid_maps.tid_tab);
skb               586 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		cxgb3_ofld_send(tdev, skb);
skb               588 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		if (skb == td->nofail_skb)
skb               635 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		struct sk_buff *skb;
skb               637 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
skb               638 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		if (likely(skb)) {
skb               639 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			mk_tid_release(skb, tid);
skb               640 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			cxgb3_ofld_send(tdev, skb);
skb               706 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
skb               708 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_smt_write_rpl *rpl = cplhdr(skb);
skb               717 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
skb               719 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
skb               728 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
skb               730 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_rte_write_rpl *rpl = cplhdr(skb);
skb               739 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
skb               741 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
skb               749 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
skb               759 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
skb               761 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	union opcode_tid *p = cplhdr(skb);
skb               768 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		return t3c_tid->client->handlers[p->opcode] (dev, skb,
skb               777 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
skb               779 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	union opcode_tid *p = cplhdr(skb);
skb               787 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		    (dev, skb, t3c_tid->ctx);
skb               795 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
skb               797 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_pass_accept_req *req = cplhdr(skb);
skb               814 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		    (dev, skb, t3c_tid->ctx);
skb               829 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
skb               832 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (likely(!skb_cloned(skb))) {
skb               833 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		BUG_ON(skb->len < len);
skb               834 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		__skb_trim(skb, len);
skb               835 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		skb_get(skb);
skb               837 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		skb = alloc_skb(len, gfp);
skb               838 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		if (skb)
skb               839 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			__skb_put(skb, len);
skb               841 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	return skb;
skb               844 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
skb               846 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	union opcode_tid *p = cplhdr(skb);
skb               854 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		    (dev, skb, t3c_tid->ctx);
skb               856 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		struct cpl_abort_req_rss *req = cplhdr(skb);
skb               866 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		reply_skb = cxgb3_get_cpl_reply_skb(skb,
skb               889 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
skb               891 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_act_establish *req = cplhdr(skb);
skb               908 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		    (dev, skb, t3c_tid->ctx);
skb               916 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
skb               918 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_trace_pkt *p = cplhdr(skb);
skb               920 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	skb->protocol = htons(0xffff);
skb               921 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	skb->dev = dev->lldev;
skb               922 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	skb_pull(skb, sizeof(*p));
skb               923 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	skb_reset_mac_header(skb);
skb               924 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	netif_receive_skb(skb);
skb               934 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static inline u32 get_hwtid(struct sk_buff *skb)
skb               936 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
skb               939 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static inline u32 get_opcode(struct sk_buff *skb)
skb               941 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	return G_OPCODE(ntohl((__force __be32)skb->csum));
skb               944 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_term(struct t3cdev *dev, struct sk_buff *skb)
skb               946 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	unsigned int hwtid = get_hwtid(skb);
skb               947 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	unsigned int opcode = get_opcode(skb);
skb               953 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		return t3c_tid->client->handlers[opcode] (dev, skb,
skb               990 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
skb               992 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
skb              1022 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		struct sk_buff *skb = *skbs++;
skb              1023 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		unsigned int opcode = get_opcode(skb);
skb              1024 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		int ret = cpl_handlers[opcode] (dev, skb);
skb              1028 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			union opcode_tid *p = cplhdr(skb);
skb              1035 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			kfree_skb(skb);
skb              1043 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
skb              1048 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	r = dev->send(dev, skb);
skb              1090 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct sk_buff *skb;
skb              1093 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
skb              1094 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (!skb) {
skb              1098 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	skb->priority = CPL_PRIORITY_CONTROL;
skb              1099 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req = skb_put(skb, sizeof(*req));
skb              1107 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	tdev->send(tdev, skb);
skb                70 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h 				      struct sk_buff *skb, void *ctx);
skb               129 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
skb               135 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h static inline void *cplhdr(struct sk_buff *skb)
skb               137 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h 	return skb->data;
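cplhdr() and the do_*() handlers above implement a flat dispatch: each incoming CPL message carries its opcode (stashed in skb->csum by the RSS path, per get_opcode() earlier), and a table of cpl_handler_func entries routes it. A minimal sketch, assuming a table pre-filled with do_bad_cpl-style defaults:

	static cpl_handler_func cpl_handlers[256];	/* NUM_CPL_CMDS in-tree */

	static int dispatch_cpl(struct t3cdev *dev, struct sk_buff *skb)
	{
		unsigned int opcode = G_OPCODE(ntohl((__force __be32)skb->csum));

		return cpl_handlers[opcode](dev, skb);
	}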
skb                87 drivers/net/ethernet/chelsio/cxgb3/l2t.c static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
skb                93 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	if (!skb) {
skb                94 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
skb                95 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (!skb)
skb                99 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	req = __skb_put(skb, sizeof(*req));
skb               107 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	skb->priority = CPL_PRIORITY_CONTROL;
skb               108 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	cxgb3_ofld_send(dev, skb);
skb               110 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	skb_queue_walk_safe(&e->arpq, skb, tmp) {
skb               111 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		__skb_unlink(skb, &e->arpq);
skb               112 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		cxgb3_ofld_send(dev, skb);
skb               123 drivers/net/ethernet/chelsio/cxgb3/l2t.c static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
skb               125 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	__skb_queue_tail(&e->arpq, skb);
skb               128 drivers/net/ethernet/chelsio/cxgb3/l2t.c int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
skb               141 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		return cxgb3_ofld_send(dev, skb);
skb               149 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		arpq_enqueue(e, skb);
skb               161 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
skb               163 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			if (!skb)
skb               168 drivers/net/ethernet/chelsio/cxgb3/l2t.c 				setup_l2e_send_pending(dev, skb, e);
skb               170 drivers/net/ethernet/chelsio/cxgb3/l2t.c 				__kfree_skb(skb);
skb               382 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	struct sk_buff *skb, *tmp;
skb               384 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	skb_queue_walk_safe(arpq, skb, tmp) {
skb               385 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
skb               387 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		__skb_unlink(skb, arpq);
skb               389 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			cb->arp_failure_handler(dev, skb);
skb               391 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			cxgb3_ofld_send(dev, skb);
skb                83 drivers/net/ethernet/chelsio/cxgb3/l2t.h 					 struct sk_buff * skb);
skb                92 drivers/net/ethernet/chelsio/cxgb3/l2t.h #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
skb                94 drivers/net/ethernet/chelsio/cxgb3/l2t.h static inline void set_arp_failure_handler(struct sk_buff *skb,
skb                97 drivers/net/ethernet/chelsio/cxgb3/l2t.h 	L2T_SKB_CB(skb)->arp_failure_handler = hnd;
skb               114 drivers/net/ethernet/chelsio/cxgb3/l2t.h int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
skb               119 drivers/net/ethernet/chelsio/cxgb3/l2t.h int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
skb               121 drivers/net/ethernet/chelsio/cxgb3/l2t.h static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
skb               125 drivers/net/ethernet/chelsio/cxgb3/l2t.h 		return cxgb3_ofld_send(dev, skb);
skb               126 drivers/net/ethernet/chelsio/cxgb3/l2t.h 	return t3_l2t_send_slow(dev, skb, e);
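l2t_send() above takes the fast path only when the L2T entry is resolved; otherwise t3_l2t_send_slow() queues the skb on the entry's arpq. The per-skb callback declared in l2t.h runs if resolution fails. A minimal sketch of a sender; the drop-on-failure policy here is a hypothetical choice:

	static void my_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
	{
		kfree_skb(skb);		/* hypothetical policy: drop on failure */
	}

	static int send_via_l2t(struct t3cdev *dev, struct sk_buff *skb,
				struct l2t_entry *e)
	{
		set_arp_failure_handler(skb, my_arp_failure);
		return l2t_send(dev, skb, e);
	}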
skb               110 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sk_buff *skb;
skb               119 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sk_buff *skb;
skb               236 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
skb               246 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (frag_idx == 0 && skb_headlen(skb)) {
skb               248 drivers/net/ethernet/chelsio/cxgb3/sge.c 				 skb_headlen(skb), PCI_DMA_TODEVICE);
skb               253 drivers/net/ethernet/chelsio/cxgb3/sge.c 	nfrags = skb_shinfo(skb)->nr_frags;
skb               257 drivers/net/ethernet/chelsio/cxgb3/sge.c 			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
skb               297 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (d->skb) {	/* an SGL is present */
skb               299 drivers/net/ethernet/chelsio/cxgb3/sge.c 				unmap_skb(d->skb, q, cidx, pdev);
skb               301 drivers/net/ethernet/chelsio/cxgb3/sge.c 				dev_consume_skb_any(d->skb);
skb               302 drivers/net/ethernet/chelsio/cxgb3/sge.c 				d->skb = NULL;
skb               367 drivers/net/ethernet/chelsio/cxgb3/sge.c 		kfree_skb(d->skb);
skb               368 drivers/net/ethernet/chelsio/cxgb3/sge.c 		d->skb = NULL;
skb               531 drivers/net/ethernet/chelsio/cxgb3/sge.c 			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
skb               532 drivers/net/ethernet/chelsio/cxgb3/sge.c 			if (!skb)
skb               535 drivers/net/ethernet/chelsio/cxgb3/sge.c 			sd->skb = skb;
skb               536 drivers/net/ethernet/chelsio/cxgb3/sge.c 			buf_start = skb->data;
skb               786 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sk_buff *skb = NULL;
skb               789 drivers/net/ethernet/chelsio/cxgb3/sge.c 	prefetch(sd->skb->data);
skb               793 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb = alloc_skb(len, GFP_ATOMIC);
skb               794 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (likely(skb != NULL)) {
skb               795 drivers/net/ethernet/chelsio/cxgb3/sge.c 			__skb_put(skb, len);
skb               799 drivers/net/ethernet/chelsio/cxgb3/sge.c 			memcpy(skb->data, sd->skb->data, len);
skb               807 drivers/net/ethernet/chelsio/cxgb3/sge.c 		return skb;
skb               818 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb = sd->skb;
skb               819 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_put(skb, len);
skb               821 drivers/net/ethernet/chelsio/cxgb3/sge.c 	return skb;
skb               846 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sk_buff *newskb, *skb;
skb               851 drivers/net/ethernet/chelsio/cxgb3/sge.c 	newskb = skb = q->pg_skb;
skb               852 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!skb && (len <= SGE_RX_COPY_THRES)) {
skb               871 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
skb               876 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!skb)
skb               893 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!skb) {
skb               927 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
skb               929 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb) {
skb               930 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__skb_put(skb, IMMED_PKT_SIZE);
skb               931 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
skb               933 drivers/net/ethernet/chelsio/cxgb3/sge.c 	return skb;
skb               943 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
skb               947 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
skb               950 drivers/net/ethernet/chelsio/cxgb3/sge.c 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
skb               951 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb_shinfo(skb)->gso_size)
skb               963 drivers/net/ethernet/chelsio/cxgb3/sge.c static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
skb               969 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb_headlen(skb)) {
skb               970 drivers/net/ethernet/chelsio/cxgb3/sge.c 		*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
skb               977 drivers/net/ethernet/chelsio/cxgb3/sge.c 	si = skb_shinfo(skb);
skb               994 drivers/net/ethernet/chelsio/cxgb3/sge.c 	pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
skb              1011 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline unsigned int write_sgl(const struct sk_buff *skb,
skb              1022 drivers/net/ethernet/chelsio/cxgb3/sge.c 	nfrags = skb_shinfo(skb)->nr_frags;
skb              1024 drivers/net/ethernet/chelsio/cxgb3/sge.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1091 drivers/net/ethernet/chelsio/cxgb3/sge.c static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
skb              1102 drivers/net/ethernet/chelsio/cxgb3/sge.c 	sd->skb = skb;
skb              1147 drivers/net/ethernet/chelsio/cxgb3/sge.c 			sd->skb = skb;
skb              1179 drivers/net/ethernet/chelsio/cxgb3/sge.c static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
skb              1190 drivers/net/ethernet/chelsio/cxgb3/sge.c 	cpl->len = htonl(skb->len);
skb              1193 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb_vlan_tag_present(skb))
skb              1194 drivers/net/ethernet/chelsio/cxgb3/sge.c 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
skb              1196 drivers/net/ethernet/chelsio/cxgb3/sge.c 	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
skb              1204 drivers/net/ethernet/chelsio/cxgb3/sge.c 		eth_type = skb_network_offset(skb) == ETH_HLEN ?
skb              1207 drivers/net/ethernet/chelsio/cxgb3/sge.c 		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
skb              1208 drivers/net/ethernet/chelsio/cxgb3/sge.c 		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
skb              1214 drivers/net/ethernet/chelsio/cxgb3/sge.c 		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
skb              1217 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (skb->len <= WR_LEN - sizeof(*cpl)) {
skb              1218 drivers/net/ethernet/chelsio/cxgb3/sge.c 			q->sdesc[pidx].skb = NULL;
skb              1219 drivers/net/ethernet/chelsio/cxgb3/sge.c 			if (!skb->data_len)
skb              1220 drivers/net/ethernet/chelsio/cxgb3/sge.c 				skb_copy_from_linear_data(skb, &d->flit[2],
skb              1221 drivers/net/ethernet/chelsio/cxgb3/sge.c 							  skb->len);
skb              1223 drivers/net/ethernet/chelsio/cxgb3/sge.c 				skb_copy_bits(skb, 0, &d->flit[2], skb->len);
skb              1225 drivers/net/ethernet/chelsio/cxgb3/sge.c 			flits = (skb->len + 7) / 8 + 2;
skb              1226 drivers/net/ethernet/chelsio/cxgb3/sge.c 			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
skb              1233 drivers/net/ethernet/chelsio/cxgb3/sge.c 			dev_consume_skb_any(skb);
skb              1241 drivers/net/ethernet/chelsio/cxgb3/sge.c 	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
skb              1243 drivers/net/ethernet/chelsio/cxgb3/sge.c 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
skb              1263 drivers/net/ethernet/chelsio/cxgb3/sge.c netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1278 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (unlikely(skb->len < ETH_HLEN)) {
skb              1279 drivers/net/ethernet/chelsio/cxgb3/sge.c 		dev_kfree_skb_any(skb);
skb              1283 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qidx = skb_get_queue_mapping(skb);
skb              1291 drivers/net/ethernet/chelsio/cxgb3/sge.c 	ndesc = calc_tx_descs(skb);
skb              1302 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
skb              1303 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
skb              1304 drivers/net/ethernet/chelsio/cxgb3/sge.c 			dev_kfree_skb(skb);
skb              1332 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1334 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb_shinfo(skb)->gso_size)
skb              1336 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb_vlan_tag_present(skb))
skb              1363 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (likely(!skb_shared(skb)))
skb              1364 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb_orphan(skb);
skb              1366 drivers/net/ethernet/chelsio/cxgb3/sge.c 	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
skb              1383 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
skb              1386 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
skb              1389 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (likely(!skb->data_len))
skb              1392 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
skb              1400 drivers/net/ethernet/chelsio/cxgb3/sge.c 	kfree_skb(skb);
skb              1422 drivers/net/ethernet/chelsio/cxgb3/sge.c 				   struct sk_buff *skb, unsigned int ndesc,
skb              1426 drivers/net/ethernet/chelsio/cxgb3/sge.c 	      addq_exit:__skb_queue_tail(&q->sendq, skb);
skb              1461 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline int immediate(const struct sk_buff *skb)
skb              1463 drivers/net/ethernet/chelsio/cxgb3/sge.c 	return skb->len <= WR_LEN;
skb              1477 drivers/net/ethernet/chelsio/cxgb3/sge.c 		     struct sk_buff *skb)
skb              1480 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
skb              1482 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (unlikely(!immediate(skb))) {
skb              1484 drivers/net/ethernet/chelsio/cxgb3/sge.c 		dev_kfree_skb(skb);
skb              1494 drivers/net/ethernet/chelsio/cxgb3/sge.c 	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
skb              1503 drivers/net/ethernet/chelsio/cxgb3/sge.c 	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
skb              1525 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sk_buff *skb;
skb              1533 drivers/net/ethernet/chelsio/cxgb3/sge.c 	       (skb = __skb_dequeue(&q->sendq)) != NULL) {
skb              1535 drivers/net/ethernet/chelsio/cxgb3/sge.c 		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
skb              1563 drivers/net/ethernet/chelsio/cxgb3/sge.c int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
skb              1567 drivers/net/ethernet/chelsio/cxgb3/sge.c 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
skb              1581 drivers/net/ethernet/chelsio/cxgb3/sge.c static void deferred_unmap_destructor(struct sk_buff *skb)
skb              1588 drivers/net/ethernet/chelsio/cxgb3/sge.c 	dui = (struct deferred_unmap_info *)skb->head;
skb              1591 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb_tail_pointer(skb) - skb_transport_header(skb))
skb              1592 drivers/net/ethernet/chelsio/cxgb3/sge.c 		pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
skb              1593 drivers/net/ethernet/chelsio/cxgb3/sge.c 				 skb_transport_header(skb), PCI_DMA_TODEVICE);
skb              1595 drivers/net/ethernet/chelsio/cxgb3/sge.c 	si = skb_shinfo(skb);
skb              1601 drivers/net/ethernet/chelsio/cxgb3/sge.c static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
skb              1607 drivers/net/ethernet/chelsio/cxgb3/sge.c 	dui = (struct deferred_unmap_info *)skb->head;
skb              1629 drivers/net/ethernet/chelsio/cxgb3/sge.c static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
skb              1639 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (immediate(skb)) {
skb              1640 drivers/net/ethernet/chelsio/cxgb3/sge.c 		q->sdesc[pidx].skb = NULL;
skb              1641 drivers/net/ethernet/chelsio/cxgb3/sge.c 		write_imm(d, skb, skb->len, gen);
skb              1647 drivers/net/ethernet/chelsio/cxgb3/sge.c 	from = (struct work_request_hdr *)skb->data;
skb              1649 drivers/net/ethernet/chelsio/cxgb3/sge.c 	       skb_transport_offset(skb) - sizeof(*from));
skb              1651 drivers/net/ethernet/chelsio/cxgb3/sge.c 	flits = skb_transport_offset(skb) / 8;
skb              1653 drivers/net/ethernet/chelsio/cxgb3/sge.c 	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
skb              1654 drivers/net/ethernet/chelsio/cxgb3/sge.c 			      skb_tail_pointer(skb) - skb_transport_header(skb),
skb              1657 drivers/net/ethernet/chelsio/cxgb3/sge.c 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
skb              1658 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb->destructor = deferred_unmap_destructor;
skb              1661 drivers/net/ethernet/chelsio/cxgb3/sge.c 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
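write_ofld_wr() above defers DMA unmapping to skb free time: the addresses are stashed in the skb's headroom (skb->head) and deferred_unmap_destructor() unmaps them when the last reference drops. A hedged sketch of the setup half, assuming the in-tree deferred_unmap_info layout of a pdev pointer followed by an address array:

	static void defer_unmap(struct sk_buff *skb, struct pci_dev *pdev,
				const dma_addr_t *addrs, int n)
	{
		struct deferred_unmap_info *dui =
			(struct deferred_unmap_info *)skb->head;

		dui->pdev = pdev;
		memcpy(dui->addr, addrs, n * sizeof(*addrs));
		skb->destructor = deferred_unmap_destructor;
	}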
skb              1672 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
skb              1676 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb->len <= WR_LEN)
skb              1679 drivers/net/ethernet/chelsio/cxgb3/sge.c 	flits = skb_transport_offset(skb) / 8;	/* headers */
skb              1680 drivers/net/ethernet/chelsio/cxgb3/sge.c 	cnt = skb_shinfo(skb)->nr_frags;
skb              1681 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
skb              1695 drivers/net/ethernet/chelsio/cxgb3/sge.c 		     struct sk_buff *skb)
skb              1698 drivers/net/ethernet/chelsio/cxgb3/sge.c 	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
skb              1703 drivers/net/ethernet/chelsio/cxgb3/sge.c 	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
skb              1706 drivers/net/ethernet/chelsio/cxgb3/sge.c 			skb->priority = ndesc;	/* save for restart */
skb              1713 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!immediate(skb) &&
skb              1714 drivers/net/ethernet/chelsio/cxgb3/sge.c 	    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
skb              1729 drivers/net/ethernet/chelsio/cxgb3/sge.c 	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
skb              1742 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sk_buff *skb;
skb              1752 drivers/net/ethernet/chelsio/cxgb3/sge.c 	while ((skb = skb_peek(&q->sendq)) != NULL) {
skb              1754 drivers/net/ethernet/chelsio/cxgb3/sge.c 		unsigned int ndesc = skb->priority;
skb              1767 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (!immediate(skb) &&
skb              1768 drivers/net/ethernet/chelsio/cxgb3/sge.c 		    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
skb              1780 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__skb_unlink(skb, &q->sendq);
skb              1783 drivers/net/ethernet/chelsio/cxgb3/sge.c 		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
skb              1784 drivers/net/ethernet/chelsio/cxgb3/sge.c 			      (dma_addr_t *)skb->head);
skb              1806 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline int queue_set(const struct sk_buff *skb)
skb              1808 drivers/net/ethernet/chelsio/cxgb3/sge.c 	return skb->priority >> 1;
skb              1818 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline int is_ctrl_pkt(const struct sk_buff *skb)
skb              1820 drivers/net/ethernet/chelsio/cxgb3/sge.c 	return skb->priority & 1;
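queue_set() and is_ctrl_pkt() above decode an encoding that the offload senders put into skb->priority: bit 0 flags a control-queue CPL, the remaining bits pick the queue set. A matching encoder, sketched by inverting the two helpers:

	static inline void set_ofld_priority(struct sk_buff *skb,
					     unsigned int qset, bool ctrl)
	{
		skb->priority = (qset << 1) | (ctrl ? 1 : 0);
	}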
skb              1832 drivers/net/ethernet/chelsio/cxgb3/sge.c int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
skb              1835 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
skb              1837 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (unlikely(is_ctrl_pkt(skb)))
skb              1838 drivers/net/ethernet/chelsio/cxgb3/sge.c 		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
skb              1840 drivers/net/ethernet/chelsio/cxgb3/sge.c 	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
skb              1852 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
skb              1856 drivers/net/ethernet/chelsio/cxgb3/sge.c 	__skb_queue_tail(&q->rx_queue, skb);
skb              1903 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
skb              1918 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb_queue_walk_safe(&queue, skb, tmp) {
skb              1923 drivers/net/ethernet/chelsio/cxgb3/sge.c 			__skb_unlink(skb, &queue);
skb              1924 drivers/net/ethernet/chelsio/cxgb3/sge.c 			prefetch(skb->data);
skb              1925 drivers/net/ethernet/chelsio/cxgb3/sge.c 			skbs[ngathered] = skb;
skb              1957 drivers/net/ethernet/chelsio/cxgb3/sge.c 			     struct sk_buff *skb, struct sk_buff *rx_gather[],
skb              1960 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_reset_mac_header(skb);
skb              1961 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_reset_network_header(skb);
skb              1962 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_reset_transport_header(skb);
skb              1965 drivers/net/ethernet/chelsio/cxgb3/sge.c 		rx_gather[gather_idx++] = skb;
skb              1972 drivers/net/ethernet/chelsio/cxgb3/sge.c 		offload_enqueue(rq, skb);
skb              2016 drivers/net/ethernet/chelsio/cxgb3/sge.c static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
skb              2018 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct net_device *dev = skb->dev;
skb              2027 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_reset_network_header(skb);
skb              2028 drivers/net/ethernet/chelsio/cxgb3/sge.c 	arp = arp_hdr(skb);
skb              2049 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline int is_arp(struct sk_buff *skb)
skb              2051 drivers/net/ethernet/chelsio/cxgb3/sge.c 	return skb->protocol == htons(ETH_P_ARP);
skb              2055 drivers/net/ethernet/chelsio/cxgb3/sge.c 					struct sk_buff *skb)
skb              2057 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (is_arp(skb)) {
skb              2058 drivers/net/ethernet/chelsio/cxgb3/sge.c 		cxgb3_arp_process(pi, skb);
skb              2063 drivers/net/ethernet/chelsio/cxgb3/sge.c 		pi->iscsic.recv(pi, skb);
skb              2079 drivers/net/ethernet/chelsio/cxgb3/sge.c 		   struct sk_buff *skb, int pad, int lro)
skb              2081 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
skb              2085 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_pull(skb, sizeof(*p) + pad);
skb              2086 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
skb              2087 drivers/net/ethernet/chelsio/cxgb3/sge.c 	pi = netdev_priv(skb->dev);
skb              2088 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
skb              2091 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2093 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb_checksum_none_assert(skb);
skb              2094 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
skb              2098 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
skb              2102 drivers/net/ethernet/chelsio/cxgb3/sge.c 			napi_gro_receive(&qs->napi, skb);
skb              2105 drivers/net/ethernet/chelsio/cxgb3/sge.c 				cxgb3_process_iscsi_prov_pack(pi, skb);
skb              2106 drivers/net/ethernet/chelsio/cxgb3/sge.c 			netif_receive_skb(skb);
skb              2109 drivers/net/ethernet/chelsio/cxgb3/sge.c 		netif_rx(skb);
skb              2133 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sk_buff *skb = NULL;
skb              2140 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb = napi_get_frags(&qs->napi);
skb              2141 drivers/net/ethernet/chelsio/cxgb3/sge.c 		qs->nomem = !skb;
skb              2158 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!skb) {
skb              2165 drivers/net/ethernet/chelsio/cxgb3/sge.c 	rx_frag = skb_shinfo(skb)->frags;
skb              2166 drivers/net/ethernet/chelsio/cxgb3/sge.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              2174 drivers/net/ethernet/chelsio/cxgb3/sge.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2177 drivers/net/ethernet/chelsio/cxgb3/sge.c 			skb->ip_summed = CHECKSUM_NONE;
skb              2188 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb->len += len;
skb              2189 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb->data_len += len;
skb              2190 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb->truesize += len;
skb              2191 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_shinfo(skb)->nr_frags++;
skb              2196 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
skb              2200 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
skb              2335 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sk_buff *skb = NULL;
skb              2346 drivers/net/ethernet/chelsio/cxgb3/sge.c 			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
skb              2347 drivers/net/ethernet/chelsio/cxgb3/sge.c 			if (!skb)
skb              2350 drivers/net/ethernet/chelsio/cxgb3/sge.c 			__skb_put_data(skb, r, AN_PKT_SIZE);
skb              2351 drivers/net/ethernet/chelsio/cxgb3/sge.c 			skb->data[0] = CPL_ASYNC_NOTIF;
skb              2355 drivers/net/ethernet/chelsio/cxgb3/sge.c 			skb = get_imm_packet(r);
skb              2356 drivers/net/ethernet/chelsio/cxgb3/sge.c 			if (unlikely(!skb)) {
skb              2387 drivers/net/ethernet/chelsio/cxgb3/sge.c 				skb = get_packet_pg(adap, fl, q,
skb              2391 drivers/net/ethernet/chelsio/cxgb3/sge.c 				q->pg_skb = skb;
skb              2393 drivers/net/ethernet/chelsio/cxgb3/sge.c 				skb = get_packet(adap, fl, G_RSPD_LEN(len),
skb              2395 drivers/net/ethernet/chelsio/cxgb3/sge.c 			if (unlikely(!skb)) {
skb              2400 drivers/net/ethernet/chelsio/cxgb3/sge.c 				__skb_pull(skb, 2);
skb              2429 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (skb != NULL && packet_complete) {
skb              2431 drivers/net/ethernet/chelsio/cxgb3/sge.c 				rx_eth(adap, q, skb, ethpad, lro);
skb              2435 drivers/net/ethernet/chelsio/cxgb3/sge.c 				skb->csum = rss_hi;
skb              2436 drivers/net/ethernet/chelsio/cxgb3/sge.c 				skb->priority = rss_lo;
skb              2437 drivers/net/ethernet/chelsio/cxgb3/sge.c 				ngathered = rx_offload(&adap->tdev, q, skb,
skb                58 drivers/net/ethernet/chelsio/cxgb3/t3cdev.h 	int (*send)(struct t3cdev *dev, struct sk_buff *skb);
skb                59 drivers/net/ethernet/chelsio/cxgb3/t3cdev.h 	int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
skb              1092 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 	struct sk_buff *skb;
skb              1410 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb              1413 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
skb              1414 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
skb              1438 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
skb              1897 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
skb              1899 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
skb              1901 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
skb                41 drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c bool cxgb_fcoe_sof_eof_supported(struct adapter *adap, struct sk_buff *skb)
skb                43 drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c 	struct fcoe_hdr *fcoeh = (struct fcoe_hdr *)skb_network_header(skb);
skb                52 drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c 	skb_copy_bits(skb, skb->len - 4, &eof, 1);
skb                61 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct sk_buff *skb;
skb                63 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
skb                64 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	if (!skb)
skb                67 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
skb                75 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
skb                76 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	t4_ofld_send(adap, skb);
skb               518 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct sk_buff *skb;
skb               523 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	skb = alloc_skb(len, GFP_KERNEL);
skb               524 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	if (!skb)
skb               527 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	fwr = __skb_put(skb, len);
skb               534 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	t4_mgmt_tx(adapter, skb);
skb               548 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct sk_buff *skb;
skb               550 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
skb               551 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	if (!skb)
skb               563 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			kfree_skb(skb);
skb               578 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 			kfree_skb(skb);
skb               583 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	fwr = __skb_put_zero(skb, sizeof(*fwr));
skb               674 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
skb               675 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	t4_ofld_send(adapter, skb);
skb              1010 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
skb              1016 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
skb              1051 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
skb              1057 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
skb              1099 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct sk_buff *skb;
skb              1191 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		skb = alloc_skb(size, GFP_KERNEL);
skb              1192 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		if (!skb) {
skb              1197 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		mk_act_open_req6(f, skb,
skb              1201 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		skb = alloc_skb(size, GFP_KERNEL);
skb              1202 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		if (!skb) {
skb              1207 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 		mk_act_open_req(f, skb,
skb              1213 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
skb              1214 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	t4_ofld_send(adapter, skb);
skb              1461 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	struct sk_buff *skb;
skb              1489 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	skb = alloc_skb(wrlen, GFP_KERNEL);
skb              1490 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	if (!skb) {
skb              1494 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
skb              1495 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
skb              1507 drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c 	t4_ofld_send(adapter, skb);
skb              1061 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              1076 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		err = vlan_get_tag(skb, &vlan_tci);
skb              1085 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			if (skb->protocol == htons(ETH_P_FCOE))
skb              1086 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				txq = skb->priority & 0x7;
skb              1094 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		txq = (skb_rx_queue_recorded(skb)
skb              1095 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			? skb_get_rx_queue(skb)
skb              1104 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
skb              1350 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
skb              1355 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
skb              1356 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req = __skb_put(skb, sizeof(*req));
skb              1387 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct sk_buff *skb;
skb              1402 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
skb              1406 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
skb              1407 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t4_ofld_send(adap, skb);
skb              1421 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct sk_buff *skb;
skb              1442 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
skb              1443 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (likely(skb)) {
skb              1444 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		mk_tid_release(skb, chan, tid);
skb              1445 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t4_ofld_send(adap, skb);
skb              1530 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct sk_buff *skb;
skb              1535 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
skb              1536 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (!skb)
skb              1540 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req = __skb_put(skb, sizeof(*req));
skb              1551 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	ret = t4_mgmt_tx(adap, skb);
skb              1571 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct sk_buff *skb;
skb              1576 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
skb              1577 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (!skb)
skb              1581 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req = __skb_put(skb, sizeof(*req));
skb              1594 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	ret = t4_mgmt_tx(adap, skb);
skb              1602 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct sk_buff *skb;
skb              1609 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
skb              1610 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (!skb)
skb              1613 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	req = __skb_put(skb, sizeof(*req));
skb              1618 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	ret = t4_mgmt_tx(adap, skb);
skb              3359 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static netdev_features_t cxgb_features_check(struct sk_buff *skb,
skb              3370 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
skb                58 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c bool cxgb4_ptp_is_ptp_tx(struct sk_buff *skb)
skb                62 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c 	uh = udp_hdr(skb);
skb                63 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c 	return skb->len >= PTP_MIN_LENGTH &&
skb                64 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c 		skb->len <= PTP_IN_TRANSMIT_PACKET_MAXNUM &&
skb                65 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c 		likely(skb->protocol == htons(ETH_P_IP)) &&
skb                66 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c 		ip_hdr(skb)->protocol == IPPROTO_UDP &&
skb                70 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c bool is_ptp_enabled(struct sk_buff *skb, struct net_device *dev)
skb                75 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c 	return (pi->ptp_enable && cxgb4_xmit_with_hwtstamp(skb) &&
skb                76 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c 		cxgb4_ptp_is_ptp_tx(skb));
skb                84 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c bool cxgb4_ptp_is_ptp_rx(struct sk_buff *skb)
skb                86 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c 	struct udphdr *uh = (struct udphdr *)(skb->data + ETH_HLEN +
skb                87 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c 					      IPV4_HLEN(skb->data));
skb                55 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h static inline bool cxgb4_xmit_with_hwtstamp(struct sk_buff *skb)
skb                57 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h 	return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
skb                60 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h static inline void cxgb4_xmit_hwtstamp_pending(struct sk_buff *skb)
skb                62 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb                67 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h bool cxgb4_ptp_is_ptp_tx(struct sk_buff *skb);
skb                68 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h bool cxgb4_ptp_is_ptp_rx(struct sk_buff *skb);
skb                73 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h bool is_ptp_enabled(struct sk_buff *skb, struct net_device *dev);
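
Taken together, the cxgb4_ptp lines above gate TX hardware timestamping on three tests: the port has PTP enabled, the skb asked for a hardware timestamp (SKBTX_HW_TSTAMP), and the frame parses as UDP-over-IPv4 of PTP-plausible length. A hedged restatement of that predicate; the UDP port comparison is cut off in the excerpt, so the standard IEEE 1588 event port below is an assumption.

	/* Sketch of the TX-side PTP test implied by is_ptp_enabled() and
	 * cxgb4_ptp_is_ptp_tx(). The length bounds (PTP_MIN_LENGTH /
	 * PTP_IN_TRANSMIT_PACKET_MAXNUM) are elided here.
	 */
	#include <linux/if_ether.h>
	#include <linux/ip.h>
	#include <linux/skbuff.h>
	#include <linux/udp.h>

	#define PTP_EVENT_PORT 319	/* IEEE 1588 event port; assumption */

	static bool looks_like_ptp_tx(const struct sk_buff *skb)
	{
		if (skb->protocol != htons(ETH_P_IP) ||
		    ip_hdr(skb)->protocol != IPPROTO_UDP)
			return false;
		return udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
	}
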
skb               229 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
skb               231 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	skb_set_queue_mapping(skb, (queue << 1) | prio);
skb               386 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
skb               391 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
skb               394 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb);
skb               142 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct sk_buff *skb;
skb               145 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
skb               146 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (!skb)
skb               149 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	req = __skb_put(skb, sizeof(*req));
skb               162 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	t4_mgmt_tx(adap, skb);
skb               175 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct sk_buff *skb;
skb               177 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	while ((skb = __skb_dequeue(&e->arpq)) != NULL)
skb               178 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		t4_ofld_send(adap, skb);
skb               216 drivers/net/ethernet/chelsio/cxgb4/l2t.c static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
skb               218 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	__skb_queue_tail(&e->arpq, skb);
skb               221 drivers/net/ethernet/chelsio/cxgb4/l2t.c int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
skb               236 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		return t4_ofld_send(adap, skb);
skb               245 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		arpq_enqueue(e, skb);
skb               354 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct sk_buff *skb;
skb               361 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		while ((skb = __skb_dequeue(&e->arpq)) != NULL)
skb               362 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			kfree_skb(skb);
skb               373 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct sk_buff *skb;
skb               381 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		while ((skb = __skb_dequeue(&e->arpq)) != NULL)
skb               382 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			kfree_skb(skb);
skb               516 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct sk_buff *skb;
skb               518 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
skb               519 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
skb               523 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			cb->arp_err_handler(cb->handle, skb);
skb               525 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			t4_ofld_send(adap, skb);
skb                91 drivers/net/ethernet/chelsio/cxgb4/l2t.h typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
skb               101 drivers/net/ethernet/chelsio/cxgb4/l2t.h #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
skb               103 drivers/net/ethernet/chelsio/cxgb4/l2t.h static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
skb               106 drivers/net/ethernet/chelsio/cxgb4/l2t.h 	L2T_SKB_CB(skb)->handle = handle;
skb               107 drivers/net/ethernet/chelsio/cxgb4/l2t.h 	L2T_SKB_CB(skb)->arp_err_handler = handler;
skb               111 drivers/net/ethernet/chelsio/cxgb4/l2t.h int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
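
The l2t lines above park pending packets on a per-entry arpq and stash a per-skb failure callback in the skb control block via L2T_SKB_CB(). A minimal sketch of that callback-in-cb pattern; the struct mirrors what t4_set_arp_err_handler() implies, and anything beyond the handle/handler pair is an assumption.

	/* Sketch of the per-skb callback-in-cb pattern used by the L2T code. */
	#include <linux/skbuff.h>

	struct my_skb_cb {
		void *handle;
		void (*arp_err_handler)(void *handle, struct sk_buff *skb);
	};
	#define MY_SKB_CB(skb) ((struct my_skb_cb *)(skb)->cb)

	static void flush_arpq(struct sk_buff_head *arpq, bool arp_failed)
	{
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(arpq)) != NULL) {
			struct my_skb_cb *cb = MY_SKB_CB(skb);

			if (arp_failed && cb->arp_err_handler)
				cb->arp_err_handler(cb->handle, skb); /* owner frees */
			else
				kfree_skb(skb);	/* or hand back to the send path */
		}
	}
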
skb               241 drivers/net/ethernet/chelsio/cxgb4/sge.c int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
skb               247 drivers/net/ethernet/chelsio/cxgb4/sge.c 	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
skb               251 drivers/net/ethernet/chelsio/cxgb4/sge.c 	si = skb_shinfo(skb);
skb               266 drivers/net/ethernet/chelsio/cxgb4/sge.c 	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
skb               273 drivers/net/ethernet/chelsio/cxgb4/sge.c static void unmap_skb(struct device *dev, const struct sk_buff *skb,
skb               279 drivers/net/ethernet/chelsio/cxgb4/sge.c 	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
skb               281 drivers/net/ethernet/chelsio/cxgb4/sge.c 	si = skb_shinfo(skb);
skb               295 drivers/net/ethernet/chelsio/cxgb4/sge.c static void deferred_unmap_destructor(struct sk_buff *skb)
skb               297 drivers/net/ethernet/chelsio/cxgb4/sge.c 	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
skb               301 drivers/net/ethernet/chelsio/cxgb4/sge.c static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
skb               305 drivers/net/ethernet/chelsio/cxgb4/sge.c 	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
skb               307 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (likely(skb_headlen(skb)))
skb               379 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (d->skb) {                       /* an SGL is present */
skb               381 drivers/net/ethernet/chelsio/cxgb4/sge.c 				unmap_sgl(dev, d->skb, d->sgl, q);
skb               382 drivers/net/ethernet/chelsio/cxgb4/sge.c 			dev_consume_skb_any(d->skb);
skb               383 drivers/net/ethernet/chelsio/cxgb4/sge.c 			d->skb = NULL;
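
free_tx_desc() and deferred_unmap_destructor() above rely on a headroom trick: cxgb4_map_skb() writes the per-fragment DMA addresses at skb->head, so the destructor can unmap later with no side allocation. A sketch of the unmap side, mirroring unmap_skb(); ensuring headroom for nr_frags + 1 addresses is assumed to be the caller's job.

	/* Sketch: read the DMA address array back out of the skb headroom,
	 * as deferred_unmap_destructor() does via (dma_addr_t *)skb->head.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	static void my_deferred_unmap(struct sk_buff *skb)
	{
		dma_addr_t *addr = (dma_addr_t *)skb->head;
		const struct skb_shared_info *si = skb_shinfo(skb);
		unsigned int i;

		dma_unmap_single(skb->dev->dev.parent, *addr++,
				 skb_headlen(skb), DMA_TO_DEVICE);
		for (i = 0; i < si->nr_frags; i++)
			dma_unmap_page(skb->dev->dev.parent, *addr++,
				       skb_frag_size(&si->frags[i]),
				       DMA_TO_DEVICE);
	}
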
skb               785 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
skb               789 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
skb               794 drivers/net/ethernet/chelsio/cxgb4/sge.c 		hdrlen = skb_shinfo(skb)->gso_size ?
skb               798 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
skb               810 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
skb               814 drivers/net/ethernet/chelsio/cxgb4/sge.c 	int hdrlen = is_eth_imm(skb, chip_ver);
skb               822 drivers/net/ethernet/chelsio/cxgb4/sge.c 		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
skb               832 drivers/net/ethernet/chelsio/cxgb4/sge.c 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
skb               833 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb_shinfo(skb)->gso_size) {
skb               834 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (skb->encapsulation && chip_ver > CHELSIO_T5)
skb               857 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
skb               860 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return flits_to_desc(calc_tx_flits(skb, chip_ver));
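
calc_tx_flits() above prices a packet in 8-byte flits: immediate packets are just length plus header rounded up, DMA packets cost an SGL sized from the fragment count plus LSO/tunnel header flits, and calc_tx_descs() converts flits to ring descriptors. The two helpers it leans on are not in the excerpt; the bodies below are assumptions consistent with the call sites.

	/* Sketch of the flit accounting behind calc_tx_flits()/calc_tx_descs().
	 * A flit is 8 bytes; a TX descriptor holds 8 flits (64 bytes).
	 * Bodies are assumptions: sgl_len() uses the usual Chelsio packing
	 * where two addresses share a length triple.
	 */
	#include <linux/kernel.h>

	static inline unsigned int sgl_len(unsigned int n)
	{
		/* first pair costs 2 flits; each further pair adds 3 */
		n--;
		return (3 * n) / 2 + (n & 1) + 2;
	}

	static inline unsigned int flits_to_desc(unsigned int flits)
	{
		return DIV_ROUND_UP(flits, 8);	/* 8 flits per descriptor */
	}
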
skb               880 drivers/net/ethernet/chelsio/cxgb4/sge.c void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
skb               886 drivers/net/ethernet/chelsio/cxgb4/sge.c 	const struct skb_shared_info *si = skb_shinfo(skb);
skb               890 drivers/net/ethernet/chelsio/cxgb4/sge.c 	len = skb_headlen(skb) - start;
skb              1040 drivers/net/ethernet/chelsio/cxgb4/sge.c void cxgb4_inline_tx_skb(const struct sk_buff *skb,
skb              1046 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (likely(skb->len <= left)) {
skb              1047 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (likely(!skb->data_len))
skb              1048 drivers/net/ethernet/chelsio/cxgb4/sge.c 			skb_copy_from_linear_data(skb, pos, skb->len);
skb              1050 drivers/net/ethernet/chelsio/cxgb4/sge.c 			skb_copy_bits(skb, 0, pos, skb->len);
skb              1051 drivers/net/ethernet/chelsio/cxgb4/sge.c 		pos += skb->len;
skb              1053 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_copy_bits(skb, 0, pos, left);
skb              1054 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_copy_bits(skb, left, q->desc, skb->len - left);
skb              1055 drivers/net/ethernet/chelsio/cxgb4/sge.c 		pos = (void *)q->desc + (skb->len - left);
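
cxgb4_inline_tx_skb() above inlines a small packet straight into the descriptor ring, splitting the copy when it would run past the ring's end. The same wraparound idiom reduced to plain memory; the ring-geometry parameters are stand-ins.

	/* Sketch of the descriptor-ring wraparound copy in
	 * cxgb4_inline_tx_skb() / inline_tx_skb_header().
	 */
	#include <string.h>

	static void *ring_copy(void *ring_base, void *ring_end, void *pos,
			       const void *src, size_t len)
	{
		size_t left = (char *)ring_end - (char *)pos;

		if (len <= left) {		/* fits before the end */
			memcpy(pos, src, len);
			return (char *)pos + len;
		}
		memcpy(pos, src, left);		/* fill to the end ... */
		memcpy(ring_base, (const char *)src + left, len - left);
		return (char *)ring_base + (len - left); /* ... wrap to base */
	}
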
skb              1065 drivers/net/ethernet/chelsio/cxgb4/sge.c static void *inline_tx_skb_header(const struct sk_buff *skb,
skb              1073 drivers/net/ethernet/chelsio/cxgb4/sge.c 		memcpy(pos, skb->data, length);
skb              1076 drivers/net/ethernet/chelsio/cxgb4/sge.c 		memcpy(pos, skb->data, left);
skb              1077 drivers/net/ethernet/chelsio/cxgb4/sge.c 		memcpy(q->desc, skb->data + left, length - left);
skb              1093 drivers/net/ethernet/chelsio/cxgb4/sge.c static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
skb              1099 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb->encapsulation &&
skb              1104 drivers/net/ethernet/chelsio/cxgb4/sge.c 		ver = inner_ip_hdr(skb)->version;
skb              1105 drivers/net/ethernet/chelsio/cxgb4/sge.c 		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
skb              1106 drivers/net/ethernet/chelsio/cxgb4/sge.c 			inner_ipv6_hdr(skb)->nexthdr;
skb              1108 drivers/net/ethernet/chelsio/cxgb4/sge.c 		ver = ip_hdr(skb)->version;
skb              1109 drivers/net/ethernet/chelsio/cxgb4/sge.c 		proto = (ver == 4) ? ip_hdr(skb)->protocol :
skb              1110 drivers/net/ethernet/chelsio/cxgb4/sge.c 			ipv6_hdr(skb)->nexthdr;
skb              1145 drivers/net/ethernet/chelsio/cxgb4/sge.c 			l4_len = skb_inner_network_header_len(skb);
skb              1146 drivers/net/ethernet/chelsio/cxgb4/sge.c 			eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
skb              1148 drivers/net/ethernet/chelsio/cxgb4/sge.c 			l4_len = skb_network_header_len(skb);
skb              1149 drivers/net/ethernet/chelsio/cxgb4/sge.c 			eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
skb              1159 drivers/net/ethernet/chelsio/cxgb4/sge.c 		int start = skb_transport_offset(skb);
skb              1163 drivers/net/ethernet/chelsio/cxgb4/sge.c 			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
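
hwcsum() above turns a CHECKSUM_PARTIAL skb into TXPKT_* checksum directives: the L4 sum starts at skb_transport_offset() and the result lands at start + skb->csum_offset, switching to the inner headers when tunnel offload applies. A tiny illustration of the two coordinates the stack hands the driver:

	/* What CHECKSUM_PARTIAL gives the driver: where the L4 checksum
	 * computation starts and where the result must be stored.
	 */
	#include <linux/skbuff.h>

	static void csum_partial_coords(const struct sk_buff *skb,
					unsigned int *start, unsigned int *loc)
	{
		*start = skb_transport_offset(skb); /* sum from here ... */
		*loc   = *start + skb->csum_offset; /* ... store result here */
	}
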
skb              1183 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
skb              1191 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb->protocol != htons(ETH_P_FCOE))
skb              1194 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_reset_mac_header(skb);
skb              1195 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb->mac_len = sizeof(struct ethhdr);
skb              1197 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_set_network_header(skb, skb->mac_len);
skb              1198 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
skb              1200 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
skb              1216 drivers/net/ethernet/chelsio/cxgb4/sge.c enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
skb              1220 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct port_info *pi = netdev_priv(skb->dev);
skb              1223 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb              1224 drivers/net/ethernet/chelsio/cxgb4/sge.c 	    skb->inner_protocol != htons(ETH_P_TEB))
skb              1227 drivers/net/ethernet/chelsio/cxgb4/sge.c 	switch (vlan_get_protocol(skb)) {
skb              1229 drivers/net/ethernet/chelsio/cxgb4/sge.c 		l4_hdr = ip_hdr(skb)->protocol;
skb              1232 drivers/net/ethernet/chelsio/cxgb4/sge.c 		l4_hdr = ipv6_hdr(skb)->nexthdr;
skb              1240 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (adapter->vxlan_port == udp_hdr(skb)->dest)
skb              1242 drivers/net/ethernet/chelsio/cxgb4/sge.c 		else if (adapter->geneve_port == udp_hdr(skb)->dest)
skb              1252 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline void t6_fill_tnl_lso(struct sk_buff *skb,
skb              1258 drivers/net/ethernet/chelsio/cxgb4/sge.c 	int l3hdr_len = skb_network_header_len(skb);
skb              1259 drivers/net/ethernet/chelsio/cxgb4/sge.c 	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
skb              1260 drivers/net/ethernet/chelsio/cxgb4/sge.c 	const struct skb_shared_info *ssi = skb_shinfo(skb);
skb              1261 drivers/net/ethernet/chelsio/cxgb4/sge.c 	bool v6 = (ip_hdr(skb)->version == 6);
skb              1277 drivers/net/ethernet/chelsio/cxgb4/sge.c 	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
skb              1278 drivers/net/ethernet/chelsio/cxgb4/sge.c 	in_eth_xtra_len = skb_inner_network_header(skb) -
skb              1279 drivers/net/ethernet/chelsio/cxgb4/sge.c 			  skb_inner_mac_header(skb) - ETH_HLEN;
skb              1300 drivers/net/ethernet/chelsio/cxgb4/sge.c 	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
skb              1301 drivers/net/ethernet/chelsio/cxgb4/sge.c 	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
skb              1302 drivers/net/ethernet/chelsio/cxgb4/sge.c 	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
skb              1309 drivers/net/ethernet/chelsio/cxgb4/sge.c 	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
skb              1363 drivers/net/ethernet/chelsio/cxgb4/sge.c static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1378 drivers/net/ethernet/chelsio/cxgb4/sge.c 	bool ptp_enabled = is_ptp_enabled(skb, dev);
skb              1390 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (unlikely(skb->len < ETH_HLEN)) {
skb              1391 drivers/net/ethernet/chelsio/cxgb4/sge.c out_free:	dev_kfree_skb_any(skb);
skb              1397 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb_vlan_tagged(skb))
skb              1399 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
skb              1404 drivers/net/ethernet/chelsio/cxgb4/sge.c 	ssi = skb_shinfo(skb);
skb              1406 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (xfrm_offload(skb) && !ssi->gso_size)
skb              1407 drivers/net/ethernet/chelsio/cxgb4/sge.c 		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
skb              1410 drivers/net/ethernet/chelsio/cxgb4/sge.c 	qidx = skb_get_queue_mapping(skb);
skb              1414 drivers/net/ethernet/chelsio/cxgb4/sge.c 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              1415 drivers/net/ethernet/chelsio/cxgb4/sge.c 			adap->ptp_tx_skb = skb_get(skb);
skb              1424 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_tx_timestamp(skb);
skb              1430 drivers/net/ethernet/chelsio/cxgb4/sge.c 	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
skb              1439 drivers/net/ethernet/chelsio/cxgb4/sge.c 	flits = calc_tx_flits(skb, chip_ver);
skb              1453 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (is_eth_imm(skb, chip_ver))
skb              1456 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb->encapsulation && chip_ver > CHELSIO_T5)
skb              1457 drivers/net/ethernet/chelsio/cxgb4/sge.c 		tnl_type = cxgb_encap_offload_supported(skb);
skb              1460 drivers/net/ethernet/chelsio/cxgb4/sge.c 	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
skb              1486 drivers/net/ethernet/chelsio/cxgb4/sge.c 	len = immediate ? skb->len : 0;
skb              1491 drivers/net/ethernet/chelsio/cxgb4/sge.c 		int l3hdr_len = skb_network_header_len(skb);
skb              1492 drivers/net/ethernet/chelsio/cxgb4/sge.c 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
skb              1503 drivers/net/ethernet/chelsio/cxgb4/sge.c 			struct iphdr *iph = ip_hdr(skb);
skb              1505 drivers/net/ethernet/chelsio/cxgb4/sge.c 			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
skb              1516 drivers/net/ethernet/chelsio/cxgb4/sge.c 			if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1517 drivers/net/ethernet/chelsio/cxgb4/sge.c 				cntrl = hwcsum(adap->params.chip, skb);
skb              1524 drivers/net/ethernet/chelsio/cxgb4/sge.c 					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
skb              1529 drivers/net/ethernet/chelsio/cxgb4/sge.c 				lso->len = htonl(skb->len);
skb              1531 drivers/net/ethernet/chelsio/cxgb4/sge.c 				lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
skb              1568 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1569 drivers/net/ethernet/chelsio/cxgb4/sge.c 			cntrl = hwcsum(adap->params.chip, skb) |
skb              1575 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb_vlan_tag_present(skb)) {
skb              1577 drivers/net/ethernet/chelsio/cxgb4/sge.c 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
skb              1579 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (skb->protocol == htons(ETH_P_FCOE))
skb              1581 drivers/net/ethernet/chelsio/cxgb4/sge.c 				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
skb              1597 drivers/net/ethernet/chelsio/cxgb4/sge.c 	cpl->len = htons(skb->len);
skb              1601 drivers/net/ethernet/chelsio/cxgb4/sge.c 		cxgb4_inline_tx_skb(skb, &q->q, sgl);
skb              1602 drivers/net/ethernet/chelsio/cxgb4/sge.c 		dev_consume_skb_any(skb);
skb              1606 drivers/net/ethernet/chelsio/cxgb4/sge.c 		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
skb              1607 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_orphan(skb);
skb              1612 drivers/net/ethernet/chelsio/cxgb4/sge.c 		q->q.sdesc[last_desc].skb = skb;
skb              1648 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
skb              1666 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
skb              1674 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (t4vf_is_eth_imm(skb))
skb              1675 drivers/net/ethernet/chelsio/cxgb4/sge.c 		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
skb              1686 drivers/net/ethernet/chelsio/cxgb4/sge.c 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
skb              1687 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb_shinfo(skb)->gso_size)
skb              1704 drivers/net/ethernet/chelsio/cxgb4/sge.c static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
skb              1728 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (unlikely(skb->len < fw_hdr_copy_len))
skb              1733 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb_vlan_tag_present(skb))
skb              1735 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
skb              1741 drivers/net/ethernet/chelsio/cxgb4/sge.c 	qidx = skb_get_queue_mapping(skb);
skb              1754 drivers/net/ethernet/chelsio/cxgb4/sge.c 	flits = t4vf_calc_tx_flits(skb);
skb              1771 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (!t4vf_is_eth_imm(skb) &&
skb              1772 drivers/net/ethernet/chelsio/cxgb4/sge.c 	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
skb              1805 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
skb              1812 drivers/net/ethernet/chelsio/cxgb4/sge.c 	ssi = skb_shinfo(skb);
skb              1816 drivers/net/ethernet/chelsio/cxgb4/sge.c 		int l3hdr_len = skb_network_header_len(skb);
skb              1817 drivers/net/ethernet/chelsio/cxgb4/sge.c 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
skb              1831 drivers/net/ethernet/chelsio/cxgb4/sge.c 				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
skb              1836 drivers/net/ethernet/chelsio/cxgb4/sge.c 			lso->len = cpu_to_be32(skb->len);
skb              1838 drivers/net/ethernet/chelsio/cxgb4/sge.c 			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
skb              1858 drivers/net/ethernet/chelsio/cxgb4/sge.c 		len = (t4vf_is_eth_imm(skb)
skb              1859 drivers/net/ethernet/chelsio/cxgb4/sge.c 		       ? skb->len + sizeof(*cpl)
skb              1869 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1870 drivers/net/ethernet/chelsio/cxgb4/sge.c 			cntrl = hwcsum(adapter->params.chip, skb) |
skb              1881 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb_vlan_tag_present(skb)) {
skb              1883 drivers/net/ethernet/chelsio/cxgb4/sge.c 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
skb              1891 drivers/net/ethernet/chelsio/cxgb4/sge.c 	cpl->len = cpu_to_be16(skb->len);
skb              1897 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (t4vf_is_eth_imm(skb)) {
skb              1901 drivers/net/ethernet/chelsio/cxgb4/sge.c 		cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
skb              1902 drivers/net/ethernet/chelsio/cxgb4/sge.c 		dev_consume_skb_any(skb);
skb              1956 drivers/net/ethernet/chelsio/cxgb4/sge.c 		cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
skb              1957 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_orphan(skb);
skb              1962 drivers/net/ethernet/chelsio/cxgb4/sge.c 		tq->sdesc[last_desc].skb = skb;
skb              1978 drivers/net/ethernet/chelsio/cxgb4/sge.c 	dev_kfree_skb_any(skb);
skb              1982 drivers/net/ethernet/chelsio/cxgb4/sge.c netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1987 drivers/net/ethernet/chelsio/cxgb4/sge.c 		return cxgb4_vf_eth_xmit(skb, dev);
skb              1989 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return cxgb4_eth_xmit(skb, dev);
skb              2018 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline int is_imm(const struct sk_buff *skb)
skb              2020 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return skb->len <= MAX_CTRL_WR_LEN;
skb              2051 drivers/net/ethernet/chelsio/cxgb4/sge.c static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
skb              2056 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (unlikely(!is_imm(skb))) {
skb              2058 drivers/net/ethernet/chelsio/cxgb4/sge.c 		dev_kfree_skb(skb);
skb              2062 drivers/net/ethernet/chelsio/cxgb4/sge.c 	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
skb              2066 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb->priority = ndesc;                  /* save for restart */
skb              2067 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__skb_queue_tail(&q->sendq, skb);
skb              2073 drivers/net/ethernet/chelsio/cxgb4/sge.c 	cxgb4_inline_tx_skb(skb, &q->q, wr);
skb              2082 drivers/net/ethernet/chelsio/cxgb4/sge.c 	kfree_skb(skb);
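
ctrl_xmit() above shows the control-queue backpressure pattern: oversized WRs are dropped, and when the ring is full the descriptor cost is parked in skb->priority and the skb queued on sendq, so restart_ctrlq() can replay it without repricing. A reduced sketch; the queue layout is an assumption.

	/* Sketch of the park-and-restart backpressure used by ctrl_xmit(). */
	#include <linux/skbuff.h>

	struct mini_txq {
		struct sk_buff_head sendq;
		unsigned int avail;		/* free descriptors */
	};

	static int mini_ctrl_xmit(struct mini_txq *q, struct sk_buff *skb,
				  unsigned int ndesc)
	{
		if (ndesc > q->avail) {
			skb->priority = ndesc;	/* save cost for restart */
			__skb_queue_tail(&q->sendq, skb);
			return 1;		/* queued, ring is busy */
		}
		q->avail -= ndesc;
		/* ... inline the WR into the ring, ring the doorbell ... */
		kfree_skb(skb);
		return 0;
	}
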
skb              2094 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct sk_buff *skb;
skb              2102 drivers/net/ethernet/chelsio/cxgb4/sge.c 	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
skb              2104 drivers/net/ethernet/chelsio/cxgb4/sge.c 		unsigned int ndesc = skb->priority;     /* previously saved */
skb              2114 drivers/net/ethernet/chelsio/cxgb4/sge.c 		cxgb4_inline_tx_skb(skb, &q->q, wr);
skb              2115 drivers/net/ethernet/chelsio/cxgb4/sge.c 		kfree_skb(skb);
skb              2146 drivers/net/ethernet/chelsio/cxgb4/sge.c int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
skb              2151 drivers/net/ethernet/chelsio/cxgb4/sge.c 	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
skb              2163 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline int is_ofld_imm(const struct sk_buff *skb)
skb              2165 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
skb              2169 drivers/net/ethernet/chelsio/cxgb4/sge.c 		return skb->len <= SGE_MAX_WR_LEN;
skb              2171 drivers/net/ethernet/chelsio/cxgb4/sge.c 		return skb->len <= MAX_IMM_TX_PKT_LEN;
skb              2182 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
skb              2186 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (is_ofld_imm(skb))
skb              2187 drivers/net/ethernet/chelsio/cxgb4/sge.c 		return DIV_ROUND_UP(skb->len, 8);
skb              2189 drivers/net/ethernet/chelsio/cxgb4/sge.c 	flits = skb_transport_offset(skb) / 8U;   /* headers */
skb              2190 drivers/net/ethernet/chelsio/cxgb4/sge.c 	cnt = skb_shinfo(skb)->nr_frags;
skb              2191 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
skb              2250 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct sk_buff *skb;
skb              2266 drivers/net/ethernet/chelsio/cxgb4/sge.c 	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
skb              2278 drivers/net/ethernet/chelsio/cxgb4/sge.c 		flits = skb->priority;                /* previously saved */
skb              2283 drivers/net/ethernet/chelsio/cxgb4/sge.c 			ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
skb              2286 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (is_ofld_imm(skb))
skb              2287 drivers/net/ethernet/chelsio/cxgb4/sge.c 			cxgb4_inline_tx_skb(skb, &q->q, pos);
skb              2288 drivers/net/ethernet/chelsio/cxgb4/sge.c 		else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
skb              2289 drivers/net/ethernet/chelsio/cxgb4/sge.c 				       (dma_addr_t *)skb->head)) {
skb              2294 drivers/net/ethernet/chelsio/cxgb4/sge.c 			int last_desc, hdr_len = skb_transport_offset(skb);
skb              2302 drivers/net/ethernet/chelsio/cxgb4/sge.c 			pos = (void *)inline_tx_skb_header(skb, &q->q,
skb              2320 drivers/net/ethernet/chelsio/cxgb4/sge.c 			cxgb4_write_sgl(skb, &q->q, (void *)pos,
skb              2322 drivers/net/ethernet/chelsio/cxgb4/sge.c 					(dma_addr_t *)skb->head);
skb              2324 drivers/net/ethernet/chelsio/cxgb4/sge.c 			skb->dev = q->adap->port[0];
skb              2325 drivers/net/ethernet/chelsio/cxgb4/sge.c 			skb->destructor = deferred_unmap_destructor;
skb              2330 drivers/net/ethernet/chelsio/cxgb4/sge.c 			q->q.sdesc[last_desc].skb = skb;
skb              2346 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__skb_unlink(skb, &q->sendq);
skb              2347 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (is_ofld_imm(skb))
skb              2348 drivers/net/ethernet/chelsio/cxgb4/sge.c 			kfree_skb(skb);
skb              2366 drivers/net/ethernet/chelsio/cxgb4/sge.c static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
skb              2368 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
skb              2379 drivers/net/ethernet/chelsio/cxgb4/sge.c 	__skb_queue_tail(&q->sendq, skb);
skb              2410 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline unsigned int skb_txq(const struct sk_buff *skb)
skb              2412 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return skb->queue_mapping >> 1;
skb              2422 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
skb              2424 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return skb->queue_mapping & 1;
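
On cxgb4 the routing key moved from skb->priority to skb->queue_mapping: set_wr_txq() (cxgb4_uld.h above) packs (queue << 1) | prio, and skb_txq()/is_ctrl_pkt() unpack it in uld_send() to choose between a control queue and an offload queue. A round-trip check of that encoding; the control-priority constant is restated here as an assumption.

	/* Round-trip sketch for the (queue << 1) | prio encoding used by
	 * set_wr_txq(), skb_txq() and is_ctrl_pkt().
	 */
	#include <linux/skbuff.h>

	#define PRIO_CONTROL 1	/* odd prio = control packet; assumption */

	static inline void wr_txq_roundtrip(struct sk_buff *skb,
					    unsigned int queue)
	{
		unsigned int idx, ctrl;

		skb_set_queue_mapping(skb, (queue << 1) | PRIO_CONTROL);

		/* what uld_send() recovers: */
		idx  = skb->queue_mapping >> 1;	/* == queue */
		ctrl = skb->queue_mapping & 1;	/* == PRIO_CONTROL */
		(void)idx;
		(void)ctrl;
	}
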
skb              2427 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
skb              2432 drivers/net/ethernet/chelsio/cxgb4/sge.c 	unsigned int idx = skb_txq(skb);
skb              2434 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (unlikely(is_ctrl_pkt(skb))) {
skb              2438 drivers/net/ethernet/chelsio/cxgb4/sge.c 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
skb              2448 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return ofld_xmit(txq, skb);
skb              2460 drivers/net/ethernet/chelsio/cxgb4/sge.c int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
skb              2465 drivers/net/ethernet/chelsio/cxgb4/sge.c 	ret = uld_send(adap, skb, CXGB4_TX_OFLD);
skb              2478 drivers/net/ethernet/chelsio/cxgb4/sge.c int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
skb              2480 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return t4_ofld_send(netdev2adap(dev), skb);
skb              2590 drivers/net/ethernet/chelsio/cxgb4/sge.c static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
skb              2595 drivers/net/ethernet/chelsio/cxgb4/sge.c 	ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
skb              2608 drivers/net/ethernet/chelsio/cxgb4/sge.c int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
skb              2610 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return t4_crypto_send(netdev2adap(dev), skb);
skb              2614 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline void copy_frags(struct sk_buff *skb,
skb              2620 drivers/net/ethernet/chelsio/cxgb4/sge.c 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
skb              2623 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_shinfo(skb)->nr_frags = gl->nfrags;
skb              2625 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
skb              2645 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct sk_buff *skb;
skb              2653 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb = dev_alloc_skb(gl->tot_len);
skb              2654 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (unlikely(!skb))
skb              2656 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__skb_put(skb, gl->tot_len);
skb              2657 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
skb              2659 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb = dev_alloc_skb(skb_len);
skb              2660 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (unlikely(!skb))
skb              2662 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__skb_put(skb, pull_len);
skb              2663 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_copy_to_linear_data(skb, gl->va, pull_len);
skb              2665 drivers/net/ethernet/chelsio/cxgb4/sge.c 		copy_frags(skb, gl, pull_len);
skb              2666 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb->len = gl->tot_len;
skb              2667 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb->data_len = skb->len - pull_len;
skb              2668 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb->truesize += skb->data_len;
skb              2670 drivers/net/ethernet/chelsio/cxgb4/sge.c out:	return skb;
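
copy_frags() and cxgb4_pktgl_to_skb() above build an skb from a free-list gather: small packets are copied whole into the linear area, larger ones get pull_len bytes of headers copied linearly with the rest attached as page fragments, and len/data_len/truesize fixed up to match. A reduced sketch of the header-pull case; the gather-list layout is an assumption, the first fragment is assumed to hold at least pull_len bytes, and page references are assumed to come with the gather list.

	/* Sketch of the header-pull skb construction in cxgb4_pktgl_to_skb(). */
	#include <linux/skbuff.h>

	struct gl_frag { struct page *page; unsigned int offset, size; };

	static struct sk_buff *gl_to_skb(const struct gl_frag *frags,
					 unsigned int nfrags, const void *va,
					 unsigned int tot_len,
					 unsigned int pull_len)
	{
		struct sk_buff *skb = dev_alloc_skb(pull_len);
		unsigned int i;

		if (unlikely(!skb))
			return NULL;

		/* copy the protocol headers into the linear area ... */
		skb_put_data(skb, va, pull_len);

		/* ... and attach the payload as page fragments */
		skb_fill_page_desc(skb, 0, frags[0].page,
				   frags[0].offset + pull_len,
				   frags[0].size - pull_len);
		for (i = 1; i < nfrags; i++)
			skb_fill_page_desc(skb, i, frags[i].page,
					   frags[i].offset, frags[i].size);

		skb->len = tot_len;
		skb->data_len = tot_len - pull_len;
		skb->truesize += skb->data_len;
		return skb;
	}
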
skb              2697 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct sk_buff *skb;
skb              2699 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
skb              2700 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (unlikely(!skb)) {
skb              2706 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
skb              2708 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
skb              2710 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_reset_mac_header(skb);
skb              2711 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb->protocol = htons(0xffff);
skb              2712 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb->dev = adap->port[0];
skb              2713 drivers/net/ethernet/chelsio/cxgb4/sge.c 	netif_receive_skb(skb);
skb              2746 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct sk_buff *skb;
skb              2748 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb = napi_get_frags(&rxq->rspq.napi);
skb              2749 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (unlikely(!skb)) {
skb              2755 drivers/net/ethernet/chelsio/cxgb4/sge.c 	copy_frags(skb, gl, s->pktshift);
skb              2757 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb->csum_level = 1;
skb              2758 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb->len = gl->tot_len - s->pktshift;
skb              2759 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb->data_len = skb->len;
skb              2760 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb->truesize += skb->data_len;
skb              2761 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2762 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_record_rx_queue(skb, rxq->rspq.idx);
skb              2763 drivers/net/ethernet/chelsio/cxgb4/sge.c 	pi = netdev_priv(skb->dev);
skb              2765 drivers/net/ethernet/chelsio/cxgb4/sge.c 		cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
skb              2768 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
skb              2772 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
skb              2799 drivers/net/ethernet/chelsio/cxgb4/sge.c 					 struct sk_buff *skb)
skb              2806 drivers/net/ethernet/chelsio/cxgb4/sge.c 	cpl = (struct cpl_rx_mps_pkt *)skb->data;
skb              2811 drivers/net/ethernet/chelsio/cxgb4/sge.c 	data = skb->data + sizeof(*cpl);
skb              2812 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
skb              2813 drivers/net/ethernet/chelsio/cxgb4/sge.c 	offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
skb              2814 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
skb              2817 drivers/net/ethernet/chelsio/cxgb4/sge.c 	hwtstamps = skb_hwtstamps(skb);
skb              2833 drivers/net/ethernet/chelsio/cxgb4/sge.c 			  struct sge_eth_rxq *rxq, struct sk_buff *skb)
skb              2839 drivers/net/ethernet/chelsio/cxgb4/sge.c 		ret = t4_systim_to_hwstamp(adapter, skb);
skb              2841 drivers/net/ethernet/chelsio/cxgb4/sge.c 			kfree_skb(skb);
skb              2857 drivers/net/ethernet/chelsio/cxgb4/sge.c static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
skb              2864 drivers/net/ethernet/chelsio/cxgb4/sge.c 		kfree_skb(skb);
skb              2930 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct sk_buff *skb;
skb              2975 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
skb              2976 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (unlikely(!skb)) {
skb              2985 drivers/net/ethernet/chelsio/cxgb4/sge.c 		ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
skb              2990 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__skb_pull(skb, s->pktshift); /* remove ethernet header pad */
skb              2995 drivers/net/ethernet/chelsio/cxgb4/sge.c 		     cxgb4_ptp_is_ptp_rx(skb))) {
skb              2996 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (!t4_tx_hststamp(adapter, skb, q->netdev))
skb              3000 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb->protocol = eth_type_trans(skb, q->netdev);
skb              3001 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_record_rx_queue(skb, q->idx);
skb              3002 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (skb->dev->features & NETIF_F_RXHASH)
skb              3003 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
skb              3009 drivers/net/ethernet/chelsio/cxgb4/sge.c 		cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
skb              3013 drivers/net/ethernet/chelsio/cxgb4/sge.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3017 drivers/net/ethernet/chelsio/cxgb4/sge.c 			skb->csum = csum_unfold(c);
skb              3020 drivers/net/ethernet/chelsio/cxgb4/sge.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3021 drivers/net/ethernet/chelsio/cxgb4/sge.c 				skb->csum_level = 1;
skb              3023 drivers/net/ethernet/chelsio/cxgb4/sge.c 				skb->ip_summed = CHECKSUM_COMPLETE;
skb              3028 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_checksum_none_assert(skb);
skb              3042 drivers/net/ethernet/chelsio/cxgb4/sge.c 					skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3051 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
skb              3054 drivers/net/ethernet/chelsio/cxgb4/sge.c 	skb_mark_napi_id(skb, &q->napi);
skb              3055 drivers/net/ethernet/chelsio/cxgb4/sge.c 	netif_receive_skb(skb);
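
The RX completion path above ends in the usual checksum verdict ladder: CHECKSUM_UNNECESSARY when the hardware verified the L4 sum, CHECKSUM_COMPLETE with the raw adapter sum folded in via csum_unfold(), and skb_checksum_none_assert() otherwise. A compact restatement; the flag parameters are placeholders for the RSPD/CPL bits the driver actually tests.

	/* Sketch of the RX checksum verdict ladder in the completion handler. */
	#include <linux/skbuff.h>
	#include <net/checksum.h>

	static void rx_csum_verdict(struct sk_buff *skb, bool hw_verified,
				    bool have_raw_sum, __be16 raw_sum)
	{
		if (hw_verified) {		/* L4 checksum checked ok */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if (have_raw_sum) {	/* hw summed the payload */
			skb->csum = csum_unfold((__force __sum16)raw_sum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else {
			skb_checksum_none_assert(skb);	/* stack verifies */
		}
	}
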
skb               142 drivers/net/ethernet/chelsio/cxgb4/smt.c 	struct sk_buff *skb;
skb               148 drivers/net/ethernet/chelsio/cxgb4/smt.c 		skb = alloc_skb(size, GFP_ATOMIC);
skb               149 drivers/net/ethernet/chelsio/cxgb4/smt.c 		if (!skb)
skb               154 drivers/net/ethernet/chelsio/cxgb4/smt.c 		req = (struct cpl_smt_write_req *)__skb_put(skb, size);
skb               184 drivers/net/ethernet/chelsio/cxgb4/smt.c 		skb = alloc_skb(size, GFP_ATOMIC);
skb               185 drivers/net/ethernet/chelsio/cxgb4/smt.c 		if (!skb)
skb               188 drivers/net/ethernet/chelsio/cxgb4/smt.c 		t6req = (struct cpl_t6_smt_write_req *)__skb_put(skb, size);
skb               204 drivers/net/ethernet/chelsio/cxgb4/smt.c 	t4_mgmt_tx(adapter, skb);
skb                73 drivers/net/ethernet/chelsio/cxgb4/srq.c 	struct sk_buff *skb;
skb                83 drivers/net/ethernet/chelsio/cxgb4/srq.c 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
skb                84 drivers/net/ethernet/chelsio/cxgb4/srq.c 	if (!skb)
skb                87 drivers/net/ethernet/chelsio/cxgb4/srq.c 		__skb_put_zero(skb, sizeof(*req));
skb                97 drivers/net/ethernet/chelsio/cxgb4/srq.c 	t4_mgmt_tx(adap, skb);
skb               158 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct sk_buff *skb;		/* socket buffer of TX data source */
skb               277 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static int map_skb(struct device *dev, const struct sk_buff *skb,
skb               283 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
skb               287 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	si = skb_shinfo(skb);
skb               300 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
skb               306 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
skb               310 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
skb               312 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (likely(skb_headlen(skb)))
skb               392 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if (sdesc->skb) {
skb               394 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
skb               395 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			dev_consume_skb_any(sdesc->skb);
skb               396 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			sdesc->skb = NULL;
skb               832 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static inline int is_eth_imm(const struct sk_buff *skb)
skb               851 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
skb               860 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (is_eth_imm(skb))
skb               861 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
skb               873 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
skb               874 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (skb_shinfo(skb)->gso_size)
skb               901 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
skb               907 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	const struct skb_shared_info *si = skb_shinfo(skb);
skb               911 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	len = skb_headlen(skb) - start;
skb              1052 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
skb              1058 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (likely(skb->len <= left)) {
skb              1059 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if (likely(!skb->data_len))
skb              1060 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			skb_copy_from_linear_data(skb, pos, skb->len);
skb              1062 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			skb_copy_bits(skb, 0, pos, skb->len);
skb              1063 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		pos += skb->len;
skb              1065 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb_copy_bits(skb, 0, pos, left);
skb              1066 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb_copy_bits(skb, left, tq->desc, skb->len - left);
skb              1067 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		pos = (void *)tq->desc + (skb->len - left);
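
inline_tx_skb() handles the case where an inlined packet crosses the end of the descriptor ring. A sketch of just the wrap-around copy, with the ring described by hypothetical base/size parameters:

#include <linux/skbuff.h>

static void inline_copy(const struct sk_buff *skb, void *ring_base,
			size_t ring_size, void *pos)
{
	size_t left = ring_base + ring_size - pos;	/* bytes to ring end */

	if (skb->len <= left) {
		if (!skb->data_len)	/* fully linear: fast path */
			skb_copy_from_linear_data(skb, pos, skb->len);
		else			/* gather from page frags too */
			skb_copy_bits(skb, 0, pos, skb->len);
	} else {
		/* split the copy across the ring boundary */
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, ring_base, skb->len - left);
	}
}
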
skb              1080 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
skb              1083 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	const struct iphdr *iph = ip_hdr(skb);
skb              1113 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
skb              1114 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
skb              1122 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		int start = skb_transport_offset(skb);
skb              1126 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
skb              1157 drivers/net/ethernet/chelsio/cxgb4vf/sge.c int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1181 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (unlikely(skb->len < fw_hdr_copy_len))
skb              1186 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (skb_vlan_tagged(skb))
skb              1188 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
skb              1196 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	qidx = skb_get_queue_mapping(skb);
skb              1200 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (pi->vlan_id && !skb_vlan_tag_present(skb))
skb              1201 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
skb              1215 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	flits = calc_tx_flits(skb);
skb              1233 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (!is_eth_imm(skb) &&
skb              1234 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	    unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
skb              1270 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
skb              1278 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	ssi = skb_shinfo(skb);
skb              1282 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		int l3hdr_len = skb_network_header_len(skb);
skb              1283 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
skb              1299 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
skb              1304 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			lso->len = cpu_to_be32(skb->len);
skb              1306 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
skb              1327 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
skb              1337 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1338 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			cntrl = hwcsum(adapter->params.chip, skb) |
skb              1349 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (skb_vlan_tag_present(skb)) {
skb              1351 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
skb              1361 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	cpl->len = cpu_to_be16(skb->len);
skb              1367 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
skb              1374 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (is_eth_imm(skb)) {
skb              1379 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		inline_tx_skb(skb, &txq->q, cpl + 1);
skb              1380 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		dev_consume_skb_any(skb);
skb              1435 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		write_sgl(skb, tq, sgl, end, 0, addr);
skb              1436 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb_orphan(skb);
skb              1441 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		tq->sdesc[last_desc].skb = skb;
skb              1459 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	dev_kfree_skb_any(skb);
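
t4vf_eth_xmit() above chooses between inlining small frames into the work request (is_eth_imm(), freed immediately with dev_consume_skb_any()) and DMA-mapping larger ones (skb retained in tq->sdesc until the TX completion). A skeleton of that decision, with hw_tx_inline()/hw_tx_mapped() as hypothetical stand-ins for the descriptor-ring writes:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* hypothetical driver hooks, not real cxgb4vf functions */
bool hw_tx_inline(struct net_device *dev, struct sk_buff *skb);
int  hw_tx_mapped(struct net_device *dev, struct sk_buff *skb);

static netdev_tx_t sketch_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (hw_tx_inline(dev, skb)) {
		skb_tx_timestamp(skb);
		dev_consume_skb_any(skb);	/* data copied into the ring */
		return NETDEV_TX_OK;
	}

	if (hw_tx_mapped(dev, skb) < 0) {
		dev_kfree_skb_any(skb);		/* mapping failed: drop */
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);
	return NETDEV_TX_OK;	/* skb is freed from the completion path */
}
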
skb              1472 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static inline void copy_frags(struct sk_buff *skb,
skb              1479 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
skb              1482 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb_shinfo(skb)->nr_frags = gl->nfrags;
skb              1484 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
skb              1505 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct sk_buff *skb;
skb              1520 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
skb              1521 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if (unlikely(!skb))
skb              1523 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		__skb_put(skb, gl->tot_len);
skb              1524 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
skb              1526 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb = alloc_skb(skb_len, GFP_ATOMIC);
skb              1527 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if (unlikely(!skb))
skb              1529 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		__skb_put(skb, pull_len);
skb              1530 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb_copy_to_linear_data(skb, gl->va, pull_len);
skb              1532 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		copy_frags(skb, gl, pull_len);
skb              1533 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb->len = gl->tot_len;
skb              1534 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb->data_len = skb->len - pull_len;
skb              1535 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb->truesize += skb->data_len;
skb              1539 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	return skb;
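
t4vf_pktgl_to_skb() is a classic copybreak: frames up to a threshold are copied whole into a linear skb, while larger frames get only the header pulled and the payload attached as page fragments. A simplified single-page sketch (the pkt_gl fields here are a hypothetical reduction, and page reference accounting is omitted):

#include <linux/skbuff.h>

struct pkt_gl {			/* simplified gather list */
	void *va;		/* virtual address of the packet start */
	struct page *page;	/* backing page */
	unsigned int offset;	/* packet offset within the page */
	unsigned int tot_len;	/* total packet length */
};

static struct sk_buff *gl_to_skb(const struct pkt_gl *gl,
				 unsigned int copy_thres, unsigned int pull)
{
	struct sk_buff *skb;

	if (gl->tot_len <= copy_thres) {	/* small: copy it all */
		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
		if (unlikely(!skb))
			return NULL;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {				/* large: pull header only */
		skb = alloc_skb(pull, GFP_ATOMIC);
		if (unlikely(!skb))
			return NULL;
		__skb_put(skb, pull);
		skb_copy_to_linear_data(skb, gl->va, pull);
		skb_fill_page_desc(skb, 0, gl->page, gl->offset + pull,
				   gl->tot_len - pull);
		skb->len = gl->tot_len;
		skb->data_len = gl->tot_len - pull;
		skb->truesize += skb->data_len;
	}
	return skb;
}
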
skb              1574 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct sk_buff *skb;
skb              1576 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb = napi_get_frags(&rxq->rspq.napi);
skb              1577 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (unlikely(!skb)) {
skb              1583 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	copy_frags(skb, gl, s->pktshift);
skb              1584 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb->len = gl->tot_len - s->pktshift;
skb              1585 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb->data_len = skb->len;
skb              1586 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb->truesize += skb->data_len;
skb              1587 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1588 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb_record_rx_queue(skb, rxq->rspq.idx);
skb              1589 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	pi = netdev_priv(skb->dev);
skb              1592 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
skb              1617 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	struct sk_buff *skb;
skb              1640 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
skb              1641 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (unlikely(!skb)) {
skb              1646 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	__skb_pull(skb, s->pktshift);
skb              1647 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb->protocol = eth_type_trans(skb, rspq->netdev);
skb              1648 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	skb_record_rx_queue(skb, rspq->idx);
skb              1649 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	pi = netdev_priv(skb->dev);
skb              1655 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1659 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			skb->csum = csum_unfold(c);
skb              1660 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			skb->ip_summed = CHECKSUM_COMPLETE;
skb              1664 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		skb_checksum_none_assert(skb);
skb              1668 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              1672 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	netif_receive_skb(skb);
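
The do_gro() lines show the fragment-based GRO receive path: the skb is borrowed from the NAPI context with napi_get_frags(), and delivery completes through napi_gro_frags() rather than netif_receive_skb(). A sketch for a single-fragment packet (the driver's pktshift trimming is omitted):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void gro_rx_one(struct napi_struct *napi, struct page *page,
		       unsigned int offset, unsigned int len, u16 rxq_idx)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;		/* caller must drop/recycle the buffer */

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len = len;
	skb->data_len = len;	/* all data lives in the fragment */
	skb->truesize += len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* HW verified it */
	skb_record_rx_queue(skb, rxq_idx);
	napi_gro_frags(napi);	/* consumes the skb */
}
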
skb                89 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
skb                93 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	req = __skb_put_zero(skb, len);
skb                97 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
skb               101 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
skb               106 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	req = __skb_put_zero(skb, len);
skb               110 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
skb               111 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	t4_set_arp_err_handler(skb, handle, handler);
skb               115 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
skb               120 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	req = __skb_put_zero(skb, len);
skb               125 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
skb               126 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	t4_set_arp_err_handler(skb, handle, handler);
skb               130 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
skb               134 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	rpl = __skb_put_zero(skb, len);
skb               139 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
skb               143 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
skb               148 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	req = __skb_put_zero(skb, len);
skb               153 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h 	set_wr_txq(skb, CPL_PRIORITY_ACK, chan);
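
The libcxgb_cm.h helpers fill a caller-supplied skb and tag it with a TX-queue priority class; allocation and transmission stay with the caller. A hypothetical caller of cxgb_mk_tid_release(); the 16-byte round-up and cxgb4_ofld_send() mirror common cxgb4 usage but are assumptions here:

static int release_hw_tid(struct net_device *dev, u32 tid, u16 chan)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	cxgb_mk_tid_release(skb, len, tid, chan);  /* fill + set priority */
	return cxgb4_ofld_send(dev, skb);
}
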
skb               420 drivers/net/ethernet/cirrus/cs89x0.c 	struct sk_buff *skb;
skb               437 drivers/net/ethernet/cirrus/cs89x0.c 	skb = netdev_alloc_skb(dev, length + 2);
skb               438 drivers/net/ethernet/cirrus/cs89x0.c 	if (skb == NULL) {
skb               449 drivers/net/ethernet/cirrus/cs89x0.c 	skb_reserve(skb, 2);	/* longword align L3 header */
skb               453 drivers/net/ethernet/cirrus/cs89x0.c 		skb_put_data(skb, bp, semi_cnt);
skb               454 drivers/net/ethernet/cirrus/cs89x0.c 		skb_put_data(skb, lp->dma_buff, length - semi_cnt);
skb               456 drivers/net/ethernet/cirrus/cs89x0.c 		skb_put_data(skb, bp, length);
skb               465 drivers/net/ethernet/cirrus/cs89x0.c 		 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
skb               466 drivers/net/ethernet/cirrus/cs89x0.c 		  skb->data[ETH_ALEN + ETH_ALEN + 1]));
skb               468 drivers/net/ethernet/cirrus/cs89x0.c 	skb->protocol = eth_type_trans(skb, dev);
skb               469 drivers/net/ethernet/cirrus/cs89x0.c 	netif_rx(skb);
skb               674 drivers/net/ethernet/cirrus/cs89x0.c 	struct sk_buff *skb;
skb               686 drivers/net/ethernet/cirrus/cs89x0.c 	skb = netdev_alloc_skb(dev, length + 2);
skb               687 drivers/net/ethernet/cirrus/cs89x0.c 	if (skb == NULL) {
skb               691 drivers/net/ethernet/cirrus/cs89x0.c 	skb_reserve(skb, 2);	/* longword align L3 header */
skb               693 drivers/net/ethernet/cirrus/cs89x0.c 	readwords(lp, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
skb               695 drivers/net/ethernet/cirrus/cs89x0.c 		skb->data[length-1] = ioread16(lp->virt_addr + RX_FRAME_PORT);
skb               699 drivers/net/ethernet/cirrus/cs89x0.c 		 (skb->data[ETH_ALEN + ETH_ALEN] << 8) |
skb               700 drivers/net/ethernet/cirrus/cs89x0.c 		 skb->data[ETH_ALEN + ETH_ALEN + 1]);
skb               702 drivers/net/ethernet/cirrus/cs89x0.c 	skb->protocol = eth_type_trans(skb, dev);
skb               703 drivers/net/ethernet/cirrus/cs89x0.c 	netif_rx(skb);
skb              1142 drivers/net/ethernet/cirrus/cs89x0.c static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
skb              1148 drivers/net/ethernet/cirrus/cs89x0.c 		 dev->name, skb->len,
skb              1149 drivers/net/ethernet/cirrus/cs89x0.c 		 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
skb              1150 drivers/net/ethernet/cirrus/cs89x0.c 		  skb->data[ETH_ALEN + ETH_ALEN + 1]));
skb              1162 drivers/net/ethernet/cirrus/cs89x0.c 	iowrite16(skb->len, lp->virt_addr + TX_LEN_PORT);
skb              1175 drivers/net/ethernet/cirrus/cs89x0.c 	writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
skb              1177 drivers/net/ethernet/cirrus/cs89x0.c 	dev->stats.tx_bytes += skb->len;
skb              1178 drivers/net/ethernet/cirrus/cs89x0.c 	dev_consume_skb_any(skb);
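
The cs89x0 receive paths follow the canonical legacy PIO pattern: over-allocate by two bytes, skb_reserve(skb, 2) so the 14-byte Ethernet header leaves the IP header 4-byte aligned, copy the frame in, classify with eth_type_trans(), and hand it to netif_rx(). A compact sketch, with read_frame() a hypothetical device-copy helper:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

void read_frame(struct net_device *dev, void *buf, unsigned int len);

static void legacy_rx(struct net_device *dev, unsigned int length)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, length + 2);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);			/* longword align L3 header */
	read_frame(dev, skb_put(skb, length), length);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}
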
skb               238 drivers/net/ethernet/cirrus/ep93xx_eth.c 		struct sk_buff *skb;
skb               281 drivers/net/ethernet/cirrus/ep93xx_eth.c 		skb = netdev_alloc_skb(dev, length + 2);
skb               282 drivers/net/ethernet/cirrus/ep93xx_eth.c 		if (likely(skb != NULL)) {
skb               284 drivers/net/ethernet/cirrus/ep93xx_eth.c 			skb_reserve(skb, 2);
skb               287 drivers/net/ethernet/cirrus/ep93xx_eth.c 			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
skb               291 drivers/net/ethernet/cirrus/ep93xx_eth.c 			skb_put(skb, length);
skb               292 drivers/net/ethernet/cirrus/ep93xx_eth.c 			skb->protocol = eth_type_trans(skb, dev);
skb               294 drivers/net/ethernet/cirrus/ep93xx_eth.c 			napi_gro_receive(&ep->napi, skb);
skb               331 drivers/net/ethernet/cirrus/ep93xx_eth.c static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
skb               337 drivers/net/ethernet/cirrus/ep93xx_eth.c 	if (unlikely(skb->len > MAX_PKT_SIZE)) {
skb               339 drivers/net/ethernet/cirrus/ep93xx_eth.c 		dev_kfree_skb(skb);
skb               348 drivers/net/ethernet/cirrus/ep93xx_eth.c 	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
skb               349 drivers/net/ethernet/cirrus/ep93xx_eth.c 	dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
skb               351 drivers/net/ethernet/cirrus/ep93xx_eth.c 	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
skb               352 drivers/net/ethernet/cirrus/ep93xx_eth.c 	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
skb               354 drivers/net/ethernet/cirrus/ep93xx_eth.c 	dev_kfree_skb(skb);
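
ep93xx_xmit() copies into a persistently mapped bounce buffer, so it brackets the CPU copy with dma_sync_single_for_cpu()/dma_sync_single_for_device() to transfer buffer ownership; skb_copy_and_csum_dev() also finishes a pending CHECKSUM_PARTIAL checksum during the copy. That step in isolation:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static void tx_bounce_copy(struct device *dma_dev, struct sk_buff *skb,
			   void *buf, dma_addr_t buf_dma)
{
	dma_sync_single_for_cpu(dma_dev, buf_dma, skb->len, DMA_TO_DEVICE);
	skb_copy_and_csum_dev(skb, buf);	/* copy + finish checksum */
	dma_sync_single_for_device(dma_dev, buf_dma, skb->len, DMA_TO_DEVICE);
	dev_kfree_skb(skb);	/* frame now lives in the bounce buffer */
}
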
skb               116 drivers/net/ethernet/cirrus/mac89x0.c static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
skb               328 drivers/net/ethernet/cirrus/mac89x0.c net_send_packet(struct sk_buff *skb, struct net_device *dev)
skb               334 drivers/net/ethernet/cirrus/mac89x0.c 		  skb->len, skb->data[ETH_ALEN + ETH_ALEN] << 8 |
skb               335 drivers/net/ethernet/cirrus/mac89x0.c 		  skb->data[ETH_ALEN + ETH_ALEN + 1]);
skb               345 drivers/net/ethernet/cirrus/mac89x0.c 	writereg(dev, PP_TxLength, skb->len);
skb               356 drivers/net/ethernet/cirrus/mac89x0.c 	skb_copy_from_linear_data(skb, (void *)(dev->mem_start + PP_TxFrame),
skb               357 drivers/net/ethernet/cirrus/mac89x0.c 				  skb->len+1);
skb               360 drivers/net/ethernet/cirrus/mac89x0.c 	dev_kfree_skb (skb);
skb               436 drivers/net/ethernet/cirrus/mac89x0.c 	struct sk_buff *skb;
skb               457 drivers/net/ethernet/cirrus/mac89x0.c 	skb = alloc_skb(length, GFP_ATOMIC);
skb               458 drivers/net/ethernet/cirrus/mac89x0.c 	if (skb == NULL) {
skb               462 drivers/net/ethernet/cirrus/mac89x0.c 	skb_put(skb, length);
skb               464 drivers/net/ethernet/cirrus/mac89x0.c 	skb_copy_to_linear_data(skb, (void *)(dev->mem_start + PP_RxFrame),
skb               468 drivers/net/ethernet/cirrus/mac89x0.c 		  length, skb->data[ETH_ALEN + ETH_ALEN] << 8 |
skb               469 drivers/net/ethernet/cirrus/mac89x0.c 		  skb->data[ETH_ALEN + ETH_ALEN + 1]);
skb               471 drivers/net/ethernet/cirrus/mac89x0.c         skb->protocol=eth_type_trans(skb,dev);
skb               472 drivers/net/ethernet/cirrus/mac89x0.c 	netif_rx(skb);
skb               171 drivers/net/ethernet/cisco/enic/enic_clsf.c int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb               181 drivers/net/ethernet/cisco/enic/enic_clsf.c 	res = skb_flow_dissect_flow_keys(skb, &keys, 0);
skb               187 drivers/net/ethernet/cisco/enic/enic_clsf.c 	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
skb                17 drivers/net/ethernet/cisco/enic/enic_clsf.h int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
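
enic_rx_flow_steer() dissects the skb into its flow keys and uses the precomputed RX hash as a flow-table index. The core lookup in isolation (the table mask is a hypothetical parameter standing in for ENIC_RFS_FLW_MASK):

#include <linux/skbuff.h>
#include <net/flow_dissector.h>

static u32 flow_table_index(const struct sk_buff *skb, u32 table_mask,
			    struct flow_keys *keys)
{
	if (!skb_flow_dissect_flow_keys(skb, keys, 0))
		return ~0U;		/* not a dissectable flow */
	return skb_get_hash_raw(skb) & table_mask;
}
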
skb               278 drivers/net/ethernet/cisco/enic/enic_main.c static netdev_features_t enic_features_check(struct sk_buff *skb,
skb               282 drivers/net/ethernet/cisco/enic/enic_main.c 	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
skb               288 drivers/net/ethernet/cisco/enic/enic_main.c 	if (!skb->encapsulation)
skb               291 drivers/net/ethernet/cisco/enic/enic_main.c 	features = vxlan_features_check(skb, features);
skb               293 drivers/net/ethernet/cisco/enic/enic_main.c 	switch (vlan_get_protocol(skb)) {
skb               297 drivers/net/ethernet/cisco/enic/enic_main.c 		proto = ipv6_hdr(skb)->nexthdr;
skb               300 drivers/net/ethernet/cisco/enic/enic_main.c 		proto = ip_hdr(skb)->protocol;
skb               319 drivers/net/ethernet/cisco/enic/enic_main.c 		udph = udp_hdr(skb);
skb               580 drivers/net/ethernet/cisco/enic/enic_main.c 				  struct sk_buff *skb, unsigned int len_left,
skb               587 drivers/net/ethernet/cisco/enic/enic_main.c 	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
skb               594 drivers/net/ethernet/cisco/enic/enic_main.c 		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
skb               603 drivers/net/ethernet/cisco/enic/enic_main.c 				  struct sk_buff *skb, int vlan_tag_insert,
skb               606 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int head_len = skb_headlen(skb);
skb               607 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int len_left = skb->len - head_len;
skb               612 drivers/net/ethernet/cisco/enic/enic_main.c 	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
skb               622 drivers/net/ethernet/cisco/enic/enic_main.c 	enic_queue_wq_desc(wq, skb, dma_addr, head_len,	vlan_tag_insert,
skb               626 drivers/net/ethernet/cisco/enic/enic_main.c 		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
skb               632 drivers/net/ethernet/cisco/enic/enic_main.c 				     struct sk_buff *skb, int vlan_tag_insert,
skb               635 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int head_len = skb_headlen(skb);
skb               636 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int len_left = skb->len - head_len;
skb               637 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int hdr_len = skb_checksum_start_offset(skb);
skb               638 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int csum_offset = hdr_len + skb->csum_offset;
skb               643 drivers/net/ethernet/cisco/enic/enic_main.c 	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
skb               653 drivers/net/ethernet/cisco/enic/enic_main.c 	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len,	csum_offset,
skb               658 drivers/net/ethernet/cisco/enic/enic_main.c 		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
skb               663 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
skb               665 drivers/net/ethernet/cisco/enic/enic_main.c 	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
skb               669 drivers/net/ethernet/cisco/enic/enic_main.c 		inner_ip_hdr(skb)->check = 0;
skb               670 drivers/net/ethernet/cisco/enic/enic_main.c 		inner_tcp_hdr(skb)->check =
skb               671 drivers/net/ethernet/cisco/enic/enic_main.c 			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
skb               672 drivers/net/ethernet/cisco/enic/enic_main.c 					   inner_ip_hdr(skb)->daddr, 0,
skb               676 drivers/net/ethernet/cisco/enic/enic_main.c 		inner_tcp_hdr(skb)->check =
skb               677 drivers/net/ethernet/cisco/enic/enic_main.c 			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
skb               678 drivers/net/ethernet/cisco/enic/enic_main.c 					 &inner_ipv6_hdr(skb)->daddr, 0,
skb               687 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_preload_tcp_csum(struct sk_buff *skb)
skb               694 drivers/net/ethernet/cisco/enic/enic_main.c 	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
skb               695 drivers/net/ethernet/cisco/enic/enic_main.c 		ip_hdr(skb)->check = 0;
skb               696 drivers/net/ethernet/cisco/enic/enic_main.c 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
skb               697 drivers/net/ethernet/cisco/enic/enic_main.c 			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
skb               698 drivers/net/ethernet/cisco/enic/enic_main.c 	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
skb               699 drivers/net/ethernet/cisco/enic/enic_main.c 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb               700 drivers/net/ethernet/cisco/enic/enic_main.c 			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
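
Before TSO, enic preloads the TCP checksum field with the pseudo-header sum (length zero) so the hardware can finalize it per segment; for IPv4 the IP checksum is zeroed for hardware recomputation. The non-encapsulated variant, consolidated from the fragments above:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

static void preload_tcp_pseudo_csum(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;		/* HW recomputes IPv4 csum */
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
				ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
}
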
skb               705 drivers/net/ethernet/cisco/enic/enic_main.c 				 struct sk_buff *skb, unsigned int mss,
skb               709 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int frag_len_left = skb_headlen(skb);
skb               710 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int len_left = skb->len - frag_len_left;
skb               718 drivers/net/ethernet/cisco/enic/enic_main.c 	if (skb->encapsulation) {
skb               719 drivers/net/ethernet/cisco/enic/enic_main.c 		hdr_len = skb_inner_transport_header(skb) - skb->data;
skb               720 drivers/net/ethernet/cisco/enic/enic_main.c 		hdr_len += inner_tcp_hdrlen(skb);
skb               721 drivers/net/ethernet/cisco/enic/enic_main.c 		enic_preload_tcp_csum_encap(skb);
skb               723 drivers/net/ethernet/cisco/enic/enic_main.c 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               724 drivers/net/ethernet/cisco/enic/enic_main.c 		enic_preload_tcp_csum(skb);
skb               732 drivers/net/ethernet/cisco/enic/enic_main.c 		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
skb               736 drivers/net/ethernet/cisco/enic/enic_main.c 		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
skb               749 drivers/net/ethernet/cisco/enic/enic_main.c 	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
skb               762 drivers/net/ethernet/cisco/enic/enic_main.c 			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
skb               775 drivers/net/ethernet/cisco/enic/enic_main.c 					  struct sk_buff *skb,
skb               779 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int head_len = skb_headlen(skb);
skb               780 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int len_left = skb->len - head_len;
skb               791 drivers/net/ethernet/cisco/enic/enic_main.c 	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
skb               796 drivers/net/ethernet/cisco/enic/enic_main.c 	enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0,
skb               801 drivers/net/ethernet/cisco/enic/enic_main.c 		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
skb               807 drivers/net/ethernet/cisco/enic/enic_main.c 	struct vnic_wq *wq, struct sk_buff *skb)
skb               809 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int mss = skb_shinfo(skb)->gso_size;
skb               815 drivers/net/ethernet/cisco/enic/enic_main.c 	if (skb_vlan_tag_present(skb)) {
skb               818 drivers/net/ethernet/cisco/enic/enic_main.c 		vlan_tag = skb_vlan_tag_get(skb);
skb               825 drivers/net/ethernet/cisco/enic/enic_main.c 		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
skb               828 drivers/net/ethernet/cisco/enic/enic_main.c 	else if (skb->encapsulation)
skb               829 drivers/net/ethernet/cisco/enic/enic_main.c 		err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
skb               831 drivers/net/ethernet/cisco/enic/enic_main.c 	else if	(skb->ip_summed == CHECKSUM_PARTIAL)
skb               832 drivers/net/ethernet/cisco/enic/enic_main.c 		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
skb               835 drivers/net/ethernet/cisco/enic/enic_main.c 		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
skb               850 drivers/net/ethernet/cisco/enic/enic_main.c 		dev_kfree_skb(skb);
skb               855 drivers/net/ethernet/cisco/enic/enic_main.c static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
skb               863 drivers/net/ethernet/cisco/enic/enic_main.c 	if (skb->len <= 0) {
skb               864 drivers/net/ethernet/cisco/enic/enic_main.c 		dev_kfree_skb_any(skb);
skb               868 drivers/net/ethernet/cisco/enic/enic_main.c 	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
skb               877 drivers/net/ethernet/cisco/enic/enic_main.c 	if (skb_shinfo(skb)->gso_size == 0 &&
skb               878 drivers/net/ethernet/cisco/enic/enic_main.c 	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb               879 drivers/net/ethernet/cisco/enic/enic_main.c 	    skb_linearize(skb)) {
skb               880 drivers/net/ethernet/cisco/enic/enic_main.c 		dev_kfree_skb_any(skb);
skb               887 drivers/net/ethernet/cisco/enic/enic_main.c 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
skb               895 drivers/net/ethernet/cisco/enic/enic_main.c 	enic_queue_wq_skb(enic, wq, skb);
skb               899 drivers/net/ethernet/cisco/enic/enic_main.c 	skb_tx_timestamp(skb);
skb              1220 drivers/net/ethernet/cisco/enic/enic_main.c 	struct sk_buff *skb)
skb              1238 drivers/net/ethernet/cisco/enic/enic_main.c 	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
skb              1239 drivers/net/ethernet/cisco/enic/enic_main.c 	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
skb              1241 drivers/net/ethernet/cisco/enic/enic_main.c 	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
skb              1243 drivers/net/ethernet/cisco/enic/enic_main.c 	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
skb              1246 drivers/net/ethernet/cisco/enic/enic_main.c 	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
skb              1271 drivers/net/ethernet/cisco/enic/enic_main.c 	struct sk_buff *skb;
skb              1283 drivers/net/ethernet/cisco/enic/enic_main.c 	skb = netdev_alloc_skb_ip_align(netdev, len);
skb              1284 drivers/net/ethernet/cisco/enic/enic_main.c 	if (!skb)
skb              1287 drivers/net/ethernet/cisco/enic/enic_main.c 	dma_addr = pci_map_single(enic->pdev, skb->data, len,
skb              1290 drivers/net/ethernet/cisco/enic/enic_main.c 		dev_kfree_skb(skb);
skb              1294 drivers/net/ethernet/cisco/enic/enic_main.c 	enic_queue_rq_desc(rq, skb, os_buf_index,
skb              1309 drivers/net/ethernet/cisco/enic/enic_main.c static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
skb              1322 drivers/net/ethernet/cisco/enic/enic_main.c 	memcpy(new_skb->data, (*skb)->data, len);
skb              1323 drivers/net/ethernet/cisco/enic/enic_main.c 	*skb = new_skb;
skb              1334 drivers/net/ethernet/cisco/enic/enic_main.c 	struct sk_buff *skb;
skb              1349 drivers/net/ethernet/cisco/enic/enic_main.c 	skb = buf->os_buf;
skb              1372 drivers/net/ethernet/cisco/enic/enic_main.c 		dev_kfree_skb_any(skb);
skb              1383 drivers/net/ethernet/cisco/enic/enic_main.c 		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
skb              1388 drivers/net/ethernet/cisco/enic/enic_main.c 		prefetch(skb->data - NET_IP_ALIGN);
skb              1390 drivers/net/ethernet/cisco/enic/enic_main.c 		skb_put(skb, bytes_written);
skb              1391 drivers/net/ethernet/cisco/enic/enic_main.c 		skb->protocol = eth_type_trans(skb, netdev);
skb              1392 drivers/net/ethernet/cisco/enic/enic_main.c 		skb_record_rx_queue(skb, q_number);
skb              1399 drivers/net/ethernet/cisco/enic/enic_main.c 				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
skb              1404 drivers/net/ethernet/cisco/enic/enic_main.c 				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
skb              1439 drivers/net/ethernet/cisco/enic/enic_main.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1440 drivers/net/ethernet/cisco/enic/enic_main.c 			skb->csum_level = encap;
skb              1444 drivers/net/ethernet/cisco/enic/enic_main.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
skb              1446 drivers/net/ethernet/cisco/enic/enic_main.c 		skb_mark_napi_id(skb, &enic->napi[rq->index]);
skb              1448 drivers/net/ethernet/cisco/enic/enic_main.c 			netif_receive_skb(skb);
skb              1450 drivers/net/ethernet/cisco/enic/enic_main.c 			napi_gro_receive(&enic->napi[q_number], skb);
skb              1461 drivers/net/ethernet/cisco/enic/enic_main.c 		dev_kfree_skb_any(skb);
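
enic_rxcopybreak() implements RX copybreak: frames at or below a threshold are copied into a fresh IP-aligned skb so the original large, DMA-mapped buffer can be re-armed; bigger frames are delivered in place. A sketch, with the threshold passed in rather than read from the adapter:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static bool rx_copybreak(struct net_device *netdev, struct sk_buff **skb,
			 unsigned int len, unsigned int copybreak)
{
	struct sk_buff *new_skb;

	if (len > copybreak)
		return false;		/* deliver the original buffer */
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;			/* caller re-arms the original */
	return true;
}
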
skb                97 drivers/net/ethernet/cortina/gemini.c 	struct sk_buff	**skb;
skb               590 drivers/net/ethernet/cortina/gemini.c 		txq->skb = skb_tab;
skb               639 drivers/net/ethernet/cortina/gemini.c 			dev_kfree_skb(txq->skb[c]);
skb               699 drivers/net/ethernet/cortina/gemini.c 	kfree(port->txq->skb);
skb              1137 drivers/net/ethernet/cortina/gemini.c static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
skb              1141 drivers/net/ethernet/cortina/gemini.c 	struct skb_shared_info *skb_si =  skb_shinfo(skb);
skb              1155 drivers/net/ethernet/cortina/gemini.c 	if (skb->protocol == htons(ETH_P_8021Q))
skb              1158 drivers/net/ethernet/cortina/gemini.c 	word1 = skb->len;
skb              1166 drivers/net/ethernet/cortina/gemini.c 	if (skb->ip_summed != CHECKSUM_NONE) {
skb              1169 drivers/net/ethernet/cortina/gemini.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb              1171 drivers/net/ethernet/cortina/gemini.c 			tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
skb              1174 drivers/net/ethernet/cortina/gemini.c 			tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
skb              1183 drivers/net/ethernet/cortina/gemini.c 			buffer = skb->data;
skb              1184 drivers/net/ethernet/cortina/gemini.c 			buflen = skb_headlen(skb);
skb              1193 drivers/net/ethernet/cortina/gemini.c 			txq->skb[w] = skb;
skb              1228 drivers/net/ethernet/cortina/gemini.c static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              1239 drivers/net/ethernet/cortina/gemini.c 	if (skb->len >= 0x10000)
skb              1242 drivers/net/ethernet/cortina/gemini.c 	txq_num = skb_get_queue_mapping(skb);
skb              1246 drivers/net/ethernet/cortina/gemini.c 	nfrags = skb_shinfo(skb)->nr_frags;
skb              1275 drivers/net/ethernet/cortina/gemini.c 	if (gmac_map_tx_bufs(netdev, skb, txq, &w)) {
skb              1276 drivers/net/ethernet/cortina/gemini.c 		if (skb_linearize(skb))
skb              1283 drivers/net/ethernet/cortina/gemini.c 		if (gmac_map_tx_bufs(netdev, skb, txq, &w))
skb              1293 drivers/net/ethernet/cortina/gemini.c 	dev_kfree_skb(skb);
skb              1361 drivers/net/ethernet/cortina/gemini.c 	struct sk_buff *skb = NULL;
skb              1382 drivers/net/ethernet/cortina/gemini.c 	skb = napi_get_frags(&port->napi);
skb              1383 drivers/net/ethernet/cortina/gemini.c 	if (!skb)
skb              1387 drivers/net/ethernet/cortina/gemini.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1392 drivers/net/ethernet/cortina/gemini.c 	return skb;
skb              1404 drivers/net/ethernet/cortina/gemini.c 	static struct sk_buff *skb;
skb              1451 drivers/net/ethernet/cortina/gemini.c 			if (skb) {
skb              1456 drivers/net/ethernet/cortina/gemini.c 			skb = gmac_skb_if_good_frame(port, word0, frame_len);
skb              1457 drivers/net/ethernet/cortina/gemini.c 			if (!skb)
skb              1464 drivers/net/ethernet/cortina/gemini.c 		} else if (!skb) {
skb              1470 drivers/net/ethernet/cortina/gemini.c 			frag_len = frame_len - skb->len;
skb              1479 drivers/net/ethernet/cortina/gemini.c 		skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
skb              1480 drivers/net/ethernet/cortina/gemini.c 		skb->len += frag_len;
skb              1481 drivers/net/ethernet/cortina/gemini.c 		skb->data_len += frag_len;
skb              1482 drivers/net/ethernet/cortina/gemini.c 		skb->truesize += frag_len;
skb              1487 drivers/net/ethernet/cortina/gemini.c 			skb = NULL;
skb              1493 drivers/net/ethernet/cortina/gemini.c 		if (skb) {
skb              1495 drivers/net/ethernet/cortina/gemini.c 			skb = NULL;
skb              1019 drivers/net/ethernet/davicom/dm9000.c dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1034 drivers/net/ethernet/davicom/dm9000.c 	(db->outblk)(db->io_data, skb->data, skb->len);
skb              1035 drivers/net/ethernet/davicom/dm9000.c 	dev->stats.tx_bytes += skb->len;
skb              1040 drivers/net/ethernet/davicom/dm9000.c 		dm9000_send_packet(dev, skb->ip_summed, skb->len);
skb              1043 drivers/net/ethernet/davicom/dm9000.c 		db->queue_pkt_len = skb->len;
skb              1044 drivers/net/ethernet/davicom/dm9000.c 		db->queue_ip_summed = skb->ip_summed;
skb              1051 drivers/net/ethernet/davicom/dm9000.c 	dev_consume_skb_any(skb);
skb              1095 drivers/net/ethernet/davicom/dm9000.c 	struct sk_buff *skb;
skb              1164 drivers/net/ethernet/davicom/dm9000.c 		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
skb              1165 drivers/net/ethernet/davicom/dm9000.c 			skb_reserve(skb, 2);
skb              1166 drivers/net/ethernet/davicom/dm9000.c 			rdptr = skb_put(skb, RxLen - 4);
skb              1174 drivers/net/ethernet/davicom/dm9000.c 			skb->protocol = eth_type_trans(skb, dev);
skb              1177 drivers/net/ethernet/davicom/dm9000.c 					skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1179 drivers/net/ethernet/davicom/dm9000.c 					skb_checksum_none_assert(skb);
skb              1181 drivers/net/ethernet/davicom/dm9000.c 			netif_rx(skb);
skb               293 drivers/net/ethernet/dec/tulip/de2104x.c 	struct sk_buff		*skb;
skb               410 drivers/net/ethernet/dec/tulip/de2104x.c 		struct sk_buff *skb, *copy_skb;
skb               413 drivers/net/ethernet/dec/tulip/de2104x.c 		skb = de->rx_skb[rx_tail].skb;
skb               414 drivers/net/ethernet/dec/tulip/de2104x.c 		BUG_ON(!skb);
skb               451 drivers/net/ethernet/dec/tulip/de2104x.c 			skb_put(skb, len);
skb               457 drivers/net/ethernet/dec/tulip/de2104x.c 			de->rx_skb[rx_tail].skb = copy_skb;
skb               461 drivers/net/ethernet/dec/tulip/de2104x.c 			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
skb               466 drivers/net/ethernet/dec/tulip/de2104x.c 			skb = copy_skb;
skb               469 drivers/net/ethernet/dec/tulip/de2104x.c 		skb->protocol = eth_type_trans (skb, de->dev);
skb               472 drivers/net/ethernet/dec/tulip/de2104x.c 		de->dev->stats.rx_bytes += skb->len;
skb               473 drivers/net/ethernet/dec/tulip/de2104x.c 		rc = netif_rx (skb);
skb               546 drivers/net/ethernet/dec/tulip/de2104x.c 		struct sk_buff *skb;
skb               554 drivers/net/ethernet/dec/tulip/de2104x.c 		skb = de->tx_skb[tx_tail].skb;
skb               555 drivers/net/ethernet/dec/tulip/de2104x.c 		BUG_ON(!skb);
skb               556 drivers/net/ethernet/dec/tulip/de2104x.c 		if (unlikely(skb == DE_DUMMY_SKB))
skb               559 drivers/net/ethernet/dec/tulip/de2104x.c 		if (unlikely(skb == DE_SETUP_SKB)) {
skb               566 drivers/net/ethernet/dec/tulip/de2104x.c 				 skb->len, PCI_DMA_TODEVICE);
skb               584 drivers/net/ethernet/dec/tulip/de2104x.c 				de->dev->stats.tx_bytes += skb->len;
skb               588 drivers/net/ethernet/dec/tulip/de2104x.c 			dev_consume_skb_irq(skb);
skb               592 drivers/net/ethernet/dec/tulip/de2104x.c 		de->tx_skb[tx_tail].skb = NULL;
skb               603 drivers/net/ethernet/dec/tulip/de2104x.c static netdev_tx_t de_start_xmit (struct sk_buff *skb,
skb               625 drivers/net/ethernet/dec/tulip/de2104x.c 	len = skb->len;
skb               626 drivers/net/ethernet/dec/tulip/de2104x.c 	mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
skb               635 drivers/net/ethernet/dec/tulip/de2104x.c 	de->tx_skb[entry].skb = skb;
skb               644 drivers/net/ethernet/dec/tulip/de2104x.c 		  entry, skb->len);
skb               755 drivers/net/ethernet/dec/tulip/de2104x.c 		de->tx_skb[entry].skb = DE_DUMMY_SKB;
skb               767 drivers/net/ethernet/dec/tulip/de2104x.c 	de->tx_skb[entry].skb = DE_SETUP_SKB;
skb              1279 drivers/net/ethernet/dec/tulip/de2104x.c 		struct sk_buff *skb;
skb              1281 drivers/net/ethernet/dec/tulip/de2104x.c 		skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
skb              1282 drivers/net/ethernet/dec/tulip/de2104x.c 		if (!skb)
skb              1286 drivers/net/ethernet/dec/tulip/de2104x.c 			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb              1287 drivers/net/ethernet/dec/tulip/de2104x.c 		de->rx_skb[i].skb = skb;
skb              1338 drivers/net/ethernet/dec/tulip/de2104x.c 		if (de->rx_skb[i].skb) {
skb              1341 drivers/net/ethernet/dec/tulip/de2104x.c 			dev_kfree_skb(de->rx_skb[i].skb);
skb              1346 drivers/net/ethernet/dec/tulip/de2104x.c 		struct sk_buff *skb = de->tx_skb[i].skb;
skb              1347 drivers/net/ethernet/dec/tulip/de2104x.c 		if ((skb) && (skb != DE_DUMMY_SKB)) {
skb              1348 drivers/net/ethernet/dec/tulip/de2104x.c 			if (skb != DE_SETUP_SKB) {
skb              1352 drivers/net/ethernet/dec/tulip/de2104x.c 					skb->len, PCI_DMA_TODEVICE);
skb              1353 drivers/net/ethernet/dec/tulip/de2104x.c 				dev_kfree_skb(skb);
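
The de2104x TX ring stores sentinel cookies (DE_DUMMY_SKB, DE_SETUP_SKB) in slots that carry no normal packet, so ring cleanup must test for them before unmapping and freeing. A sketch of the guard, with the cookie values chosen arbitrarily here:

#include <linux/pci.h>
#include <linux/skbuff.h>

#define DUMMY_SKB ((struct sk_buff *)-1)  /* padding slot */
#define SETUP_SKB ((struct sk_buff *)-2)  /* setup-frame slot */

static void tx_slot_clean(struct pci_dev *pdev, struct sk_buff *skb,
			  dma_addr_t mapping)
{
	if (!skb || skb == DUMMY_SKB || skb == SETUP_SKB)
		return;			/* no ordinary packet mapped here */
	pci_unmap_single(pdev, mapping, skb->len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
}
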
skb               898 drivers/net/ethernet/dec/tulip/de4x5.c static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
skb               921 drivers/net/ethernet/dec/tulip/de4x5.c static void    load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
skb               944 drivers/net/ethernet/dec/tulip/de4x5.c static void    de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
skb               945 drivers/net/ethernet/dec/tulip/de4x5.c static void    de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
skb               997 drivers/net/ethernet/dec/tulip/de4x5.c static void    de4x5_dbg_rx(struct sk_buff *skb, int len);
skb              1457 drivers/net/ethernet/dec/tulip/de4x5.c de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
skb              1483 drivers/net/ethernet/dec/tulip/de4x5.c 	    de4x5_putb_cache(dev, skb);          /* Requeue the buffer */
skb              1485 drivers/net/ethernet/dec/tulip/de4x5.c 	    de4x5_put_cache(dev, skb);
skb              1490 drivers/net/ethernet/dec/tulip/de4x5.c     } else if (skb->len > 0) {
skb              1493 drivers/net/ethernet/dec/tulip/de4x5.c 	    de4x5_put_cache(dev, skb);
skb              1494 drivers/net/ethernet/dec/tulip/de4x5.c 	    skb = de4x5_get_cache(dev);
skb              1497 drivers/net/ethernet/dec/tulip/de4x5.c 	while (skb && !netif_queue_stopped(dev) &&
skb              1501 drivers/net/ethernet/dec/tulip/de4x5.c 	    load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
skb              1502 drivers/net/ethernet/dec/tulip/de4x5.c  	    lp->stats.tx_bytes += skb->len;
skb              1510 drivers/net/ethernet/dec/tulip/de4x5.c 	    skb = de4x5_get_cache(dev);
skb              1513 drivers/net/ethernet/dec/tulip/de4x5.c 	if (skb) de4x5_putb_cache(dev, skb);
skb              1520 drivers/net/ethernet/dec/tulip/de4x5.c 	dev_kfree_skb_any(skb);
skb              1636 drivers/net/ethernet/dec/tulip/de4x5.c 		struct sk_buff *skb;
skb              1640 drivers/net/ethernet/dec/tulip/de4x5.c 		if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
skb              1645 drivers/net/ethernet/dec/tulip/de4x5.c 		    de4x5_dbg_rx(skb, pkt_len);
skb              1648 drivers/net/ethernet/dec/tulip/de4x5.c 		    skb->protocol=eth_type_trans(skb,dev);
skb              1649 drivers/net/ethernet/dec/tulip/de4x5.c 		    de4x5_local_stats(dev, skb->data, pkt_len);
skb              1650 drivers/net/ethernet/dec/tulip/de4x5.c 		    netif_rx(skb);
skb              1896 drivers/net/ethernet/dec/tulip/de4x5.c load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
skb              1905 drivers/net/ethernet/dec/tulip/de4x5.c     lp->tx_skb[lp->tx_new] = skb;
skb              3758 drivers/net/ethernet/dec/tulip/de4x5.c de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
skb              3762 drivers/net/ethernet/dec/tulip/de4x5.c     __skb_queue_tail(&lp->cache.queue, skb);
skb              3766 drivers/net/ethernet/dec/tulip/de4x5.c de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
skb              3770 drivers/net/ethernet/dec/tulip/de4x5.c     __skb_queue_head(&lp->cache.queue, skb);
skb              5336 drivers/net/ethernet/dec/tulip/de4x5.c de4x5_dbg_rx(struct sk_buff *skb, int len)
skb              5342 drivers/net/ethernet/dec/tulip/de4x5.c 	       skb->data, &skb->data[6],
skb              5343 drivers/net/ethernet/dec/tulip/de4x5.c 	       (u_char)skb->data[12],
skb              5344 drivers/net/ethernet/dec/tulip/de4x5.c 	       (u_char)skb->data[13],
skb              5349 drivers/net/ethernet/dec/tulip/de4x5.c 	    printk("%02x ",(u_char)skb->data[i+j]);
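
de4x5 parks packets it cannot load into the ring on a private sk_buff_head, requeuing at the head ("putb") to preserve ordering. A sketch of that cache; the lock-free __skb_queue_* variants assume the caller already serializes access:

#include <linux/skbuff.h>

static void cache_put(struct sk_buff_head *q, struct sk_buff *skb)
{
	__skb_queue_tail(q, skb);	/* enqueue at the back */
}

static void cache_putb(struct sk_buff_head *q, struct sk_buff *skb)
{
	__skb_queue_head(q, skb);	/* requeue at the front */
}

static struct sk_buff *cache_get(struct sk_buff_head *q)
{
	return __skb_dequeue(q);	/* NULL when empty */
}
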
skb               672 drivers/net/ethernet/dec/tulip/dmfe.c static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
skb               683 drivers/net/ethernet/dec/tulip/dmfe.c 	if (skb->len > MAX_PACKET_SIZE) {
skb               684 drivers/net/ethernet/dec/tulip/dmfe.c 		pr_err("big packet = %d\n", (u16)skb->len);
skb               685 drivers/net/ethernet/dec/tulip/dmfe.c 		dev_kfree_skb_any(skb);
skb               706 drivers/net/ethernet/dec/tulip/dmfe.c 	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
skb               707 drivers/net/ethernet/dec/tulip/dmfe.c 	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
skb               732 drivers/net/ethernet/dec/tulip/dmfe.c 	dev_consume_skb_any(skb);
skb               953 drivers/net/ethernet/dec/tulip/dmfe.c 	struct sk_buff *skb, *newskb;
skb               993 drivers/net/ethernet/dec/tulip/dmfe.c 				skb = rxptr->rx_skb_ptr;
skb               997 drivers/net/ethernet/dec/tulip/dmfe.c 					(cal_CRC(skb->data, rxlen, 1) !=
skb               998 drivers/net/ethernet/dec/tulip/dmfe.c 					(*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
skb              1009 drivers/net/ethernet/dec/tulip/dmfe.c 						skb = newskb;
skb              1011 drivers/net/ethernet/dec/tulip/dmfe.c 						skb_reserve(skb, 2); /* 16byte align */
skb              1013 drivers/net/ethernet/dec/tulip/dmfe.c 							  skb_put(skb, rxlen),
skb              1017 drivers/net/ethernet/dec/tulip/dmfe.c 						skb_put(skb, rxlen);
skb              1019 drivers/net/ethernet/dec/tulip/dmfe.c 					skb->protocol = eth_type_trans(skb, dev);
skb              1020 drivers/net/ethernet/dec/tulip/dmfe.c 					netif_rx(skb);
skb              1336 drivers/net/ethernet/dec/tulip/dmfe.c static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
skb              1341 drivers/net/ethernet/dec/tulip/dmfe.c 		rxptr->rx_skb_ptr = skb;
skb              1343 drivers/net/ethernet/dec/tulip/dmfe.c 			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
skb              1549 drivers/net/ethernet/dec/tulip/dmfe.c 	struct sk_buff *skb;
skb              1554 drivers/net/ethernet/dec/tulip/dmfe.c 		if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
skb              1556 drivers/net/ethernet/dec/tulip/dmfe.c 		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
skb              1557 drivers/net/ethernet/dec/tulip/dmfe.c 		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
skb                68 drivers/net/ethernet/dec/tulip/interrupt.c 		if (tp->rx_buffers[entry].skb == NULL) {
skb                69 drivers/net/ethernet/dec/tulip/interrupt.c 			struct sk_buff *skb;
skb                72 drivers/net/ethernet/dec/tulip/interrupt.c 			skb = tp->rx_buffers[entry].skb =
skb                74 drivers/net/ethernet/dec/tulip/interrupt.c 			if (skb == NULL)
skb                77 drivers/net/ethernet/dec/tulip/interrupt.c 			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
skb                80 drivers/net/ethernet/dec/tulip/interrupt.c 				dev_kfree_skb(skb);
skb                81 drivers/net/ethernet/dec/tulip/interrupt.c 				tp->rx_buffers[entry].skb = NULL;
skb               206 drivers/net/ethernet/dec/tulip/interrupt.c                                struct sk_buff *skb;
skb               211 drivers/net/ethernet/dec/tulip/interrupt.c                                    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb               212 drivers/net/ethernet/dec/tulip/interrupt.c                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
skb               217 drivers/net/ethernet/dec/tulip/interrupt.c                                        skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
skb               219 drivers/net/ethernet/dec/tulip/interrupt.c                                        skb_put(skb, pkt_len);
skb               221 drivers/net/ethernet/dec/tulip/interrupt.c                                        skb_put_data(skb,
skb               222 drivers/net/ethernet/dec/tulip/interrupt.c                                                     tp->rx_buffers[entry].skb->data,
skb               229 drivers/net/ethernet/dec/tulip/interrupt.c                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
skb               239 drivers/net/ethernet/dec/tulip/interrupt.c 						       skb->head, temp);
skb               246 drivers/net/ethernet/dec/tulip/interrupt.c                                        tp->rx_buffers[entry].skb = NULL;
skb               249 drivers/net/ethernet/dec/tulip/interrupt.c                                skb->protocol = eth_type_trans(skb, dev);
skb               251 drivers/net/ethernet/dec/tulip/interrupt.c                                netif_receive_skb(skb);
skb               317 drivers/net/ethernet/dec/tulip/interrupt.c          if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
skb               340 drivers/net/ethernet/dec/tulip/interrupt.c              tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
skb               343 drivers/net/ethernet/dec/tulip/interrupt.c          if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
skb               432 drivers/net/ethernet/dec/tulip/interrupt.c 			struct sk_buff *skb;
skb               437 drivers/net/ethernet/dec/tulip/interrupt.c 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb               438 drivers/net/ethernet/dec/tulip/interrupt.c 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
skb               443 drivers/net/ethernet/dec/tulip/interrupt.c 				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
skb               445 drivers/net/ethernet/dec/tulip/interrupt.c 				skb_put(skb, pkt_len);
skb               447 drivers/net/ethernet/dec/tulip/interrupt.c 				skb_put_data(skb,
skb               448 drivers/net/ethernet/dec/tulip/interrupt.c 					     tp->rx_buffers[entry].skb->data,
skb               455 drivers/net/ethernet/dec/tulip/interrupt.c 				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
skb               465 drivers/net/ethernet/dec/tulip/interrupt.c 						skb->head, temp);
skb               472 drivers/net/ethernet/dec/tulip/interrupt.c 				tp->rx_buffers[entry].skb = NULL;
skb               475 drivers/net/ethernet/dec/tulip/interrupt.c 			skb->protocol = eth_type_trans(skb, dev);
skb               477 drivers/net/ethernet/dec/tulip/interrupt.c 			netif_rx(skb);
skb               597 drivers/net/ethernet/dec/tulip/interrupt.c 				if (tp->tx_buffers[entry].skb == NULL) {
skb               627 drivers/net/ethernet/dec/tulip/interrupt.c 						tp->tx_buffers[entry].skb->len;
skb               633 drivers/net/ethernet/dec/tulip/interrupt.c 						 tp->tx_buffers[entry].skb->len,
skb               637 drivers/net/ethernet/dec/tulip/interrupt.c 				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
skb               638 drivers/net/ethernet/dec/tulip/interrupt.c 				tp->tx_buffers[entry].skb = NULL;
skb               781 drivers/net/ethernet/dec/tulip/interrupt.c 	if (tp->rx_buffers[entry].skb == NULL) {
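
The tulip interrupt path refills RX ring slots whose skb was handed to the stack; an allocation or mapping failure simply leaves the slot empty for the next pass. One slot's refill, using the same legacy PCI DMA API as the driver:

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static int rx_refill_slot(struct net_device *dev, struct pci_dev *pdev,
			  struct sk_buff **slot, dma_addr_t *mapping,
			  unsigned int buf_sz)
{
	struct sk_buff *skb;

	if (*slot)
		return 0;			/* still armed */
	skb = netdev_alloc_skb(dev, buf_sz);
	if (!skb)
		return -ENOMEM;			/* retry on next poll */
	*mapping = pci_map_single(pdev, skb->data, buf_sz,
				  PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *mapping)) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	*slot = skb;
	return 0;
}
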
skb               396 drivers/net/ethernet/dec/tulip/tulip.h 	struct sk_buff	*skb;
skb               261 drivers/net/ethernet/dec/tulip/tulip_core.c static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
skb               367 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->tx_buffers[tp->cur_tx].skb = NULL;
skb               627 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->rx_buffers[i].skb = NULL;
skb               640 drivers/net/ethernet/dec/tulip/tulip_core.c 		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
skb               641 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->rx_buffers[i].skb = skb;
skb               642 drivers/net/ethernet/dec/tulip/tulip_core.c 		if (skb == NULL)
skb               644 drivers/net/ethernet/dec/tulip/tulip_core.c 		mapping = pci_map_single(tp->pdev, skb->data,
skb               655 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->tx_buffers[i].skb = NULL;
skb               664 drivers/net/ethernet/dec/tulip/tulip_core.c tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               677 drivers/net/ethernet/dec/tulip/tulip_core.c 	tp->tx_buffers[entry].skb = skb;
skb               678 drivers/net/ethernet/dec/tulip/tulip_core.c 	mapping = pci_map_single(tp->pdev, skb->data,
skb               679 drivers/net/ethernet/dec/tulip/tulip_core.c 				 skb->len, PCI_DMA_TODEVICE);
skb               696 drivers/net/ethernet/dec/tulip/tulip_core.c 	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
skb               727 drivers/net/ethernet/dec/tulip/tulip_core.c 		if (tp->tx_buffers[entry].skb == NULL) {
skb               738 drivers/net/ethernet/dec/tulip/tulip_core.c 				tp->tx_buffers[entry].skb->len,
skb               742 drivers/net/ethernet/dec/tulip/tulip_core.c 		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
skb               743 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->tx_buffers[entry].skb = NULL;
skb               798 drivers/net/ethernet/dec/tulip/tulip_core.c 		struct sk_buff *skb = tp->rx_buffers[i].skb;
skb               801 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->rx_buffers[i].skb = NULL;
skb               808 drivers/net/ethernet/dec/tulip/tulip_core.c 		if (skb) {
skb               811 drivers/net/ethernet/dec/tulip/tulip_core.c 			dev_kfree_skb (skb);
skb               816 drivers/net/ethernet/dec/tulip/tulip_core.c 		struct sk_buff *skb = tp->tx_buffers[i].skb;
skb               818 drivers/net/ethernet/dec/tulip/tulip_core.c 		if (skb != NULL) {
skb               820 drivers/net/ethernet/dec/tulip/tulip_core.c 					 skb->len, PCI_DMA_TODEVICE);
skb               821 drivers/net/ethernet/dec/tulip/tulip_core.c 			dev_kfree_skb (skb);
skb               823 drivers/net/ethernet/dec/tulip/tulip_core.c 		tp->tx_buffers[i].skb = NULL;
skb              1151 drivers/net/ethernet/dec/tulip/tulip_core.c 				tp->tx_buffers[entry].skb = NULL;
skb              1162 drivers/net/ethernet/dec/tulip/tulip_core.c 			tp->tx_buffers[entry].skb = NULL;
skb               583 drivers/net/ethernet/dec/tulip/uli526x.c static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
skb               597 drivers/net/ethernet/dec/tulip/uli526x.c 	if (skb->len > MAX_PACKET_SIZE) {
skb               598 drivers/net/ethernet/dec/tulip/uli526x.c 		netdev_err(dev, "big packet = %d\n", (u16)skb->len);
skb               599 drivers/net/ethernet/dec/tulip/uli526x.c 		dev_kfree_skb_any(skb);
skb               617 drivers/net/ethernet/dec/tulip/uli526x.c 	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
skb               618 drivers/net/ethernet/dec/tulip/uli526x.c 	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
skb               640 drivers/net/ethernet/dec/tulip/uli526x.c 	dev_consume_skb_any(skb);
skb               806 drivers/net/ethernet/dec/tulip/uli526x.c 	struct sk_buff *skb;
skb               848 drivers/net/ethernet/dec/tulip/uli526x.c 				skb = rxptr->rx_skb_ptr;
skb               854 drivers/net/ethernet/dec/tulip/uli526x.c 					skb = new_skb;
skb               856 drivers/net/ethernet/dec/tulip/uli526x.c 					skb_reserve(skb, 2); /* 16byte align */
skb               857 drivers/net/ethernet/dec/tulip/uli526x.c 					skb_put_data(skb,
skb               862 drivers/net/ethernet/dec/tulip/uli526x.c 					skb_put(skb, rxlen);
skb               864 drivers/net/ethernet/dec/tulip/uli526x.c 				skb->protocol = eth_type_trans(skb, dev);
skb               865 drivers/net/ethernet/dec/tulip/uli526x.c 				netif_rx(skb);
skb              1273 drivers/net/ethernet/dec/tulip/uli526x.c static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
skb              1278 drivers/net/ethernet/dec/tulip/uli526x.c 		rxptr->rx_skb_ptr = skb;
skb              1280 drivers/net/ethernet/dec/tulip/uli526x.c 							  skb_tail_pointer(skb),
skb              1445 drivers/net/ethernet/dec/tulip/uli526x.c 	struct sk_buff *skb;
skb              1450 drivers/net/ethernet/dec/tulip/uli526x.c 		skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
skb              1451 drivers/net/ethernet/dec/tulip/uli526x.c 		if (skb == NULL)
skb              1453 drivers/net/ethernet/dec/tulip/uli526x.c 		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
skb              1455 drivers/net/ethernet/dec/tulip/uli526x.c 							  skb_tail_pointer(skb),
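
The uli526x.c hits show the allocate/reuse cycle for RX buffers: a fresh skb is remembered in the descriptor, and its tail pointer (not skb->data) is what gets DMA-mapped, since nothing has been put into the skb yet. A sketch of one refill step; the descriptor layout below is illustrative, echoing the driver's rdes naming rather than reproducing it exactly:

	struct my_rx_desc {
		__le32 rdes0, rdes2;
		struct sk_buff *rx_skb_ptr;
	};

	static void my_rx_refill_one(struct net_device *dev,
				     struct my_rx_desc *rxptr,
				     struct pci_dev *pdev)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);

		if (!skb)
			return;		/* retried on the next refill pass */
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(pci_map_single(pdev,
							  skb_tail_pointer(skb),
							  RX_ALLOC_SIZE,
							  PCI_DMA_FROMDEVICE));
		rxptr->rdes0 = cpu_to_le32(0x80000000);	/* OWN bit: give to hw */
	}
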
skb               337 drivers/net/ethernet/dec/tulip/winbond-840.c static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
skb               811 drivers/net/ethernet/dec/tulip/winbond-840.c 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
skb               812 drivers/net/ethernet/dec/tulip/winbond-840.c 		np->rx_skbuff[i] = skb;
skb               813 drivers/net/ethernet/dec/tulip/winbond-840.c 		if (skb == NULL)
skb               815 drivers/net/ethernet/dec/tulip/winbond-840.c 		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
skb               996 drivers/net/ethernet/dec/tulip/winbond-840.c static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
skb              1008 drivers/net/ethernet/dec/tulip/winbond-840.c 				skb->data,skb->len, PCI_DMA_TODEVICE);
skb              1009 drivers/net/ethernet/dec/tulip/winbond-840.c 	np->tx_skbuff[entry] = skb;
skb              1012 drivers/net/ethernet/dec/tulip/winbond-840.c 	if (skb->len < TX_BUFLIMIT) {
skb              1013 drivers/net/ethernet/dec/tulip/winbond-840.c 		np->tx_ring[entry].length = DescWholePkt | skb->len;
skb              1015 drivers/net/ethernet/dec/tulip/winbond-840.c 		int len = skb->len - TX_BUFLIMIT;
skb              1040 drivers/net/ethernet/dec/tulip/winbond-840.c 	np->tx_q_bytes += skb->len;
skb              1216 drivers/net/ethernet/dec/tulip/winbond-840.c 			struct sk_buff *skb;
skb              1228 drivers/net/ethernet/dec/tulip/winbond-840.c 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb              1229 drivers/net/ethernet/dec/tulip/winbond-840.c 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
skb              1233 drivers/net/ethernet/dec/tulip/winbond-840.c 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
skb              1234 drivers/net/ethernet/dec/tulip/winbond-840.c 				skb_put(skb, pkt_len);
skb              1242 drivers/net/ethernet/dec/tulip/winbond-840.c 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
skb              1249 drivers/net/ethernet/dec/tulip/winbond-840.c 					   &skb->data[0], &skb->data[6],
skb              1250 drivers/net/ethernet/dec/tulip/winbond-840.c 					   skb->data[12], skb->data[13],
skb              1251 drivers/net/ethernet/dec/tulip/winbond-840.c 					   &skb->data[14]);
skb              1253 drivers/net/ethernet/dec/tulip/winbond-840.c 			skb->protocol = eth_type_trans(skb, dev);
skb              1254 drivers/net/ethernet/dec/tulip/winbond-840.c 			netif_rx(skb);
skb              1264 drivers/net/ethernet/dec/tulip/winbond-840.c 		struct sk_buff *skb;
skb              1267 drivers/net/ethernet/dec/tulip/winbond-840.c 			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
skb              1268 drivers/net/ethernet/dec/tulip/winbond-840.c 			np->rx_skbuff[entry] = skb;
skb              1269 drivers/net/ethernet/dec/tulip/winbond-840.c 			if (skb == NULL)
skb              1272 drivers/net/ethernet/dec/tulip/winbond-840.c 							skb->data,
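
winbond-840.c demonstrates the rx_copybreak heuristic: short frames are copied into a small, freshly aligned skb so the large ring buffer can be handed straight back to the NIC, while long frames are passed up whole and the ring slot is refilled afterwards. A sketch of the decision (DMA sync/unmap of the ring buffer is elided; my_priv reuses the driver's field names as stand-ins):

	static void my_rx_deliver(struct net_device *dev, struct my_priv *np,
				  int entry, int pkt_len)
	{
		struct sk_buff *skb;

		if (pkt_len < rx_copybreak &&
		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
			skb_reserve(skb, 2);	/* 16-byte align the IP header */
			skb_copy_to_linear_data(skb,
						np->rx_skbuff[entry]->data,
						pkt_len);
			skb_put(skb, pkt_len);	/* ring skb stays with the NIC */
		} else {
			skb = np->rx_skbuff[entry];
			skb_put(skb, pkt_len);
			np->rx_skbuff[entry] = NULL;	/* refilled later */
		}
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
	}
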
skb               109 drivers/net/ethernet/dec/tulip/xircom_cb.c static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
skb               375 drivers/net/ethernet/dec/tulip/xircom_cb.c static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
skb               400 drivers/net/ethernet/dec/tulip/xircom_cb.c 			skb_copy_from_linear_data(skb,
skb               402 drivers/net/ethernet/dec/tulip/xircom_cb.c 						  skb->len);
skb               406 drivers/net/ethernet/dec/tulip/xircom_cb.c 			card->tx_buffer[4*desc+1] = cpu_to_le32(skb->len);
skb               412 drivers/net/ethernet/dec/tulip/xircom_cb.c 			card->tx_skb[desc] = skb;
skb              1109 drivers/net/ethernet/dec/tulip/xircom_cb.c 		struct sk_buff *skb;
skb              1116 drivers/net/ethernet/dec/tulip/xircom_cb.c 		skb = netdev_alloc_skb(dev, pkt_len + 2);
skb              1117 drivers/net/ethernet/dec/tulip/xircom_cb.c 		if (skb == NULL) {
skb              1121 drivers/net/ethernet/dec/tulip/xircom_cb.c 		skb_reserve(skb, 2);
skb              1122 drivers/net/ethernet/dec/tulip/xircom_cb.c 		skb_copy_to_linear_data(skb,
skb              1125 drivers/net/ethernet/dec/tulip/xircom_cb.c 		skb_put(skb, pkt_len);
skb              1126 drivers/net/ethernet/dec/tulip/xircom_cb.c 		skb->protocol = eth_type_trans(skb, dev);
skb              1127 drivers/net/ethernet/dec/tulip/xircom_cb.c 		netif_rx(skb);
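
xircom_cb.c copies every outgoing frame into a slot of one pre-allocated DMA area instead of mapping skb->data, then parks the skb until the TX-done interrupt. A sketch of that bounce-buffer enqueue (tx_slot_virt/tx_len/tx_skb are hypothetical names for the card's per-descriptor bookkeeping):

	static void my_bounce_tx(struct my_card *card, int desc,
				 struct sk_buff *skb)
	{
		/* copy the linear packet data into the device-visible slot */
		skb_copy_from_linear_data(skb, card->tx_slot_virt[desc],
					  skb->len);
		card->tx_len[desc] = cpu_to_le32(skb->len);
		card->tx_skb[desc] = skb;	/* freed on TX-done interrupt */
	}
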
skb                70 drivers/net/ethernet/dlink/dl2k.c static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
skb               440 drivers/net/ethernet/dlink/dl2k.c 	struct sk_buff *skb;
skb               445 drivers/net/ethernet/dlink/dl2k.c 		skb = np->rx_skbuff[i];
skb               446 drivers/net/ethernet/dlink/dl2k.c 		if (skb) {
skb               448 drivers/net/ethernet/dlink/dl2k.c 					 skb->len, PCI_DMA_FROMDEVICE);
skb               449 drivers/net/ethernet/dlink/dl2k.c 			dev_kfree_skb(skb);
skb               456 drivers/net/ethernet/dlink/dl2k.c 		skb = np->tx_skbuff[i];
skb               457 drivers/net/ethernet/dlink/dl2k.c 		if (skb) {
skb               459 drivers/net/ethernet/dlink/dl2k.c 					 skb->len, PCI_DMA_TODEVICE);
skb               460 drivers/net/ethernet/dlink/dl2k.c 			dev_kfree_skb(skb);
skb               502 drivers/net/ethernet/dlink/dl2k.c 		struct sk_buff *skb;
skb               504 drivers/net/ethernet/dlink/dl2k.c 		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
skb               505 drivers/net/ethernet/dlink/dl2k.c 		np->rx_skbuff[i] = skb;
skb               506 drivers/net/ethernet/dlink/dl2k.c 		if (!skb) {
skb               517 drivers/net/ethernet/dlink/dl2k.c 				  np->pdev, skb->data, np->rx_buf_sz,
skb               669 drivers/net/ethernet/dlink/dl2k.c 			struct sk_buff *skb;
skb               673 drivers/net/ethernet/dlink/dl2k.c 				skb = netdev_alloc_skb_ip_align(dev,
skb               675 drivers/net/ethernet/dlink/dl2k.c 				if (skb == NULL) {
skb               682 drivers/net/ethernet/dlink/dl2k.c 				np->rx_skbuff[entry] = skb;
skb               685 drivers/net/ethernet/dlink/dl2k.c 					 (np->pdev, skb->data, np->rx_buf_sz,
skb               712 drivers/net/ethernet/dlink/dl2k.c start_xmit (struct sk_buff *skb, struct net_device *dev)
skb               721 drivers/net/ethernet/dlink/dl2k.c 		dev_kfree_skb(skb);
skb               725 drivers/net/ethernet/dlink/dl2k.c 	np->tx_skbuff[entry] = skb;
skb               729 drivers/net/ethernet/dlink/dl2k.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               738 drivers/net/ethernet/dlink/dl2k.c 		    ((u64)skb->priority << 45);
skb               740 drivers/net/ethernet/dlink/dl2k.c 	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
skb               741 drivers/net/ethernet/dlink/dl2k.c 							skb->len,
skb               743 drivers/net/ethernet/dlink/dl2k.c 	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
skb               834 drivers/net/ethernet/dlink/dl2k.c 		struct sk_buff *skb;
skb               838 drivers/net/ethernet/dlink/dl2k.c 		skb = np->tx_skbuff[entry];
skb               841 drivers/net/ethernet/dlink/dl2k.c 				  skb->len, PCI_DMA_TODEVICE);
skb               843 drivers/net/ethernet/dlink/dl2k.c 			dev_consume_skb_irq(skb);
skb               845 drivers/net/ethernet/dlink/dl2k.c 			dev_kfree_skb(skb);
skb               957 drivers/net/ethernet/dlink/dl2k.c 			struct sk_buff *skb;
skb               965 drivers/net/ethernet/dlink/dl2k.c 				skb_put (skb = np->rx_skbuff[entry], pkt_len);
skb               967 drivers/net/ethernet/dlink/dl2k.c 			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
skb               972 drivers/net/ethernet/dlink/dl2k.c 				skb_copy_to_linear_data (skb,
skb               975 drivers/net/ethernet/dlink/dl2k.c 				skb_put (skb, pkt_len);
skb               981 drivers/net/ethernet/dlink/dl2k.c 			skb->protocol = eth_type_trans (skb, dev);
skb               986 drivers/net/ethernet/dlink/dl2k.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               989 drivers/net/ethernet/dlink/dl2k.c 			netif_rx (skb);
skb               998 drivers/net/ethernet/dlink/dl2k.c 		struct sk_buff *skb;
skb              1001 drivers/net/ethernet/dlink/dl2k.c 			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
skb              1002 drivers/net/ethernet/dlink/dl2k.c 			if (skb == NULL) {
skb              1010 drivers/net/ethernet/dlink/dl2k.c 			np->rx_skbuff[entry] = skb;
skb              1013 drivers/net/ethernet/dlink/dl2k.c 					 (np->pdev, skb->data, np->rx_buf_sz,
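
dl2k.c uses netdev_alloc_skb_ip_align(), which folds in the explicit skb_reserve(skb, 2) seen in the older drivers above, and promotes hardware-verified frames with CHECKSUM_UNNECESSARY. A sketch of one copied-RX delivery under those two conventions (hw_csum_ok stands in for dl2k's descriptor status test):

	static void my_rx_copied(struct net_device *dev, const void *data,
				 unsigned int pkt_len, bool hw_csum_ok)
	{
		struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, pkt_len);

		if (!skb)
			return;				/* counted as a drop */
		skb_put_data(skb, data, pkt_len);	/* copy, advance tail */
		skb->protocol = eth_type_trans(skb, dev);
		if (hw_csum_ok)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		netif_rx(skb);
	}
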
skb               437 drivers/net/ethernet/dlink/sundance.c static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
skb              1047 drivers/net/ethernet/dlink/sundance.c 		struct sk_buff *skb =
skb              1049 drivers/net/ethernet/dlink/sundance.c 		np->rx_skbuff[i] = skb;
skb              1050 drivers/net/ethernet/dlink/sundance.c 		if (skb == NULL)
skb              1052 drivers/net/ethernet/dlink/sundance.c 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
skb              1054 drivers/net/ethernet/dlink/sundance.c 			dma_map_single(&np->pci_dev->dev, skb->data,
skb              1058 drivers/net/ethernet/dlink/sundance.c 			dev_kfree_skb(skb);
skb              1099 drivers/net/ethernet/dlink/sundance.c start_tx (struct sk_buff *skb, struct net_device *dev)
skb              1107 drivers/net/ethernet/dlink/sundance.c 	np->tx_skbuff[entry] = skb;
skb              1113 drivers/net/ethernet/dlink/sundance.c 				skb->data, skb->len, DMA_TO_DEVICE));
skb              1117 drivers/net/ethernet/dlink/sundance.c 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
skb              1140 drivers/net/ethernet/dlink/sundance.c 	dev_kfree_skb_any(skb);
skb              1152 drivers/net/ethernet/dlink/sundance.c 	struct sk_buff *skb;
skb              1163 drivers/net/ethernet/dlink/sundance.c 		skb = np->tx_skbuff[i];
skb              1164 drivers/net/ethernet/dlink/sundance.c 		if (skb) {
skb              1167 drivers/net/ethernet/dlink/sundance.c 				skb->len, DMA_TO_DEVICE);
skb              1168 drivers/net/ethernet/dlink/sundance.c 			dev_kfree_skb_any(skb);
skb              1272 drivers/net/ethernet/dlink/sundance.c 				struct sk_buff *skb;
skb              1283 drivers/net/ethernet/dlink/sundance.c 				skb = np->tx_skbuff[entry];
skb              1287 drivers/net/ethernet/dlink/sundance.c 					skb->len, DMA_TO_DEVICE);
skb              1298 drivers/net/ethernet/dlink/sundance.c 				struct sk_buff *skb;
skb              1302 drivers/net/ethernet/dlink/sundance.c 				skb = np->tx_skbuff[entry];
skb              1306 drivers/net/ethernet/dlink/sundance.c 					skb->len, DMA_TO_DEVICE);
skb              1374 drivers/net/ethernet/dlink/sundance.c 			struct sk_buff *skb;
skb              1384 drivers/net/ethernet/dlink/sundance.c 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb              1385 drivers/net/ethernet/dlink/sundance.c 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
skb              1389 drivers/net/ethernet/dlink/sundance.c 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
skb              1393 drivers/net/ethernet/dlink/sundance.c 				skb_put(skb, pkt_len);
skb              1398 drivers/net/ethernet/dlink/sundance.c 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
skb              1401 drivers/net/ethernet/dlink/sundance.c 			skb->protocol = eth_type_trans(skb, dev);
skb              1403 drivers/net/ethernet/dlink/sundance.c 			netif_rx(skb);
skb              1434 drivers/net/ethernet/dlink/sundance.c 		struct sk_buff *skb;
skb              1437 drivers/net/ethernet/dlink/sundance.c 			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
skb              1438 drivers/net/ethernet/dlink/sundance.c 			np->rx_skbuff[entry] = skb;
skb              1439 drivers/net/ethernet/dlink/sundance.c 			if (skb == NULL)
skb              1441 drivers/net/ethernet/dlink/sundance.c 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb              1443 drivers/net/ethernet/dlink/sundance.c 				dma_map_single(&np->pci_dev->dev, skb->data,
skb              1447 drivers/net/ethernet/dlink/sundance.c 			    dev_kfree_skb_irq(skb);
skb              1834 drivers/net/ethernet/dlink/sundance.c 	struct sk_buff *skb;
skb              1905 drivers/net/ethernet/dlink/sundance.c 		skb = np->rx_skbuff[i];
skb              1906 drivers/net/ethernet/dlink/sundance.c 		if (skb) {
skb              1910 drivers/net/ethernet/dlink/sundance.c 			dev_kfree_skb(skb);
skb              1917 drivers/net/ethernet/dlink/sundance.c 		skb = np->tx_skbuff[i];
skb              1918 drivers/net/ethernet/dlink/sundance.c 		if (skb) {
skb              1921 drivers/net/ethernet/dlink/sundance.c 				skb->len, DMA_TO_DEVICE);
skb              1922 drivers/net/ethernet/dlink/sundance.c 			dev_kfree_skb(skb);
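
sundance.c shows the shutdown discipline for both rings with the modern dma_*() API: every still-owned skb is unmapped with the direction it was mapped in, freed, and its slot cleared. A sketch (ring sizes, mapping arrays, and my_priv are stand-ins):

	static void my_free_rings(struct device *dev, struct my_priv *np)
	{
		int i;

		for (i = 0; i < RX_RING_SIZE; i++) {
			struct sk_buff *skb = np->rx_skbuff[i];

			if (skb) {
				dma_unmap_single(dev, np->rx_mapping[i],
						 np->rx_buf_sz,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				np->rx_skbuff[i] = NULL;
			}
		}
		for (i = 0; i < TX_RING_SIZE; i++) {
			struct sk_buff *skb = np->tx_skbuff[i];

			if (skb) {
				dma_unmap_single(dev, np->tx_mapping[i],
						 skb->len, DMA_TO_DEVICE);
				dev_kfree_skb(skb);
				np->tx_skbuff[i] = NULL;
			}
		}
	}
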
skb               368 drivers/net/ethernet/dnet.c 	struct sk_buff *skb;
skb               389 drivers/net/ethernet/dnet.c 		skb = netdev_alloc_skb(dev, pkt_len + 5);
skb               390 drivers/net/ethernet/dnet.c 		if (skb != NULL) {
skb               392 drivers/net/ethernet/dnet.c 			skb_reserve(skb, 2);
skb               397 drivers/net/ethernet/dnet.c 			data_ptr = skb_put(skb, pkt_len);
skb               400 drivers/net/ethernet/dnet.c 			skb->protocol = eth_type_trans(skb, dev);
skb               401 drivers/net/ethernet/dnet.c 			netif_receive_skb(skb);
skb               494 drivers/net/ethernet/dnet.c static inline void dnet_print_skb(struct sk_buff *skb)
skb               498 drivers/net/ethernet/dnet.c 	for (k = 0; k < skb->len; k++)
skb               499 drivers/net/ethernet/dnet.c 		printk(" %02x", (unsigned int)skb->data[k]);
skb               503 drivers/net/ethernet/dnet.c #define dnet_print_skb(skb)	do {} while (0)
skb               506 drivers/net/ethernet/dnet.c static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               518 drivers/net/ethernet/dnet.c 	       skb->len, skb->head, skb->data);
skb               519 drivers/net/ethernet/dnet.c 	dnet_print_skb(skb);
skb               522 drivers/net/ethernet/dnet.c 	len = (skb->len + 3) >> 2;
skb               528 drivers/net/ethernet/dnet.c 	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
skb               529 drivers/net/ethernet/dnet.c 	wrsz = (u32) skb->len + 3;
skb               530 drivers/net/ethernet/dnet.c 	wrsz += ((unsigned long) skb->data) & 0x3;
skb               532 drivers/net/ethernet/dnet.c 	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;
skb               554 drivers/net/ethernet/dnet.c 	skb_tx_timestamp(skb);
skb               557 drivers/net/ethernet/dnet.c 	dev_kfree_skb(skb);
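
dnet.c transmits by programmed I/O rather than DMA: it rounds skb->data down to a 32-bit boundary, widens the length to whole words, and encodes the residual byte offset plus the true length in the TX command word. A sketch of that alignment arithmetic; the two register targets are parameters here, whereas the real driver writes named DNET registers:

	static void my_pio_xmit(struct sk_buff *skb, void __iomem *fifo,
				void __iomem *cmd_reg)
	{
		u32 *bufp = (u32 *)((unsigned long)skb->data & ~0x3UL);
		u32 wrsz = (u32)skb->len + 3 +
			   (((unsigned long)skb->data) & 0x3);
		u32 tx_cmd = ((((unsigned long)skb->data) & 0x3) << 16) |
			     (u32)skb->len;

		for (wrsz >>= 2; wrsz; wrsz--)	/* whole 32-bit words */
			writel(*bufp++, fifo);
		writel(tx_cmd, cmd_reg);	/* byte offset + true length */
		skb_tx_timestamp(skb);
		dev_kfree_skb(skb);		/* data fully copied out */
	}
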
skb               213 drivers/net/ethernet/ec_bhf.c 		struct sk_buff *skb;
skb               215 drivers/net/ethernet/ec_bhf.c 		skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
skb               216 drivers/net/ethernet/ec_bhf.c 		if (skb) {
skb               217 drivers/net/ethernet/ec_bhf.c 			skb_put_data(skb, data, pkt_size);
skb               218 drivers/net/ethernet/ec_bhf.c 			skb->protocol = eth_type_trans(skb, priv->net_dev);
skb               221 drivers/net/ethernet/ec_bhf.c 			netif_rx(skb);
skb               282 drivers/net/ethernet/ec_bhf.c static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
skb               291 drivers/net/ethernet/ec_bhf.c 	skb_copy_and_csum_dev(skb, desc->data);
skb               292 drivers/net/ethernet/ec_bhf.c 	len = skb->len;
skb               313 drivers/net/ethernet/ec_bhf.c 	dev_kfree_skb(skb);
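
ec_bhf.c leans on skb_copy_and_csum_dev(), which linearizes the skb into the destination buffer and, when ip_summed is CHECKSUM_PARTIAL, computes and stores the pending checksum during the copy, so no separate checksum step is needed. A sketch of the resulting transmit path (my_next_desc() and my_kick_hw() are hypothetical):

	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
	{
		struct my_desc *desc = my_next_desc(dev);	/* hypothetical */

		skb_copy_and_csum_dev(skb, desc->data);	/* copy + finish csum */
		desc->len = skb->len;
		my_kick_hw(desc);		/* hypothetical doorbell */
		dev_kfree_skb(skb);		/* data already copied */
		return NETDEV_TX_OK;
	}
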
skb               897 drivers/net/ethernet/emulex/benet/be.h static inline u8 is_tcp_pkt(struct sk_buff *skb)
skb               901 drivers/net/ethernet/emulex/benet/be.h 	if (ip_hdr(skb)->version == 4)
skb               902 drivers/net/ethernet/emulex/benet/be.h 		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
skb               903 drivers/net/ethernet/emulex/benet/be.h 	else if (ip_hdr(skb)->version == 6)
skb               904 drivers/net/ethernet/emulex/benet/be.h 		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);
skb               909 drivers/net/ethernet/emulex/benet/be.h static inline u8 is_udp_pkt(struct sk_buff *skb)
skb               913 drivers/net/ethernet/emulex/benet/be.h 	if (ip_hdr(skb)->version == 4)
skb               914 drivers/net/ethernet/emulex/benet/be.h 		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
skb               915 drivers/net/ethernet/emulex/benet/be.h 	else if (ip_hdr(skb)->version == 6)
skb               916 drivers/net/ethernet/emulex/benet/be.h 		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);
skb               921 drivers/net/ethernet/emulex/benet/be.h static inline bool is_ipv4_pkt(struct sk_buff *skb)
skb               923 drivers/net/ethernet/emulex/benet/be.h 	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
skb               926 drivers/net/ethernet/emulex/benet/be.h static inline bool is_ipv6_ext_hdr(struct sk_buff *skb)
skb               928 drivers/net/ethernet/emulex/benet/be.h 	if (ip_hdr(skb)->version == 6)
skb               929 drivers/net/ethernet/emulex/benet/be.h 		return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr);
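
The be.h helpers work because the 4-bit version field occupies the same leading nibble in IPv4 and IPv6 headers, so ip_hdr(skb)->version is a safe discriminator before choosing which header type to read. A usage sketch built on those helpers (assumes the network and transport headers are already set, as they are on the driver's TX path):

	static bool my_l4_dport_is(struct sk_buff *skb, __be16 port)
	{
		if (is_tcp_pkt(skb))
			return tcp_hdr(skb)->dest == port;
		if (is_udp_pkt(skb))
			return udp_hdr(skb)->dest == port;
		return false;
	}
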
skb               738 drivers/net/ethernet/emulex/benet/be_main.c static int be_gso_hdr_len(struct sk_buff *skb)
skb               740 drivers/net/ethernet/emulex/benet/be_main.c 	if (skb->encapsulation)
skb               741 drivers/net/ethernet/emulex/benet/be_main.c 		return skb_inner_transport_offset(skb) +
skb               742 drivers/net/ethernet/emulex/benet/be_main.c 		       inner_tcp_hdrlen(skb);
skb               743 drivers/net/ethernet/emulex/benet/be_main.c 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               746 drivers/net/ethernet/emulex/benet/be_main.c static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
skb               749 drivers/net/ethernet/emulex/benet/be_main.c 	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
skb               751 drivers/net/ethernet/emulex/benet/be_main.c 	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
skb               755 drivers/net/ethernet/emulex/benet/be_main.c 	stats->tx_bytes += skb->len + dup_hdr_len;
skb               757 drivers/net/ethernet/emulex/benet/be_main.c 	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
skb               763 drivers/net/ethernet/emulex/benet/be_main.c static u32 skb_wrb_cnt(struct sk_buff *skb)
skb               766 drivers/net/ethernet/emulex/benet/be_main.c 	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
skb               789 drivers/net/ethernet/emulex/benet/be_main.c 				     struct sk_buff *skb)
skb               794 drivers/net/ethernet/emulex/benet/be_main.c 	vlan_tag = skb_vlan_tag_get(skb);
skb               795 drivers/net/ethernet/emulex/benet/be_main.c 	vlan_prio = skb_vlan_tag_get_prio(skb);
skb               805 drivers/net/ethernet/emulex/benet/be_main.c static u16 skb_inner_ip_proto(struct sk_buff *skb)
skb               807 drivers/net/ethernet/emulex/benet/be_main.c 	return (inner_ip_hdr(skb)->version == 4) ?
skb               808 drivers/net/ethernet/emulex/benet/be_main.c 		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
skb               811 drivers/net/ethernet/emulex/benet/be_main.c static u16 skb_ip_proto(struct sk_buff *skb)
skb               813 drivers/net/ethernet/emulex/benet/be_main.c 	return (ip_hdr(skb)->version == 4) ?
skb               814 drivers/net/ethernet/emulex/benet/be_main.c 		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
skb               833 drivers/net/ethernet/emulex/benet/be_main.c 				       struct sk_buff *skb,
skb               838 drivers/net/ethernet/emulex/benet/be_main.c 	if (skb_is_gso(skb)) {
skb               840 drivers/net/ethernet/emulex/benet/be_main.c 		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
skb               841 drivers/net/ethernet/emulex/benet/be_main.c 		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
skb               843 drivers/net/ethernet/emulex/benet/be_main.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               844 drivers/net/ethernet/emulex/benet/be_main.c 		if (skb->encapsulation) {
skb               846 drivers/net/ethernet/emulex/benet/be_main.c 			proto = skb_inner_ip_proto(skb);
skb               848 drivers/net/ethernet/emulex/benet/be_main.c 			proto = skb_ip_proto(skb);
skb               856 drivers/net/ethernet/emulex/benet/be_main.c 	if (skb_vlan_tag_present(skb)) {
skb               858 drivers/net/ethernet/emulex/benet/be_main.c 		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
skb               867 drivers/net/ethernet/emulex/benet/be_main.c 			 struct sk_buff *skb)
skb               895 drivers/net/ethernet/emulex/benet/be_main.c 	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
skb               896 drivers/net/ethernet/emulex/benet/be_main.c 	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
skb               931 drivers/net/ethernet/emulex/benet/be_main.c 				struct sk_buff *skb, u16 head)
skb               933 drivers/net/ethernet/emulex/benet/be_main.c 	u32 num_frags = skb_wrb_cnt(skb);
skb               937 drivers/net/ethernet/emulex/benet/be_main.c 	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
skb               941 drivers/net/ethernet/emulex/benet/be_main.c 	txo->sent_skb_list[head] = skb;
skb               993 drivers/net/ethernet/emulex/benet/be_main.c 			   struct sk_buff *skb,
skb               996 drivers/net/ethernet/emulex/benet/be_main.c 	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
skb              1005 drivers/net/ethernet/emulex/benet/be_main.c 	if (skb->len > skb->data_len) {
skb              1006 drivers/net/ethernet/emulex/benet/be_main.c 		len = skb_headlen(skb);
skb              1008 drivers/net/ethernet/emulex/benet/be_main.c 		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
skb              1016 drivers/net/ethernet/emulex/benet/be_main.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1017 drivers/net/ethernet/emulex/benet/be_main.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1027 drivers/net/ethernet/emulex/benet/be_main.c 	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
skb              1029 drivers/net/ethernet/emulex/benet/be_main.c 	be_tx_stats_update(txo, skb);
skb              1044 drivers/net/ethernet/emulex/benet/be_main.c 					     struct sk_buff *skb,
skb              1051 drivers/net/ethernet/emulex/benet/be_main.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb              1052 drivers/net/ethernet/emulex/benet/be_main.c 	if (unlikely(!skb))
skb              1053 drivers/net/ethernet/emulex/benet/be_main.c 		return skb;
skb              1055 drivers/net/ethernet/emulex/benet/be_main.c 	if (skb_vlan_tag_present(skb)) {
skb              1056 drivers/net/ethernet/emulex/benet/be_main.c 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
skb              1072 drivers/net/ethernet/emulex/benet/be_main.c 		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
skb              1074 drivers/net/ethernet/emulex/benet/be_main.c 		if (unlikely(!skb))
skb              1075 drivers/net/ethernet/emulex/benet/be_main.c 			return skb;
skb              1076 drivers/net/ethernet/emulex/benet/be_main.c 		__vlan_hwaccel_clear_tag(skb);
skb              1082 drivers/net/ethernet/emulex/benet/be_main.c 		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
skb              1084 drivers/net/ethernet/emulex/benet/be_main.c 		if (unlikely(!skb))
skb              1085 drivers/net/ethernet/emulex/benet/be_main.c 			return skb;
skb              1089 drivers/net/ethernet/emulex/benet/be_main.c 	return skb;
skb              1092 drivers/net/ethernet/emulex/benet/be_main.c static bool be_ipv6_exthdr_check(struct sk_buff *skb)
skb              1094 drivers/net/ethernet/emulex/benet/be_main.c 	struct ethhdr *eh = (struct ethhdr *)skb->data;
skb              1098 drivers/net/ethernet/emulex/benet/be_main.c 		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
skb              1104 drivers/net/ethernet/emulex/benet/be_main.c 				(struct ipv6_opt_hdr *)(skb->data + offset);
skb              1114 drivers/net/ethernet/emulex/benet/be_main.c static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
skb              1116 drivers/net/ethernet/emulex/benet/be_main.c 	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
skb              1119 drivers/net/ethernet/emulex/benet/be_main.c static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
skb              1121 drivers/net/ethernet/emulex/benet/be_main.c 	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
skb              1125 drivers/net/ethernet/emulex/benet/be_main.c 						  struct sk_buff *skb,
skb              1129 drivers/net/ethernet/emulex/benet/be_main.c 	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
skb              1137 drivers/net/ethernet/emulex/benet/be_main.c 	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
skb              1139 drivers/net/ethernet/emulex/benet/be_main.c 	if (skb->len <= 60 &&
skb              1140 drivers/net/ethernet/emulex/benet/be_main.c 	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
skb              1141 drivers/net/ethernet/emulex/benet/be_main.c 	    is_ipv4_pkt(skb)) {
skb              1142 drivers/net/ethernet/emulex/benet/be_main.c 		ip = (struct iphdr *)ip_hdr(skb);
skb              1143 drivers/net/ethernet/emulex/benet/be_main.c 		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
skb              1157 drivers/net/ethernet/emulex/benet/be_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL &&
skb              1158 drivers/net/ethernet/emulex/benet/be_main.c 	    skb_vlan_tag_present(skb)) {
skb              1159 drivers/net/ethernet/emulex/benet/be_main.c 		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
skb              1160 drivers/net/ethernet/emulex/benet/be_main.c 		if (unlikely(!skb))
skb              1168 drivers/net/ethernet/emulex/benet/be_main.c 	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
skb              1179 drivers/net/ethernet/emulex/benet/be_main.c 	if (be_ipv6_tx_stall_chk(adapter, skb) &&
skb              1180 drivers/net/ethernet/emulex/benet/be_main.c 	    be_vlan_tag_tx_chk(adapter, skb)) {
skb              1181 drivers/net/ethernet/emulex/benet/be_main.c 		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
skb              1182 drivers/net/ethernet/emulex/benet/be_main.c 		if (unlikely(!skb))
skb              1186 drivers/net/ethernet/emulex/benet/be_main.c 	return skb;
skb              1188 drivers/net/ethernet/emulex/benet/be_main.c 	dev_kfree_skb_any(skb);
skb              1194 drivers/net/ethernet/emulex/benet/be_main.c 					   struct sk_buff *skb,
skb              1204 drivers/net/ethernet/emulex/benet/be_main.c 	if (skb->len <= 32) {
skb              1205 drivers/net/ethernet/emulex/benet/be_main.c 		if (skb_put_padto(skb, 36))
skb              1210 drivers/net/ethernet/emulex/benet/be_main.c 		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
skb              1211 drivers/net/ethernet/emulex/benet/be_main.c 		if (!skb)
skb              1218 drivers/net/ethernet/emulex/benet/be_main.c 	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
skb              1219 drivers/net/ethernet/emulex/benet/be_main.c 	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
skb              1222 drivers/net/ethernet/emulex/benet/be_main.c 	return skb;
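
be_xmit_workarounds() pads sub-33-byte frames up to 36 bytes with skb_put_padto(), whose key property is that it frees the skb itself on failure, so a non-zero return means "already gone". A sketch of that contract (the 32/36 values are the BE-specific floor from the lines above):

	static struct sk_buff *my_pad_runt(struct sk_buff *skb)
	{
		/* skb_put_padto() zero-extends the tail; on allocation
		 * failure it frees the skb, so we must not touch it again.
		 */
		if (skb->len <= 32 && skb_put_padto(skb, 36))
			return NULL;
		return skb;
	}
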
skb              1266 drivers/net/ethernet/emulex/benet/be_main.c #define is_arp_allowed_on_bmc(adapter, skb)	\
skb              1267 drivers/net/ethernet/emulex/benet/be_main.c 	(is_arp(skb) && is_arp_filt_enabled(adapter))
skb              1269 drivers/net/ethernet/emulex/benet/be_main.c #define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))
skb              1300 drivers/net/ethernet/emulex/benet/be_main.c 			       struct sk_buff **skb)
skb              1302 drivers/net/ethernet/emulex/benet/be_main.c 	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
skb              1313 drivers/net/ethernet/emulex/benet/be_main.c 	    is_arp_allowed_on_bmc(adapter, (*skb))) {
skb              1318 drivers/net/ethernet/emulex/benet/be_main.c 	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
skb              1319 drivers/net/ethernet/emulex/benet/be_main.c 		struct ipv6hdr *hdr = ipv6_hdr((*skb));
skb              1323 drivers/net/ethernet/emulex/benet/be_main.c 			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
skb              1338 drivers/net/ethernet/emulex/benet/be_main.c 	if (is_udp_pkt((*skb))) {
skb              1339 drivers/net/ethernet/emulex/benet/be_main.c 		struct udphdr *udp = udp_hdr((*skb));
skb              1364 drivers/net/ethernet/emulex/benet/be_main.c 		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
skb              1369 drivers/net/ethernet/emulex/benet/be_main.c static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              1372 drivers/net/ethernet/emulex/benet/be_main.c 	u16 q_idx = skb_get_queue_mapping(skb);
skb              1378 drivers/net/ethernet/emulex/benet/be_main.c 	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
skb              1379 drivers/net/ethernet/emulex/benet/be_main.c 	if (unlikely(!skb))
skb              1382 drivers/net/ethernet/emulex/benet/be_main.c 	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
skb              1384 drivers/net/ethernet/emulex/benet/be_main.c 	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
skb              1386 drivers/net/ethernet/emulex/benet/be_main.c 		dev_kfree_skb_any(skb);
skb              1393 drivers/net/ethernet/emulex/benet/be_main.c 	if (be_send_pkt_to_bmc(adapter, &skb)) {
skb              1395 drivers/net/ethernet/emulex/benet/be_main.c 		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
skb              1399 drivers/net/ethernet/emulex/benet/be_main.c 			skb_get(skb);
skb              1425 drivers/net/ethernet/emulex/benet/be_main.c 	struct sk_buff *skb;
skb              1462 drivers/net/ethernet/emulex/benet/be_main.c 				skb = txo->sent_skb_list[j];
skb              1463 drivers/net/ethernet/emulex/benet/be_main.c 				if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
skb              1464 drivers/net/ethernet/emulex/benet/be_main.c 					tcphdr = tcp_hdr(skb);
skb              1473 drivers/net/ethernet/emulex/benet/be_main.c 				} else if (ip_hdr(skb)->protocol ==
skb              1475 drivers/net/ethernet/emulex/benet/be_main.c 					udphdr = udp_hdr(skb);
skb              1482 drivers/net/ethernet/emulex/benet/be_main.c 					 j, skb, skb->len, skb->protocol);
skb              2322 drivers/net/ethernet/emulex/benet/be_main.c static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
skb              2337 drivers/net/ethernet/emulex/benet/be_main.c 	skb->len = curr_frag_len;
skb              2339 drivers/net/ethernet/emulex/benet/be_main.c 		memcpy(skb->data, start, curr_frag_len);
skb              2342 drivers/net/ethernet/emulex/benet/be_main.c 		skb->data_len = 0;
skb              2343 drivers/net/ethernet/emulex/benet/be_main.c 		skb->tail += curr_frag_len;
skb              2346 drivers/net/ethernet/emulex/benet/be_main.c 		memcpy(skb->data, start, hdr_len);
skb              2347 drivers/net/ethernet/emulex/benet/be_main.c 		skb_shinfo(skb)->nr_frags = 1;
skb              2348 drivers/net/ethernet/emulex/benet/be_main.c 		skb_frag_set_page(skb, 0, page_info->page);
skb              2349 drivers/net/ethernet/emulex/benet/be_main.c 		skb_frag_off_set(&skb_shinfo(skb)->frags[0],
skb              2351 drivers/net/ethernet/emulex/benet/be_main.c 		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
skb              2353 drivers/net/ethernet/emulex/benet/be_main.c 		skb->data_len = curr_frag_len - hdr_len;
skb              2354 drivers/net/ethernet/emulex/benet/be_main.c 		skb->truesize += rx_frag_size;
skb              2355 drivers/net/ethernet/emulex/benet/be_main.c 		skb->tail += hdr_len;
skb              2374 drivers/net/ethernet/emulex/benet/be_main.c 			skb_frag_set_page(skb, j, page_info->page);
skb              2375 drivers/net/ethernet/emulex/benet/be_main.c 			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
skb              2377 drivers/net/ethernet/emulex/benet/be_main.c 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb              2378 drivers/net/ethernet/emulex/benet/be_main.c 			skb_shinfo(skb)->nr_frags++;
skb              2383 drivers/net/ethernet/emulex/benet/be_main.c 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb              2384 drivers/net/ethernet/emulex/benet/be_main.c 		skb->len += curr_frag_len;
skb              2385 drivers/net/ethernet/emulex/benet/be_main.c 		skb->data_len += curr_frag_len;
skb              2386 drivers/net/ethernet/emulex/benet/be_main.c 		skb->truesize += rx_frag_size;
skb              2399 drivers/net/ethernet/emulex/benet/be_main.c 	struct sk_buff *skb;
skb              2401 drivers/net/ethernet/emulex/benet/be_main.c 	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
skb              2402 drivers/net/ethernet/emulex/benet/be_main.c 	if (unlikely(!skb)) {
skb              2408 drivers/net/ethernet/emulex/benet/be_main.c 	skb_fill_rx_data(rxo, skb, rxcp);
skb              2411 drivers/net/ethernet/emulex/benet/be_main.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2413 drivers/net/ethernet/emulex/benet/be_main.c 		skb_checksum_none_assert(skb);
skb              2415 drivers/net/ethernet/emulex/benet/be_main.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              2416 drivers/net/ethernet/emulex/benet/be_main.c 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
skb              2418 drivers/net/ethernet/emulex/benet/be_main.c 		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
skb              2420 drivers/net/ethernet/emulex/benet/be_main.c 	skb->csum_level = rxcp->tunneled;
skb              2421 drivers/net/ethernet/emulex/benet/be_main.c 	skb_mark_napi_id(skb, napi);
skb              2424 drivers/net/ethernet/emulex/benet/be_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
skb              2426 drivers/net/ethernet/emulex/benet/be_main.c 	netif_receive_skb(skb);
skb              2436 drivers/net/ethernet/emulex/benet/be_main.c 	struct sk_buff *skb = NULL;
skb              2440 drivers/net/ethernet/emulex/benet/be_main.c 	skb = napi_get_frags(napi);
skb              2441 drivers/net/ethernet/emulex/benet/be_main.c 	if (!skb) {
skb              2456 drivers/net/ethernet/emulex/benet/be_main.c 			skb_frag_set_page(skb, j, page_info->page);
skb              2457 drivers/net/ethernet/emulex/benet/be_main.c 			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
skb              2459 drivers/net/ethernet/emulex/benet/be_main.c 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb              2463 drivers/net/ethernet/emulex/benet/be_main.c 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb              2464 drivers/net/ethernet/emulex/benet/be_main.c 		skb->truesize += rx_frag_size;
skb              2470 drivers/net/ethernet/emulex/benet/be_main.c 	skb_shinfo(skb)->nr_frags = j + 1;
skb              2471 drivers/net/ethernet/emulex/benet/be_main.c 	skb->len = rxcp->pkt_size;
skb              2472 drivers/net/ethernet/emulex/benet/be_main.c 	skb->data_len = rxcp->pkt_size;
skb              2473 drivers/net/ethernet/emulex/benet/be_main.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2474 drivers/net/ethernet/emulex/benet/be_main.c 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
skb              2476 drivers/net/ethernet/emulex/benet/be_main.c 		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
skb              2478 drivers/net/ethernet/emulex/benet/be_main.c 	skb->csum_level = rxcp->tunneled;
skb              2481 drivers/net/ethernet/emulex/benet/be_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
skb              2753 drivers/net/ethernet/emulex/benet/be_main.c 	struct sk_buff *skb = NULL;
skb              2762 drivers/net/ethernet/emulex/benet/be_main.c 			if (skb)
skb              2763 drivers/net/ethernet/emulex/benet/be_main.c 				dev_consume_skb_any(skb);
skb              2764 drivers/net/ethernet/emulex/benet/be_main.c 			skb = sent_skbs[txq->tail];
skb              2773 drivers/net/ethernet/emulex/benet/be_main.c 			      (unmap_skb_hdr && skb_headlen(skb)));
skb              2778 drivers/net/ethernet/emulex/benet/be_main.c 	dev_consume_skb_any(skb);
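
The GRO receive path above uses napi_get_frags(): NAPI hands out a pre-allocated, header-less skb, the driver attaches page fragments and accounts len/data_len/truesize itself, and napi_gro_frags() consumes the skb. A one-fragment sketch of that flow (the page bookkeeping parameters are stand-ins for the driver's page_info):

	static void my_rx_gro_one(struct napi_struct *napi, struct page *page,
				  unsigned int off, unsigned int frag_len,
				  unsigned int truesize)
	{
		struct sk_buff *skb = napi_get_frags(napi);

		if (unlikely(!skb))
			return;			/* drop: no skb available */
		skb_fill_page_desc(skb, 0, page, off, frag_len);
		skb->len = frag_len;
		skb->data_len = frag_len;
		skb->truesize += truesize;
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hw-verified, as above */
		napi_gro_frags(napi);		/* consumes the frags skb */
	}
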
skb              5009 drivers/net/ethernet/emulex/benet/be_main.c static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
skb              5034 drivers/net/ethernet/emulex/benet/be_main.c 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
skb              5198 drivers/net/ethernet/emulex/benet/be_main.c static netdev_features_t be_features_check(struct sk_buff *skb,
skb              5205 drivers/net/ethernet/emulex/benet/be_main.c 	if (skb_is_gso(skb)) {
skb              5209 drivers/net/ethernet/emulex/benet/be_main.c 		if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
skb              5217 drivers/net/ethernet/emulex/benet/be_main.c 		    (skb_shinfo(skb)->gso_size < 256 ||
skb              5218 drivers/net/ethernet/emulex/benet/be_main.c 		     skb_shinfo(skb)->gso_segs == 1))
skb              5226 drivers/net/ethernet/emulex/benet/be_main.c 	features = vlan_features_check(skb, features);
skb              5227 drivers/net/ethernet/emulex/benet/be_main.c 	if (!skb->encapsulation ||
skb              5237 drivers/net/ethernet/emulex/benet/be_main.c 	switch (vlan_get_protocol(skb)) {
skb              5239 drivers/net/ethernet/emulex/benet/be_main.c 		l4_hdr = ip_hdr(skb)->protocol;
skb              5242 drivers/net/ethernet/emulex/benet/be_main.c 		l4_hdr = ipv6_hdr(skb)->nexthdr;
skb              5249 drivers/net/ethernet/emulex/benet/be_main.c 	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb              5250 drivers/net/ethernet/emulex/benet/be_main.c 	    skb->inner_protocol != htons(ETH_P_TEB) ||
skb              5251 drivers/net/ethernet/emulex/benet/be_main.c 	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
skb              5254 drivers/net/ethernet/emulex/benet/be_main.c 	    udp_hdr(skb)->dest != adapter->vxlan_port)
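
be_features_check() illustrates the ndo_features_check contract: when an encapsulated frame is not one the NIC can parse (here, anything other than the offloaded VXLAN port), the driver strips the checksum and GSO feature bits so the stack falls back to software. A reduced sketch of that shape, not the driver's full check; vxlan_port is a stand-in for the adapter's programmed port, and header offsets are assumed valid as on a real TX path:

	static netdev_features_t my_features_check(struct sk_buff *skb,
						   struct net_device *dev,
						   netdev_features_t features,
						   __be16 vxlan_port)
	{
		if (!skb->encapsulation)
			return features;
		if (vlan_get_protocol(skb) == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP &&
		    udp_hdr(skb)->dest == vxlan_port)
			return features;	/* offloadable tunnel */
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
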
skb               443 drivers/net/ethernet/ethoc.c 			struct sk_buff *skb;
skb               446 drivers/net/ethernet/ethoc.c 			skb = netdev_alloc_skb_ip_align(dev, size);
skb               448 drivers/net/ethernet/ethoc.c 			if (likely(skb)) {
skb               450 drivers/net/ethernet/ethoc.c 				memcpy_fromio(skb_put(skb, size), src, size);
skb               451 drivers/net/ethernet/ethoc.c 				skb->protocol = eth_type_trans(skb, dev);
skb               454 drivers/net/ethernet/ethoc.c 				netif_receive_skb(skb);
skb               880 drivers/net/ethernet/ethoc.c static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               887 drivers/net/ethernet/ethoc.c 	if (skb_put_padto(skb, ETHOC_ZLEN)) {
skb               892 drivers/net/ethernet/ethoc.c 	if (unlikely(skb->len > ETHOC_BUFSIZ)) {
skb               902 drivers/net/ethernet/ethoc.c 	if (unlikely(skb->len < ETHOC_ZLEN))
skb               908 drivers/net/ethernet/ethoc.c 	memcpy_toio(dest, skb->data, skb->len);
skb               911 drivers/net/ethernet/ethoc.c 	bd.stat |= TX_BD_LEN(skb->len);
skb               923 drivers/net/ethernet/ethoc.c 	skb_tx_timestamp(skb);
skb               925 drivers/net/ethernet/ethoc.c 	dev_kfree_skb(skb);
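
ethoc.c copies frames into a device window with memcpy_toio() and bounds-checks against its fixed buffer size; note skb_put_padto() again frees the skb on failure. A sketch (MY_BUFSIZ stands in for ETHOC_BUFSIZ, and ETH_ZLEN for ETHOC_ZLEN):

	#define MY_BUFSIZ 1536		/* stand-in for ETHOC_BUFSIZ */

	static netdev_tx_t my_mmio_xmit(struct sk_buff *skb,
					struct net_device *dev,
					void __iomem *dest)
	{
		if (skb_put_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;	/* skb already freed */
		if (unlikely(skb->len > MY_BUFSIZ)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		memcpy_toio(dest, skb->data, skb->len);	/* into MMIO buffer */
		skb_tx_timestamp(skb);		/* after data is committed */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
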
skb                68 drivers/net/ethernet/ezchip/nps_enet.c 	struct sk_buff *skb;
skb               107 drivers/net/ethernet/ezchip/nps_enet.c 	skb = netdev_alloc_skb_ip_align(ndev, frame_len);
skb               108 drivers/net/ethernet/ezchip/nps_enet.c 	if (unlikely(!skb)) {
skb               115 drivers/net/ethernet/ezchip/nps_enet.c 	nps_enet_read_rx_fifo(ndev, skb->data, frame_len);
skb               117 drivers/net/ethernet/ezchip/nps_enet.c 	skb_put(skb, frame_len);
skb               118 drivers/net/ethernet/ezchip/nps_enet.c 	skb->protocol = eth_type_trans(skb, ndev);
skb               119 drivers/net/ethernet/ezchip/nps_enet.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               123 drivers/net/ethernet/ezchip/nps_enet.c 	netif_receive_skb(skb);
skb               378 drivers/net/ethernet/ezchip/nps_enet.c 				struct sk_buff *skb)
skb               382 drivers/net/ethernet/ezchip/nps_enet.c 	short length = skb->len;
skb               384 drivers/net/ethernet/ezchip/nps_enet.c 	u32 *src = (void *)skb->data;
skb               532 drivers/net/ethernet/ezchip/nps_enet.c static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
skb               540 drivers/net/ethernet/ezchip/nps_enet.c 	priv->tx_skb = skb;
skb               547 drivers/net/ethernet/ezchip/nps_enet.c 	nps_enet_send_frame(ndev, skb);
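
nps_enet.c reads each frame out of a register FIFO into a freshly allocated, IP-aligned skb and marks the checksum as already validated, as the lines above show. A sketch with the FIFO reader abstracted out (my_read_fifo() is hypothetical):

	static void my_fifo_rx(struct net_device *ndev, u32 frame_len)
	{
		struct sk_buff *skb = netdev_alloc_skb_ip_align(ndev,
								frame_len);

		if (unlikely(!skb)) {
			ndev->stats.rx_dropped++;	/* FIFO still drained */
			return;
		}
		my_read_fifo(ndev, skb->data, frame_len);	/* hypothetical */
		skb_put(skb, frame_len);
		skb->protocol = eth_type_trans(skb, ndev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		netif_receive_skb(skb);
	}
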
skb               386 drivers/net/ethernet/faraday/ftgmac100.c 	struct sk_buff *skb;
skb               390 drivers/net/ethernet/faraday/ftgmac100.c 	skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
skb               391 drivers/net/ethernet/faraday/ftgmac100.c 	if (unlikely(!skb)) {
skb               397 drivers/net/ethernet/faraday/ftgmac100.c 		map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
skb               402 drivers/net/ethernet/faraday/ftgmac100.c 			dev_kfree_skb_any(skb);
skb               404 drivers/net/ethernet/faraday/ftgmac100.c 			skb = NULL;
skb               410 drivers/net/ethernet/faraday/ftgmac100.c 	priv->rx_skbs[entry] = skb;
skb               453 drivers/net/ethernet/faraday/ftgmac100.c 	struct sk_buff *skb;
skb               503 drivers/net/ethernet/faraday/ftgmac100.c 	skb = priv->rx_skbs[pointer];
skb               504 drivers/net/ethernet/faraday/ftgmac100.c 	if (!unlikely(skb)) {
skb               524 drivers/net/ethernet/faraday/ftgmac100.c 			skb->ip_summed = CHECKSUM_NONE;
skb               526 drivers/net/ethernet/faraday/ftgmac100.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               530 drivers/net/ethernet/faraday/ftgmac100.c 	skb_put(skb, size);
skb               535 drivers/net/ethernet/faraday/ftgmac100.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               556 drivers/net/ethernet/faraday/ftgmac100.c 	skb->protocol = eth_type_trans(skb, netdev);
skb               562 drivers/net/ethernet/faraday/ftgmac100.c 	if (skb->ip_summed == CHECKSUM_NONE)
skb               563 drivers/net/ethernet/faraday/ftgmac100.c 		netif_receive_skb(skb);
skb               565 drivers/net/ethernet/faraday/ftgmac100.c 		napi_gro_receive(&priv->napi, skb);
skb               612 drivers/net/ethernet/faraday/ftgmac100.c 				     struct sk_buff *skb,
skb               620 drivers/net/ethernet/faraday/ftgmac100.c 		len = skb_headlen(skb);
skb               629 drivers/net/ethernet/faraday/ftgmac100.c 		dev_kfree_skb(skb);
skb               637 drivers/net/ethernet/faraday/ftgmac100.c 	struct sk_buff *skb;
skb               648 drivers/net/ethernet/faraday/ftgmac100.c 	skb = priv->tx_skbs[pointer];
skb               650 drivers/net/ethernet/faraday/ftgmac100.c 	netdev->stats.tx_bytes += skb->len;
skb               651 drivers/net/ethernet/faraday/ftgmac100.c 	ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
skb               683 drivers/net/ethernet/faraday/ftgmac100.c static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
skb               685 drivers/net/ethernet/faraday/ftgmac100.c 	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
skb               686 drivers/net/ethernet/faraday/ftgmac100.c 		u8 ip_proto = ip_hdr(skb)->protocol;
skb               700 drivers/net/ethernet/faraday/ftgmac100.c 	return skb_checksum_help(skb) == 0;
skb               703 drivers/net/ethernet/faraday/ftgmac100.c static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
skb               713 drivers/net/ethernet/faraday/ftgmac100.c 	if (eth_skb_pad(skb)) {
skb               719 drivers/net/ethernet/faraday/ftgmac100.c 	if (unlikely(skb->len > MAX_PKT_SIZE)) {
skb               728 drivers/net/ethernet/faraday/ftgmac100.c 	nfrags = skb_shinfo(skb)->nr_frags;
skb               732 drivers/net/ethernet/faraday/ftgmac100.c 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb               733 drivers/net/ethernet/faraday/ftgmac100.c 	    !ftgmac100_prep_tx_csum(skb, &csum_vlan))
skb               737 drivers/net/ethernet/faraday/ftgmac100.c 	if (skb_vlan_tag_present(skb)) {
skb               739 drivers/net/ethernet/faraday/ftgmac100.c 		csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
skb               743 drivers/net/ethernet/faraday/ftgmac100.c 	len = skb_headlen(skb);
skb               746 drivers/net/ethernet/faraday/ftgmac100.c 	map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
skb               760 drivers/net/ethernet/faraday/ftgmac100.c 	priv->tx_skbs[pointer] = skb;
skb               775 drivers/net/ethernet/faraday/ftgmac100.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               786 drivers/net/ethernet/faraday/ftgmac100.c 		priv->tx_skbs[pointer] = skb;
skb               833 drivers/net/ethernet/faraday/ftgmac100.c 	ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
skb               841 drivers/net/ethernet/faraday/ftgmac100.c 		ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
skb               851 drivers/net/ethernet/faraday/ftgmac100.c 	dev_kfree_skb_any(skb);
skb               864 drivers/net/ethernet/faraday/ftgmac100.c 		struct sk_buff *skb = priv->rx_skbs[i];
skb               867 drivers/net/ethernet/faraday/ftgmac100.c 		if (!skb)
skb               872 drivers/net/ethernet/faraday/ftgmac100.c 		dev_kfree_skb_any(skb);
skb               878 drivers/net/ethernet/faraday/ftgmac100.c 		struct sk_buff *skb = priv->tx_skbs[i];
skb               880 drivers/net/ethernet/faraday/ftgmac100.c 		if (!skb)
skb               882 drivers/net/ethernet/faraday/ftgmac100.c 		ftgmac100_free_tx_packet(priv, i, skb, txdes,
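
ftgmac100's transmit maps the linear head with dma_map_single() and each page fragment with skb_frag_dma_map(); if any mapping fails, everything mapped so far must be released before the skb is dropped (dropped, not requeued, since NETDEV_TX_BUSY would just retry the same failure). A sketch of that map-then-unwind structure (my_fill_desc() and my_unmap_descs() are hypothetical descriptor helpers):

	static netdev_tx_t my_frag_xmit(struct device *dev, struct sk_buff *skb)
	{
		unsigned int i, nfrags = skb_shinfo(skb)->nr_frags;
		dma_addr_t map;

		map = dma_map_single(dev, skb->data, skb_headlen(skb),
				     DMA_TO_DEVICE);
		if (dma_mapping_error(dev, map))
			goto drop;
		my_fill_desc(0, map, skb_headlen(skb));

		for (i = 0; i < nfrags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			map = skb_frag_dma_map(dev, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
			if (dma_mapping_error(dev, map))
				goto unwind;
			my_fill_desc(i + 1, map, skb_frag_size(frag));
		}
		return NETDEV_TX_OK;

	unwind:
		my_unmap_descs(0, i + 1);	/* head + the i frags mapped */
	drop:
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;		/* drop, don't requeue */
	}
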
skb               389 drivers/net/ethernet/faraday/ftmac100.c 	struct sk_buff *skb;
skb               412 drivers/net/ethernet/faraday/ftmac100.c 	skb = netdev_alloc_skb_ip_align(netdev, 128);
skb               413 drivers/net/ethernet/faraday/ftmac100.c 	if (unlikely(!skb)) {
skb               429 drivers/net/ethernet/faraday/ftmac100.c 	skb_fill_page_desc(skb, 0, page, 0, length);
skb               430 drivers/net/ethernet/faraday/ftmac100.c 	skb->len += length;
skb               431 drivers/net/ethernet/faraday/ftmac100.c 	skb->data_len += length;
skb               434 drivers/net/ethernet/faraday/ftmac100.c 		skb->truesize += PAGE_SIZE;
skb               436 drivers/net/ethernet/faraday/ftmac100.c 		__pskb_pull_tail(skb, ETH_HLEN);
skb               439 drivers/net/ethernet/faraday/ftmac100.c 		__pskb_pull_tail(skb, length);
skb               445 drivers/net/ethernet/faraday/ftmac100.c 	skb->protocol = eth_type_trans(skb, netdev);
skb               448 drivers/net/ethernet/faraday/ftmac100.c 	netdev->stats.rx_bytes += skb->len;
skb               451 drivers/net/ethernet/faraday/ftmac100.c 	netif_receive_skb(skb);
skb               535 drivers/net/ethernet/faraday/ftmac100.c static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb)
skb               537 drivers/net/ethernet/faraday/ftmac100.c 	txdes->txdes3 = (unsigned int)skb;
skb               577 drivers/net/ethernet/faraday/ftmac100.c 	struct sk_buff *skb;
skb               588 drivers/net/ethernet/faraday/ftmac100.c 	skb = ftmac100_txdes_get_skb(txdes);
skb               600 drivers/net/ethernet/faraday/ftmac100.c 		netdev->stats.tx_bytes += skb->len;
skb               603 drivers/net/ethernet/faraday/ftmac100.c 	dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
skb               604 drivers/net/ethernet/faraday/ftmac100.c 	dev_kfree_skb(skb);
skb               624 drivers/net/ethernet/faraday/ftmac100.c static netdev_tx_t ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
skb               629 drivers/net/ethernet/faraday/ftmac100.c 	unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
skb               635 drivers/net/ethernet/faraday/ftmac100.c 	ftmac100_txdes_set_skb(txdes, skb);
skb               706 drivers/net/ethernet/faraday/ftmac100.c 		struct sk_buff *skb = ftmac100_txdes_get_skb(txdes);
skb               709 drivers/net/ethernet/faraday/ftmac100.c 		if (!skb)
skb               712 drivers/net/ethernet/faraday/ftmac100.c 		dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
skb               713 drivers/net/ethernet/faraday/ftmac100.c 		dev_kfree_skb(skb);
skb              1005 drivers/net/ethernet/faraday/ftmac100.c ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              1010 drivers/net/ethernet/faraday/ftmac100.c 	if (unlikely(skb->len > MAX_PKT_SIZE)) {
skb              1015 drivers/net/ethernet/faraday/ftmac100.c 		dev_kfree_skb(skb);
skb              1019 drivers/net/ethernet/faraday/ftmac100.c 	map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
skb              1026 drivers/net/ethernet/faraday/ftmac100.c 		dev_kfree_skb(skb);
skb              1030 drivers/net/ethernet/faraday/ftmac100.c 	return ftmac100_xmit(priv, skb, map);
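
ftmac100 stashes the skb pointer in a spare descriptor word (txdes3) instead of keeping a parallel array, and the completion path reads it back. The (unsigned int) cast only round-trips on 32-bit machines, which is what this hardware targets. A sketch:

	struct my_txdes {
		unsigned int txdes0, txdes1, txdes2;
		unsigned int txdes3;	/* driver-owned: holds the skb */
	};

	static void my_txdes_set_skb(struct my_txdes *txdes,
				     struct sk_buff *skb)
	{
		txdes->txdes3 = (unsigned int)skb;	/* 32-bit only */
	}

	static struct sk_buff *my_txdes_get_skb(struct my_txdes *txdes)
	{
		return (struct sk_buff *)txdes->txdes3;
	}
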
skb               433 drivers/net/ethernet/fealnx.c static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
skb              1063 drivers/net/ethernet/fealnx.c 		struct sk_buff *skb;
skb              1065 drivers/net/ethernet/fealnx.c 		skb = netdev_alloc_skb(dev, np->rx_buf_sz);
skb              1066 drivers/net/ethernet/fealnx.c 		if (skb == NULL)
skb              1072 drivers/net/ethernet/fealnx.c 		np->lack_rxbuf->skbuff = skb;
skb              1073 drivers/net/ethernet/fealnx.c 		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
skb              1259 drivers/net/ethernet/fealnx.c 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
skb              1261 drivers/net/ethernet/fealnx.c 		if (skb == NULL) {
skb              1267 drivers/net/ethernet/fealnx.c 		np->rx_ring[i].skbuff = skb;
skb              1268 drivers/net/ethernet/fealnx.c 		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
skb              1295 drivers/net/ethernet/fealnx.c static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
skb              1302 drivers/net/ethernet/fealnx.c 	np->cur_tx_copy->skbuff = skb;
skb              1307 drivers/net/ethernet/fealnx.c 	np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
skb              1308 drivers/net/ethernet/fealnx.c 		skb->len, PCI_DMA_TODEVICE);
skb              1310 drivers/net/ethernet/fealnx.c 	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
skb              1311 drivers/net/ethernet/fealnx.c 	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
skb              1319 drivers/net/ethernet/fealnx.c 	if (skb->len > BPT) {
skb              1323 drivers/net/ethernet/fealnx.c 		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
skb              1326 drivers/net/ethernet/fealnx.c 		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
skb              1331 drivers/net/ethernet/fealnx.c 		next->skbuff = skb;
skb              1333 drivers/net/ethernet/fealnx.c 		next->control |= (skb->len << PKTSShift);	/* pkt size */
skb              1334 drivers/net/ethernet/fealnx.c 		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
skb              1338 drivers/net/ethernet/fealnx.c 		next->buffer = pci_map_single(ep->pci_dev, skb->data + BPT,
skb              1339 drivers/net/ethernet/fealnx.c                                 skb->len - BPT, PCI_DMA_TODEVICE);
skb              1347 drivers/net/ethernet/fealnx.c 		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
skb              1348 drivers/net/ethernet/fealnx.c 			skb->len, PCI_DMA_TODEVICE);
skb              1350 drivers/net/ethernet/fealnx.c 		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
skb              1351 drivers/net/ethernet/fealnx.c 		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
skb              1684 drivers/net/ethernet/fealnx.c 			struct sk_buff *skb;
skb              1697 drivers/net/ethernet/fealnx.c 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb              1698 drivers/net/ethernet/fealnx.c 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
skb              1706 drivers/net/ethernet/fealnx.c 				skb_copy_to_linear_data(skb,
skb              1708 drivers/net/ethernet/fealnx.c 				skb_put(skb, pkt_len);
skb              1710 drivers/net/ethernet/fealnx.c 				skb_put_data(skb, np->cur_rx->skbuff->data,
skb              1722 drivers/net/ethernet/fealnx.c 				skb_put(skb = np->cur_rx->skbuff, pkt_len);
skb              1726 drivers/net/ethernet/fealnx.c 			skb->protocol = eth_type_trans(skb, dev);
skb              1727 drivers/net/ethernet/fealnx.c 			netif_rx(skb);
skb              1910 drivers/net/ethernet/fealnx.c 		struct sk_buff *skb = np->rx_ring[i].skbuff;
skb              1913 drivers/net/ethernet/fealnx.c 		if (skb) {
skb              1916 drivers/net/ethernet/fealnx.c 			dev_kfree_skb(skb);
skb              1922 drivers/net/ethernet/fealnx.c 		struct sk_buff *skb = np->tx_ring[i].skbuff;
skb              1924 drivers/net/ethernet/fealnx.c 		if (skb) {
skb              1926 drivers/net/ethernet/fealnx.c 				skb->len, PCI_DMA_TODEVICE);
skb              1927 drivers/net/ethernet/fealnx.c 			dev_kfree_skb(skb);
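
fealnx.c splits any frame longer than BPT across two descriptors: the first maps the initial BPT bytes of skb->data, the second maps the remainder at skb->data + BPT, the full packet size is repeated in both control words, and the skb pointer is stored only on the final descriptor so it is freed exactly once. A sketch of the split (the descriptor fields are stand-ins; BPT, PKTSShift, and TBSShift reuse the driver's constants):

	static void my_split_tx(struct my_desc *first, struct my_desc *second,
				struct pci_dev *pdev, struct sk_buff *skb)
	{
		first->buffer = pci_map_single(pdev, skb->data, BPT,
					       PCI_DMA_TODEVICE);
		first->control |= (skb->len << PKTSShift) | (BPT << TBSShift);

		second->skbuff = skb;	/* freed once, at the last descriptor */
		second->buffer = pci_map_single(pdev, skb->data + BPT,
						skb->len - BPT,
						PCI_DMA_TODEVICE);
		second->control |= (skb->len << PKTSShift) |
				   ((skb->len - BPT) << TBSShift);
	}
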
skb              1397 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			       struct sk_buff *skb,
skb              1402 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	u16 ethertype = ntohs(skb->protocol);
skb              1408 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1425 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		skb_reset_mac_header(skb);
skb              1426 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
skb              1435 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		iph = ip_hdr(skb);
skb              1441 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		ipv6h = ipv6_hdr(skb);
skb              1450 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 				    ntohs(skb->protocol));
skb              1473 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	parse_result->ip_off[0] = (u8)skb_network_offset(skb);
skb              1474 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	parse_result->l4_off = (u8)skb_transport_offset(skb);
skb              1618 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	struct sk_buff **skbh, *skb;
skb              1623 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb = *skbh;
skb              1626 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		nr_frags = skb_shinfo(skb)->nr_frags;
skb              1649 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 				 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
skb              1654 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
skb              1660 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			skb_tstamp_tx(skb, &shhwtstamps);
skb              1670 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	return skb;
skb              1700 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	struct sk_buff *skb;
skb              1710 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb = build_skb(vaddr, dpaa_bp->size +
skb              1712 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
skb              1715 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb_reserve(skb, fd_off);
skb              1716 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb_put(skb, qm_fd_get_length(fd));
skb              1718 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb->ip_summed = rx_csum_offload(priv, fd);
skb              1720 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	return skb;
skb              1742 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	struct sk_buff *skb;
skb              1754 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb = NULL;
skb              1772 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		if (!skb) {
skb              1775 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			skb = build_skb(sg_vaddr, sz);
skb              1776 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			if (WARN_ON(!skb))
skb              1779 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			skb->ip_summed = rx_csum_offload(priv, fd);
skb              1785 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			skb_reserve(skb, fd_off);
skb              1786 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
skb              1811 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			skb_add_rx_frag(skb, i - 1, head_page, frag_off,
skb              1825 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	return skb;
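
contig_fd_to_skb() and sg_fd_to_skb() above rebuild an skb around buffers the hardware already filled: build_skb() for a contiguous frame, build_skb() on the first scatter/gather entry plus skb_add_rx_frag() for the rest. A sketch of the contiguous case, with fd_off/fd_len/buf_size standing in for the qm_fd accessors; the buffer-sizing convention here is an assumption of the sketch:

#include <linux/skbuff.h>

/* vaddr: kernel mapping of the frame buffer; fd_off/fd_len come from
 * the hardware frame descriptor (names hypothetical). */
static struct sk_buff *demo_contig_fd_to_skb(void *vaddr, u32 fd_off,
					     u32 fd_len, u32 buf_size)
{
	struct sk_buff *skb;

	/* build_skb() wraps the existing buffer instead of copying it;
	 * the size must leave room for the shared-info area appended
	 * at the end of the buffer. */
	skb = build_skb(vaddr, buf_size +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_off);	/* headroom the hardware left */
	skb_put(skb, fd_len);		/* payload length written by DMA */
	return skb;
}
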
skb              1857 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			    struct sk_buff *skb, struct qm_fd *fd,
skb              1872 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	buffer_start = skb->data - priv->tx_headroom;
skb              1876 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	*skbh = skb;
skb              1883 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	err = dpaa_enable_tx_csum(priv, skb, fd,
skb              1893 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
skb              1898 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			      skb_tail_pointer(skb) - buffer_start, dma_dir);
skb              1910 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			struct sk_buff *skb, struct qm_fd *fd)
skb              1913 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	const int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1939 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	err = dpaa_enable_tx_csum(priv, skb, fd,
skb              1950 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	frag_len = skb_headlen(skb);
skb              1954 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	addr = dma_map_single(dev, skb->data,
skb              1955 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			      skb_headlen(skb), dma_dir);
skb              1965 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		frag = &skb_shinfo(skb)->frags[i];
skb              1987 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
skb              1992 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	*skbh = skb;
skb              2053 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
skb              2055 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	const int queue_mapping = skb_get_queue_mapping(skb);
skb              2056 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	bool nonlinear = skb_is_nonlinear(skb);
skb              2078 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		if (skb_cow_head(skb, priv->tx_headroom))
skb              2081 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		WARN_ON(skb_is_nonlinear(skb));
skb              2088 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		     (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
skb              2092 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		if (__skb_linearize(skb))
skb              2095 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		nonlinear = skb_is_nonlinear(skb);
skb              2100 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		err = skb_to_sg_fd(priv, skb, &fd);
skb              2104 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		err = skb_to_contig_fd(priv, skb, &fd, &offset);
skb              2114 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
skb              2116 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              2126 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	dev_kfree_skb(skb);
skb              2160 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	struct sk_buff *skb;
skb              2168 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb = dpaa_cleanup_tx_fd(priv, fd, false);
skb              2169 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	dev_kfree_skb(skb);
skb              2195 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	struct sk_buff	*skb;
skb              2208 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb = dpaa_cleanup_tx_fd(priv, fd, true);
skb              2210 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	consume_skb(skb);
skb              2270 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	struct sk_buff *skb;
skb              2332 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		skb = contig_fd_to_skb(priv, fd);
skb              2334 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		skb = sg_fd_to_skb(priv, fd);
skb              2335 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	if (!skb)
skb              2339 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		shhwtstamps = skb_hwtstamps(skb);
skb              2348 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb->protocol = eth_type_trans(skb, net_dev);
skb              2358 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)),
skb              2362 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb_len = skb->len;
skb              2364 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
skb              2428 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	struct sk_buff *skb;
skb              2438 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	skb = dpaa_cleanup_tx_fd(priv, fd, false);
skb              2439 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	dev_kfree_skb_any(skb);
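
dpaa_start_xmit() above dispatches between a contiguous frame descriptor and a scatter/gather one, linearizing only when the fragment count exceeds the S/G table, and flags the skb before enqueue so the TX-confirmation path knows to deliver a hardware timestamp. A minimal sketch of just that pre-enqueue decision; DEMO_SGT_MAX_ENTRIES is a hypothetical limit:

#include <linux/errno.h>
#include <linux/skbuff.h>

#define DEMO_SGT_MAX_ENTRIES	16	/* hypothetical S/G table size */

static int demo_tx_prep(struct sk_buff *skb)
{
	/* Too many fragments for one S/G table: fall back to a copy. */
	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags >= DEMO_SGT_MAX_ENTRIES &&
	    skb_linearize(skb))
		return -ENOMEM;		/* caller frees and drops */

	/* Mark the skb before it is enqueued, so the completion path
	 * knows a hardware TX timestamp is expected for it. */
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	return 0;	/* still-nonlinear skbs take the S/G builder */
}
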
skb                44 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			     struct sk_buff *skb)
skb                46 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	skb_checksum_none_assert(skb);
skb                58 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               106 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct sk_buff *skb = NULL;
skb               112 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
skb               113 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	if (unlikely(!skb))
skb               116 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	skb_reserve(skb, fd_offset);
skb               117 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	skb_put(skb, fd_length);
skb               119 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	return skb;
skb               127 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct sk_buff *skb = NULL;
skb               154 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
skb               155 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			if (unlikely(!skb)) {
skb               171 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			skb_reserve(skb, sg_offset);
skb               172 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			skb_put(skb, sg_length);
skb               187 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
skb               200 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	return skb;
skb               360 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct sk_buff *skb;
skb               394 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		skb = build_linear_skb(ch, fd, vaddr);
skb               400 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		skb = build_frag_skb(priv, ch, buf_data);
skb               409 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	if (unlikely(!skb))
skb               412 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	prefetch(skb->data);
skb               416 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
skb               429 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		validate_rx_csum(priv, status, skb);
skb               432 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	skb->protocol = eth_type_trans(skb, priv->net_dev);
skb               433 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	skb_record_rx_queue(skb, fq->flowid);
skb               438 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	list_add_tail(&skb->list, ch->rx_list);
skb               520 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		       struct sk_buff *skb,
skb               526 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb               548 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
skb               588 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	swa->sg.skb = skb;
skb               602 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	dpaa2_fd_set_len(fd, skb->len);
skb               605 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb               621 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			   struct sk_buff *skb,
skb               629 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
skb               636 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	if (aligned_start >= skb->head)
skb               645 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	swa->single.skb = skb;
skb               648 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			      skb_tail_pointer(skb) - buffer_start,
skb               654 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
skb               655 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	dpaa2_fd_set_len(fd, skb->len);
skb               659 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb               678 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct sk_buff *skb = NULL;
skb               690 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			skb = swa->single.skb;
skb               695 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 					 skb_tail_pointer(skb) - buffer_start,
skb               703 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		skb = swa->sg.skb;
skb               729 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
skb               738 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		skb_tstamp_tx(skb, &shhwtstamps);
skb               746 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	napi_consume_skb(skb, in_napi);
skb               749 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
skb               766 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
skb               767 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	if (skb_headroom(skb) < needed_headroom) {
skb               770 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		ns = skb_realloc_headroom(skb, needed_headroom);
skb               777 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		if (skb->sk)
skb               778 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			skb_set_owner_w(ns, skb->sk);
skb               780 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		dev_kfree_skb(skb);
skb               781 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		skb = ns;
skb               787 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	skb = skb_unshare(skb, GFP_ATOMIC);
skb               788 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	if (unlikely(!skb)) {
skb               797 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	if (skb_is_nonlinear(skb)) {
skb               798 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		err = build_sg_fd(priv, skb, &fd);
skb               800 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		percpu_extras->tx_sg_bytes += skb->len;
skb               802 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		err = build_single_fd(priv, skb, &fd);
skb               817 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	queue_mapping = skb_get_queue_mapping(skb);
skb               859 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	dev_kfree_skb(skb);
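
The dpaa2_eth_tx() fragments show the canonical headroom fix-up: when the skb lacks the headroom the hardware annotation area needs, reallocate, transfer socket ownership so socket memory accounting still sees the in-flight packet, and free the original. A sketch of that pattern:

#include <linux/skbuff.h>
#include <net/sock.h>

static struct sk_buff *demo_ensure_headroom(struct sk_buff *skb,
					    unsigned int needed)
{
	struct sk_buff *ns;

	if (skb_headroom(skb) >= needed)
		return skb;

	ns = skb_realloc_headroom(skb, needed);
	if (!ns) {
		dev_kfree_skb(skb);
		return NULL;
	}

	/* Keep socket accounting attached to the copy we transmit. */
	if (skb->sk)
		skb_set_owner_w(ns, skb->sk);

	dev_kfree_skb(skb);
	return ns;
}
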
skb               106 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h 			struct sk_buff *skb;
skb               109 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h 			struct sk_buff *skb;
skb               489 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h 				       struct sk_buff *skb)
skb               496 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h 	if (!skb)
skb               502 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h 	if (skb_is_nonlinear(skb))
skb               506 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb                16 drivers/net/ethernet/freescale/enetc/enetc.c static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
skb                19 drivers/net/ethernet/freescale/enetc/enetc.c netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
skb                25 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring = priv->tx_ring[skb->queue_mapping];
skb                27 drivers/net/ethernet/freescale/enetc/enetc.c 	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
skb                28 drivers/net/ethernet/freescale/enetc/enetc.c 		if (unlikely(skb_linearize(skb)))
skb                31 drivers/net/ethernet/freescale/enetc/enetc.c 	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
skb                37 drivers/net/ethernet/freescale/enetc/enetc.c 	count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
skb                47 drivers/net/ethernet/freescale/enetc/enetc.c 	dev_kfree_skb_any(skb);
skb                51 drivers/net/ethernet/freescale/enetc/enetc.c static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
skb                56 drivers/net/ethernet/freescale/enetc/enetc.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb                59 drivers/net/ethernet/freescale/enetc/enetc.c 	switch (skb->csum_offset) {
skb                67 drivers/net/ethernet/freescale/enetc/enetc.c 		skb_checksum_help(skb);
skb                71 drivers/net/ethernet/freescale/enetc/enetc.c 	l3_start = skb_network_offset(skb);
skb                72 drivers/net/ethernet/freescale/enetc/enetc.c 	l3_hsize = skb_network_header_len(skb);
skb                75 drivers/net/ethernet/freescale/enetc/enetc.c 	if (skb->protocol == htons(ETH_P_IPV6))
skb               103 drivers/net/ethernet/freescale/enetc/enetc.c 	if (tx_swbd->skb) {
skb               104 drivers/net/ethernet/freescale/enetc/enetc.c 		dev_kfree_skb_any(tx_swbd->skb);
skb               105 drivers/net/ethernet/freescale/enetc/enetc.c 		tx_swbd->skb = NULL;
skb               109 drivers/net/ethernet/freescale/enetc/enetc.c static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
skb               114 drivers/net/ethernet/freescale/enetc/enetc.c 	int len = skb_headlen(skb);
skb               127 drivers/net/ethernet/freescale/enetc/enetc.c 	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
skb               141 drivers/net/ethernet/freescale/enetc/enetc.c 	do_vlan = skb_vlan_tag_present(skb);
skb               143 drivers/net/ethernet/freescale/enetc/enetc.c 		    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
skb               150 drivers/net/ethernet/freescale/enetc/enetc.c 	if (enetc_tx_csum(skb, &temp_bd))
skb               154 drivers/net/ethernet/freescale/enetc/enetc.c 	temp_bd.frm_len = cpu_to_le16(skb->len);
skb               175 drivers/net/ethernet/freescale/enetc/enetc.c 			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
skb               181 drivers/net/ethernet/freescale/enetc/enetc.c 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb               189 drivers/net/ethernet/freescale/enetc/enetc.c 	frag = &skb_shinfo(skb)->frags[0];
skb               190 drivers/net/ethernet/freescale/enetc/enetc.c 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
skb               225 drivers/net/ethernet/freescale/enetc/enetc.c 	tx_ring->tx_swbd[i].skb = skb;
skb               320 drivers/net/ethernet/freescale/enetc/enetc.c static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
skb               324 drivers/net/ethernet/freescale/enetc/enetc.c 	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
skb               327 drivers/net/ethernet/freescale/enetc/enetc.c 		skb_tstamp_tx(skb, &shhwtstamps);
skb               347 drivers/net/ethernet/freescale/enetc/enetc.c 		bool is_eof = !!tx_swbd->skb;
skb               368 drivers/net/ethernet/freescale/enetc/enetc.c 				enetc_tstamp_tx(tx_swbd->skb, tstamp);
skb               371 drivers/net/ethernet/freescale/enetc/enetc.c 			napi_consume_skb(tx_swbd->skb, napi_budget);
skb               372 drivers/net/ethernet/freescale/enetc/enetc.c 			tx_swbd->skb = NULL;
skb               482 drivers/net/ethernet/freescale/enetc/enetc.c 				struct sk_buff *skb)
skb               484 drivers/net/ethernet/freescale/enetc/enetc.c 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
skb               505 drivers/net/ethernet/freescale/enetc/enetc.c 			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
skb               514 drivers/net/ethernet/freescale/enetc/enetc.c 		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
skb               515 drivers/net/ethernet/freescale/enetc/enetc.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb               522 drivers/net/ethernet/freescale/enetc/enetc.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               526 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
skb               531 drivers/net/ethernet/freescale/enetc/enetc.c 			      struct sk_buff *skb)
skb               533 drivers/net/ethernet/freescale/enetc/enetc.c 	skb_record_rx_queue(skb, rx_ring->index);
skb               534 drivers/net/ethernet/freescale/enetc/enetc.c 	skb->protocol = eth_type_trans(skb, rx_ring->ndev);
skb               593 drivers/net/ethernet/freescale/enetc/enetc.c 	struct sk_buff *skb;
skb               597 drivers/net/ethernet/freescale/enetc/enetc.c 	skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
skb               598 drivers/net/ethernet/freescale/enetc/enetc.c 	if (unlikely(!skb)) {
skb               603 drivers/net/ethernet/freescale/enetc/enetc.c 	skb_reserve(skb, ENETC_RXB_PAD);
skb               604 drivers/net/ethernet/freescale/enetc/enetc.c 	__skb_put(skb, size);
skb               608 drivers/net/ethernet/freescale/enetc/enetc.c 	return skb;
skb               612 drivers/net/ethernet/freescale/enetc/enetc.c 				     u16 size, struct sk_buff *skb)
skb               616 drivers/net/ethernet/freescale/enetc/enetc.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
skb               636 drivers/net/ethernet/freescale/enetc/enetc.c 		struct sk_buff *skb;
skb               654 drivers/net/ethernet/freescale/enetc/enetc.c 		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
skb               655 drivers/net/ethernet/freescale/enetc/enetc.c 		if (!skb)
skb               658 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_get_offloads(rx_ring, rxbd, skb);
skb               670 drivers/net/ethernet/freescale/enetc/enetc.c 			dev_kfree_skb(skb);
skb               698 drivers/net/ethernet/freescale/enetc/enetc.c 			enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
skb               709 drivers/net/ethernet/freescale/enetc/enetc.c 		rx_byte_cnt += skb->len;
skb               711 drivers/net/ethernet/freescale/enetc/enetc.c 		enetc_process_skb(rx_ring, skb);
skb               713 drivers/net/ethernet/freescale/enetc/enetc.c 		napi_gro_receive(napi, skb);
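
enetc_tx_csum() above chooses between hardware checksum insertion and a software fallback by switching on skb->csum_offset: only TCP and UDP checksum placements are programmable, so anything else goes through skb_checksum_help(). A sketch of that decision; the "TCP/UDP only" restriction is what the fragments imply, not a documented guarantee:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include <linux/udp.h>

/* Returns true if the caller should program L3/L4 offsets into the BD. */
static bool demo_tx_csum_offload(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;	/* nothing to insert */

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
	case offsetof(struct udphdr, check):
		return true;
	default:
		/* Hardware cannot place this checksum: do it now. */
		skb_checksum_help(skb);
		return false;
	}
}
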
skb                21 drivers/net/ethernet/freescale/enetc/enetc.h 	struct sk_buff *skb;
skb               227 drivers/net/ethernet/freescale/enetc/enetc.h netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
skb               361 drivers/net/ethernet/freescale/fec_main.c static inline bool is_ipv4_pkt(struct sk_buff *skb)
skb               363 drivers/net/ethernet/freescale/fec_main.c 	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
skb               367 drivers/net/ethernet/freescale/fec_main.c fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
skb               370 drivers/net/ethernet/freescale/fec_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb               373 drivers/net/ethernet/freescale/fec_main.c 	if (unlikely(skb_cow_head(skb, 0)))
skb               376 drivers/net/ethernet/freescale/fec_main.c 	if (is_ipv4_pkt(skb))
skb               377 drivers/net/ethernet/freescale/fec_main.c 		ip_hdr(skb)->check = 0;
skb               378 drivers/net/ethernet/freescale/fec_main.c 	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
skb               385 drivers/net/ethernet/freescale/fec_main.c 			     struct sk_buff *skb,
skb               391 drivers/net/ethernet/freescale/fec_main.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb               402 drivers/net/ethernet/freescale/fec_main.c 		this_frag = &skb_shinfo(skb)->frags[frag];
skb               409 drivers/net/ethernet/freescale/fec_main.c 		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
skb               416 drivers/net/ethernet/freescale/fec_main.c 				if (unlikely(skb_shinfo(skb)->tx_flags &
skb               425 drivers/net/ethernet/freescale/fec_main.c 			if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               472 drivers/net/ethernet/freescale/fec_main.c 				   struct sk_buff *skb, struct net_device *ndev)
skb               475 drivers/net/ethernet/freescale/fec_main.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb               487 drivers/net/ethernet/freescale/fec_main.c 		dev_kfree_skb_any(skb);
skb               494 drivers/net/ethernet/freescale/fec_main.c 	if (fec_enet_clear_csum(skb, ndev)) {
skb               495 drivers/net/ethernet/freescale/fec_main.c 		dev_kfree_skb_any(skb);
skb               506 drivers/net/ethernet/freescale/fec_main.c 	bufaddr = skb->data;
skb               507 drivers/net/ethernet/freescale/fec_main.c 	buflen = skb_headlen(skb);
skb               512 drivers/net/ethernet/freescale/fec_main.c 		memcpy(txq->tx_bounce[index], skb->data, buflen);
skb               522 drivers/net/ethernet/freescale/fec_main.c 		dev_kfree_skb_any(skb);
skb               529 drivers/net/ethernet/freescale/fec_main.c 		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
skb               533 drivers/net/ethernet/freescale/fec_main.c 			dev_kfree_skb_any(skb);
skb               540 drivers/net/ethernet/freescale/fec_main.c 			if (unlikely(skb_shinfo(skb)->tx_flags &
skb               552 drivers/net/ethernet/freescale/fec_main.c 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
skb               554 drivers/net/ethernet/freescale/fec_main.c 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb               559 drivers/net/ethernet/freescale/fec_main.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               568 drivers/net/ethernet/freescale/fec_main.c 	txq->tx_skbuff[index] = skb;
skb               584 drivers/net/ethernet/freescale/fec_main.c 	skb_tx_timestamp(skb);
skb               599 drivers/net/ethernet/freescale/fec_main.c fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
skb               626 drivers/net/ethernet/freescale/fec_main.c 		dev_kfree_skb_any(skb);
skb               638 drivers/net/ethernet/freescale/fec_main.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               660 drivers/net/ethernet/freescale/fec_main.c 			 struct sk_buff *skb, struct net_device *ndev,
skb               664 drivers/net/ethernet/freescale/fec_main.c 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               679 drivers/net/ethernet/freescale/fec_main.c 		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
skb               688 drivers/net/ethernet/freescale/fec_main.c 			dev_kfree_skb_any(skb);
skb               701 drivers/net/ethernet/freescale/fec_main.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               713 drivers/net/ethernet/freescale/fec_main.c 				   struct sk_buff *skb,
skb               717 drivers/net/ethernet/freescale/fec_main.c 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               724 drivers/net/ethernet/freescale/fec_main.c 	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
skb               725 drivers/net/ethernet/freescale/fec_main.c 		dev_kfree_skb_any(skb);
skb               732 drivers/net/ethernet/freescale/fec_main.c 	if (fec_enet_clear_csum(skb, ndev)) {
skb               733 drivers/net/ethernet/freescale/fec_main.c 		dev_kfree_skb_any(skb);
skb               738 drivers/net/ethernet/freescale/fec_main.c 	tso_start(skb, &tso);
skb               740 drivers/net/ethernet/freescale/fec_main.c 	total_len = skb->len - hdr_len;
skb               745 drivers/net/ethernet/freescale/fec_main.c 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
skb               750 drivers/net/ethernet/freescale/fec_main.c 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
skb               751 drivers/net/ethernet/freescale/fec_main.c 		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
skb               761 drivers/net/ethernet/freescale/fec_main.c 			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
skb               770 drivers/net/ethernet/freescale/fec_main.c 			tso_build_data(skb, &tso, size);
skb               777 drivers/net/ethernet/freescale/fec_main.c 	txq->tx_skbuff[index] = skb;
skb               779 drivers/net/ethernet/freescale/fec_main.c 	skb_tx_timestamp(skb);
skb               798 drivers/net/ethernet/freescale/fec_main.c fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               807 drivers/net/ethernet/freescale/fec_main.c 	queue = skb_get_queue_mapping(skb);
skb               811 drivers/net/ethernet/freescale/fec_main.c 	if (skb_is_gso(skb))
skb               812 drivers/net/ethernet/freescale/fec_main.c 		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
skb               814 drivers/net/ethernet/freescale/fec_main.c 		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
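
fec_enet_txq_submit_tso() drives software TSO with the net/tso.h helpers: tso_build_hdr() rebuilds the protocol headers for each segment and tso_build_data() advances through the payload in descriptor-sized chunks. A condensed sketch of that loop; the per-descriptor limit and header scratch size are hypothetical, and queueing of each chunk is elided:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/tso.h>

static void demo_xmit_tso(struct sk_buff *skb)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;
	char hdr[256];		/* scratch; must cover the headers */
	struct tso_t tso;

	tso_start(skb, &tso);
	while (total_len > 0) {
		int data_left = min_t(int, skb_shinfo(skb)->gso_size,
				      total_len);

		total_len -= data_left;
		/* Fresh headers for this segment (seq, flags, lengths). */
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		/* ...queue the header buffer here... */

		while (data_left > 0) {
			int size = min_t(int, 1024, data_left);

			/* tso.data now points at 'size' payload bytes;
			 * ...queue them here... */
			tso_build_data(skb, &tso, size);
			data_left -= size;
		}
	}
}
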
skb              1247 drivers/net/ethernet/freescale/fec_main.c 	struct	sk_buff	*skb;
skb              1274 drivers/net/ethernet/freescale/fec_main.c 		skb = txq->tx_skbuff[index];
skb              1282 drivers/net/ethernet/freescale/fec_main.c 		if (!skb)
skb              1302 drivers/net/ethernet/freescale/fec_main.c 			ndev->stats.tx_bytes += skb->len;
skb              1305 drivers/net/ethernet/freescale/fec_main.c 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
skb              1311 drivers/net/ethernet/freescale/fec_main.c 			skb_tstamp_tx(skb, &shhwtstamps);
skb              1321 drivers/net/ethernet/freescale/fec_main.c 		dev_kfree_skb_any(skb);
skb              1361 drivers/net/ethernet/freescale/fec_main.c fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
skb              1366 drivers/net/ethernet/freescale/fec_main.c 	off = ((unsigned long)skb->data) & fep->rx_align;
skb              1368 drivers/net/ethernet/freescale/fec_main.c 		skb_reserve(skb, fep->rx_align + 1 - off);
skb              1370 drivers/net/ethernet/freescale/fec_main.c 	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
skb              1380 drivers/net/ethernet/freescale/fec_main.c static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
skb              1398 drivers/net/ethernet/freescale/fec_main.c 		memcpy(new_skb->data, (*skb)->data, length);
skb              1400 drivers/net/ethernet/freescale/fec_main.c 		swap_buffer2(new_skb->data, (*skb)->data, length);
skb              1401 drivers/net/ethernet/freescale/fec_main.c 	*skb = new_skb;
skb              1419 drivers/net/ethernet/freescale/fec_main.c 	struct  sk_buff *skb;
skb              1481 drivers/net/ethernet/freescale/fec_main.c 		skb = rxq->rx_skbuff[index];
skb              1487 drivers/net/ethernet/freescale/fec_main.c 		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
skb              1501 drivers/net/ethernet/freescale/fec_main.c 		prefetch(skb->data - NET_IP_ALIGN);
skb              1502 drivers/net/ethernet/freescale/fec_main.c 		skb_put(skb, pkt_len - 4);
skb              1503 drivers/net/ethernet/freescale/fec_main.c 		data = skb->data;
skb              1510 drivers/net/ethernet/freescale/fec_main.c 			data = skb_pull_inline(skb, 2);
skb              1530 drivers/net/ethernet/freescale/fec_main.c 			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
skb              1531 drivers/net/ethernet/freescale/fec_main.c 			skb_pull(skb, VLAN_HLEN);
skb              1534 drivers/net/ethernet/freescale/fec_main.c 		skb->protocol = eth_type_trans(skb, ndev);
skb              1539 drivers/net/ethernet/freescale/fec_main.c 					  skb_hwtstamps(skb));
skb              1545 drivers/net/ethernet/freescale/fec_main.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1547 drivers/net/ethernet/freescale/fec_main.c 				skb_checksum_none_assert(skb);
skb              1553 drivers/net/ethernet/freescale/fec_main.c 			__vlan_hwaccel_put_tag(skb,
skb              1557 drivers/net/ethernet/freescale/fec_main.c 		napi_gro_receive(&fep->napi, skb);
skb              2746 drivers/net/ethernet/freescale/fec_main.c 	struct sk_buff *skb;
skb              2756 drivers/net/ethernet/freescale/fec_main.c 			skb = rxq->rx_skbuff[i];
skb              2758 drivers/net/ethernet/freescale/fec_main.c 			if (skb) {
skb              2763 drivers/net/ethernet/freescale/fec_main.c 				dev_kfree_skb(skb);
skb              2775 drivers/net/ethernet/freescale/fec_main.c 			skb = txq->tx_skbuff[i];
skb              2777 drivers/net/ethernet/freescale/fec_main.c 			dev_kfree_skb(skb);
skb              2858 drivers/net/ethernet/freescale/fec_main.c 	struct sk_buff *skb;
skb              2865 drivers/net/ethernet/freescale/fec_main.c 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
skb              2866 drivers/net/ethernet/freescale/fec_main.c 		if (!skb)
skb              2869 drivers/net/ethernet/freescale/fec_main.c 		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
skb              2870 drivers/net/ethernet/freescale/fec_main.c 			dev_kfree_skb(skb);
skb              2874 drivers/net/ethernet/freescale/fec_main.c 		rxq->rx_skbuff[i] = skb;
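
fec_enet_copybreak() above implements the usual copybreak trade-off: for short frames it is cheaper to copy into a small fresh skb and leave the original DMA buffer in the ring than to unmap it and map a replacement. A sketch, with copybreak as the tunable threshold:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Returns the copy on success; NULL means the caller should hand off
 * the original rx_skb and refill the ring slot instead. */
static struct sk_buff *demo_copybreak(struct net_device *ndev,
				      const struct sk_buff *rx_skb,
				      unsigned int len,
				      unsigned int copybreak)
{
	struct sk_buff *n;

	if (len > copybreak)
		return NULL;

	n = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
	if (!n)
		return NULL;

	skb_reserve(n, NET_IP_ALIGN);	/* keep the IP header aligned */
	skb_copy_to_linear_data(n, rx_skb->data, len);
	skb_put(n, len);
	return n;
}
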
skb               125 drivers/net/ethernet/freescale/fec_mpc52xx.c 		struct sk_buff *skb;
skb               127 drivers/net/ethernet/freescale/fec_mpc52xx.c 		skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd);
skb               128 drivers/net/ethernet/freescale/fec_mpc52xx.c 		dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
skb               130 drivers/net/ethernet/freescale/fec_mpc52xx.c 		kfree_skb(skb);
skb               149 drivers/net/ethernet/freescale/fec_mpc52xx.c 	struct sk_buff *skb;
skb               152 drivers/net/ethernet/freescale/fec_mpc52xx.c 		skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
skb               153 drivers/net/ethernet/freescale/fec_mpc52xx.c 		if (!skb)
skb               157 drivers/net/ethernet/freescale/fec_mpc52xx.c 		memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
skb               158 drivers/net/ethernet/freescale/fec_mpc52xx.c 		mpc52xx_fec_rx_submit(dev, skb);
skb               309 drivers/net/ethernet/freescale/fec_mpc52xx.c mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               326 drivers/net/ethernet/freescale/fec_mpc52xx.c 	bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
skb               327 drivers/net/ethernet/freescale/fec_mpc52xx.c 	bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
skb               330 drivers/net/ethernet/freescale/fec_mpc52xx.c 	skb_tx_timestamp(skb);
skb               331 drivers/net/ethernet/freescale/fec_mpc52xx.c 	bcom_submit_next_buffer(priv->tx_dmatsk, skb);
skb               365 drivers/net/ethernet/freescale/fec_mpc52xx.c 		struct sk_buff *skb;
skb               367 drivers/net/ethernet/freescale/fec_mpc52xx.c 		skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL,
skb               369 drivers/net/ethernet/freescale/fec_mpc52xx.c 		dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
skb               372 drivers/net/ethernet/freescale/fec_mpc52xx.c 		dev_consume_skb_irq(skb);
skb               386 drivers/net/ethernet/freescale/fec_mpc52xx.c 	struct sk_buff *skb;  /* new sk_buff to enqueue in its place */
skb               409 drivers/net/ethernet/freescale/fec_mpc52xx.c 		skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
skb               410 drivers/net/ethernet/freescale/fec_mpc52xx.c 		if (!skb) {
skb               419 drivers/net/ethernet/freescale/fec_mpc52xx.c 		mpc52xx_fec_rx_submit(dev, skb);
skb                77 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c static void skb_align(struct sk_buff *skb, int align)
skb                79 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	int off = ((unsigned long)skb->data) & (align - 1);
skb                82 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		skb_reserve(skb, align - off);
skb                92 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	struct sk_buff *skb, *skbn;
skb               112 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		skb = fep->tx_skbuff[dirtyidx];
skb               161 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		if (skb) {
skb               162 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 			dev_kfree_skb(skb);
skb               231 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 			skb = fep->rx_skbuff[curidx];
skb               245 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 					skb_copy_from_linear_data(skb,
skb               247 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 					swap(skb, skbn);
skb               275 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				skb_put(skb, pkt_len);	/* Make room */
skb               276 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				skb->protocol = eth_type_trans(skb, dev);
skb               278 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				netif_receive_skb(skb);
skb               281 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				skbn = skb;
skb               365 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	struct sk_buff *skb;
skb               378 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
skb               379 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		if (skb == NULL)
skb               382 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		skb_align(skb, ENET_RX_ALIGN);
skb               383 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		fep->rx_skbuff[i] = skb;
skb               385 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 			dma_map_single(fep->dev, skb->data,
skb               414 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	struct sk_buff *skb;
skb               422 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		if ((skb = fep->tx_skbuff[i]) == NULL)
skb               427 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				skb->len, DMA_TO_DEVICE);
skb               430 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		dev_kfree_skb(skb);
skb               437 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		if ((skb = fep->rx_skbuff[i]) == NULL)
skb               447 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		dev_kfree_skb(skb);
skb               458 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 					       struct sk_buff *skb)
skb               462 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	if (skb_linearize(skb))
skb               466 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	new_skb = netdev_alloc_skb(dev, skb->len + 4);
skb               474 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
skb               475 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	skb_put(new_skb, skb->len);
skb               478 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	dev_kfree_skb_any(skb);
skb               485 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               498 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
skb               501 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		nr_frags = skb_shinfo(skb)->nr_frags;
skb               502 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		frag = skb_shinfo(skb)->frags;
skb               512 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		skb = tx_skb_align_workaround(dev, skb);
skb               513 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		if (!skb) {
skb               531 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb               546 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	len = skb->len;
skb               549 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 		len -= skb->data_len;
skb               555 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 				skb->data, len, DMA_TO_DEVICE));
skb               559 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	frag = skb_shinfo(skb)->frags;
skb               590 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	if (skb->len <= 60)
skb               596 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	fep->tx_skbuff[curidx] = skb;
skb               608 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	skb_tx_timestamp(skb);
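
tx_skb_align_workaround() exists because this controller's DMA wants 4-byte-aligned buffers: a misaligned or fragmented skb is linearized and copied into a freshly aligned one. A sketch of the same workaround:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_align_workaround(struct net_device *dev,
					     struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	unsigned long off;

	if (skb_linearize(skb))
		return NULL;	/* caller drops the packet */

	new_skb = netdev_alloc_skb(dev, skb->len + 4);
	if (!new_skb)
		return NULL;

	/* Round the data pointer up to the next 4-byte boundary. */
	off = (unsigned long)new_skb->data & 3;
	if (off)
		skb_reserve(new_skb, 4 - off);

	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);
	dev_kfree_skb_any(skb);
	return new_skb;
}
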
skb              1122 drivers/net/ethernet/freescale/gianfar.c 	dev_kfree_skb(rx_queue->skb);
skb              1714 drivers/net/ethernet/freescale/gianfar.c static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
skb              1716 drivers/net/ethernet/freescale/gianfar.c 	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
skb              1723 drivers/net/ethernet/freescale/gianfar.c static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
skb              1735 drivers/net/ethernet/freescale/gianfar.c 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
skb              1737 drivers/net/ethernet/freescale/gianfar.c 		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
skb              1739 drivers/net/ethernet/freescale/gianfar.c 		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
skb              1746 drivers/net/ethernet/freescale/gianfar.c 	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
skb              1747 drivers/net/ethernet/freescale/gianfar.c 	fcb->l4os = skb_network_header_len(skb);
skb              1752 drivers/net/ethernet/freescale/gianfar.c static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
skb              1755 drivers/net/ethernet/freescale/gianfar.c 	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
skb              1793 drivers/net/ethernet/freescale/gianfar.c static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1808 drivers/net/ethernet/freescale/gianfar.c 	rq = skb->queue_mapping;
skb              1814 drivers/net/ethernet/freescale/gianfar.c 	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
skb              1815 drivers/net/ethernet/freescale/gianfar.c 	do_vlan = skb_vlan_tag_present(skb);
skb              1816 drivers/net/ethernet/freescale/gianfar.c 	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              1827 drivers/net/ethernet/freescale/gianfar.c 	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
skb              1830 drivers/net/ethernet/freescale/gianfar.c 		skb_new = skb_realloc_headroom(skb, fcb_len);
skb              1833 drivers/net/ethernet/freescale/gianfar.c 			dev_kfree_skb_any(skb);
skb              1837 drivers/net/ethernet/freescale/gianfar.c 		if (skb->sk)
skb              1838 drivers/net/ethernet/freescale/gianfar.c 			skb_set_owner_w(skb_new, skb->sk);
skb              1839 drivers/net/ethernet/freescale/gianfar.c 		dev_consume_skb_any(skb);
skb              1840 drivers/net/ethernet/freescale/gianfar.c 		skb = skb_new;
skb              1844 drivers/net/ethernet/freescale/gianfar.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              1861 drivers/net/ethernet/freescale/gianfar.c 	bytes_sent = skb->len;
skb              1864 drivers/net/ethernet/freescale/gianfar.c 	GFAR_CB(skb)->bytes_sent = bytes_sent;
skb              1872 drivers/net/ethernet/freescale/gianfar.c 		skb_push(skb, GMAC_TXPAL_LEN);
skb              1873 drivers/net/ethernet/freescale/gianfar.c 		memset(skb->data, 0, GMAC_TXPAL_LEN);
skb              1878 drivers/net/ethernet/freescale/gianfar.c 		fcb = gfar_add_fcb(skb);
skb              1884 drivers/net/ethernet/freescale/gianfar.c 		gfar_tx_checksum(skb, fcb, fcb_len);
skb              1887 drivers/net/ethernet/freescale/gianfar.c 		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
skb              1888 drivers/net/ethernet/freescale/gianfar.c 			__skb_pull(skb, GMAC_FCB_LEN);
skb              1889 drivers/net/ethernet/freescale/gianfar.c 			skb_checksum_help(skb);
skb              1892 drivers/net/ethernet/freescale/gianfar.c 				fcb = gfar_add_fcb(skb);
skb              1902 drivers/net/ethernet/freescale/gianfar.c 		gfar_tx_vlan(skb, fcb);
skb              1904 drivers/net/ethernet/freescale/gianfar.c 	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
skb              1923 drivers/net/ethernet/freescale/gianfar.c 		frag = &skb_shinfo(skb)->frags[0];
skb              1964 drivers/net/ethernet/freescale/gianfar.c 			      (skb_headlen(skb) - fcb_len);
skb              1973 drivers/net/ethernet/freescale/gianfar.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              1976 drivers/net/ethernet/freescale/gianfar.c 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
skb              1987 drivers/net/ethernet/freescale/gianfar.c 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
skb              2038 drivers/net/ethernet/freescale/gianfar.c 	dev_kfree_skb_any(skb);
skb              2191 drivers/net/ethernet/freescale/gianfar.c 	struct sk_buff *skb;
skb              2206 drivers/net/ethernet/freescale/gianfar.c 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
skb              2209 drivers/net/ethernet/freescale/gianfar.c 		do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              2212 drivers/net/ethernet/freescale/gianfar.c 		frags = skb_shinfo(skb)->nr_frags;
skb              2243 drivers/net/ethernet/freescale/gianfar.c 			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
skb              2248 drivers/net/ethernet/freescale/gianfar.c 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
skb              2249 drivers/net/ethernet/freescale/gianfar.c 			skb_tstamp_tx(skb, &shhwtstamps);
skb              2265 drivers/net/ethernet/freescale/gianfar.c 		bytes_sent += GFAR_CB(skb)->bytes_sent;
skb              2267 drivers/net/ethernet/freescale/gianfar.c 		dev_kfree_skb_any(skb);
skb              2385 drivers/net/ethernet/freescale/gianfar.c 			     struct sk_buff *skb, bool first)
skb              2391 drivers/net/ethernet/freescale/gianfar.c 		skb_put(skb, size);
skb              2395 drivers/net/ethernet/freescale/gianfar.c 			size -= skb->len;
skb              2397 drivers/net/ethernet/freescale/gianfar.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
skb              2436 drivers/net/ethernet/freescale/gianfar.c 					    u32 lstatus, struct sk_buff *skb)
skb              2442 drivers/net/ethernet/freescale/gianfar.c 	if (likely(!skb)) {
skb              2445 drivers/net/ethernet/freescale/gianfar.c 		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
skb              2446 drivers/net/ethernet/freescale/gianfar.c 		if (unlikely(!skb)) {
skb              2450 drivers/net/ethernet/freescale/gianfar.c 		skb_reserve(skb, RXBUF_ALIGNMENT);
skb              2457 drivers/net/ethernet/freescale/gianfar.c 	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
skb              2469 drivers/net/ethernet/freescale/gianfar.c 	return skb;
skb              2472 drivers/net/ethernet/freescale/gianfar.c static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
skb              2480 drivers/net/ethernet/freescale/gianfar.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2482 drivers/net/ethernet/freescale/gianfar.c 		skb_checksum_none_assert(skb);
skb              2486 drivers/net/ethernet/freescale/gianfar.c static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
skb              2492 drivers/net/ethernet/freescale/gianfar.c 	fcb = (struct rxfcb *)skb->data;
skb              2498 drivers/net/ethernet/freescale/gianfar.c 		skb_pull(skb, GMAC_FCB_LEN);
skb              2502 drivers/net/ethernet/freescale/gianfar.c 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
skb              2503 drivers/net/ethernet/freescale/gianfar.c 		u64 *ns = (u64 *) skb->data;
skb              2510 drivers/net/ethernet/freescale/gianfar.c 		skb_pull(skb, priv->padding);
skb              2513 drivers/net/ethernet/freescale/gianfar.c 	pskb_trim(skb, skb->len - ETH_FCS_LEN);
skb              2516 drivers/net/ethernet/freescale/gianfar.c 		gfar_rx_checksum(skb, fcb);
skb              2524 drivers/net/ethernet/freescale/gianfar.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              2539 drivers/net/ethernet/freescale/gianfar.c 	struct sk_buff *skb = rx_queue->skb;
skb              2563 drivers/net/ethernet/freescale/gianfar.c 		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
skb              2564 drivers/net/ethernet/freescale/gianfar.c 		if (unlikely(!skb))
skb              2583 drivers/net/ethernet/freescale/gianfar.c 			dev_kfree_skb(skb);
skb              2584 drivers/net/ethernet/freescale/gianfar.c 			skb = NULL;
skb              2589 drivers/net/ethernet/freescale/gianfar.c 		gfar_process_frame(ndev, skb);
skb              2593 drivers/net/ethernet/freescale/gianfar.c 		total_bytes += skb->len;
skb              2595 drivers/net/ethernet/freescale/gianfar.c 		skb_record_rx_queue(skb, rx_queue->qindex);
skb              2597 drivers/net/ethernet/freescale/gianfar.c 		skb->protocol = eth_type_trans(skb, ndev);
skb              2600 drivers/net/ethernet/freescale/gianfar.c 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
skb              2602 drivers/net/ethernet/freescale/gianfar.c 		skb = NULL;
skb              2606 drivers/net/ethernet/freescale/gianfar.c 	rx_queue->skb = skb;
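
gfar_add_fcb() prepends an in-band frame control block that tells the eTSEC where the L3/L4 headers sit; because skb_push() moves skb->data back, the network offset grows by the FCB size and has to be subtracted out again. A sketch with a simplified stand-in for struct txfcb:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Simplified stand-in for struct txfcb (real layout has more fields). */
struct demo_fcb {
	u8 flags;
	u8 l3os;	/* L3 offset, counted from after the FCB */
	u8 l4os;	/* L4 header length */
} __packed;

static void demo_add_fcb(struct sk_buff *skb)
{
	struct demo_fcb *fcb = skb_push(skb, sizeof(*fcb));

	memset(fcb, 0, sizeof(*fcb));
	/* skb_push() just moved skb->data back by sizeof(*fcb), so the
	 * raw network offset now includes the FCB; undo that. */
	fcb->l3os = skb_network_offset(skb) - sizeof(*fcb);
	fcb->l4os = skb_network_header_len(skb);
}
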
skb               559 drivers/net/ethernet/freescale/gianfar.h #define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
skb              1018 drivers/net/ethernet/freescale/gianfar.h 	struct	sk_buff *skb;
skb               209 drivers/net/ethernet/freescale/ucc_geth.c 	struct sk_buff *skb;
skb               211 drivers/net/ethernet/freescale/ucc_geth.c 	skb = netdev_alloc_skb(ugeth->ndev,
skb               214 drivers/net/ethernet/freescale/ucc_geth.c 	if (!skb)
skb               220 drivers/net/ethernet/freescale/ucc_geth.c 	skb_reserve(skb,
skb               222 drivers/net/ethernet/freescale/ucc_geth.c 		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
skb               227 drivers/net/ethernet/freescale/ucc_geth.c 				     skb->data,
skb               235 drivers/net/ethernet/freescale/ucc_geth.c 	return skb;
skb               242 drivers/net/ethernet/freescale/ucc_geth.c 	struct sk_buff *skb;
skb               250 drivers/net/ethernet/freescale/ucc_geth.c 		skb = get_new_skb(ugeth, bd);
skb               252 drivers/net/ethernet/freescale/ucc_geth.c 		if (!skb)	/* If can not allocate data buffer,
skb               256 drivers/net/ethernet/freescale/ucc_geth.c 		ugeth->rx_skbuff[rxQ][i] = skb;
skb              3077 drivers/net/ethernet/freescale/ucc_geth.c ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              3090 drivers/net/ethernet/freescale/ucc_geth.c 	netdev_sent_queue(dev, skb->len);
skb              3093 drivers/net/ethernet/freescale/ucc_geth.c 	dev->stats.tx_bytes += skb->len;
skb              3099 drivers/net/ethernet/freescale/ucc_geth.c 	ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
skb              3108 drivers/net/ethernet/freescale/ucc_geth.c 		      dma_map_single(ugeth->dev, skb->data,
skb              3109 drivers/net/ethernet/freescale/ucc_geth.c 			      skb->len, DMA_TO_DEVICE));
skb              3113 drivers/net/ethernet/freescale/ucc_geth.c 	bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
skb              3133 drivers/net/ethernet/freescale/ucc_geth.c 	skb_tx_timestamp(skb);
skb              3155 drivers/net/ethernet/freescale/ucc_geth.c 	struct sk_buff *skb;
skb              3175 drivers/net/ethernet/freescale/ucc_geth.c 		skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
skb              3179 drivers/net/ethernet/freescale/ucc_geth.c 		if (!skb ||
skb              3184 drivers/net/ethernet/freescale/ucc_geth.c 				       __LINE__, (u32)skb);
skb              3185 drivers/net/ethernet/freescale/ucc_geth.c 			dev_kfree_skb(skb);
skb              3194 drivers/net/ethernet/freescale/ucc_geth.c 			skb_put(skb, length);
skb              3197 drivers/net/ethernet/freescale/ucc_geth.c 			skb->protocol = eth_type_trans(skb, ugeth->ndev);
skb              3201 drivers/net/ethernet/freescale/ucc_geth.c 			netif_receive_skb(skb);
skb              3204 drivers/net/ethernet/freescale/ucc_geth.c 		skb = get_new_skb(ugeth, bd);
skb              3205 drivers/net/ethernet/freescale/ucc_geth.c 		if (!skb) {
skb              3212 drivers/net/ethernet/freescale/ucc_geth.c 		ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
skb              3245 drivers/net/ethernet/freescale/ucc_geth.c 		struct sk_buff *skb;
skb              3251 drivers/net/ethernet/freescale/ucc_geth.c 		skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
skb              3252 drivers/net/ethernet/freescale/ucc_geth.c 		if (!skb)
skb              3255 drivers/net/ethernet/freescale/ucc_geth.c 		bytes_sent += skb->len;
skb              3258 drivers/net/ethernet/freescale/ucc_geth.c 		dev_consume_skb_any(skb);
skb                90 drivers/net/ethernet/fujitsu/fmvj18x_cs.c static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
skb               806 drivers/net/ethernet/fujitsu/fmvj18x_cs.c static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
skb               811 drivers/net/ethernet/fujitsu/fmvj18x_cs.c     short length = skb->len;
skb               815 drivers/net/ethernet/fujitsu/fmvj18x_cs.c     	if (skb_padto(skb, ETH_ZLEN))
skb               823 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 	unsigned char *buf = skb->data;
skb               832 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 		   (unsigned long)skb->len);
skb               833 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 	dev->stats.tx_bytes += skb->len;
skb               872 drivers/net/ethernet/fujitsu/fmvj18x_cs.c     dev_kfree_skb (skb);
skb               986 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 	    struct sk_buff *skb;
skb               995 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 	    skb = netdev_alloc_skb(dev, pkt_len + 2);
skb               996 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 	    if (skb == NULL) {
skb              1002 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 	    skb_reserve(skb, 2);
skb              1003 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 	    insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
skb              1005 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 	    skb->protocol = eth_type_trans(skb, dev);
skb              1012 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 			pr_debug(" %02x", skb->data[i]);
skb              1016 drivers/net/ethernet/fujitsu/fmvj18x_cs.c 	    netif_rx(skb);
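
The fmvj18x receive path is programmed I/O: the whole frame is pulled through a 16-bit data port with insw(), with two bytes of headroom reserved first so the IP header lands 4-byte aligned after the 14-byte Ethernet header. A sketch; 'port' stands for ioaddr + DATAPORT:

#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_pio_rx(struct net_device *dev, unsigned long port,
			int pkt_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + 2);

	if (!skb)
		return;		/* the real driver still drains the FIFO */

	skb_reserve(skb, 2);	/* align the IP header on 4 bytes */
	insw(port, skb_put(skb, pkt_len), (pkt_len + 1) >> 1);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}
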
skb                98 drivers/net/ethernet/google/gve/gve.h 	struct sk_buff *skb; /* skb for this pkt */
skb               434 drivers/net/ethernet/google/gve/gve.h netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
skb               233 drivers/net/ethernet/google/gve/gve_rx.c 	struct sk_buff *skb = napi_alloc_skb(napi, len);
skb               237 drivers/net/ethernet/google/gve/gve_rx.c 	if (unlikely(!skb))
skb               240 drivers/net/ethernet/google/gve/gve_rx.c 	__skb_put(skb, len);
skb               242 drivers/net/ethernet/google/gve/gve_rx.c 	skb_copy_to_linear_data(skb, va, len);
skb               244 drivers/net/ethernet/google/gve/gve_rx.c 	skb->protocol = eth_type_trans(skb, dev);
skb               245 drivers/net/ethernet/google/gve/gve_rx.c 	return skb;
skb               253 drivers/net/ethernet/google/gve/gve_rx.c 	struct sk_buff *skb = napi_get_frags(napi);
skb               255 drivers/net/ethernet/google/gve/gve_rx.c 	if (unlikely(!skb))
skb               258 drivers/net/ethernet/google/gve/gve_rx.c 	skb_add_rx_frag(skb, 0, page_info->page,
skb               262 drivers/net/ethernet/google/gve/gve_rx.c 	return skb;
skb               282 drivers/net/ethernet/google/gve/gve_rx.c 	struct sk_buff *skb;
skb               303 drivers/net/ethernet/google/gve/gve_rx.c 			skb = gve_rx_copy(dev, napi, page_info, len);
skb               307 drivers/net/ethernet/google/gve/gve_rx.c 			skb = gve_rx_copy(dev, napi, page_info, len);
skb               316 drivers/net/ethernet/google/gve/gve_rx.c 			skb = gve_rx_add_frags(dev, napi, page_info, len);
skb               317 drivers/net/ethernet/google/gve/gve_rx.c 			if (!skb)
skb               327 drivers/net/ethernet/google/gve/gve_rx.c 			skb = gve_rx_copy(dev, napi, page_info, len);
skb               333 drivers/net/ethernet/google/gve/gve_rx.c 		skb = gve_rx_copy(dev, napi, page_info, len);
skb               340 drivers/net/ethernet/google/gve/gve_rx.c 	if (!skb)
skb               346 drivers/net/ethernet/google/gve/gve_rx.c 			skb->ip_summed = CHECKSUM_COMPLETE;
skb               348 drivers/net/ethernet/google/gve/gve_rx.c 			skb->ip_summed = CHECKSUM_NONE;
skb               349 drivers/net/ethernet/google/gve/gve_rx.c 		skb->csum = csum_unfold(rx_desc->csum);
skb               355 drivers/net/ethernet/google/gve/gve_rx.c 		skb_set_hash(skb, be32_to_cpu(rx_desc->rss_hash),
skb               358 drivers/net/ethernet/google/gve/gve_rx.c 	if (skb_is_nonlinear(skb))
skb               361 drivers/net/ethernet/google/gve/gve_rx.c 		napi_gro_receive(napi, skb);
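
The gve receive path chooses between copying a short frame into a linear skb (gve_rx_copy) and attaching the page to a per-NAPI frags skb to avoid the copy (gve_rx_add_frags). A sketch of the zero-copy branch; that gve finishes it with napi_gro_frags() exactly this way is an assumption of the sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_rx_frag(struct napi_struct *napi, struct page *page,
			 int off, unsigned int len, unsigned int truesize)
{
	/* napi_get_frags() hands back a reusable skb with no linear
	 * payload; the frame lives entirely in page fragments. */
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;

	skb_add_rx_frag(skb, 0, page, off, len, truesize);
	napi_gro_frags(napi);	/* consumes the frags skb */
}
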
skb               286 drivers/net/ethernet/google/gve/gve_tx.c 					      struct sk_buff *skb)
skb               292 drivers/net/ethernet/google/gve/gve_tx.c 	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
skb               293 drivers/net/ethernet/google/gve/gve_tx.c 				 tcp_hdrlen(skb) : skb_headlen(skb);
skb               299 drivers/net/ethernet/google/gve/gve_tx.c 	bytes = align_hdr_pad + pad_bytes + skb->len;
skb               320 drivers/net/ethernet/google/gve/gve_tx.c static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
skb               324 drivers/net/ethernet/google/gve/gve_tx.c 	bytes_required = gve_skb_fifo_bytes_required(tx, skb);
skb               354 drivers/net/ethernet/google/gve/gve_tx.c 				 struct sk_buff *skb, bool is_gso,
skb               361 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
skb               363 drivers/net/ethernet/google/gve/gve_tx.c 	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
skb               365 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
skb               373 drivers/net/ethernet/google/gve/gve_tx.c 	pkt_desc->pkt.len = cpu_to_be16(skb->len);
skb               379 drivers/net/ethernet/google/gve/gve_tx.c 				 struct sk_buff *skb, bool is_gso,
skb               384 drivers/net/ethernet/google/gve/gve_tx.c 		if (skb_is_gso_v6(skb))
skb               386 drivers/net/ethernet/google/gve/gve_tx.c 		seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1;
skb               387 drivers/net/ethernet/google/gve/gve_tx.c 		seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
skb               407 drivers/net/ethernet/google/gve/gve_tx.c static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
skb               413 drivers/net/ethernet/google/gve/gve_tx.c 	bool is_gso = skb_is_gso(skb);
skb               423 drivers/net/ethernet/google/gve/gve_tx.c 	l4_hdr_offset = skb_checksum_start_offset(skb);
skb               429 drivers/net/ethernet/google/gve/gve_tx.c 	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
skb               430 drivers/net/ethernet/google/gve/gve_tx.c 			skb_headlen(skb);
skb               432 drivers/net/ethernet/google/gve/gve_tx.c 	info->skb = skb;
skb               440 drivers/net/ethernet/google/gve/gve_tx.c 	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
skb               443 drivers/net/ethernet/google/gve/gve_tx.c 	gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
skb               447 drivers/net/ethernet/google/gve/gve_tx.c 	skb_copy_bits(skb, 0,
skb               459 drivers/net/ethernet/google/gve/gve_tx.c 		gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
skb               463 drivers/net/ethernet/google/gve/gve_tx.c 		skb_copy_bits(skb, copy_offset,
skb               475 drivers/net/ethernet/google/gve/gve_tx.c netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
skb               481 drivers/net/ethernet/google/gve/gve_tx.c 	WARN(skb_get_queue_mapping(skb) > priv->tx_cfg.num_queues,
skb               483 drivers/net/ethernet/google/gve/gve_tx.c 	tx = &priv->tx[skb_get_queue_mapping(skb)];
skb               484 drivers/net/ethernet/google/gve/gve_tx.c 	if (unlikely(gve_maybe_stop_tx(tx, skb))) {
skb               493 drivers/net/ethernet/google/gve/gve_tx.c 	nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
skb               495 drivers/net/ethernet/google/gve/gve_tx.c 	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
skb               496 drivers/net/ethernet/google/gve/gve_tx.c 	skb_tx_timestamp(skb);
skb               516 drivers/net/ethernet/google/gve/gve_tx.c 	struct sk_buff *skb;
skb               526 drivers/net/ethernet/google/gve/gve_tx.c 		skb = info->skb;
skb               529 drivers/net/ethernet/google/gve/gve_tx.c 		if (skb) {
skb               530 drivers/net/ethernet/google/gve/gve_tx.c 			info->skb = NULL;
skb               531 drivers/net/ethernet/google/gve/gve_tx.c 			bytes += skb->len;
skb               533 drivers/net/ethernet/google/gve/gve_tx.c 			dev_consume_skb_any(skb);
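gve_tx.c copies each frame into a per-ring FIFO (gve_skb_fifo_bytes_required, skb_copy_bits) and stops the queue when space runs short (gve_maybe_stop_tx). The classic stop/re-check idiom behind such a helper, as a hedged sketch; my_tx_ring, free_desc and tx_can_fit are hypothetical stand-ins for the driver's FIFO and descriptor math:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_tx_ring {
	struct netdev_queue *txq;
	int free_desc;			/* descriptors still available */
};

/* Hypothetical capacity check standing in for the FIFO/descriptor math. */
static bool tx_can_fit(const struct my_tx_ring *tx, const struct sk_buff *skb)
{
	return tx->free_desc >= skb_shinfo(skb)->nr_frags + 1;
}

static int my_maybe_stop_tx(struct my_tx_ring *tx, struct sk_buff *skb)
{
	if (likely(tx_can_fit(tx, skb)))
		return 0;

	netif_tx_stop_queue(tx->txq);
	smp_mb();			/* publish the stop before re-checking */

	/* A racing completion may have freed space after we stopped;
	 * re-check once so the queue is not left stopped forever. */
	if (likely(tx_can_fit(tx, skb))) {
		netif_tx_start_queue(tx->txq);
		return 0;
	}
	return -EBUSY;			/* caller returns NETDEV_TX_BUSY */
}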
skb               506 drivers/net/ethernet/hisilicon/hip04_eth.c hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               521 drivers/net/ethernet/hisilicon/hip04_eth.c 	phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
skb               523 drivers/net/ethernet/hisilicon/hip04_eth.c 		dev_kfree_skb(skb);
skb               527 drivers/net/ethernet/hisilicon/hip04_eth.c 	priv->tx_skb[tx_head] = skb;
skb               530 drivers/net/ethernet/hisilicon/hip04_eth.c 	desc->send_size = (__force u32)cpu_to_be32(skb->len);
skb               543 drivers/net/ethernet/hisilicon/hip04_eth.c 	skb_tx_timestamp(skb);
skb               547 drivers/net/ethernet/hisilicon/hip04_eth.c 	netdev_sent_queue(ndev, skb->len);
skb               550 drivers/net/ethernet/hisilicon/hip04_eth.c 	stats->tx_bytes += skb->len;
skb               580 drivers/net/ethernet/hisilicon/hip04_eth.c 	struct sk_buff *skb;
skb               594 drivers/net/ethernet/hisilicon/hip04_eth.c 		skb = build_skb(buf, priv->rx_buf_size);
skb               595 drivers/net/ethernet/hisilicon/hip04_eth.c 		if (unlikely(!skb)) {
skb               604 drivers/net/ethernet/hisilicon/hip04_eth.c 		desc = (struct rx_desc *)skb->data;
skb               609 drivers/net/ethernet/hisilicon/hip04_eth.c 			dev_kfree_skb_any(skb);
skb               612 drivers/net/ethernet/hisilicon/hip04_eth.c 			dev_kfree_skb_any(skb);
skb               616 drivers/net/ethernet/hisilicon/hip04_eth.c 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
skb               617 drivers/net/ethernet/hisilicon/hip04_eth.c 			skb_put(skb, len);
skb               618 drivers/net/ethernet/hisilicon/hip04_eth.c 			skb->protocol = eth_type_trans(skb, ndev);
skb               619 drivers/net/ethernet/hisilicon/hip04_eth.c 			napi_gro_receive(&priv->napi, skb);
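hip04_eth.c wraps its receive buffers with build_skb() instead of allocating and copying, then reserves the standard headroom and trims to the DMA'd length. A sketch of that zero-copy RX pattern, assuming a page-frag buffer and hypothetical names:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* 'buf' is a DMA-synced receive buffer of 'buf_size' bytes holding 'len'
 * bytes of packet data after the reserved headroom (hypothetical layout). */
static void my_rx_build(struct napi_struct *napi, struct net_device *ndev,
			void *buf, unsigned int buf_size, unsigned int len)
{
	struct sk_buff *skb = build_skb(buf, buf_size);

	if (unlikely(!skb)) {
		skb_free_frag(buf);	/* buffer came from a page-frag pool */
		return;
	}

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);	/* skip headroom */
	skb_put(skb, len);				/* expose payload */
	skb->protocol = eth_type_trans(skb, ndev);
	napi_gro_receive(napi, skb);
}

The attraction is that no memcpy happens on the hot path; the cost is that each buffer must be sized for the worst-case frame plus struct skb_shared_info.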
skb               101 drivers/net/ethernet/hisilicon/hisi_femac.c 	struct sk_buff **skb;
skb               143 drivers/net/ethernet/hisilicon/hisi_femac.c 				    struct sk_buff *skb, unsigned int pos)
skb               148 drivers/net/ethernet/hisilicon/hisi_femac.c 	dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
skb               153 drivers/net/ethernet/hisilicon/hisi_femac.c 	struct sk_buff *skb;
skb               163 drivers/net/ethernet/hisilicon/hisi_femac.c 		skb = txq->skb[txq->tail];
skb               164 drivers/net/ethernet/hisilicon/hisi_femac.c 		if (unlikely(!skb)) {
skb               169 drivers/net/ethernet/hisilicon/hisi_femac.c 		hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
skb               171 drivers/net/ethernet/hisilicon/hisi_femac.c 		bytes_compl += skb->len;
skb               172 drivers/net/ethernet/hisilicon/hisi_femac.c 		dev_kfree_skb_any(skb);
skb               177 drivers/net/ethernet/hisilicon/hisi_femac.c 		txq->skb[txq->tail] = NULL;
skb               213 drivers/net/ethernet/hisilicon/hisi_femac.c 	struct sk_buff *skb;
skb               222 drivers/net/ethernet/hisilicon/hisi_femac.c 		if (unlikely(rxq->skb[pos])) {
skb               224 drivers/net/ethernet/hisilicon/hisi_femac.c 				   pos, rxq->skb[pos]);
skb               227 drivers/net/ethernet/hisilicon/hisi_femac.c 		skb = netdev_alloc_skb_ip_align(priv->ndev, len);
skb               228 drivers/net/ethernet/hisilicon/hisi_femac.c 		if (unlikely(!skb))
skb               231 drivers/net/ethernet/hisilicon/hisi_femac.c 		addr = dma_map_single(priv->dev, skb->data, len,
skb               234 drivers/net/ethernet/hisilicon/hisi_femac.c 			dev_kfree_skb_any(skb);
skb               238 drivers/net/ethernet/hisilicon/hisi_femac.c 		rxq->skb[pos] = skb;
skb               249 drivers/net/ethernet/hisilicon/hisi_femac.c 	struct sk_buff *skb;
skb               264 drivers/net/ethernet/hisilicon/hisi_femac.c 		skb = rxq->skb[pos];
skb               265 drivers/net/ethernet/hisilicon/hisi_femac.c 		if (unlikely(!skb)) {
skb               269 drivers/net/ethernet/hisilicon/hisi_femac.c 		rxq->skb[pos] = NULL;
skb               274 drivers/net/ethernet/hisilicon/hisi_femac.c 		skb_put(skb, len);
skb               275 drivers/net/ethernet/hisilicon/hisi_femac.c 		if (unlikely(skb->len > MAX_FRAME_SIZE)) {
skb               276 drivers/net/ethernet/hisilicon/hisi_femac.c 			netdev_err(dev, "rcv len err, len = %d\n", skb->len);
skb               279 drivers/net/ethernet/hisilicon/hisi_femac.c 			dev_kfree_skb_any(skb);
skb               283 drivers/net/ethernet/hisilicon/hisi_femac.c 		skb->protocol = eth_type_trans(skb, dev);
skb               284 drivers/net/ethernet/hisilicon/hisi_femac.c 		napi_gro_receive(&priv->napi, skb);
skb               286 drivers/net/ethernet/hisilicon/hisi_femac.c 		dev->stats.rx_bytes += skb->len;
skb               351 drivers/net/ethernet/hisilicon/hisi_femac.c 	queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
skb               353 drivers/net/ethernet/hisilicon/hisi_femac.c 	if (!queue->skb)
skb               389 drivers/net/ethernet/hisilicon/hisi_femac.c 	struct sk_buff *skb;
skb               395 drivers/net/ethernet/hisilicon/hisi_femac.c 		skb = rxq->skb[pos];
skb               396 drivers/net/ethernet/hisilicon/hisi_femac.c 		if (unlikely(!skb)) {
skb               406 drivers/net/ethernet/hisilicon/hisi_femac.c 		dev_kfree_skb_any(skb);
skb               407 drivers/net/ethernet/hisilicon/hisi_femac.c 		rxq->skb[pos] = NULL;
skb               414 drivers/net/ethernet/hisilicon/hisi_femac.c 		skb = txq->skb[pos];
skb               415 drivers/net/ethernet/hisilicon/hisi_femac.c 		if (unlikely(!skb)) {
skb               420 drivers/net/ethernet/hisilicon/hisi_femac.c 		hisi_femac_tx_dma_unmap(priv, skb, pos);
skb               421 drivers/net/ethernet/hisilicon/hisi_femac.c 		dev_kfree_skb_any(skb);
skb               422 drivers/net/ethernet/hisilicon/hisi_femac.c 		txq->skb[pos] = NULL;
skb               499 drivers/net/ethernet/hisilicon/hisi_femac.c static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
skb               526 drivers/net/ethernet/hisilicon/hisi_femac.c 	addr = dma_map_single(priv->dev, skb->data,
skb               527 drivers/net/ethernet/hisilicon/hisi_femac.c 			      skb->len, DMA_TO_DEVICE);
skb               529 drivers/net/ethernet/hisilicon/hisi_femac.c 		dev_kfree_skb_any(skb);
skb               535 drivers/net/ethernet/hisilicon/hisi_femac.c 	txq->skb[txq->head] = skb;
skb               539 drivers/net/ethernet/hisilicon/hisi_femac.c 	writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);
skb               544 drivers/net/ethernet/hisilicon/hisi_femac.c 	dev->stats.tx_bytes += skb->len;
skb               545 drivers/net/ethernet/hisilicon/hisi_femac.c 	netdev_sent_queue(dev, skb->len);
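hisi_femac.c keeps one saved skb pointer per descriptor and pairs netdev_sent_queue() on transmit with completion-side accounting, which is the byte-queue-limits (BQL) contract. A hedged reclaim sketch; my_txq and the 'done' count are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_txq {
	struct sk_buff **skb;		/* one saved pointer per descriptor */
	dma_addr_t *dma;
	unsigned int num, head, tail;
};

static void my_tx_clean(struct net_device *dev, struct device *dmadev,
			struct my_txq *txq, unsigned int done)
{
	unsigned int bytes = 0, pkts = 0;

	while (done--) {
		struct sk_buff *skb = txq->skb[txq->tail];

		if (WARN_ON(!skb))
			break;
		dma_unmap_single(dmadev, txq->dma[txq->tail], skb->len,
				 DMA_TO_DEVICE);
		bytes += skb->len;
		pkts++;
		dev_kfree_skb_any(skb);
		txq->skb[txq->tail] = NULL;
		txq->tail = (txq->tail + 1) % txq->num;
	}
	/* Pairs with netdev_sent_queue() in the xmit path (BQL). */
	netdev_completed_queue(dev, pkts, bytes);
}

Without the completed-queue call, BQL would throttle the queue permanently once the sent-queue budget was exhausted.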
skb               467 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	struct sk_buff *skb;
skb               482 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			skb = netdev_alloc_skb_ip_align(priv->netdev, len);
skb               483 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			if (unlikely(skb == NULL))
skb               487 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
skb               489 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			dev_kfree_skb_any(skb);
skb               495 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		priv->rx_skb[pos] = skb;
skb               511 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	struct sk_buff *skb;
skb               527 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		skb = priv->rx_skb[pos];
skb               528 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		if (unlikely(!skb)) {
skb               541 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		skb_put(skb, len);
skb               542 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		if (skb->len > MAC_MAX_FRAME_SIZE) {
skb               543 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			netdev_err(dev, "rcv len err, len = %d\n", skb->len);
skb               546 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			dev_kfree_skb_any(skb);
skb               550 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		skb->protocol = eth_type_trans(skb, dev);
skb               551 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		napi_gro_receive(&priv->napi, skb);
skb               553 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		dev->stats.rx_bytes += skb->len;
skb               567 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 				  struct sk_buff *skb, u32 pos)
skb               580 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               589 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	struct sk_buff *skb;
skb               605 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		skb = priv->tx_skb[pos];
skb               606 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		if (unlikely(!skb)) {
skb               612 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		bytes_compl += skb->len;
skb               615 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		if (skb_shinfo(skb)->nr_frags) {
skb               616 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			hix5hd2_clean_sg_desc(priv, skb, pos);
skb               619 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			dma_unmap_single(priv->dev, addr, skb->len,
skb               624 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		dev_consume_skb_any(skb);
skb               683 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c static u32 hix5hd2_get_desc_cmd(struct sk_buff *skb, unsigned long hw_cap)
skb               688 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		if (skb_shinfo(skb)->nr_frags)
skb               690 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		cmd |= skb_shinfo(skb)->nr_frags << DESC_FRAGS_NUM_OFF;
skb               693 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			((skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
skb               696 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	cmd |= (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF;
skb               703 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 				struct sk_buff *skb, u32 pos)
skb               712 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	desc->total_len = cpu_to_le32(skb->len);
skb               713 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	addr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
skb               718 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	desc->linear_len = cpu_to_le32(skb_headlen(skb));
skb               720 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               721 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               735 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c static netdev_tx_t hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
skb               755 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	cmd = hix5hd2_get_desc_cmd(skb, priv->hw_cap);
skb               758 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	if (skb_shinfo(skb)->nr_frags) {
skb               759 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		ret = hix5hd2_fill_sg_desc(priv, skb, pos);
skb               761 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			dev_kfree_skb_any(skb);
skb               767 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		addr = dma_map_single(priv->dev, skb->data, skb->len,
skb               770 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 			dev_kfree_skb_any(skb);
skb               777 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	priv->tx_skb[pos] = skb;
skb               787 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	dev->stats.tx_bytes += skb->len;
skb               788 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	netdev_sent_queue(dev, skb->len);
skb               800 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		struct sk_buff *skb = priv->rx_skb[i];
skb               801 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		if (skb == NULL)
skb               808 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		dev_kfree_skb_any(skb);
skb               813 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		struct sk_buff *skb = priv->tx_skb[i];
skb               814 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		if (skb == NULL)
skb               819 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
skb               820 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 		dev_kfree_skb_any(skb);
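hix5hd2_gmac.c builds scatter-gather descriptors by mapping the linear head with dma_map_single() and each page fragment with skb_frag_dma_map() (hix5hd2_fill_sg_desc). A sketch of that mapping loop with the error unwind the clean path above implies; my_map_skb and the addr[] array are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map the linear head plus every page fragment; unwind on failure.
 * addr[] must hold nr_frags + 1 entries. */
static int my_map_skb(struct device *dev, struct sk_buff *skb,
		      dma_addr_t *addr)
{
	int i;

	addr[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr[0]))
		return -ENOMEM;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addr[i + 1] = skb_frag_dma_map(dev, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addr[i + 1]))
			goto err_unmap;
	}
	return 0;

err_unmap:
	while (--i >= 0)
		dma_unmap_page(dev, addr[i + 1],
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	dma_unmap_single(dev, addr[0], skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}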
skb                44 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sk_buff *skb;
skb                71 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		skb = (struct sk_buff *)priv;
skb                73 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb                74 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			skb_reset_mac_len(skb);
skb                75 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			protocol = skb->protocol;
skb                80 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				protocol = vlan_get_protocol(skb);
skb                81 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				skb->protocol = protocol;
skb                84 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			if (skb->protocol == htons(ETH_P_IP)) {
skb                85 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				iphdr = ip_hdr(skb);
skb                91 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				    skb_is_gso(skb)) {
skb                94 drivers/net/ethernet/hisilicon/hns/hns_enet.c 					l4_len = tcp_hdrlen(skb);
skb                95 drivers/net/ethernet/hisilicon/hns/hns_enet.c 					mss = skb_shinfo(skb)->gso_size;
skb                96 drivers/net/ethernet/hisilicon/hns/hns_enet.c 					paylen = skb->len - SKB_TMP_LEN(skb);
skb                98 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               100 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				ipv6hdr = ipv6_hdr(skb);
skb               105 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
skb               108 drivers/net/ethernet/hisilicon/hns/hns_enet.c 					l4_len = tcp_hdrlen(skb);
skb               109 drivers/net/ethernet/hisilicon/hns/hns_enet.c 					mss = skb_shinfo(skb)->gso_size;
skb               110 drivers/net/ethernet/hisilicon/hns/hns_enet.c 					paylen = skb->len - SKB_TMP_LEN(skb);
skb               150 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sk_buff *skb;
skb               170 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		skb = (struct sk_buff *)priv;
skb               172 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               173 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			protocol = skb->protocol;
skb               179 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				protocol = vlan_get_protocol(skb);
skb               180 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				skb->protocol = protocol;
skb               183 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			if (skb->protocol == htons(ETH_P_IP)) {
skb               188 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               213 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sk_buff *skb = *out_skb;
skb               218 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	buf_num = skb_shinfo(skb)->nr_frags + 1;
skb               224 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		new_skb = skb_copy(skb, GFP_ATOMIC);
skb               228 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		dev_kfree_skb_any(skb);
skb               246 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sk_buff *skb = *out_skb;
skb               250 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	size = skb_headlen(skb);
skb               253 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	frag_num = skb_shinfo(skb)->nr_frags;
skb               255 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		frag = &skb_shinfo(skb)->frags[i];
skb               261 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
skb               265 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		new_skb = skb_copy(skb, GFP_ATOMIC);
skb               268 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		dev_kfree_skb_any(skb);
skb               305 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				struct sk_buff *skb,
skb               319 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
skb               332 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	seg_num = skb_shinfo(skb)->nr_frags + 1;
skb               336 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	size = skb_headlen(skb);
skb               337 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
skb               343 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
skb               348 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		frag = &skb_shinfo(skb)->frags[i - 1];
skb               362 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
skb               363 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	netdev_tx_sent_queue(dev_queue, skb->len);
skb               366 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ndev->stats.tx_bytes += skb->len;
skb               370 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	assert(skb->queue_mapping < priv->ae_handle->q_num);
skb               371 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
skb               393 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	dev_kfree_skb_any(skb);
skb               398 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	netif_stop_subqueue(ndev, skb->queue_mapping);
skb               408 drivers/net/ethernet/hisilicon/hns/hns_enet.c static void hns_nic_reuse_page(struct sk_buff *skb, int i,
skb               431 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
skb               474 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				struct sk_buff *skb, u32 flag)
skb               535 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               544 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sk_buff *skb;
skb               565 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
skb               567 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (unlikely(!skb)) {
skb               572 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	prefetchw(skb->data);
skb               579 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
skb               597 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		memcpy(__skb_put(skb, pull_len), va,
skb               600 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
skb               611 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
skb               625 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		dev_kfree_skb_any(skb);
skb               635 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		dev_kfree_skb_any(skb);
skb               642 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		dev_kfree_skb_any(skb);
skb               648 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		dev_kfree_skb_any(skb);
skb               653 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring->stats.rx_bytes += skb->len;
skb               658 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	hns_nic_rx_checksum(ring_data, skb, bnum_flag);
skb               697 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			      struct sk_buff *skb)
skb               701 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	skb->protocol = eth_type_trans(skb, ndev);
skb               702 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	(void)napi_gro_receive(&ring_data->napi, skb);
skb               824 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sk_buff *skb;
skb               846 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
skb               847 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (unlikely(!skb)) /* this fault cannot be repaired */
skb               859 drivers/net/ethernet/hisilicon/hns/hns_enet.c 							ring_data, skb);
skb              1514 drivers/net/ethernet/hisilicon/hns/hns_enet.c static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
skb              1519 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	assert(skb->queue_mapping < ndev->ae_handle->q_num);
skb              1521 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	return hns_nic_net_xmit_hw(ndev, skb,
skb              1522 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				   &tx_ring_data(priv, skb->queue_mapping));
skb              1526 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				  struct sk_buff *skb)
skb              1528 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	dev_kfree_skb_any(skb);
skb              1534 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sk_buff *skb;
skb              1539 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	skb = alloc_skb(64, GFP_KERNEL);
skb              1540 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (!skb)
skb              1543 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	skb_put(skb, 64);
skb              1544 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	skb->dev = ndev;
skb              1545 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	memset(skb->data, 0xFF, skb->len);
skb              1548 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ethhdr = (struct ethhdr *)skb->data;
skb              1551 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	frame_len = skb->len & (~1ul);
skb              1552 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	memset(&skb->data[frame_len / 2], 0xAA,
skb              1555 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	skb->queue_mapping = HNS_LB_TX_RING;
skb              1557 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	return skb;
skb              1621 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sk_buff *skb;
skb              1679 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				skb = hns_assemble_skb(ndev);
skb              1680 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				if (!skb)
skb              1682 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				rd = &tx_ring_data(priv, skb->queue_mapping);
skb              1683 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				hns_nic_net_xmit_hw(ndev, skb, rd);
skb              1940 drivers/net/ethernet/hisilicon/hns/hns_enet.c hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
skb              1943 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
skb              1951 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		return netdev_pick_tx(ndev, skb, NULL);
skb                92 drivers/net/ethernet/hisilicon/hns/hns_enet.h 				struct sk_buff *skb,
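hns_enet.c limits a frame to a fixed number of buffer descriptors and, when the fragment count exceeds the limit, falls back to skb_copy() to obtain a fully linear replacement (hns_nic_maybe_stop_tx, hns_nic_maybe_stop_tso). A minimal sketch of that fallback, with hypothetical names:

#include <linux/skbuff.h>

static int my_maybe_linearize(struct sk_buff **out_skb, int max_bds)
{
	struct sk_buff *skb = *out_skb, *new_skb;

	if (skb_shinfo(skb)->nr_frags + 1 <= max_bds)
		return 0;		/* fits the ring as-is */

	new_skb = skb_copy(skb, GFP_ATOMIC);	/* fully linear copy */
	if (!new_skb)
		return -ENOMEM;

	dev_kfree_skb_any(skb);		/* drop the fragmented original */
	*out_skb = new_skb;
	return 0;
}

The copy is expensive, so it only runs on the rare frame whose fragment list outgrows the descriptor budget.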
skb               372 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 			       struct sk_buff *skb)
skb               385 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		ndev = skb->dev;
skb               388 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		frame_size = skb->len;
skb               389 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		memset(skb->data, 0xFF, frame_size);
skb               392 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 			memcpy(skb->data, ndev->dev_addr, 6);
skb               393 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 			skb->data[5] += 0x1f;
skb               397 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
skb               398 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		memset(&skb->data[frame_size / 2 + 10], 0xBE,
skb               400 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		memset(&skb->data[frame_size / 2 + 12], 0xAF,
skb               413 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	frame_size = skb->len;
skb               416 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	new_skb = skb_copy(skb, GFP_ATOMIC);
skb               417 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	dev_kfree_skb_any(skb);
skb               418 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	skb = new_skb;
skb               421 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	if (*(skb->data + 10) == 0xFF) { /* for rx check frame*/
skb               422 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
skb               423 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		    (*(skb->data + frame_size / 2 + 12) == 0xAF))
skb               429 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		ndev->stats.rx_bytes += skb->len;
skb               432 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		for (i = 0; i < skb->len; i++) {
skb               434 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 				 "%02x", *(skb->data + i));
skb               435 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 			if ((i % 16 == 15) || (i == skb->len - 1))
skb               439 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	dev_kfree_skb_any(skb);
skb               485 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	struct sk_buff *skb;
skb               489 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	skb = alloc_skb(size, GFP_KERNEL);
skb               490 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	if (!skb)
skb               494 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	(void)skb_put(skb, size);
skb               495 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	skb->dev = ndev;
skb               496 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	__lb_other_process(NULL, skb);
skb               497 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	skb->queue_mapping = NIC_LB_TEST_RING_ID;
skb               505 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 			(void)skb_get(skb);
skb               508 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 				ndev, skb,
skb               509 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 				&tx_ring_data(priv, skb->queue_mapping));
skb               542 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	kfree_skb(skb);
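hns_ethtool.c's loopback self-test transmits a frame stamped with recognizable byte patterns (0xFF header fill, 0xBE/0xAF markers past the midpoint) and verifies them when the frame comes back through __lb_other_process(). A simplified fill/verify pair, assuming single-byte markers at the same offsets (the real test stamps whole ranges):

#include <linux/skbuff.h>
#include <linux/string.h>

static void my_lb_fill(struct sk_buff *skb)
{
	unsigned int half = skb->len / 2;

	memset(skb->data, 0xFF, skb->len);	/* base pattern */
	memset(&skb->data[half], 0xAA, half - 1);
	skb->data[half + 10] = 0xBE;		/* RX-side markers */
	skb->data[half + 12] = 0xAF;
}

static bool my_lb_ok(const struct sk_buff *skb)
{
	unsigned int half = skb->len / 2;

	return skb->data[half + 10] == 0xBE &&
	       skb->data[half + 12] == 0xAF;
}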
skb               673 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
skb               682 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!skb_is_gso(skb))
skb               685 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = skb_cow_head(skb, 0);
skb               689 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l3.hdr = skb_network_header(skb);
skb               690 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l4.hdr = skb_transport_header(skb);
skb               699 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
skb               703 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if ((!(skb_shinfo(skb)->gso_type &
skb               705 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		    (skb_shinfo(skb)->gso_type &
skb               713 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		l3.hdr = skb_inner_network_header(skb);
skb               714 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		l4.hdr = skb_inner_transport_header(skb);
skb               724 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l4_offset = l4.hdr - skb->data;
skb               728 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l4_paylen = skb->len - l4_offset;
skb               733 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	*paylen = skb->len - hdr_len;
skb               737 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	*mss = skb_shinfo(skb)->gso_size;
skb               742 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
skb               752 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l3.hdr = skb_network_header(skb);
skb               753 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l4_hdr = skb_transport_header(skb);
skb               755 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (skb->protocol == htons(ETH_P_IPV6)) {
skb               759 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ipv6_skip_exthdr(skb, exthdr - skb->data,
skb               761 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	} else if (skb->protocol == htons(ETH_P_IP)) {
skb               770 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!skb->encapsulation) {
skb               776 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l3.hdr = skb_inner_network_header(skb);
skb               777 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l4_hdr = skb_inner_transport_header(skb);
skb               783 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ipv6_skip_exthdr(skb, exthdr - skb->data,
skb               800 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
skb               804 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l4.hdr = skb_transport_header(skb);
skb               806 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!(!skb->encapsulation &&
skb               810 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb_checksum_help(skb);
skb               815 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
skb               823 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l3.hdr = skb_network_header(skb);
skb               824 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l4.hdr = skb_transport_header(skb);
skb               827 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l2_len = l3.hdr - skb->data;
skb               834 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	il2_hdr = skb_inner_mac_header(skb);
skb               840 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               841 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (skb_is_gso(skb))
skb               850 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               863 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
skb               867 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	unsigned char *l2_hdr = skb->data;
skb               873 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l4.hdr = skb_transport_header(skb);
skb               874 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	l3.hdr = skb_network_header(skb);
skb               877 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (skb->encapsulation) {
skb               883 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			if (skb_is_gso(skb))
skb               889 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			skb_checksum_help(skb);
skb               893 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
skb               896 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		l2_hdr = skb_inner_mac_header(skb);
skb               897 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		l3.hdr = skb_inner_network_header(skb);
skb               898 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		l4.hdr = skb_inner_transport_header(skb);
skb               909 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (skb_is_gso(skb))
skb               934 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (hns3_tunnel_csum_bug(skb))
skb               954 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (skb_is_gso(skb))
skb               960 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb_checksum_help(skb);
skb               976 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			     struct sk_buff *skb)
skb               982 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!(skb->protocol == htons(ETH_P_8021Q) ||
skb               983 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	      skb_vlan_tag_present(skb)))
skb               989 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (unlikely(skb_vlan_tagged_multi(skb) &&
skb               994 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (skb->protocol == htons(ETH_P_8021Q) &&
skb              1000 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb->protocol = vlan_get_protocol(skb);
skb              1004 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (skb_vlan_tag_present(skb)) {
skb              1008 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (skb->protocol == htons(ETH_P_8021Q) &&
skb              1015 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb->protocol = vlan_get_protocol(skb);
skb              1019 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	rc = skb_cow_head(skb, 0);
skb              1023 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	vhdr = (struct vlan_ethhdr *)skb->data;
skb              1024 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
skb              1027 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb->protocol = vlan_get_protocol(skb);
skb              1032 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			      struct sk_buff *skb, struct hns3_desc *desc)
skb              1036 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u32 paylen = skb->len;
skb              1042 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_handle_vtags(ring, skb);
skb              1049 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		inner_vtag = skb_vlan_tag_get(skb);
skb              1050 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
skb              1054 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		out_vtag = skb_vlan_tag_get(skb);
skb              1055 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
skb              1061 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1064 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb_reset_mac_len(skb);
skb              1066 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
skb              1074 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
skb              1084 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_set_tso(skb, &paylen, &mss,
skb              1119 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		struct sk_buff *skb = (struct sk_buff *)priv;
skb              1122 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_fill_skb_desc(ring, skb, desc);
skb              1126 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
skb              1191 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
skb              1197 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (likely(skb->len <= HNS3_MAX_BD_SIZE))
skb              1198 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		return skb_shinfo(skb)->nr_frags + 1;
skb              1200 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	bd_num = hns3_tx_bd_count(skb_headlen(skb));
skb              1202 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1203 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1210 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
skb              1212 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!skb->encapsulation)
skb              1213 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		return skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1215 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
skb              1223 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static bool hns3_skb_need_linearized(struct sk_buff *skb)
skb              1230 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              1235 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
skb              1236 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	    hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
skb              1240 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
skb              1241 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              1242 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
skb              1244 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (tot_len < skb_shinfo(skb)->gso_size)
skb              1254 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct sk_buff *skb = *out_skb;
skb              1257 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	bd_num = hns3_nic_bd_num(skb);
skb              1261 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
skb              1262 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		    !hns3_skb_need_linearized(skb))
skb              1266 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		new_skb = skb_copy(skb, GFP_ATOMIC);
skb              1269 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dev_kfree_skb_any(skb);
skb              1319 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              1323 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		&tx_ring_data(priv, skb->queue_mapping);
skb              1335 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
skb              1339 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	prefetch(skb->data);
skb              1341 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
skb              1359 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	seg_num = skb_shinfo(skb)->nr_frags + 1;
skb              1361 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	size = skb_headlen(skb);
skb              1365 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
skb              1372 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		frag = &skb_shinfo(skb)->frags[i - 1];
skb              1385 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	netdev_tx_sent_queue(dev_queue, skb->len);
skb              1397 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	dev_kfree_skb_any(skb);
skb              1792 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb              1801 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (skb->encapsulation)
skb              1804 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
skb              2432 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
skb              2440 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
skb              2464 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
skb              2466 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	__be16 type = skb->protocol;
skb              2473 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if ((depth + VLAN_HLEN) > skb_headlen(skb))
skb              2476 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		vh = (struct vlan_hdr *)(skb->data + depth);
skb              2481 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb_set_network_header(skb, depth);
skb              2484 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		const struct iphdr *iph = ip_hdr(skb);
skb              2487 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb_set_transport_header(skb, depth);
skb              2488 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		th = tcp_hdr(skb);
skb              2489 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
skb              2492 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		const struct ipv6hdr *iph = ipv6_hdr(skb);
skb              2495 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb_set_transport_header(skb, depth);
skb              2496 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		th = tcp_hdr(skb);
skb              2497 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
skb              2500 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_rl_err(skb->dev,
skb              2506 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
skb              2508 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
skb              2511 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
skb              2513 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb->csum_start = (unsigned char *)th - skb->head;
skb              2514 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb->csum_offset = offsetof(struct tcphdr, check);
skb              2515 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb              2519 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
skb              2526 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb->ip_summed = CHECKSUM_NONE;
skb              2528 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb_checksum_none_assert(skb);
skb              2552 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb->csum_level = 1;
skb              2566 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2573 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
skb              2575 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (skb_has_frag_list(skb))
skb              2578 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	napi_gro_receive(&ring->tqp_vector->napi, skb);
skb              2639 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct sk_buff *skb;
skb              2641 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
skb              2642 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb = ring->skb;
skb              2643 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (unlikely(!skb)) {
skb              2653 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	prefetchw(skb->data);
skb              2659 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
skb              2675 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	__skb_put(skb, ring->pull_len);
skb              2676 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
skb              2686 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct sk_buff *skb = *out_skb;
skb              2729 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				skb_shinfo(skb)->frag_list = new_skb;
skb              2738 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			skb = ring->tail_skb;
skb              2741 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
skb              2750 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				     struct sk_buff *skb, u32 l234info,
skb              2755 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
skb              2759 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!skb_shinfo(skb)->gso_size) {
skb              2760 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
skb              2764 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
skb              2770 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
skb              2772 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb              2776 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	return hns3_gro_complete(skb, l234info);
skb              2780 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				     struct sk_buff *skb, u32 rss_hash)
skb              2790 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb_set_hash(skb, rss_hash, rss_type);
skb              2793 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
skb              2821 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              2837 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	len = skb->len;
skb              2840 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              2843 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_set_gro_and_checksum(ring, skb, l234info,
skb              2866 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
skb              2873 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct sk_buff *skb = ring->skb;
skb              2892 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!skb)
skb              2907 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!skb) {
skb              2909 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		*out_skb = skb = ring->skb;
skb              2914 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ret = hns3_add_frag(ring, desc, &skb, false);
skb              2921 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			memcpy(skb->data, ring->va,
skb              2925 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_add_frag(ring, desc, &skb, true);
skb              2932 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		memcpy(skb->data, ring->va,
skb              2936 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_handle_bdinfo(ring, skb);
skb              2938 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dev_kfree_skb_any(skb);
skb              2942 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb_record_rx_queue(skb, ring->tqp->tqp_index);
skb              2943 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	*out_skb = skb;
skb              2953 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct sk_buff *skb = ring->skb;
skb              2973 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		err = hns3_handle_rx_bd(ring, &skb);
skb              2974 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (unlikely(!skb)) /* This fault cannot be repaired */
skb              2982 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->skb = NULL;
skb              2987 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		rx_fn(ring, skb);
skb              2990 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->skb = NULL;
skb              3637 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (ring->skb) {
skb              3638 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dev_kfree_skb_any(ring->skb);
skb              3639 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->skb = NULL;
skb              4125 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (ring->skb) {
skb              4126 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dev_kfree_skb_any(ring->skb);
skb              4127 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->skb = NULL;
skb               431 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h 	struct sk_buff *skb;
skb               656 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
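A recurring chore in hns3_enet.c's TX offload setup (hns3_get_l4_protocol) is discovering the real L4 protocol: trivial for IPv4, but IPv6 requires walking the extension-header chain with ipv6_skip_exthdr(). A sketch of that lookup, assuming the headers already sit in linear data; my_l4_proto is hypothetical:

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>

static int my_l4_proto(struct sk_buff *skb, u8 *l4_proto)
{
	unsigned char *exthdr;
	__be16 frag_off;

	if (skb->protocol == htons(ETH_P_IP)) {
		*l4_proto = ip_hdr(skb)->protocol;	/* one field, done */
		return 0;
	}

	if (skb->protocol == htons(ETH_P_IPV6)) {
		*l4_proto = ipv6_hdr(skb)->nexthdr;
		exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
		/* nexthdr may name an extension header, not TCP/UDP;
		 * skip the chain to find the terminal protocol. */
		if (*l4_proto != NEXTHDR_TCP && *l4_proto != NEXTHDR_UDP)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 l4_proto, &frag_off);
		return 0;
	}

	return -EINVAL;		/* not an L3 protocol we can offload */
}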
skb               140 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c static void hns3_lp_setup_skb(struct sk_buff *skb)
skb               144 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	struct net_device *ndev = skb->dev;
skb               150 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	skb_reserve(skb, NET_IP_ALIGN);
skb               151 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	ethh = skb_put(skb, sizeof(struct ethhdr));
skb               152 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
skb               166 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	skb_reset_mac_header(skb);
skb               173 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 				   struct sk_buff *skb)
skb               176 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	unsigned char *packet = skb->data;
skb               179 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	for (i = 0; i < skb->len; i++)
skb               184 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	if (i == skb->len)
skb               188 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 			       skb->data, skb->len, true);
skb               190 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	dev_kfree_skb_any(skb);
skb               238 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	struct sk_buff *skb;
skb               242 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN,
skb               244 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	if (!skb)
skb               247 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	skb->dev = ndev;
skb               248 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	hns3_lp_setup_skb(skb);
skb               249 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID;
skb               255 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		skb_get(skb);
skb               256 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		tx_ret = hns3_nic_net_xmit(skb, ndev);
skb               260 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 			kfree_skb(skb);
skb               287 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	kfree_skb(skb);
skb               924 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		tmp_rings[i].skb = NULL;
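The hns3 loopback loop above calls skb_get() before every hns3_nic_net_xmit() so the driver's completion-time free only drops a reference and the test frame survives for the next iteration. A sketch of that reference dance; my_lb_run and the xmit callback are hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_lb_run(struct net_device *ndev, struct sk_buff *skb,
		     netdev_tx_t (*xmit)(struct sk_buff *,
					 struct net_device *),
		     int budget)
{
	int i, good = 0;

	for (i = 0; i < budget; i++) {
		skb_get(skb);		/* extra ref: the driver's free on
					 * completion keeps the frame alive */
		if (xmit(skb, ndev) == NETDEV_TX_OK)
			good++;
		else
			kfree_skb(skb);	/* drop the ref we just took */
	}
	kfree_skb(skb);			/* drop the caller's reference */
	return good;
}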
skb               226 drivers/net/ethernet/hp/hp100.c static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
skb               228 drivers/net/ethernet/hp/hp100.c static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
skb              1205 drivers/net/ethernet/hp/hp100.c 	ringptr->skb = NULL;
skb              1231 drivers/net/ethernet/hp/hp100.c 	ringptr->skb = NULL;
skb              1262 drivers/net/ethernet/hp/hp100.c 	ringptr->skb = netdev_alloc_skb(dev, roundup(MAX_ETHER_SIZE + 2, 4));
skb              1264 drivers/net/ethernet/hp/hp100.c 	if (NULL != ringptr->skb) {
skb              1270 drivers/net/ethernet/hp/hp100.c 		skb_reserve(ringptr->skb, 2);
skb              1272 drivers/net/ethernet/hp/hp100.c 		ringptr->skb->data = skb_put(ringptr->skb, MAX_ETHER_SIZE);
skb              1285 drivers/net/ethernet/hp/hp100.c 				     (unsigned int) ringptr->skb->data);
skb              1292 drivers/net/ethernet/hp/hp100.c 					       ringptr->skb->data);
skb              1472 drivers/net/ethernet/hp/hp100.c static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
skb              1485 drivers/net/ethernet/hp/hp100.c 	if (skb->len <= 0)
skb              1488 drivers/net/ethernet/hp/hp100.c 	if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN))
skb              1547 drivers/net/ethernet/hp/hp100.c 	ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
skb              1548 drivers/net/ethernet/hp/hp100.c 	i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
skb              1550 drivers/net/ethernet/hp/hp100.c 	ringptr->skb = skb;
skb              1557 drivers/net/ethernet/hp/hp100.c 		ringptr->pdl[2] = skb->len;	/* 1st Frag: Length of frag */
skb              1561 drivers/net/ethernet/hp/hp100.c 	ringptr->pdl[1] = ((u32) pci_map_single(lp->pci_dev, skb->data, ringptr->pdl[2], PCI_DMA_TODEVICE));	/* 1st Frag: Adr. of data */
skb              1569 drivers/net/ethernet/hp/hp100.c 	dev->stats.tx_bytes += skb->len;
skb              1576 drivers/net/ethernet/hp/hp100.c 	dev_kfree_skb(skb);
skb              1610 drivers/net/ethernet/hp/hp100.c 				dev->name, (u_int) lp->txrhead->skb->data,
skb              1615 drivers/net/ethernet/hp/hp100.c 		dev_consume_skb_any(lp->txrhead->skb);
skb              1616 drivers/net/ethernet/hp/hp100.c 		lp->txrhead->skb = NULL;
skb              1623 drivers/net/ethernet/hp/hp100.c static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
skb              1636 drivers/net/ethernet/hp/hp100.c 	if (skb->len <= 0)
skb              1644 drivers/net/ethernet/hp/hp100.c 	if (!(((i / 2) - 539) > (skb->len + 16) && (hp100_inb(TX_PKT_CNT) < 255))) {
skb              1704 drivers/net/ethernet/hp/hp100.c 			dev->name, val, hp100_inw(IRQ_MASK), (int) skb->len);
skb              1707 drivers/net/ethernet/hp/hp100.c 	ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
skb              1708 drivers/net/ethernet/hp/hp100.c 	i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
skb              1715 drivers/net/ethernet/hp/hp100.c 		memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3);
skb              1717 drivers/net/ethernet/hp/hp100.c 			memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len);
skb              1719 drivers/net/ethernet/hp/hp100.c 		outsl(ioaddr + HP100_REG_DATA32, skb->data,
skb              1720 drivers/net/ethernet/hp/hp100.c 		      (skb->len + 3) >> 2);
skb              1722 drivers/net/ethernet/hp/hp100.c 			for (i = (skb->len + 3) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4)
skb              1729 drivers/net/ethernet/hp/hp100.c 	dev->stats.tx_bytes += skb->len;
skb              1733 drivers/net/ethernet/hp/hp100.c 	dev_consume_skb_any(skb);
skb              1742 drivers/net/ethernet/hp/hp100.c 	dev_kfree_skb(skb);
skb              1763 drivers/net/ethernet/hp/hp100.c 	struct sk_buff *skb;
skb              1804 drivers/net/ethernet/hp/hp100.c 		skb = netdev_alloc_skb(dev, pkt_len + 2);
skb              1805 drivers/net/ethernet/hp/hp100.c 		if (skb == NULL) {	/* Not enough memory->drop packet */
skb              1815 drivers/net/ethernet/hp/hp100.c 			skb_reserve(skb,2);
skb              1818 drivers/net/ethernet/hp/hp100.c 			skb_put(skb, pkt_len);
skb              1819 drivers/net/ethernet/hp/hp100.c 			ptr = skb->data;
skb              1827 drivers/net/ethernet/hp/hp100.c 			skb->protocol = eth_type_trans(skb, dev);
skb              1835 drivers/net/ethernet/hp/hp100.c 			netif_rx(skb);
skb              1915 drivers/net/ethernet/hp/hp100.c 			if (ptr->skb == NULL) {
skb              1920 drivers/net/ethernet/hp/hp100.c 				skb_trim(ptr->skb, pkt_len);	/* Shorten it */
skb              1921 drivers/net/ethernet/hp/hp100.c 				ptr->skb->protocol =
skb              1922 drivers/net/ethernet/hp/hp100.c 				    eth_type_trans(ptr->skb, dev);
skb              1924 drivers/net/ethernet/hp/hp100.c 				netif_rx(ptr->skb);	/* Up and away... */
skb              1940 drivers/net/ethernet/hp/hp100.c 			if (ptr->skb != NULL)
skb              1941 drivers/net/ethernet/hp/hp100.c 				dev_kfree_skb_any(ptr->skb);
skb               543 drivers/net/ethernet/hp/hp100.h 	struct sk_buff *skb;
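The hp100.c receive hits above follow the classic copy-mode RX recipe: allocate pkt_len + 2 bytes, reserve 2 bytes so the IP header lands on a 4-byte boundary, fill the frame, then hand it to the stack. A minimal sketch under those assumptions; hw_buf stands in for the device-specific source of the frame data.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void rx_one_frame(struct net_device *dev, const void *hw_buf,
			 unsigned int pkt_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + 2);

	if (!skb) {
		dev->stats.rx_dropped++;	/* no memory: drop frame */
		return;
	}
	skb_reserve(skb, 2);			/* align the IP header */
	skb_put_data(skb, hw_buf, pkt_len);	/* reserve room and copy */
	skb->protocol = eth_type_trans(skb, dev);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += pkt_len;
	netif_rx(skb);
}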
skb               687 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 			struct sk_buff *skb, unsigned int wqe_size)
skb               691 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	sq->saved_skb[prod_idx] = skb;
skb               710 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 					 struct sk_buff **skb,
skb               724 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	*skb = sq->saved_skb[*cons_idx];
skb               748 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 				       struct sk_buff **skb,
skb               754 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	*skb = sq->saved_skb[*cons_idx];
skb               814 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 			struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb)
skb               818 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->saved_skb[prod_idx] = skb;
skb               837 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 				       struct sk_buff **skb, u16 *cons_idx)
skb               856 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	*skb = rq->saved_skb[*cons_idx];
skb               872 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 					    struct sk_buff **skb,
skb               884 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	*skb = rq->saved_skb[*cons_idx];
skb               182 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h 			struct hinic_sq_wqe *wqe, struct sk_buff *skb,
skb               186 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h 				       struct sk_buff **skb,
skb               190 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h 					 struct sk_buff **skb,
skb               202 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h 			struct hinic_rq_wqe *wqe, struct sk_buff *skb);
skb               206 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h 				       struct sk_buff **skb, u16 *cons_idx);
skb               210 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h 					    struct sk_buff **skb,
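The hinic_hw_qp hits show the queue-pair bookkeeping: the skb pointer is parked in saved_skb[] at the producer index when a WQE is written and fetched back at the consumer index on completion. A simplified sketch; demo_sq is a stand-in for hinic_sq, and the power-of-two masking is an assumption (the real driver passes already-wrapped indices).

#include <linux/skbuff.h>

struct demo_sq {
	struct sk_buff **saved_skb;	/* one slot per WQE */
	u16 wqe_cnt;			/* assumed power of two */
};

static void demo_sq_save_skb(struct demo_sq *sq, u16 prod_idx,
			     struct sk_buff *skb)
{
	sq->saved_skb[prod_idx & (sq->wqe_cnt - 1)] = skb;
}

static struct sk_buff *demo_sq_fetch_skb(struct demo_sq *sq, u16 cons_idx)
{
	u16 idx = cons_idx & (sq->wqe_cnt - 1);
	struct sk_buff *skb = sq->saved_skb[idx];

	sq->saved_skb[idx] = NULL;	/* slot is free again */
	return skb;
}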
skb               102 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		    struct sk_buff *skb)
skb               113 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               118 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		skb->ip_summed = CHECKSUM_NONE;
skb               135 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct sk_buff *skb;
skb               139 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
skb               140 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	if (!skb) {
skb               145 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
skb               154 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	return skb;
skb               157 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	dev_kfree_skb_any(skb);
skb               183 drivers/net/ethernet/huawei/hinic/hinic_rx.c static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
skb               187 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	dev_kfree_skb_any(skb);
skb               203 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct sk_buff *skb;
skb               214 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		skb = rx_alloc_skb(rxq, &dma_addr);
skb               215 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		if (!skb) {
skb               220 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_set_sge(&sge, dma_addr, skb->len);
skb               225 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			rx_free_skb(rxq, skb, dma_addr);
skb               231 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
skb               279 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct sk_buff *skb, *curr_skb = head_skb;
skb               287 drivers/net/ethernet/huawei/hinic/hinic_rx.c 						&skb, &ci);
skb               295 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		prefetch(skb->data);
skb               302 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		__skb_put(skb, curr_len);
skb               305 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			skb_shinfo(head_skb)->frag_list = skb;
skb               307 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			curr_skb->next = skb;
skb               309 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		head_skb->len += skb->len;
skb               310 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		head_skb->data_len += skb->len;
skb               311 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		head_skb->truesize += skb->truesize;
skb               313 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		curr_skb = skb;
skb               338 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct sk_buff *skb;
skb               348 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
skb               362 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rx_csum(rxq, status, skb);
skb               364 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		prefetch(skb->data);
skb               369 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			__skb_put(skb, pkt_len);
skb               371 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			__skb_put(skb, HINIC_RX_BUF_SZ);
skb               372 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
skb               384 drivers/net/ethernet/huawei/hinic/hinic_rx.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb               387 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		skb_record_rx_queue(skb, qp->q_id);
skb               388 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		skb->protocol = eth_type_trans(skb, rxq->netdev);
skb               390 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		napi_gro_receive(&rxq->napi, skb);
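rx_recv_jumbo_pkt() above chains continuation buffers onto the head skb: the first one hangs off skb_shinfo(head)->frag_list, later ones are linked through ->next, and the head's len/data_len/truesize are kept in sync. A sketch of that chaining step, assuming each fragment was already filled with skb_put():

#include <linux/skbuff.h>

static void chain_rx_fragment(struct sk_buff *head, struct sk_buff **curr,
			      struct sk_buff *frag)
{
	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = frag;	/* first fragment */
	else
		(*curr)->next = frag;			/* append to chain */

	head->len += frag->len;
	head->data_len += frag->len;	/* chained data is non-linear */
	head->truesize += frag->truesize;
	*curr = frag;
}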
skb                51 drivers/net/ethernet/huawei/hinic/hinic_tx.c #define TRANSPORT_OFFSET(l4_hdr, skb)	((u32)((l4_hdr) - (skb)->data))
skb               133 drivers/net/ethernet/huawei/hinic/hinic_tx.c static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
skb               143 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
skb               150 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));
skb               152 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) {
skb               153 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		frag = &skb_shinfo(skb)->frags[i];
skb               184 drivers/net/ethernet/huawei/hinic/hinic_tx.c static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
skb               192 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)
skb               200 drivers/net/ethernet/huawei/hinic/hinic_tx.c static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
skb               218 drivers/net/ethernet/huawei/hinic/hinic_tx.c 			int start = exthdr - skb->data;
skb               221 drivers/net/ethernet/huawei/hinic/hinic_tx.c 			ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
skb               229 drivers/net/ethernet/huawei/hinic/hinic_tx.c static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
skb               243 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
skb               249 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
skb               259 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
skb               275 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		       struct sk_buff *skb)
skb               284 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	if (!skb_is_gso(skb))
skb               287 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	if (skb_cow_head(skb, 0) < 0)
skb               290 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	if (skb->encapsulation) {
skb               291 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		u32 gso_type = skb_shinfo(skb)->gso_type;
skb               295 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		ip.hdr = skb_network_header(skb);
skb               296 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		l4.hdr = skb_transport_header(skb);
skb               297 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		network_hdr_len = skb_inner_network_header_len(skb);
skb               309 drivers/net/ethernet/huawei/hinic/hinic_tx.c 					 skb_network_header_len(skb));
skb               318 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		l4_tunnel_len = skb_inner_network_offset(skb) -
skb               319 drivers/net/ethernet/huawei/hinic/hinic_tx.c 				skb_transport_offset(skb);
skb               322 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		ip.hdr = skb_inner_network_header(skb);
skb               323 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		l4.hdr = skb_inner_transport_header(skb);
skb               325 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		ip.hdr = skb_network_header(skb);
skb               326 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		l4.hdr = skb_transport_header(skb);
skb               327 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		network_hdr_len = skb_network_header_len(skb);
skb               336 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
skb               345 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
skb               349 drivers/net/ethernet/huawei/hinic/hinic_tx.c 			       ip_identify, skb_shinfo(skb)->gso_size);
skb               355 drivers/net/ethernet/huawei/hinic/hinic_tx.c 			struct sk_buff *skb)
skb               364 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb               367 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	if (skb->encapsulation) {
skb               370 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		ip.hdr = skb_network_header(skb);
skb               380 drivers/net/ethernet/huawei/hinic/hinic_tx.c 					 skb_network_header_len(skb));
skb               382 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		l4_tunnel_len = skb_inner_network_offset(skb) -
skb               383 drivers/net/ethernet/huawei/hinic/hinic_tx.c 				skb_transport_offset(skb);
skb               388 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		ip.hdr = skb_inner_network_header(skb);
skb               389 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		l4.hdr = skb_inner_transport_header(skb);
skb               390 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		network_hdr_len = skb_inner_network_header_len(skb);
skb               392 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		ip.hdr = skb_network_header(skb);
skb               393 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		l4.hdr = skb_transport_header(skb);
skb               394 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		network_hdr_len = skb_network_header_len(skb);
skb               397 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
skb               402 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
skb               419 drivers/net/ethernet/huawei/hinic/hinic_tx.c static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
skb               426 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	enabled = offload_tso(task, queue_info, skb);
skb               430 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		enabled = offload_csum(task, queue_info, skb);
skb               437 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	if (unlikely(skb_vlan_tag_present(skb))) {
skb               438 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		vlan_tag = skb_vlan_tag_get(skb);
skb               445 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		hinic_task_set_l2hdr(task, skb_network_offset(skb));
skb               462 drivers/net/ethernet/huawei/hinic/hinic_tx.c netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
skb               465 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	u16 prod_idx, q_id = skb->queue_mapping;
skb               476 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	if (skb->len < MIN_SKB_LEN) {
skb               477 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
skb               482 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		skb->len = MIN_SKB_LEN;
skb               485 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	nr_sges = skb_shinfo(skb)->nr_frags + 1;
skb               497 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	err = tx_map_skb(nic_dev, skb, txq->sges);
skb               516 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		tx_unmap_skb(nic_dev, skb, txq->sges);
skb               529 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
skb               533 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
skb               544 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	tx_unmap_skb(nic_dev, skb, txq->sges);
skb               547 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	dev_kfree_skb_any(skb);
skb               563 drivers/net/ethernet/huawei/hinic/hinic_tx.c static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
skb               566 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	tx_unmap_skb(nic_dev, skb, sges);
skb               568 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	dev_kfree_skb_any(skb);
skb               581 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	struct sk_buff *skb;
skb               585 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
skb               586 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
skb               590 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		nr_sges = skb_shinfo(skb)->nr_frags + 1;
skb               596 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		tx_free_skb(nic_dev, skb, txq->free_sges);
skb               618 drivers/net/ethernet/huawei/hinic/hinic_tx.c 	struct sk_buff *skb;
skb               628 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
skb               637 drivers/net/ethernet/huawei/hinic/hinic_tx.c 			sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
skb               642 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		tx_bytes += skb->len;
skb               645 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		nr_sges = skb_shinfo(skb)->nr_frags + 1;
skb               651 drivers/net/ethernet/huawei/hinic/hinic_tx.c 		tx_free_skb(nic_dev, skb, txq->free_sges);
skb                47 drivers/net/ethernet/huawei/hinic/hinic_tx.h netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
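tx_map_skb() above DMA-maps the linear head with dma_map_single() and each page fragment with skb_frag_dma_map(), recording one scatter entry apiece and unwinding on failure. A hedged sketch of that shape with a stand-in demo_sge (the real driver uses hinic_sge and hinic_set_sge()):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct demo_sge { dma_addr_t addr; u32 len; };

static int demo_map_skb(struct device *dev, struct sk_buff *skb,
			struct demo_sge *sges)
{
	int i;

	sges[0].addr = dma_map_single(dev, skb->data, skb_headlen(skb),
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, sges[0].addr))
		return -EFAULT;
	sges[0].len = skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		sges[i + 1].addr = skb_frag_dma_map(dev, frag, 0,
						    skb_frag_size(frag),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, sges[i + 1].addr))
			goto unwind;
		sges[i + 1].len = skb_frag_size(frag);
	}
	return 0;

unwind:
	while (--i >= 0)		/* unmap frags mapped so far */
		dma_unmap_page(dev, sges[i + 1].addr, sges[i + 1].len,
			       DMA_TO_DEVICE);
	dma_unmap_single(dev, sges[0].addr, sges[0].len, DMA_TO_DEVICE);
	return -EFAULT;
}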
skb               238 drivers/net/ethernet/i825xx/82596.c 	struct sk_buff *skb;	/* So we can free it after tx */
skb               281 drivers/net/ethernet/i825xx/82596.c     struct sk_buff *skb;
skb               362 drivers/net/ethernet/i825xx/82596.c static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb               533 drivers/net/ethernet/i825xx/82596.c 		if (rbd->skb == NULL)
skb               535 drivers/net/ethernet/i825xx/82596.c 		dev_kfree_skb(rbd->skb);
skb               536 drivers/net/ethernet/i825xx/82596.c 		rbd->skb = NULL;
skb               550 drivers/net/ethernet/i825xx/82596.c 		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
skb               552 drivers/net/ethernet/i825xx/82596.c 		if (skb == NULL) {
skb               560 drivers/net/ethernet/i825xx/82596.c 		rbd->skb = skb;
skb               561 drivers/net/ethernet/i825xx/82596.c 		rbd->v_data = skb->data;
skb               562 drivers/net/ethernet/i825xx/82596.c 		rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
skb               565 drivers/net/ethernet/i825xx/82596.c 		cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
skb               773 drivers/net/ethernet/i825xx/82596.c 			struct sk_buff *skb = rbd->skb;
skb               789 drivers/net/ethernet/i825xx/82596.c 					skb = NULL;	/* drop pkt */
skb               793 drivers/net/ethernet/i825xx/82596.c 				skb_put(skb, pkt_len);
skb               795 drivers/net/ethernet/i825xx/82596.c 				rbd->skb = newskb;
skb               802 drivers/net/ethernet/i825xx/82596.c 				skb = netdev_alloc_skb(dev, pkt_len + 2);
skb               805 drivers/net/ethernet/i825xx/82596.c 			if (skb == NULL) {
skb               811 drivers/net/ethernet/i825xx/82596.c 					skb_reserve(skb, 2);
skb               812 drivers/net/ethernet/i825xx/82596.c 					skb_put_data(skb, rbd->v_data,
skb               815 drivers/net/ethernet/i825xx/82596.c 				skb->protocol=eth_type_trans(skb,dev);
skb               816 drivers/net/ethernet/i825xx/82596.c 				skb->len = pkt_len;
skb               818 drivers/net/ethernet/i825xx/82596.c 				cache_clear(virt_to_phys(rbd->skb->data),
skb               821 drivers/net/ethernet/i825xx/82596.c 				netif_rx(skb);
skb               890 drivers/net/ethernet/i825xx/82596.c 				struct sk_buff *skb = tx_cmd->skb;
skb               892 drivers/net/ethernet/i825xx/82596.c 				dev_kfree_skb(skb);
skb              1050 drivers/net/ethernet/i825xx/82596.c static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1055 drivers/net/ethernet/i825xx/82596.c 	short length = skb->len;
skb              1058 drivers/net/ethernet/i825xx/82596.c 				dev->name, skb->len, skb->data));
skb              1060 drivers/net/ethernet/i825xx/82596.c 	if (skb->len < ETH_ZLEN) {
skb              1061 drivers/net/ethernet/i825xx/82596.c 		if (skb_padto(skb, ETH_ZLEN))
skb              1075 drivers/net/ethernet/i825xx/82596.c 		dev_kfree_skb(skb);
skb              1083 drivers/net/ethernet/i825xx/82596.c 		tx_cmd->skb = skb;
skb              1090 drivers/net/ethernet/i825xx/82596.c 		tbd->data = WSWAPchar(virt_to_bus(skb->data));
skb              1093 drivers/net/ethernet/i825xx/82596.c 		cache_push(virt_to_phys(skb->data), length);
skb              1095 drivers/net/ethernet/i825xx/82596.c 		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
skb              1295 drivers/net/ethernet/i825xx/82596.c 				struct sk_buff *skb = tx_cmd->skb;
skb              1298 drivers/net/ethernet/i825xx/82596.c 					DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
skb              1313 drivers/net/ethernet/i825xx/82596.c 				dev_consume_skb_irq(skb);
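i596_start_xmit() above pads runt frames to the 60-byte Ethernet minimum before queueing them. The sketch below isolates that step; note skb_padto() frees the skb on failure and zeroes the pad bytes but does not touch skb->len, so the wire length is tracked separately, as the driver does.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_xmit_pad(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short length = skb->len;

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;	/* skb already freed */
		length = ETH_ZLEN;	/* pad bytes are zeroed tailroom */
	}
	/* ... queue 'length' bytes at skb->data to the hardware ... */
	dev_kfree_skb(skb);	/* freed here only to keep the sketch whole */
	return NETDEV_TX_OK;
}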
skb                64 drivers/net/ethernet/i825xx/ether1.c static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
skb               669 drivers/net/ethernet/i825xx/ether1.c ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
skb               688 drivers/net/ethernet/i825xx/ether1.c 	if (skb->len < ETH_ZLEN) {
skb               689 drivers/net/ethernet/i825xx/ether1.c 		if (skb_padto(skb, ETH_ZLEN))
skb               698 drivers/net/ethernet/i825xx/ether1.c 	dataddr = ether1_txalloc (dev, skb->len);
skb               705 drivers/net/ethernet/i825xx/ether1.c 	tbd.tbd_opts = TBD_EOL | skb->len;
skb               716 drivers/net/ethernet/i825xx/ether1.c 	ether1_writebuffer (dev, skb->data, dataddr, skb->len);
skb               732 drivers/net/ethernet/i825xx/ether1.c 	dev_kfree_skb (skb);
skb               858 drivers/net/ethernet/i825xx/ether1.c 			struct sk_buff *skb;
skb               861 drivers/net/ethernet/i825xx/ether1.c 			skb = netdev_alloc_skb(dev, length + 2);
skb               863 drivers/net/ethernet/i825xx/ether1.c 			if (skb) {
skb               864 drivers/net/ethernet/i825xx/ether1.c 				skb_reserve (skb, 2);
skb               866 drivers/net/ethernet/i825xx/ether1.c 				ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length);
skb               868 drivers/net/ethernet/i825xx/ether1.c 				skb->protocol = eth_type_trans (skb, dev);
skb               869 drivers/net/ethernet/i825xx/ether1.c 				netif_rx (skb);
skb               205 drivers/net/ethernet/i825xx/lib82596.c 	struct sk_buff *skb;		/* So we can free it after tx */
skb               259 drivers/net/ethernet/i825xx/lib82596.c 	struct sk_buff *skb;
skb               350 drivers/net/ethernet/i825xx/lib82596.c static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb               472 drivers/net/ethernet/i825xx/lib82596.c 		struct sk_buff *skb;
skb               474 drivers/net/ethernet/i825xx/lib82596.c 		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
skb               475 drivers/net/ethernet/i825xx/lib82596.c 		if (skb == NULL)
skb               477 drivers/net/ethernet/i825xx/lib82596.c 		dma_addr = dma_map_single(dev->dev.parent, skb->data,
skb               482 drivers/net/ethernet/i825xx/lib82596.c 		rbd->skb = skb;
skb               483 drivers/net/ethernet/i825xx/lib82596.c 		rbd->v_data = skb->data;
skb               522 drivers/net/ethernet/i825xx/lib82596.c 		if (rbd->skb == NULL)
skb               527 drivers/net/ethernet/i825xx/lib82596.c 		dev_kfree_skb(rbd->skb);
skb               681 drivers/net/ethernet/i825xx/lib82596.c 			struct sk_buff *skb = rbd->skb;
skb               702 drivers/net/ethernet/i825xx/lib82596.c 					skb = NULL;	/* drop pkt */
skb               707 drivers/net/ethernet/i825xx/lib82596.c 				skb_put(skb, pkt_len);
skb               709 drivers/net/ethernet/i825xx/lib82596.c 				rbd->skb = newskb;
skb               718 drivers/net/ethernet/i825xx/lib82596.c 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
skb               721 drivers/net/ethernet/i825xx/lib82596.c 			if (skb == NULL) {
skb               730 drivers/net/ethernet/i825xx/lib82596.c 					skb_put_data(skb, rbd->v_data,
skb               736 drivers/net/ethernet/i825xx/lib82596.c 				skb->len = pkt_len;
skb               737 drivers/net/ethernet/i825xx/lib82596.c 				skb->protocol = eth_type_trans(skb, dev);
skb               738 drivers/net/ethernet/i825xx/lib82596.c 				netif_rx(skb);
skb               811 drivers/net/ethernet/i825xx/lib82596.c 				struct sk_buff *skb = tx_cmd->skb;
skb               814 drivers/net/ethernet/i825xx/lib82596.c 						 skb->len, DMA_TO_DEVICE);
skb               816 drivers/net/ethernet/i825xx/lib82596.c 				dev_kfree_skb(skb);
skb               969 drivers/net/ethernet/i825xx/lib82596.c static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               974 drivers/net/ethernet/i825xx/lib82596.c 	short length = skb->len;
skb               978 drivers/net/ethernet/i825xx/lib82596.c 				dev->name, skb->len, skb->data));
skb               981 drivers/net/ethernet/i825xx/lib82596.c 		if (skb_padto(skb, ETH_ZLEN))
skb               997 drivers/net/ethernet/i825xx/lib82596.c 		dev_kfree_skb_any(skb);
skb              1005 drivers/net/ethernet/i825xx/lib82596.c 		tx_cmd->skb = skb;
skb              1012 drivers/net/ethernet/i825xx/lib82596.c 		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
skb              1013 drivers/net/ethernet/i825xx/lib82596.c 						  skb->len, DMA_TO_DEVICE);
skb              1016 drivers/net/ethernet/i825xx/lib82596.c 		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
skb              1176 drivers/net/ethernet/i825xx/lib82596.c 				struct sk_buff *skb = tx_cmd->skb;
skb              1180 drivers/net/ethernet/i825xx/lib82596.c 					    print_eth(skb->data, "tx-done"));
skb              1196 drivers/net/ethernet/i825xx/lib82596.c 						 skb->len, DMA_TO_DEVICE);
skb              1197 drivers/net/ethernet/i825xx/lib82596.c 				dev_consume_skb_irq(skb);
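lib82596's receive loop above picks between two strategies: a large frame detaches the ring skb and parks a fresh one in its place, while a small frame is copied so the big ring buffer is reused immediately. A sketch of that decision; RX_COPYBREAK and the RX_BUF_SZ value are assumptions for illustration (the driver uses its own PKT_BUF_SZ).

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RX_COPYBREAK	256	/* hypothetical threshold */
#define RX_BUF_SZ	1536	/* stands in for the driver's PKT_BUF_SZ */

static struct sk_buff *demo_rx_swap_or_copy(struct net_device *dev,
					    struct sk_buff **ring_skb,
					    int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len > RX_COPYBREAK) {
		struct sk_buff *fresh =
			netdev_alloc_skb_ip_align(dev, RX_BUF_SZ);

		if (fresh) {
			skb = *ring_skb;
			skb_put(skb, pkt_len);	/* frame already DMA'd in */
			*ring_skb = fresh;	/* ring keeps a buffer */
			return skb;
		}
		/* no fresh buffer: fall through and copy instead */
	}
	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
	if (skb)
		skb_put_data(skb, (*ring_skb)->data, pkt_len);
	return skb;
}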
skb               764 drivers/net/ethernet/i825xx/sun3_82586.c 	struct sk_buff *skb;
skb               781 drivers/net/ethernet/i825xx/sun3_82586.c 					skb = netdev_alloc_skb(dev, totlen + 2);
skb               782 drivers/net/ethernet/i825xx/sun3_82586.c 					if(skb != NULL)
skb               784 drivers/net/ethernet/i825xx/sun3_82586.c 						skb_reserve(skb,2);
skb               785 drivers/net/ethernet/i825xx/sun3_82586.c 						skb_put(skb,totlen);
skb               786 drivers/net/ethernet/i825xx/sun3_82586.c 						skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
skb               787 drivers/net/ethernet/i825xx/sun3_82586.c 						skb->protocol=eth_type_trans(skb,dev);
skb               788 drivers/net/ethernet/i825xx/sun3_82586.c 						netif_rx(skb);
skb              1007 drivers/net/ethernet/i825xx/sun3_82586.c sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
skb              1015 drivers/net/ethernet/i825xx/sun3_82586.c 	if(skb->len > XMIT_BUFF_SIZE)
skb              1017 drivers/net/ethernet/i825xx/sun3_82586.c 		printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
skb              1031 drivers/net/ethernet/i825xx/sun3_82586.c 		len = skb->len;
skb              1037 drivers/net/ethernet/i825xx/sun3_82586.c 		skb_copy_from_linear_data(skb, (void *)p->xmit_cbuffs[p->xmit_count], skb->len);
skb              1065 drivers/net/ethernet/i825xx/sun3_82586.c 				dev_kfree_skb(skb);
skb              1084 drivers/net/ethernet/i825xx/sun3_82586.c 		dev_kfree_skb(skb);
skb              1107 drivers/net/ethernet/i825xx/sun3_82586.c 		dev_kfree_skb(skb);
skb               462 drivers/net/ethernet/ibm/ehea/ehea_main.c 		struct sk_buff *skb;
skb               464 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb = netdev_alloc_skb_ip_align(dev, packet_size);
skb               465 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (!skb) {
skb               476 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb_arr[index] = skb;
skb               477 drivers/net/ethernet/ibm/ehea/ehea_main.c 		tmp_addr = ehea_map_vaddr(skb->data);
skb               479 drivers/net/ethernet/ibm/ehea/ehea_main.c 			dev_consume_skb_any(skb);
skb               540 drivers/net/ethernet/ibm/ehea/ehea_main.c 				 struct sk_buff *skb, struct ehea_cqe *cqe,
skb               545 drivers/net/ethernet/ibm/ehea/ehea_main.c 	skb_put(skb, length);
skb               546 drivers/net/ethernet/ibm/ehea/ehea_main.c 	skb->protocol = eth_type_trans(skb, dev);
skb               551 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb               552 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb->csum = csum_unfold(~cqe->inet_checksum_value);
skb               554 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               556 drivers/net/ethernet/ibm/ehea/ehea_main.c 	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
skb               564 drivers/net/ethernet/ibm/ehea/ehea_main.c 	struct sk_buff *skb;
skb               583 drivers/net/ethernet/ibm/ehea/ehea_main.c 	skb = skb_array[skb_index];
skb               585 drivers/net/ethernet/ibm/ehea/ehea_main.c 	return skb;
skb               591 drivers/net/ethernet/ibm/ehea/ehea_main.c 	struct sk_buff *skb;
skb               608 drivers/net/ethernet/ibm/ehea/ehea_main.c 	skb = skb_array[wqe_index];
skb               610 drivers/net/ethernet/ibm/ehea/ehea_main.c 	return skb;
skb               617 drivers/net/ethernet/ibm/ehea/ehea_main.c 	struct sk_buff *skb;
skb               628 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
skb               629 drivers/net/ethernet/ibm/ehea/ehea_main.c 		dev_kfree_skb(skb);
skb               632 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
skb               633 drivers/net/ethernet/ibm/ehea/ehea_main.c 		dev_kfree_skb(skb);
skb               656 drivers/net/ethernet/ibm/ehea/ehea_main.c 	struct sk_buff *skb;
skb               683 drivers/net/ethernet/ibm/ehea/ehea_main.c 				skb = get_skb_by_index_ll(skb_arr_rq1,
skb               686 drivers/net/ethernet/ibm/ehea/ehea_main.c 				if (unlikely(!skb)) {
skb               690 drivers/net/ethernet/ibm/ehea/ehea_main.c 					skb = netdev_alloc_skb(dev,
skb               692 drivers/net/ethernet/ibm/ehea/ehea_main.c 					if (!skb)
skb               695 drivers/net/ethernet/ibm/ehea/ehea_main.c 				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
skb               697 drivers/net/ethernet/ibm/ehea/ehea_main.c 				ehea_fill_skb(dev, skb, cqe, pr);
skb               700 drivers/net/ethernet/ibm/ehea/ehea_main.c 				skb = get_skb_by_index(skb_arr_rq2,
skb               702 drivers/net/ethernet/ibm/ehea/ehea_main.c 				if (unlikely(!skb)) {
skb               707 drivers/net/ethernet/ibm/ehea/ehea_main.c 				ehea_fill_skb(dev, skb, cqe, pr);
skb               711 drivers/net/ethernet/ibm/ehea/ehea_main.c 				skb = get_skb_by_index(skb_arr_rq3,
skb               713 drivers/net/ethernet/ibm/ehea/ehea_main.c 				if (unlikely(!skb)) {
skb               718 drivers/net/ethernet/ibm/ehea/ehea_main.c 				ehea_fill_skb(dev, skb, cqe, pr);
skb               722 drivers/net/ethernet/ibm/ehea/ehea_main.c 			processed_bytes += skb->len;
skb               725 drivers/net/ethernet/ibm/ehea/ehea_main.c 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               728 drivers/net/ethernet/ibm/ehea/ehea_main.c 			napi_gro_receive(&pr->napi, skb);
skb               799 drivers/net/ethernet/ibm/ehea/ehea_main.c 	struct sk_buff *skb;
skb               843 drivers/net/ethernet/ibm/ehea/ehea_main.c 			skb = pr->sq_skba.arr[index];
skb               844 drivers/net/ethernet/ibm/ehea/ehea_main.c 			dev_consume_skb_any(skb);
skb              1600 drivers/net/ethernet/ibm/ehea/ehea_main.c static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
skb              1603 drivers/net/ethernet/ibm/ehea/ehea_main.c 	int skb_data_size = skb_headlen(skb);
skb              1610 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (skb_is_gso(skb)) {
skb              1612 drivers/net/ethernet/ibm/ehea/ehea_main.c 		swqe->mss = skb_shinfo(skb)->gso_size;
skb              1617 drivers/net/ethernet/ibm/ehea/ehea_main.c 		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
skb              1620 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
skb              1621 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb_copy_from_linear_data(skb, imm_data, immediate_len);
skb              1628 drivers/net/ethernet/ibm/ehea/ehea_main.c 				ehea_map_vaddr(skb->data + immediate_len);
skb              1632 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
skb              1637 drivers/net/ethernet/ibm/ehea/ehea_main.c static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
skb              1644 drivers/net/ethernet/ibm/ehea/ehea_main.c 	nfrags = skb_shinfo(skb)->nr_frags;
skb              1649 drivers/net/ethernet/ibm/ehea/ehea_main.c 	write_swqe2_immediate(skb, swqe, lkey);
skb              1655 drivers/net/ethernet/ibm/ehea/ehea_main.c 			frag = &skb_shinfo(skb)->frags[0];
skb              1668 drivers/net/ethernet/ibm/ehea/ehea_main.c 			frag = &skb_shinfo(skb)->frags[i];
skb              1953 drivers/net/ethernet/ibm/ehea/ehea_main.c static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
skb              1957 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
skb              1960 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1963 drivers/net/ethernet/ibm/ehea/ehea_main.c 	swqe->ip_start = skb_network_offset(skb);
skb              1964 drivers/net/ethernet/ibm/ehea/ehea_main.c 	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
skb              1966 drivers/net/ethernet/ibm/ehea/ehea_main.c 	switch (ip_hdr(skb)->protocol) {
skb              1968 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1976 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1985 drivers/net/ethernet/ibm/ehea/ehea_main.c static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
skb              1990 drivers/net/ethernet/ibm/ehea/ehea_main.c 	xmit_common(skb, swqe);
skb              1992 drivers/net/ethernet/ibm/ehea/ehea_main.c 	write_swqe2_data(skb, dev, swqe, lkey);
skb              1995 drivers/net/ethernet/ibm/ehea/ehea_main.c static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
skb              2000 drivers/net/ethernet/ibm/ehea/ehea_main.c 	xmit_common(skb, swqe);
skb              2002 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (!skb->data_len)
skb              2003 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb_copy_from_linear_data(skb, imm_data, skb->len);
skb              2005 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb_copy_bits(skb, 0, imm_data, skb->len);
skb              2007 drivers/net/ethernet/ibm/ehea/ehea_main.c 	swqe->immediate_data_length = skb->len;
skb              2008 drivers/net/ethernet/ibm/ehea/ehea_main.c 	dev_consume_skb_any(skb);
skb              2011 drivers/net/ethernet/ibm/ehea/ehea_main.c static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              2020 drivers/net/ethernet/ibm/ehea/ehea_main.c 	pr = &port->port_res[skb_get_queue_mapping(skb)];
skb              2021 drivers/net/ethernet/ibm/ehea/ehea_main.c 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
skb              2027 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (skb_vlan_tag_present(skb)) {
skb              2029 drivers/net/ethernet/ibm/ehea/ehea_main.c 		swqe->vlan_tag = skb_vlan_tag_get(skb);
skb              2033 drivers/net/ethernet/ibm/ehea/ehea_main.c 	pr->tx_bytes += skb->len;
skb              2035 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (skb->len <= SWQE3_MAX_IMM) {
skb              2038 drivers/net/ethernet/ibm/ehea/ehea_main.c 		ehea_xmit3(skb, dev, swqe);
skb              2054 drivers/net/ethernet/ibm/ehea/ehea_main.c 		pr->sq_skba.arr[pr->sq_skba.index] = skb;
skb              2060 drivers/net/ethernet/ibm/ehea/ehea_main.c 		ehea_xmit2(skb, dev, swqe, lkey);
skb              2581 drivers/net/ethernet/ibm/ehea/ehea_main.c 	struct sk_buff *skb;
skb              2592 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb = skba_rq2[index];
skb              2593 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (skb)
skb              2594 drivers/net/ethernet/ibm/ehea/ehea_main.c 			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
skb              2601 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb = skba_rq3[index];
skb              2602 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (skb)
skb              2603 drivers/net/ethernet/ibm/ehea/ehea_main.c 			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
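ehea_fill_skb() above trusts the adapter's checksum two ways: when a full Internet checksum comes back in the CQE it sets CHECKSUM_COMPLETE and unfolds the inverted 16-bit value, otherwise it only asserts CHECKSUM_UNNECESSARY. A sketch with hypothetical hw_csum_valid/hw_csum stand-ins for the CQE fields:

#include <linux/skbuff.h>
#include <net/checksum.h>

static void demo_rx_csum(struct sk_buff *skb, bool hw_csum_valid,
			 __sum16 hw_csum)
{
	if (hw_csum_valid) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		/* unfold the 16-bit folded value into a 32-bit csum */
		skb->csum = csum_unfold((__force __sum16)~hw_csum);
	} else {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}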
skb              1069 drivers/net/ethernet/ibm/emac/core.c 		struct sk_buff *skb;
skb              1071 drivers/net/ethernet/ibm/emac/core.c 		skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size);
skb              1072 drivers/net/ethernet/ibm/emac/core.c 		if (!skb) {
skb              1081 drivers/net/ethernet/ibm/emac/core.c 		    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
skb              1084 drivers/net/ethernet/ibm/emac/core.c 		dev->rx_skb[i] = skb;
skb              1175 drivers/net/ethernet/ibm/emac/core.c __emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot)
skb              1177 drivers/net/ethernet/ibm/emac/core.c 	if (unlikely(!skb))
skb              1180 drivers/net/ethernet/ibm/emac/core.c 	dev->rx_skb[slot] = skb;
skb              1184 drivers/net/ethernet/ibm/emac/core.c 	    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
skb              1196 drivers/net/ethernet/ibm/emac/core.c 	struct sk_buff *skb;
skb              1198 drivers/net/ethernet/ibm/emac/core.c 	skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size,
skb              1201 drivers/net/ethernet/ibm/emac/core.c 	return __emac_prepare_rx_skb(skb, dev, slot);
skb              1207 drivers/net/ethernet/ibm/emac/core.c 	struct sk_buff *skb;
skb              1209 drivers/net/ethernet/ibm/emac/core.c 	skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size);
skb              1211 drivers/net/ethernet/ibm/emac/core.c 	return __emac_prepare_rx_skb(skb, dev, slot);
skb              1418 drivers/net/ethernet/ibm/emac/core.c 			       struct sk_buff *skb)
skb              1421 drivers/net/ethernet/ibm/emac/core.c 		(skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              1455 drivers/net/ethernet/ibm/emac/core.c static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb              1458 drivers/net/ethernet/ibm/emac/core.c 	unsigned int len = skb->len;
skb              1462 drivers/net/ethernet/ibm/emac/core.c 	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
skb              1472 drivers/net/ethernet/ibm/emac/core.c 	dev->tx_skb[slot] = skb;
skb              1474 drivers/net/ethernet/ibm/emac/core.c 						     skb->data, len,
skb              1514 drivers/net/ethernet/ibm/emac/core.c emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
skb              1517 drivers/net/ethernet/ibm/emac/core.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1518 drivers/net/ethernet/ibm/emac/core.c 	int len = skb->len, chunk;
skb              1525 drivers/net/ethernet/ibm/emac/core.c 		return emac_start_xmit(skb, ndev);
skb              1527 drivers/net/ethernet/ibm/emac/core.c 	len -= skb->data_len;
skb              1537 drivers/net/ethernet/ibm/emac/core.c 	    emac_tx_csum(dev, skb);
skb              1544 drivers/net/ethernet/ibm/emac/core.c 	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
skb              1552 drivers/net/ethernet/ibm/emac/core.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1565 drivers/net/ethernet/ibm/emac/core.c 	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
skb              1568 drivers/net/ethernet/ibm/emac/core.c 	dev->tx_skb[slot] = skb;
skb              1577 drivers/net/ethernet/ibm/emac/core.c 	return emac_xmit_finish(dev, skb->len);
skb              1644 drivers/net/ethernet/ibm/emac/core.c 			struct sk_buff *skb = dev->tx_skb[slot];
skb              1647 drivers/net/ethernet/ibm/emac/core.c 			if (skb) {
skb              1648 drivers/net/ethernet/ibm/emac/core.c 				dev_kfree_skb(skb);
skb              1674 drivers/net/ethernet/ibm/emac/core.c 	struct sk_buff *skb = dev->rx_skb[slot];
skb              1679 drivers/net/ethernet/ibm/emac/core.c 		dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
skb              1717 drivers/net/ethernet/ibm/emac/core.c 				struct sk_buff *skb, u16 ctrl)
skb              1721 drivers/net/ethernet/ibm/emac/core.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1760 drivers/net/ethernet/ibm/emac/core.c 		struct sk_buff *skb;
skb              1766 drivers/net/ethernet/ibm/emac/core.c 		skb = dev->rx_skb[slot];
skb              1796 drivers/net/ethernet/ibm/emac/core.c 			       skb->data - NET_IP_ALIGN,
skb              1799 drivers/net/ethernet/ibm/emac/core.c 			skb = copy_skb;
skb              1803 drivers/net/ethernet/ibm/emac/core.c 		skb_put(skb, len);
skb              1805 drivers/net/ethernet/ibm/emac/core.c 		skb->protocol = eth_type_trans(skb, dev->ndev);
skb              1806 drivers/net/ethernet/ibm/emac/core.c 		emac_rx_csum(dev, skb, ctrl);
skb              1808 drivers/net/ethernet/ibm/emac/core.c 		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
skb              1826 drivers/net/ethernet/ibm/emac/core.c 				dev->rx_sg_skb = skb;
skb              1827 drivers/net/ethernet/ibm/emac/core.c 				skb_put(skb, len);
skb              1832 drivers/net/ethernet/ibm/emac/core.c 			skb = dev->rx_sg_skb;
skb              1839 drivers/net/ethernet/ibm/emac/core.c 				dev_kfree_skb(skb);
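The emac receive path above copies short frames into a fresh skb so the large ring buffer never leaves the ring (the real driver's copy also covers the NET_IP_ALIGN slack). A minimal copybreak sketch using napi_alloc_skb(), as the driver's own refill path does; the caller reuses ring_skb either way.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_rx_copybreak(struct napi_struct *napi,
					 struct sk_buff *ring_skb, int len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, len);

	if (!skb)
		return NULL;	/* drop; ring_skb stays in the ring */
	skb_put_data(skb, ring_skb->data, len);	/* small frame: copy out */
	return skb;
}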
skb               211 drivers/net/ethernet/ibm/ibmveth.c 	struct sk_buff *skb;
skb               222 drivers/net/ethernet/ibm/ibmveth.c 		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
skb               224 drivers/net/ethernet/ibm/ibmveth.c 		if (!skb) {
skb               240 drivers/net/ethernet/ibm/ibmveth.c 		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
skb               248 drivers/net/ethernet/ibm/ibmveth.c 		pool->skbuff[index] = skb;
skb               251 drivers/net/ethernet/ibm/ibmveth.c 		*(u64 *)skb->data = correlator;
skb               260 drivers/net/ethernet/ibm/ibmveth.c 			ibmveth_flush_buffer(skb->data, len);
skb               288 drivers/net/ethernet/ibm/ibmveth.c 	dev_kfree_skb_any(skb);
skb               336 drivers/net/ethernet/ibm/ibmveth.c 			struct sk_buff *skb = pool->skbuff[i];
skb               337 drivers/net/ethernet/ibm/ibmveth.c 			if (skb) {
skb               342 drivers/net/ethernet/ibm/ibmveth.c 				dev_kfree_skb_any(skb);
skb               366 drivers/net/ethernet/ibm/ibmveth.c 	struct sk_buff *skb;
skb               371 drivers/net/ethernet/ibm/ibmveth.c 	skb = adapter->rx_buff_pool[pool].skbuff[index];
skb               373 drivers/net/ethernet/ibm/ibmveth.c 	BUG_ON(skb == NULL);
skb              1014 drivers/net/ethernet/ibm/ibmveth.c static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
skb              1029 drivers/net/ethernet/ibm/ibmveth.c 	    skb_has_frag_list(skb) && __skb_linearize(skb)) {
skb              1038 drivers/net/ethernet/ibm/ibmveth.c 	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
skb              1044 drivers/net/ethernet/ibm/ibmveth.c 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb              1045 drivers/net/ethernet/ibm/ibmveth.c 	    ((skb->protocol == htons(ETH_P_IP) &&
skb              1046 drivers/net/ethernet/ibm/ibmveth.c 	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
skb              1047 drivers/net/ethernet/ibm/ibmveth.c 	     (skb->protocol == htons(ETH_P_IPV6) &&
skb              1048 drivers/net/ethernet/ibm/ibmveth.c 	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
skb              1049 drivers/net/ethernet/ibm/ibmveth.c 	    skb_checksum_help(skb)) {
skb              1058 drivers/net/ethernet/ibm/ibmveth.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1059 drivers/net/ethernet/ibm/ibmveth.c 		unsigned char *buf = skb_transport_header(skb) +
skb              1060 drivers/net/ethernet/ibm/ibmveth.c 						skb->csum_offset;
skb              1068 drivers/net/ethernet/ibm/ibmveth.c 		if (skb_is_gso(skb) && adapter->fw_large_send_support)
skb              1080 drivers/net/ethernet/ibm/ibmveth.c 	if (force_bounce || (!skb_is_nonlinear(skb) &&
skb              1081 drivers/net/ethernet/ibm/ibmveth.c 				(skb->len < tx_copybreak))) {
skb              1082 drivers/net/ethernet/ibm/ibmveth.c 		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
skb              1083 drivers/net/ethernet/ibm/ibmveth.c 					  skb->len);
skb              1085 drivers/net/ethernet/ibm/ibmveth.c 		descs[0].fields.flags_len = desc_flags | skb->len;
skb              1093 drivers/net/ethernet/ibm/ibmveth.c 			netdev->stats.tx_bytes += skb->len;
skb              1100 drivers/net/ethernet/ibm/ibmveth.c 	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
skb              1101 drivers/net/ethernet/ibm/ibmveth.c 				  skb_headlen(skb), DMA_TO_DEVICE);
skb              1105 drivers/net/ethernet/ibm/ibmveth.c 	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
skb              1109 drivers/net/ethernet/ibm/ibmveth.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1110 drivers/net/ethernet/ibm/ibmveth.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1122 drivers/net/ethernet/ibm/ibmveth.c 	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
skb              1124 drivers/net/ethernet/ibm/ibmveth.c 			mss = (unsigned long)skb_shinfo(skb)->gso_size;
skb              1126 drivers/net/ethernet/ibm/ibmveth.c 		} else if (!skb_is_gso_v6(skb)) {
skb              1131 drivers/net/ethernet/ibm/ibmveth.c 			ip_hdr(skb)->check = 0xffff;
skb              1132 drivers/net/ethernet/ibm/ibmveth.c 			tcp_hdr(skb)->check =
skb              1133 drivers/net/ethernet/ibm/ibmveth.c 				cpu_to_be16(skb_shinfo(skb)->gso_size);
skb              1143 drivers/net/ethernet/ibm/ibmveth.c 		netdev->stats.tx_bytes += skb->len;
skb              1151 drivers/net/ethernet/ibm/ibmveth.c 	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
skb              1157 drivers/net/ethernet/ibm/ibmveth.c 	dev_consume_skb_any(skb);
skb              1175 drivers/net/ethernet/ibm/ibmveth.c 	if (skb_linearize(skb)) {
skb              1183 drivers/net/ethernet/ibm/ibmveth.c static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
skb              1190 drivers/net/ethernet/ibm/ibmveth.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb              1191 drivers/net/ethernet/ibm/ibmveth.c 		struct iphdr *iph = (struct iphdr *)skb->data;
skb              1195 drivers/net/ethernet/ibm/ibmveth.c 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
skb              1199 drivers/net/ethernet/ibm/ibmveth.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb              1200 drivers/net/ethernet/ibm/ibmveth.c 		struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
skb              1204 drivers/net/ethernet/ibm/ibmveth.c 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb              1214 drivers/net/ethernet/ibm/ibmveth.c 	tcph = (struct tcphdr *)(skb->data + offset);
skb              1216 drivers/net/ethernet/ibm/ibmveth.c 		skb_shinfo(skb)->gso_size = mss;
skb              1218 drivers/net/ethernet/ibm/ibmveth.c 		skb_shinfo(skb)->gso_size = ntohs(tcph->check);
skb              1222 drivers/net/ethernet/ibm/ibmveth.c 	if (skb_shinfo(skb)->gso_size) {
skb              1224 drivers/net/ethernet/ibm/ibmveth.c 		skb_shinfo(skb)->gso_segs =
skb              1225 drivers/net/ethernet/ibm/ibmveth.c 				DIV_ROUND_UP(skb->len - hdr_len,
skb              1226 drivers/net/ethernet/ibm/ibmveth.c 					     skb_shinfo(skb)->gso_size);
skb              1230 drivers/net/ethernet/ibm/ibmveth.c static void ibmveth_rx_csum_helper(struct sk_buff *skb,
skb              1240 drivers/net/ethernet/ibm/ibmveth.c 	skb_proto = be16_to_cpu(skb->protocol);
skb              1243 drivers/net/ethernet/ibm/ibmveth.c 		iph = (struct iphdr *)skb->data;
skb              1257 drivers/net/ethernet/ibm/ibmveth.c 		iph6 = (struct ipv6hdr *)skb->data;
skb              1276 drivers/net/ethernet/ibm/ibmveth.c 		struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
skb              1278 drivers/net/ethernet/ibm/ibmveth.c 		tcphdrlen = skb->len - iphlen;
skb              1289 drivers/net/ethernet/ibm/ibmveth.c 		skb_partial_csum_set(skb, iphlen,
skb              1291 drivers/net/ethernet/ibm/ibmveth.c 		skb_reset_network_header(skb);
skb              1315 drivers/net/ethernet/ibm/ibmveth.c 			struct sk_buff *skb, *new_skb;
skb              1321 drivers/net/ethernet/ibm/ibmveth.c 			skb = ibmveth_rxq_get_buffer(adapter);
skb              1329 drivers/net/ethernet/ibm/ibmveth.c 				__be64 *rxmss = (__be64 *)(skb->data + 8);
skb              1340 drivers/net/ethernet/ibm/ibmveth.c 							skb->data + offset,
skb              1343 drivers/net/ethernet/ibm/ibmveth.c 					ibmveth_flush_buffer(skb->data,
skb              1346 drivers/net/ethernet/ibm/ibmveth.c 					kfree_skb(skb);
skb              1347 drivers/net/ethernet/ibm/ibmveth.c 				skb = new_skb;
skb              1350 drivers/net/ethernet/ibm/ibmveth.c 				skb_reserve(skb, offset);
skb              1353 drivers/net/ethernet/ibm/ibmveth.c 			skb_put(skb, length);
skb              1354 drivers/net/ethernet/ibm/ibmveth.c 			skb->protocol = eth_type_trans(skb, netdev);
skb              1357 drivers/net/ethernet/ibm/ibmveth.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1358 drivers/net/ethernet/ibm/ibmveth.c 				ibmveth_rx_csum_helper(skb, adapter);
skb              1362 drivers/net/ethernet/ibm/ibmveth.c 				ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
skb              1366 drivers/net/ethernet/ibm/ibmveth.c 			napi_gro_receive(napi, skb);	/* send it up */
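ibmveth_rx_mss_helper() above reconstructs GSO metadata for large-receive packets: gso_size is set from the reported MSS and gso_segs is recomputed from the payload length. A sketch of that fixup, where hdr_len is assumed to be the already-parsed L2+L3+L4 header length:

#include <linux/kernel.h>
#include <linux/skbuff.h>

static void demo_set_rx_gso(struct sk_buff *skb, u16 mss,
			    unsigned int hdr_len, unsigned int gso_type)
{
	skb_shinfo(skb)->gso_type = gso_type;	/* SKB_GSO_TCPV4 or V6 */
	skb_shinfo(skb)->gso_size = mss;
	if (mss)				/* segments of payload only */
		skb_shinfo(skb)->gso_segs =
			DIV_ROUND_UP(skb->len - hdr_len, mss);
}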
skb               250 drivers/net/ethernet/ibm/ibmvnic.c 	struct sk_buff *skb;
skb               267 drivers/net/ethernet/ibm/ibmvnic.c 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
skb               268 drivers/net/ethernet/ibm/ibmvnic.c 		if (!skb) {
skb               276 drivers/net/ethernet/ibm/ibmvnic.c 		if (pool->rx_buff[index].skb)
skb               288 drivers/net/ethernet/ibm/ibmvnic.c 		pool->rx_buff[index].skb = skb;
skb               325 drivers/net/ethernet/ibm/ibmvnic.c 	pool->rx_buff[index].skb = NULL;
skb               327 drivers/net/ethernet/ibm/ibmvnic.c 	dev_kfree_skb_any(skb);
skb               478 drivers/net/ethernet/ibm/ibmvnic.c 			if (rx_pool->rx_buff[j].skb) {
skb               479 drivers/net/ethernet/ibm/ibmvnic.c 				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
skb               480 drivers/net/ethernet/ibm/ibmvnic.c 				rx_pool->rx_buff[j].skb = NULL;
skb              1131 drivers/net/ethernet/ibm/ibmvnic.c 			if (rx_buff && rx_buff->skb) {
skb              1132 drivers/net/ethernet/ibm/ibmvnic.c 				dev_kfree_skb_any(rx_buff->skb);
skb              1133 drivers/net/ethernet/ibm/ibmvnic.c 				rx_buff->skb = NULL;
skb              1153 drivers/net/ethernet/ibm/ibmvnic.c 		if (tx_buff && tx_buff->skb) {
skb              1154 drivers/net/ethernet/ibm/ibmvnic.c 			dev_kfree_skb_any(tx_buff->skb);
skb              1155 drivers/net/ethernet/ibm/ibmvnic.c 			tx_buff->skb = NULL;
skb              1265 drivers/net/ethernet/ibm/ibmvnic.c static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
skb              1271 drivers/net/ethernet/ibm/ibmvnic.c 	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
skb              1276 drivers/net/ethernet/ibm/ibmvnic.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb              1277 drivers/net/ethernet/ibm/ibmvnic.c 		hdr_len[1] = ip_hdr(skb)->ihl * 4;
skb              1278 drivers/net/ethernet/ibm/ibmvnic.c 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
skb              1279 drivers/net/ethernet/ibm/ibmvnic.c 			hdr_len[2] = tcp_hdrlen(skb);
skb              1280 drivers/net/ethernet/ibm/ibmvnic.c 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
skb              1282 drivers/net/ethernet/ibm/ibmvnic.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb              1284 drivers/net/ethernet/ibm/ibmvnic.c 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
skb              1285 drivers/net/ethernet/ibm/ibmvnic.c 			hdr_len[2] = tcp_hdrlen(skb);
skb              1286 drivers/net/ethernet/ibm/ibmvnic.c 		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
skb              1288 drivers/net/ethernet/ibm/ibmvnic.c 	} else if (skb->protocol == htons(ETH_P_ARP)) {
skb              1289 drivers/net/ethernet/ibm/ibmvnic.c 		hdr_len[1] = arp_hdr_len(skb->dev);
skb              1295 drivers/net/ethernet/ibm/ibmvnic.c 		hdr = skb_mac_header(skb);
skb              1301 drivers/net/ethernet/ibm/ibmvnic.c 		hdr = skb_network_header(skb);
skb              1307 drivers/net/ethernet/ibm/ibmvnic.c 		hdr = skb_transport_header(skb);
skb              1384 drivers/net/ethernet/ibm/ibmvnic.c 	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
skb              1390 drivers/net/ethernet/ibm/ibmvnic.c static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
skb              1399 drivers/net/ethernet/ibm/ibmvnic.c 	if (skb->len < netdev->min_mtu)
skb              1400 drivers/net/ethernet/ibm/ibmvnic.c 		return skb_put_padto(skb, netdev->min_mtu);
skb              1405 drivers/net/ethernet/ibm/ibmvnic.c static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              1408 drivers/net/ethernet/ibm/ibmvnic.c 	int queue_num = skb_get_queue_mapping(skb);
skb              1432 drivers/net/ethernet/ibm/ibmvnic.c 		if (!netif_subqueue_stopped(netdev, skb))
skb              1434 drivers/net/ethernet/ibm/ibmvnic.c 		dev_kfree_skb_any(skb);
skb              1442 drivers/net/ethernet/ibm/ibmvnic.c 	if (ibmvnic_xmit_workarounds(skb, netdev)) {
skb              1448 drivers/net/ethernet/ibm/ibmvnic.c 	if (skb_is_gso(skb))
skb              1454 drivers/net/ethernet/ibm/ibmvnic.c 	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
skb              1461 drivers/net/ethernet/ibm/ibmvnic.c 		dev_kfree_skb_any(skb);
skb              1475 drivers/net/ethernet/ibm/ibmvnic.c 	if (skb_shinfo(skb)->nr_frags) {
skb              1479 drivers/net/ethernet/ibm/ibmvnic.c 		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
skb              1480 drivers/net/ethernet/ibm/ibmvnic.c 		cur = skb_headlen(skb);
skb              1483 drivers/net/ethernet/ibm/ibmvnic.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1484 drivers/net/ethernet/ibm/ibmvnic.c 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1492 drivers/net/ethernet/ibm/ibmvnic.c 		skb_copy_from_linear_data(skb, dst, skb->len);
skb              1499 drivers/net/ethernet/ibm/ibmvnic.c 	tx_buff->skb = skb;
skb              1501 drivers/net/ethernet/ibm/ibmvnic.c 	tx_buff->data_len[0] = skb->len;
skb              1513 drivers/net/ethernet/ibm/ibmvnic.c 	if (skb_is_gso(skb))
skb              1519 drivers/net/ethernet/ibm/ibmvnic.c 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
skb              1522 drivers/net/ethernet/ibm/ibmvnic.c 	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
skb              1524 drivers/net/ethernet/ibm/ibmvnic.c 		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
skb              1527 drivers/net/ethernet/ibm/ibmvnic.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb              1529 drivers/net/ethernet/ibm/ibmvnic.c 		proto = ip_hdr(skb)->protocol;
skb              1530 drivers/net/ethernet/ibm/ibmvnic.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb              1532 drivers/net/ethernet/ibm/ibmvnic.c 		proto = ipv6_hdr(skb)->nexthdr;
skb              1540 drivers/net/ethernet/ibm/ibmvnic.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1544 drivers/net/ethernet/ibm/ibmvnic.c 	if (skb_is_gso(skb)) {
skb              1546 drivers/net/ethernet/ibm/ibmvnic.c 		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
skb              1559 drivers/net/ethernet/ibm/ibmvnic.c 			dev_kfree_skb_any(skb);
skb              1560 drivers/net/ethernet/ibm/ibmvnic.c 			tx_buff->skb = NULL;
skb              1581 drivers/net/ethernet/ibm/ibmvnic.c 		dev_kfree_skb_any(skb);
skb              1582 drivers/net/ethernet/ibm/ibmvnic.c 		tx_buff->skb = NULL;
skb              1607 drivers/net/ethernet/ibm/ibmvnic.c 	tx_bytes += skb->len;
skb              2205 drivers/net/ethernet/ibm/ibmvnic.c 	rx_buff->skb = NULL;
skb              2222 drivers/net/ethernet/ibm/ibmvnic.c 		struct sk_buff *skb;
skb              2248 drivers/net/ethernet/ibm/ibmvnic.c 			dev_kfree_skb_any(rx_buff->skb);
skb              2251 drivers/net/ethernet/ibm/ibmvnic.c 		} else if (!rx_buff->skb) {
skb              2261 drivers/net/ethernet/ibm/ibmvnic.c 		skb = rx_buff->skb;
skb              2262 drivers/net/ethernet/ibm/ibmvnic.c 		skb_copy_to_linear_data(skb, rx_buff->data + offset,
skb              2270 drivers/net/ethernet/ibm/ibmvnic.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              2277 drivers/net/ethernet/ibm/ibmvnic.c 		skb_put(skb, length);
skb              2278 drivers/net/ethernet/ibm/ibmvnic.c 		skb->protocol = eth_type_trans(skb, netdev);
skb              2279 drivers/net/ethernet/ibm/ibmvnic.c 		skb_record_rx_queue(skb, scrq_num);
skb              2283 drivers/net/ethernet/ibm/ibmvnic.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2286 drivers/net/ethernet/ibm/ibmvnic.c 		length = skb->len;
skb              2287 drivers/net/ethernet/ibm/ibmvnic.c 		napi_gro_receive(napi, skb); /* send it up */
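The ibmvnic receive entries above follow the canonical copy-mode RX indication sequence: copy the payload in, claim the bytes, resolve the protocol, record the queue, mark the checksum result, and hand the skb to GRO. A condensed sketch, assuming the poll loop supplies the payload pointer, length, and queue index (all parameter names here are illustrative):

#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void rx_indicate(struct napi_struct *napi, struct net_device *netdev,
			struct sk_buff *skb, const void *data,
			unsigned int len, u16 queue, bool csum_ok)
{
	/* copy the payload into the skb's linear area, then claim it */
	skb_copy_to_linear_data(skb, data, len);
	skb_put(skb, len);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, queue);

	/* hardware validated the L3/L4 checksums for this frame */
	if (csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(napi, skb);	/* send it up */
}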
skb              2357 drivers/net/ethernet/ibm/ibmvnic.c static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
skb              2365 drivers/net/ethernet/ibm/ibmvnic.c 	if (skb_is_gso(skb)) {
skb              2366 drivers/net/ethernet/ibm/ibmvnic.c 		if (skb_shinfo(skb)->gso_size < 224 ||
skb              2367 drivers/net/ethernet/ibm/ibmvnic.c 		    skb_shinfo(skb)->gso_segs == 1)
skb              2941 drivers/net/ethernet/ibm/ibmvnic.c 				dev_kfree_skb_any(txbuff->skb);
skb              2942 drivers/net/ethernet/ibm/ibmvnic.c 				txbuff->skb = NULL;
skb               888 drivers/net/ethernet/ibm/ibmvnic.h 	struct sk_buff *skb;
skb               911 drivers/net/ethernet/ibm/ibmvnic.h 	struct sk_buff *skb;
skb               421 drivers/net/ethernet/intel/e100.c 	struct sk_buff *skb;
skb               501 drivers/net/ethernet/intel/e100.c 	struct sk_buff *skb;
skb               847 drivers/net/ethernet/intel/e100.c static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
skb               864 drivers/net/ethernet/intel/e100.c 	cb->skb = skb;
skb               866 drivers/net/ethernet/intel/e100.c 	err = cb_prepare(nic, cb, skb);
skb              1072 drivers/net/ethernet/intel/e100.c static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
skb              1311 drivers/net/ethernet/intel/e100.c 			     struct sk_buff *skb)
skb              1313 drivers/net/ethernet/intel/e100.c 	const struct firmware *fw = (void *)skb;
skb              1318 drivers/net/ethernet/intel/e100.c 	cb->skb = NULL;
skb              1381 drivers/net/ethernet/intel/e100.c 	struct sk_buff *skb)
skb              1388 drivers/net/ethernet/intel/e100.c static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
skb              1563 drivers/net/ethernet/intel/e100.c static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
skb              1740 drivers/net/ethernet/intel/e100.c 	struct sk_buff *skb)
skb              1746 drivers/net/ethernet/intel/e100.c 				  skb->data, skb->len, PCI_DMA_TODEVICE);
skb              1749 drivers/net/ethernet/intel/e100.c 		dev_kfree_skb_any(skb);
skb              1750 drivers/net/ethernet/intel/e100.c 		skb = NULL;
skb              1758 drivers/net/ethernet/intel/e100.c 	if (unlikely(skb->no_fcs))
skb              1771 drivers/net/ethernet/intel/e100.c 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
skb              1772 drivers/net/ethernet/intel/e100.c 	skb_tx_timestamp(skb);
skb              1776 drivers/net/ethernet/intel/e100.c static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
skb              1792 drivers/net/ethernet/intel/e100.c 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
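The e100 entries above show one submission routine, e100_exec_cb(), reused for transmit, configuration, multicast setup, and even microcode download by passing a per-command prepare callback (note the struct firmware pointer smuggled through the skb argument in the ucode path). A rough sketch of that dispatch shape, with stand-in types since the real struct cb and struct nic are driver-internal:

#include <linux/skbuff.h>

struct ex_cb;	/* stand-in for e100's struct cb */
struct ex_nic;	/* stand-in for e100's struct nic */

typedef int (*cb_prepare_t)(struct ex_nic *nic, struct ex_cb *cb,
			    struct sk_buff *skb);

static int exec_cb(struct ex_nic *nic, struct sk_buff *skb,
		   struct ex_cb *cb, cb_prepare_t prepare)
{
	int err;

	/* the callback fills in the command-specific descriptor fields;
	 * the skb (or whatever was cast into it) rides along untouched */
	err = prepare(nic, cb, skb);
	if (err)
		return err;

	/* ... link cb into the ring and kick the hardware ... */
	return 0;
}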
skb              1830 drivers/net/ethernet/intel/e100.c 		if (likely(cb->skb != NULL)) {
skb              1832 drivers/net/ethernet/intel/e100.c 			dev->stats.tx_bytes += cb->skb->len;
skb              1838 drivers/net/ethernet/intel/e100.c 			dev_kfree_skb_any(cb->skb);
skb              1839 drivers/net/ethernet/intel/e100.c 			cb->skb = NULL;
skb              1860 drivers/net/ethernet/intel/e100.c 			if (cb->skb) {
skb              1865 drivers/net/ethernet/intel/e100.c 				dev_kfree_skb(cb->skb);
skb              1917 drivers/net/ethernet/intel/e100.c 	if (rx->skb) {
skb              1926 drivers/net/ethernet/intel/e100.c 	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
skb              1930 drivers/net/ethernet/intel/e100.c 	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
skb              1931 drivers/net/ethernet/intel/e100.c 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
skb              1935 drivers/net/ethernet/intel/e100.c 		dev_kfree_skb_any(rx->skb);
skb              1936 drivers/net/ethernet/intel/e100.c 		rx->skb = NULL;
skb              1944 drivers/net/ethernet/intel/e100.c 	if (rx->prev->skb) {
skb              1945 drivers/net/ethernet/intel/e100.c 		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
skb              1958 drivers/net/ethernet/intel/e100.c 	struct sk_buff *skb = rx->skb;
skb              1959 drivers/net/ethernet/intel/e100.c 	struct rfd *rfd = (struct rfd *)skb->data;
skb              2018 drivers/net/ethernet/intel/e100.c 	skb_reserve(skb, sizeof(struct rfd));
skb              2019 drivers/net/ethernet/intel/e100.c 	skb_put(skb, actual_size);
skb              2020 drivers/net/ethernet/intel/e100.c 	skb->protocol = eth_type_trans(skb, nic->netdev);
skb              2034 drivers/net/ethernet/intel/e100.c 		dev_kfree_skb_any(skb);
skb              2038 drivers/net/ethernet/intel/e100.c 		dev_kfree_skb_any(skb);
skb              2043 drivers/net/ethernet/intel/e100.c 		netif_receive_skb(skb);
skb              2048 drivers/net/ethernet/intel/e100.c 	rx->skb = NULL;
skb              2062 drivers/net/ethernet/intel/e100.c 	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
skb              2080 drivers/net/ethernet/intel/e100.c 	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
skb              2083 drivers/net/ethernet/intel/e100.c 	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
skb              2100 drivers/net/ethernet/intel/e100.c 			(struct rfd *)new_before_last_rx->skb->data;
skb              2139 drivers/net/ethernet/intel/e100.c 			if (rx->skb) {
skb              2142 drivers/net/ethernet/intel/e100.c 				dev_kfree_skb(rx->skb);
skb              2180 drivers/net/ethernet/intel/e100.c 	before_last = (struct rfd *)rx->skb->data;
skb              2347 drivers/net/ethernet/intel/e100.c 	struct sk_buff *skb;
skb              2373 drivers/net/ethernet/intel/e100.c 	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
skb              2377 drivers/net/ethernet/intel/e100.c 	skb_put(skb, ETH_DATA_LEN);
skb              2378 drivers/net/ethernet/intel/e100.c 	memset(skb->data, 0xFF, ETH_DATA_LEN);
skb              2379 drivers/net/ethernet/intel/e100.c 	e100_xmit_frame(skb, nic->netdev);
skb              2386 drivers/net/ethernet/intel/e100.c 	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
skb              2387 drivers/net/ethernet/intel/e100.c 	   skb->data, ETH_DATA_LEN))
skb               126 drivers/net/ethernet/intel/e1000/e1000.h 	struct sk_buff *skb;
skb               939 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			dev_kfree_skb(txdr->buffer_info[i].skb);
skb              1013 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		struct sk_buff *skb;
skb              1016 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		skb = alloc_skb(size, GFP_KERNEL);
skb              1017 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		if (!skb) {
skb              1021 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		skb_put(skb, size);
skb              1022 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		txdr->buffer_info[i].skb = skb;
skb              1023 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		txdr->buffer_info[i].length = skb->len;
skb              1025 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			dma_map_single(&pdev->dev, skb->data, skb->len,
skb              1032 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		tx_desc->lower.data = cpu_to_le32(skb->len);
skb              1355 drivers/net/ethernet/intel/e1000/e1000_ethtool.c static void e1000_create_lbtest_frame(struct sk_buff *skb,
skb              1358 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	memset(skb->data, 0xFF, frame_size);
skb              1360 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
skb              1361 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
skb              1362 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
skb              1402 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
skb               107 drivers/net/ethernet/intel/e1000/e1000_main.c static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
skb               141 drivers/net/ethernet/intel/e1000/e1000_main.c 				       struct sk_buff *skb);
skb              1962 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (buffer_info->skb) {
skb              1963 drivers/net/ethernet/intel/e1000/e1000_main.c 		dev_kfree_skb_any(buffer_info->skb);
skb              1964 drivers/net/ethernet/intel/e1000/e1000_main.c 		buffer_info->skb = NULL;
skb              2688 drivers/net/ethernet/intel/e1000/e1000_main.c 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
skb              2698 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (skb_is_gso(skb)) {
skb              2701 drivers/net/ethernet/intel/e1000/e1000_main.c 		err = skb_cow_head(skb, 0);
skb              2705 drivers/net/ethernet/intel/e1000/e1000_main.c 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              2706 drivers/net/ethernet/intel/e1000/e1000_main.c 		mss = skb_shinfo(skb)->gso_size;
skb              2708 drivers/net/ethernet/intel/e1000/e1000_main.c 			struct iphdr *iph = ip_hdr(skb);
skb              2711 drivers/net/ethernet/intel/e1000/e1000_main.c 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
skb              2716 drivers/net/ethernet/intel/e1000/e1000_main.c 			ipcse = skb_transport_offset(skb) - 1;
skb              2717 drivers/net/ethernet/intel/e1000/e1000_main.c 		} else if (skb_is_gso_v6(skb)) {
skb              2718 drivers/net/ethernet/intel/e1000/e1000_main.c 			ipv6_hdr(skb)->payload_len = 0;
skb              2719 drivers/net/ethernet/intel/e1000/e1000_main.c 			tcp_hdr(skb)->check =
skb              2720 drivers/net/ethernet/intel/e1000/e1000_main.c 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb              2721 drivers/net/ethernet/intel/e1000/e1000_main.c 						 &ipv6_hdr(skb)->daddr,
skb              2725 drivers/net/ethernet/intel/e1000/e1000_main.c 		ipcss = skb_network_offset(skb);
skb              2726 drivers/net/ethernet/intel/e1000/e1000_main.c 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
skb              2727 drivers/net/ethernet/intel/e1000/e1000_main.c 		tucss = skb_transport_offset(skb);
skb              2728 drivers/net/ethernet/intel/e1000/e1000_main.c 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
skb              2732 drivers/net/ethernet/intel/e1000/e1000_main.c 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
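e1000_tso() above performs the usual pre-TSO header surgery: zero the IP total-length/checksum fields and seed the TCP checksum with the pseudo-header only, leaving per-segment values to the hardware. A minimal sketch of just that fixup, assuming a TCP GSO skb (the context-descriptor programming that follows it in the driver is omitted):

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

static int tso_fixup_headers(struct sk_buff *skb)
{
	int err = skb_cow_head(skb, 0);	/* make the header area writable */

	if (err < 0)
		return err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}
	return 0;
}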
skb              2762 drivers/net/ethernet/intel/e1000/e1000_main.c 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
skb              2771 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              2776 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
skb              2781 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
skb              2787 drivers/net/ethernet/intel/e1000/e1000_main.c 			       skb->protocol);
skb              2791 drivers/net/ethernet/intel/e1000/e1000_main.c 	css = skb_checksum_start_offset(skb);
skb              2800 drivers/net/ethernet/intel/e1000/e1000_main.c 		css + skb->csum_offset;
skb              2821 drivers/net/ethernet/intel/e1000/e1000_main.c 			struct sk_buff *skb, unsigned int first,
skb              2828 drivers/net/ethernet/intel/e1000/e1000_main.c 	unsigned int len = skb_headlen(skb);
skb              2842 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (!skb->data_len && tx_ring->last_tx_tso &&
skb              2843 drivers/net/ethernet/intel/e1000/e1000_main.c 		    !skb_is_gso(skb)) {
skb              2866 drivers/net/ethernet/intel/e1000/e1000_main.c 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
skb              2875 drivers/net/ethernet/intel/e1000/e1000_main.c 						  skb->data + offset,
skb              2892 drivers/net/ethernet/intel/e1000/e1000_main.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              2938 drivers/net/ethernet/intel/e1000/e1000_main.c 	segs = skb_shinfo(skb)->gso_segs ?: 1;
skb              2940 drivers/net/ethernet/intel/e1000/e1000_main.c 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
skb              2942 drivers/net/ethernet/intel/e1000/e1000_main.c 	tx_ring->buffer_info[i].skb = skb;
skb              3038 drivers/net/ethernet/intel/e1000/e1000_main.c 				       struct sk_buff *skb)
skb              3041 drivers/net/ethernet/intel/e1000/e1000_main.c 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
skb              3096 drivers/net/ethernet/intel/e1000/e1000_main.c static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
skb              3105 drivers/net/ethernet/intel/e1000/e1000_main.c 	unsigned int len = skb_headlen(skb);
skb              3111 drivers/net/ethernet/intel/e1000/e1000_main.c 	__be16 protocol = vlan_get_protocol(skb);
skb              3124 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (eth_skb_pad(skb))
skb              3127 drivers/net/ethernet/intel/e1000/e1000_main.c 	mss = skb_shinfo(skb)->gso_size;
skb              3140 drivers/net/ethernet/intel/e1000/e1000_main.c 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              3141 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (skb->data_len && hdr_len == len) {
skb              3152 drivers/net/ethernet/intel/e1000/e1000_main.c 				if ((unsigned long)(skb_tail_pointer(skb) - 1)
skb              3156 drivers/net/ethernet/intel/e1000/e1000_main.c 				pull_size = min((unsigned int)4, skb->data_len);
skb              3157 drivers/net/ethernet/intel/e1000/e1000_main.c 				if (!__pskb_pull_tail(skb, pull_size)) {
skb              3160 drivers/net/ethernet/intel/e1000/e1000_main.c 					dev_kfree_skb_any(skb);
skb              3163 drivers/net/ethernet/intel/e1000/e1000_main.c 				len = skb_headlen(skb);
skb              3173 drivers/net/ethernet/intel/e1000/e1000_main.c 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
skb              3178 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
skb              3193 drivers/net/ethernet/intel/e1000/e1000_main.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              3195 drivers/net/ethernet/intel/e1000/e1000_main.c 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
skb              3207 drivers/net/ethernet/intel/e1000/e1000_main.c 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
skb              3214 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (skb_vlan_tag_present(skb)) {
skb              3216 drivers/net/ethernet/intel/e1000/e1000_main.c 		tx_flags |= (skb_vlan_tag_get(skb) <<
skb              3222 drivers/net/ethernet/intel/e1000/e1000_main.c 	tso = e1000_tso(adapter, tx_ring, skb, protocol);
skb              3224 drivers/net/ethernet/intel/e1000/e1000_main.c 		dev_kfree_skb_any(skb);
skb              3232 drivers/net/ethernet/intel/e1000/e1000_main.c 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
skb              3238 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (unlikely(skb->no_fcs))
skb              3241 drivers/net/ethernet/intel/e1000/e1000_main.c 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
skb              3254 drivers/net/ethernet/intel/e1000/e1000_main.c 		netdev_sent_queue(netdev, skb->len);
skb              3255 drivers/net/ethernet/intel/e1000/e1000_main.c 		skb_tx_timestamp(skb);
skb              3274 drivers/net/ethernet/intel/e1000/e1000_main.c 		dev_kfree_skb_any(skb);
skb              3422 drivers/net/ethernet/intel/e1000/e1000_main.c 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
skb              3848 drivers/net/ethernet/intel/e1000/e1000_main.c 				if (buffer_info->skb) {
skb              3849 drivers/net/ethernet/intel/e1000/e1000_main.c 					bytes_compl += buffer_info->skb->len;
skb              3937 drivers/net/ethernet/intel/e1000/e1000_main.c 			      u32 csum, struct sk_buff *skb)
skb              3943 drivers/net/ethernet/intel/e1000/e1000_main.c 	skb_checksum_none_assert(skb);
skb              3964 drivers/net/ethernet/intel/e1000/e1000_main.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3972 drivers/net/ethernet/intel/e1000/e1000_main.c static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
skb              3976 drivers/net/ethernet/intel/e1000/e1000_main.c 	skb->len += length;
skb              3977 drivers/net/ethernet/intel/e1000/e1000_main.c 	skb->data_len += length;
skb              3978 drivers/net/ethernet/intel/e1000/e1000_main.c 	skb->truesize += PAGE_SIZE;
skb              3989 drivers/net/ethernet/intel/e1000/e1000_main.c 			      __le16 vlan, struct sk_buff *skb)
skb              3991 drivers/net/ethernet/intel/e1000/e1000_main.c 	skb->protocol = eth_type_trans(skb, adapter->netdev);
skb              3996 drivers/net/ethernet/intel/e1000/e1000_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              3998 drivers/net/ethernet/intel/e1000/e1000_main.c 	napi_gro_receive(&adapter->napi, skb);
skb              4101 drivers/net/ethernet/intel/e1000/e1000_main.c 	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
skb              4103 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (unlikely(!skb))
skb              4105 drivers/net/ethernet/intel/e1000/e1000_main.c 	return skb;
skb              4137 drivers/net/ethernet/intel/e1000/e1000_main.c 		struct sk_buff *skb;
skb              4211 drivers/net/ethernet/intel/e1000/e1000_main.c 				skb = rxtop;
skb              4213 drivers/net/ethernet/intel/e1000/e1000_main.c 				e1000_consume_page(buffer_info, skb, length);
skb              4225 drivers/net/ethernet/intel/e1000/e1000_main.c 					skb = e1000_alloc_rx_skb(adapter,
skb              4227 drivers/net/ethernet/intel/e1000/e1000_main.c 					if (!skb)
skb              4231 drivers/net/ethernet/intel/e1000/e1000_main.c 					memcpy(skb_tail_pointer(skb), vaddr,
skb              4237 drivers/net/ethernet/intel/e1000/e1000_main.c 					skb_put(skb, length);
skb              4240 drivers/net/ethernet/intel/e1000/e1000_main.c 							  le16_to_cpu(rx_desc->csum), skb);
skb              4242 drivers/net/ethernet/intel/e1000/e1000_main.c 					total_rx_bytes += skb->len;
skb              4246 drivers/net/ethernet/intel/e1000/e1000_main.c 							  rx_desc->special, skb);
skb              4249 drivers/net/ethernet/intel/e1000/e1000_main.c 					skb = napi_get_frags(&adapter->napi);
skb              4250 drivers/net/ethernet/intel/e1000/e1000_main.c 					if (!skb) {
skb              4254 drivers/net/ethernet/intel/e1000/e1000_main.c 					skb_fill_page_desc(skb, 0, p, 0,
skb              4256 drivers/net/ethernet/intel/e1000/e1000_main.c 					e1000_consume_page(buffer_info, skb,
skb              4266 drivers/net/ethernet/intel/e1000/e1000_main.c 				  le16_to_cpu(rx_desc->csum), skb);
skb              4268 drivers/net/ethernet/intel/e1000/e1000_main.c 		total_rx_bytes += (skb->len - 4); /* don't count FCS */
skb              4270 drivers/net/ethernet/intel/e1000/e1000_main.c 			pskb_trim(skb, skb->len - 4);
skb              4277 drivers/net/ethernet/intel/e1000/e1000_main.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              4315 drivers/net/ethernet/intel/e1000/e1000_main.c 	struct sk_buff *skb;
skb              4320 drivers/net/ethernet/intel/e1000/e1000_main.c 	skb = e1000_alloc_rx_skb(adapter, length);
skb              4321 drivers/net/ethernet/intel/e1000/e1000_main.c 	if (!skb)
skb              4327 drivers/net/ethernet/intel/e1000/e1000_main.c 	skb_put_data(skb, data, length);
skb              4329 drivers/net/ethernet/intel/e1000/e1000_main.c 	return skb;
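e1000_copybreak() above implements the common copybreak optimization: frames under a size threshold are copied into a small, freshly allocated skb so the large receive buffer can be recycled in place. A sketch under that assumption (the threshold parameter stands in for the driver's copybreak module parameter):

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static struct sk_buff *rx_copybreak(struct napi_struct *napi,
				    const u8 *data, unsigned int length,
				    unsigned int copybreak)
{
	struct sk_buff *skb;

	if (length > copybreak)
		return NULL;	/* too big: hand the original buffer up */

	skb = napi_alloc_skb(napi, length);
	if (!skb)
		return NULL;

	skb_put_data(skb, data, length);	/* copy payload, set length */
	return skb;
}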
skb              4358 drivers/net/ethernet/intel/e1000/e1000_main.c 		struct sk_buff *skb;
skb              4372 drivers/net/ethernet/intel/e1000/e1000_main.c 		skb = e1000_copybreak(adapter, buffer_info, length, data);
skb              4373 drivers/net/ethernet/intel/e1000/e1000_main.c 		if (!skb) {
skb              4376 drivers/net/ethernet/intel/e1000/e1000_main.c 			skb = build_skb(data - E1000_HEADROOM, frag_len);
skb              4377 drivers/net/ethernet/intel/e1000/e1000_main.c 			if (!skb) {
skb              4382 drivers/net/ethernet/intel/e1000/e1000_main.c 			skb_reserve(skb, E1000_HEADROOM);
skb              4413 drivers/net/ethernet/intel/e1000/e1000_main.c 			dev_kfree_skb(skb);
skb              4427 drivers/net/ethernet/intel/e1000/e1000_main.c 				dev_kfree_skb(skb);
skb              4443 drivers/net/ethernet/intel/e1000/e1000_main.c 			skb_put(skb, length);
skb              4445 drivers/net/ethernet/intel/e1000/e1000_main.c 			skb_trim(skb, length);
skb              4451 drivers/net/ethernet/intel/e1000/e1000_main.c 				  le16_to_cpu(rx_desc->csum), skb);
skb              4453 drivers/net/ethernet/intel/e1000/e1000_main.c 		e1000_receive_skb(adapter, status, rx_desc->special, skb);
skb               129 drivers/net/ethernet/intel/e1000e/e1000.h 	struct sk_buff *skb;
skb              1129 drivers/net/ethernet/intel/e1000e/ethtool.c 			dev_kfree_skb(buffer_info->skb);
skb              1141 drivers/net/ethernet/intel/e1000e/ethtool.c 			dev_kfree_skb(buffer_info->skb);
skb              1206 drivers/net/ethernet/intel/e1000e/ethtool.c 		struct sk_buff *skb;
skb              1209 drivers/net/ethernet/intel/e1000e/ethtool.c 		skb = alloc_skb(skb_size, GFP_KERNEL);
skb              1210 drivers/net/ethernet/intel/e1000e/ethtool.c 		if (!skb) {
skb              1214 drivers/net/ethernet/intel/e1000e/ethtool.c 		skb_put(skb, skb_size);
skb              1215 drivers/net/ethernet/intel/e1000e/ethtool.c 		tx_ring->buffer_info[i].skb = skb;
skb              1216 drivers/net/ethernet/intel/e1000e/ethtool.c 		tx_ring->buffer_info[i].length = skb->len;
skb              1218 drivers/net/ethernet/intel/e1000e/ethtool.c 		    dma_map_single(&pdev->dev, skb->data, skb->len,
skb              1226 drivers/net/ethernet/intel/e1000e/ethtool.c 		tx_desc->lower.data = cpu_to_le32(skb->len);
skb              1272 drivers/net/ethernet/intel/e1000e/ethtool.c 		struct sk_buff *skb;
skb              1274 drivers/net/ethernet/intel/e1000e/ethtool.c 		skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
skb              1275 drivers/net/ethernet/intel/e1000e/ethtool.c 		if (!skb) {
skb              1279 drivers/net/ethernet/intel/e1000e/ethtool.c 		skb_reserve(skb, NET_IP_ALIGN);
skb              1280 drivers/net/ethernet/intel/e1000e/ethtool.c 		rx_ring->buffer_info[i].skb = skb;
skb              1282 drivers/net/ethernet/intel/e1000e/ethtool.c 		    dma_map_single(&pdev->dev, skb->data, 2048,
skb              1292 drivers/net/ethernet/intel/e1000e/ethtool.c 		memset(skb->data, 0x00, skb->len);
skb              1604 drivers/net/ethernet/intel/e1000e/ethtool.c static void e1000_create_lbtest_frame(struct sk_buff *skb,
skb              1607 drivers/net/ethernet/intel/e1000e/ethtool.c 	memset(skb->data, 0xFF, frame_size);
skb              1609 drivers/net/ethernet/intel/e1000e/ethtool.c 	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
skb              1610 drivers/net/ethernet/intel/e1000e/ethtool.c 	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
skb              1611 drivers/net/ethernet/intel/e1000e/ethtool.c 	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
skb              1614 drivers/net/ethernet/intel/e1000e/ethtool.c static int e1000_check_lbtest_frame(struct sk_buff *skb,
skb              1618 drivers/net/ethernet/intel/e1000e/ethtool.c 	if (*(skb->data + 3) == 0xFF)
skb              1619 drivers/net/ethernet/intel/e1000e/ethtool.c 		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
skb              1620 drivers/net/ethernet/intel/e1000e/ethtool.c 		    (*(skb->data + frame_size / 2 + 12) == 0xAF))
skb              1658 drivers/net/ethernet/intel/e1000e/ethtool.c 			e1000_create_lbtest_frame(buffer_info->skb, 1024);
skb              1680 drivers/net/ethernet/intel/e1000e/ethtool.c 			ret_val = e1000_check_lbtest_frame(buffer_info->skb,
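The ethtool entries above (e1000_create_lbtest_frame / e1000_check_lbtest_frame, plus the e100 equivalent earlier) generate a self-describing loopback payload and verify it on receive. A sketch of that frame pattern, matching the 0xFF/0xAA body and 0xBE/0xAF marker bytes visible in the listing:

#include <linux/skbuff.h>
#include <linux/string.h>

static void create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size &= ~1;
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	skb->data[frame_size / 2 + 10] = 0xBE;	/* marker bytes let the */
	skb->data[frame_size / 2 + 12] = 0xAF;	/* checker spot corruption */
}

static int check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
{
	frame_size &= ~1;
	if (skb->data[3] == 0xFF &&
	    skb->data[frame_size / 2 + 10] == 0xBE &&
	    skb->data[frame_size / 2 + 12] == 0xAF)
		return 0;	/* frame came back through the loop intact */
	return 13;		/* mismatch, same convention as the drivers */
}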
skb               311 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info->skb, next_desc);
skb               313 drivers/net/ethernet/intel/e1000e/netdev.c 		if (netif_msg_pktdata(adapter) && buffer_info->skb)
skb               315 drivers/net/ethernet/intel/e1000e/netdev.c 				       16, 1, buffer_info->skb->data,
skb               316 drivers/net/ethernet/intel/e1000e/netdev.c 				       buffer_info->skb->len, true);
skb               383 drivers/net/ethernet/intel/e1000e/netdev.c 					buffer_info->skb, next_desc);
skb               392 drivers/net/ethernet/intel/e1000e/netdev.c 					buffer_info->skb, next_desc);
skb               447 drivers/net/ethernet/intel/e1000e/netdev.c 					buffer_info->skb, next_desc);
skb               454 drivers/net/ethernet/intel/e1000e/netdev.c 					buffer_info->skb, next_desc);
skb               457 drivers/net/ethernet/intel/e1000e/netdev.c 				    buffer_info->skb)
skb               461 drivers/net/ethernet/intel/e1000e/netdev.c 						       buffer_info->skb->data,
skb               520 drivers/net/ethernet/intel/e1000e/netdev.c 			       struct sk_buff *skb)
skb               539 drivers/net/ethernet/intel/e1000e/netdev.c 	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);
skb               552 drivers/net/ethernet/intel/e1000e/netdev.c 			      struct net_device *netdev, struct sk_buff *skb,
skb               557 drivers/net/ethernet/intel/e1000e/netdev.c 	e1000e_rx_hwtstamp(adapter, staterr, skb);
skb               559 drivers/net/ethernet/intel/e1000e/netdev.c 	skb->protocol = eth_type_trans(skb, netdev);
skb               562 drivers/net/ethernet/intel/e1000e/netdev.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
skb               564 drivers/net/ethernet/intel/e1000e/netdev.c 	napi_gro_receive(&adapter->napi, skb);
skb               575 drivers/net/ethernet/intel/e1000e/netdev.c 			      struct sk_buff *skb)
skb               580 drivers/net/ethernet/intel/e1000e/netdev.c 	skb_checksum_none_assert(skb);
skb               602 drivers/net/ethernet/intel/e1000e/netdev.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
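e1000_rx_checksum() above shows the standard RX checksum-offload contract: assert "no checksum information" by default, then upgrade to CHECKSUM_UNNECESSARY only when the descriptor reports a verified sum. A minimal sketch, with the two booleans standing in for decoded descriptor status/error bits:

#include <linux/skbuff.h>

static void rx_csum(struct sk_buff *skb, bool csum_valid, bool csum_err)
{
	skb_checksum_none_assert(skb);	/* default: the stack must verify */

	if (csum_err || !csum_valid)
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hardware already did */
}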
skb               652 drivers/net/ethernet/intel/e1000e/netdev.c 	struct sk_buff *skb;
skb               660 drivers/net/ethernet/intel/e1000e/netdev.c 		skb = buffer_info->skb;
skb               661 drivers/net/ethernet/intel/e1000e/netdev.c 		if (skb) {
skb               662 drivers/net/ethernet/intel/e1000e/netdev.c 			skb_trim(skb, 0);
skb               666 drivers/net/ethernet/intel/e1000e/netdev.c 		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
skb               667 drivers/net/ethernet/intel/e1000e/netdev.c 		if (!skb) {
skb               673 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info->skb = skb;
skb               675 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
skb               721 drivers/net/ethernet/intel/e1000e/netdev.c 	struct sk_buff *skb;
skb               764 drivers/net/ethernet/intel/e1000e/netdev.c 		skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
skb               767 drivers/net/ethernet/intel/e1000e/netdev.c 		if (!skb) {
skb               772 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info->skb = skb;
skb               773 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
skb               780 drivers/net/ethernet/intel/e1000e/netdev.c 			dev_kfree_skb_any(skb);
skb               781 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info->skb = NULL;
skb               824 drivers/net/ethernet/intel/e1000e/netdev.c 	struct sk_buff *skb;
skb               832 drivers/net/ethernet/intel/e1000e/netdev.c 		skb = buffer_info->skb;
skb               833 drivers/net/ethernet/intel/e1000e/netdev.c 		if (skb) {
skb               834 drivers/net/ethernet/intel/e1000e/netdev.c 			skb_trim(skb, 0);
skb               838 drivers/net/ethernet/intel/e1000e/netdev.c 		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
skb               839 drivers/net/ethernet/intel/e1000e/netdev.c 		if (unlikely(!skb)) {
skb               845 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info->skb = skb;
skb               894 drivers/net/ethernet/intel/e1000e/netdev.c 				 struct sk_buff *skb)
skb               897 drivers/net/ethernet/intel/e1000e/netdev.c 		skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
skb               928 drivers/net/ethernet/intel/e1000e/netdev.c 		struct sk_buff *skb;
skb               935 drivers/net/ethernet/intel/e1000e/netdev.c 		skb = buffer_info->skb;
skb               936 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info->skb = NULL;
skb               938 drivers/net/ethernet/intel/e1000e/netdev.c 		prefetch(skb->data - NET_IP_ALIGN);
skb               969 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info->skb = skb;
skb               978 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info->skb = skb;
skb              1007 drivers/net/ethernet/intel/e1000e/netdev.c 							       (skb->data -
skb              1012 drivers/net/ethernet/intel/e1000e/netdev.c 				buffer_info->skb = skb;
skb              1013 drivers/net/ethernet/intel/e1000e/netdev.c 				skb = new_skb;
skb              1018 drivers/net/ethernet/intel/e1000e/netdev.c 		skb_put(skb, length);
skb              1021 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_rx_checksum(adapter, staterr, skb);
skb              1023 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
skb              1025 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_receive_skb(adapter, netdev, skb, staterr,
skb              1070 drivers/net/ethernet/intel/e1000e/netdev.c 	if (buffer_info->skb) {
skb              1072 drivers/net/ethernet/intel/e1000e/netdev.c 			dev_kfree_skb_any(buffer_info->skb);
skb              1074 drivers/net/ethernet/intel/e1000e/netdev.c 			dev_consume_skb_any(buffer_info->skb);
skb              1075 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info->skb = NULL;
skb              1172 drivers/net/ethernet/intel/e1000e/netdev.c 		struct sk_buff *skb = adapter->tx_hwtstamp_skb;
skb              1187 drivers/net/ethernet/intel/e1000e/netdev.c 		skb_tstamp_tx(skb, &shhwtstamps);
skb              1188 drivers/net/ethernet/intel/e1000e/netdev.c 		dev_consume_skb_any(skb);
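The e1000e entries above complete a transmit hardware timestamp: the latched time is attached to the held skb clone via skb_tstamp_tx() and the driver's reference is dropped. A sketch assuming the hardware value has already been converted to nanoseconds:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ktime.h>
#include <linux/string.h>

static void tx_hwtstamp_complete(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps shhwtstamps;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);

	skb_tstamp_tx(skb, &shhwtstamps);	/* deliver to the socket */
	dev_consume_skb_any(skb);		/* drop our held reference */
}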
skb              1237 drivers/net/ethernet/intel/e1000e/netdev.c 				if (buffer_info->skb) {
skb              1238 drivers/net/ethernet/intel/e1000e/netdev.c 					bytes_compl += buffer_info->skb->len;
skb              1311 drivers/net/ethernet/intel/e1000e/netdev.c 	struct sk_buff *skb;
skb              1327 drivers/net/ethernet/intel/e1000e/netdev.c 		skb = buffer_info->skb;
skb              1331 drivers/net/ethernet/intel/e1000e/netdev.c 		prefetch(skb->data - NET_IP_ALIGN);
skb              1353 drivers/net/ethernet/intel/e1000e/netdev.c 			dev_kfree_skb_irq(skb);
skb              1361 drivers/net/ethernet/intel/e1000e/netdev.c 			dev_kfree_skb_irq(skb);
skb              1369 drivers/net/ethernet/intel/e1000e/netdev.c 			dev_kfree_skb_irq(skb);
skb              1374 drivers/net/ethernet/intel/e1000e/netdev.c 		skb_put(skb, length);
skb              1402 drivers/net/ethernet/intel/e1000e/netdev.c 				memcpy(skb_tail_pointer(skb), vaddr, l1);
skb              1415 drivers/net/ethernet/intel/e1000e/netdev.c 				skb_put(skb, l1);
skb              1429 drivers/net/ethernet/intel/e1000e/netdev.c 			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
skb              1431 drivers/net/ethernet/intel/e1000e/netdev.c 			skb->len += length;
skb              1432 drivers/net/ethernet/intel/e1000e/netdev.c 			skb->data_len += length;
skb              1433 drivers/net/ethernet/intel/e1000e/netdev.c 			skb->truesize += PAGE_SIZE;
skb              1441 drivers/net/ethernet/intel/e1000e/netdev.c 				pskb_trim(skb, skb->len - 4);
skb              1445 drivers/net/ethernet/intel/e1000e/netdev.c 		total_rx_bytes += skb->len;
skb              1448 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_rx_checksum(adapter, staterr, skb);
skb              1450 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
skb              1456 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_receive_skb(adapter, netdev, skb, staterr,
skb              1461 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info->skb = NULL;
skb              1490 drivers/net/ethernet/intel/e1000e/netdev.c static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
skb              1494 drivers/net/ethernet/intel/e1000e/netdev.c 	skb->len += length;
skb              1495 drivers/net/ethernet/intel/e1000e/netdev.c 	skb->data_len += length;
skb              1496 drivers/net/ethernet/intel/e1000e/netdev.c 	skb->truesize += PAGE_SIZE;
skb              1527 drivers/net/ethernet/intel/e1000e/netdev.c 		struct sk_buff *skb;
skb              1534 drivers/net/ethernet/intel/e1000e/netdev.c 		skb = buffer_info->skb;
skb              1535 drivers/net/ethernet/intel/e1000e/netdev.c 		buffer_info->skb = NULL;
skb              1558 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info->skb = skb;
skb              1570 drivers/net/ethernet/intel/e1000e/netdev.c 				rxtop = skb;
skb              1580 drivers/net/ethernet/intel/e1000e/netdev.c 				buffer_info->skb = skb;
skb              1594 drivers/net/ethernet/intel/e1000e/netdev.c 				buffer_info->skb = skb;
skb              1595 drivers/net/ethernet/intel/e1000e/netdev.c 				skb = rxtop;
skb              1597 drivers/net/ethernet/intel/e1000e/netdev.c 				e1000_consume_page(buffer_info, skb, length);
skb              1603 drivers/net/ethernet/intel/e1000e/netdev.c 				    skb_tailroom(skb) >= length) {
skb              1606 drivers/net/ethernet/intel/e1000e/netdev.c 					memcpy(skb_tail_pointer(skb), vaddr,
skb              1612 drivers/net/ethernet/intel/e1000e/netdev.c 					skb_put(skb, length);
skb              1614 drivers/net/ethernet/intel/e1000e/netdev.c 					skb_fill_page_desc(skb, 0,
skb              1617 drivers/net/ethernet/intel/e1000e/netdev.c 					e1000_consume_page(buffer_info, skb,
skb              1624 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_rx_checksum(adapter, staterr, skb);
skb              1626 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
skb              1629 drivers/net/ethernet/intel/e1000e/netdev.c 		total_rx_bytes += skb->len;
skb              1633 drivers/net/ethernet/intel/e1000e/netdev.c 		if (!pskb_may_pull(skb, ETH_HLEN)) {
skb              1635 drivers/net/ethernet/intel/e1000e/netdev.c 			dev_kfree_skb_irq(skb);
skb              1639 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_receive_skb(adapter, netdev, skb, staterr,
skb              1704 drivers/net/ethernet/intel/e1000e/netdev.c 		if (buffer_info->skb) {
skb              1705 drivers/net/ethernet/intel/e1000e/netdev.c 			dev_kfree_skb(buffer_info->skb);
skb              1706 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info->skb = NULL;
skb              5432 drivers/net/ethernet/intel/e1000e/netdev.c static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
skb              5443 drivers/net/ethernet/intel/e1000e/netdev.c 	if (!skb_is_gso(skb))
skb              5446 drivers/net/ethernet/intel/e1000e/netdev.c 	err = skb_cow_head(skb, 0);
skb              5450 drivers/net/ethernet/intel/e1000e/netdev.c 	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              5451 drivers/net/ethernet/intel/e1000e/netdev.c 	mss = skb_shinfo(skb)->gso_size;
skb              5453 drivers/net/ethernet/intel/e1000e/netdev.c 		struct iphdr *iph = ip_hdr(skb);
skb              5456 drivers/net/ethernet/intel/e1000e/netdev.c 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skb              5459 drivers/net/ethernet/intel/e1000e/netdev.c 		ipcse = skb_transport_offset(skb) - 1;
skb              5460 drivers/net/ethernet/intel/e1000e/netdev.c 	} else if (skb_is_gso_v6(skb)) {
skb              5461 drivers/net/ethernet/intel/e1000e/netdev.c 		ipv6_hdr(skb)->payload_len = 0;
skb              5462 drivers/net/ethernet/intel/e1000e/netdev.c 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb              5463 drivers/net/ethernet/intel/e1000e/netdev.c 						       &ipv6_hdr(skb)->daddr,
skb              5467 drivers/net/ethernet/intel/e1000e/netdev.c 	ipcss = skb_network_offset(skb);
skb              5468 drivers/net/ethernet/intel/e1000e/netdev.c 	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
skb              5469 drivers/net/ethernet/intel/e1000e/netdev.c 	tucss = skb_transport_offset(skb);
skb              5470 drivers/net/ethernet/intel/e1000e/netdev.c 	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
skb              5473 drivers/net/ethernet/intel/e1000e/netdev.c 		       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
skb              5500 drivers/net/ethernet/intel/e1000e/netdev.c static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
skb              5510 drivers/net/ethernet/intel/e1000e/netdev.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              5515 drivers/net/ethernet/intel/e1000e/netdev.c 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
skb              5520 drivers/net/ethernet/intel/e1000e/netdev.c 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
skb              5530 drivers/net/ethernet/intel/e1000e/netdev.c 	css = skb_checksum_start_offset(skb);
skb              5538 drivers/net/ethernet/intel/e1000e/netdev.c 	context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
skb              5554 drivers/net/ethernet/intel/e1000e/netdev.c static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
skb              5561 drivers/net/ethernet/intel/e1000e/netdev.c 	unsigned int len = skb_headlen(skb);
skb              5575 drivers/net/ethernet/intel/e1000e/netdev.c 						  skb->data + offset,
skb              5593 drivers/net/ethernet/intel/e1000e/netdev.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              5622 drivers/net/ethernet/intel/e1000e/netdev.c 	segs = skb_shinfo(skb)->gso_segs ? : 1;
skb              5624 drivers/net/ethernet/intel/e1000e/netdev.c 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
skb              5626 drivers/net/ethernet/intel/e1000e/netdev.c 	tx_ring->buffer_info[i].skb = skb;
skb              5718 drivers/net/ethernet/intel/e1000e/netdev.c 				    struct sk_buff *skb)
skb              5723 drivers/net/ethernet/intel/e1000e/netdev.c 	if (skb_vlan_tag_present(skb) &&
skb              5724 drivers/net/ethernet/intel/e1000e/netdev.c 	    !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
skb              5729 drivers/net/ethernet/intel/e1000e/netdev.c 	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
skb              5732 drivers/net/ethernet/intel/e1000e/netdev.c 	if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
skb              5736 drivers/net/ethernet/intel/e1000e/netdev.c 		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
skb              5746 drivers/net/ethernet/intel/e1000e/netdev.c 		offset = (u8 *)udp + 8 - skb->data;
skb              5747 drivers/net/ethernet/intel/e1000e/netdev.c 		length = skb->len - offset;
skb              5786 drivers/net/ethernet/intel/e1000e/netdev.c static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
skb              5793 drivers/net/ethernet/intel/e1000e/netdev.c 	unsigned int len = skb_headlen(skb);
skb              5799 drivers/net/ethernet/intel/e1000e/netdev.c 	__be16 protocol = vlan_get_protocol(skb);
skb              5802 drivers/net/ethernet/intel/e1000e/netdev.c 		dev_kfree_skb_any(skb);
skb              5806 drivers/net/ethernet/intel/e1000e/netdev.c 	if (skb->len <= 0) {
skb              5807 drivers/net/ethernet/intel/e1000e/netdev.c 		dev_kfree_skb_any(skb);
skb              5814 drivers/net/ethernet/intel/e1000e/netdev.c 	if (skb_put_padto(skb, 17))
skb              5817 drivers/net/ethernet/intel/e1000e/netdev.c 	mss = skb_shinfo(skb)->gso_size;
skb              5825 drivers/net/ethernet/intel/e1000e/netdev.c 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              5829 drivers/net/ethernet/intel/e1000e/netdev.c 		if (skb->data_len && (hdr_len == len)) {
skb              5832 drivers/net/ethernet/intel/e1000e/netdev.c 			pull_size = min_t(unsigned int, 4, skb->data_len);
skb              5833 drivers/net/ethernet/intel/e1000e/netdev.c 			if (!__pskb_pull_tail(skb, pull_size)) {
skb              5835 drivers/net/ethernet/intel/e1000e/netdev.c 				dev_kfree_skb_any(skb);
skb              5838 drivers/net/ethernet/intel/e1000e/netdev.c 			len = skb_headlen(skb);
skb              5843 drivers/net/ethernet/intel/e1000e/netdev.c 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
skb              5849 drivers/net/ethernet/intel/e1000e/netdev.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              5851 drivers/net/ethernet/intel/e1000e/netdev.c 		count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
skb              5855 drivers/net/ethernet/intel/e1000e/netdev.c 		e1000_transfer_dhcp_info(adapter, skb);
skb              5863 drivers/net/ethernet/intel/e1000e/netdev.c 	if (skb_vlan_tag_present(skb)) {
skb              5865 drivers/net/ethernet/intel/e1000e/netdev.c 		tx_flags |= (skb_vlan_tag_get(skb) <<
skb              5871 drivers/net/ethernet/intel/e1000e/netdev.c 	tso = e1000_tso(tx_ring, skb, protocol);
skb              5873 drivers/net/ethernet/intel/e1000e/netdev.c 		dev_kfree_skb_any(skb);
skb              5879 drivers/net/ethernet/intel/e1000e/netdev.c 	else if (e1000_tx_csum(tx_ring, skb, protocol))
skb              5889 drivers/net/ethernet/intel/e1000e/netdev.c 	if (unlikely(skb->no_fcs))
skb              5893 drivers/net/ethernet/intel/e1000e/netdev.c 	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
skb              5896 drivers/net/ethernet/intel/e1000e/netdev.c 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              5899 drivers/net/ethernet/intel/e1000e/netdev.c 				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              5901 drivers/net/ethernet/intel/e1000e/netdev.c 				adapter->tx_hwtstamp_skb = skb_get(skb);
skb              5909 drivers/net/ethernet/intel/e1000e/netdev.c 		skb_tx_timestamp(skb);
skb              5911 drivers/net/ethernet/intel/e1000e/netdev.c 		netdev_sent_queue(netdev, skb->len);
skb              5928 drivers/net/ethernet/intel/e1000e/netdev.c 		dev_kfree_skb_any(skb);
skb                64 drivers/net/ethernet/intel/fm10k/fm10k.h 	struct sk_buff *skb;
skb               140 drivers/net/ethernet/intel/fm10k/fm10k.h 			struct sk_buff *skb;
skb               475 drivers/net/ethernet/intel/fm10k/fm10k.h #define FM10K_CB(skb) ((struct fm10k_cb *)(skb)->cb)
skb               482 drivers/net/ethernet/intel/fm10k/fm10k.h __be16 fm10k_tx_encap_offload(struct sk_buff *skb);
skb               483 drivers/net/ethernet/intel/fm10k/fm10k.h netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
skb               254 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			      struct sk_buff *skb)
skb               265 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (unlikely(skb_is_nonlinear(skb)))
skb               269 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
skb               283 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);
skb               286 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
skb               293 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
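fm10k_add_rx_frag() above mixes two RX strategies: tiny frames are copied whole into the skb's linear area, while larger ones keep their payload in the receive page and have only the protocol headers pulled out with eth_get_headlen(). A sketch of that split, where RX_HDR_LEN is a stand-in for the driver's FM10K_RX_HDR_LEN and truesize is the driver's buffer accounting size:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

#define RX_HDR_LEN 256	/* stand-in header budget */

static void rx_attach_page(struct sk_buff *skb, struct page *page,
			   unsigned int offset, unsigned int size,
			   unsigned int truesize)
{
	unsigned char *va = page_address(page) + offset;
	unsigned int pull_len;

	if (size <= RX_HDR_LEN && !skb_is_nonlinear(skb)) {
		/* tiny frame: copy it whole; the page can be reused */
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
		return;
	}

	/* pull only the packet headers into skb->data ... */
	pull_len = eth_get_headlen(skb->dev, va, RX_HDR_LEN);
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* ... and leave the payload in the page as a fragment */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			offset + pull_len, size - pull_len, truesize);
}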
skb               301 drivers/net/ethernet/intel/fm10k/fm10k_main.c 					     struct sk_buff *skb)
skb               311 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (likely(!skb)) {
skb               322 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
skb               324 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		if (unlikely(!skb)) {
skb               333 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		prefetchw(skb->data);
skb               344 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) {
skb               356 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	return skb;
skb               361 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				     struct sk_buff *skb)
skb               363 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	skb_checksum_none_assert(skb);
skb               381 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		skb->encapsulation = true;
skb               385 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               398 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				 struct sk_buff *skb)
skb               409 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
skb               416 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			     struct sk_buff *skb)
skb               423 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;
skb               434 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		skb_record_rx_queue(skb, rx_ring->queue_index);
skb               436 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
skb               439 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	skb->protocol = eth_type_trans(skb, dev);
skb               454 drivers/net/ethernet/intel/fm10k/fm10k_main.c 					     struct sk_buff *skb)
skb               456 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	unsigned int len = skb->len;
skb               458 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	fm10k_rx_hash(rx_ring, rx_desc, skb);
skb               460 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	fm10k_rx_checksum(rx_ring, rx_desc, skb);
skb               462 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
skb               464 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
skb               466 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;
skb               472 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb               474 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               478 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	fm10k_type_trans(rx_ring, rx_desc, skb);
skb               526 drivers/net/ethernet/intel/fm10k/fm10k_main.c 				  struct sk_buff *skb)
skb               542 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		dev_kfree_skb_any(skb);
skb               548 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (eth_skb_pad(skb))
skb               560 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			      struct sk_buff *skb)
skb               562 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	napi_gro_receive(&q_vector->napi, skb);
skb               569 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct sk_buff *skb = rx_ring->skb;
skb               594 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);
skb               597 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		if (!skb)
skb               607 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
skb               608 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			skb = NULL;
skb               613 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);
skb               615 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		fm10k_receive_skb(q_vector, skb);
skb               618 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		skb = NULL;
skb               625 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	rx_ring->skb = skb;
skb               638 drivers/net/ethernet/intel/fm10k/fm10k_main.c static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
skb               640 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct fm10k_intfc *interface = netdev_priv(skb->dev);
skb               649 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (vxlan_port->port != udp_hdr(skb)->dest)
skb               653 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
skb               664 drivers/net/ethernet/intel/fm10k/fm10k_main.c static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
skb               667 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	int hlen = ip_hdrlen(skb);
skb               670 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
skb               674 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);
skb               687 drivers/net/ethernet/intel/fm10k/fm10k_main.c __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
skb               692 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb               693 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	    skb->inner_protocol != htons(ETH_P_TEB))
skb               696 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	switch (vlan_get_protocol(skb)) {
skb               698 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		l4_hdr = ip_hdr(skb)->protocol;
skb               701 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		l4_hdr = ipv6_hdr(skb)->nexthdr;
skb               709 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		eth_hdr = fm10k_port_is_vxlan(skb);
skb               712 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		eth_hdr = fm10k_gre_is_nvgre(skb);
skb               723 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
skb               726 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
skb               734 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		inner_l4_hlen = inner_tcp_hdrlen(skb);
skb               746 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (skb_inner_transport_header(skb) + inner_l4_hlen -
skb               747 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
skb               756 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct sk_buff *skb = first->skb;
skb               761 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb               764 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (!skb_is_gso(skb))
skb               768 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (skb->encapsulation) {
skb               769 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		if (!fm10k_tx_encap_offload(skb))
skb               771 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		th = skb_inner_transport_header(skb);
skb               773 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		th = skb_transport_header(skb);
skb               777 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);
skb               782 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	first->gso_segs = skb_shinfo(skb)->gso_segs;
skb               788 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
skb               803 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct sk_buff *skb = first->skb;
skb               815 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb               818 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (skb->encapsulation) {
skb               819 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		protocol = fm10k_tx_encap_offload(skb);
skb               821 drivers/net/ethernet/intel/fm10k/fm10k_main.c 			if (skb_checksum_help(skb)) {
skb               828 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		network_hdr.raw = skb_inner_network_header(skb);
skb               829 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		transport_hdr = skb_inner_transport_header(skb);
skb               831 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		protocol = vlan_get_protocol(skb);
skb               832 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		network_hdr.raw = skb_network_header(skb);
skb               833 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		transport_hdr = skb_transport_header(skb);
skb               845 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
skb               860 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		if (skb->encapsulation)
skb               869 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		skb_checksum_help(skb);
skb               890 drivers/net/ethernet/intel/fm10k/fm10k_main.c static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
skb               946 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct sk_buff *skb = first->skb;
skb               955 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);
skb               960 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (skb_vlan_tag_present(skb))
skb               961 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
skb               965 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	size = skb_headlen(skb);
skb               966 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	data = skb->data;
skb               970 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	data_len = skb->data_len;
skb               973 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
skb              1020 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	skb_tx_timestamp(first->skb);
skb              1062 drivers/net/ethernet/intel/fm10k/fm10k_main.c netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
skb              1065 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
skb              1076 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
skb              1077 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              1089 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	first->skb = skb;
skb              1090 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
skb              1107 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	dev_kfree_skb_any(first->skb);
skb              1108 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	first->skb = NULL;
skb              1228 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		napi_consume_skb(tx_buffer->skb, napi_budget);
skb              1237 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		tx_buffer->skb = NULL;
skb               147 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	if (tx_buffer->skb) {
skb               148 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		dev_kfree_skb_any(tx_buffer->skb);
skb               161 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	tx_buffer->skb = NULL;
skb               256 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	dev_kfree_skb(rx_ring->skb);
skb               257 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	rx_ring->skb = NULL;
skb               626 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
skb               630 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	unsigned int r_idx = skb->queue_mapping;
skb               636 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	if ((skb->protocol == htons(ETH_P_8021Q)) &&
skb               637 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	    !skb_vlan_tag_present(skb)) {
skb               645 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		skb = skb_share_check(skb, GFP_ATOMIC);
skb               646 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		if (!skb)
skb               650 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
skb               654 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		err = skb_cow_head(skb, 0);
skb               656 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 			dev_kfree_skb(skb);
skb               661 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
skb               664 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		__vlan_hwaccel_put_tag(skb,
skb               668 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		skb->protocol = (ntohs(proto) >= 1536) ? proto :
skb               672 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		memmove(skb->data + VLAN_HLEN, skb->data, 12);
skb               673 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		__skb_pull(skb, VLAN_HLEN);
skb               674 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		skb_reset_mac_header(skb);
skb               680 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	if (unlikely(skb->len < 17)) {
skb               681 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		int pad_len = 17 - skb->len;
skb               683 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		if (skb_pad(skb, pad_len))
skb               685 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		__skb_put(skb, pad_len);
skb               691 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]);
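
The fm10k_xmit_frame lines above carry two classic tricks: the EtherType-vs-length test (values of 1536/0x0600 and up are EtherTypes, smaller values are 802.3 lengths), and the in-place VLAN strip, where the 12 MAC-address bytes are slid forward over the 4-byte tag and the head pointer advanced. A standalone demo of the strip with a made-up frame:

#include <stdio.h>
#include <string.h>

#define VLAN_HLEN 4

int main(void)
{
	/* dst(6) src(6) 0x8100 TCI(2) inner type(2) payload... */
	unsigned char frame[] = {
		1, 2, 3, 4, 5, 6,  7, 8, 9, 10, 11, 12,
		0x81, 0x00, 0xa0, 0x64,	/* 802.1Q tag: PCP 5, VID 100 */
		0x08, 0x00,		/* inner EtherType: IPv4 */
		0xde, 0xad,
	};
	unsigned char *data = frame;

	memmove(data + VLAN_HLEN, data, 12);	/* slide MACs over the tag */
	data += VLAN_HLEN;			/* the __skb_pull() step */

	printf("type after strip: 0x%02x%02x\n", data[12], data[13]);
	return 0;
}

Running it prints "type after strip: 0x0800", i.e. a plain untagged IPv4 frame.
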
skb              1620 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c static netdev_features_t fm10k_features_check(struct sk_buff *skb,
skb              1624 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
skb              1136 drivers/net/ethernet/intel/i40e/i40e.h void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
skb              12415 drivers/net/ethernet/intel/i40e/i40e_main.c static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
skb              12439 drivers/net/ethernet/intel/i40e/i40e_main.c 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
skb              12449 drivers/net/ethernet/intel/i40e/i40e_main.c static netdev_features_t i40e_features_check(struct sk_buff *skb,
skb              12459 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              12465 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
skb              12469 drivers/net/ethernet/intel/i40e/i40e_main.c 	len = skb_network_header(skb) - skb->data;
skb              12474 drivers/net/ethernet/intel/i40e/i40e_main.c 	len = skb_transport_header(skb) - skb_network_header(skb);
skb              12478 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (skb->encapsulation) {
skb              12480 drivers/net/ethernet/intel/i40e/i40e_main.c 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
skb              12485 drivers/net/ethernet/intel/i40e/i40e_main.c 		len = skb_inner_transport_header(skb) -
skb              12486 drivers/net/ethernet/intel/i40e/i40e_main.c 		      skb_inner_network_header(skb);
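
i40e_features_check above (mirrored by iavf and ice below) measures each header layer by subtracting header pointers and withdraws checksum/TSO offloads for frames whose headers exceed what the hardware can parse. A sketch of just the MAC-layer check; the 64-byte limit here is chosen purely for illustration, not the device's real cap:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_features_t example_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	unsigned int len;

	/* only frames the stack wants offloaded need vetting */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* MAC header length = start of L3 minus start of frame */
	len = skb_network_header(skb) - skb->data;
	if (len > 64)	/* illustrative limit */
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
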
skb               330 drivers/net/ethernet/intel/i40e/i40e_ptp.c 	struct sk_buff *skb;
skb               344 drivers/net/ethernet/intel/i40e/i40e_ptp.c 		skb = pf->ptp_tx_skb;
skb               349 drivers/net/ethernet/intel/i40e/i40e_ptp.c 		dev_kfree_skb_any(skb);
skb               365 drivers/net/ethernet/intel/i40e/i40e_ptp.c 	struct sk_buff *skb = pf->ptp_tx_skb;
skb               392 drivers/net/ethernet/intel/i40e/i40e_ptp.c 	skb_tstamp_tx(skb, &shhwtstamps);
skb               393 drivers/net/ethernet/intel/i40e/i40e_ptp.c 	dev_kfree_skb_any(skb);
skb               408 drivers/net/ethernet/intel/i40e/i40e_ptp.c void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
skb               443 drivers/net/ethernet/intel/i40e/i40e_ptp.c 	i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
skb               859 drivers/net/ethernet/intel/i40e/i40e_ptp.c 		struct sk_buff *skb = pf->ptp_tx_skb;
skb               863 drivers/net/ethernet/intel/i40e/i40e_ptp.c 		dev_kfree_skb_any(skb);
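
The i40e PTP lines above keep one in-flight TX skb (pf->ptp_tx_skb, held via skb_get() at line 3018) until the PHC latch is read, then deliver the timestamp through skb_tstamp_tx() and drop the extra reference. A hedged sketch of that delivery step, assuming the latched time has already been converted to nanoseconds:

#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void example_deliver_tx_tstamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps shhwtstamps;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);

	skb_tstamp_tx(skb, &shhwtstamps);	/* clone to the socket error queue */
	dev_kfree_skb_any(skb);			/* drop the driver's reference */
}
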
skb               116 drivers/net/ethernet/intel/i40e/i40e_trace.h 		 struct sk_buff *skb),
skb               118 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(ring, desc, skb),
skb               123 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__field(void*, skb)
skb               130 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__entry->skb = skb;
skb               137 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__entry->desc, __entry->skb)
skb               144 drivers/net/ethernet/intel/i40e/i40e_trace.h 		 struct sk_buff *skb),
skb               146 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(ring, desc, skb));
skb               152 drivers/net/ethernet/intel/i40e/i40e_trace.h 		 struct sk_buff *skb),
skb               154 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(ring, desc, skb));
skb               159 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_PROTO(struct sk_buff *skb,
skb               162 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(skb, ring),
skb               165 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__field(void*, skb)
skb               171 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__entry->skb = skb;
skb               178 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__get_str(devname), __entry->skb,
skb               184 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_PROTO(struct sk_buff *skb,
skb               187 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(skb, ring));
skb               191 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_PROTO(struct sk_buff *skb,
skb               194 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(skb, ring));
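
The i40e trace entries above deliberately record the skb as a bare pointer (__field(void*, skb)) so xmit-side and clean-side events can be correlated by address without a tracepoint ever dereferencing packet memory. Roughly, such an event is declared like the sketch below (heavily abridged; a real trace header also needs the TRACE_SYSTEM/TRACE_INCLUDE boilerplate and a closing include of <trace/define_trace.h>):

TRACE_EVENT(example_xmit,
	TP_PROTO(struct sk_buff *skb),
	TP_ARGS(skb),

	TP_STRUCT__entry(
		__field(void *, skb)
		__field(unsigned int, len)
	),

	TP_fast_assign(
		__entry->skb = skb;
		__entry->len = skb->len;
	),

	TP_printk("skb=%p len=%u", __entry->skb, __entry->len)
);
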
skb               604 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (tx_buffer->skb) {
skb               610 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			dev_kfree_skb_any(tx_buffer->skb);
skb               624 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tx_buffer->skb = NULL;
skb               816 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			napi_consume_skb(tx_buf->skb, napi_budget);
skb               825 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tx_buf->skb = NULL;
skb              1352 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (rx_ring->skb) {
skb              1353 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		dev_kfree_skb(rx_ring->skb);
skb              1354 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		rx_ring->skb = NULL;
skb              1633 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				    struct sk_buff *skb,
skb              1650 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	skb->ip_summed = CHECKSUM_NONE;
skb              1652 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	skb_checksum_none_assert(skb);
skb              1698 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		skb->csum_level = 1;
skb              1705 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
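
The i40e_rx_checksum excerpt above follows the standard RX policy: start pessimistic at CHECKSUM_NONE, upgrade to CHECKSUM_UNNECESSARY once the NIC has verified the packet, and bump csum_level when the inner (tunnelled) checksum was verified as well. A hedged sketch with the hardware verdicts reduced to two flags:

#include <linux/skbuff.h>

static void example_rx_csum(struct sk_buff *skb, bool outer_ok, bool inner_ok)
{
	skb_checksum_none_assert(skb);	/* fresh skb: CHECKSUM_NONE expected */

	if (!outer_ok)
		return;			/* let the stack verify in software */

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (inner_ok)
		skb->csum_level = 1;	/* one extra validated csum level */
}
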
skb              1749 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				struct sk_buff *skb,
skb              1762 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
skb              1778 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			     union i40e_rx_desc *rx_desc, struct sk_buff *skb)
skb              1790 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
skb              1792 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
skb              1794 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb              1796 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	skb_record_rx_queue(skb, rx_ring->queue_index);
skb              1801 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              1806 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
skb              1823 drivers/net/ethernet/intel/i40e/i40e_txrx.c static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
skb              1828 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (IS_ERR(skb))
skb              1838 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		dev_kfree_skb_any(skb);
skb              1843 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (eth_skb_pad(skb))
skb              1935 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			     struct sk_buff *skb,
skb              1944 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
skb              2005 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct sk_buff *skb;
skb              2029 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
skb              2032 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (unlikely(!skb))
skb              2038 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		headlen = eth_get_headlen(skb->dev, xdp->data,
skb              2042 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	memcpy(__skb_put(skb, headlen), xdp->data,
skb              2048 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
skb              2063 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	return skb;
skb              2087 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct sk_buff *skb;
skb              2099 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	skb = build_skb(xdp->data_hard_start, truesize);
skb              2100 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (unlikely(!skb))
skb              2104 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb              2105 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	__skb_put(skb, xdp->data_end - xdp->data);
skb              2107 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		skb_metadata_set(skb, metasize);
skb              2116 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	return skb;
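
i40e_build_skb above is the zero-copy alternative to i40e_construct_skb: instead of allocating a head and memcpy'ing, build_skb() wraps the already DMA-filled buffer, skb_reserve() skips the XDP headroom, and __skb_put() marks the received payload as used. A hedged sketch of that shape (truesize accounting and page refcounting are the caller's problem and are elided here):

#include <linux/skbuff.h>

static struct sk_buff *example_wrap_buffer(void *hard_start, void *data,
					   void *data_end, unsigned int truesize)
{
	struct sk_buff *skb = build_skb(hard_start, truesize);	/* no copy */

	if (unlikely(!skb))
		return NULL;

	/* headroom between buffer start and packet start (XDP/NET_SKB_PAD) */
	skb_reserve(skb, (unsigned char *)data - (unsigned char *)hard_start);
	__skb_put(skb, (unsigned char *)data_end - (unsigned char *)data);
	return skb;
}
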
skb              2158 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			    struct sk_buff *skb)
skb              2332 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct sk_buff *skb = rx_ring->skb;
skb              2381 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
skb              2385 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (!skb) {
skb              2393 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			skb = i40e_run_xdp(rx_ring, &xdp);
skb              2396 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (IS_ERR(skb)) {
skb              2397 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			unsigned int xdp_res = -PTR_ERR(skb);
skb              2407 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		} else if (skb) {
skb              2408 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
skb              2410 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
skb              2412 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
skb              2416 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (!skb) {
skb              2425 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
skb              2428 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
skb              2429 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			skb = NULL;
skb              2434 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		total_rx_bytes += skb->len;
skb              2437 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		i40e_process_skb_fields(rx_ring, rx_desc, skb);
skb              2439 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
skb              2440 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		napi_gro_receive(&rx_ring->q_vector->napi, skb);
skb              2441 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		skb = NULL;
skb              2448 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	rx_ring->skb = skb;
skb              2667 drivers/net/ethernet/intel/i40e/i40e_txrx.c static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
skb              2700 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		      skb_inner_network_header(skb) : skb_network_header(skb);
skb              2711 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		unsigned int inner_hlen = hdr.network - skb->data;
skb              2716 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		  ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
skb              2812 drivers/net/ethernet/intel/i40e/i40e_txrx.c static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
skb              2816 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	__be16 protocol = skb->protocol;
skb              2828 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		skb->protocol = vlan_get_protocol(skb);
skb              2833 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (skb_vlan_tag_present(skb)) {
skb              2834 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
skb              2840 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
skb              2854 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	    (skb->priority != TC_PRIO_CONTROL)) {
skb              2856 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tx_flags |= (skb->priority & 0x7) <<
skb              2862 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			rc = skb_cow_head(skb, 0);
skb              2865 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			vhdr = (struct vlan_ethhdr *)skb->data;
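
i40e_tx_prepare_vlan_flags above looks for the tag in two places: the accelerated out-of-band field (skb_vlan_tag_present/get) or, for software-tagged frames, the 802.1Q header in the payload, read safely with skb_header_pointer() so paged data also works. A hedged sketch of just the lookup:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static u16 example_get_vlan_tci(struct sk_buff *skb)
{
	struct vlan_hdr _vhdr, *vhdr;

	if (skb_vlan_tag_present(skb))		/* accelerated tag */
		return skb_vlan_tag_get(skb);

	if (skb->protocol != htons(ETH_P_8021Q))
		return 0;			/* untagged */

	/* tag lives in the payload; copy it out through non-linear data */
	vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
	return vhdr ? ntohs(vhdr->h_vlan_TCI) : 0;
}
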
skb              2889 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct sk_buff *skb = first->skb;
skb              2905 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              2908 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (!skb_is_gso(skb))
skb              2911 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	err = skb_cow_head(skb, 0);
skb              2915 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	ip.hdr = skb_network_header(skb);
skb              2916 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	l4.hdr = skb_transport_header(skb);
skb              2926 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
skb              2932 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
skb              2933 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
skb              2937 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			l4_offset = l4.hdr - skb->data;
skb              2940 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			paylen = skb->len - l4_offset;
skb              2946 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		ip.hdr = skb_inner_network_header(skb);
skb              2947 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		l4.hdr = skb_inner_transport_header(skb);
skb              2959 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	l4_offset = l4.hdr - skb->data;
skb              2962 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	paylen = skb->len - l4_offset;
skb              2969 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	gso_size = skb_shinfo(skb)->gso_size;
skb              2970 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	gso_segs = skb_shinfo(skb)->gso_segs;
skb              2978 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	cd_tso_len = skb->len - *hdr_len;
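
The i40e_tso excerpt above is mostly length bookkeeping: l4_offset marks where headers end, paylen = skb->len - l4_offset feeds the pseudo-header fixup, and cd_tso_len = skb->len - *hdr_len is what the device actually segments. A standalone check of that arithmetic with made-up frame sizes:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 66586;	/* hypothetical large GSO packet */
	unsigned int hdr_len = 66;	/* ETH 14 + IPv4 20 + TCP 32 */
	unsigned int gso_size = 1448;

	unsigned int paylen = skb_len - hdr_len;
	unsigned int segs = (paylen + gso_size - 1) / gso_size;

	printf("payload=%u bytes -> %u segments of <=%u\n",
	       paylen, segs, gso_size);
	return 0;
}

With these numbers it reports 66520 payload bytes split into 46 segments.
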
skb              2995 drivers/net/ethernet/intel/i40e/i40e_txrx.c static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
skb              3000 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
skb              3016 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              3018 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		pf->ptp_tx_skb = skb_get(skb);
skb              3039 drivers/net/ethernet/intel/i40e/i40e_txrx.c static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
skb              3059 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              3062 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	ip.hdr = skb_network_header(skb);
skb              3063 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	l4.hdr = skb_transport_header(skb);
skb              3066 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
skb              3068 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (skb->encapsulation) {
skb              3083 drivers/net/ethernet/intel/i40e/i40e_txrx.c 				ipv6_skip_exthdr(skb, exthdr - skb->data,
skb              3100 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			l4.hdr = skb_inner_network_header(skb);
skb              3106 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			skb_checksum_help(skb);
skb              3115 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		ip.hdr = skb_inner_network_header(skb);
skb              3123 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
skb              3124 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
skb              3131 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		l4.hdr = skb_inner_transport_header(skb);
skb              3157 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			ipv6_skip_exthdr(skb, exthdr - skb->data,
skb              3186 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		skb_checksum_help(skb);
skb              3263 drivers/net/ethernet/intel/i40e/i40e_txrx.c bool __i40e_chk_linearize(struct sk_buff *skb)
skb              3269 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              3277 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	frag = &skb_shinfo(skb)->frags[0];
skb              3285 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	sum = 1 - skb_shinfo(skb)->gso_size;
skb              3297 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
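
__i40e_chk_linearize above hides a sliding-window test in the "sum = 1 - gso_size" line: prime the sum with a window of fragment sizes, then slide by adding the newest fragment and subtracting the stale one; if the sum ever drops to zero or below, some gso_size worth of payload spans too many fragments and the skb must be linearized. A standalone re-implementation of the trick; the window width of 6 assumes an 8-descriptor-per-segment hardware limit (head plus window plus one trailing fragment) and is an illustration, not the driver's exact bounds:

#include <stdbool.h>
#include <stdio.h>

static bool needs_linearize(const int *frag, int nr_frags,
			    int gso_size, int win)
{
	long sum = 1 - gso_size;	/* > 0 once a window covers gso_size */
	int i;

	if (nr_frags < win)
		return false;		/* few enough frags, always fine */

	for (i = 0; i < win; i++)	/* prime the first window */
		sum += frag[i];

	for (i = win; ; i++) {
		if (sum <= 0)		/* window smaller than one segment */
			return true;
		if (i == nr_frags)
			return false;
		sum += frag[i];		/* slide: add newest fragment... */
		sum -= frag[i - win];	/* ...retire the stale one */
	}
}

int main(void)
{
	int frags[] = { 200, 200, 200, 200, 200, 200, 200, 200, 200 };

	printf("%d\n", needs_linearize(frags, 9, 1448, 6));	/* 1 */
	printf("%d\n", needs_linearize(frags, 9, 1024, 6));	/* 0 */
	return 0;
}
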
skb              3346 drivers/net/ethernet/intel/i40e/i40e_txrx.c static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
skb              3350 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	unsigned int data_len = skb->data_len;
skb              3351 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	unsigned int size = skb_headlen(skb);
skb              3368 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
skb              3373 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
skb              3459 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	skb_tx_timestamp(skb);
skb              3558 drivers/net/ethernet/intel/i40e/i40e_txrx.c static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
skb              3573 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	prefetch(skb->data);
skb              3575 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_trace(xmit_frame_ring, skb, tx_ring);
skb              3577 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	count = i40e_xmit_descriptor_count(skb);
skb              3578 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (i40e_chk_linearize(skb, count)) {
skb              3579 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		if (__skb_linearize(skb)) {
skb              3580 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			dev_kfree_skb_any(skb);
skb              3583 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		count = i40e_txd_use_count(skb->len);
skb              3600 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	first->skb = skb;
skb              3601 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	first->bytecount = skb->len;
skb              3605 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
skb              3609 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	protocol = vlan_get_protocol(skb);
skb              3625 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
skb              3630 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
skb              3645 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_atr(tx_ring, skb, tx_flags);
skb              3647 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
skb              3654 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
skb              3655 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	dev_kfree_skb_any(first->skb);
skb              3656 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	first->skb = NULL;
skb              3676 drivers/net/ethernet/intel/i40e/i40e_txrx.c netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
skb              3680 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
skb              3685 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
skb              3688 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	return i40e_xmit_frame_ring(skb, tx_ring);
skb               286 drivers/net/ethernet/intel/i40e/i40e_txrx.h 		struct sk_buff *skb;
skb               410 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	struct sk_buff *skb;		/* When i40e_clean_rx_ring_irq() must
skb               483 drivers/net/ethernet/intel/i40e/i40e_txrx.h netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
skb               495 drivers/net/ethernet/intel/i40e/i40e_txrx.h bool __i40e_chk_linearize(struct sk_buff *skb);
skb               522 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
skb               524 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
skb               525 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
skb               526 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	int count = 0, size = skb_headlen(skb);
skb               563 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
skb               569 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	if (skb_is_gso(skb))
skb               570 drivers/net/ethernet/intel/i40e/i40e_txrx.h 		return __i40e_chk_linearize(skb);
skb                15 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h 			     union i40e_rx_desc *rx_desc, struct sk_buff *skb);
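
The i40e_xmit_descriptor_count inline above sums per-fragment descriptor estimates via a txd_use_count helper. Assuming a 12K per-descriptor data limit (16K-1 rounded down to the 4K read-request alignment), a classic division-free estimate is (size * 85) >> 20: shifting by 12 divides by 4K and multiply-85-shift-8 approximates divide-by-3. The standalone check below compares that estimate (plus one, to round up) against exact ceiling division; the approximation undercounts only in a narrow band just above each multiple of 12K, which a driver has to tolerate or catch elsewhere. The constant and formula are stated from memory and should be treated as assumptions:

#include <stdio.h>

int main(void)
{
	unsigned int size, low = 0;

	for (size = 1; size <= 65536; size++) {
		unsigned int est = ((size * 85) >> 20) + 1;
		unsigned int exact = (size + 12288 - 1) / 12288;

		if (est < exact)	/* estimate one descriptor short */
			low++;
	}
	printf("undercounts in %u of 65536 sizes\n", low);
	return 0;
}
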
skb               492 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct sk_buff *skb;
skb               495 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
skb               498 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	if (unlikely(!skb))
skb               501 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb               502 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
skb               504 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		skb_metadata_set(skb, metasize);
skb               507 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	return skb;
skb               536 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct sk_buff *skb;
skb               607 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
skb               608 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		if (!skb) {
skb               616 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		if (eth_skb_pad(skb))
skb               619 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		total_rx_bytes += skb->len;
skb               622 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		i40e_process_skb_fields(rx_ring, rx_desc, skb);
skb               623 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		napi_gro_receive(&rx_ring->q_vector->napi, skb);
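
i40e_construct_skb_zc above is the copy-out path of AF_XDP zero-copy RX: the frame must leave the UMEM before going up the stack, so it is copied into a freshly allocated skb. The core allocate/put/copy shape, sketched:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *example_copy_to_skb(struct napi_struct *napi,
					   const void *data, unsigned int len)
{
	/* NAPI-context allocation reuses per-CPU cached skb heads */
	struct sk_buff *skb = __napi_alloc_skb(napi, len, GFP_ATOMIC);

	if (unlikely(!skb))
		return NULL;

	/* __skb_put() grows the data area by len and returns the old tail */
	memcpy(__skb_put(skb, len), data, len);
	return skb;
}
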
skb              3313 drivers/net/ethernet/intel/iavf/iavf_main.c static netdev_features_t iavf_features_check(struct sk_buff *skb,
skb              3323 drivers/net/ethernet/intel/iavf/iavf_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              3329 drivers/net/ethernet/intel/iavf/iavf_main.c 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
skb              3333 drivers/net/ethernet/intel/iavf/iavf_main.c 	len = skb_network_header(skb) - skb->data;
skb              3338 drivers/net/ethernet/intel/iavf/iavf_main.c 	len = skb_transport_header(skb) - skb_network_header(skb);
skb              3342 drivers/net/ethernet/intel/iavf/iavf_main.c 	if (skb->encapsulation) {
skb              3344 drivers/net/ethernet/intel/iavf/iavf_main.c 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
skb              3349 drivers/net/ethernet/intel/iavf/iavf_main.c 		len = skb_inner_transport_header(skb) -
skb              3350 drivers/net/ethernet/intel/iavf/iavf_main.c 		      skb_inner_network_header(skb);
skb               116 drivers/net/ethernet/intel/iavf/iavf_trace.h 		 struct sk_buff *skb),
skb               118 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(ring, desc, skb),
skb               123 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__field(void*, skb)
skb               130 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__entry->skb = skb;
skb               137 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__entry->desc, __entry->skb)
skb               144 drivers/net/ethernet/intel/iavf/iavf_trace.h 		 struct sk_buff *skb),
skb               146 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(ring, desc, skb));
skb               152 drivers/net/ethernet/intel/iavf/iavf_trace.h 		 struct sk_buff *skb),
skb               154 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(ring, desc, skb));
skb               159 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_PROTO(struct sk_buff *skb,
skb               162 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(skb, ring),
skb               165 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__field(void*, skb)
skb               171 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__entry->skb = skb;
skb               178 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__get_str(devname), __entry->skb,
skb               184 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_PROTO(struct sk_buff *skb,
skb               187 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(skb, ring));
skb               191 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_PROTO(struct sk_buff *skb,
skb               194 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(skb, ring));
skb                30 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (tx_buffer->skb) {
skb                34 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			dev_kfree_skb_any(tx_buffer->skb);
skb                48 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tx_buffer->skb = NULL;
skb               227 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		napi_consume_skb(tx_buf->skb, napi_budget);
skb               236 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		tx_buf->skb = NULL;
skb               664 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (rx_ring->skb) {
skb               665 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		dev_kfree_skb(rx_ring->skb);
skb               666 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		rx_ring->skb = NULL;
skb               861 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			     struct sk_buff *skb, u16 vlan_tag)
skb               867 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb               869 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	napi_gro_receive(&q_vector->napi, skb);
skb               944 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				    struct sk_buff *skb,
skb               961 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	skb->ip_summed = CHECKSUM_NONE;
skb               963 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	skb_checksum_none_assert(skb);
skb              1009 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1053 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				struct sk_buff *skb,
skb              1066 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
skb              1083 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			     union iavf_rx_desc *rx_desc, struct sk_buff *skb,
skb              1086 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
skb              1088 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb              1090 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	skb_record_rx_queue(skb, rx_ring->queue_index);
skb              1093 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
skb              1109 drivers/net/ethernet/intel/iavf/iavf_txrx.c static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
skb              1112 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (eth_skb_pad(skb))
skb              1230 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			     struct sk_buff *skb,
skb              1242 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
skb              1306 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct sk_buff *skb;
skb              1318 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
skb              1321 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (unlikely(!skb))
skb              1327 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
skb              1330 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
skb              1335 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
skb              1350 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	return skb;
skb              1373 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct sk_buff *skb;
skb              1384 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	skb = build_skb(va - IAVF_SKB_PAD, truesize);
skb              1385 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (unlikely(!skb))
skb              1389 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	skb_reserve(skb, IAVF_SKB_PAD);
skb              1390 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	__skb_put(skb, size);
skb              1399 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	return skb;
skb              1446 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			    struct sk_buff *skb)
skb              1481 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct sk_buff *skb = rx_ring->skb;
skb              1521 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
skb              1525 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (skb)
skb              1526 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
skb              1528 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			skb = iavf_build_skb(rx_ring, rx_buffer, size);
skb              1530 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			skb = iavf_construct_skb(rx_ring, rx_buffer, size);
skb              1533 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (!skb) {
skb              1543 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (iavf_is_non_eop(rx_ring, rx_desc, skb))
skb              1552 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			dev_kfree_skb_any(skb);
skb              1553 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			skb = NULL;
skb              1557 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (iavf_cleanup_headers(rx_ring, skb)) {
skb              1558 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			skb = NULL;
skb              1563 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		total_rx_bytes += skb->len;
skb              1570 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
skb              1576 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
skb              1577 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		iavf_receive_skb(rx_ring, skb, vlan_tag);
skb              1578 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		skb = NULL;
skb              1584 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	rx_ring->skb = skb;
skb              1802 drivers/net/ethernet/intel/iavf/iavf_txrx.c static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
skb              1806 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	__be16 protocol = skb->protocol;
skb              1818 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		skb->protocol = vlan_get_protocol(skb);
skb              1823 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (skb_vlan_tag_present(skb)) {
skb              1824 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
skb              1830 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
skb              1855 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct sk_buff *skb = first->skb;
skb              1871 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1874 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (!skb_is_gso(skb))
skb              1877 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	err = skb_cow_head(skb, 0);
skb              1881 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	ip.hdr = skb_network_header(skb);
skb              1882 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	l4.hdr = skb_transport_header(skb);
skb              1892 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
skb              1898 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
skb              1899 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
skb              1903 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			l4_offset = l4.hdr - skb->data;
skb              1906 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			paylen = skb->len - l4_offset;
skb              1912 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		ip.hdr = skb_inner_network_header(skb);
skb              1913 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		l4.hdr = skb_inner_transport_header(skb);
skb              1925 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	l4_offset = l4.hdr - skb->data;
skb              1928 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	paylen = skb->len - l4_offset;
skb              1935 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	gso_size = skb_shinfo(skb)->gso_size;
skb              1936 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	gso_segs = skb_shinfo(skb)->gso_segs;
skb              1944 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	cd_tso_len = skb->len - *hdr_len;
skb              1961 drivers/net/ethernet/intel/iavf/iavf_txrx.c static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
skb              1981 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1984 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	ip.hdr = skb_network_header(skb);
skb              1985 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	l4.hdr = skb_transport_header(skb);
skb              1988 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
skb              1990 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (skb->encapsulation) {
skb              2005 drivers/net/ethernet/intel/iavf/iavf_txrx.c 				ipv6_skip_exthdr(skb, exthdr - skb->data,
skb              2022 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			l4.hdr = skb_inner_network_header(skb);
skb              2028 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			skb_checksum_help(skb);
skb              2037 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		ip.hdr = skb_inner_network_header(skb);
skb              2045 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
skb              2046 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
skb              2053 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		l4.hdr = skb_inner_transport_header(skb);
skb              2079 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			ipv6_skip_exthdr(skb, exthdr - skb->data,
skb              2108 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		skb_checksum_help(skb);
skb              2162 drivers/net/ethernet/intel/iavf/iavf_txrx.c bool __iavf_chk_linearize(struct sk_buff *skb)
skb              2168 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              2176 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	frag = &skb_shinfo(skb)->frags[0];
skb              2184 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	sum = 1 - skb_shinfo(skb)->gso_size;
skb              2196 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
skb              2266 drivers/net/ethernet/intel/iavf/iavf_txrx.c static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
skb              2270 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	unsigned int data_len = skb->data_len;
skb              2271 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	unsigned int size = skb_headlen(skb);
skb              2287 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
skb              2292 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
skb              2364 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	skb_tx_timestamp(skb);
skb              2408 drivers/net/ethernet/intel/iavf/iavf_txrx.c static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
skb              2422 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	prefetch(skb->data);
skb              2424 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_trace(xmit_frame_ring, skb, tx_ring);
skb              2426 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	count = iavf_xmit_descriptor_count(skb);
skb              2427 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (iavf_chk_linearize(skb, count)) {
skb              2428 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (__skb_linearize(skb)) {
skb              2429 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			dev_kfree_skb_any(skb);
skb              2432 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		count = iavf_txd_use_count(skb->len);
skb              2449 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	first->skb = skb;
skb              2450 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	first->bytecount = skb->len;
skb              2454 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
skb              2458 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	protocol = vlan_get_protocol(skb);
skb              2474 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
skb              2485 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
skb              2491 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
skb              2492 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	dev_kfree_skb_any(first->skb);
skb              2493 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	first->skb = NULL;
skb              2504 drivers/net/ethernet/intel/iavf/iavf_txrx.c netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
skb              2507 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
skb              2512 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
skb              2513 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
skb              2515 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		skb->len = IAVF_MIN_TX_LEN;
skb              2516 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
skb              2519 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	return iavf_xmit_frame_ring(skb, tx_ring);
skb               263 drivers/net/ethernet/intel/iavf/iavf_txrx.h 		struct sk_buff *skb;
skb               382 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	struct sk_buff *skb;		/* When iavf_clean_rx_ring_irq() must
skb               440 drivers/net/ethernet/intel/iavf/iavf_txrx.h netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
skb               452 drivers/net/ethernet/intel/iavf/iavf_txrx.h bool __iavf_chk_linearize(struct sk_buff *skb);
skb               463 drivers/net/ethernet/intel/iavf/iavf_txrx.h static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
skb               465 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
skb               466 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
skb               467 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	int count = 0, size = skb_headlen(skb);
skb               504 drivers/net/ethernet/intel/iavf/iavf_txrx.h static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
skb               510 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	if (skb_is_gso(skb))
skb               511 drivers/net/ethernet/intel/iavf/iavf_txrx.h 		return __iavf_chk_linearize(skb);
skb               586 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 	struct sk_buff *skb = first->skb;
skb               593 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 	    skb->priority != TC_PRIO_CONTROL) {
skb               596 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 		first->tx_flags |= (skb->priority & 0x7) <<
skb               602 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 			rc = skb_cow_head(skb, 0);
skb               605 drivers/net/ethernet/intel/ice/ice_dcb_lib.c 			vhdr = (struct vlan_ethhdr *)skb->data;
skb              4505 drivers/net/ethernet/intel/ice/ice_main.c ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
skb              4515 drivers/net/ethernet/intel/ice/ice_main.c 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
skb              4813 drivers/net/ethernet/intel/ice/ice_main.c ice_features_check(struct sk_buff *skb,
skb              4823 drivers/net/ethernet/intel/ice/ice_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              4829 drivers/net/ethernet/intel/ice/ice_main.c 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
skb              4832 drivers/net/ethernet/intel/ice/ice_main.c 	len = skb_network_header(skb) - skb->data;
skb              4836 drivers/net/ethernet/intel/ice/ice_main.c 	len = skb_transport_header(skb) - skb_network_header(skb);
skb              4840 drivers/net/ethernet/intel/ice/ice_main.c 	if (skb->encapsulation) {
skb              4841 drivers/net/ethernet/intel/ice/ice_main.c 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
skb              4845 drivers/net/ethernet/intel/ice/ice_main.c 		len = skb_inner_transport_header(skb) -
skb              4846 drivers/net/ethernet/intel/ice/ice_main.c 		      skb_inner_network_header(skb);
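
The ice_dcb_lib.c lines above fold the 3-bit skb->priority into the PCP field of an 802.1Q tag ((skb->priority & 0x7) shifted into the VLAN priority bits). A standalone demo of that bit layout; 13 is the standard VLAN_PRIO_SHIFT:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tci = 100;		/* VID 100, priority 0 */
	unsigned int prio = 5;		/* stand-in for skb->priority */

	tci |= (prio & 0x7) << 13;	/* PCP into the top three bits */
	printf("TCI = 0x%04x (PCP=%u, VID=%u)\n",
	       (unsigned)tci, (unsigned)(tci >> 13),
	       (unsigned)(tci & 0x0fff));
	return 0;
}

It prints "TCI = 0xa064 (PCP=5, VID=100)".
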
skb                21 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (tx_buf->skb) {
skb                22 drivers/net/ethernet/intel/ice/ice_txrx.c 		dev_kfree_skb_any(tx_buf->skb);
skb                36 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_buf->skb = NULL;
skb               140 drivers/net/ethernet/intel/ice/ice_txrx.c 		napi_consume_skb(tx_buf->skb, napi_budget);
skb               149 drivers/net/ethernet/intel/ice/ice_txrx.c 		tx_buf->skb = NULL;
skb               280 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (rx_buf->skb) {
skb               281 drivers/net/ethernet/intel/ice/ice_txrx.c 			dev_kfree_skb(rx_buf->skb);
skb               282 drivers/net/ethernet/intel/ice/ice_txrx.c 			rx_buf->skb = NULL;
skb               602 drivers/net/ethernet/intel/ice/ice_txrx.c ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
skb               613 drivers/net/ethernet/intel/ice/ice_txrx.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
skb               659 drivers/net/ethernet/intel/ice/ice_txrx.c ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
skb               666 drivers/net/ethernet/intel/ice/ice_txrx.c 	*skb = rx_buf->skb;
skb               697 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct sk_buff *skb;
skb               706 drivers/net/ethernet/intel/ice/ice_txrx.c 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
skb               708 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (unlikely(!skb))
skb               711 drivers/net/ethernet/intel/ice/ice_txrx.c 	skb_record_rx_queue(skb, rx_ring->q_index);
skb               715 drivers/net/ethernet/intel/ice/ice_txrx.c 		headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
skb               718 drivers/net/ethernet/intel/ice/ice_txrx.c 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
skb               728 drivers/net/ethernet/intel/ice/ice_txrx.c 		skb_add_rx_frag(skb, 0, rx_buf->page,
skb               740 drivers/net/ethernet/intel/ice/ice_txrx.c 	return skb;
skb               769 drivers/net/ethernet/intel/ice/ice_txrx.c 	rx_buf->skb = NULL;
skb               784 drivers/net/ethernet/intel/ice/ice_txrx.c static bool ice_cleanup_headers(struct sk_buff *skb)
skb               787 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (eth_skb_pad(skb))
skb               823 drivers/net/ethernet/intel/ice/ice_txrx.c 	       struct sk_buff *skb)
skb               839 drivers/net/ethernet/intel/ice/ice_txrx.c 	rx_ring->rx_buf[ntc].skb = skb;
skb               865 drivers/net/ethernet/intel/ice/ice_txrx.c 	    struct sk_buff *skb, u8 rx_ptype)
skb               878 drivers/net/ethernet/intel/ice/ice_txrx.c 	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
skb               891 drivers/net/ethernet/intel/ice/ice_txrx.c ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
skb               904 drivers/net/ethernet/intel/ice/ice_txrx.c 	skb->ip_summed = CHECKSUM_NONE;
skb               905 drivers/net/ethernet/intel/ice/ice_txrx.c 	skb_checksum_none_assert(skb);
skb               941 drivers/net/ethernet/intel/ice/ice_txrx.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               965 drivers/net/ethernet/intel/ice/ice_txrx.c 		       struct sk_buff *skb, u8 ptype)
skb               967 drivers/net/ethernet/intel/ice/ice_txrx.c 	ice_rx_hash(rx_ring, rx_desc, skb, ptype);
skb               970 drivers/net/ethernet/intel/ice/ice_txrx.c 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
skb               972 drivers/net/ethernet/intel/ice/ice_txrx.c 	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
skb               985 drivers/net/ethernet/intel/ice/ice_txrx.c ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
skb               989 drivers/net/ethernet/intel/ice/ice_txrx.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb               990 drivers/net/ethernet/intel/ice/ice_txrx.c 	napi_gro_receive(&rx_ring->q_vector->napi, skb);
skb              1015 drivers/net/ethernet/intel/ice/ice_txrx.c 		struct sk_buff *skb;
skb              1043 drivers/net/ethernet/intel/ice/ice_txrx.c 		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
skb              1045 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (skb)
skb              1046 drivers/net/ethernet/intel/ice/ice_txrx.c 			ice_add_rx_frag(rx_buf, skb, size);
skb              1048 drivers/net/ethernet/intel/ice/ice_txrx.c 			skb = ice_construct_skb(rx_ring, rx_buf, size);
skb              1051 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (!skb) {
skb              1062 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (ice_is_non_eop(rx_ring, rx_desc, skb))
skb              1067 drivers/net/ethernet/intel/ice/ice_txrx.c 			dev_kfree_skb_any(skb);
skb              1078 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (ice_cleanup_headers(skb)) {
skb              1079 drivers/net/ethernet/intel/ice/ice_txrx.c 			skb = NULL;
skb              1084 drivers/net/ethernet/intel/ice/ice_txrx.c 		total_rx_bytes += skb->len;
skb              1090 drivers/net/ethernet/intel/ice/ice_txrx.c 		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
skb              1093 drivers/net/ethernet/intel/ice/ice_txrx.c 		ice_receive_skb(rx_ring, skb, vlan_tag);
skb              1599 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct sk_buff *skb;
skb              1605 drivers/net/ethernet/intel/ice/ice_txrx.c 	skb = first->skb;
skb              1607 drivers/net/ethernet/intel/ice/ice_txrx.c 	data_len = skb->data_len;
skb              1608 drivers/net/ethernet/intel/ice/ice_txrx.c 	size = skb_headlen(skb);
skb              1618 drivers/net/ethernet/intel/ice/ice_txrx.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
skb              1622 drivers/net/ethernet/intel/ice/ice_txrx.c 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
skb              1685 drivers/net/ethernet/intel/ice/ice_txrx.c 	skb_tx_timestamp(first->skb);
skb              1744 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct sk_buff *skb = first->skb;
skb              1759 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1762 drivers/net/ethernet/intel/ice/ice_txrx.c 	ip.hdr = skb_network_header(skb);
skb              1763 drivers/net/ethernet/intel/ice/ice_txrx.c 	l4.hdr = skb_transport_header(skb);
skb              1766 drivers/net/ethernet/intel/ice/ice_txrx.c 	l2_len = ip.hdr - skb->data;
skb              1769 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (skb->encapsulation)
skb              1773 drivers/net/ethernet/intel/ice/ice_txrx.c 	protocol = vlan_get_protocol(skb);
skb              1789 drivers/net/ethernet/intel/ice/ice_txrx.c 			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
skb              1823 drivers/net/ethernet/intel/ice/ice_txrx.c 		skb_checksum_help(skb);
skb              1846 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct sk_buff *skb = first->skb;
skb              1847 drivers/net/ethernet/intel/ice/ice_txrx.c 	__be16 protocol = skb->protocol;
skb              1858 drivers/net/ethernet/intel/ice/ice_txrx.c 		skb->protocol = vlan_get_protocol(skb);
skb              1863 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (skb_vlan_tag_present(skb)) {
skb              1864 drivers/net/ethernet/intel/ice/ice_txrx.c 		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
skb              1870 drivers/net/ethernet/intel/ice/ice_txrx.c 		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
skb              1894 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct sk_buff *skb = first->skb;
skb              1908 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1911 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (!skb_is_gso(skb))
skb              1914 drivers/net/ethernet/intel/ice/ice_txrx.c 	err = skb_cow_head(skb, 0);
skb              1919 drivers/net/ethernet/intel/ice/ice_txrx.c 	ip.hdr = skb_network_header(skb);
skb              1920 drivers/net/ethernet/intel/ice/ice_txrx.c 	l4.hdr = skb_transport_header(skb);
skb              1931 drivers/net/ethernet/intel/ice/ice_txrx.c 	l4_start = l4.hdr - skb->data;
skb              1934 drivers/net/ethernet/intel/ice/ice_txrx.c 	paylen = skb->len - l4_start;
skb              1941 drivers/net/ethernet/intel/ice/ice_txrx.c 	first->gso_segs = skb_shinfo(skb)->gso_segs;
skb              1944 drivers/net/ethernet/intel/ice/ice_txrx.c 	cd_tso_len = skb->len - off->header_len;
skb              1945 drivers/net/ethernet/intel/ice/ice_txrx.c 	cd_mss = skb_shinfo(skb)->gso_size;
skb              1995 drivers/net/ethernet/intel/ice/ice_txrx.c static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
skb              1997 drivers/net/ethernet/intel/ice/ice_txrx.c 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
skb              1998 drivers/net/ethernet/intel/ice/ice_txrx.c 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1999 drivers/net/ethernet/intel/ice/ice_txrx.c 	unsigned int count = 0, size = skb_headlen(skb);
skb              2026 drivers/net/ethernet/intel/ice/ice_txrx.c static bool __ice_chk_linearize(struct sk_buff *skb)
skb              2032 drivers/net/ethernet/intel/ice/ice_txrx.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              2040 drivers/net/ethernet/intel/ice/ice_txrx.c 	frag = &skb_shinfo(skb)->frags[0];
skb              2048 drivers/net/ethernet/intel/ice/ice_txrx.c 	sum = 1 - skb_shinfo(skb)->gso_size;
skb              2060 drivers/net/ethernet/intel/ice/ice_txrx.c 	stale = &skb_shinfo(skb)->frags[0];
skb              2086 drivers/net/ethernet/intel/ice/ice_txrx.c static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
skb              2092 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (skb_is_gso(skb))
skb              2093 drivers/net/ethernet/intel/ice/ice_txrx.c 		return __ice_chk_linearize(skb);
skb              2107 drivers/net/ethernet/intel/ice/ice_txrx.c ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
skb              2115 drivers/net/ethernet/intel/ice/ice_txrx.c 	count = ice_xmit_desc_count(skb);
skb              2116 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (ice_chk_linearize(skb, count)) {
skb              2117 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (__skb_linearize(skb))
skb              2119 drivers/net/ethernet/intel/ice/ice_txrx.c 		count = ice_txd_use_count(skb->len);
skb              2139 drivers/net/ethernet/intel/ice/ice_txrx.c 	first->skb = skb;
skb              2140 drivers/net/ethernet/intel/ice/ice_txrx.c 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
skb              2159 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (unlikely(skb->priority == TC_PRIO_CONTROL &&
skb              2186 drivers/net/ethernet/intel/ice/ice_txrx.c 	dev_kfree_skb_any(skb);
skb              2197 drivers/net/ethernet/intel/ice/ice_txrx.c netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              2203 drivers/net/ethernet/intel/ice/ice_txrx.c 	tx_ring = vsi->tx_rings[skb->queue_mapping];
skb              2208 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
skb              2211 drivers/net/ethernet/intel/ice/ice_txrx.c 	return ice_xmit_frame_ring(skb, tx_ring);
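
ice_start_xmit above leans on a subtle contract: skb_put_padto() zero-pads short frames up to the minimum and, on allocation failure, frees the skb itself, so the only correct reaction is to report NETDEV_TX_OK without touching the skb again. A hedged sketch of that entry point; the minimum of 17 mirrors the role of ICE_MIN_TX_LEN but is used here only for illustration:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	const unsigned int min_len = 17;	/* illustrative minimum */

	if (skb_put_padto(skb, min_len))
		return NETDEV_TX_OK;	/* skb already freed by the helper */

	/* stand-in for ring selection and hand-off, so the sketch is
	 * self-contained; a real driver maps and queues the skb here */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
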
skb                57 drivers/net/ethernet/intel/ice/ice_txrx.h 	struct sk_buff *skb;
skb                77 drivers/net/ethernet/intel/ice/ice_txrx.h 	struct sk_buff *skb;
skb               234 drivers/net/ethernet/intel/ice/ice_txrx.h netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
skb               203 drivers/net/ethernet/intel/igb/igb.h 	struct sk_buff *skb;
skb               286 drivers/net/ethernet/intel/igb/igb.h 			struct sk_buff *skb;
skb               677 drivers/net/ethernet/intel/igb/igb.h void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
skb               679 drivers/net/ethernet/intel/igb/igb.h 			 struct sk_buff *skb);
skb              1771 drivers/net/ethernet/intel/igb/igb_ethtool.c static void igb_create_lbtest_frame(struct sk_buff *skb,
skb              1774 drivers/net/ethernet/intel/igb/igb_ethtool.c 	memset(skb->data, 0xFF, frame_size);
skb              1776 drivers/net/ethernet/intel/igb/igb_ethtool.c 	memset(&skb->data[frame_size], 0xAA, frame_size - 1);
skb              1777 drivers/net/ethernet/intel/igb/igb_ethtool.c 	memset(&skb->data[frame_size + 10], 0xBE, 1);
skb              1778 drivers/net/ethernet/intel/igb/igb_ethtool.c 	memset(&skb->data[frame_size + 12], 0xAF, 1);
skb              1839 drivers/net/ethernet/intel/igb/igb_ethtool.c 		dev_kfree_skb_any(tx_buffer_info->skb);
skb              1878 drivers/net/ethernet/intel/igb/igb_ethtool.c 	struct sk_buff *skb;
skb              1881 drivers/net/ethernet/intel/igb/igb_ethtool.c 	skb = alloc_skb(size, GFP_KERNEL);
skb              1882 drivers/net/ethernet/intel/igb/igb_ethtool.c 	if (!skb)
skb              1886 drivers/net/ethernet/intel/igb/igb_ethtool.c 	igb_create_lbtest_frame(skb, size);
skb              1887 drivers/net/ethernet/intel/igb/igb_ethtool.c 	skb_put(skb, size);
skb              1905 drivers/net/ethernet/intel/igb/igb_ethtool.c 			skb_get(skb);
skb              1906 drivers/net/ethernet/intel/igb/igb_ethtool.c 			tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
skb              1927 drivers/net/ethernet/intel/igb/igb_ethtool.c 	kfree_skb(skb);
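
igb_create_lbtest_frame() above stamps a loopback test buffer: 0xFF everywhere, then 0xAA filler plus 0xBE/0xAF markers in the second half, so the receive side can verify the data survived the round trip. A standalone sketch of that fill-and-check idea; the checker here is an assumed counterpart, since the listing does not show igb's verification routine:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Fill pattern modeled on igb_create_lbtest_frame(). */
static void create_lbtest_frame(unsigned char *data, unsigned int frame_size)
{
	memset(data, 0xFF, frame_size);
	frame_size /= 2;
	memset(&data[frame_size], 0xAA, frame_size - 1);
	data[frame_size + 10] = 0xBE;
	data[frame_size + 12] = 0xAF;
}

/* Assumed verification: spot-check one 0xFF byte and the two
 * markers, rather than comparing the whole buffer. */
static bool check_lbtest_frame(const unsigned char *data,
			       unsigned int frame_size)
{
	frame_size /= 2;
	return data[3] == 0xFF &&
	       data[frame_size + 10] == 0xBE &&
	       data[frame_size + 12] == 0xAF;
}

int main(void)
{
	unsigned char frame[1024];

	create_lbtest_frame(frame, sizeof(frame));
	printf("loopback pattern %s\n",
	       check_lbtest_frame(frame, sizeof(frame)) ? "ok" : "corrupt");
	return 0;
}
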
skb               131 drivers/net/ethernet/intel/igb/igb_main.c static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
skb               455 drivers/net/ethernet/intel/igb/igb_main.c 				buffer_info->skb, next_desc);
skb               457 drivers/net/ethernet/intel/igb/igb_main.c 			if (netif_msg_pktdata(adapter) && buffer_info->skb)
skb               460 drivers/net/ethernet/intel/igb/igb_main.c 					16, 1, buffer_info->skb->data,
skb              2511 drivers/net/ethernet/intel/igb/igb_main.c igb_features_check(struct sk_buff *skb, struct net_device *dev,
skb              2517 drivers/net/ethernet/intel/igb/igb_main.c 	mac_hdr_len = skb_network_header(skb) - skb->data;
skb              2525 drivers/net/ethernet/intel/igb/igb_main.c 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
skb              2535 drivers/net/ethernet/intel/igb/igb_main.c 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
skb              4629 drivers/net/ethernet/intel/igb/igb_main.c 		dev_kfree_skb_any(tx_buffer->skb);
skb              4736 drivers/net/ethernet/intel/igb/igb_main.c 	dev_kfree_skb(rx_ring->skb);
skb              4737 drivers/net/ethernet/intel/igb/igb_main.c 	rx_ring->skb = NULL;
skb              5678 drivers/net/ethernet/intel/igb/igb_main.c 		ts = ktime_to_timespec64(first->skb->tstamp);
skb              5679 drivers/net/ethernet/intel/igb/igb_main.c 		first->skb->tstamp = ktime_set(0, 0);
skb              5691 drivers/net/ethernet/intel/igb/igb_main.c 	struct sk_buff *skb = first->skb;
skb              5704 drivers/net/ethernet/intel/igb/igb_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              5707 drivers/net/ethernet/intel/igb/igb_main.c 	if (!skb_is_gso(skb))
skb              5710 drivers/net/ethernet/intel/igb/igb_main.c 	err = skb_cow_head(skb, 0);
skb              5714 drivers/net/ethernet/intel/igb/igb_main.c 	ip.hdr = skb_network_header(skb);
skb              5715 drivers/net/ethernet/intel/igb/igb_main.c 	l4.hdr = skb_checksum_start(skb);
skb              5722 drivers/net/ethernet/intel/igb/igb_main.c 		unsigned char *csum_start = skb_checksum_start(skb);
skb              5744 drivers/net/ethernet/intel/igb/igb_main.c 	l4_offset = l4.hdr - skb->data;
skb              5750 drivers/net/ethernet/intel/igb/igb_main.c 	paylen = skb->len - l4_offset;
skb              5754 drivers/net/ethernet/intel/igb/igb_main.c 	first->gso_segs = skb_shinfo(skb)->gso_segs;
skb              5759 drivers/net/ethernet/intel/igb/igb_main.c 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
skb              5763 drivers/net/ethernet/intel/igb/igb_main.c 	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
skb              5772 drivers/net/ethernet/intel/igb/igb_main.c static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
skb              5776 drivers/net/ethernet/intel/igb/igb_main.c 	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
skb              5778 drivers/net/ethernet/intel/igb/igb_main.c 	return offset == skb_checksum_start_offset(skb);
skb              5783 drivers/net/ethernet/intel/igb/igb_main.c 	struct sk_buff *skb = first->skb;
skb              5787 drivers/net/ethernet/intel/igb/igb_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb              5795 drivers/net/ethernet/intel/igb/igb_main.c 	switch (skb->csum_offset) {
skb              5804 drivers/net/ethernet/intel/igb/igb_main.c 		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
skb              5806 drivers/net/ethernet/intel/igb/igb_main.c 		     igb_ipv6_csum_is_sctp(skb))) {
skb              5812 drivers/net/ethernet/intel/igb/igb_main.c 		skb_checksum_help(skb);
skb              5818 drivers/net/ethernet/intel/igb/igb_main.c 	vlan_macip_lens = skb_checksum_start_offset(skb) -
skb              5819 drivers/net/ethernet/intel/igb/igb_main.c 			  skb_network_offset(skb);
skb              5821 drivers/net/ethernet/intel/igb/igb_main.c 	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
skb              5832 drivers/net/ethernet/intel/igb/igb_main.c static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
skb              5852 drivers/net/ethernet/intel/igb/igb_main.c 	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
skb              5919 drivers/net/ethernet/intel/igb/igb_main.c 	struct sk_buff *skb = first->skb;
skb              5926 drivers/net/ethernet/intel/igb/igb_main.c 	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
skb              5931 drivers/net/ethernet/intel/igb/igb_main.c 	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
skb              5933 drivers/net/ethernet/intel/igb/igb_main.c 	size = skb_headlen(skb);
skb              5934 drivers/net/ethernet/intel/igb/igb_main.c 	data_len = skb->data_len;
skb              5936 drivers/net/ethernet/intel/igb/igb_main.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
skb              5940 drivers/net/ethernet/intel/igb/igb_main.c 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
skb              5999 drivers/net/ethernet/intel/igb/igb_main.c 	skb_tx_timestamp(skb);
skb              6052 drivers/net/ethernet/intel/igb/igb_main.c 	dev_kfree_skb_any(tx_buffer->skb);
skb              6053 drivers/net/ethernet/intel/igb/igb_main.c 	tx_buffer->skb = NULL;
skb              6060 drivers/net/ethernet/intel/igb/igb_main.c netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
skb              6067 drivers/net/ethernet/intel/igb/igb_main.c 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
skb              6068 drivers/net/ethernet/intel/igb/igb_main.c 	__be16 protocol = vlan_get_protocol(skb);
skb              6077 drivers/net/ethernet/intel/igb/igb_main.c 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
skb              6079 drivers/net/ethernet/intel/igb/igb_main.c 						&skb_shinfo(skb)->frags[f]));
skb              6088 drivers/net/ethernet/intel/igb/igb_main.c 	first->skb = skb;
skb              6089 drivers/net/ethernet/intel/igb/igb_main.c 	first->bytecount = skb->len;
skb              6092 drivers/net/ethernet/intel/igb/igb_main.c 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb              6098 drivers/net/ethernet/intel/igb/igb_main.c 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              6101 drivers/net/ethernet/intel/igb/igb_main.c 			adapter->ptp_tx_skb = skb_get(skb);
skb              6110 drivers/net/ethernet/intel/igb/igb_main.c 	if (skb_vlan_tag_present(skb)) {
skb              6112 drivers/net/ethernet/intel/igb/igb_main.c 		tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
skb              6131 drivers/net/ethernet/intel/igb/igb_main.c 	dev_kfree_skb_any(first->skb);
skb              6132 drivers/net/ethernet/intel/igb/igb_main.c 	first->skb = NULL;
skb              6148 drivers/net/ethernet/intel/igb/igb_main.c 						    struct sk_buff *skb)
skb              6150 drivers/net/ethernet/intel/igb/igb_main.c 	unsigned int r_idx = skb->queue_mapping;
skb              6158 drivers/net/ethernet/intel/igb/igb_main.c static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
skb              6166 drivers/net/ethernet/intel/igb/igb_main.c 	if (skb_put_padto(skb, 17))
skb              6169 drivers/net/ethernet/intel/igb/igb_main.c 	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
skb              7784 drivers/net/ethernet/intel/igb/igb_main.c 		napi_consume_skb(tx_buffer->skb, napi_budget);
skb              7987 drivers/net/ethernet/intel/igb/igb_main.c 			    struct sk_buff *skb,
skb              7997 drivers/net/ethernet/intel/igb/igb_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
skb              8018 drivers/net/ethernet/intel/igb/igb_main.c 	struct sk_buff *skb;
skb              8027 drivers/net/ethernet/intel/igb/igb_main.c 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
skb              8028 drivers/net/ethernet/intel/igb/igb_main.c 	if (unlikely(!skb))
skb              8032 drivers/net/ethernet/intel/igb/igb_main.c 		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
skb              8040 drivers/net/ethernet/intel/igb/igb_main.c 		headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
skb              8043 drivers/net/ethernet/intel/igb/igb_main.c 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
skb              8048 drivers/net/ethernet/intel/igb/igb_main.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
skb              8060 drivers/net/ethernet/intel/igb/igb_main.c 	return skb;
skb              8075 drivers/net/ethernet/intel/igb/igb_main.c 	struct sk_buff *skb;
skb              8084 drivers/net/ethernet/intel/igb/igb_main.c 	skb = build_skb(va - IGB_SKB_PAD, truesize);
skb              8085 drivers/net/ethernet/intel/igb/igb_main.c 	if (unlikely(!skb))
skb              8089 drivers/net/ethernet/intel/igb/igb_main.c 	skb_reserve(skb, IGB_SKB_PAD);
skb              8090 drivers/net/ethernet/intel/igb/igb_main.c 	__skb_put(skb, size);
skb              8094 drivers/net/ethernet/intel/igb/igb_main.c 		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
skb              8095 drivers/net/ethernet/intel/igb/igb_main.c 		__skb_pull(skb, IGB_TS_HDR_LEN);
skb              8105 drivers/net/ethernet/intel/igb/igb_main.c 	return skb;
skb              8110 drivers/net/ethernet/intel/igb/igb_main.c 				   struct sk_buff *skb)
skb              8112 drivers/net/ethernet/intel/igb/igb_main.c 	skb_checksum_none_assert(skb);
skb              8130 drivers/net/ethernet/intel/igb/igb_main.c 		if (!((skb->len == 60) &&
skb              8142 drivers/net/ethernet/intel/igb/igb_main.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              8150 drivers/net/ethernet/intel/igb/igb_main.c 			       struct sk_buff *skb)
skb              8153 drivers/net/ethernet/intel/igb/igb_main.c 		skb_set_hash(skb,
skb              8202 drivers/net/ethernet/intel/igb/igb_main.c 				struct sk_buff *skb)
skb              8208 drivers/net/ethernet/intel/igb/igb_main.c 			dev_kfree_skb_any(skb);
skb              8214 drivers/net/ethernet/intel/igb/igb_main.c 	if (eth_skb_pad(skb))
skb              8232 drivers/net/ethernet/intel/igb/igb_main.c 				   struct sk_buff *skb)
skb              8236 drivers/net/ethernet/intel/igb/igb_main.c 	igb_rx_hash(rx_ring, rx_desc, skb);
skb              8238 drivers/net/ethernet/intel/igb/igb_main.c 	igb_rx_checksum(rx_ring, rx_desc, skb);
skb              8242 drivers/net/ethernet/intel/igb/igb_main.c 		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
skb              8254 drivers/net/ethernet/intel/igb/igb_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              8257 drivers/net/ethernet/intel/igb/igb_main.c 	skb_record_rx_queue(skb, rx_ring->queue_index);
skb              8259 drivers/net/ethernet/intel/igb/igb_main.c 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
skb              8306 drivers/net/ethernet/intel/igb/igb_main.c 	struct sk_buff *skb = rx_ring->skb;
skb              8335 drivers/net/ethernet/intel/igb/igb_main.c 		if (skb)
skb              8336 drivers/net/ethernet/intel/igb/igb_main.c 			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
skb              8338 drivers/net/ethernet/intel/igb/igb_main.c 			skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
skb              8340 drivers/net/ethernet/intel/igb/igb_main.c 			skb = igb_construct_skb(rx_ring, rx_buffer,
skb              8344 drivers/net/ethernet/intel/igb/igb_main.c 		if (!skb) {
skb              8358 drivers/net/ethernet/intel/igb/igb_main.c 		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
skb              8359 drivers/net/ethernet/intel/igb/igb_main.c 			skb = NULL;
skb              8364 drivers/net/ethernet/intel/igb/igb_main.c 		total_bytes += skb->len;
skb              8367 drivers/net/ethernet/intel/igb/igb_main.c 		igb_process_skb_fields(rx_ring, rx_desc, skb);
skb              8369 drivers/net/ethernet/intel/igb/igb_main.c 		napi_gro_receive(&q_vector->napi, skb);
skb              8372 drivers/net/ethernet/intel/igb/igb_main.c 		skb = NULL;
skb              8379 drivers/net/ethernet/intel/igb/igb_main.c 	rx_ring->skb = skb;
skb              8797 drivers/net/ethernet/intel/igb/igb_main.c 	struct sk_buff *skb;
skb              8808 drivers/net/ethernet/intel/igb/igb_main.c 	skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
skb              8809 drivers/net/ethernet/intel/igb/igb_main.c 	if (!skb)
skb              8812 drivers/net/ethernet/intel/igb/igb_main.c 	skb_put(skb, wupl);
skb              8817 drivers/net/ethernet/intel/igb/igb_main.c 	memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
skb              8819 drivers/net/ethernet/intel/igb/igb_main.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              8820 drivers/net/ethernet/intel/igb/igb_main.c 	netif_rx(skb);
skb               818 drivers/net/ethernet/intel/igb/igb_ptp.c 	struct sk_buff *skb = adapter->ptp_tx_skb;
skb               855 drivers/net/ethernet/intel/igb/igb_ptp.c 	skb_tstamp_tx(skb, &shhwtstamps);
skb               856 drivers/net/ethernet/intel/igb/igb_ptp.c 	dev_kfree_skb_any(skb);
skb               870 drivers/net/ethernet/intel/igb/igb_ptp.c 			 struct sk_buff *skb)
skb               880 drivers/net/ethernet/intel/igb/igb_ptp.c 	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
skb               897 drivers/net/ethernet/intel/igb/igb_ptp.c 	skb_hwtstamps(skb)->hwtstamp =
skb               898 drivers/net/ethernet/intel/igb/igb_ptp.c 		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
skb               910 drivers/net/ethernet/intel/igb/igb_ptp.c 			 struct sk_buff *skb)
skb               933 drivers/net/ethernet/intel/igb/igb_ptp.c 	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
skb               949 drivers/net/ethernet/intel/igb/igb_ptp.c 	skb_hwtstamps(skb)->hwtstamp =
skb               950 drivers/net/ethernet/intel/igb/igb_ptp.c 		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
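
The igb_tso()/igb_tx_csum() lines above pack header geometry into one vlan_macip_lens word for the advanced context descriptor: the network-header length in the low bits, the MAC-header length shifted above it, and the VLAN tag in the top half. A sketch of that packing under the assumption that the MACLEN shift is 9 and the VLAN shift is 16, matching the usual advanced-descriptor layout (verify against the driver headers before reuse):

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout of vlan_macip_lens (IPLEN in bits 8:0,
 * MACLEN in bits 13:9, VLAN in bits 31:16); check the real
 * E1000_ADVTXD_* constants before relying on these values. */
#define ADVTXD_MACLEN_SHIFT 9
#define ADVTXD_VLAN_SHIFT   16

static uint32_t pack_vlan_macip_lens(uint32_t net_off, uint32_t csum_start,
				     uint16_t vlan_tag)
{
	uint32_t v = csum_start - net_off;	/* network header length */

	v |= net_off << ADVTXD_MACLEN_SHIFT;	/* MAC header length */
	v |= (uint32_t)vlan_tag << ADVTXD_VLAN_SHIFT;
	return v;
}

int main(void)
{
	/* 14-byte Ethernet header, 20-byte IPv4 header, VLAN 5 */
	printf("vlan_macip_lens = 0x%08x\n",
	       pack_vlan_macip_lens(14, 34, 5));
	return 0;
}
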
skb               101 drivers/net/ethernet/intel/igbvf/igbvf.h 	struct sk_buff *skb;
skb                85 drivers/net/ethernet/intel/igbvf/netdev.c 			      struct sk_buff *skb,
skb                97 drivers/net/ethernet/intel/igbvf/netdev.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb               100 drivers/net/ethernet/intel/igbvf/netdev.c 	napi_gro_receive(&adapter->rx_ring->napi, skb);
skb               104 drivers/net/ethernet/intel/igbvf/netdev.c 					 u32 status_err, struct sk_buff *skb)
skb               106 drivers/net/ethernet/intel/igbvf/netdev.c 	skb_checksum_none_assert(skb);
skb               123 drivers/net/ethernet/intel/igbvf/netdev.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               141 drivers/net/ethernet/intel/igbvf/netdev.c 	struct sk_buff *skb;
skb               181 drivers/net/ethernet/intel/igbvf/netdev.c 		if (!buffer_info->skb) {
skb               182 drivers/net/ethernet/intel/igbvf/netdev.c 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
skb               183 drivers/net/ethernet/intel/igbvf/netdev.c 			if (!skb) {
skb               188 drivers/net/ethernet/intel/igbvf/netdev.c 			buffer_info->skb = skb;
skb               189 drivers/net/ethernet/intel/igbvf/netdev.c 			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
skb               193 drivers/net/ethernet/intel/igbvf/netdev.c 				dev_kfree_skb(buffer_info->skb);
skb               194 drivers/net/ethernet/intel/igbvf/netdev.c 				buffer_info->skb = NULL;
skb               250 drivers/net/ethernet/intel/igbvf/netdev.c 	struct sk_buff *skb;
skb               284 drivers/net/ethernet/intel/igbvf/netdev.c 		skb = buffer_info->skb;
skb               285 drivers/net/ethernet/intel/igbvf/netdev.c 		prefetch(skb->data - NET_IP_ALIGN);
skb               286 drivers/net/ethernet/intel/igbvf/netdev.c 		buffer_info->skb = NULL;
skb               292 drivers/net/ethernet/intel/igbvf/netdev.c 			skb_put(skb, length);
skb               296 drivers/net/ethernet/intel/igbvf/netdev.c 		if (!skb_shinfo(skb)->nr_frags) {
skb               301 drivers/net/ethernet/intel/igbvf/netdev.c 			skb_put(skb, hlen);
skb               310 drivers/net/ethernet/intel/igbvf/netdev.c 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
skb               321 drivers/net/ethernet/intel/igbvf/netdev.c 			skb->len += length;
skb               322 drivers/net/ethernet/intel/igbvf/netdev.c 			skb->data_len += length;
skb               323 drivers/net/ethernet/intel/igbvf/netdev.c 			skb->truesize += PAGE_SIZE / 2;
skb               334 drivers/net/ethernet/intel/igbvf/netdev.c 			buffer_info->skb = next_buffer->skb;
skb               336 drivers/net/ethernet/intel/igbvf/netdev.c 			next_buffer->skb = skb;
skb               342 drivers/net/ethernet/intel/igbvf/netdev.c 			dev_kfree_skb_irq(skb);
skb               346 drivers/net/ethernet/intel/igbvf/netdev.c 		total_bytes += skb->len;
skb               349 drivers/net/ethernet/intel/igbvf/netdev.c 		igbvf_rx_checksum_adv(adapter, staterr, skb);
skb               351 drivers/net/ethernet/intel/igbvf/netdev.c 		skb->protocol = eth_type_trans(skb, netdev);
skb               353 drivers/net/ethernet/intel/igbvf/netdev.c 		igbvf_receive_skb(adapter, netdev, skb, staterr,
skb               401 drivers/net/ethernet/intel/igbvf/netdev.c 	if (buffer_info->skb) {
skb               402 drivers/net/ethernet/intel/igbvf/netdev.c 		dev_kfree_skb_any(buffer_info->skb);
skb               403 drivers/net/ethernet/intel/igbvf/netdev.c 		buffer_info->skb = NULL;
skb               574 drivers/net/ethernet/intel/igbvf/netdev.c 		if (buffer_info->skb) {
skb               575 drivers/net/ethernet/intel/igbvf/netdev.c 			dev_kfree_skb(buffer_info->skb);
skb               576 drivers/net/ethernet/intel/igbvf/netdev.c 			buffer_info->skb = NULL;
skb               774 drivers/net/ethernet/intel/igbvf/netdev.c 	struct sk_buff *skb;
skb               802 drivers/net/ethernet/intel/igbvf/netdev.c 			skb = buffer_info->skb;
skb               804 drivers/net/ethernet/intel/igbvf/netdev.c 			if (skb) {
skb               808 drivers/net/ethernet/intel/igbvf/netdev.c 				segs = skb_shinfo(skb)->gso_segs ?: 1;
skb               810 drivers/net/ethernet/intel/igbvf/netdev.c 				bytecount = ((segs - 1) * skb_headlen(skb)) +
skb               811 drivers/net/ethernet/intel/igbvf/netdev.c 					    skb->len;
skb              1998 drivers/net/ethernet/intel/igbvf/netdev.c 		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
skb              2013 drivers/net/ethernet/intel/igbvf/netdev.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              2016 drivers/net/ethernet/intel/igbvf/netdev.c 	if (!skb_is_gso(skb))
skb              2019 drivers/net/ethernet/intel/igbvf/netdev.c 	err = skb_cow_head(skb, 0);
skb              2023 drivers/net/ethernet/intel/igbvf/netdev.c 	ip.hdr = skb_network_header(skb);
skb              2024 drivers/net/ethernet/intel/igbvf/netdev.c 	l4.hdr = skb_checksum_start(skb);
skb              2031 drivers/net/ethernet/intel/igbvf/netdev.c 		unsigned char *csum_start = skb_checksum_start(skb);
skb              2048 drivers/net/ethernet/intel/igbvf/netdev.c 	l4_offset = l4.hdr - skb->data;
skb              2054 drivers/net/ethernet/intel/igbvf/netdev.c 	paylen = skb->len - l4_offset;
skb              2059 drivers/net/ethernet/intel/igbvf/netdev.c 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
skb              2063 drivers/net/ethernet/intel/igbvf/netdev.c 	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
skb              2071 drivers/net/ethernet/intel/igbvf/netdev.c static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
skb              2075 drivers/net/ethernet/intel/igbvf/netdev.c 	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
skb              2077 drivers/net/ethernet/intel/igbvf/netdev.c 	return offset == skb_checksum_start_offset(skb);
skb              2080 drivers/net/ethernet/intel/igbvf/netdev.c static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
skb              2086 drivers/net/ethernet/intel/igbvf/netdev.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb              2093 drivers/net/ethernet/intel/igbvf/netdev.c 	switch (skb->csum_offset) {
skb              2102 drivers/net/ethernet/intel/igbvf/netdev.c 		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
skb              2104 drivers/net/ethernet/intel/igbvf/netdev.c 		     igbvf_ipv6_csum_is_sctp(skb))) {
skb              2110 drivers/net/ethernet/intel/igbvf/netdev.c 		skb_checksum_help(skb);
skb              2114 drivers/net/ethernet/intel/igbvf/netdev.c 	vlan_macip_lens = skb_checksum_start_offset(skb) -
skb              2115 drivers/net/ethernet/intel/igbvf/netdev.c 			  skb_network_offset(skb);
skb              2117 drivers/net/ethernet/intel/igbvf/netdev.c 	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
skb              2155 drivers/net/ethernet/intel/igbvf/netdev.c 				   struct sk_buff *skb)
skb              2159 drivers/net/ethernet/intel/igbvf/netdev.c 	unsigned int len = skb_headlen(skb);
skb              2171 drivers/net/ethernet/intel/igbvf/netdev.c 	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
skb              2176 drivers/net/ethernet/intel/igbvf/netdev.c 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
skb              2184 drivers/net/ethernet/intel/igbvf/netdev.c 		frag = &skb_shinfo(skb)->frags[f];
skb              2198 drivers/net/ethernet/intel/igbvf/netdev.c 	tx_ring->buffer_info[i].skb = skb;
skb              2284 drivers/net/ethernet/intel/igbvf/netdev.c static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
skb              2293 drivers/net/ethernet/intel/igbvf/netdev.c 	__be16 protocol = vlan_get_protocol(skb);
skb              2296 drivers/net/ethernet/intel/igbvf/netdev.c 		dev_kfree_skb_any(skb);
skb              2300 drivers/net/ethernet/intel/igbvf/netdev.c 	if (skb->len <= 0) {
skb              2301 drivers/net/ethernet/intel/igbvf/netdev.c 		dev_kfree_skb_any(skb);
skb              2311 drivers/net/ethernet/intel/igbvf/netdev.c 	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
skb              2316 drivers/net/ethernet/intel/igbvf/netdev.c 	if (skb_vlan_tag_present(skb)) {
skb              2318 drivers/net/ethernet/intel/igbvf/netdev.c 		tx_flags |= (skb_vlan_tag_get(skb) <<
skb              2327 drivers/net/ethernet/intel/igbvf/netdev.c 	tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
skb              2329 drivers/net/ethernet/intel/igbvf/netdev.c 		dev_kfree_skb_any(skb);
skb              2335 drivers/net/ethernet/intel/igbvf/netdev.c 	else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) &&
skb              2336 drivers/net/ethernet/intel/igbvf/netdev.c 		 (skb->ip_summed == CHECKSUM_PARTIAL))
skb              2342 drivers/net/ethernet/intel/igbvf/netdev.c 	count = igbvf_tx_map_adv(adapter, tx_ring, skb);
skb              2346 drivers/net/ethernet/intel/igbvf/netdev.c 				   first, skb->len, hdr_len);
skb              2350 drivers/net/ethernet/intel/igbvf/netdev.c 		dev_kfree_skb_any(skb);
skb              2358 drivers/net/ethernet/intel/igbvf/netdev.c static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
skb              2365 drivers/net/ethernet/intel/igbvf/netdev.c 		dev_kfree_skb_any(skb);
skb              2371 drivers/net/ethernet/intel/igbvf/netdev.c 	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
skb              2649 drivers/net/ethernet/intel/igbvf/netdev.c igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
skb              2655 drivers/net/ethernet/intel/igbvf/netdev.c 	mac_hdr_len = skb_network_header(skb) - skb->data;
skb              2663 drivers/net/ethernet/intel/igbvf/netdev.c 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
skb              2673 drivers/net/ethernet/intel/igbvf/netdev.c 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
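
igbvf_tso() above computes paylen = skb->len - l4_offset and then removes that payload length from the TCP pseudo-header checksum, because the stack seeded tcp->check with the full payload length while the hardware re-adds each segment's own length during TSO. A toy ones'-complement fold illustrating the subtraction; this stands in for the kernel's csum_replace_by_diff(), not a reimplementation of it:

#include <stdint.h>
#include <stdio.h>

/* Subtract a 32-bit value from a 16-bit Internet checksum in
 * ones'-complement arithmetic: adding the complement of each
 * 16-bit half is equivalent to subtracting it. */
static uint16_t csum_sub32(uint16_t check, uint32_t value)
{
	/* work on the ones'-complement sum, not the inverted checksum */
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~(value >> 16);
	sum += (uint16_t)~(value & 0xffff);

	/* fold carries back into 16 bits, then re-invert */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t check = 0x1c46;	/* arbitrary example checksum */

	printf("check without 1448-byte paylen: 0x%04x\n",
	       csum_sub32(check, 1448));
	return 0;
}
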
skb               186 drivers/net/ethernet/intel/igc/igc.h 	struct sk_buff *skb;
skb               279 drivers/net/ethernet/intel/igc/igc.h 			struct sk_buff *skb;
skb               228 drivers/net/ethernet/intel/igc/igc_main.c 		dev_kfree_skb_any(tx_buffer->skb);
skb               360 drivers/net/ethernet/intel/igc/igc_main.c 	dev_kfree_skb(rx_ring->skb);
skb               361 drivers/net/ethernet/intel/igc/igc_main.c 	rx_ring->skb = NULL;
skb               827 drivers/net/ethernet/intel/igc/igc_main.c 		ts = ktime_to_timespec64(first->skb->tstamp);
skb               828 drivers/net/ethernet/intel/igc/igc_main.c 		first->skb->tstamp = ktime_set(0, 0);
skb               835 drivers/net/ethernet/intel/igc/igc_main.c static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
skb               839 drivers/net/ethernet/intel/igc/igc_main.c 	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
skb               841 drivers/net/ethernet/intel/igc/igc_main.c 	return offset == skb_checksum_start_offset(skb);
skb               846 drivers/net/ethernet/intel/igc/igc_main.c 	struct sk_buff *skb = first->skb;
skb               850 drivers/net/ethernet/intel/igc/igc_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               858 drivers/net/ethernet/intel/igc/igc_main.c 	switch (skb->csum_offset) {
skb               867 drivers/net/ethernet/intel/igc/igc_main.c 		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
skb               869 drivers/net/ethernet/intel/igc/igc_main.c 		     igc_ipv6_csum_is_sctp(skb))) {
skb               875 drivers/net/ethernet/intel/igc/igc_main.c 		skb_checksum_help(skb);
skb               881 drivers/net/ethernet/intel/igc/igc_main.c 	vlan_macip_lens = skb_checksum_start_offset(skb) -
skb               882 drivers/net/ethernet/intel/igc/igc_main.c 			  skb_network_offset(skb);
skb               884 drivers/net/ethernet/intel/igc/igc_main.c 	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
skb               922 drivers/net/ethernet/intel/igc/igc_main.c static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
skb               955 drivers/net/ethernet/intel/igc/igc_main.c 	struct sk_buff *skb = first->skb;
skb               963 drivers/net/ethernet/intel/igc/igc_main.c 	u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);
skb               967 drivers/net/ethernet/intel/igc/igc_main.c 	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
skb               969 drivers/net/ethernet/intel/igc/igc_main.c 	size = skb_headlen(skb);
skb               970 drivers/net/ethernet/intel/igc/igc_main.c 	data_len = skb->data_len;
skb               972 drivers/net/ethernet/intel/igc/igc_main.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
skb               976 drivers/net/ethernet/intel/igc/igc_main.c 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
skb              1035 drivers/net/ethernet/intel/igc/igc_main.c 	skb_tx_timestamp(skb);
skb              1088 drivers/net/ethernet/intel/igc/igc_main.c 	dev_kfree_skb_any(tx_buffer->skb);
skb              1089 drivers/net/ethernet/intel/igc/igc_main.c 	tx_buffer->skb = NULL;
skb              1096 drivers/net/ethernet/intel/igc/igc_main.c static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
skb              1099 drivers/net/ethernet/intel/igc/igc_main.c 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
skb              1100 drivers/net/ethernet/intel/igc/igc_main.c 	__be16 protocol = vlan_get_protocol(skb);
skb              1112 drivers/net/ethernet/intel/igc/igc_main.c 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
skb              1114 drivers/net/ethernet/intel/igc/igc_main.c 						&skb_shinfo(skb)->frags[f]));
skb              1123 drivers/net/ethernet/intel/igc/igc_main.c 	first->skb = skb;
skb              1124 drivers/net/ethernet/intel/igc/igc_main.c 	first->bytecount = skb->len;
skb              1139 drivers/net/ethernet/intel/igc/igc_main.c 						    struct sk_buff *skb)
skb              1141 drivers/net/ethernet/intel/igc/igc_main.c 	unsigned int r_idx = skb->queue_mapping;
skb              1149 drivers/net/ethernet/intel/igc/igc_main.c static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
skb              1157 drivers/net/ethernet/intel/igc/igc_main.c 	if (skb->len < 17) {
skb              1158 drivers/net/ethernet/intel/igc/igc_main.c 		if (skb_padto(skb, 17))
skb              1160 drivers/net/ethernet/intel/igc/igc_main.c 		skb->len = 17;
skb              1163 drivers/net/ethernet/intel/igc/igc_main.c 	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
skb              1168 drivers/net/ethernet/intel/igc/igc_main.c 			       struct sk_buff *skb)
skb              1171 drivers/net/ethernet/intel/igc/igc_main.c 		skb_set_hash(skb,
skb              1188 drivers/net/ethernet/intel/igc/igc_main.c 				   struct sk_buff *skb)
skb              1190 drivers/net/ethernet/intel/igc/igc_main.c 	igc_rx_hash(rx_ring, rx_desc, skb);
skb              1192 drivers/net/ethernet/intel/igc/igc_main.c 	skb_record_rx_queue(skb, rx_ring->queue_index);
skb              1194 drivers/net/ethernet/intel/igc/igc_main.c 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
skb              1228 drivers/net/ethernet/intel/igc/igc_main.c 			    struct sk_buff *skb,
skb              1234 drivers/net/ethernet/intel/igc/igc_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
skb              1241 drivers/net/ethernet/intel/igc/igc_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
skb              1259 drivers/net/ethernet/intel/igc/igc_main.c 	struct sk_buff *skb;
skb              1268 drivers/net/ethernet/intel/igc/igc_main.c 	skb = build_skb(va - IGC_SKB_PAD, truesize);
skb              1269 drivers/net/ethernet/intel/igc/igc_main.c 	if (unlikely(!skb))
skb              1273 drivers/net/ethernet/intel/igc/igc_main.c 	skb_reserve(skb, IGC_SKB_PAD);
skb              1274 drivers/net/ethernet/intel/igc/igc_main.c 	__skb_put(skb, size);
skb              1283 drivers/net/ethernet/intel/igc/igc_main.c 	return skb;
skb              1298 drivers/net/ethernet/intel/igc/igc_main.c 	struct sk_buff *skb;
skb              1307 drivers/net/ethernet/intel/igc/igc_main.c 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
skb              1308 drivers/net/ethernet/intel/igc/igc_main.c 	if (unlikely(!skb))
skb              1314 drivers/net/ethernet/intel/igc/igc_main.c 		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
skb              1317 drivers/net/ethernet/intel/igc/igc_main.c 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
skb              1322 drivers/net/ethernet/intel/igc/igc_main.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
skb              1334 drivers/net/ethernet/intel/igc/igc_main.c 	return skb;
skb              1448 drivers/net/ethernet/intel/igc/igc_main.c 				struct sk_buff *skb)
skb              1455 drivers/net/ethernet/intel/igc/igc_main.c 			dev_kfree_skb_any(skb);
skb              1461 drivers/net/ethernet/intel/igc/igc_main.c 	if (eth_skb_pad(skb))
skb              1561 drivers/net/ethernet/intel/igc/igc_main.c 	struct sk_buff *skb = rx_ring->skb;
skb              1589 drivers/net/ethernet/intel/igc/igc_main.c 		if (skb)
skb              1590 drivers/net/ethernet/intel/igc/igc_main.c 			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
skb              1592 drivers/net/ethernet/intel/igc/igc_main.c 			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
skb              1594 drivers/net/ethernet/intel/igc/igc_main.c 			skb = igc_construct_skb(rx_ring, rx_buffer,
skb              1598 drivers/net/ethernet/intel/igc/igc_main.c 		if (!skb) {
skb              1612 drivers/net/ethernet/intel/igc/igc_main.c 		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
skb              1613 drivers/net/ethernet/intel/igc/igc_main.c 			skb = NULL;
skb              1618 drivers/net/ethernet/intel/igc/igc_main.c 		total_bytes += skb->len;
skb              1621 drivers/net/ethernet/intel/igc/igc_main.c 		igc_process_skb_fields(rx_ring, rx_desc, skb);
skb              1623 drivers/net/ethernet/intel/igc/igc_main.c 		napi_gro_receive(&q_vector->napi, skb);
skb              1626 drivers/net/ethernet/intel/igc/igc_main.c 		skb = NULL;
skb              1633 drivers/net/ethernet/intel/igc/igc_main.c 	rx_ring->skb = skb;
skb              1740 drivers/net/ethernet/intel/igc/igc_main.c 		napi_consume_skb(tx_buffer->skb, napi_budget);
skb              2292 drivers/net/ethernet/intel/igc/igc_main.c igc_features_check(struct sk_buff *skb, struct net_device *dev,
skb              2298 drivers/net/ethernet/intel/igc/igc_main.c 	mac_hdr_len = skb_network_header(skb) - skb->data;
skb              2306 drivers/net/ethernet/intel/igc/igc_main.c 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
skb              2316 drivers/net/ethernet/intel/igc/igc_main.c 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
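
Both igb_xmit_frame() and igc_xmit_frame() above refuse to transmit anything shorter than 17 bytes, padding instead, since that is the minimum length the MAC handles with pad-short-packets enabled. A user-space model of that zero-pad step on a plain buffer, standing in for skb_put_padto():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MIN_TX_LEN 17	/* minimum frame length accepted by the
			 * igb/igc transmit paths above */

/* Extend short frames with zeroes; returns the (possibly grown)
 * length, with 'cap' guarding this toy buffer. The real helper
 * frees the skb and reports failure instead of returning 0. */
static size_t pad_to_min(unsigned char *buf, size_t len, size_t cap)
{
	if (len >= MIN_TX_LEN)
		return len;
	if (cap < MIN_TX_LEN)
		return 0;
	memset(buf + len, 0, MIN_TX_LEN - len);
	return MIN_TX_LEN;
}

int main(void)
{
	unsigned char frame[64] = { 0xde, 0xad, 0xbe, 0xef };

	printf("padded length: %zu\n", pad_to_min(frame, 4, sizeof(frame)));
	return 0;
}
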
skb                76 drivers/net/ethernet/intel/ixgb/ixgb.h 	struct sk_buff *skb;
skb                62 drivers/net/ethernet/intel/ixgb/ixgb_main.c static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
skb               905 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	if (buffer_info->skb) {
skb               906 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		dev_kfree_skb_any(buffer_info->skb);
skb               907 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		buffer_info->skb = NULL;
skb              1000 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		if (buffer_info->skb) {
skb              1001 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			dev_kfree_skb(buffer_info->skb);
skb              1002 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			buffer_info->skb = NULL;
skb              1185 drivers/net/ethernet/intel/ixgb/ixgb_main.c ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
skb              1192 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	if (likely(skb_is_gso(skb))) {
skb              1197 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		err = skb_cow_head(skb, 0);
skb              1201 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1202 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		mss = skb_shinfo(skb)->gso_size;
skb              1203 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		iph = ip_hdr(skb);
skb              1206 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
skb              1209 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		ipcss = skb_network_offset(skb);
skb              1210 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		ipcso = (void *)&(iph->check) - (void *)skb->data;
skb              1211 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		ipcse = skb_transport_offset(skb) - 1;
skb              1212 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		tucss = skb_transport_offset(skb);
skb              1213 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
skb              1236 drivers/net/ethernet/intel/ixgb/ixgb_main.c 						| (skb->len - (hdr_len)));
skb              1249 drivers/net/ethernet/intel/ixgb/ixgb_main.c ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
skb              1255 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              1257 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		css = skb_checksum_start_offset(skb);
skb              1258 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		cso = css + skb->csum_offset;
skb              1290 drivers/net/ethernet/intel/ixgb/ixgb_main.c ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
skb              1296 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	int len = skb_headlen(skb);
skb              1298 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	unsigned int mss = skb_shinfo(skb)->gso_size;
skb              1299 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1317 drivers/net/ethernet/intel/ixgb/ixgb_main.c 						  skb->data + offset,
skb              1334 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              1367 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	tx_ring->buffer_info[i].skb = skb;
skb              1478 drivers/net/ethernet/intel/ixgb/ixgb_main.c ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
skb              1488 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		dev_kfree_skb_any(skb);
skb              1492 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	if (skb->len <= 0) {
skb              1493 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		dev_kfree_skb_any(skb);
skb              1501 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	if (skb_vlan_tag_present(skb)) {
skb              1503 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		vlan_id = skb_vlan_tag_get(skb);
skb              1508 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	tso = ixgb_tso(adapter, skb);
skb              1510 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		dev_kfree_skb_any(skb);
skb              1516 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	else if (ixgb_tx_csum(adapter, skb))
skb              1519 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	count = ixgb_tx_map(adapter, skb, first);
skb              1527 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		dev_kfree_skb_any(skb);
skb              1878 drivers/net/ethernet/intel/ixgb/ixgb_main.c                  struct sk_buff *skb)
skb              1885 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		skb_checksum_none_assert(skb);
skb              1893 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		skb_checksum_none_assert(skb);
skb              1897 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1908 drivers/net/ethernet/intel/ixgb/ixgb_main.c 				 u32 length, struct sk_buff **skb)
skb              1920 drivers/net/ethernet/intel/ixgb/ixgb_main.c 				       (*skb)->data - NET_IP_ALIGN,
skb              1923 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	buffer_info->skb = *skb;
skb              1924 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	*skb = new_skb;
skb              1950 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		struct sk_buff *skb;
skb              1959 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		skb = buffer_info->skb;
skb              1960 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		buffer_info->skb = NULL;
skb              1962 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		prefetch(skb->data - NET_IP_ALIGN);
skb              1996 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			dev_kfree_skb_irq(skb);
skb              2003 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			dev_kfree_skb_irq(skb);
skb              2007 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
skb              2010 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		skb_put(skb, length);
skb              2013 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		ixgb_rx_checksum(adapter, rx_desc, skb);
skb              2015 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		skb->protocol = eth_type_trans(skb, netdev);
skb              2017 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              2020 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		netif_receive_skb(skb);
skb              2059 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	struct sk_buff *skb;
skb              2071 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		skb = buffer_info->skb;
skb              2072 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		if (skb) {
skb              2073 drivers/net/ethernet/intel/ixgb/ixgb_main.c 			skb_trim(skb, 0);
skb              2077 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
skb              2078 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		if (unlikely(!skb)) {
skb              2084 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		buffer_info->skb = skb;
skb              2088 drivers/net/ethernet/intel/ixgb/ixgb_main.c 		                                  skb->data,
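
ixgb_check_copybreak() above is the classic copybreak optimization: small receives are copied into a right-sized skb so the large ring buffer can be recycled immediately instead of being re-allocated. A sketch of the decision with a hypothetical 256-byte threshold in place of the driver's module parameter:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK 256	/* assumed threshold; the driver reads a
			 * tunable module parameter instead */

/* If the frame is small, copy it into a right-sized buffer and
 * mark the large ring buffer for reuse; otherwise pass the
 * original buffer up the stack. */
static unsigned char *maybe_copybreak(unsigned char *ring_buf,
				      unsigned int length,
				      unsigned char **keep_for_ring)
{
	unsigned char *copy;

	*keep_for_ring = NULL;
	if (length >= COPYBREAK)
		return ring_buf;

	copy = malloc(length);
	if (!copy)
		return ring_buf;	/* allocation failed: hand up the original */

	memcpy(copy, ring_buf, length);
	*keep_for_ring = ring_buf;	/* recycle the large buffer */
	return copy;
}

int main(void)
{
	unsigned char big[2048] = "tiny frame";
	unsigned char *recycle, *up;

	up = maybe_copybreak(big, 11, &recycle);
	printf("delivered %s buffer\n", recycle ? "copied" : "original");
	if (up != big)
		free(up);
	return 0;
}
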
skb               215 drivers/net/ethernet/intel/ixgbe/ixgbe.h 		struct sk_buff *skb;
skb               227 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	struct sk_buff *skb;
skb               824 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
skb               927 drivers/net/ethernet/intel/ixgbe/ixgbe.h 		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
skb               969 drivers/net/ethernet/intel/ixgbe/ixgbe.h void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
skb               972 drivers/net/ethernet/intel/ixgbe/ixgbe.h 					 struct sk_buff *skb)
skb               975 drivers/net/ethernet/intel/ixgbe/ixgbe.h 		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
skb               982 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
skb               999 drivers/net/ethernet/intel/ixgbe/ixgbe.h netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
skb              1013 drivers/net/ethernet/intel/ixgbe/ixgbe.h 		    struct sk_buff *skb);
skb              1025 drivers/net/ethernet/intel/ixgbe/ixgbe.h 				  struct sk_buff *skb) { }
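
The IXGBE_CB() macro above overlays a driver-private struct on the skb's cb[] scratch area so per-packet state (the dma handle, the RSC append_cnt, the page_released flag, all visible in the ixgbe_main.c lines below) travels with the skb itself. A minimal model of that overlay with a compile-time size check; the 48-byte cb size matches struct sk_buff, the rest is illustrative:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy skb carrying the 48-byte cb[] scratch area real sk_buffs have. */
struct toy_skb {
	char cb[48];
};

/* Control block modeled on the fields used in the listing. */
struct toy_cb {
	unsigned long long dma;
	unsigned short append_cnt;
	bool page_released;
};

/* The overlay is only safe if the private struct fits in cb[]. */
static_assert(sizeof(struct toy_cb) <= sizeof(((struct toy_skb *)0)->cb),
	      "control block too large for skb->cb");

#define TOY_CB(skb) ((struct toy_cb *)(skb)->cb)

int main(void)
{
	struct toy_skb skb = { { 0 } };

	TOY_CB(&skb)->append_cnt = 3;	/* e.g. RSC coalesced 3 extra buffers */
	printf("append_cnt = %u\n", TOY_CB(&skb)->append_cnt);
	return 0;
}
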
skb              1856 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
skb              1859 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	memset(skb->data, 0xFF, frame_size);
skb              1861 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
skb              1862 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	memset(&skb->data[frame_size + 10], 0xBE, 1);
skb              1863 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	memset(&skb->data[frame_size + 12], 0xAF, 1);
skb              1912 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		dev_kfree_skb_any(tx_buffer->skb);
skb              1977 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	struct sk_buff *skb;
skb              1984 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	skb = alloc_skb(size, GFP_KERNEL);
skb              1985 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	if (!skb)
skb              1989 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	ixgbe_create_lbtest_frame(skb, size);
skb              1990 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	skb_put(skb, size);
skb              2009 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			skb_get(skb);
skb              2010 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			tx_ret_val = ixgbe_xmit_frame_ring(skb,
skb              2033 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	kfree_skb(skb);
skb               384 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		   struct sk_buff *skb)
skb               398 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		skb->ip_summed = CHECKSUM_NONE;
skb               400 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               402 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
skb               403 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		fh = (struct fc_frame_header *)(skb->data +
skb               406 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		fh = (struct fc_frame_header *)(skb->data +
skb               469 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		skb_linearize(skb);
skb               470 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		crc = skb_put(skb, sizeof(*crc));
skb               491 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	struct sk_buff *skb = first->skb;
skb               499 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
skb               501 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 			skb_shinfo(skb)->gso_type);
skb               506 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	skb_set_network_header(skb, skb->mac_len);
skb               507 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	skb_set_transport_header(skb, skb->mac_len +
skb               511 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
skb               531 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	skb_copy_bits(skb, skb->len - 4, &eof, 1);
skb               539 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		if (skb_is_gso(skb))
skb               557 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	fh = (struct fc_frame_header *)skb_transport_header(skb);
skb               565 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	if (skb_is_gso(skb)) {
skb               566 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		*hdr_len += skb_transport_offset(skb) +
skb               569 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
skb               570 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 					       skb_shinfo(skb)->gso_size);
skb               581 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
skb               584 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	vlan_macip_lens = skb_transport_offset(skb) +
skb               586 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
skb               814 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
skb               818 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		if (ip_hdr(skb)->ihl != 5)
skb               822 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
skb              1071 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 	sp = skb_sec_path(first->skb);
skb              1078 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 	xs = xfrm_input_state(first->skb);
skb              1116 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		if (!skb_is_gso(first->skb)) {
skb              1125 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 			struct sk_buff *skb = first->skb;
skb              1129 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
skb              1153 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		    struct sk_buff *skb)
skb              1176 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
skb              1180 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
skb              1204 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 	sp = secpath_set(skb);
skb              1210 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 	xo = xfrm_offload(skb);
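
ixgbe_ipsec_offload_ok() above rejects frames the crypto engine cannot parse at fixed offsets: IPv4 packets carrying options (ihl != 5) and IPv6 packets whose first next-header is an extension header. A standalone sketch of the same gate; the struct and the extension-header list here are partial stand-ins for the kernel's ip_hdr()/ipv6_ext_hdr(), not their definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in holding only the fields the check reads. */
struct toy_iphdr {
	uint8_t version;	/* 4 or 6 */
	uint8_t ihl;		/* IPv4 header length in 32-bit words */
	uint8_t v6_nexthdr;	/* first next-header value for IPv6 */
};

/* Crude analog of ipv6_ext_hdr(): hop-by-hop, routing, fragment,
 * destination options; the kernel's list covers more values. */
static bool is_v6_ext_hdr(uint8_t nexthdr)
{
	return nexthdr == 0 || nexthdr == 43 ||
	       nexthdr == 44 || nexthdr == 60;
}

/* Mirrors the gate: hardware can only offload headers it can
 * parse at fixed offsets. */
static bool ipsec_offload_ok(const struct toy_iphdr *h)
{
	if (h->version == 4)
		return h->ihl == 5;	/* no IPv4 options */
	return !is_v6_ext_hdr(h->v6_nexthdr);
}

int main(void)
{
	struct toy_iphdr plain4 = { .version = 4, .ihl = 5 };
	struct toy_iphdr opts4  = { .version = 4, .ihl = 6 };

	printf("plain v4: %s, v4+options: %s\n",
	       ipsec_offload_ok(&plain4) ? "offload" : "software",
	       ipsec_offload_ok(&opts4) ? "offload" : "software");
	return 0;
}
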
skb               696 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					tx_buffer->skb,
skb               700 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				    tx_buffer->skb)
skb               703 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 						tx_buffer->skb->data,
skb               804 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					rx_buffer_info->skb,
skb               812 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					rx_buffer_info->skb,
skb              1156 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			napi_consume_skb(tx_buffer->skb, napi_budget);
skb              1424 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				 struct sk_buff *skb)
skb              1437 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
skb              1470 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				     struct sk_buff *skb)
skb              1475 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb_checksum_none_assert(skb);
skb              1484 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		skb->encapsulation = 1;
skb              1511 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1517 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			skb->ip_summed = CHECKSUM_NONE;
skb              1521 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		skb->csum_level = 1;
skb              1645 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				   struct sk_buff *skb)
skb              1647 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u16 hdr_len = skb_headlen(skb);
skb              1650 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
skb              1651 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 						 IXGBE_CB(skb)->append_cnt);
skb              1652 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
skb              1656 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				   struct sk_buff *skb)
skb              1659 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!IXGBE_CB(skb)->append_cnt)
skb              1662 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
skb              1665 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_set_rsc_gso_size(rx_ring, skb);
skb              1668 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	IXGBE_CB(skb)->append_cnt = 0;
skb              1683 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			      struct sk_buff *skb)
skb              1688 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_update_rsc_stats(rx_ring, skb);
skb              1690 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_rx_hash(rx_ring, rx_desc, skb);
skb              1692 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_rx_checksum(rx_ring, rx_desc, skb);
skb              1695 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
skb              1700 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              1704 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
skb              1708 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		skb_record_rx_queue(skb, rx_ring->queue_index);
skb              1710 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
skb              1713 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb->protocol = eth_type_trans(skb, dev);
skb              1717 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		  struct sk_buff *skb)
skb              1719 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	napi_gro_receive(&q_vector->napi, skb);
skb              1735 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			     struct sk_buff *skb)
skb              1754 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
skb              1768 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	rx_ring->rx_buffer_info[ntc].skb = skb;
skb              1787 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			    struct sk_buff *skb)
skb              1789 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
skb              1804 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
skb              1807 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
skb              1812 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb->data_len -= pull_len;
skb              1813 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb->tail += pull_len;
skb              1827 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				struct sk_buff *skb)
skb              1830 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
skb              1833 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					      IXGBE_CB(skb)->dma,
skb              1835 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					      skb_headlen(skb),
skb              1838 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
skb              1841 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					      IXGBE_CB(skb)->dma,
skb              1848 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(IXGBE_CB(skb)->page_released)) {
skb              1849 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
skb              1880 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			   struct sk_buff *skb)
skb              1885 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (IS_ERR(skb))
skb              1895 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		dev_kfree_skb_any(skb);
skb              1900 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!skb_headlen(skb))
skb              1901 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_pull_tail(rx_ring, skb);
skb              1910 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (eth_skb_pad(skb))
skb              2004 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			      struct sk_buff *skb,
skb              2014 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
skb              2025 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 						   struct sk_buff **skb,
skb              2032 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	*skb = rx_buffer->skb;
skb              2039 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (!*skb)
skb              2042 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (*skb)
skb              2043 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			ixgbe_dma_sync_frag(rx_ring, *skb);
skb              2060 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				struct sk_buff *skb)
skb              2066 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
skb              2068 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			IXGBE_CB(skb)->page_released = true;
skb              2082 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	rx_buffer->skb = NULL;
skb              2097 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct sk_buff *skb;
skb              2121 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
skb              2122 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(!skb))
skb              2127 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			IXGBE_CB(skb)->dma = rx_buffer->dma;
skb              2129 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
skb              2138 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		memcpy(__skb_put(skb, size),
skb              2143 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return skb;
skb              2159 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct sk_buff *skb;
skb              2172 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb = build_skb(xdp->data_hard_start, truesize);
skb              2173 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(!skb))
skb              2177 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb              2178 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	__skb_put(skb, xdp->data_end - xdp->data);
skb              2180 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		skb_metadata_set(skb, metasize);
skb              2184 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		IXGBE_CB(skb)->dma = rx_buffer->dma;
skb              2193 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return skb;
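
The ixgbe hits above alternate between two RX paths: small frames are copied into a napi_alloc_skb() buffer (ixgbe_construct_skb), larger ones wrap the DMA page directly, as ixgbe_build_skb() does. A minimal sketch of that second, copy-free pattern follows; the function name and the truesize handling are illustrative, not the driver's exact code.

#include <linux/skbuff.h>
#include <net/xdp.h>

static struct sk_buff *my_build_skb(struct xdp_buff *xdp,
                                    unsigned int truesize)
{
        unsigned int metasize = xdp->data - xdp->data_meta;
        struct sk_buff *skb;

        /* Hand the whole buffer to the skb layer without copying. */
        skb = build_skb(xdp->data_hard_start, truesize);
        if (unlikely(!skb))
                return NULL;

        /* Carve out headroom, then expose the received payload. */
        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        __skb_put(skb, xdp->data_end - xdp->data);
        if (metasize)
                skb_metadata_set(skb, metasize);

        return skb;
}
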
skb              2296 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct sk_buff *skb;
skb              2316 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
skb              2319 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (!skb) {
skb              2327 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
skb              2330 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (IS_ERR(skb)) {
skb              2331 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			unsigned int xdp_res = -PTR_ERR(skb);
skb              2341 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		} else if (skb) {
skb              2342 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
skb              2344 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			skb = ixgbe_build_skb(rx_ring, rx_buffer,
skb              2347 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			skb = ixgbe_construct_skb(rx_ring, rx_buffer,
skb              2352 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (!skb) {
skb              2358 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
skb              2362 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
skb              2366 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
skb              2370 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		total_rx_bytes += skb->len;
skb              2373 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
skb              2378 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
skb              2394 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				dev_kfree_skb_any(skb);
skb              2400 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_rx_skb(q_vector, skb);
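
The clean_rx_irq hits above fold three outcomes into one pointer: a real skb, NULL (allocation failure), or an XDP verdict smuggled through ERR_PTR(). A sketch of decoding that convention, with an illustrative verdict bit standing in for IXGBE_XDP_TX:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/skbuff.h>

#define MY_XDP_TX       BIT(1)  /* illustrative verdict flag */

/* The XDP stage would return ERR_PTR(-MY_XDP_TX) instead of an skb. */
static bool my_xdp_consumed(struct sk_buff *skb)
{
        if (IS_ERR(skb)) {
                unsigned int xdp_res = -PTR_ERR(skb);

                return xdp_res & MY_XDP_TX;     /* frame handled by XDP */
        }
        return false;
}
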
skb              5300 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (rx_buffer->skb) {
skb              5301 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			struct sk_buff *skb = rx_buffer->skb;
skb              5302 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (IXGBE_CB(skb)->page_released)
skb              5304 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 						     IXGBE_CB(skb)->dma,
skb              5308 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			dev_kfree_skb(skb);
skb              5993 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			dev_kfree_skb_any(tx_buffer->skb);
skb              7957 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct sk_buff *skb = first->skb;
skb              7971 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              7974 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!skb_is_gso(skb))
skb              7977 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	err = skb_cow_head(skb, 0);
skb              7982 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ip.hdr = skb_inner_network_header(skb);
skb              7984 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ip.hdr = skb_network_header(skb);
skb              7985 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	l4.hdr = skb_checksum_start(skb);
skb              7992 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		unsigned char *csum_start = skb_checksum_start(skb);
skb              8000 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
skb              8016 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	l4_offset = l4.hdr - skb->data;
skb              8022 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	paylen = skb->len - l4_offset;
skb              8026 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	first->gso_segs = skb_shinfo(skb)->gso_segs;
skb              8031 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
skb              8038 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
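
The paylen lines above implement a standard TSO fixup: the hardware re-inserts per-segment lengths, so the precomputed TCP pseudo-header checksum must have the total payload length removed first. A condensed sketch, with an illustrative helper name:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

static void my_tso_strip_paylen(struct sk_buff *skb)
{
        unsigned char *l4 = skb_checksum_start(skb);
        u32 paylen = skb->len - (l4 - skb->data);

        /* Remove the total length from the pseudo-header checksum. */
        csum_replace_by_diff(&tcp_hdr(skb)->check,
                             (__force __wsum)htonl(paylen));
}
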
skb              8047 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
skb              8051 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
skb              8053 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return offset == skb_checksum_start_offset(skb);
skb              8060 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct sk_buff *skb = first->skb;
skb              8065 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb              8073 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	switch (skb->csum_offset) {
skb              8082 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
skb              8084 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		     ixgbe_ipv6_csum_is_sctp(skb))) {
skb              8090 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		skb_checksum_help(skb);
skb              8096 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	vlan_macip_lens = skb_checksum_start_offset(skb) -
skb              8097 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			  skb_network_offset(skb);
skb              8100 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
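
The tx_csum hits above gate offload on skb->csum_offset: only checksum fields the hardware knows how to fill (TCP, UDP, and SCTP's CRC, subject to the extra SCTP validity check shown above) stay offloaded; anything else falls back to skb_checksum_help(). A condensed sketch of that gate:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>

static void my_tx_csum(struct sk_buff *skb)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return;

        switch (skb->csum_offset) {
        case offsetof(struct tcphdr, check):
        case offsetof(struct udphdr, check):
        case offsetof(struct sctphdr, checksum):
                /* program a context descriptor for hardware csum here */
                break;
        default:
                /* unsupported offset: compute in software instead */
                skb_checksum_help(skb);
        }
}
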
skb              8114 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
skb              8134 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
skb              8204 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct sk_buff *skb = first->skb;
skb              8211 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
skb              8216 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
skb              8218 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	size = skb_headlen(skb);
skb              8219 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	data_len = skb->data_len;
skb              8232 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
skb              8236 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
skb              8299 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb_tx_timestamp(skb);
skb              8346 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	dev_kfree_skb_any(first->skb);
skb              8347 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	first->skb = NULL;
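
A sketch of the tx_map walk above: map the linear head with dma_map_single(), then each page fragment with skb_frag_dma_map(), unwinding everything if a step fails. Descriptor writes are elided and names are illustrative.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int my_map_tx_skb(struct device *dev, struct sk_buff *skb)
{
        const skb_frag_t *frag;
        dma_addr_t dma;
        int i;

        dma = dma_map_single(dev, skb->data, skb_headlen(skb),
                             DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;
        /* ... write a descriptor for the head here ... */

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
                                       DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma))
                        goto unwind;
                /* ... write a descriptor for this fragment ... */
        }

        skb_tx_timestamp(skb);  /* just before the doorbell, as above */
        return 0;

unwind:
        /* real drivers unmap the head and already-mapped frags here */
        return -ENOMEM;
}
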
skb              8367 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct sk_buff *skb;
skb              8387 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	skb = first->skb;
skb              8388 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	hdr.network = skb_network_header(skb);
skb              8389 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(hdr.network <= skb->data))
skb              8391 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (skb->encapsulation &&
skb              8396 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (unlikely(skb_tail_pointer(skb) < hdr.network +
skb              8402 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		    udp_hdr(skb)->dest == adapter->vxlan_port)
skb              8403 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			hdr.network = skb_inner_network_header(skb);
skb              8406 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		    udp_hdr(skb)->dest == adapter->geneve_port)
skb              8407 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			hdr.network = skb_inner_network_header(skb);
skb              8413 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
skb              8424 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		hlen = hdr.network - skb->data;
skb              8425 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
skb              8426 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		hlen -= hdr.network - skb->data;
skb              8435 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(skb_tail_pointer(skb) < hdr.network +
skb              8493 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (hdr.network != skb_network_header(skb))
skb              8502 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              8510 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
skb              8514 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		txq += reciprocal_scale(skb_get_hash(skb),
skb              8524 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	switch (vlan_get_protocol(skb)) {
skb              8533 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		return netdev_pick_tx(dev, skb, sb_dev);
skb              8538 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
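
The ixgbe_select_queue() hits above spread FCoE flows across a queue range by hash; reciprocal_scale() maps the 32-bit flow hash onto [0, count) without a divide. A sketch with illustrative parameters:

#include <linux/kernel.h>
#include <linux/skbuff.h>

static u16 my_pick_tx_queue(struct sk_buff *skb, u16 first, u16 count)
{
        return first + reciprocal_scale(skb_get_hash(skb), count);
}
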
skb              8605 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
skb              8613 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
skb              8615 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	__be16 protocol = skb->protocol;
skb              8625 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
skb              8627 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 						&skb_shinfo(skb)->frags[f]));
skb              8636 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	first->skb = skb;
skb              8637 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	first->bytecount = skb->len;
skb              8641 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (skb_vlan_tag_present(skb)) {
skb              8642 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
skb              8647 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
skb              8655 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	protocol = vlan_get_protocol(skb);
skb              8657 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              8662 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              8666 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			adapter->ptp_tx_skb = skb_get(skb);
skb              8686 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	     (skb->priority != TC_PRIO_CONTROL))) {
skb              8688 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		tx_flags |= (skb->priority & 0x7) <<
skb              8693 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (skb_cow_head(skb, 0))
skb              8695 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			vhdr = (struct vlan_ethhdr *)skb->data;
skb              8721 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (xfrm_offload(skb) &&
skb              8744 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	dev_kfree_skb_any(first->skb);
skb              8745 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	first->skb = NULL;
skb              8757 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
skb              8768 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (skb_put_padto(skb, 17))
skb              8771 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
skb              8775 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
skb              8778 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
skb              8781 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return __ixgbe_xmit_frame(skb, netdev, NULL);
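
The xmit hits above include a single-slot TX timestamp latch: the MAC can timestamp only one frame at a time, so the driver marks the skb in-progress and holds an extra reference until the timestamp interrupt fires. A sketch of that latch; my_adapter is an illustrative stand-in for the driver's private struct.

#include <linux/skbuff.h>

struct my_adapter {
        struct sk_buff *ptp_tx_skb;     /* frame awaiting its timestamp */
};

static void my_latch_tx_tstamp(struct my_adapter *adapter,
                               struct sk_buff *skb)
{
        if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                return;
        if (adapter->ptp_tx_skb)
                return;         /* slot busy: frame goes untimestamped */

        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        adapter->ptp_tx_skb = skb_get(skb);     /* dropped after the IRQ */
}
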
skb              10050 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
skb              10059 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
skb              10200 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
skb              10206 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	mac_hdr_len = skb_network_header(skb) - skb->data;
skb              10214 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
skb              10226 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
skb              10228 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (!secpath_exists(skb))
skb               814 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 	struct sk_buff *skb = adapter->ptp_tx_skb;
skb               832 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 	skb_tstamp_tx(skb, &shhwtstamps);
skb               833 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 	dev_kfree_skb_any(skb);
skb               886 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 			   struct sk_buff *skb)
skb               891 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 	skb_copy_bits(skb, skb->len - IXGBE_TS_HDR_LEN, &regval,
skb               893 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 	__pskb_trim(skb, skb->len - IXGBE_TS_HDR_LEN);
skb               901 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 	ixgbe_ptp_convert_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
skb               915 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 			   struct sk_buff *skb)
skb               940 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 	ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
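
The ixgbe_ptp.c hits above show the timestamp-in-payload case: the MAC appends the raw counter to the frame, so the driver reads it from the tail, trims it off, and converts it for the stack. A sketch of that shape; MY_TS_HDR_LEN and the direct ns conversion are illustrative (the real driver scales through a timecounter).

#include <linux/skbuff.h>
#include <linux/ktime.h>

#define MY_TS_HDR_LEN   8

static void my_rx_ts_from_trailer(struct sk_buff *skb)
{
        __le64 regval = 0;

        skb_copy_bits(skb, skb->len - MY_TS_HDR_LEN, &regval,
                      MY_TS_HDR_LEN);
        __pskb_trim(skb, skb->len - MY_TS_HDR_LEN);

        skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(le64_to_cpu(regval));
}
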
skb                19 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h 			   struct sk_buff *skb);
skb                22 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h 			      struct sk_buff *skb);
skb                24 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h 		  struct sk_buff *skb);
skb               223 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	obi->skb = NULL;
skb               400 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct sk_buff *skb;
skb               403 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
skb               406 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	if (unlikely(!skb))
skb               409 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb               410 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
skb               412 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		skb_metadata_set(skb, metasize);
skb               415 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	return skb;
skb               436 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct sk_buff *skb;
skb               475 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 			next_bi->skb = ERR_PTR(-EINVAL);
skb               479 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		if (unlikely(bi->skb)) {
skb               497 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 				bi->skb = NULL;
skb               510 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
skb               511 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		if (!skb) {
skb               519 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		if (eth_skb_pad(skb))
skb               522 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		total_rx_bytes += skb->len;
skb               525 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
skb               526 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		ixgbe_rx_skb(q_vector, skb);
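
The xsk hits above always copy: AF_XDP zero-copy RX buffers belong to the userspace UMEM, so a frame destined for the kernel stack must be duplicated into a fresh skb. A sketch of ixgbe_construct_skb_zc()'s shape:

#include <linux/skbuff.h>
#include <net/xdp.h>

static struct sk_buff *my_construct_skb_zc(struct napi_struct *napi,
                                           struct xdp_buff *xdp)
{
        unsigned int metasize = xdp->data - xdp->data_meta;
        unsigned int datasize = xdp->data_end - xdp->data;
        struct sk_buff *skb;

        skb = __napi_alloc_skb(napi,
                               xdp->data_end - xdp->data_hard_start,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        /* Copy payload (and metadata headroom) out of the UMEM page. */
        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        memcpy(__skb_put(skb, datasize), xdp->data, datasize);
        if (metasize)
                skb_metadata_set(skb, metasize);

        return skb;
}
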
skb               419 drivers/net/ethernet/intel/ixgbevf/ipsec.c static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
skb               423 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		if (ip_hdr(skb)->ihl != 5)
skb               427 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
skb               457 drivers/net/ethernet/intel/ixgbevf/ipsec.c 	sp = skb_sec_path(first->skb);
skb               464 drivers/net/ethernet/intel/ixgbevf/ipsec.c 	xs = xfrm_input_state(first->skb);
skb               503 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		if (!skb_is_gso(first->skb)) {
skb               512 drivers/net/ethernet/intel/ixgbevf/ipsec.c 			struct sk_buff *skb = first->skb;
skb               516 drivers/net/ethernet/intel/ixgbevf/ipsec.c 			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
skb               540 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		      struct sk_buff *skb)
skb               563 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
skb               567 drivers/net/ethernet/intel/ixgbevf/ipsec.c 		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
skb               591 drivers/net/ethernet/intel/ixgbevf/ipsec.c 	sp = secpath_set(skb);
skb               597 drivers/net/ethernet/intel/ixgbevf/ipsec.c 	xo = xfrm_offload(skb);
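
The ipsec.c hits end with the RX-side hand-off: the driver attaches a secpath, records the matched state, and flags the crypto as already done so the xfrm layer skips software decryption. A sketch of that hand-off; reference counting on xs is elided.

#include <net/xfrm.h>

static void my_ipsec_rx_done(struct sk_buff *skb, struct xfrm_state *xs)
{
        struct sec_path *sp = secpath_set(skb);
        struct xfrm_offload *xo;

        if (!sp)
                return;

        sp->xvec[sp->len++] = xs;       /* hardware-matched SA */
        sp->olen++;

        xo = xfrm_offload(skb);
        if (xo) {
                xo->flags = CRYPTO_DONE;
                xo->status = CRYPTO_SUCCESS;
        }
}
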
skb                33 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 		struct sk_buff *skb;
skb               118 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	struct sk_buff *skb;
skb               468 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 		      struct sk_buff *skb);
skb               480 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 				    struct sk_buff *skb) { }
skb               310 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			napi_consume_skb(tx_buffer->skb, napi_budget);
skb               432 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			   struct sk_buff *skb)
skb               434 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	napi_gro_receive(&q_vector->napi, skb);
skb               445 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				   struct sk_buff *skb)
skb               458 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
skb               471 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				       struct sk_buff *skb)
skb               473 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	skb_checksum_none_assert(skb);
skb               495 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               510 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				       struct sk_buff *skb)
skb               512 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
skb               513 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
skb               520 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb               524 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
skb               526 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
skb               552 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				  struct sk_buff *skb)
skb               558 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (IS_ERR(skb))
skb               736 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				    struct sk_buff *skb)
skb               739 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (IS_ERR(skb))
skb               748 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			dev_kfree_skb_any(skb);
skb               754 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (eth_skb_pad(skb))
skb               836 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				struct sk_buff *skb,
skb               846 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
skb               869 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct sk_buff *skb;
skb               892 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
skb               893 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (unlikely(!skb))
skb               899 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		headlen = eth_get_headlen(skb->dev, xdp->data,
skb               903 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	memcpy(__skb_put(skb, headlen), xdp->data,
skb               909 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		skb_add_rx_frag(skb, 0, rx_buffer->page,
skb               922 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	return skb;
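
The construct path above is a header-split: only the protocol headers, sized by eth_get_headlen(), are pulled into the linear area, and the rest of the page rides along as a fragment. A sketch with illustrative sizes and names:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#define MY_RX_HDR_SIZE  256

static struct sk_buff *my_construct_skb(struct napi_struct *napi,
                                        struct net_device *dev,
                                        struct page *page,
                                        unsigned int offset, void *va,
                                        unsigned int size,
                                        unsigned int truesize)
{
        unsigned int headlen = size;
        struct sk_buff *skb;

        skb = napi_alloc_skb(napi, MY_RX_HDR_SIZE);
        if (unlikely(!skb))
                return NULL;

        if (headlen > MY_RX_HDR_SIZE)
                headlen = eth_get_headlen(dev, va, MY_RX_HDR_SIZE);

        memcpy(__skb_put(skb, headlen), va, headlen);

        if (size > headlen)     /* rest of the packet stays in the page */
                skb_add_rx_frag(skb, 0, page, offset + headlen,
                                size - headlen, truesize);

        return skb;
}
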
skb               946 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct sk_buff *skb;
skb               959 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	skb = build_skb(xdp->data_hard_start, truesize);
skb               960 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (unlikely(!skb))
skb               964 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb               965 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	__skb_put(skb, xdp->data_end - xdp->data);
skb               967 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		skb_metadata_set(skb, metasize);
skb               976 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	return skb;
skb              1122 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct sk_buff *skb = rx_ring->skb;
skb              1153 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (!skb) {
skb              1161 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
skb              1164 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (IS_ERR(skb)) {
skb              1165 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
skb              1174 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		} else if (skb) {
skb              1175 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
skb              1177 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			skb = ixgbevf_build_skb(rx_ring, rx_buffer,
skb              1180 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
skb              1185 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (!skb) {
skb              1191 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
skb              1199 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
skb              1200 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			skb = NULL;
skb              1205 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		total_rx_bytes += skb->len;
skb              1210 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if ((skb->pkt_type == PACKET_BROADCAST ||
skb              1211 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		     skb->pkt_type == PACKET_MULTICAST) &&
skb              1213 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				     eth_hdr(skb)->h_source)) {
skb              1214 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			dev_kfree_skb_irq(skb);
skb              1219 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
skb              1221 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_rx_skb(q_vector, skb);
skb              1224 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		skb = NULL;
skb              1231 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	rx_ring->skb = skb;
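
Inside the RX loop above is a self-echo filter: some PF/VF switch configurations reflect a VF's own broadcasts back at it, so frames whose source MAC matches the port's address are dropped before GRO. A sketch of that test:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static bool my_is_self_echo(const struct sk_buff *skb, const u8 *own_mac)
{
        /* Valid only after eth_type_trans() has set the MAC header. */
        return (skb->pkt_type == PACKET_BROADCAST ||
                skb->pkt_type == PACKET_MULTICAST) &&
               ether_addr_equal(own_mac, eth_hdr(skb)->h_source);
}
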
skb              2332 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (rx_ring->skb) {
skb              2333 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		dev_kfree_skb(rx_ring->skb);
skb              2334 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		rx_ring->skb = NULL;
skb              2388 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			dev_kfree_skb_any(tx_buffer->skb);
skb              3745 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct sk_buff *skb = first->skb;
skb              3759 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              3762 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (!skb_is_gso(skb))
skb              3765 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	err = skb_cow_head(skb, 0);
skb              3770 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ip.hdr = skb_inner_network_header(skb);
skb              3772 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ip.hdr = skb_network_header(skb);
skb              3773 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	l4.hdr = skb_checksum_start(skb);
skb              3780 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		unsigned char *csum_start = skb_checksum_start(skb);
skb              3788 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
skb              3804 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	l4_offset = l4.hdr - skb->data;
skb              3810 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	paylen = skb->len - l4_offset;
skb              3814 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	first->gso_segs = skb_shinfo(skb)->gso_segs;
skb              3819 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
skb              3827 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
skb              3836 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
skb              3840 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
skb              3842 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	return offset == skb_checksum_start_offset(skb);
skb              3849 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct sk_buff *skb = first->skb;
skb              3854 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              3857 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	switch (skb->csum_offset) {
skb              3866 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
skb              3868 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		     ixgbevf_ipv6_csum_is_sctp(skb))) {
skb              3874 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		skb_checksum_help(skb);
skb              3883 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	vlan_macip_lens = skb_checksum_start_offset(skb) -
skb              3884 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			  skb_network_offset(skb);
skb              3887 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
skb              3948 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct sk_buff *skb = first->skb;
skb              3960 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
skb              3962 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	size = skb_headlen(skb);
skb              3963 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	data_len = skb->data_len;
skb              3965 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
skb              3969 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
skb              4026 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	skb_tx_timestamp(skb);
skb              4075 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	dev_kfree_skb_any(tx_buffer->skb);
skb              4076 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_buffer->skb = NULL;
skb              4110 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
skb              4116 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
skb              4122 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
skb              4125 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		dev_kfree_skb_any(skb);
skb              4136 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
skb              4137 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              4142 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	count += skb_shinfo(skb)->nr_frags;
skb              4151 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	first->skb = skb;
skb              4152 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	first->bytecount = skb->len;
skb              4155 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (skb_vlan_tag_present(skb)) {
skb              4156 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		tx_flags |= skb_vlan_tag_get(skb);
skb              4163 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	first->protocol = vlan_get_protocol(skb);
skb              4166 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
skb              4182 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	dev_kfree_skb_any(first->skb);
skb              4183 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	first->skb = NULL;
skb              4188 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
skb              4193 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (skb->len <= 0) {
skb              4194 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		dev_kfree_skb_any(skb);
skb              4201 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (skb->len < 17) {
skb              4202 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (skb_padto(skb, 17))
skb              4204 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		skb->len = 17;
skb              4207 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_ring = adapter->tx_ring[skb->queue_mapping];
skb              4208 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	return ixgbevf_xmit_frame_ring(skb, tx_ring);
skb              4415 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
skb              4421 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	mac_hdr_len = skb_network_header(skb) - skb->data;
skb              4429 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
skb              4439 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
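
The ixgbevf hits close with .ndo_features_check logic: when the MAC or network header is longer than the descriptor format can express, the offload bits are stripped so the stack does the work in software. A sketch; the limits here are illustrative stand-ins for the driver's constants.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_MAX_MAC_HDR_LEN      127
#define MY_MAX_NET_HDR_LEN      511

static netdev_features_t my_features_check(struct sk_buff *skb,
                                           struct net_device *dev,
                                           netdev_features_t features)
{
        unsigned int mac_len = skb_network_header(skb) - skb->data;
        unsigned int net_len = skb_checksum_start(skb) -
                               skb_network_header(skb);

        if (unlikely(mac_len > MY_MAX_MAC_HDR_LEN ||
                     net_len > MY_MAX_NET_HDR_LEN))
                features &= ~(NETIF_F_HW_CSUM | NETIF_F_TSO |
                              NETIF_F_TSO6 | NETIF_F_GSO_PARTIAL);

        return features;
}
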
skb               612 drivers/net/ethernet/jme.c 				if (txbi->skb) {
skb               613 drivers/net/ethernet/jme.c 					dev_kfree_skb(txbi->skb);
skb               614 drivers/net/ethernet/jme.c 					txbi->skb = NULL;
skb               729 drivers/net/ethernet/jme.c 	struct sk_buff *skb;
skb               732 drivers/net/ethernet/jme.c 	skb = netdev_alloc_skb(jme->dev,
skb               734 drivers/net/ethernet/jme.c 	if (unlikely(!skb))
skb               737 drivers/net/ethernet/jme.c 	mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
skb               738 drivers/net/ethernet/jme.c 			       offset_in_page(skb->data), skb_tailroom(skb),
skb               741 drivers/net/ethernet/jme.c 		dev_kfree_skb(skb);
skb               749 drivers/net/ethernet/jme.c 	rxbi->skb = skb;
skb               750 drivers/net/ethernet/jme.c 	rxbi->len = skb_tailroom(skb);
skb               762 drivers/net/ethernet/jme.c 	if (rxbi->skb) {
skb               767 drivers/net/ethernet/jme.c 		dev_kfree_skb(rxbi->skb);
skb               768 drivers/net/ethernet/jme.c 		rxbi->skb = NULL;
skb               941 drivers/net/ethernet/jme.c jme_udpsum(struct sk_buff *skb)
skb               945 drivers/net/ethernet/jme.c 	if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
skb               947 drivers/net/ethernet/jme.c 	if (skb->protocol != htons(ETH_P_IP))
skb               949 drivers/net/ethernet/jme.c 	skb_set_network_header(skb, ETH_HLEN);
skb               950 drivers/net/ethernet/jme.c 	if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
skb               951 drivers/net/ethernet/jme.c 	    (skb->len < (ETH_HLEN +
skb               952 drivers/net/ethernet/jme.c 			(ip_hdr(skb)->ihl << 2) +
skb               954 drivers/net/ethernet/jme.c 		skb_reset_network_header(skb);
skb               957 drivers/net/ethernet/jme.c 	skb_set_transport_header(skb,
skb               958 drivers/net/ethernet/jme.c 			ETH_HLEN + (ip_hdr(skb)->ihl << 2));
skb               959 drivers/net/ethernet/jme.c 	csum = udp_hdr(skb)->check;
skb               960 drivers/net/ethernet/jme.c 	skb_reset_transport_header(skb);
skb               961 drivers/net/ethernet/jme.c 	skb_reset_network_header(skb);
skb               967 drivers/net/ethernet/jme.c jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
skb               980 drivers/net/ethernet/jme.c 			== RXWBFLAG_UDPON) && jme_udpsum(skb)) {
skb              1001 drivers/net/ethernet/jme.c 	struct sk_buff *skb;
skb              1007 drivers/net/ethernet/jme.c 	skb = rxbi->skb;
skb              1024 drivers/net/ethernet/jme.c 		skb_reserve(skb, RX_PREPAD_SIZE);
skb              1025 drivers/net/ethernet/jme.c 		skb_put(skb, framesize);
skb              1026 drivers/net/ethernet/jme.c 		skb->protocol = eth_type_trans(skb, jme->dev);
skb              1028 drivers/net/ethernet/jme.c 		if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
skb              1029 drivers/net/ethernet/jme.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1031 drivers/net/ethernet/jme.c 			skb_checksum_none_assert(skb);
skb              1036 drivers/net/ethernet/jme.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              1039 drivers/net/ethernet/jme.c 		jme->jme_rx(skb);
skb              1446 drivers/net/ethernet/jme.c 		if (likely(ctxbi->skb &&
skb              1467 drivers/net/ethernet/jme.c 			dev_kfree_skb(ctxbi->skb);
skb              1478 drivers/net/ethernet/jme.c 			ctxbi->skb = NULL;
skb              1947 drivers/net/ethernet/jme.c 			struct sk_buff *skb)
skb              1953 drivers/net/ethernet/jme.c 	nr_alloc = skb_shinfo(skb)->nr_frags + 2;
skb              2024 drivers/net/ethernet/jme.c jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
skb              2030 drivers/net/ethernet/jme.c 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
skb              2036 drivers/net/ethernet/jme.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2050 drivers/net/ethernet/jme.c 	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
skb              2053 drivers/net/ethernet/jme.c 	ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
skb              2054 drivers/net/ethernet/jme.c 			offset_in_page(skb->data), len, hidma);
skb              2065 drivers/net/ethernet/jme.c jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
skb              2067 drivers/net/ethernet/jme.c 	*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
skb              2071 drivers/net/ethernet/jme.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb              2072 drivers/net/ethernet/jme.c 			struct iphdr *iph = ip_hdr(skb);
skb              2075 drivers/net/ethernet/jme.c 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
skb              2080 drivers/net/ethernet/jme.c 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb              2082 drivers/net/ethernet/jme.c 			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
skb              2095 drivers/net/ethernet/jme.c jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
skb              2097 drivers/net/ethernet/jme.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              2100 drivers/net/ethernet/jme.c 		switch (skb->protocol) {
skb              2102 drivers/net/ethernet/jme.c 			ip_proto = ip_hdr(skb)->protocol;
skb              2105 drivers/net/ethernet/jme.c 			ip_proto = ipv6_hdr(skb)->nexthdr;
skb              2127 drivers/net/ethernet/jme.c jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
skb              2129 drivers/net/ethernet/jme.c 	if (skb_vlan_tag_present(skb)) {
skb              2131 drivers/net/ethernet/jme.c 		*vlan = cpu_to_le16(skb_vlan_tag_get(skb));
skb              2136 drivers/net/ethernet/jme.c jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
skb              2151 drivers/net/ethernet/jme.c 	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
skb              2164 drivers/net/ethernet/jme.c 	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
skb              2165 drivers/net/ethernet/jme.c 		jme_tx_csum(jme, skb, &flags);
skb              2166 drivers/net/ethernet/jme.c 	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
skb              2167 drivers/net/ethernet/jme.c 	ret = jme_map_tx_skb(jme, skb, idx);
skb              2177 drivers/net/ethernet/jme.c 	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
skb              2178 drivers/net/ethernet/jme.c 	txbi->skb = skb;
skb              2179 drivers/net/ethernet/jme.c 	txbi->len = skb->len;
skb              2210 drivers/net/ethernet/jme.c 			txbi->skb)) {
skb              2222 drivers/net/ethernet/jme.c jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb              2227 drivers/net/ethernet/jme.c 	if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) {
skb              2228 drivers/net/ethernet/jme.c 		dev_kfree_skb_any(skb);
skb              2233 drivers/net/ethernet/jme.c 	idx = jme_alloc_txdesc(jme, skb);
skb              2243 drivers/net/ethernet/jme.c 	if (jme_fill_tx_desc(jme, skb, idx))
skb              2252 drivers/net/ethernet/jme.c 	       idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
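
jme_tx_tso() above seeds the pseudo-header for hardware TSO: the TCP checksum field is preloaded with the pseudo-header sum over a zero length so the MAC can finish each segment. A condensed sketch of that seeding:

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>

static void my_tso_seed_check(struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);

                iph->check = 0;
                tcp_hdr(skb)->check =
                        ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                           0, IPPROTO_TCP, 0);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = ipv6_hdr(skb);

                tcp_hdr(skb)->check =
                        ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                         0, IPPROTO_TCP, 0);
        }
}
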
skb               353 drivers/net/ethernet/jme.h 	struct sk_buff *skb;
skb               445 drivers/net/ethernet/jme.h 	int			(*jme_rx)(struct sk_buff *skb);
skb               199 drivers/net/ethernet/korina.c static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
skb               219 drivers/net/ethernet/korina.c 			dev_kfree_skb_any(skb);
skb               228 drivers/net/ethernet/korina.c 	lp->tx_skb[lp->tx_chain_tail] = skb;
skb               230 drivers/net/ethernet/korina.c 	length = skb->len;
skb               231 drivers/net/ethernet/korina.c 	dma_cache_wback((u32)skb->data, skb->len);
skb               235 drivers/net/ethernet/korina.c 	td->ca = CPHYSADDR(skb->data);
skb               355 drivers/net/ethernet/korina.c 	struct sk_buff *skb, *skb_new;
skb               363 drivers/net/ethernet/korina.c 		skb = lp->rx_skb[lp->rx_next_done];
skb               410 drivers/net/ethernet/korina.c 		skb_put(skb, pkt_len - 4);
skb               411 drivers/net/ethernet/korina.c 		skb->protocol = eth_type_trans(skb, dev);
skb               414 drivers/net/ethernet/korina.c 		napi_gro_receive(&lp->napi, skb);
skb               431 drivers/net/ethernet/korina.c 			rd->ca = CPHYSADDR(skb->data);
skb               453 drivers/net/ethernet/korina.c 		skb = lp->rx_skb[lp->rx_next_done];
skb               454 drivers/net/ethernet/korina.c 		rd->ca = CPHYSADDR(skb->data);
skb               745 drivers/net/ethernet/korina.c 	struct sk_buff *skb;
skb               761 drivers/net/ethernet/korina.c 		skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
skb               762 drivers/net/ethernet/korina.c 		if (!skb)
skb               764 drivers/net/ethernet/korina.c 		lp->rx_skb[i] = skb;
skb               768 drivers/net/ethernet/korina.c 		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
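
korina above predates the generic DMA API at these spots: it writes caches back by hand (dma_cache_wback) and stores CPHYSADDR(skb->data) straight into the descriptor, which ties the driver to MIPS KSEG0 addressing. A portable equivalent of that mapping step might look like the sketch below.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static dma_addr_t my_map_tx(struct device *dev, struct sk_buff *skb)
{
        /* dma_map_single() performs the cache maintenance and returns
         * a bus address the device may use; 0 stands for failure only
         * in this sketch.
         */
        dma_addr_t ca = dma_map_single(dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);

        return dma_mapping_error(dev, ca) ? 0 : ca;
}
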
skb                85 drivers/net/ethernet/lantiq_etop.c 	struct sk_buff *skb[LTQ_DESC_NUM];
skb               107 drivers/net/ethernet/lantiq_etop.c 	ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
skb               108 drivers/net/ethernet/lantiq_etop.c 	if (!ch->skb[ch->dma.desc])
skb               111 drivers/net/ethernet/lantiq_etop.c 		ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
skb               114 drivers/net/ethernet/lantiq_etop.c 		CPHYSADDR(ch->skb[ch->dma.desc]->data);
skb               118 drivers/net/ethernet/lantiq_etop.c 	skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
skb               127 drivers/net/ethernet/lantiq_etop.c 	struct sk_buff *skb = ch->skb[ch->dma.desc];
skb               141 drivers/net/ethernet/lantiq_etop.c 	skb_put(skb, len);
skb               142 drivers/net/ethernet/lantiq_etop.c 	skb->protocol = eth_type_trans(skb, ch->netdev);
skb               143 drivers/net/ethernet/lantiq_etop.c 	netif_receive_skb(skb);
skb               181 drivers/net/ethernet/lantiq_etop.c 		dev_kfree_skb_any(ch->skb[ch->tx_free]);
skb               182 drivers/net/ethernet/lantiq_etop.c 		ch->skb[ch->tx_free] = NULL;
skb               218 drivers/net/ethernet/lantiq_etop.c 			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
skb               455 drivers/net/ethernet/lantiq_etop.c ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
skb               457 drivers/net/ethernet/lantiq_etop.c 	int queue = skb_get_queue_mapping(skb);
skb               466 drivers/net/ethernet/lantiq_etop.c 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
skb               468 drivers/net/ethernet/lantiq_etop.c 	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
skb               469 drivers/net/ethernet/lantiq_etop.c 		dev_kfree_skb_any(skb);
skb               476 drivers/net/ethernet/lantiq_etop.c 	byte_offset = CPHYSADDR(skb->data) % 16;
skb               477 drivers/net/ethernet/lantiq_etop.c 	ch->skb[ch->dma.desc] = skb;
skb               482 drivers/net/ethernet/lantiq_etop.c 	desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len,
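
Both Lantiq drivers (here and xrx200 below) refuse to queue into a descriptor slot until the DMA engine has released it and the previous skb has been reaped. A sketch of that ownership test; the structs and bit positions are illustrative, mirroring LTQ_DMA_OWN and LTQ_DMA_C.

#include <linux/bits.h>
#include <linux/skbuff.h>

#define MY_DMA_OWN      BIT(31) /* device still owns the descriptor */
#define MY_DMA_C        BIT(30) /* completion not yet reaped */

struct my_desc { u32 ctl; };

struct my_chan {
        struct my_desc *descs;
        struct sk_buff *skb[64];        /* one parked skb per slot */
        int cur;
};

static bool my_slot_busy(const struct my_chan *ch)
{
        const struct my_desc *desc = &ch->descs[ch->cur];

        return (desc->ctl & (MY_DMA_OWN | MY_DMA_C)) || ch->skb[ch->cur];
}
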
skb                60 drivers/net/ethernet/lantiq_xrx200.c 	struct sk_buff *skb[LTQ_DESC_NUM];
skb               159 drivers/net/ethernet/lantiq_xrx200.c 	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
skb               161 drivers/net/ethernet/lantiq_xrx200.c 	if (!ch->skb[ch->dma.desc]) {
skb               167 drivers/net/ethernet/lantiq_xrx200.c 			ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
skb               171 drivers/net/ethernet/lantiq_xrx200.c 		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
skb               188 drivers/net/ethernet/lantiq_xrx200.c 	struct sk_buff *skb = ch->skb[ch->dma.desc];
skb               203 drivers/net/ethernet/lantiq_xrx200.c 	skb_put(skb, len);
skb               204 drivers/net/ethernet/lantiq_xrx200.c 	skb->protocol = eth_type_trans(skb, net_dev);
skb               205 drivers/net/ethernet/lantiq_xrx200.c 	netif_receive_skb(skb);
skb               252 drivers/net/ethernet/lantiq_xrx200.c 			struct sk_buff *skb = ch->skb[ch->tx_free];
skb               255 drivers/net/ethernet/lantiq_xrx200.c 			bytes += skb->len;
skb               256 drivers/net/ethernet/lantiq_xrx200.c 			ch->skb[ch->tx_free] = NULL;
skb               257 drivers/net/ethernet/lantiq_xrx200.c 			consume_skb(skb);
skb               279 drivers/net/ethernet/lantiq_xrx200.c static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
skb               288 drivers/net/ethernet/lantiq_xrx200.c 	skb->dev = net_dev;
skb               289 drivers/net/ethernet/lantiq_xrx200.c 	if (skb_put_padto(skb, ETH_ZLEN)) {
skb               294 drivers/net/ethernet/lantiq_xrx200.c 	len = skb->len;
skb               296 drivers/net/ethernet/lantiq_xrx200.c 	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
skb               302 drivers/net/ethernet/lantiq_xrx200.c 	ch->skb[ch->dma.desc] = skb;
skb               304 drivers/net/ethernet/lantiq_xrx200.c 	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
skb               326 drivers/net/ethernet/lantiq_xrx200.c 	dev_kfree_skb(skb);
skb               402 drivers/net/ethernet/lantiq_xrx200.c 		if (priv->chan_rx.skb[i])
skb               403 drivers/net/ethernet/lantiq_xrx200.c 			dev_kfree_skb_any(priv->chan_rx.skb[i]);
skb               420 drivers/net/ethernet/lantiq_xrx200.c 		dev_kfree_skb_any(priv->chan_rx.skb[i]);
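
xrx200_start_xmit() above pads with skb_put_padto(), which extends the buffer with real zeroed bytes before DMA mapping and frees the skb itself on failure. A sketch of that idiom:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (skb_put_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;    /* skb already freed */

        /* ... dma_map_single(skb->data, skb->len) and queue ... */
        return NETDEV_TX_OK;
}
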
skb               515 drivers/net/ethernet/marvell/mv643xx_eth.c 		struct sk_buff *skb;
skb               525 drivers/net/ethernet/marvell/mv643xx_eth.c 		skb = rxq->rx_skb[rxq->rx_curr_desc];
skb               565 drivers/net/ethernet/marvell/mv643xx_eth.c 		skb_put(skb, byte_cnt - 2 - 4);
skb               568 drivers/net/ethernet/marvell/mv643xx_eth.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               569 drivers/net/ethernet/marvell/mv643xx_eth.c 		skb->protocol = eth_type_trans(skb, mp->dev);
skb               571 drivers/net/ethernet/marvell/mv643xx_eth.c 		napi_gro_receive(&mp->napi, skb);
skb               588 drivers/net/ethernet/marvell/mv643xx_eth.c 		dev_kfree_skb(skb);
skb               604 drivers/net/ethernet/marvell/mv643xx_eth.c 		struct sk_buff *skb;
skb               609 drivers/net/ethernet/marvell/mv643xx_eth.c 		skb = netdev_alloc_skb(mp->dev, mp->skb_size);
skb               611 drivers/net/ethernet/marvell/mv643xx_eth.c 		if (skb == NULL) {
skb               617 drivers/net/ethernet/marvell/mv643xx_eth.c 			skb_reserve(skb, SKB_DMA_REALIGN);
skb               628 drivers/net/ethernet/marvell/mv643xx_eth.c 		size = skb_end_pointer(skb) - skb->data;
skb               630 drivers/net/ethernet/marvell/mv643xx_eth.c 						  skb->data, size,
skb               633 drivers/net/ethernet/marvell/mv643xx_eth.c 		rxq->rx_skb[rx] = skb;
skb               643 drivers/net/ethernet/marvell/mv643xx_eth.c 		skb_reserve(skb, 2);
skb               655 drivers/net/ethernet/marvell/mv643xx_eth.c static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
skb               659 drivers/net/ethernet/marvell/mv643xx_eth.c 	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
skb               660 drivers/net/ethernet/marvell/mv643xx_eth.c 		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
skb               674 drivers/net/ethernet/marvell/mv643xx_eth.c static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
skb               680 drivers/net/ethernet/marvell/mv643xx_eth.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               684 drivers/net/ethernet/marvell/mv643xx_eth.c 		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
skb               685 drivers/net/ethernet/marvell/mv643xx_eth.c 		       skb->protocol != htons(ETH_P_8021Q));
skb               687 drivers/net/ethernet/marvell/mv643xx_eth.c 		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
skb               692 drivers/net/ethernet/marvell/mv643xx_eth.c 			ret = skb_checksum_help(skb);
skb               705 drivers/net/ethernet/marvell/mv643xx_eth.c 			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
skb               709 drivers/net/ethernet/marvell/mv643xx_eth.c 		switch (ip_hdr(skb)->protocol) {
skb               731 drivers/net/ethernet/marvell/mv643xx_eth.c 		 struct sk_buff *skb, char *data, int length,
skb               778 drivers/net/ethernet/marvell/mv643xx_eth.c txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
skb               782 drivers/net/ethernet/marvell/mv643xx_eth.c 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               793 drivers/net/ethernet/marvell/mv643xx_eth.c 	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
skb               820 drivers/net/ethernet/marvell/mv643xx_eth.c static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
skb               827 drivers/net/ethernet/marvell/mv643xx_eth.c 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               832 drivers/net/ethernet/marvell/mv643xx_eth.c 	if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
skb               840 drivers/net/ethernet/marvell/mv643xx_eth.c 	tso_start(skb, &tso);
skb               842 drivers/net/ethernet/marvell/mv643xx_eth.c 	total_len = skb->len - hdr_len;
skb               847 drivers/net/ethernet/marvell/mv643xx_eth.c 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
skb               853 drivers/net/ethernet/marvell/mv643xx_eth.c 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
skb               854 drivers/net/ethernet/marvell/mv643xx_eth.c 		txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
skb               862 drivers/net/ethernet/marvell/mv643xx_eth.c 			ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
skb               868 drivers/net/ethernet/marvell/mv643xx_eth.c 			tso_build_data(skb, &tso, size);
skb               872 drivers/net/ethernet/marvell/mv643xx_eth.c 	__skb_queue_tail(&txq->tx_skb, skb);
skb               873 drivers/net/ethernet/marvell/mv643xx_eth.c 	skb_tx_timestamp(skb);
skb               894 drivers/net/ethernet/marvell/mv643xx_eth.c static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
skb               897 drivers/net/ethernet/marvell/mv643xx_eth.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb               905 drivers/net/ethernet/marvell/mv643xx_eth.c 		this_frag = &skb_shinfo(skb)->frags[frag];
skb               932 drivers/net/ethernet/marvell/mv643xx_eth.c static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
skb               936 drivers/net/ethernet/marvell/mv643xx_eth.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb               952 drivers/net/ethernet/marvell/mv643xx_eth.c 	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
skb               964 drivers/net/ethernet/marvell/mv643xx_eth.c 		txq_submit_frag_skb(txq, skb);
skb               965 drivers/net/ethernet/marvell/mv643xx_eth.c 		length = skb_headlen(skb);
skb               968 drivers/net/ethernet/marvell/mv643xx_eth.c 		length = skb->len;
skb               973 drivers/net/ethernet/marvell/mv643xx_eth.c 	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
skb               976 drivers/net/ethernet/marvell/mv643xx_eth.c 	__skb_queue_tail(&txq->tx_skb, skb);
skb               978 drivers/net/ethernet/marvell/mv643xx_eth.c 	skb_tx_timestamp(skb);
skb               996 drivers/net/ethernet/marvell/mv643xx_eth.c static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1003 drivers/net/ethernet/marvell/mv643xx_eth.c 	queue = skb_get_queue_mapping(skb);
skb              1007 drivers/net/ethernet/marvell/mv643xx_eth.c 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
skb              1013 drivers/net/ethernet/marvell/mv643xx_eth.c 	length = skb->len;
skb              1015 drivers/net/ethernet/marvell/mv643xx_eth.c 	if (skb_is_gso(skb))
skb              1016 drivers/net/ethernet/marvell/mv643xx_eth.c 		ret = txq_submit_tso(txq, skb, dev);
skb              1018 drivers/net/ethernet/marvell/mv643xx_eth.c 		ret = txq_submit_skb(txq, skb, dev);
skb              1027 drivers/net/ethernet/marvell/mv643xx_eth.c 		dev_kfree_skb_any(skb);
skb              1109 drivers/net/ethernet/marvell/mv643xx_eth.c 			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
skb              1111 drivers/net/ethernet/marvell/mv643xx_eth.c 			if (!WARN_ON(!skb))
skb              1112 drivers/net/ethernet/marvell/mv643xx_eth.c 				dev_consume_skb_any(skb);
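
mv643xx_eth (and mvneta below) lean on the shared net/core/tso.c helpers: the skb is sliced into gso_size chunks, a rebuilt header is emitted before each chunk, and tso_build_data() advances the cursor. A condensed sketch of that walk, using the tso_start() signature seen in this tree; descriptor posting is elided and hdr points at per-ring header storage.

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/tso.h>

static void my_tso_walk(struct sk_buff *skb, char *hdr)
{
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int total_len = skb->len - hdr_len;
        struct tso_t tso;

        tso_start(skb, &tso);

        while (total_len > 0) {
                int data_left = min_t(int, skb_shinfo(skb)->gso_size,
                                      total_len);

                total_len -= data_left;
                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
                /* ... post a descriptor for hdr here ... */

                while (data_left > 0) {
                        int size = min_t(int, tso.size, data_left);

                        /* ... post a descriptor for tso.data/size ... */
                        data_left -= size;
                        tso_build_data(skb, &tso, size);
                }
        }
}
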
skb               628 drivers/net/ethernet/marvell/mvneta.c 	struct sk_buff *skb;
skb              1741 drivers/net/ethernet/marvell/mvneta.c 			   struct sk_buff *skb)
skb              1746 drivers/net/ethernet/marvell/mvneta.c 		skb->csum = 0;
skb              1747 drivers/net/ethernet/marvell/mvneta.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1751 drivers/net/ethernet/marvell/mvneta.c 	skb->ip_summed = CHECKSUM_NONE;
skb              1777 drivers/net/ethernet/marvell/mvneta.c 		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
skb              1779 drivers/net/ethernet/marvell/mvneta.c 		if (skb) {
skb              1780 drivers/net/ethernet/marvell/mvneta.c 			bytes_compl += skb->len;
skb              1790 drivers/net/ethernet/marvell/mvneta.c 		if (!skb)
skb              1792 drivers/net/ethernet/marvell/mvneta.c 		dev_kfree_skb_any(skb);
skb              1847 drivers/net/ethernet/marvell/mvneta.c static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
skb              1849 drivers/net/ethernet/marvell/mvneta.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1851 drivers/net/ethernet/marvell/mvneta.c 		__be16 l3_proto = vlan_get_protocol(skb);
skb              1855 drivers/net/ethernet/marvell/mvneta.c 			struct iphdr *ip4h = ip_hdr(skb);
skb              1861 drivers/net/ethernet/marvell/mvneta.c 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb              1864 drivers/net/ethernet/marvell/mvneta.c 			if (skb_network_header_len(skb) > 0)
skb              1865 drivers/net/ethernet/marvell/mvneta.c 				ip_hdr_len = (skb_network_header_len(skb) >> 2);
skb              1870 drivers/net/ethernet/marvell/mvneta.c 		return mvneta_txq_desc_csum(skb_network_offset(skb),
skb              1987 drivers/net/ethernet/marvell/mvneta.c 			rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
skb              1988 drivers/net/ethernet/marvell/mvneta.c 			if (unlikely(!rxq->skb)) {
skb              2005 drivers/net/ethernet/marvell/mvneta.c 			memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
skb              2007 drivers/net/ethernet/marvell/mvneta.c 			skb_put(rxq->skb, copy_size);
skb              2010 drivers/net/ethernet/marvell/mvneta.c 			mvneta_rx_csum(pp, rx_status, rxq->skb);
skb              2028 drivers/net/ethernet/marvell/mvneta.c 				skb_add_rx_frag(rxq->skb, frag_num, page,
skb              2037 drivers/net/ethernet/marvell/mvneta.c 			if (unlikely(!rxq->skb)) {
skb              2054 drivers/net/ethernet/marvell/mvneta.c 				frag_num = skb_shinfo(rxq->skb)->nr_frags;
skb              2058 drivers/net/ethernet/marvell/mvneta.c 				skb_add_rx_frag(rxq->skb, frag_num, page,
skb              2076 drivers/net/ethernet/marvell/mvneta.c 			dev_kfree_skb_any(rxq->skb);
skb              2078 drivers/net/ethernet/marvell/mvneta.c 			rxq->skb = NULL;
skb              2082 drivers/net/ethernet/marvell/mvneta.c 		rcvd_bytes += rxq->skb->len;
skb              2085 drivers/net/ethernet/marvell/mvneta.c 		rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
skb              2087 drivers/net/ethernet/marvell/mvneta.c 		napi_gro_receive(napi, rxq->skb);
skb              2090 drivers/net/ethernet/marvell/mvneta.c 		rxq->skb = NULL;
skb              2134 drivers/net/ethernet/marvell/mvneta.c 		struct sk_buff *skb;
skb              2163 drivers/net/ethernet/marvell/mvneta.c 			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
skb              2164 drivers/net/ethernet/marvell/mvneta.c 			if (unlikely(!skb))
skb              2172 drivers/net/ethernet/marvell/mvneta.c 			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
skb              2175 drivers/net/ethernet/marvell/mvneta.c 			skb->protocol = eth_type_trans(skb, dev);
skb              2176 drivers/net/ethernet/marvell/mvneta.c 			mvneta_rx_csum(pp, rx_status, skb);
skb              2177 drivers/net/ethernet/marvell/mvneta.c 			napi_gro_receive(napi, skb);
skb              2200 drivers/net/ethernet/marvell/mvneta.c 		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
skb              2207 drivers/net/ethernet/marvell/mvneta.c 		if (!skb)
skb              2214 drivers/net/ethernet/marvell/mvneta.c 		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
skb              2215 drivers/net/ethernet/marvell/mvneta.c 		skb_put(skb, rx_bytes);
skb              2217 drivers/net/ethernet/marvell/mvneta.c 		skb->protocol = eth_type_trans(skb, dev);
skb              2219 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rx_csum(pp, rx_status, skb);
skb              2221 drivers/net/ethernet/marvell/mvneta.c 		napi_gro_receive(napi, skb);
skb              2240 drivers/net/ethernet/marvell/mvneta.c mvneta_tso_put_hdr(struct sk_buff *skb,
skb              2244 drivers/net/ethernet/marvell/mvneta.c 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              2249 drivers/net/ethernet/marvell/mvneta.c 	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
skb              2258 drivers/net/ethernet/marvell/mvneta.c 		    struct sk_buff *skb, char *data, int size,
skb              2282 drivers/net/ethernet/marvell/mvneta.c 			txq->tx_skb[txq->txq_put_index] = skb;
skb              2288 drivers/net/ethernet/marvell/mvneta.c static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
skb              2295 drivers/net/ethernet/marvell/mvneta.c 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              2299 drivers/net/ethernet/marvell/mvneta.c 	if ((txq->count + tso_count_descs(skb)) >= txq->size)
skb              2302 drivers/net/ethernet/marvell/mvneta.c 	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
skb              2308 drivers/net/ethernet/marvell/mvneta.c 	tso_start(skb, &tso);
skb              2310 drivers/net/ethernet/marvell/mvneta.c 	total_len = skb->len - hdr_len;
skb              2314 drivers/net/ethernet/marvell/mvneta.c 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
skb              2320 drivers/net/ethernet/marvell/mvneta.c 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
skb              2322 drivers/net/ethernet/marvell/mvneta.c 		mvneta_tso_put_hdr(skb, pp, txq);
skb              2330 drivers/net/ethernet/marvell/mvneta.c 			if (mvneta_tso_put_data(dev, txq, skb,
skb              2337 drivers/net/ethernet/marvell/mvneta.c 			tso_build_data(skb, &tso, size);
skb              2360 drivers/net/ethernet/marvell/mvneta.c static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
skb              2364 drivers/net/ethernet/marvell/mvneta.c 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
skb              2367 drivers/net/ethernet/marvell/mvneta.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2386 drivers/net/ethernet/marvell/mvneta.c 			txq->tx_skb[txq->txq_put_index] = skb;
skb              2414 drivers/net/ethernet/marvell/mvneta.c static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
skb              2417 drivers/net/ethernet/marvell/mvneta.c 	u16 txq_id = skb_get_queue_mapping(skb);
skb              2420 drivers/net/ethernet/marvell/mvneta.c 	int len = skb->len;
skb              2427 drivers/net/ethernet/marvell/mvneta.c 	if (skb_is_gso(skb)) {
skb              2428 drivers/net/ethernet/marvell/mvneta.c 		frags = mvneta_tx_tso(skb, dev, txq);
skb              2432 drivers/net/ethernet/marvell/mvneta.c 	frags = skb_shinfo(skb)->nr_frags + 1;
skb              2437 drivers/net/ethernet/marvell/mvneta.c 	tx_cmd = mvneta_skb_tx_csum(pp, skb);
skb              2439 drivers/net/ethernet/marvell/mvneta.c 	tx_desc->data_size = skb_headlen(skb);
skb              2441 drivers/net/ethernet/marvell/mvneta.c 	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
skb              2455 drivers/net/ethernet/marvell/mvneta.c 		txq->tx_skb[txq->txq_put_index] = skb;
skb              2464 drivers/net/ethernet/marvell/mvneta.c 		if (mvneta_tx_frag_process(pp, skb, txq)) {
skb              2498 drivers/net/ethernet/marvell/mvneta.c 		dev_kfree_skb_any(skb);
skb              2964 drivers/net/ethernet/marvell/mvneta.c 	if (rxq->skb)
skb              2965 drivers/net/ethernet/marvell/mvneta.c 		dev_kfree_skb_any(rxq->skb);
skb              2979 drivers/net/ethernet/marvell/mvneta.c 	rxq->skb               = NULL;
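
The mvneta lines above show both halves of checksum offload; on receive, mvneta_rx_csum() promotes hardware-validated frames to CHECKSUM_UNNECESSARY and falls back to CHECKSUM_NONE otherwise. A minimal sketch of that receive-side pattern follows; the status bits (MY_RX_IP_OK, MY_RX_L4_OK) and the helper name are illustrative placeholders, not mvneta's real definitions.

#include <linux/bits.h>
#include <linux/skbuff.h>

#define MY_RX_IP_OK	BIT(0)	/* placeholder descriptor status bits */
#define MY_RX_L4_OK	BIT(1)

static void my_rx_csum(u32 status, struct sk_buff *skb)
{
	if ((status & MY_RX_IP_OK) && (status & MY_RX_L4_OK)) {
		skb->csum = 0;			/* unused when UNNECESSARY */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}
	skb->ip_summed = CHECKSUM_NONE;		/* stack verifies in software */
}
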
skb              1065 drivers/net/ethernet/marvell/mvpp2/mvpp2.h 	struct sk_buff *skb;
skb               281 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			      struct sk_buff *skb,
skb               286 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	tx_buf->skb = skb;
skb              2243 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		if (tx_buf->skb)
skb              2244 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			dev_kfree_skb_any(tx_buf->skb);
skb              2850 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			  struct sk_buff *skb)
skb              2858 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			skb->csum = 0;
skb              2859 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2863 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	skb->ip_summed = CHECKSUM_NONE;
skb              2886 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
skb              2888 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              2891 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		__be16 l3_proto = vlan_get_protocol(skb);
skb              2894 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			struct iphdr *ip4h = ip_hdr(skb);
skb              2900 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb              2903 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			if (skb_network_header_len(skb) > 0)
skb              2904 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				ip_hdr_len = (skb_network_header_len(skb) >> 2);
skb              2910 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		return mvpp2_txq_desc_csum(skb_network_offset(skb),
skb              2935 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct sk_buff *skb;
skb              2974 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		skb = build_skb(data, frag_size);
skb              2975 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		if (!skb) {
skb              2992 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
skb              2993 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		skb_put(skb, rx_bytes);
skb              2994 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		skb->protocol = eth_type_trans(skb, dev);
skb              2995 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		mvpp2_rx_csum(port, rx_status, skb);
skb              2997 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		napi_gro_receive(napi, skb);
skb              3034 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
skb              3044 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              3045 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              3062 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
skb              3066 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
skb              3087 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
skb              3105 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
skb              3111 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static inline int mvpp2_tso_put_data(struct sk_buff *skb,
skb              3137 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
skb              3148 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
skb              3155 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              3159 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
skb              3161 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 					     tso_count_descs(skb)))
skb              3164 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	tso_start(skb, &tso);
skb              3165 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	len = skb->len - hdr_sz;
skb              3167 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
skb              3174 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		tso_build_hdr(skb, hdr, &tso, left, len == 0);
skb              3175 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
skb              3182 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
skb              3185 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			tso_build_data(skb, &tso, sz);
skb              3200 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
skb              3215 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	txq_id = skb_get_queue_mapping(skb);
skb              3223 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (skb_is_gso(skb)) {
skb              3224 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
skb              3227 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	frags = skb_shinfo(skb)->nr_frags + 1;
skb              3239 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
skb              3241 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
skb              3242 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				      skb_headlen(skb), DMA_TO_DEVICE);
skb              3251 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	tx_cmd = mvpp2_skb_tx_csum(port, skb);
skb              3257 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
skb              3265 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
skb              3289 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		stats->tx_bytes += skb->len;
skb              3293 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		dev_kfree_skb_any(skb);
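
Both mvneta_tx_tso() and mvpp2_tx_tso() above drive the generic net/tso.h helpers: tso_count_descs() sizes the descriptor budget, tso_build_hdr() rebuilds a per-segment header, and tso_build_data() walks the payload fragments. A condensed sketch of that loop follows, assuming two hypothetical descriptor-queueing hooks (my_queue_hdr/my_queue_data) in place of the drivers' real descriptor code; note that tso_start() is called for its side effects here, which compiles against both the older void and the newer int-returning signature.

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/tso.h>

/* hypothetical stand-ins for the drivers' descriptor handling */
extern void my_queue_hdr(void *hdr, int hdr_len);
extern int my_queue_data(struct sk_buff *skb, void *data, int size, bool last);

static int my_tx_tso(struct sk_buff *skb, char *hdr_buf)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total = skb->len - hdr_len;
	struct tso_t tso;

	tso_start(skb, &tso);
	while (total > 0) {
		/* one hardware segment per gso_size worth of payload */
		int left = min_t(int, skb_shinfo(skb)->gso_size, total);

		total -= left;
		tso_build_hdr(skb, hdr_buf, &tso, left, total == 0);
		my_queue_hdr(hdr_buf, hdr_len);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);

			left -= sz;
			if (my_queue_data(skb, tso.data, sz,
					  left == 0 && total == 0))
				return -ENOMEM;
			tso_build_data(skb, &tso, sz);
		}
	}
	return 0;
}
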
skb               306 drivers/net/ethernet/marvell/pxa168_eth.c 	struct sk_buff *skb;
skb               313 drivers/net/ethernet/marvell/pxa168_eth.c 		skb = netdev_alloc_skb(dev, pep->skb_size);
skb               314 drivers/net/ethernet/marvell/pxa168_eth.c 		if (!skb)
skb               317 drivers/net/ethernet/marvell/pxa168_eth.c 			skb_reserve(skb, SKB_DMA_REALIGN);
skb               322 drivers/net/ethernet/marvell/pxa168_eth.c 		size = skb_end_pointer(skb) - skb->data;
skb               324 drivers/net/ethernet/marvell/pxa168_eth.c 							 skb->data,
skb               328 drivers/net/ethernet/marvell/pxa168_eth.c 		pep->rx_skb[used_rx_desc] = skb;
skb               341 drivers/net/ethernet/marvell/pxa168_eth.c 		skb_reserve(skb, ETH_HW_IP_ALIGN);
skb               701 drivers/net/ethernet/marvell/pxa168_eth.c 	struct sk_buff *skb;
skb               726 drivers/net/ethernet/marvell/pxa168_eth.c 		skb = pep->tx_skb[tx_index];
skb               727 drivers/net/ethernet/marvell/pxa168_eth.c 		if (skb)
skb               736 drivers/net/ethernet/marvell/pxa168_eth.c 		if (skb)
skb               737 drivers/net/ethernet/marvell/pxa168_eth.c 			dev_kfree_skb_irq(skb);
skb               769 drivers/net/ethernet/marvell/pxa168_eth.c 	struct sk_buff *skb;
skb               786 drivers/net/ethernet/marvell/pxa168_eth.c 		skb = pep->rx_skb[rx_curr_desc];
skb               824 drivers/net/ethernet/marvell/pxa168_eth.c 			dev_kfree_skb_irq(skb);
skb               830 drivers/net/ethernet/marvell/pxa168_eth.c 			skb_put(skb, rx_desc->byte_cnt - 4);
skb               831 drivers/net/ethernet/marvell/pxa168_eth.c 			skb->protocol = eth_type_trans(skb, dev);
skb               832 drivers/net/ethernet/marvell/pxa168_eth.c 			netif_receive_skb(skb);
skb              1253 drivers/net/ethernet/marvell/pxa168_eth.c pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1263 drivers/net/ethernet/marvell/pxa168_eth.c 	length = skb->len;
skb              1264 drivers/net/ethernet/marvell/pxa168_eth.c 	pep->tx_skb[tx_index] = skb;
skb              1266 drivers/net/ethernet/marvell/pxa168_eth.c 	desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
skb              1269 drivers/net/ethernet/marvell/pxa168_eth.c 	skb_tx_timestamp(skb);
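
pxa168_eth_start_xmit() above shows the canonical TX bookkeeping: stash the skb where the completion handler can find it, DMA-map the linear data, and call skb_tx_timestamp() before the hardware can complete (and free) the frame. A minimal sketch under those assumptions; my_desc, the parameter layout, and the drop-on-mapping-failure policy are illustrative, not pxa168's.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_desc {		/* stand-in for the driver's TX descriptor */
	dma_addr_t buf_ptr;
	u16 byte_cnt;
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct device *dma_dev,
				 struct my_desc *desc,
				 struct sk_buff **tx_skb_slot)
{
	*tx_skb_slot = skb;			/* freed by the reclaim path */
	desc->byte_cnt = skb->len;
	desc->buf_ptr = dma_map_single(dma_dev, skb->data, skb->len,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, desc->buf_ptr)) {
		*tx_skb_slot = NULL;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;		/* drop; don't requeue */
	}
	skb_tx_timestamp(skb);	/* last safe point before hardware owns it */
	/* ... write the rest of the descriptor and kick the DMA engine ... */
	return NETDEV_TX_OK;
}
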
skb               936 drivers/net/ethernet/marvell/skge.c 			 struct sk_buff *skb, unsigned int bufsize)
skb               941 drivers/net/ethernet/marvell/skge.c 	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
skb               949 drivers/net/ethernet/marvell/skge.c 	e->skb = skb;
skb               991 drivers/net/ethernet/marvell/skge.c 		if (e->skb) {
skb               996 drivers/net/ethernet/marvell/skge.c 			dev_kfree_skb(e->skb);
skb               997 drivers/net/ethernet/marvell/skge.c 			e->skb = NULL;
skb              1014 drivers/net/ethernet/marvell/skge.c 		struct sk_buff *skb;
skb              1016 drivers/net/ethernet/marvell/skge.c 		skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
skb              1018 drivers/net/ethernet/marvell/skge.c 		if (!skb)
skb              1021 drivers/net/ethernet/marvell/skge.c 		skb_reserve(skb, NET_IP_ALIGN);
skb              1022 drivers/net/ethernet/marvell/skge.c 		if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
skb              1023 drivers/net/ethernet/marvell/skge.c 			dev_kfree_skb(skb);
skb              2729 drivers/net/ethernet/marvell/skge.c static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
skb              2740 drivers/net/ethernet/marvell/skge.c 	if (skb_padto(skb, ETH_ZLEN))
skb              2743 drivers/net/ethernet/marvell/skge.c 	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
skb              2749 drivers/net/ethernet/marvell/skge.c 	e->skb = skb;
skb              2750 drivers/net/ethernet/marvell/skge.c 	len = skb_headlen(skb);
skb              2751 drivers/net/ethernet/marvell/skge.c 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
skb              2761 drivers/net/ethernet/marvell/skge.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              2762 drivers/net/ethernet/marvell/skge.c 		const int offset = skb_checksum_start_offset(skb);
skb              2767 drivers/net/ethernet/marvell/skge.c 		if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
skb              2775 drivers/net/ethernet/marvell/skge.c 		td->csum_write = offset + skb->csum_offset;
skb              2779 drivers/net/ethernet/marvell/skge.c 	if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
skb              2785 drivers/net/ethernet/marvell/skge.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2786 drivers/net/ethernet/marvell/skge.c 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2794 drivers/net/ethernet/marvell/skge.c 			e->skb = skb;
skb              2812 drivers/net/ethernet/marvell/skge.c 	netdev_sent_queue(dev, skb->len);
skb              2818 drivers/net/ethernet/marvell/skge.c 		     e - skge->tx_ring.start, skb->len);
skb              2847 drivers/net/ethernet/marvell/skge.c 	dev_kfree_skb_any(skb);
skb              2879 drivers/net/ethernet/marvell/skge.c 			dev_kfree_skb(e->skb);
skb              3050 drivers/net/ethernet/marvell/skge.c 	struct sk_buff *skb;
skb              3070 drivers/net/ethernet/marvell/skge.c 		skb = netdev_alloc_skb_ip_align(dev, len);
skb              3071 drivers/net/ethernet/marvell/skge.c 		if (!skb)
skb              3078 drivers/net/ethernet/marvell/skge.c 		skb_copy_from_linear_data(e->skb, skb->data, len);
skb              3094 drivers/net/ethernet/marvell/skge.c 		skb = ee.skb;
skb              3095 drivers/net/ethernet/marvell/skge.c 		prefetch(skb->data);
skb              3108 drivers/net/ethernet/marvell/skge.c 	skb_put(skb, len);
skb              3111 drivers/net/ethernet/marvell/skge.c 		skb->csum = le16_to_cpu(csum);
skb              3112 drivers/net/ethernet/marvell/skge.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb              3115 drivers/net/ethernet/marvell/skge.c 	skb->protocol = eth_type_trans(skb, dev);
skb              3117 drivers/net/ethernet/marvell/skge.c 	return skb;
skb              3169 drivers/net/ethernet/marvell/skge.c 			bytes_compl += e->skb->len;
skb              3171 drivers/net/ethernet/marvell/skge.c 			dev_consume_skb_any(e->skb);
skb              3207 drivers/net/ethernet/marvell/skge.c 		struct sk_buff *skb;
skb              3215 drivers/net/ethernet/marvell/skge.c 		skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
skb              3216 drivers/net/ethernet/marvell/skge.c 		if (likely(skb)) {
skb              3217 drivers/net/ethernet/marvell/skge.c 			napi_gro_receive(napi, skb);
skb              2397 drivers/net/ethernet/marvell/skge.h 	struct sk_buff  	*skb;
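
skge_rx_get() above finishes with skb->csum = le16_to_cpu(csum) and CHECKSUM_COMPLETE: the NIC reports a raw ones-complement sum over the frame and the stack does the folding and verification, so the driver never has to claim that a specific protocol's checksum was validated. A sketch of that pattern; the helper name is hypothetical, and the __force cast merely keeps sparse quiet where skge assigns directly.

#include <linux/skbuff.h>

static void my_rx_csum_complete(struct sk_buff *skb, __le16 hw_csum)
{
	/* raw 16-bit ones-complement sum reported by the NIC */
	skb->csum = (__force __wsum)le16_to_cpu(hw_csum);
	skb->ip_summed = CHECKSUM_COMPLETE;	/* stack folds and verifies */
}
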
skb              1201 drivers/net/ethernet/marvell/sky2.c 	for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
skb              1209 drivers/net/ethernet/marvell/sky2.c 	struct sk_buff *skb = re->skb;
skb              1212 drivers/net/ethernet/marvell/sky2.c 	re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
skb              1218 drivers/net/ethernet/marvell/sky2.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1219 drivers/net/ethernet/marvell/sky2.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1233 drivers/net/ethernet/marvell/sky2.c 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb              1243 drivers/net/ethernet/marvell/sky2.c 			 skb->dev->name);
skb              1249 drivers/net/ethernet/marvell/sky2.c 	struct sk_buff *skb = re->skb;
skb              1255 drivers/net/ethernet/marvell/sky2.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb              1257 drivers/net/ethernet/marvell/sky2.c 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb              1355 drivers/net/ethernet/marvell/sky2.c 		if (re->skb) {
skb              1357 drivers/net/ethernet/marvell/sky2.c 			kfree_skb(re->skb);
skb              1358 drivers/net/ethernet/marvell/sky2.c 			re->skb = NULL;
skb              1441 drivers/net/ethernet/marvell/sky2.c 	struct sk_buff *skb;
skb              1444 drivers/net/ethernet/marvell/sky2.c 	skb = __netdev_alloc_skb(sky2->netdev,
skb              1447 drivers/net/ethernet/marvell/sky2.c 	if (!skb)
skb              1458 drivers/net/ethernet/marvell/sky2.c 		start = PTR_ALIGN(skb->data, 8);
skb              1459 drivers/net/ethernet/marvell/sky2.c 		skb_reserve(skb, start - skb->data);
skb              1461 drivers/net/ethernet/marvell/sky2.c 		skb_reserve(skb, NET_IP_ALIGN);
skb              1468 drivers/net/ethernet/marvell/sky2.c 		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
skb              1471 drivers/net/ethernet/marvell/sky2.c 	return skb;
skb              1473 drivers/net/ethernet/marvell/sky2.c 	kfree_skb(skb);
skb              1494 drivers/net/ethernet/marvell/sky2.c 		re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
skb              1495 drivers/net/ethernet/marvell/sky2.c 		if (!re->skb)
skb              1499 drivers/net/ethernet/marvell/sky2.c 			dev_kfree_skb(re->skb);
skb              1500 drivers/net/ethernet/marvell/sky2.c 			re->skb = NULL;
skb              1788 drivers/net/ethernet/marvell/sky2.c static unsigned tx_le_req(const struct sk_buff *skb)
skb              1792 drivers/net/ethernet/marvell/sky2.c 	count = (skb_shinfo(skb)->nr_frags + 1)
skb              1795 drivers/net/ethernet/marvell/sky2.c 	if (skb_is_gso(skb))
skb              1800 drivers/net/ethernet/marvell/sky2.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1825 drivers/net/ethernet/marvell/sky2.c static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
skb              1839 drivers/net/ethernet/marvell/sky2.c 	if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
skb              1842 drivers/net/ethernet/marvell/sky2.c 	len = skb_headlen(skb);
skb              1843 drivers/net/ethernet/marvell/sky2.c 	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
skb              1850 drivers/net/ethernet/marvell/sky2.c 		     "tx queued, slot %u, len %d\n", slot, skb->len);
skb              1862 drivers/net/ethernet/marvell/sky2.c 	mss = skb_shinfo(skb)->gso_size;
skb              1866 drivers/net/ethernet/marvell/sky2.c 			mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
skb              1883 drivers/net/ethernet/marvell/sky2.c 	if (skb_vlan_tag_present(skb)) {
skb              1890 drivers/net/ethernet/marvell/sky2.c 		le->length = cpu_to_be16(skb_vlan_tag_get(skb));
skb              1895 drivers/net/ethernet/marvell/sky2.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1900 drivers/net/ethernet/marvell/sky2.c 			const unsigned offset = skb_transport_offset(skb);
skb              1904 drivers/net/ethernet/marvell/sky2.c 			tcpsum |= offset + skb->csum_offset;	/* sum write */
skb              1907 drivers/net/ethernet/marvell/sky2.c 			if (ip_hdr(skb)->protocol == IPPROTO_UDP)
skb              1934 drivers/net/ethernet/marvell/sky2.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1935 drivers/net/ethernet/marvell/sky2.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1963 drivers/net/ethernet/marvell/sky2.c 	re->skb = skb;
skb              1971 drivers/net/ethernet/marvell/sky2.c 	netdev_sent_queue(dev, skb->len);
skb              1986 drivers/net/ethernet/marvell/sky2.c 	dev_kfree_skb_any(skb);
skb              2011 drivers/net/ethernet/marvell/sky2.c 		struct sk_buff *skb = re->skb;
skb              2015 drivers/net/ethernet/marvell/sky2.c 		if (skb) {
skb              2020 drivers/net/ethernet/marvell/sky2.c 			bytes_compl += skb->len;
skb              2022 drivers/net/ethernet/marvell/sky2.c 			re->skb = NULL;
skb              2023 drivers/net/ethernet/marvell/sky2.c 			dev_kfree_skb_any(skb);
skb              2463 drivers/net/ethernet/marvell/sky2.c 	struct sk_buff *skb;
skb              2465 drivers/net/ethernet/marvell/sky2.c 	skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
skb              2466 drivers/net/ethernet/marvell/sky2.c 	if (likely(skb)) {
skb              2469 drivers/net/ethernet/marvell/sky2.c 		skb_copy_from_linear_data(re->skb, skb->data, length);
skb              2470 drivers/net/ethernet/marvell/sky2.c 		skb->ip_summed = re->skb->ip_summed;
skb              2471 drivers/net/ethernet/marvell/sky2.c 		skb->csum = re->skb->csum;
skb              2472 drivers/net/ethernet/marvell/sky2.c 		skb_copy_hash(skb, re->skb);
skb              2473 drivers/net/ethernet/marvell/sky2.c 		__vlan_hwaccel_copy_tag(skb, re->skb);
skb              2477 drivers/net/ethernet/marvell/sky2.c 		__vlan_hwaccel_clear_tag(re->skb);
skb              2478 drivers/net/ethernet/marvell/sky2.c 		skb_clear_hash(re->skb);
skb              2479 drivers/net/ethernet/marvell/sky2.c 		re->skb->ip_summed = CHECKSUM_NONE;
skb              2480 drivers/net/ethernet/marvell/sky2.c 		skb_put(skb, length);
skb              2482 drivers/net/ethernet/marvell/sky2.c 	return skb;
skb              2486 drivers/net/ethernet/marvell/sky2.c static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
skb              2494 drivers/net/ethernet/marvell/sky2.c 	skb->tail += size;
skb              2495 drivers/net/ethernet/marvell/sky2.c 	skb->len += size;
skb              2498 drivers/net/ethernet/marvell/sky2.c 	num_frags = skb_shinfo(skb)->nr_frags;
skb              2500 drivers/net/ethernet/marvell/sky2.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2505 drivers/net/ethernet/marvell/sky2.c 			--skb_shinfo(skb)->nr_frags;
skb              2510 drivers/net/ethernet/marvell/sky2.c 			skb->data_len += size;
skb              2511 drivers/net/ethernet/marvell/sky2.c 			skb->truesize += PAGE_SIZE;
skb              2512 drivers/net/ethernet/marvell/sky2.c 			skb->len += size;
skb              2523 drivers/net/ethernet/marvell/sky2.c 	struct sk_buff *skb;
skb              2527 drivers/net/ethernet/marvell/sky2.c 	nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC);
skb              2528 drivers/net/ethernet/marvell/sky2.c 	if (unlikely(!nre.skb))
skb              2534 drivers/net/ethernet/marvell/sky2.c 	skb = re->skb;
skb              2536 drivers/net/ethernet/marvell/sky2.c 	prefetch(skb->data);
skb              2539 drivers/net/ethernet/marvell/sky2.c 	if (skb_shinfo(skb)->nr_frags)
skb              2540 drivers/net/ethernet/marvell/sky2.c 		skb_put_frags(skb, hdr_space, length);
skb              2542 drivers/net/ethernet/marvell/sky2.c 		skb_put(skb, length);
skb              2543 drivers/net/ethernet/marvell/sky2.c 	return skb;
skb              2546 drivers/net/ethernet/marvell/sky2.c 	dev_kfree_skb(nre.skb);
skb              2560 drivers/net/ethernet/marvell/sky2.c 	struct sk_buff *skb = NULL;
skb              2570 drivers/net/ethernet/marvell/sky2.c 	if (skb_vlan_tag_present(re->skb))
skb              2594 drivers/net/ethernet/marvell/sky2.c 		skb = receive_copy(sky2, re, length);
skb              2596 drivers/net/ethernet/marvell/sky2.c 		skb = receive_new(sky2, re, length);
skb              2598 drivers/net/ethernet/marvell/sky2.c 	dev->stats.rx_dropped += (skb == NULL);
skb              2603 drivers/net/ethernet/marvell/sky2.c 	return skb;
skb              2630 drivers/net/ethernet/marvell/sky2.c 			       struct sk_buff *skb)
skb              2632 drivers/net/ethernet/marvell/sky2.c 	if (skb->ip_summed == CHECKSUM_NONE)
skb              2633 drivers/net/ethernet/marvell/sky2.c 		netif_receive_skb(skb);
skb              2635 drivers/net/ethernet/marvell/sky2.c 		napi_gro_receive(&sky2->hw->napi, skb);
skb              2667 drivers/net/ethernet/marvell/sky2.c 		struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
skb              2668 drivers/net/ethernet/marvell/sky2.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb              2669 drivers/net/ethernet/marvell/sky2.c 		skb->csum = le16_to_cpu(status);
skb              2687 drivers/net/ethernet/marvell/sky2.c 	struct sk_buff *skb;
skb              2689 drivers/net/ethernet/marvell/sky2.c 	skb = sky2->rx_ring[sky2->rx_next].skb;
skb              2690 drivers/net/ethernet/marvell/sky2.c 	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length));
skb              2695 drivers/net/ethernet/marvell/sky2.c 	struct sk_buff *skb;
skb              2697 drivers/net/ethernet/marvell/sky2.c 	skb = sky2->rx_ring[sky2->rx_next].skb;
skb              2698 drivers/net/ethernet/marvell/sky2.c 	skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3);
skb              2717 drivers/net/ethernet/marvell/sky2.c 		struct sk_buff *skb;
skb              2739 drivers/net/ethernet/marvell/sky2.c 			skb = sky2_receive(dev, length, status);
skb              2740 drivers/net/ethernet/marvell/sky2.c 			if (!skb)
skb              2748 drivers/net/ethernet/marvell/sky2.c 					skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2750 drivers/net/ethernet/marvell/sky2.c 					skb->ip_summed = CHECKSUM_NONE;
skb              2753 drivers/net/ethernet/marvell/sky2.c 			skb->protocol = eth_type_trans(skb, dev);
skb              2754 drivers/net/ethernet/marvell/sky2.c 			sky2_skb_rx(sky2, skb);
skb              2192 drivers/net/ethernet/marvell/sky2.h 	struct sk_buff	*skb;
skb              2201 drivers/net/ethernet/marvell/sky2.h 	struct sk_buff	*skb;
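
sky2_receive() above (like skge before it) splits the receive path on a copybreak threshold: short frames are copied into a fresh skb so the mapped DMA buffer can stay on the ring (receive_copy), while long frames swap in a newly allocated buffer (receive_new). A sketch of the decision, with MY_COPYBREAK and my_receive_new() as stand-ins for sky2's tunable and its buffer-swap path; copying small frames keeps truesize honest and avoids a remap per packet.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_COPYBREAK 128	/* illustrative threshold */

extern struct sk_buff *my_receive_new(struct net_device *dev, unsigned int len);

static struct sk_buff *my_receive(struct net_device *dev,
				  struct sk_buff *rx_buf, unsigned int len)
{
	struct sk_buff *skb;

	if (len >= MY_COPYBREAK)	/* big frame: hand off the buffer */
		return my_receive_new(dev, len);

	skb = netdev_alloc_skb_ip_align(dev, len);	/* small frame: copy */
	if (likely(skb)) {
		skb_copy_from_linear_data(rx_buf, skb->data, len);
		skb_put(skb, len);
	}
	return skb;
}
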
skb               878 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (tx_buf->skb &&
skb               879 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
skb               880 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		dev_kfree_skb_any(tx_buf->skb);
skb               881 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	tx_buf->skb = NULL;
skb               898 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
skb               907 drivers/net/ethernet/mediatek/mtk_eth_soc.c static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
skb               937 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               941 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (skb_vlan_tag_present(skb))
skb               942 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
skb               944 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	mapped_addr = dma_map_single(eth->dev, skb->data,
skb               945 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				     skb_headlen(skb), DMA_TO_DEVICE);
skb               953 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
skb               959 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb               962 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1004 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
skb              1018 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	itx_buf->skb = skb;
skb              1021 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
skb              1030 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	netdev_sent_queue(dev, skb->len);
skb              1031 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	skb_tx_timestamp(skb);
skb              1071 drivers/net/ethernet/mediatek/mtk_eth_soc.c static inline int mtk_cal_txd_req(struct sk_buff *skb)
skb              1077 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (skb_is_gso(skb)) {
skb              1078 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1079 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			frag = &skb_shinfo(skb)->frags[i];
skb              1084 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		nfrags += skb_shinfo(skb)->nr_frags;
skb              1126 drivers/net/ethernet/mediatek/mtk_eth_soc.c static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1144 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	tx_num = mtk_cal_txd_req(skb);
skb              1154 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (skb_is_gso(skb)) {
skb              1155 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (skb_cow_head(skb, 0)) {
skb              1161 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (skb_shinfo(skb)->gso_type &
skb              1164 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
skb              1168 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
skb              1181 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	dev_kfree_skb_any(skb);
skb              1230 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct sk_buff *skb;
skb              1289 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		skb = build_skb(data, ring->frag_size);
skb              1290 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (unlikely(!skb)) {
skb              1295 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
skb              1300 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		skb->dev = netdev;
skb              1301 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		skb_put(skb, pktlen);
skb              1303 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1305 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			skb_checksum_none_assert(skb);
skb              1306 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		skb->protocol = eth_type_trans(skb, netdev);
skb              1310 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              1312 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		skb_record_rx_queue(skb, 0);
skb              1313 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		napi_gro_receive(napi, skb);
skb              1346 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct sk_buff *skb;
skb              1367 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		skb = tx_buf->skb;
skb              1368 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (!skb)
skb              1371 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
skb              1372 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			bytes[mac] += skb->len;
skb              1394 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct sk_buff *skb;
skb              1403 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		skb = tx_buf->skb;
skb              1404 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (!skb)
skb              1407 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
skb              1408 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			bytes[0] += skb->len;
skb               604 drivers/net/ethernet/mediatek/mtk_eth_soc.h 	struct sk_buff *skb;
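
The mtk_poll_rx() lines above use the zero-copy receive idiom also seen in mvneta and mvpp2: the already-filled DMA buffer is wrapped with build_skb() instead of being copied, and the headroom that was reserved for DMA is reclaimed with skb_reserve(). Minimal sketch, assuming the caller recycles `data` when build_skb() fails.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool my_rx_build(struct napi_struct *napi, struct net_device *dev,
			void *data, unsigned int frag_size,
			unsigned int pktlen)
{
	/* frag_size includes room for the skb_shared_info tail */
	struct sk_buff *skb = build_skb(data, frag_size);

	if (unlikely(!skb))
		return false;		/* caller recycles the buffer */

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb_put(skb, pktlen);
	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(napi, skb);	/* skb belongs to the stack now */
	return true;
}
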
skb               359 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
skb               371 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int nhoff = skb_network_offset(skb);
skb               374 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (skb->protocol != htons(ETH_P_IP))
skb               377 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	ip = (const struct iphdr *)(skb->data + nhoff);
skb               383 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
skb              2724 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
skb              2728 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	features = vlan_features_check(skb, features);
skb              2729 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	features = vxlan_features_check(skb, features);
skb              2735 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (skb->encapsulation &&
skb              2736 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              2740 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		    (ip_hdr(skb)->version != 4) ||
skb              2741 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		    (udp_hdr(skb)->dest != priv->vxlan_port))
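
mlx4_en_features_check() above illustrates the .ndo_features_check contract: chain the generic VLAN/VXLAN checks, then mask out offloads the hardware cannot apply to this particular packet (mlx4 additionally insists on IPv4 and its configured VXLAN port for encapsulated frames). A simplified sketch for a hypothetical device that cannot offload any encapsulated CHECKSUM_PARTIAL packet:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/vxlan.h>

static netdev_features_t my_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* strip checksum/GSO bits the device can't honour for tunnels */
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
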
skb               469 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				    struct sk_buff *skb,
skb               491 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
skb               523 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	skb->truesize += truesize;
skb               529 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		__skb_frag_unref(skb_shinfo(skb)->frags + nr);
skb               579 drivers/net/ethernet/mellanox/mlx4/en_rx.c static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
skb               592 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
skb               601 drivers/net/ethernet/mellanox/mlx4/en_rx.c static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
skb               615 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr);
skb               625 drivers/net/ethernet/mellanox/mlx4/en_rx.c static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
skb               639 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (short_frame(skb->len))
skb               653 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
skb               655 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
skb               699 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		struct sk_buff *skb;
skb               822 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		skb = napi_get_frags(&cq->napi);
skb               823 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if (unlikely(!skb))
skb               829 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
skb               832 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		skb_record_rx_queue(skb, cq_ring);
skb               851 drivers/net/ethernet/mellanox/mlx4/en_rx.c 					skb->csum_level = 1;
skb               857 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				if (check_csum(cqe, skb, va, dev->features))
skb               869 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		skb->ip_summed = ip_summed;
skb               871 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			skb_set_hash(skb,
skb               878 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               883 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
skb               886 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
skb               888 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			skb_shinfo(skb)->nr_frags = nr;
skb               889 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			skb->len = length;
skb               890 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			skb->data_len = length;
skb               893 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			__vlan_hwaccel_clear_tag(skb);
skb               894 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			skb_clear_hash(skb);
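
The en_rx.c lines above build frag-only skbs: napi_get_frags() lends out a headerless skb, the driver attaches its RX pages with __skb_fill_page_desc(), and fixes len/data_len/truesize itself; the matching napi_gro_frags() completion (not in the excerpt) then pulls the Ethernet header. A single-fragment sketch; page refcounting and error accounting are left to the caller.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_rx_frags(struct napi_struct *napi, struct page *page,
			unsigned int offset, unsigned int length)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;			/* count the drop; keep the page */

	__skb_fill_page_desc(skb, 0, page, offset, length);
	skb_shinfo(skb)->nr_frags = 1;	/* __skb_fill_page_desc won't set it */
	skb->len = length;
	skb->data_len = length;		/* all payload lives in the fragment */
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);		/* pulls the MAC header itself */
}
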
skb                51 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	struct sk_buff *skb;
skb                60 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
skb                61 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	if (!skb)
skb                64 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	skb_reserve(skb, NET_IP_ALIGN);
skb                66 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	ethh = skb_put(skb, sizeof(struct ethhdr));
skb                67 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	packet = skb_put(skb, packet_size);
skb                71 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	skb_reset_mac_header(skb);
skb                76 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	err = mlx4_en_xmit(skb, priv->dev);
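
The en_selftest.c lines above hand-build a raw loopback frame: reserve alignment headroom, append an Ethernet header and payload with skb_put(), and mark where the MAC header starts before transmitting. A self-contained sketch of the same construction; MY_TEST_PAYLOAD, the ethertype, and the fill pattern are illustrative choices, not mlx4's.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_TEST_PAYLOAD 256	/* illustrative payload size */

static struct sk_buff *my_build_test_frame(struct net_device *dev)
{
	struct ethhdr *ethh;
	unsigned char *payload;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, MY_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	ethh = skb_put(skb, sizeof(struct ethhdr));
	payload = skb_put(skb, MY_TEST_PAYLOAD);

	memcpy(ethh->h_dest, dev->dev_addr, ETH_ALEN);	/* send to ourselves */
	eth_zero_addr(ethh->h_source);
	ethh->h_proto = htons(ETH_P_ARP);	/* any harmless ethertype */
	skb_reset_mac_header(skb);

	memset(payload, 0xAA, MY_TEST_PAYLOAD);	/* recognisable pattern */
	return skb;
}
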
skb               279 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct sk_buff *skb = tx_info->skb;
skb               286 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	prefetchw(&skb->users);
skb               292 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		skb_tstamp_tx(skb, &hwts);
skb               332 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	napi_consume_skb(skb, napi_mode);
skb               574 drivers/net/ethernet/mellanox/mlx4/en_tx.c static bool is_inline(int inline_thold, const struct sk_buff *skb,
skb               580 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (skb->len > inline_thold || !inline_thold)
skb               595 drivers/net/ethernet/mellanox/mlx4/en_tx.c static int inline_size(const struct sk_buff *skb)
skb               597 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
skb               599 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		return ALIGN(skb->len + CTRL_SIZE +
skb               602 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		return ALIGN(skb->len + CTRL_SIZE + 2 *
skb               606 drivers/net/ethernet/mellanox/mlx4/en_tx.c static int get_real_size(const struct sk_buff *skb,
skb               618 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		if (skb->encapsulation)
skb               619 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			*lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
skb               621 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               624 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		if (unlikely(*lso_header_size != skb_headlen(skb))) {
skb               627 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			if (*lso_header_size < skb_headlen(skb))
skb               637 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		*inline_ok = is_inline(priv->prof->inline_thold, skb,
skb               641 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			real_size = inline_size(skb);
skb               651 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			     const struct sk_buff *skb,
skb               657 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	unsigned int hlen = skb_headlen(skb);
skb               659 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (skb->len <= spc) {
skb               660 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		if (likely(skb->len >= MIN_PKT_LEN)) {
skb               661 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
skb               664 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			memset(((void *)(inl + 1)) + skb->len, 0,
skb               665 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			       MIN_PKT_LEN - skb->len);
skb               667 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		skb_copy_from_linear_data(skb, inl + 1, hlen);
skb               675 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			skb_copy_from_linear_data(skb, inl + 1, hlen);
skb               682 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
skb               684 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			skb_copy_from_linear_data(skb, inl + 1, spc);
skb               686 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
skb               695 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
skb               699 drivers/net/ethernet/mellanox/mlx4/en_tx.c u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               706 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		return netdev_pick_tx(dev, skb, NULL);
skb               708 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
skb               775 drivers/net/ethernet/mellanox/mlx4/en_tx.c 				  struct sk_buff *skb,
skb               804 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		byte_count = skb_headlen(skb) - lso_header_size;
skb               806 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		dma = dma_map_single(ddev, skb->data +
skb               836 drivers/net/ethernet/mellanox/mlx4/en_tx.c netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
skb               838 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct skb_shared_info *shinfo = skb_shinfo(skb);
skb               861 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	tx_ind = skb_get_queue_mapping(skb);
skb               870 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
skb               885 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (skb_vlan_tag_present(skb)) {
skb               888 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb));
skb               889 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		vlan_proto = be16_to_cpu(skb->vlan_proto);
skb               921 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	tx_info->skb = skb;
skb               939 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	tx_info->linear = lso_header_size < skb_headlen(skb) && !inline_ok;
skb               945 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		if (!mlx4_en_build_dma_wqe(priv, shinfo, data, skb,
skb               964 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
skb               965 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		if (!skb->encapsulation)
skb               979 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ethh = (struct ethhdr *)skb->data;
skb               999 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
skb              1004 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
skb              1011 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
skb              1015 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
skb              1018 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		build_inline_wqe(tx_desc, skb, shinfo, fragptr);
skb              1020 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (skb->encapsulation) {
skb              1028 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ip.hdr = skb_inner_network_header(skb);
skb              1044 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	skb_tx_timestamp(skb);
skb              1089 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	dev_kfree_skb_any(skb);
skb               223 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 		struct sk_buff *skb;
skb               700 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               702 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
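
is_inline(), inline_size() and build_inline_wqe() above implement mlx4's copy-tiny-packets-into-the-descriptor optimization: below a profile-configured threshold the payload is embedded in the WQE and no DMA mapping is needed, trading a memcpy for a saved map/unmap pair. The gating test looks roughly like the sketch below; fragment handling is simplified, and inline_thold stands in for priv->prof->inline_thold.

#include <linux/skbuff.h>

static bool my_should_inline(const struct sk_buff *skb, int inline_thold)
{
	if (!inline_thold || skb->len > inline_thold)
		return false;
	/* keep the sketch simple: only inline fully linear skbs */
	return skb_shinfo(skb)->nr_frags == 0;
}
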
skb                10 drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
skb                14 drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h 	if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
skb                16 drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h 	ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
skb                22 drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h static inline void remove_metadata_hdr(struct sk_buff *skb)
skb                28 drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h 	old_eth = (struct ethhdr *)skb->data;
skb                29 drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h 	new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
skb                32 drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h 	skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
skb               343 drivers/net/ethernet/mellanox/mlx5/core/en.h 	struct sk_buff *skb;
skb               907 drivers/net/ethernet/mellanox/mlx5/core/en.h u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               909 drivers/net/ethernet/mellanox/mlx5/core/en.h netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
skb               910 drivers/net/ethernet/mellanox/mlx5/core/en.h netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb              1196 drivers/net/ethernet/mellanox/mlx5/core/en.h netdev_features_t mlx5e_features_check(struct sk_buff *skb,
skb               204 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb               126 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 			 struct sk_buff *skb)
skb               135 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 	if (skb_vlan_tag_present(skb) &&
skb               203 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
skb               207 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
skb               211 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
skb               217 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
skb               221 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
skb               230 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
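
mlx5e_set_eseg_swp() above programs the device's software parser: every header offset handed to the hardware is expressed in 2-byte units, hence the division by two of each skb offset helper. An outer-header-only sketch; the field names come from struct mlx5_wqe_eth_seg as used in the listing, while the helper name is hypothetical.

#include <linux/skbuff.h>
#include <linux/mlx5/qp.h>

static void my_set_outer_swp(struct sk_buff *skb,
			     struct mlx5_wqe_eth_seg *eseg)
{
	/* the device expects offsets in units of 2 bytes */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
}
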
skb                74 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	struct sk_buff *skb;
skb                76 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
skb                77 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	if (unlikely(!skb)) {
skb                82 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	skb_put_data(skb, data, cqe_bcnt);
skb                84 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	return skb;
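
The xsk/rx.c fragment above is deliberately copy-based: the AF_XDP frame is duplicated into a NAPI-cache skb with skb_put_data() so the XSK buffer can be recycled immediately rather than loaned to the stack. Sketch of the same shape; the helper name is hypothetical.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *my_xsk_copy_skb(struct napi_struct *napi,
					void *data, u32 cqe_bcnt)
{
	struct sk_buff *skb = napi_alloc_skb(napi, cqe_bcnt);

	if (unlikely(!skb))
		return NULL;	/* caller counts the allocation failure */

	skb_put_data(skb, data, cqe_bcnt);	/* skb_put() + memcpy() */
	return skb;
}
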
skb                51 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
skb                58 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 	l3_proto = vlan_get_protocol(skb);
skb                61 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 		l4_proto = ip_hdr(skb)->protocol;
skb                64 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 		l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
skb                71 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
skb                76 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 	if (inner_ip_hdr(skb)->version == 6) {
skb                78 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
skb                81 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
skb                84 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
skb                96 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
skb                98 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
skb               100 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 	udp_hdr(skb)->len = htons(payload_len);
skb               104 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h mlx5e_accel_handle_tx(struct sk_buff *skb,
skb               112 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 		skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
skb               113 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 		if (unlikely(!skb))
skb               120 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 		skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
skb               121 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 		if (unlikely(!skb))
skb               126 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
skb               127 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 		mlx5e_udp_gso_handle_tx_skb(skb);
skb               129 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 	return skb;
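
mlx5e_udp_gso_handle_tx_skb() above fixes up UDP GSO frames: for SKB_GSO_UDP_L4 the wire header must carry the per-segment length, not the length of the big aggregate skb, so it is rewritten from gso_size before transmit. A standalone sketch of that fix-up under a hypothetical helper name:

#include <linux/skbuff.h>
#include <linux/udp.h>

static void my_udp_gso_fixup(struct sk_buff *skb)
{
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		int payload = skb_shinfo(skb)->gso_size +
			      sizeof(struct udphdr);

		udp_hdr(skb)->len = htons(payload);	/* per-segment length */
	}
}
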
skb               444 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
skb               448 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 		if (ip_hdr(skb)->ihl > 5)
skb               452 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
skb               103 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h 	void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
skb                83 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
skb                85 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
skb                88 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
skb                93 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
skb                96 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
skb                97 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	skb->mac_header -= sizeof(*mdata);
skb               100 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	memmove(skb->data, skb->data + sizeof(*mdata),
skb               109 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
skb               112 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
skb               113 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	struct iphdr *ipv4hdr = ip_hdr(skb);
skb               118 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
skb               124 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	pskb_trim(skb, skb->len - trailer_len);
skb               125 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               135 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
skb               150 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	swp_spec.l3_proto = skb->protocol;
skb               155 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
skb               158 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
skb               161 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		swp_spec.tun_l3_proto = skb->protocol;
skb               165 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
skb               168 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
skb               177 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
skb               178 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		     MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
skb               186 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
skb               187 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	skb_store_bits(skb, iv_offset, &seqno, 8);
skb               190 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
skb               198 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
skb               199 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	skb_store_bits(skb, iv_offset, &seqno, 8);
skb               202 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
skb               209 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	if (skb_is_gso(skb)) {
skb               211 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		esph = ip_esp_hdr(skb);
skb               212 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		tcph = inner_tcp_hdr(skb);
skb               213 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		netdev_dbg(skb->dev, "   Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
skb               214 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			   skb->network_header,
skb               215 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			   skb->transport_header,
skb               216 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			   skb->inner_network_header,
skb               217 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			   skb->inner_transport_header);
skb               218 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		netdev_dbg(skb->dev, "   Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
skb               219 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			   skb->len, skb_shinfo(skb)->gso_size,
skb               223 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
skb               230 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	netdev_dbg(skb->dev, "   TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
skb               238 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 					  struct sk_buff *skb)
skb               241 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               248 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		return skb;
skb               250 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	sp = skb_sec_path(skb);
skb               256 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	x = xfrm_input_state(skb);
skb               263 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		     (skb->protocol != htons(ETH_P_IP) &&
skb               264 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		      skb->protocol != htons(ETH_P_IPV6)))) {
skb               269 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	if (!skb_is_gso(skb))
skb               270 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
skb               274 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	mdata = mlx5e_ipsec_add_metadata(skb);
skb               279 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
skb               281 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	sa_entry->set_iv_op(skb, x, xo);
skb               282 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	mlx5e_ipsec_set_metadata(skb, mdata, xo);
skb               284 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	return skb;
skb               287 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	kfree_skb(skb);
skb               292 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
skb               301 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	sp = secpath_set(skb);
skb               314 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	sp = skb_sec_path(skb);
skb               318 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	xo = xfrm_offload(skb);
skb               342 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 					  struct sk_buff *skb, u32 *cqe_bcnt)
skb               347 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	if (!is_metadata_hdr_valid(skb))
skb               348 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		return skb;
skb               351 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
skb               352 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
skb               354 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		kfree_skb(skb);
skb               358 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	remove_metadata_hdr(skb);
skb               361 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	return skb;
skb               364 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
skb               367 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	struct sec_path *sp = skb_sec_path(skb);
skb                45 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h 					  struct sk_buff *skb, u32 *cqe_bcnt);
skb                49 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
skb                51 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
skb                53 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
skb                57 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h 					  struct sk_buff *skb);
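
The ipsec_rxtx.c entries above show the driver prepending a per-packet metadata header between the MAC addresses and the original EtherType: skb_cow_head() to guarantee writable headroom, skb_push() plus a mac_header adjustment to grow the header, then a memmove of the two MAC addresses. A minimal sketch of that pattern follows; struct my_meta, MY_META_ETHERTYPE and my_push_metadata() are hypothetical names, not the driver's, and the EtherType value is an arbitrary placeholder.

	#include <linux/err.h>
	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	#define MY_META_ETHERTYPE 0x8CE4	/* hypothetical tag value */

	struct my_meta {
		__be16 tag;		/* driver-private cookie */
		__be16 orig_proto;	/* original EtherType lands here */
	};

	static struct my_meta *my_push_metadata(struct sk_buff *skb)
	{
		struct ethhdr *eth;
		struct my_meta *meta;

		/* Ensure writable headroom for the extra bytes. */
		if (unlikely(skb_cow_head(skb, sizeof(*meta))))
			return ERR_PTR(-ENOMEM);

		/* Grow the header; keep mac_header at the new start. */
		eth = (struct ethhdr *)skb_push(skb, sizeof(*meta));
		skb->mac_header -= sizeof(*meta);
		meta = (struct my_meta *)(eth + 1);

		/* Slide dst+src MAC to the new front; the bytes left in
		 * place mean the original EtherType ends up as the tail
		 * of the metadata region. */
		memmove(skb->data, skb->data + sizeof(*meta), 2 * ETH_ALEN);
		eth->h_proto = cpu_to_be16(MY_META_ETHERTYPE);

		return meta;
	}

A caller would test the result with IS_ERR() before filling in meta->tag; the receive side can then recognize such frames by the tag EtherType, as sketched further below.
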
skb                92 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h 					 struct sk_buff *skb,
skb               427 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 					 struct sk_buff *skb,
skb               437 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
skb               440 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
skb               444 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 	tls_ctx = tls_get_ctx(skb->sk);
skb               456 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 	seq = ntohl(tcp_hdr(skb)->seq);
skb               466 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 			if (likely(!skb->decrypted))
skb               480 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
skb               484 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 	return skb;
skb               487 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 	dev_kfree_skb_any(skb);
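
The ktls_tx.c entries above gate the TX path on the skb belonging to a device-offloaded TLS socket, then derive the TLS record payload length from the transport offset. A hedged sketch of those two checks, assuming the tls_is_sk_tx_device_offloaded() helper this tree uses; my_skb_is_ktls() is a hypothetical name.

	#include <linux/skbuff.h>
	#include <net/tcp.h>
	#include <net/tls.h>

	static bool my_skb_is_ktls(struct sk_buff *skb, unsigned int *payload)
	{
		if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
			return false;

		/* TLS record payload is everything past the TCP header. */
		*payload = skb->len -
			   (skb_transport_offset(skb) + tcp_hdrlen(skb));
		return true;
	}
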
skb                79 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
skb                84 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
skb                87 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
skb                88 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
skb                91 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
skb               143 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
skb               153 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	nskb->dev = skb->dev;
skb               155 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_set_network_header(nskb, skb_network_offset(skb));
skb               156 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_set_transport_header(nskb, skb_transport_offset(skb));
skb               157 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	memcpy(nskb->data, skb->data, headln);
skb               173 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
skb               184 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	nskb->queue_mapping = skb->queue_mapping;
skb               189 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		     struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb               194 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
skb               215 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               216 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		payload = skb->len - headln;
skb               220 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 			return skb;
skb               226 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
skb               231 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               239 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	context->expected_seq = tcp_seq + skb->len - headln;
skb               248 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
skb               252 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	return skb;
skb               255 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	dev_kfree_skb_any(skb);
skb               261 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 					struct sk_buff *skb,
skb               273 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
skb               277 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
skb               280 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
skb               284 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	tls_ctx = tls_get_ctx(skb->sk);
skb               288 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb_seq = ntohl(tcp_hdr(skb)->seq);
skb               293 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
skb               297 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
skb               299 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		dev_kfree_skb_any(skb);
skb               300 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		skb = NULL;
skb               306 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	return skb;
skb               310 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 				struct sk_buff *skb,
skb               346 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb->sk = sk;
skb               347 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb->destructor = sock_edemux;
skb               355 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
skb               361 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	if (!is_metadata_hdr_valid(skb))
skb               365 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
skb               368 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		skb->decrypted = 1;
skb               371 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		tls_update_resync_sn(netdev, skb, mdata);
skb               385 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	remove_metadata_hdr(skb);
skb                45 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h 					struct sk_buff *skb,
skb                49 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
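
On receive, the tls_rxtx.c entries read the metadata at skb->data + ETH_HLEN, act on it (mark skb->decrypted or trigger a resync) and then call remove_metadata_hdr(). That helper's body is not shown in this listing, but a plausible sketch of the inverse of the push above would be (struct my_meta as in the earlier sketch; the layout is an assumption):

	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	static void my_strip_metadata(struct sk_buff *skb)
	{
		struct my_meta {
			__be16 tag;
			__be16 orig_proto;
		};

		/* Slide the MAC addresses forward over the metadata; the
		 * original EtherType already sits right behind them. */
		memmove(skb->data + sizeof(struct my_meta), skb->data,
			2 * ETH_ALEN);
		skb_pull(skb, sizeof(struct my_meta));
	}

This runs before eth_type_trans() (see the en_rx.c entries below), which re-resolves the MAC header, so no mac_header fixup is needed here.
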
skb               671 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb               680 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
skb               687 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 	if (skb->encapsulation)
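
The en_arfs.c entries dissect the packet into a flow_keys tuple before programming a steering rule. A minimal sketch of that dissection step; my_flow_is_steerable() is a hypothetical helper, and the supported-protocol policy mirrors what the visible checks suggest (no encapsulation, plain TCP/UDP):

	#include <linux/if_ether.h>
	#include <linux/in.h>
	#include <linux/skbuff.h>
	#include <net/flow_dissector.h>

	static bool my_flow_is_steerable(const struct sk_buff *skb)
	{
		struct flow_keys fk;

		if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
			return false;

		/* Plain (non-encapsulated) TCP/UDP over IPv4/IPv6 only. */
		if (skb->encapsulation)
			return false;
		if (fk.basic.n_proto != htons(ETH_P_IP) &&
		    fk.basic.n_proto != htons(ETH_P_IPV6))
			return false;

		return fk.basic.ip_proto == IPPROTO_TCP ||
		       fk.basic.ip_proto == IPPROTO_UDP;
	}
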
skb              4266 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 						     struct sk_buff *skb,
skb              4274 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	switch (vlan_get_protocol(skb)) {
skb              4276 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		proto = ip_hdr(skb)->protocol;
skb              4279 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
skb              4294 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		udph = udp_hdr(skb);
skb              4313 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netdev_features_t mlx5e_features_check(struct sk_buff *skb,
skb              4319 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	features = vlan_features_check(skb, features);
skb              4320 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	features = vxlan_features_check(skb, features);
skb              4323 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (mlx5e_ipsec_feature_check(skb, netdev, features))
skb              4328 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (skb->encapsulation &&
skb              4330 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		return mlx5e_tunnel_features_check(priv, skb, features);
skb              4539 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
skb              4552 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
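
The mlx5e_features_check() entries above show the usual .ndo_features_check layering: generic VLAN and VXLAN restrictions first, then device-specific tunnel checks. A simplified sketch; where the driver calls its own mlx5e_tunnel_features_check(), this version conservatively drops checksum/GSO offloads for any encapsulated packet instead:

	#include <linux/if_vlan.h>
	#include <linux/netdevice.h>
	#include <net/vxlan.h>

	static netdev_features_t my_features_check(struct sk_buff *skb,
						   struct net_device *dev,
						   netdev_features_t features)
	{
		features = vlan_features_check(skb, features);
		features = vxlan_features_check(skb, features);

		/* Conservative fallback: no HW csum/GSO for tunnels the
		 * device cannot parse. */
		if (skb->encapsulation)
			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

		return features;
	}
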
skb               408 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
skb               416 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb               421 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
skb               431 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_copy_to_linear_data(skb, from, len);
skb               727 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
skb               730 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
skb               738 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
skb               741 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	ip_p = skb->data + network_depth;
skb               747 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
skb               767 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb               782 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				      struct sk_buff *skb)
skb               788 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
skb               791 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
skb               794 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	*proto = ((struct ethhdr *)skb->data)->h_proto;
skb               795 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	*proto = __vlan_get_protocol(skb, *proto, network_depth);
skb               798 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
skb               801 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
skb               806 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
skb               813 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
skb               816 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	ip = skb->data + network_depth;
skb               818 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 					 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
skb               823 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
skb               825 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	void *ip_p = skb->data + network_depth;
skb               836 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
skb               840 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->csum = csum_block_add(skb->csum,
skb               841 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				   skb_checksum(skb, offset, len, 0),
skb               846 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c tail_padding_csum(struct sk_buff *skb, int offset,
skb               850 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	int len = skb->len - offset;
skb               854 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		tail_padding_csum_slow(skb, offset, len, stats);
skb               858 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	tail = skb_header_pointer(skb, offset, len, tail_padding);
skb               860 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		tail_padding_csum_slow(skb, offset, len, stats);
skb               865 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
skb               869 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
skb               882 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->csum = csum_partial(skb->data + ETH_HLEN,
skb               884 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 					 skb->csum);
skb               889 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		ip4 = (struct iphdr *)(skb->data + network_depth);
skb               893 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
skb               900 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (likely(pkt_len >= skb->len))
skb               903 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	tail_padding_csum(skb, pkt_len, stats);
skb               909 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				     struct sk_buff *skb,
skb               920 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               937 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (short_frame(skb->len))
skb               940 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
skb               941 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
skb               945 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb               946 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
skb               952 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
skb               959 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               961 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			skb->csum_level = 1;
skb               962 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			skb->encapsulation = 1;
skb               970 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->ip_summed = CHECKSUM_NONE;
skb               979 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				      struct sk_buff *skb)
skb               985 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->mac_len = ETH_HLEN;
skb               988 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
skb               992 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb               993 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
skb              1003 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb_hwtstamps(skb)->hwtstamp =
skb              1006 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_record_rx_queue(skb, rq->ix);
skb              1009 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_skb_set_hash(cqe, skb);
skb              1012 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              1017 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
skb              1019 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
skb              1022 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_enable_ecn(rq, skb);
skb              1024 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              1030 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 					 struct sk_buff *skb)
skb              1036 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
skb              1044 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct sk_buff *skb = build_skb(va, frag_size);
skb              1046 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!skb)) {
skb              1051 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_reserve(skb, headroom);
skb              1052 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_put(skb, cqe_bcnt);
skb              1054 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	return skb;
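
mlx5e_build_linear_skb() above is the zero-copy receive idiom: wrap the already-filled DMA buffer with build_skb() instead of allocating an skb and copying into it. A standalone sketch (my_build_rx_skb() is a hypothetical name; frag_size must include room for the skb_shared_info tail):

	#include <linux/skbuff.h>

	static struct sk_buff *my_build_rx_skb(void *va, unsigned int frag_size,
					       u16 headroom, u32 bytes)
	{
		struct sk_buff *skb = build_skb(va, frag_size);

		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, headroom);	/* skip unused prefix */
		skb_put(skb, bytes);		/* expose received payload */

		return skb;
	}
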
skb              1063 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct sk_buff *skb;
skb              1083 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
skb              1084 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!skb))
skb              1090 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	return skb;
skb              1102 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct sk_buff *skb;
skb              1107 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = napi_alloc_skb(rq->cq.napi,
skb              1109 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!skb)) {
skb              1114 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	prefetchw(skb->data);
skb              1120 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
skb              1129 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
skb              1131 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->tail += headlen;
skb              1132 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->len  += headlen;
skb              1134 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	return skb;
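
The nonlinear receive path above does a header split: napi_alloc_skb() for a small linear area, skb_add_rx_frag() to attach the payload page, then a header copy that manually advances skb->tail and skb->len, because skb_copy_to_linear_data() does not. A sketch of that shape, assuming the caller already holds a page reference; using PAGE_SIZE as the truesize is a simplification:

	#include <linux/skbuff.h>

	static struct sk_buff *my_rx_split(struct napi_struct *napi, void *hdr,
					   u16 headlen, struct page *page,
					   u32 page_off, u32 payload_len)
	{
		struct sk_buff *skb = napi_alloc_skb(napi, headlen);

		if (unlikely(!skb))
			return NULL;

		/* Payload is referenced, not copied. */
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				page_off, payload_len, PAGE_SIZE);

		/* Headers go into the linear part; fix tail/len by hand. */
		skb_copy_to_linear_data(skb, hdr, headlen);
		skb->tail += headlen;
		skb->len  += headlen;

		return skb;
	}
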
skb              1150 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct sk_buff *skb;
skb              1164 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
skb              1168 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (!skb) {
skb              1179 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
skb              1180 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	napi_gro_receive(rq->cq.napi, skb);
skb              1197 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct sk_buff *skb;
skb              1210 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
skb              1211 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (!skb) {
skb              1222 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
skb              1224 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rep->vlan && skb_vlan_tag_present(skb))
skb              1225 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb_vlan_pop(skb);
skb              1227 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	napi_gro_receive(rq->cq.napi, skb);
skb              1245 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct sk_buff *skb;
skb              1247 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = napi_alloc_skb(rq->cq.napi,
skb              1249 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!skb)) {
skb              1254 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	prefetchw(skb->data);
skb              1267 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_add_skb_frag(rq, skb, di, frag_offset,
skb              1274 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
skb              1276 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->tail += headlen;
skb              1277 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->len  += headlen;
skb              1279 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	return skb;
skb              1289 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct sk_buff *skb;
skb              1318 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
skb              1319 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!skb))
skb              1325 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	return skb;
skb              1339 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct sk_buff *skb;
skb              1360 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
skb              1364 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (!skb)
skb              1367 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
skb              1368 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	napi_gro_receive(rq->cq.napi, skb);
skb              1437 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 					 struct sk_buff *skb)
skb              1456 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->dev = NULL;
skb              1466 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
skb              1468 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->pkt_type = PACKET_HOST;
skb              1470 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->pkt_type = PACKET_BROADCAST;
skb              1472 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->pkt_type = PACKET_MULTICAST;
skb              1478 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_pull(skb, MLX5_IB_GRH_BYTES);
skb              1480 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->protocol = *((__be16 *)(skb->data));
skb              1483 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb              1484 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
skb              1487 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->ip_summed = CHECKSUM_NONE;
skb              1492 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb_hwtstamps(skb)->hwtstamp =
skb              1495 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_record_rx_queue(skb, rq->ix);
skb              1498 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_skb_set_hash(cqe, skb);
skb              1501 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
skb              1503 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_reset_mac_header(skb);
skb              1504 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_pull(skb, MLX5_IPOIB_HARD_LEN);
skb              1506 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->dev = netdev;
skb              1516 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct sk_buff *skb;
skb              1529 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
skb              1533 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (!skb)
skb              1536 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
skb              1537 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!skb->dev)) {
skb              1538 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		dev_kfree_skb_any(skb);
skb              1541 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	napi_gro_receive(rq->cq.napi, skb);
skb              1556 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct sk_buff *skb;
skb              1569 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
skb              1573 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
skb              1576 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
skb              1577 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!skb))
skb              1580 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
skb              1581 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	napi_gro_receive(rq->cq.napi, skb);
skb               105 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	struct sk_buff *skb = NULL;
skb               112 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
skb               113 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	if (!skb) {
skb               118 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	prefetchw(skb->data);
skb               119 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb_reserve(skb, NET_IP_ALIGN);
skb               122 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	ethh = skb_push(skb, ETH_HLEN);
skb               123 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb_reset_mac_header(skb);
skb               125 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb_set_network_header(skb, skb->len);
skb               126 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	iph = skb_put(skb, sizeof(struct iphdr));
skb               128 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb_set_transport_header(skb, skb->len);
skb               129 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	udph = skb_put(skb, sizeof(struct udphdr));
skb               158 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	mlxh = skb_put(skb, sizeof(*mlxh));
skb               162 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb->csum = 0;
skb               163 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb               164 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	udp4_hwcsum(skb, iph->saddr, iph->daddr);
skb               166 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb->protocol = htons(ETH_P_IP);
skb               167 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb->pkt_type = PACKET_HOST;
skb               168 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb->dev = priv->netdev;
skb               170 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	return skb;
skb               181 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c mlx5e_test_loopback_validate(struct sk_buff *skb,
skb               193 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
skb               196 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	ethh = (struct ethhdr *)skb_mac_header(skb);
skb               200 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	iph = ip_hdr(skb);
skb               217 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	kfree_skb(skb);
skb               273 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	struct sk_buff *skb = NULL;
skb               291 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb = mlx5e_test_get_udp_skb(priv);
skb               292 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	if (!skb) {
skb               297 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	skb_set_queue_mapping(skb, 0);
skb               298 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	err = dev_queue_xmit(skb);
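
mlx5e_test_get_udp_skb() above hand-rolls a loopback probe frame. A condensed, hedged sketch of the same construction; addresses, ports and the 64-byte payload are arbitrary test values, and skb_put_zero() keeps uninitialized header fields from leaking:

	#include <linux/etherdevice.h>
	#include <linux/if_packet.h>
	#include <linux/in.h>
	#include <linux/ip.h>
	#include <linux/skbuff.h>
	#include <linux/udp.h>
	#include <net/ip.h>

	static struct sk_buff *my_udp_probe(struct net_device *dev)
	{
		int datalen = 64;	/* arbitrary payload size */
		struct sk_buff *skb;
		struct ethhdr *ethh;
		struct iphdr *iph;
		struct udphdr *udph;

		skb = netdev_alloc_skb(dev, NET_IP_ALIGN + ETH_HLEN +
				       sizeof(*iph) + sizeof(*udph) + datalen);
		if (!skb)
			return NULL;
		skb_reserve(skb, NET_IP_ALIGN);

		ethh = skb_put(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		eth_broadcast_addr(ethh->h_dest);
		ether_addr_copy(ethh->h_source, dev->dev_addr);
		ethh->h_proto = htons(ETH_P_IP);

		skb_set_network_header(skb, skb->len);
		iph = skb_put_zero(skb, sizeof(*iph));
		iph->version = 4;
		iph->ihl = 5;
		iph->ttl = 32;
		iph->protocol = IPPROTO_UDP;
		iph->saddr = htonl(INADDR_LOOPBACK);
		iph->daddr = htonl(INADDR_LOOPBACK);
		iph->tot_len = htons(sizeof(*iph) + sizeof(*udph) + datalen);
		ip_send_check(iph);

		skb_set_transport_header(skb, skb->len);
		udph = skb_put_zero(skb, sizeof(*udph));
		udph->source = htons(9);	/* discard port, arbitrary */
		udph->dest = htons(9);
		udph->len = htons(sizeof(*udph) + datalen);
		udph->check = 0;		/* optional for UDP over IPv4 */

		skb_put_zero(skb, datalen);	/* dummy payload */

		skb->protocol = htons(ETH_P_IP);
		skb->pkt_type = PACKET_HOST;
		skb->dev = dev;
		return skb;
	}
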
skb                57 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
skb                61 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	if (skb->protocol == htons(ETH_P_IP))
skb                62 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
skb                63 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	else if (skb->protocol == htons(ETH_P_IPV6))
skb                64 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
skb                70 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
skb                73 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	int txq_ix = netdev_pick_tx(dev, skb, NULL);
skb                83 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		up = mlx5e_get_dscp_up(priv, skb);
skb                86 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		if (skb_vlan_tag_present(skb))
skb                87 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 			up = skb_vlan_tag_get_prio(skb);
skb                99 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
skb               103 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
skb               106 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
skb               108 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	if (skb_transport_header_was_set(skb))
skb               109 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		return skb_transport_offset(skb);
skb               111 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		return mlx5e_skb_l2_header_offset(skb);
skb               115 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 					struct sk_buff *skb)
skb               123 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
skb               124 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
skb               128 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		hlen = mlx5e_skb_l3_header_offset(skb);
skb               132 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		hlen = mlx5e_skb_l2_header_offset(skb);
skb               134 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	return min_t(u16, hlen, skb_headlen(skb));
skb               137 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
skb               143 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	memcpy(vhdr, skb->data, cpy1_sz);
skb               144 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	vhdr->h_vlan_proto = skb->vlan_proto;
skb               145 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
skb               146 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
skb               150 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
skb               152 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
skb               154 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		if (skb->encapsulation) {
skb               167 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
skb               172 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	if (skb->encapsulation) {
skb               173 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
skb               175 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		stats->tso_inner_bytes += skb->len - ihs;
skb               177 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
skb               178 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
skb               180 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               182 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		stats->tso_bytes += skb->len - ihs;
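
mlx5e_tx_get_gso_ihs() above selects which headers get inlined for TSO, and the xmit entries below use it for wire-byte accounting: every GSO segment repeats those headers, but skb->len counts them only once. The two computations, restated as a sketch with hypothetical names:

	#include <linux/skbuff.h>
	#include <linux/tcp.h>
	#include <linux/udp.h>

	static u16 my_gso_ihs(const struct sk_buff *skb)
	{
		if (skb->encapsulation)	/* tunnel: inline up to inner TCP */
			return skb_inner_transport_offset(skb) +
			       inner_tcp_hdrlen(skb);
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			return skb_transport_offset(skb) + sizeof(struct udphdr);
		return skb_transport_offset(skb) + tcp_hdrlen(skb);
	}

	static u32 my_gso_wire_bytes(const struct sk_buff *skb, u16 ihs)
	{
		/* Headers are replicated in all but the first segment. */
		return skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	}
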
skb               189 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb               212 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               213 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               238 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb               249 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	wi->skb = skb;
skb               254 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb               255 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb               269 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb               288 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	if (skb_is_gso(skb)) {
skb               290 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
skb               291 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
skb               292 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
skb               293 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		stats->packets += skb_shinfo(skb)->gso_segs;
skb               295 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);
skb               299 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		ihs       = mlx5e_calc_min_inline(mode, skb);
skb               300 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
skb               307 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	headlen = skb->len - ihs - skb->data_len;
skb               309 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	ds_cnt += skb_shinfo(skb)->nr_frags;
skb               312 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;
skb               344 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	if (skb->encapsulation)
skb               345 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		mlx5e_tx_tunnel_accel(skb, eseg);
skb               347 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
skb               353 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		if (skb_vlan_tag_present(skb)) {
skb               355 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
skb               358 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 			memcpy(eseg->inline_hdr.start, skb->data, ihs);
skb               361 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	} else if (skb_vlan_tag_present(skb)) {
skb               363 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
skb               365 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
skb               369 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
skb               373 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
skb               380 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	dev_kfree_skb_any(skb);
skb               385 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
skb               392 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
skb               396 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
skb               397 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	if (unlikely(!skb))
skb               400 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
skb               474 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 			struct sk_buff *skb;
skb               482 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 			skb = wi->skb;
skb               484 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 			if (unlikely(!skb)) {
skb               490 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 			if (unlikely(skb_shinfo(skb)->tx_flags &
skb               497 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 				skb_tstamp_tx(skb, &hwts);
skb               510 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 			napi_consume_skb(skb, napi_budget);
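
The completion entries above show the TX timestamp/recycle pair: report a hardware timestamp only when one was requested at transmit time, then hand the skb to NAPI bulk freeing. A sketch; my_cycles_to_ns() stands in for the driver's clock conversion and is purely hypothetical:

	#include <linux/skbuff.h>

	static u64 my_cycles_to_ns(u64 cycles)
	{
		return cycles;	/* placeholder; real drivers use a timecounter */
	}

	static void my_tx_complete(struct sk_buff *skb, u64 hw_cycles,
				   int napi_budget)
	{
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
			struct skb_shared_hwtstamps hwts = {
				.hwtstamp = ns_to_ktime(my_cycles_to_ns(hw_cycles)),
			};

			skb_tstamp_tx(skb, &hwts);
		}

		/* Defers the free so NAPI can batch it. */
		napi_consume_skb(skb, napi_budget);
	}
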
skb               542 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	struct sk_buff *skb;
skb               551 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		skb = wi->skb;
skb               553 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		if (!skb) {
skb               566 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		dev_kfree_skb_any(skb);
skb               588 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb               611 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	if (skb_is_gso(skb)) {
skb               613 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
skb               614 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
skb               615 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
skb               616 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		stats->packets += skb_shinfo(skb)->gso_segs;
skb               618 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);
skb               622 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		ihs       = mlx5e_calc_min_inline(mode, skb);
skb               623 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
skb               630 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	headlen = skb->len - ihs - skb->data_len;
skb               632 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	ds_cnt += skb_shinfo(skb)->nr_frags;
skb               658 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
skb               663 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		memcpy(eseg->inline_hdr.start, skb->data, ihs);
skb               668 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
skb               672 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
skb               679 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	dev_kfree_skb_any(skb);
skb               626 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
skb               630 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	struct mlx5e_txqsq *sq   = epriv->txq2sq[skb_get_queue_mapping(skb)];
skb               634 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
skb               123 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb               319 drivers/net/ethernet/mellanox/mlxsw/core.c static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
skb               321 drivers/net/ethernet/mellanox/mlxsw/core.c 	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
skb               329 drivers/net/ethernet/mellanox/mlxsw/core.c 	skb_reset_mac_header(skb);
skb               334 drivers/net/ethernet/mellanox/mlxsw/core.c static void mlxsw_emad_construct(struct sk_buff *skb,
skb               342 drivers/net/ethernet/mellanox/mlxsw/core.c 	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
skb               345 drivers/net/ethernet/mellanox/mlxsw/core.c 	buf = skb_push(skb, reg->len + sizeof(u32));
skb               348 drivers/net/ethernet/mellanox/mlxsw/core.c 	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
skb               351 drivers/net/ethernet/mellanox/mlxsw/core.c 	mlxsw_emad_construct_eth_hdr(skb);
skb               354 drivers/net/ethernet/mellanox/mlxsw/core.c static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
skb               356 drivers/net/ethernet/mellanox/mlxsw/core.c 	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
skb               359 drivers/net/ethernet/mellanox/mlxsw/core.c static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
skb               361 drivers/net/ethernet/mellanox/mlxsw/core.c 	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
skb               370 drivers/net/ethernet/mellanox/mlxsw/core.c static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
skb               374 drivers/net/ethernet/mellanox/mlxsw/core.c 	op_tlv = mlxsw_emad_op_tlv(skb);
skb               378 drivers/net/ethernet/mellanox/mlxsw/core.c static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
skb               382 drivers/net/ethernet/mellanox/mlxsw/core.c 	op_tlv = mlxsw_emad_op_tlv(skb);
skb               411 drivers/net/ethernet/mellanox/mlxsw/core.c mlxsw_emad_process_status_skb(struct sk_buff *skb,
skb               414 drivers/net/ethernet/mellanox/mlxsw/core.c 	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
skb               453 drivers/net/ethernet/mellanox/mlxsw/core.c 	struct sk_buff *skb;
skb               456 drivers/net/ethernet/mellanox/mlxsw/core.c 	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
skb               457 drivers/net/ethernet/mellanox/mlxsw/core.c 	if (!skb)
skb               461 drivers/net/ethernet/mellanox/mlxsw/core.c 			    skb->data + mlxsw_core->driver->txhdr_len,
skb               462 drivers/net/ethernet/mellanox/mlxsw/core.c 			    skb->len - mlxsw_core->driver->txhdr_len);
skb               465 drivers/net/ethernet/mellanox/mlxsw/core.c 	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
skb               467 drivers/net/ethernet/mellanox/mlxsw/core.c 		dev_kfree_skb(skb);
skb               516 drivers/net/ethernet/mellanox/mlxsw/core.c 					struct sk_buff *skb)
skb               523 drivers/net/ethernet/mellanox/mlxsw/core.c 	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
skb               528 drivers/net/ethernet/mellanox/mlxsw/core.c 			char *op_tlv = mlxsw_emad_op_tlv(skb);
skb               540 drivers/net/ethernet/mellanox/mlxsw/core.c static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
skb               547 drivers/net/ethernet/mellanox/mlxsw/core.c 			    skb->data, skb->len);
skb               549 drivers/net/ethernet/mellanox/mlxsw/core.c 	if (!mlxsw_emad_is_resp(skb))
skb               553 drivers/net/ethernet/mellanox/mlxsw/core.c 		if (mlxsw_emad_get_tid(skb) == trans->tid) {
skb               554 drivers/net/ethernet/mellanox/mlxsw/core.c 			mlxsw_emad_process_response(mlxsw_core, trans, skb);
skb               560 drivers/net/ethernet/mellanox/mlxsw/core.c 	dev_kfree_skb(skb);
skb               626 drivers/net/ethernet/mellanox/mlxsw/core.c 	struct sk_buff *skb;
skb               635 drivers/net/ethernet/mellanox/mlxsw/core.c 	skb = netdev_alloc_skb(NULL, emad_len);
skb               636 drivers/net/ethernet/mellanox/mlxsw/core.c 	if (!skb)
skb               638 drivers/net/ethernet/mellanox/mlxsw/core.c 	memset(skb->data, 0, emad_len);
skb               639 drivers/net/ethernet/mellanox/mlxsw/core.c 	skb_reserve(skb, emad_len);
skb               641 drivers/net/ethernet/mellanox/mlxsw/core.c 	return skb;
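
mlxsw_emad_alloc() above reserves the entire frame length as headroom so that mlxsw_emad_construct() can build the EMAD inside-out: each skb_push() lands one layer in front of the previous. A toy sketch of that reserve-then-push construction (header sizes are illustrative, not the EMAD TLV sizes):

	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	#define MY_HDR_LEN 8	/* illustrative per-layer header size */

	static struct sk_buff *my_build_frame(u16 payload_len)
	{
		unsigned int len = ETH_HLEN + MY_HDR_LEN + payload_len;
		struct sk_buff *skb = netdev_alloc_skb(NULL, len);

		if (!skb)
			return NULL;
		skb_reserve(skb, len);		/* all of it is headroom */

		memset(skb_push(skb, payload_len), 0, payload_len);
		memset(skb_push(skb, MY_HDR_LEN), 0, MY_HDR_LEN);
		skb_push(skb, ETH_HLEN);	/* outermost layer last */
		skb_reset_mac_header(skb);

		return skb;
	}
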
skb               653 drivers/net/ethernet/mellanox/mlxsw/core.c 	struct sk_buff *skb;
skb               660 drivers/net/ethernet/mellanox/mlxsw/core.c 	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
skb               661 drivers/net/ethernet/mellanox/mlxsw/core.c 	if (!skb)
skb               666 drivers/net/ethernet/mellanox/mlxsw/core.c 	trans->tx_skb = skb;
skb               677 drivers/net/ethernet/mellanox/mlxsw/core.c 	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
skb               678 drivers/net/ethernet/mellanox/mlxsw/core.c 	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
skb              1303 drivers/net/ethernet/mellanox/mlxsw/core.c int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
skb              1306 drivers/net/ethernet/mellanox/mlxsw/core.c 	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
skb              1312 drivers/net/ethernet/mellanox/mlxsw/core.c 				struct sk_buff *skb, u8 local_port)
skb              1315 drivers/net/ethernet/mellanox/mlxsw/core.c 		mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
skb              1378 drivers/net/ethernet/mellanox/mlxsw/core.c static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
skb              1384 drivers/net/ethernet/mellanox/mlxsw/core.c 	char *op_tlv = mlxsw_emad_op_tlv(skb);
skb              1385 drivers/net/ethernet/mellanox/mlxsw/core.c 	char *reg_tlv = mlxsw_emad_reg_tlv(skb);
skb              1391 drivers/net/ethernet/mellanox/mlxsw/core.c 	dev_kfree_skb(skb);
skb              1765 drivers/net/ethernet/mellanox/mlxsw/core.c void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
skb              1808 drivers/net/ethernet/mellanox/mlxsw/core.c 	rxl->func(skb, local_port, rxl_item->priv);
skb              1812 drivers/net/ethernet/mellanox/mlxsw/core.c 	dev_kfree_skb(skb);
skb                49 drivers/net/ethernet/mellanox/mlxsw/core.h int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
skb                52 drivers/net/ethernet/mellanox/mlxsw/core.h 				struct sk_buff *skb, u8 local_port);
skb                55 drivers/net/ethernet/mellanox/mlxsw/core.h 	void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
skb               163 drivers/net/ethernet/mellanox/mlxsw/core.h void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
skb               309 drivers/net/ethernet/mellanox/mlxsw/core.h 	void (*txhdr_construct)(struct sk_buff *skb,
skb               323 drivers/net/ethernet/mellanox/mlxsw/core.h 				struct sk_buff *skb, u8 local_port);
skb               364 drivers/net/ethernet/mellanox/mlxsw/core.h 	int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
skb               451 drivers/net/ethernet/mellanox/mlxsw/core.h static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
skb               453 drivers/net/ethernet/mellanox/mlxsw/core.h 	BUILD_BUG_ON(sizeof(struct mlxsw_skb_cb) > sizeof(skb->cb));
skb               454 drivers/net/ethernet/mellanox/mlxsw/core.h 	return (struct mlxsw_skb_cb *) skb->cb;
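
mlxsw_skb_cb() above is the standard private control-block idiom: per-packet driver state lives in the 48-byte skb->cb scratch area, guarded by a compile-time size check (note the check must name the struct type, not the accessor function, to be meaningful). A generic sketch with hypothetical names:

	#include <linux/skbuff.h>

	struct my_skb_cb {
		u32 cookie;	/* per-packet driver state */
	};

	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
	{
		BUILD_BUG_ON(sizeof(struct my_skb_cb) > sizeof(skb->cb));
		return (struct my_skb_cb *)skb->cb;
	}

Usage is then simply my_skb_cb(skb)->cookie = 42; cb contents are owned by whichever layer currently holds the skb, which is why mlxsw_sp_port_xmit() below clears it before handing the packet down.
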
skb               502 drivers/net/ethernet/mellanox/mlxsw/i2c.c static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb,
skb                63 drivers/net/ethernet/mellanox/mlxsw/pci.c 			struct sk_buff *skb;
skb                66 drivers/net/ethernet/mellanox/mlxsw/pci.c 			struct sk_buff *skb;
skb               350 drivers/net/ethernet/mellanox/mlxsw/pci.c 	struct sk_buff *skb;
skb               353 drivers/net/ethernet/mellanox/mlxsw/pci.c 	elem_info->u.rdq.skb = NULL;
skb               354 drivers/net/ethernet/mellanox/mlxsw/pci.c 	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
skb               355 drivers/net/ethernet/mellanox/mlxsw/pci.c 	if (!skb)
skb               360 drivers/net/ethernet/mellanox/mlxsw/pci.c 	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
skb               365 drivers/net/ethernet/mellanox/mlxsw/pci.c 	elem_info->u.rdq.skb = skb;
skb               369 drivers/net/ethernet/mellanox/mlxsw/pci.c 	dev_kfree_skb_any(skb);
skb               376 drivers/net/ethernet/mellanox/mlxsw/pci.c 	struct sk_buff *skb;
skb               379 drivers/net/ethernet/mellanox/mlxsw/pci.c 	skb = elem_info->u.rdq.skb;
skb               383 drivers/net/ethernet/mellanox/mlxsw/pci.c 	dev_kfree_skb_any(skb);
skb               513 drivers/net/ethernet/mellanox/mlxsw/pci.c 	struct sk_buff *skb;
skb               518 drivers/net/ethernet/mellanox/mlxsw/pci.c 	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
skb               519 drivers/net/ethernet/mellanox/mlxsw/pci.c 	skb = elem_info->u.sdq.skb;
skb               525 drivers/net/ethernet/mellanox/mlxsw/pci.c 		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb               526 drivers/net/ethernet/mellanox/mlxsw/pci.c 		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
skb               528 drivers/net/ethernet/mellanox/mlxsw/pci.c 		skb = NULL;
skb               531 drivers/net/ethernet/mellanox/mlxsw/pci.c 	if (skb)
skb               532 drivers/net/ethernet/mellanox/mlxsw/pci.c 		dev_kfree_skb_any(skb);
skb               533 drivers/net/ethernet/mellanox/mlxsw/pci.c 	elem_info->u.sdq.skb = NULL;
skb               548 drivers/net/ethernet/mellanox/mlxsw/pci.c 	struct sk_buff *skb;
skb               554 drivers/net/ethernet/mellanox/mlxsw/pci.c 	skb = elem_info->u.sdq.skb;
skb               555 drivers/net/ethernet/mellanox/mlxsw/pci.c 	if (!skb)
skb               578 drivers/net/ethernet/mellanox/mlxsw/pci.c 	skb_put(skb, byte_count);
skb               579 drivers/net/ethernet/mellanox/mlxsw/pci.c 	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
skb              1565 drivers/net/ethernet/mellanox/mlxsw/pci.c static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
skb              1575 drivers/net/ethernet/mellanox/mlxsw/pci.c 	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
skb              1576 drivers/net/ethernet/mellanox/mlxsw/pci.c 		err = skb_linearize(skb);
skb              1589 drivers/net/ethernet/mellanox/mlxsw/pci.c 	mlxsw_skb_cb(skb)->tx_info = *tx_info;
skb              1590 drivers/net/ethernet/mellanox/mlxsw/pci.c 	elem_info->u.sdq.skb = skb;
skb              1597 drivers/net/ethernet/mellanox/mlxsw/pci.c 	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
skb              1598 drivers/net/ethernet/mellanox/mlxsw/pci.c 				     skb_headlen(skb), DMA_TO_DEVICE);
skb              1602 drivers/net/ethernet/mellanox/mlxsw/pci.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1603 drivers/net/ethernet/mellanox/mlxsw/pci.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1613 drivers/net/ethernet/mellanox/mlxsw/pci.c 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb              1614 drivers/net/ethernet/mellanox/mlxsw/pci.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
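
mlxsw_pci_skb_transmit() shows the usual guard for hardware with a fixed scatter/gather budget: if the skb carries more fragments than descriptor slots, fall back to skb_linearize(), then map the linear head plus each remaining frag, and set SKBTX_IN_PROGRESS when a hardware TX timestamp was requested. Schematically (MY_SG_ENTRIES and my_map() are assumptions):

#include <linux/skbuff.h>

#define MY_SG_ENTRIES 3	/* assumed hardware S/G limit per descriptor */

void my_map(void *addr, unsigned int len);	/* assumed DMA-map helper */

static int my_xmit_prepare(struct sk_buff *skb)
{
	int i;

	/* The linear head uses one entry; linearize if the frags don't fit. */
	if (skb_shinfo(skb)->nr_frags > MY_SG_ENTRIES - 1) {
		int err = skb_linearize(skb);

		if (err)
			return err;
	}

	my_map(skb->data, skb_headlen(skb));
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		my_map(skb_frag_address(frag), skb_frag_size(frag));
	}

	/* Tell the stack a hardware timestamp will arrive on completion. */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	return 0;
}
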
skb               162 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
skb               168 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
skb               521 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
skb               524 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
skb               802 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
skb               815 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
skb               817 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		dev_kfree_skb_any(skb);
skb               821 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
skb               826 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	if (eth_skb_pad(skb)) {
skb               831 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	mlxsw_sp_txhdr_construct(skb, &tx_info);
skb               835 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	len = skb->len - MLXSW_TXHDR_LEN;
skb               840 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
skb               850 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		dev_kfree_skb_any(skb);
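
mlxsw_sp_port_xmit() illustrates prepending a device TX header: skb_cow_head() guarantees writable headroom, eth_skb_pad() pads runt frames (freeing the skb itself on failure), and skb_push() claims the header bytes. A minimal sketch with an invented header size and transmit hook:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

#define MY_TXHDR_LEN 16	/* assumed device TX-header size */

void my_hw_transmit(struct sk_buff *skb);	/* assumed bus-layer hook */

static netdev_tx_t my_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	char *txhdr;

	/* Guarantee writable headroom for the device header. */
	if (skb_cow_head(skb, MY_TXHDR_LEN)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* eth_skb_pad() frees the skb itself on failure. */
	if (eth_skb_pad(skb))
		return NETDEV_TX_OK;

	txhdr = skb_push(skb, MY_TXHDR_LEN);
	memset(txhdr, 0, MY_TXHDR_LEN);	/* real drivers encode port etc. */

	my_hw_transmit(skb);
	return NETDEV_TX_OK;
}
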
skb              4260 drivers/net/ethernet/mellanox/mlxsw/spectrum.c void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
skb              4273 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	skb->dev = mlxsw_sp_port->dev;
skb              4278 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	pcpu_stats->rx_bytes += skb->len;
skb              4281 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	skb->protocol = eth_type_trans(skb, skb->dev);
skb              4282 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	netif_receive_skb(skb);
skb              4285 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
skb              4288 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	skb->offload_fwd_mark = 1;
skb              4289 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
skb              4292 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
skb              4295 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	skb->offload_l3_fwd_mark = 1;
skb              4296 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	skb->offload_fwd_mark = 1;
skb              4297 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
skb              4300 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
skb              4320 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		  mlxsw_sp_port->sample->trunc_size : skb->len;
skb              4326 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	psample_sample_packet(psample_group, skb, size,
skb              4332 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	consume_skb(skb);
skb              4335 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
skb              4340 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
skb              5367 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 				     struct sk_buff *skb, u8 local_port)
skb              5371 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	skb_pull(skb, MLXSW_TXHDR_LEN);
skb              5372 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
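
The spectrum.c RX listeners share one shape: attach the receiving netdev, bump per-CPU stats under a u64_stats sync point, optionally set skb->offload_fwd_mark (a CONFIG_NET_SWITCHDEV field) so the bridge does not re-forward a frame the ASIC already forwarded, then eth_type_trans() and netif_receive_skb(). Condensed, with an assumed stats struct:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

struct my_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

static void my_rx_listener(struct sk_buff *skb, struct net_device *dev,
			   struct my_pcpu_stats __percpu *stats,
			   bool hw_forwarded)
{
	struct my_pcpu_stats *s;

	skb->dev = dev;

	s = this_cpu_ptr(stats);
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	/* The ASIC already forwarded this frame; stop the bridge from
	 * doing it again in software.
	 */
	if (hw_forwarded)
		skb->offload_fwd_mark = 1;

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
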
skb               463 drivers/net/ethernet/mellanox/mlxsw/spectrum.h void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
skb                78 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 						  struct sk_buff *skb)
skb                87 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	err = devlink_dpipe_action_put(skb, &action);
skb                95 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	return devlink_dpipe_action_put(skb, &action);
skb                99 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 						  struct sk_buff *skb)
skb               107 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	return devlink_dpipe_match_put(skb, &match);
skb               310 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c static int mlxsw_sp_dpipe_table_host_matches_dump(struct sk_buff *skb, int type)
skb               319 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	err = devlink_dpipe_match_put(skb, &match);
skb               339 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	return devlink_dpipe_match_put(skb, &match);
skb               343 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_dpipe_table_host4_matches_dump(void *priv, struct sk_buff *skb)
skb               345 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	return mlxsw_sp_dpipe_table_host_matches_dump(skb, AF_INET);
skb               349 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_dpipe_table_host_actions_dump(void *priv, struct sk_buff *skb)
skb               357 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	return devlink_dpipe_action_put(skb, &action);
skb               782 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_dpipe_table_host6_matches_dump(void *priv, struct sk_buff *skb)
skb               784 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	return mlxsw_sp_dpipe_table_host_matches_dump(skb, AF_INET6);
skb               859 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 						 struct sk_buff *skb)
skb               868 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	err = devlink_dpipe_match_put(skb, &match);
skb               876 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	err = devlink_dpipe_match_put(skb, &match);
skb               884 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	return devlink_dpipe_match_put(skb, &match);
skb               888 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 						 struct sk_buff *skb)
skb               897 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	err = devlink_dpipe_action_put(skb, &action);
skb               905 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c 	return devlink_dpipe_action_put(skb, &action);
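
The spectrum_dpipe.c hits are not packet handling at all: there the sk_buff is a netlink message being filled by dpipe dump callbacks via devlink_dpipe_match_put()/devlink_dpipe_action_put(). A rough matches_dump callback, using the generic Ethernet header devlink already exports:

#include <net/devlink.h>

static int my_table_matches_dump(void *priv, struct sk_buff *skb)
{
	struct devlink_dpipe_match match = {
		.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT,
		.header = &devlink_dpipe_header_ethernet,
		.field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC,
	};

	/* Serializes the match description into the netlink message. */
	return devlink_dpipe_match_put(skb, &match);
}
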
skb                50 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	struct sk_buff *skb;
skb               312 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
skb               321 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	data = skb_mac_header(skb);
skb               322 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	ptp_class = ptp_classify_raw(skb);
skb               350 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	if (skb->len < offset + 34)
skb               365 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 			     struct sk_buff *skb,
skb               378 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	unmatched->skb = skb;
skb               427 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 					struct sk_buff *skb, u8 local_port,
skb               438 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	if (!(mlxsw_sp_port && (!skb->dev || skb->dev == mlxsw_sp_port->dev))) {
skb               439 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		dev_kfree_skb_any(skb);
skb               445 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 			*skb_hwtstamps(skb) = *hwtstamps;
skb               446 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
skb               449 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		skb_tstamp_tx(skb, hwtstamps);
skb               450 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		dev_kfree_skb_any(skb);
skb               456 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 				       struct sk_buff *skb,
skb               467 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
skb               475 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	if (unmatched->skb && unmatched->timestamp)
skb               477 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 					   unmatched->skb,
skb               479 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	else if (unmatched->skb)
skb               480 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		mlxsw_sp1_ptp_packet_finish(mlxsw_sp, unmatched->skb,
skb               493 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	if (unmatched->skb)
skb               494 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		dev_kfree_skb_any(unmatched->skb);
skb               500 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 				    struct sk_buff *skb, u64 timestamp)
skb               511 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	if (skb && unmatched && unmatched->timestamp) {
skb               512 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		unmatched->skb = skb;
skb               513 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	} else if (timestamp && unmatched && unmatched->skb) {
skb               521 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 							   skb, timestamp);
skb               524 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		if (err && skb)
skb               525 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 			mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
skb               545 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 				     struct sk_buff *skb, u8 local_port,
skb               566 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	err = mlxsw_sp_ptp_parse(skb, &key.domain_number, &key.message_type,
skb               577 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, skb, 0);
skb               581 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, local_port, ingress, NULL);
skb               616 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
skb               619 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	skb_reset_mac_header(skb);
skb               620 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true);
skb               624 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 			       struct sk_buff *skb, u8 local_port)
skb               626 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 	mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false);
skb               660 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		if (unmatched->skb)
skb                40 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
skb                44 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h 			       struct sk_buff *skb, u8 local_port);
skb                90 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h 					 struct sk_buff *skb, u8 local_port)
skb                92 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h 	mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
skb                96 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h 					     struct sk_buff *skb, u8 local_port)
skb                98 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h 	dev_kfree_skb_any(skb);
skb               169 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h 					 struct sk_buff *skb, u8 local_port)
skb               171 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h 	mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
skb               175 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h 					     struct sk_buff *skb, u8 local_port)
skb               177 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h 	dev_kfree_skb_any(skb);
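
spectrum_ptp.c pairs PTP packets with their hardware timestamps; once matched, an ingress skb gets the timestamp written into skb_hwtstamps() and travels the normal RX path, while an egress skb is reported to the socket with skb_tstamp_tx() and then freed. A sketch of that finish step (my_deliver_rx is assumed):

#include <linux/skbuff.h>
#include <linux/ktime.h>

void my_deliver_rx(struct sk_buff *skb);	/* assumed RX-listener hook */

static void my_ptp_finish(struct sk_buff *skb, ktime_t hwts, bool ingress)
{
	struct skb_shared_hwtstamps hwtstamps = {
		.hwtstamp = hwts,
	};

	if (ingress) {
		/* Attach the RX timestamp and deliver normally. */
		*skb_hwtstamps(skb) = hwtstamps;
		my_deliver_rx(skb);
	} else {
		/* A clone goes to the socket error queue; the original
		 * is no longer needed.
		 */
		skb_tstamp_tx(skb, &hwtstamps);
		dev_kfree_skb_any(skb);
	}
}
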
skb                14 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
skb                59 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
skb                68 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c 		kfree_skb(skb);
skb                72 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c 	skb->dev = mlxsw_sp_port->dev;
skb                77 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c 	pcpu_stats->rx_bytes += skb->len;
skb                80 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c 	skb->protocol = eth_type_trans(skb, skb->dev);
skb                85 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
skb                96 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c 	if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
skb               102 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c 	skb_push(skb, ETH_HLEN);
skb               103 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c 	devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
skb               104 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c 	consume_skb(skb);
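
spectrum_trap.c's drop listener reports dropped frames through devlink. Because eth_type_trans() already consumed the Ethernet header, skb_push(skb, ETH_HLEN) restores it so devlink_trap_report() sees the complete frame; the skb is then released with consume_skb(). Roughly, with the four-argument devlink_trap_report() as in the kernel version indexed here:

#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <net/devlink.h>

static void my_drop_listener(struct devlink *devlink, struct sk_buff *skb,
			     void *trap_ctx,
			     struct devlink_port *in_devlink_port)
{
	/* eth_type_trans() pulled the MAC header; put it back for the report. */
	skb_push(skb, ETH_HLEN);
	devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
	consume_skb(skb);
}
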
skb                90 drivers/net/ethernet/mellanox/mlxsw/switchib.c mlxsw_sib_tx_v1_hdr_construct(struct sk_buff *skb,
skb                93 drivers/net/ethernet/mellanox/mlxsw/switchib.c 	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
skb               131 drivers/net/ethernet/mellanox/mlxsw/switchx2.c static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
skb               134 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
skb               289 drivers/net/ethernet/mellanox/mlxsw/switchx2.c static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
skb               302 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
skb               304 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 		dev_kfree_skb_any(skb);
skb               308 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
skb               313 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	mlxsw_sx_txhdr_construct(skb, &tx_info);
skb               317 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	len = skb->len - MLXSW_TXHDR_LEN;
skb               321 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);
skb               331 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 		dev_kfree_skb_any(skb);
skb              1348 drivers/net/ethernet/mellanox/mlxsw/switchx2.c static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
skb              1361 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	skb->dev = mlxsw_sx_port->dev;
skb              1366 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	pcpu_stats->rx_bytes += skb->len;
skb              1369 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	skb->protocol = eth_type_trans(skb, skb->dev);
skb              1370 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	netif_receive_skb(skb);
skb               141 drivers/net/ethernet/micrel/ks8842.c 	struct sk_buff  *skb;
skb               419 drivers/net/ethernet/micrel/ks8842.c static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
skb               431 drivers/net/ethernet/micrel/ks8842.c 	sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
skb               437 drivers/net/ethernet/micrel/ks8842.c 	*buf++ = skb->len & 0xff;
skb               438 drivers/net/ethernet/micrel/ks8842.c 	*buf++ = (skb->len >> 8) & 0xff;
skb               439 drivers/net/ethernet/micrel/ks8842.c 	skb_copy_from_linear_data(skb, buf, skb->len);
skb               458 drivers/net/ethernet/micrel/ks8842.c 	netdev->stats.tx_bytes += skb->len;
skb               460 drivers/net/ethernet/micrel/ks8842.c 	dev_kfree_skb(skb);
skb               465 drivers/net/ethernet/micrel/ks8842.c static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
skb               468 drivers/net/ethernet/micrel/ks8842.c 	int len = skb->len;
skb               471 drivers/net/ethernet/micrel/ks8842.c 		__func__, skb->len, skb->head, skb->data,
skb               472 drivers/net/ethernet/micrel/ks8842.c 		skb_tail_pointer(skb), skb_end_pointer(skb));
skb               479 drivers/net/ethernet/micrel/ks8842.c 		u16 *ptr16 = (u16 *)skb->data;
skb               492 drivers/net/ethernet/micrel/ks8842.c 		u32 *ptr = (u32 *)skb->data;
skb               511 drivers/net/ethernet/micrel/ks8842.c 	dev_kfree_skb(skb);
skb               547 drivers/net/ethernet/micrel/ks8842.c 	ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
skb               548 drivers/net/ethernet/micrel/ks8842.c 	if (ctl->skb) {
skb               551 drivers/net/ethernet/micrel/ks8842.c 			ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
skb               583 drivers/net/ethernet/micrel/ks8842.c 	dev_kfree_skb(ctl->skb);
skb               584 drivers/net/ethernet/micrel/ks8842.c 	ctl->skb = NULL;
skb               595 drivers/net/ethernet/micrel/ks8842.c 	struct sk_buff *skb = ctl->skb;
skb               607 drivers/net/ethernet/micrel/ks8842.c 	status = *((u32 *)skb->data);
skb               619 drivers/net/ethernet/micrel/ks8842.c 		skb_reserve(skb, 4);
skb               620 drivers/net/ethernet/micrel/ks8842.c 		skb_put(skb, len);
skb               622 drivers/net/ethernet/micrel/ks8842.c 		skb->protocol = eth_type_trans(skb, netdev);
skb               623 drivers/net/ethernet/micrel/ks8842.c 		netif_rx(skb);
skb               626 drivers/net/ethernet/micrel/ks8842.c 		dev_kfree_skb(skb);
skb               651 drivers/net/ethernet/micrel/ks8842.c 		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);
skb               653 drivers/net/ethernet/micrel/ks8842.c 		if (skb) {
skb               658 drivers/net/ethernet/micrel/ks8842.c 				u16 *data16 = skb_put(skb, len);
skb               668 drivers/net/ethernet/micrel/ks8842.c 				u32 *data = skb_put(skb, len);
skb               677 drivers/net/ethernet/micrel/ks8842.c 			skb->protocol = eth_type_trans(skb, netdev);
skb               678 drivers/net/ethernet/micrel/ks8842.c 			netif_rx(skb);
skb               877 drivers/net/ethernet/micrel/ks8842.c 	dev_kfree_skb(rx_ctl->skb);
skb               878 drivers/net/ethernet/micrel/ks8842.c 	rx_ctl->skb = NULL;
skb              1030 drivers/net/ethernet/micrel/ks8842.c static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
skb              1040 drivers/net/ethernet/micrel/ks8842.c 		ret = ks8842_tx_frame_dma(skb, netdev);
skb              1049 drivers/net/ethernet/micrel/ks8842.c 	ret = ks8842_tx_frame(skb, netdev);
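
ks8842's DMA transmit copies the frame into a bounce buffer prefixed with a little-endian length word, using skb_copy_from_linear_data(). The core of that copy, as a sketch:

#include <linux/skbuff.h>

/* Fill a DMA bounce buffer with a length prefix plus the frame data,
 * as FIFO-style MACs like this one expect; returns the bytes written.
 */
static unsigned int my_fill_bounce(struct sk_buff *skb, u8 *buf)
{
	*buf++ = skb->len & 0xff;
	*buf++ = (skb->len >> 8) & 0xff;
	skb_copy_from_linear_data(skb, buf, skb->len);
	return skb->len + 2;
}
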
skb               506 drivers/net/ethernet/micrel/ks8851.c 	struct sk_buff *skb;
skb               549 drivers/net/ethernet/micrel/ks8851.c 			skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
skb               550 drivers/net/ethernet/micrel/ks8851.c 			if (skb) {
skb               558 drivers/net/ethernet/micrel/ks8851.c 				rxpkt = skb_put(skb, rxlen) - 8;
skb               565 drivers/net/ethernet/micrel/ks8851.c 				skb->protocol = eth_type_trans(skb, ks->netdev);
skb               566 drivers/net/ethernet/micrel/ks8851.c 				netif_rx_ni(skb);
skb               933 drivers/net/ethernet/micrel/ks8851.c static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
skb               937 drivers/net/ethernet/micrel/ks8851.c 	unsigned needed = calc_txlen(skb->len);
skb               941 drivers/net/ethernet/micrel/ks8851.c 		  "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
skb               950 drivers/net/ethernet/micrel/ks8851.c 		skb_queue_tail(&ks->txq, skb);
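
ks8851 sits on SPI, so start_xmit cannot touch the bus directly; the fragments show the usual answer of queueing the skb with skb_queue_tail() and letting a work item drain the queue in process context. A minimal sketch (my_spi_write_frame is assumed):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

void my_spi_write_frame(const void *data, unsigned int len);	/* assumed */

struct my_priv {
	struct sk_buff_head txq;
	struct work_struct tx_work;
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* Defer the SPI transfer: just queue and kick the worker. */
	skb_queue_tail(&priv->txq, skb);
	schedule_work(&priv->tx_work);
	return NETDEV_TX_OK;
}

static void my_tx_work(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, tx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&priv->txq)) != NULL) {
		my_spi_write_frame(skb->data, skb->len);
		dev_kfree_skb(skb);
	}
}
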
skb               474 drivers/net/ethernet/micrel/ks8851_mll.c 	struct sk_buff *skb;
skb               504 drivers/net/ethernet/micrel/ks8851_mll.c 		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
skb               505 drivers/net/ethernet/micrel/ks8851_mll.c 		if (likely(skb)) {
skb               506 drivers/net/ethernet/micrel/ks8851_mll.c 			skb_reserve(skb, 2);
skb               508 drivers/net/ethernet/micrel/ks8851_mll.c 			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
skb               509 drivers/net/ethernet/micrel/ks8851_mll.c 			skb_put(skb, frame_hdr->len - 4);
skb               510 drivers/net/ethernet/micrel/ks8851_mll.c 			skb->protocol = eth_type_trans(skb, netdev);
skb               511 drivers/net/ethernet/micrel/ks8851_mll.c 			netif_rx(skb);
skb               719 drivers/net/ethernet/micrel/ks8851_mll.c static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb               731 drivers/net/ethernet/micrel/ks8851_mll.c 	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
skb               732 drivers/net/ethernet/micrel/ks8851_mll.c 		ks_write_qmu(ks, skb->data, skb->len);
skb               734 drivers/net/ethernet/micrel/ks8851_mll.c 		netdev->stats.tx_bytes += skb->len;
skb               736 drivers/net/ethernet/micrel/ks8851_mll.c 		dev_kfree_skb(skb);
skb               965 drivers/net/ethernet/micrel/ksz884x.c 	struct sk_buff *skb;
skb              4435 drivers/net/ethernet/micrel/ksz884x.c 	dev_kfree_skb(dma_buf->skb);
skb              4436 drivers/net/ethernet/micrel/ksz884x.c 	dma_buf->skb = NULL;
skb              4458 drivers/net/ethernet/micrel/ksz884x.c 		if (dma_buf->skb && dma_buf->len != adapter->mtu)
skb              4461 drivers/net/ethernet/micrel/ksz884x.c 		if (!dma_buf->skb)
skb              4462 drivers/net/ethernet/micrel/ksz884x.c 			dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
skb              4463 drivers/net/ethernet/micrel/ksz884x.c 		if (dma_buf->skb && !dma_buf->dma)
skb              4466 drivers/net/ethernet/micrel/ksz884x.c 				skb_tail_pointer(dma_buf->skb),
skb              4578 drivers/net/ethernet/micrel/ksz884x.c 		if (dma_buf->skb)
skb              4627 drivers/net/ethernet/micrel/ksz884x.c static void send_packet(struct sk_buff *skb, struct net_device *dev)
skb              4637 drivers/net/ethernet/micrel/ksz884x.c 	int last_frag = skb_shinfo(skb)->nr_frags;
skb              4647 drivers/net/ethernet/micrel/ksz884x.c 	len = skb->len;
skb              4658 drivers/net/ethernet/micrel/ksz884x.c 		dma_buf->len = skb_headlen(skb);
skb              4661 drivers/net/ethernet/micrel/ksz884x.c 			hw_priv->pdev, skb->data, dma_buf->len,
skb              4668 drivers/net/ethernet/micrel/ksz884x.c 			this_frag = &skb_shinfo(skb)->frags[frag];
skb              4704 drivers/net/ethernet/micrel/ksz884x.c 			hw_priv->pdev, skb->data, dma_buf->len,
skb              4710 drivers/net/ethernet/micrel/ksz884x.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              4719 drivers/net/ethernet/micrel/ksz884x.c 	dma_buf->skb = skb;
skb              4764 drivers/net/ethernet/micrel/ksz884x.c 		if (dma_buf->skb) {
skb              4765 drivers/net/ethernet/micrel/ksz884x.c 			dev = dma_buf->skb->dev;
skb              4768 drivers/net/ethernet/micrel/ksz884x.c 			dev_kfree_skb_irq(dma_buf->skb);
skb              4769 drivers/net/ethernet/micrel/ksz884x.c 			dma_buf->skb = NULL;
skb              4807 drivers/net/ethernet/micrel/ksz884x.c static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
skb              4809 drivers/net/ethernet/micrel/ksz884x.c 	skb->dev = old->dev;
skb              4810 drivers/net/ethernet/micrel/ksz884x.c 	skb->protocol = old->protocol;
skb              4811 drivers/net/ethernet/micrel/ksz884x.c 	skb->ip_summed = old->ip_summed;
skb              4812 drivers/net/ethernet/micrel/ksz884x.c 	skb->csum = old->csum;
skb              4813 drivers/net/ethernet/micrel/ksz884x.c 	skb_set_network_header(skb, ETH_HLEN);
skb              4827 drivers/net/ethernet/micrel/ksz884x.c static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
skb              4837 drivers/net/ethernet/micrel/ksz884x.c 		struct sk_buff *org_skb = skb;
skb              4839 drivers/net/ethernet/micrel/ksz884x.c 		if (skb->len <= 48) {
skb              4840 drivers/net/ethernet/micrel/ksz884x.c 			if (skb_end_pointer(skb) - skb->data >= 50) {
skb              4841 drivers/net/ethernet/micrel/ksz884x.c 				memset(&skb->data[skb->len], 0, 50 - skb->len);
skb              4842 drivers/net/ethernet/micrel/ksz884x.c 				skb->len = 50;
skb              4844 drivers/net/ethernet/micrel/ksz884x.c 				skb = netdev_alloc_skb(dev, 50);
skb              4845 drivers/net/ethernet/micrel/ksz884x.c 				if (!skb)
skb              4847 drivers/net/ethernet/micrel/ksz884x.c 				memcpy(skb->data, org_skb->data, org_skb->len);
skb              4848 drivers/net/ethernet/micrel/ksz884x.c 				memset(&skb->data[org_skb->len], 0,
skb              4850 drivers/net/ethernet/micrel/ksz884x.c 				skb->len = 50;
skb              4851 drivers/net/ethernet/micrel/ksz884x.c 				copy_old_skb(org_skb, skb);
skb              4858 drivers/net/ethernet/micrel/ksz884x.c 	num = skb_shinfo(skb)->nr_frags + 1;
skb              4859 drivers/net/ethernet/micrel/ksz884x.c 	left = hw_alloc_pkt(hw, skb->len, num);
skb              4862 drivers/net/ethernet/micrel/ksz884x.c 		    (CHECKSUM_PARTIAL == skb->ip_summed &&
skb              4863 drivers/net/ethernet/micrel/ksz884x.c 		     skb->protocol == htons(ETH_P_IPV6))) {
skb              4864 drivers/net/ethernet/micrel/ksz884x.c 			struct sk_buff *org_skb = skb;
skb              4866 drivers/net/ethernet/micrel/ksz884x.c 			skb = netdev_alloc_skb(dev, org_skb->len);
skb              4867 drivers/net/ethernet/micrel/ksz884x.c 			if (!skb) {
skb              4871 drivers/net/ethernet/micrel/ksz884x.c 			skb_copy_and_csum_dev(org_skb, skb->data);
skb              4873 drivers/net/ethernet/micrel/ksz884x.c 			skb->len = org_skb->len;
skb              4874 drivers/net/ethernet/micrel/ksz884x.c 			copy_old_skb(org_skb, skb);
skb              4876 drivers/net/ethernet/micrel/ksz884x.c 		send_packet(skb, dev);
skb              4961 drivers/net/ethernet/micrel/ksz884x.c static inline void csum_verified(struct sk_buff *skb)
skb              4966 drivers/net/ethernet/micrel/ksz884x.c 	protocol = skb->protocol;
skb              4967 drivers/net/ethernet/micrel/ksz884x.c 	skb_reset_network_header(skb);
skb              4968 drivers/net/ethernet/micrel/ksz884x.c 	iph = (struct iphdr *) skb_network_header(skb);
skb              4971 drivers/net/ethernet/micrel/ksz884x.c 		skb_set_network_header(skb, VLAN_HLEN);
skb              4972 drivers/net/ethernet/micrel/ksz884x.c 		iph = (struct iphdr *) skb_network_header(skb);
skb              4976 drivers/net/ethernet/micrel/ksz884x.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              4987 drivers/net/ethernet/micrel/ksz884x.c 	struct sk_buff *skb;
skb              5000 drivers/net/ethernet/micrel/ksz884x.c 		skb = netdev_alloc_skb(dev, packet_len + 2);
skb              5001 drivers/net/ethernet/micrel/ksz884x.c 		if (!skb) {
skb              5010 drivers/net/ethernet/micrel/ksz884x.c 		skb_reserve(skb, 2);
skb              5012 drivers/net/ethernet/micrel/ksz884x.c 		skb_put_data(skb, dma_buf->skb->data, packet_len);
skb              5015 drivers/net/ethernet/micrel/ksz884x.c 	skb->protocol = eth_type_trans(skb, dev);
skb              5018 drivers/net/ethernet/micrel/ksz884x.c 		csum_verified(skb);
skb              5025 drivers/net/ethernet/micrel/ksz884x.c 	rx_status = netif_rx(skb);
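
ksz884x pads sub-minimum frames by hand, reusing tailroom when possible and reallocating otherwise, and works around an IPv6 checksum limitation with skb_copy_and_csum_dev() into a fresh skb. The padding half of that logic is what skb_put_padto() encapsulates; a sketch with an invented minimum length:

#include <linux/skbuff.h>

#define MY_MIN_TX_LEN 50	/* assumed hardware minimum, per this driver */

static struct sk_buff *my_pad_short_frame(struct sk_buff *skb)
{
	/* skb_put_padto() zero-extends to the target length, reallocating
	 * when the tailroom is too small, and frees the skb on failure.
	 */
	if (skb_put_padto(skb, MY_MIN_TX_LEN))
		return NULL;
	return skb;
}
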
skb               907 drivers/net/ethernet/microchip/enc28j60.c 	struct sk_buff *skb = NULL;
skb               961 drivers/net/ethernet/microchip/enc28j60.c 		skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
skb               962 drivers/net/ethernet/microchip/enc28j60.c 		if (!skb) {
skb               967 drivers/net/ethernet/microchip/enc28j60.c 			skb_reserve(skb, NET_IP_ALIGN);
skb               971 drivers/net/ethernet/microchip/enc28j60.c 				len, skb_put(skb, len));
skb               973 drivers/net/ethernet/microchip/enc28j60.c 				dump_packet(__func__, skb->len, skb->data);
skb               974 drivers/net/ethernet/microchip/enc28j60.c 			skb->protocol = eth_type_trans(skb, ndev);
skb               978 drivers/net/ethernet/microchip/enc28j60.c 			netif_rx_ni(skb);
skb              1281 drivers/net/ethernet/microchip/enc28j60.c static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
skb              1297 drivers/net/ethernet/microchip/enc28j60.c 	priv->tx_skb = skb;
skb               334 drivers/net/ethernet/microchip/encx24j600.c 	struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);
skb               336 drivers/net/ethernet/microchip/encx24j600.c 	if (!skb) {
skb               341 drivers/net/ethernet/microchip/encx24j600.c 	skb_reserve(skb, NET_IP_ALIGN);
skb               342 drivers/net/ethernet/microchip/encx24j600.c 	encx24j600_raw_read(priv, RRXDATA, skb_put(skb, rsv->len), rsv->len);
skb               345 drivers/net/ethernet/microchip/encx24j600.c 		dump_packet("RX", skb->len, skb->data);
skb               347 drivers/net/ethernet/microchip/encx24j600.c 	skb->dev = dev;
skb               348 drivers/net/ethernet/microchip/encx24j600.c 	skb->protocol = eth_type_trans(skb, dev);
skb               349 drivers/net/ethernet/microchip/encx24j600.c 	skb->ip_summed = CHECKSUM_COMPLETE;
skb               355 drivers/net/ethernet/microchip/encx24j600.c 	netif_rx(skb);
skb               877 drivers/net/ethernet/microchip/encx24j600.c static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
skb               887 drivers/net/ethernet/microchip/encx24j600.c 	priv->tx_skb = skb;
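
The enc28j60/encx24j600 receive paths both reserve NET_IP_ALIGN bytes so the IP header lands on a four-byte boundary after the 14-byte Ethernet header. The allocation idiom in isolation:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static struct sk_buff *my_rx_alloc(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	/* Shift data by NET_IP_ALIGN (2 on most arches) so the IP header
	 * ends up naturally aligned once the MAC header is filled in.
	 */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
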
skb              1246 drivers/net/ethernet/microchip/lan743x_main.c 	if (!buffer_info->skb)
skb              1250 drivers/net/ethernet/microchip/lan743x_main.c 		dev_kfree_skb(buffer_info->skb);
skb              1256 drivers/net/ethernet/microchip/lan743x_main.c 		dev_kfree_skb(buffer_info->skb);
skb              1261 drivers/net/ethernet/microchip/lan743x_main.c 					     buffer_info->skb, ignore_sync);
skb              1265 drivers/net/ethernet/microchip/lan743x_main.c 	buffer_info->skb = NULL;
skb              1304 drivers/net/ethernet/microchip/lan743x_main.c 				   struct sk_buff *skb)
skb              1309 drivers/net/ethernet/microchip/lan743x_main.c 	if (skb_is_gso(skb))
skb              1311 drivers/net/ethernet/microchip/lan743x_main.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              1373 drivers/net/ethernet/microchip/lan743x_main.c 	buffer_info->skb = NULL;
skb              1424 drivers/net/ethernet/microchip/lan743x_main.c 	buffer_info->skb = NULL;
skb              1487 drivers/net/ethernet/microchip/lan743x_main.c 	buffer_info->skb = NULL;
skb              1502 drivers/net/ethernet/microchip/lan743x_main.c 				 struct sk_buff *skb,
skb              1523 drivers/net/ethernet/microchip/lan743x_main.c 	buffer_info->skb = skb;
skb              1547 drivers/net/ethernet/microchip/lan743x_main.c 					 struct sk_buff *skb)
skb              1560 drivers/net/ethernet/microchip/lan743x_main.c 	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
skb              1566 drivers/net/ethernet/microchip/lan743x_main.c 			dev_kfree_skb(skb);
skb              1569 drivers/net/ethernet/microchip/lan743x_main.c 			tx->overflow_skb = skb;
skb              1576 drivers/net/ethernet/microchip/lan743x_main.c 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              1579 drivers/net/ethernet/microchip/lan743x_main.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              1584 drivers/net/ethernet/microchip/lan743x_main.c 	head_length = skb_headlen(skb);
skb              1585 drivers/net/ethernet/microchip/lan743x_main.c 	frame_length = skb_pagelen(skb);
skb              1586 drivers/net/ethernet/microchip/lan743x_main.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              1588 drivers/net/ethernet/microchip/lan743x_main.c 	gso = skb_is_gso(skb);
skb              1590 drivers/net/ethernet/microchip/lan743x_main.c 		start_frame_length = max(skb_shinfo(skb)->gso_size,
skb              1595 drivers/net/ethernet/microchip/lan743x_main.c 				   skb->data, head_length,
skb              1598 drivers/net/ethernet/microchip/lan743x_main.c 				   skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              1599 drivers/net/ethernet/microchip/lan743x_main.c 		dev_kfree_skb(skb);
skb              1610 drivers/net/ethernet/microchip/lan743x_main.c 		const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
skb              1618 drivers/net/ethernet/microchip/lan743x_main.c 			dev_kfree_skb(skb);
skb              1624 drivers/net/ethernet/microchip/lan743x_main.c 	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
skb              1907 drivers/net/ethernet/microchip/lan743x_main.c 					struct sk_buff *skb)
skb              1916 drivers/net/ethernet/microchip/lan743x_main.c 	buffer_info->skb = skb;
skb              1917 drivers/net/ethernet/microchip/lan743x_main.c 	if (!(buffer_info->skb))
skb              1920 drivers/net/ethernet/microchip/lan743x_main.c 					      buffer_info->skb->data,
skb              1935 drivers/net/ethernet/microchip/lan743x_main.c 	skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
skb              1974 drivers/net/ethernet/microchip/lan743x_main.c 	if (buffer_info->skb) {
skb              1975 drivers/net/ethernet/microchip/lan743x_main.c 		dev_kfree_skb(buffer_info->skb);
skb              1976 drivers/net/ethernet/microchip/lan743x_main.c 		buffer_info->skb = NULL;
skb              2056 drivers/net/ethernet/microchip/lan743x_main.c 		struct sk_buff *skb = NULL;
skb              2077 drivers/net/ethernet/microchip/lan743x_main.c 			skb = buffer_info->skb;
skb              2089 drivers/net/ethernet/microchip/lan743x_main.c 			buffer_info->skb = NULL;
skb              2092 drivers/net/ethernet/microchip/lan743x_main.c 			skb_put(skb, packet_length - 4);
skb              2093 drivers/net/ethernet/microchip/lan743x_main.c 			skb->protocol = eth_type_trans(skb,
skb              2136 drivers/net/ethernet/microchip/lan743x_main.c 		if (!skb) {
skb              2143 drivers/net/ethernet/microchip/lan743x_main.c 		hwtstamps = skb_hwtstamps(skb);
skb              2149 drivers/net/ethernet/microchip/lan743x_main.c 		napi_gro_receive(&rx->napi, skb);
skb              2539 drivers/net/ethernet/microchip/lan743x_main.c static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
skb              2544 drivers/net/ethernet/microchip/lan743x_main.c 	return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
skb               787 drivers/net/ethernet/microchip/lan743x_main.h 	struct sk_buff *skb;
skb               822 drivers/net/ethernet/microchip/lan743x_main.h 	struct sk_buff *skb;
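
lan743x builds one TX descriptor per fragment: the linear head sized by skb_headlen(), then each page frag mapped with skb_frag_dma_map(). A trimmed sketch of the mapping loop (my_fill_desc() is an assumed descriptor writer; real code also unwinds earlier mappings on failure):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

void my_fill_desc(dma_addr_t dma, unsigned int len);	/* assumed */

static int my_map_tx_skb(struct device *dev, struct sk_buff *skb)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	dma_addr_t dma;
	unsigned int i;

	/* Map the linear head first. */
	dma = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	my_fill_desc(dma, skb_headlen(skb));

	/* One extension descriptor per page fragment. */
	for (i = 0; i < nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;	/* real code unwinds earlier maps */
		my_fill_desc(dma, skb_frag_size(frag));
	}
	return 0;
}
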
skb                88 drivers/net/ethernet/microchip/lan743x_ptp.c 	struct sk_buff *skb;
skb               102 drivers/net/ethernet/microchip/lan743x_ptp.c 		skb = ptp->tx_ts_skb_queue[i];
skb               112 drivers/net/ethernet/microchip/lan743x_ptp.c 			skb_tstamp_tx(skb, &tstamps);
skb               114 drivers/net/ethernet/microchip/lan743x_ptp.c 		dev_kfree_skb(skb);
skb               811 drivers/net/ethernet/microchip/lan743x_ptp.c 					  struct sk_buff *skb, bool ignore_sync)
skb               817 drivers/net/ethernet/microchip/lan743x_ptp.c 		ptp->tx_ts_skb_queue[ptp->tx_ts_skb_queue_size] = skb;
skb               829 drivers/net/ethernet/microchip/lan743x_ptp.c 		dev_kfree_skb(skb);
skb               968 drivers/net/ethernet/microchip/lan743x_ptp.c 		struct sk_buff *skb = ptp->tx_ts_skb_queue[index];
skb               970 drivers/net/ethernet/microchip/lan743x_ptp.c 		dev_kfree_skb(skb);
skb              1099 drivers/net/ethernet/microchip/lan743x_ptp.c 				  struct sk_buff *skb, bool ignore_sync)
skb              1101 drivers/net/ethernet/microchip/lan743x_ptp.c 	lan743x_ptp_tx_ts_enqueue_skb(adapter, skb, ignore_sync);
skb                32 drivers/net/ethernet/microchip/lan743x_ptp.h 				  struct sk_buff *skb, bool ignore_sync);
skb               216 drivers/net/ethernet/moxa/moxart_ether.c 	struct sk_buff *skb;
skb               246 drivers/net/ethernet/moxa/moxart_ether.c 		skb = netdev_alloc_skb_ip_align(ndev, len);
skb               248 drivers/net/ethernet/moxa/moxart_ether.c 		if (unlikely(!skb)) {
skb               255 drivers/net/ethernet/moxa/moxart_ether.c 		memcpy(skb->data, priv->rx_buf[rx_head], len);
skb               256 drivers/net/ethernet/moxa/moxart_ether.c 		skb_put(skb, len);
skb               257 drivers/net/ethernet/moxa/moxart_ether.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               258 drivers/net/ethernet/moxa/moxart_ether.c 		napi_gro_receive(&priv->napi, skb);
skb               334 drivers/net/ethernet/moxa/moxart_ether.c static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               358 drivers/net/ethernet/moxa/moxart_ether.c 	len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
skb               360 drivers/net/ethernet/moxa/moxart_ether.c 	priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
skb               368 drivers/net/ethernet/moxa/moxart_ether.c 	priv->tx_skb[tx_head] = skb;
skb               372 drivers/net/ethernet/moxa/moxart_ether.c 	moxart_desc_write((uintptr_t)skb->data,
skb               375 drivers/net/ethernet/moxa/moxart_ether.c 	if (skb->len < ETH_ZLEN) {
skb               376 drivers/net/ethernet/moxa/moxart_ether.c 		memset(&skb->data[skb->len],
skb               377 drivers/net/ethernet/moxa/moxart_ether.c 		       0, ETH_ZLEN - skb->len);
skb               557 drivers/net/ethernet/mscc/ocelot.c static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
skb               559 drivers/net/ethernet/mscc/ocelot.c 	struct skb_shared_info *shinfo = skb_shinfo(skb);
skb               577 drivers/net/ethernet/mscc/ocelot.c 	info.vid = skb_vlan_tag_get(skb);
skb               592 drivers/net/ethernet/mscc/ocelot.c 	count = (skb->len + 3) / 4;
skb               593 drivers/net/ethernet/mscc/ocelot.c 	last = skb->len % 4;
skb               595 drivers/net/ethernet/mscc/ocelot.c 		ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
skb               606 drivers/net/ethernet/mscc/ocelot.c 			 QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
skb               612 drivers/net/ethernet/mscc/ocelot.c 	skb_tx_timestamp(skb);
skb               615 drivers/net/ethernet/mscc/ocelot.c 	dev->stats.tx_bytes += skb->len;
skb               625 drivers/net/ethernet/mscc/ocelot.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb               627 drivers/net/ethernet/mscc/ocelot.c 		oskb->skb = skb;
skb               637 drivers/net/ethernet/mscc/ocelot.c 	dev_kfree_skb_any(skb);
skb               806 drivers/net/ethernet/mscc/ocelot.c 	struct sk_buff *skb;
skb               814 drivers/net/ethernet/mscc/ocelot.c 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
skb               822 drivers/net/ethernet/mscc/ocelot.c 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
skb               836 drivers/net/ethernet/mscc/ocelot.c 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac))
skb               839 drivers/net/ethernet/mscc/ocelot.c 	if (entry->vid && nla_put_u16(dump->skb, NDA_VLAN, entry->vid))
skb               842 drivers/net/ethernet/mscc/ocelot.c 	nlmsg_end(dump->skb, nlh);
skb               849 drivers/net/ethernet/mscc/ocelot.c 	nlmsg_cancel(dump->skb, nlh);
skb               901 drivers/net/ethernet/mscc/ocelot.c static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb               909 drivers/net/ethernet/mscc/ocelot.c 		.skb = skb,
skb              2230 drivers/net/ethernet/mscc/ocelot.c 			dev_kfree_skb_any(entry->skb);
skb               510 drivers/net/ethernet/mscc/ocelot.h 	struct sk_buff *skb;
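
ocelot's FDB dump is another netlink use of sk_buff: each table entry becomes an RTM_NEWNEIGH message started with nlmsg_put(), filled with nla_put() attributes, and committed with nlmsg_end() or rolled back with nlmsg_cancel() when the buffer runs out. A self-contained sketch of one entry:

#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <linux/etherdevice.h>

static int my_fdb_dump_entry(struct sk_buff *skb, u32 portid, u32 seq,
			     const unsigned char *mac, u16 vid)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWNEIGH, sizeof(*ndm),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_state = NUD_REACHABLE;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, mac))
		goto cancel;
	if (vid && nla_put_u16(skb, NDA_VLAN, vid))
		goto cancel;

	nlmsg_end(skb, nlh);
	return 0;

cancel:
	/* Back the partially built message out of the buffer. */
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
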
skb               104 drivers/net/ethernet/mscc/ocelot_board.c 		struct sk_buff *skb;
skb               127 drivers/net/ethernet/mscc/ocelot_board.c 		skb = netdev_alloc_skb(dev, info.len);
skb               129 drivers/net/ethernet/mscc/ocelot_board.c 		if (unlikely(!skb)) {
skb               135 drivers/net/ethernet/mscc/ocelot_board.c 		buf = (u32 *)skb_put(skb, buf_len);
skb               150 drivers/net/ethernet/mscc/ocelot_board.c 			buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
skb               170 drivers/net/ethernet/mscc/ocelot_board.c 			shhwtstamps = skb_hwtstamps(skb);
skb               179 drivers/net/ethernet/mscc/ocelot_board.c 			skb->offload_fwd_mark = 1;
skb               181 drivers/net/ethernet/mscc/ocelot_board.c 		skb->protocol = eth_type_trans(skb, dev);
skb               182 drivers/net/ethernet/mscc/ocelot_board.c 		netif_rx(skb);
skb               202 drivers/net/ethernet/mscc/ocelot_board.c 		struct sk_buff *skb = NULL;
skb               228 drivers/net/ethernet/mscc/ocelot_board.c 			skb = entry->skb;
skb               237 drivers/net/ethernet/mscc/ocelot_board.c 		if (unlikely(!skb))
skb               246 drivers/net/ethernet/mscc/ocelot_board.c 		skb_tstamp_tx(skb, &shhwtstamps);
skb               248 drivers/net/ethernet/mscc/ocelot_board.c 		dev_kfree_skb_any(skb);
skb               112 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct sk_buff *skb;
skb               357 drivers/net/ethernet/myricom/myri10ge/myri10ge.c static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
skb              1177 drivers/net/ethernet/myricom/myri10ge/myri10ge.c static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
skb              1179 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
skb              1181 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	if ((skb->protocol == htons(ETH_P_8021Q)) &&
skb              1184 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		skb->csum = hw_csum;
skb              1185 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb              1285 drivers/net/ethernet/myricom/myri10ge/myri10ge.c myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
skb              1299 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb              1301 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			skb->csum = csum_sub(skb->csum, vsum);
skb              1304 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI));
skb              1306 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		skb->len -= VLAN_HLEN;
skb              1307 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		skb->data_len -= VLAN_HLEN;
skb              1308 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		frag = skb_shinfo(skb)->frags;
skb              1320 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct sk_buff *skb;
skb              1341 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	skb = napi_get_frags(&ss->napi);
skb              1342 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	if (unlikely(skb == NULL)) {
skb              1353 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	rx_frags = skb_shinfo(skb)->frags;
skb              1357 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		skb_fill_page_desc(skb, i, rx->info[idx].page,
skb              1371 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	skb->len = len;
skb              1372 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	skb->data_len = len;
skb              1373 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	skb->truesize += len;
skb              1375 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb              1376 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		skb->csum = csum;
skb              1378 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	myri10ge_vlan_rx(mgp->dev, va, skb);
skb              1379 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	skb_record_rx_queue(skb, ss - &mgp->ss[0]);
skb              1392 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct sk_buff *skb;
skb              1397 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		skb = tx->info[idx].skb;
skb              1400 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		tx->info[idx].skb = NULL;
skb              1408 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		if (skb) {
skb              1409 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			ss->stats.tx_bytes += skb->len;
skb              1411 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			dev_consume_skb_irq(skb);
skb              2082 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct sk_buff *skb;
skb              2113 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		skb = tx->info[idx].skb;
skb              2116 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		tx->info[idx].skb = NULL;
skb              2120 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		if (skb) {
skb              2122 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			dev_kfree_skb_any(skb);
skb              2597 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			if (tx->info[idx].skb != NULL)
skb              2608 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			tx->info[idx].skb = NULL;
skb              2624 drivers/net/ethernet/myricom/myri10ge/myri10ge.c static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
skb              2642 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	queue = skb_get_queue_mapping(skb);
skb              2654 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	if (skb_is_gso(skb)) {
skb              2655 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		mss = skb_shinfo(skb)->gso_size;
skb              2671 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              2672 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		cksum_offset = skb_checksum_start_offset(skb);
skb              2673 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		pseudo_hdr_offset = cksum_offset + skb->csum_offset;
skb              2678 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			if (skb_checksum_help(skb))
skb              2698 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
skb              2703 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		if (skb_is_gso_v6(skb)) {
skb              2704 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			cksum_offset = tcp_hdrlen(skb);
skb              2707 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 				return myri10ge_sw_tso(skb, dev);
skb              2715 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
skb              2719 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		if (eth_skb_pad(skb)) {
skb              2728 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	len = skb_headlen(skb);
skb              2729 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
skb              2734 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	tx->info[idx].skb = skb;
skb              2738 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	frag_cnt = skb_shinfo(skb)->nr_frags;
skb              2822 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
skb              2833 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		frag = &skb_shinfo(skb)->frags[frag_idx];
skb              2874 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	if (skb_is_gso(skb)) {
skb              2879 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	if (skb_linearize(skb))
skb              2886 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	dev_kfree_skb_any(skb);
skb              2892 drivers/net/ethernet/myricom/myri10ge/myri10ge.c static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
skb              2900 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
skb              2920 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	dev_kfree_skb_any(skb);
skb              2924 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ss = &mgp->ss[skb_get_queue_mapping(skb)];
skb              2925 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	dev_kfree_skb_any(skb);
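
myri10ge_sw_tso() is the classic software fallback when a GSO skb cannot be offloaded (here, TSO6): skb_gso_segment() splits it into a chain of MTU-sized skbs that are transmitted individually, after which the original is freed. Schematically (my_xmit_one() is assumed):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/err.h>

netdev_tx_t my_xmit_one(struct sk_buff *skb, struct net_device *dev); /* assumed */

static netdev_tx_t my_sw_tso(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs;

	/* Segment in software, masking out the feature we can't offload. */
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
	if (IS_ERR(segs)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Transmit each resulting segment on its own. */
	while (segs) {
		struct sk_buff *next = segs->next;

		segs->next = NULL;
		my_xmit_one(segs, dev);
		segs = next;
	}

	dev_kfree_skb_any(skb);	/* the original GSO skb is now redundant */
	return NETDEV_TX_OK;
}
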
skb               624 drivers/net/ethernet/natsemi/natsemi.c static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
skb              1934 drivers/net/ethernet/natsemi/natsemi.c 		struct sk_buff *skb;
skb              1938 drivers/net/ethernet/natsemi/natsemi.c 			skb = netdev_alloc_skb(dev, buflen);
skb              1939 drivers/net/ethernet/natsemi/natsemi.c 			np->rx_skbuff[entry] = skb;
skb              1940 drivers/net/ethernet/natsemi/natsemi.c 			if (skb == NULL)
skb              1943 drivers/net/ethernet/natsemi/natsemi.c 				skb->data, buflen, PCI_DMA_FROMDEVICE);
skb              1946 drivers/net/ethernet/natsemi/natsemi.c 				dev_kfree_skb_any(skb);
skb              2090 drivers/net/ethernet/natsemi/natsemi.c static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
skb              2103 drivers/net/ethernet/natsemi/natsemi.c 	np->tx_skbuff[entry] = skb;
skb              2105 drivers/net/ethernet/natsemi/natsemi.c 				skb->data,skb->len, PCI_DMA_TODEVICE);
skb              2108 drivers/net/ethernet/natsemi/natsemi.c 		dev_kfree_skb_irq(skb);
skb              2118 drivers/net/ethernet/natsemi/natsemi.c 		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
skb              2131 drivers/net/ethernet/natsemi/natsemi.c 		dev_kfree_skb_irq(skb);
skb              2354 drivers/net/ethernet/natsemi/natsemi.c 			struct sk_buff *skb;
skb              2359 drivers/net/ethernet/natsemi/natsemi.c 			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
skb              2361 drivers/net/ethernet/natsemi/natsemi.c 				skb_reserve(skb, RX_OFFSET);
skb              2366 drivers/net/ethernet/natsemi/natsemi.c 				skb_copy_to_linear_data(skb,
skb              2368 drivers/net/ethernet/natsemi/natsemi.c 				skb_put(skb, pkt_len);
skb              2377 drivers/net/ethernet/natsemi/natsemi.c 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
skb              2380 drivers/net/ethernet/natsemi/natsemi.c 			skb->protocol = eth_type_trans(skb, dev);
skb              2381 drivers/net/ethernet/natsemi/natsemi.c 			netif_receive_skb(skb);
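
The natsemi receive loop is a textbook "copybreak": small packets are copied into a freshly allocated, IP-aligned skb so the large ring buffer can be reused immediately, while large packets hand the ring skb up directly. The copy half, as a sketch with an invented threshold:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define MY_COPYBREAK 256	/* assumed threshold, tunable in real drivers */

static struct sk_buff *my_rx_copybreak(struct net_device *dev,
				       struct sk_buff *ring_skb, int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len >= MY_COPYBREAK)
		return NULL;	/* caller passes ring_skb up and refills */

	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
	if (!skb)
		return NULL;

	/* Copy out of the (still-mapped) ring buffer; it stays in place. */
	skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
	skb_put(skb, pkt_len);
	return skb;
}
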
skb               500 drivers/net/ethernet/natsemi/ns83820.c static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
skb               511 drivers/net/ethernet/natsemi/ns83820.c 		kfree_skb(skb);
skb               525 drivers/net/ethernet/natsemi/ns83820.c 	dev->rx_info.skbs[next_empty] = skb;
skb               529 drivers/net/ethernet/natsemi/ns83820.c 	buf = pci_map_single(dev->pci_dev, skb->data,
skb               552 drivers/net/ethernet/natsemi/ns83820.c 		struct sk_buff *skb;
skb               556 drivers/net/ethernet/natsemi/ns83820.c 		skb = __netdev_alloc_skb(ndev, REAL_RX_BUF_SIZE+16, gfp);
skb               557 drivers/net/ethernet/natsemi/ns83820.c 		if (unlikely(!skb))
skb               560 drivers/net/ethernet/natsemi/ns83820.c 		skb_reserve(skb, skb->data - PTR_ALIGN(skb->data, 16));
skb               563 drivers/net/ethernet/natsemi/ns83820.c 		res = ns83820_add_rx_skb(dev, skb);
skb               788 drivers/net/ethernet/natsemi/ns83820.c 		struct sk_buff *skb = dev->rx_info.skbs[i];
skb               791 drivers/net/ethernet/natsemi/ns83820.c 		kfree_skb(skb);
skb               846 drivers/net/ethernet/natsemi/ns83820.c 		struct sk_buff *skb;
skb               854 drivers/net/ethernet/natsemi/ns83820.c 		skb = info->skbs[next_rx];
skb               881 drivers/net/ethernet/natsemi/ns83820.c 			skb_put(skb, len);
skb               882 drivers/net/ethernet/natsemi/ns83820.c 			if (unlikely(!skb))
skb               889 drivers/net/ethernet/natsemi/ns83820.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               891 drivers/net/ethernet/natsemi/ns83820.c 				skb_checksum_none_assert(skb);
skb               893 drivers/net/ethernet/natsemi/ns83820.c 			skb->protocol = eth_type_trans(skb, ndev);
skb               899 drivers/net/ethernet/natsemi/ns83820.c 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
skb               902 drivers/net/ethernet/natsemi/ns83820.c 			rx_rc = netif_rx(skb);
skb               908 drivers/net/ethernet/natsemi/ns83820.c 			dev_kfree_skb_irq(skb);
skb               968 drivers/net/ethernet/natsemi/ns83820.c 		struct sk_buff *skb;
skb               981 drivers/net/ethernet/natsemi/ns83820.c 		skb = dev->tx_skbs[tx_done_idx];
skb               983 drivers/net/ethernet/natsemi/ns83820.c 		dprintk("done(%p)\n", skb);
skb               987 drivers/net/ethernet/natsemi/ns83820.c 		if (skb) {
skb               992 drivers/net/ethernet/natsemi/ns83820.c 			dev_consume_skb_irq(skb);
skb              1022 drivers/net/ethernet/natsemi/ns83820.c 		struct sk_buff *skb = dev->tx_skbs[i];
skb              1024 drivers/net/ethernet/natsemi/ns83820.c 		if (skb) {
skb              1030 drivers/net/ethernet/natsemi/ns83820.c 			dev_kfree_skb_irq(skb);
skb              1044 drivers/net/ethernet/natsemi/ns83820.c static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
skb              1060 drivers/net/ethernet/natsemi/ns83820.c 	nr_frags =  skb_shinfo(skb)->nr_frags;
skb              1098 drivers/net/ethernet/natsemi/ns83820.c 	frag = skb_shinfo(skb)->frags;
skb              1102 drivers/net/ethernet/natsemi/ns83820.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1104 drivers/net/ethernet/natsemi/ns83820.c 		if (IPPROTO_TCP == ip_hdr(skb)->protocol)
skb              1106 drivers/net/ethernet/natsemi/ns83820.c 		else if (IPPROTO_UDP == ip_hdr(skb)->protocol)
skb              1111 drivers/net/ethernet/natsemi/ns83820.c 	if (skb_vlan_tag_present(skb)) {
skb              1116 drivers/net/ethernet/natsemi/ns83820.c 		short tag = skb_vlan_tag_get(skb);
skb              1121 drivers/net/ethernet/natsemi/ns83820.c 	len = skb->len;
skb              1123 drivers/net/ethernet/natsemi/ns83820.c 		len -= skb->data_len;
skb              1124 drivers/net/ethernet/natsemi/ns83820.c 	buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
skb              1159 drivers/net/ethernet/natsemi/ns83820.c 	dev->tx_skbs[last_idx] = skb;
skb                70 drivers/net/ethernet/natsemi/sonic.c 		struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
skb                71 drivers/net/ethernet/natsemi/sonic.c 		if (skb == NULL) {
skb                83 drivers/net/ethernet/natsemi/sonic.c 			skb_reserve(skb, 2);
skb                84 drivers/net/ethernet/natsemi/sonic.c 		lp->rx_skb[i] = skb;
skb               237 drivers/net/ethernet/natsemi/sonic.c static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
skb               245 drivers/net/ethernet/natsemi/sonic.c 	netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
skb               247 drivers/net/ethernet/natsemi/sonic.c 	length = skb->len;
skb               249 drivers/net/ethernet/natsemi/sonic.c 		if (skb_padto(skb, ETH_ZLEN))
skb               258 drivers/net/ethernet/natsemi/sonic.c 	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
skb               261 drivers/net/ethernet/natsemi/sonic.c 		dev_kfree_skb_any(skb);
skb               281 drivers/net/ethernet/natsemi/sonic.c 	lp->tx_skb[entry] = skb;
skb               336 drivers/net/ethernet/natsemi/sonic.h static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev);
skb              2313 drivers/net/ethernet/neterion/s2io.c 	struct sk_buff *skb;
skb              2324 drivers/net/ethernet/neterion/s2io.c 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
skb              2325 drivers/net/ethernet/neterion/s2io.c 	if (!skb) {
skb              2330 drivers/net/ethernet/neterion/s2io.c 			 skb_headlen(skb), PCI_DMA_TODEVICE);
skb              2331 drivers/net/ethernet/neterion/s2io.c 	frg_cnt = skb_shinfo(skb)->nr_frags;
skb              2335 drivers/net/ethernet/neterion/s2io.c 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
skb              2344 drivers/net/ethernet/neterion/s2io.c 	return skb;
skb              2358 drivers/net/ethernet/neterion/s2io.c 	struct sk_buff *skb;
skb              2375 drivers/net/ethernet/neterion/s2io.c 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
skb              2376 drivers/net/ethernet/neterion/s2io.c 			if (skb) {
skb              2377 drivers/net/ethernet/neterion/s2io.c 				swstats->mem_freed += skb->truesize;
skb              2378 drivers/net/ethernet/neterion/s2io.c 				dev_kfree_skb(skb);
skb              2444 drivers/net/ethernet/neterion/s2io.c 	struct sk_buff *skb;
skb              2504 drivers/net/ethernet/neterion/s2io.c 		skb = netdev_alloc_skb(nic->dev, size);
skb              2505 drivers/net/ethernet/neterion/s2io.c 		if (!skb) {
skb              2516 drivers/net/ethernet/neterion/s2io.c 		swstats->mem_allocated += skb->truesize;
skb              2522 drivers/net/ethernet/neterion/s2io.c 			skb_reserve(skb, NET_IP_ALIGN);
skb              2524 drivers/net/ethernet/neterion/s2io.c 				pci_map_single(ring->pdev, skb->data,
skb              2533 drivers/net/ethernet/neterion/s2io.c 			rxdp->Host_Control = (unsigned long)skb;
skb              2551 drivers/net/ethernet/neterion/s2io.c 			skb_reserve(skb, BUF0_LEN);
skb              2552 drivers/net/ethernet/neterion/s2io.c 			tmp = (u64)(unsigned long)skb->data;
skb              2555 drivers/net/ethernet/neterion/s2io.c 			skb->data = (void *) (unsigned long)tmp;
skb              2556 drivers/net/ethernet/neterion/s2io.c 			skb_reset_tail_pointer(skb);
skb              2581 drivers/net/ethernet/neterion/s2io.c 								    skb->data,
skb              2600 drivers/net/ethernet/neterion/s2io.c 								 skb->data,
skb              2611 drivers/net/ethernet/neterion/s2io.c 			rxdp->Host_Control = (unsigned long) (skb);
skb              2646 drivers/net/ethernet/neterion/s2io.c 	swstats->mem_freed += skb->truesize;
skb              2647 drivers/net/ethernet/neterion/s2io.c 	dev_kfree_skb_irq(skb);
skb              2655 drivers/net/ethernet/neterion/s2io.c 	struct sk_buff *skb;
skb              2666 drivers/net/ethernet/neterion/s2io.c 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
skb              2667 drivers/net/ethernet/neterion/s2io.c 		if (!skb)
skb              2694 drivers/net/ethernet/neterion/s2io.c 		swstats->mem_freed += skb->truesize;
skb              2695 drivers/net/ethernet/neterion/s2io.c 		dev_kfree_skb(skb);
skb              2888 drivers/net/ethernet/neterion/s2io.c 	struct sk_buff *skb;
skb              2914 drivers/net/ethernet/neterion/s2io.c 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
skb              2915 drivers/net/ethernet/neterion/s2io.c 		if (skb == NULL) {
skb              2940 drivers/net/ethernet/neterion/s2io.c 		prefetch(skb->data);
skb              2996 drivers/net/ethernet/neterion/s2io.c 	struct sk_buff *skb = NULL;
skb              3046 drivers/net/ethernet/neterion/s2io.c 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
skb              3047 drivers/net/ethernet/neterion/s2io.c 		if (skb == NULL) {
skb              3056 drivers/net/ethernet/neterion/s2io.c 		swstats->mem_freed += skb->truesize;
skb              3057 drivers/net/ethernet/neterion/s2io.c 		dev_consume_skb_irq(skb);
skb              3998 drivers/net/ethernet/neterion/s2io.c static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
skb              4017 drivers/net/ethernet/neterion/s2io.c 	if (unlikely(skb->len <= 0)) {
skb              4019 drivers/net/ethernet/neterion/s2io.c 		dev_kfree_skb_any(skb);
skb              4026 drivers/net/ethernet/neterion/s2io.c 		dev_kfree_skb_any(skb);
skb              4031 drivers/net/ethernet/neterion/s2io.c 	if (skb_vlan_tag_present(skb))
skb              4032 drivers/net/ethernet/neterion/s2io.c 		vlan_tag = skb_vlan_tag_get(skb);
skb              4034 drivers/net/ethernet/neterion/s2io.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb              4037 drivers/net/ethernet/neterion/s2io.c 			ip = ip_hdr(skb);
skb              4058 drivers/net/ethernet/neterion/s2io.c 					if (skb->len > 1024)
skb              4066 drivers/net/ethernet/neterion/s2io.c 			[skb->priority & (MAX_TX_FIFOS - 1)];
skb              4093 drivers/net/ethernet/neterion/s2io.c 		dev_kfree_skb_any(skb);
skb              4098 drivers/net/ethernet/neterion/s2io.c 	offload_type = s2io_offload_type(skb);
skb              4101 drivers/net/ethernet/neterion/s2io.c 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
skb              4103 drivers/net/ethernet/neterion/s2io.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              4119 drivers/net/ethernet/neterion/s2io.c 	frg_len = skb_headlen(skb);
skb              4120 drivers/net/ethernet/neterion/s2io.c 	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
skb              4125 drivers/net/ethernet/neterion/s2io.c 	txdp->Host_Control = (unsigned long)skb;
skb              4128 drivers/net/ethernet/neterion/s2io.c 	frg_cnt = skb_shinfo(skb)->nr_frags;
skb              4131 drivers/net/ethernet/neterion/s2io.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              4168 drivers/net/ethernet/neterion/s2io.c 	swstats->mem_allocated += skb->truesize;
skb              4179 drivers/net/ethernet/neterion/s2io.c 	swstats->mem_freed += skb->truesize;
skb              4180 drivers/net/ethernet/neterion/s2io.c 	dev_kfree_skb_any(skb);
skb              6743 drivers/net/ethernet/neterion/s2io.c 				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
skb              6752 drivers/net/ethernet/neterion/s2io.c 		if (*skb) {
skb              6761 drivers/net/ethernet/neterion/s2io.c 			*skb = netdev_alloc_skb(dev, size);
skb              6762 drivers/net/ethernet/neterion/s2io.c 			if (!(*skb)) {
skb              6769 drivers/net/ethernet/neterion/s2io.c 			stats->mem_allocated += (*skb)->truesize;
skb              6775 drivers/net/ethernet/neterion/s2io.c 				pci_map_single(sp->pdev, (*skb)->data,
skb              6780 drivers/net/ethernet/neterion/s2io.c 			rxdp->Host_Control = (unsigned long) (*skb);
skb              6785 drivers/net/ethernet/neterion/s2io.c 		if (*skb) {
skb              6790 drivers/net/ethernet/neterion/s2io.c 			*skb = netdev_alloc_skb(dev, size);
skb              6791 drivers/net/ethernet/neterion/s2io.c 			if (!(*skb)) {
skb              6799 drivers/net/ethernet/neterion/s2io.c 			stats->mem_allocated += (*skb)->truesize;
skb              6801 drivers/net/ethernet/neterion/s2io.c 				pci_map_single(sp->pdev, (*skb)->data,
skb              6817 drivers/net/ethernet/neterion/s2io.c 			rxdp->Host_Control = (unsigned long) (*skb);
skb              6840 drivers/net/ethernet/neterion/s2io.c 	stats->mem_freed += (*skb)->truesize;
skb              6841 drivers/net/ethernet/neterion/s2io.c 	dev_kfree_skb(*skb);
skb              6865 drivers/net/ethernet/neterion/s2io.c 	struct sk_buff *skb = NULL;
skb              6888 drivers/net/ethernet/neterion/s2io.c 				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
skb              7274 drivers/net/ethernet/neterion/s2io.c 	struct sk_buff *skb = (struct sk_buff *)
skb              7283 drivers/net/ethernet/neterion/s2io.c 	skb->dev = dev;
skb              7340 drivers/net/ethernet/neterion/s2io.c 				+= skb->truesize;
skb              7341 drivers/net/ethernet/neterion/s2io.c 			dev_kfree_skb(skb);
skb              7352 drivers/net/ethernet/neterion/s2io.c 		skb_put(skb, len);
skb              7358 drivers/net/ethernet/neterion/s2io.c 		unsigned char *buff = skb_push(skb, buf0_len);
skb              7362 drivers/net/ethernet/neterion/s2io.c 		skb_put(skb, buf2_len);
skb              7377 drivers/net/ethernet/neterion/s2io.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              7384 drivers/net/ethernet/neterion/s2io.c 							    skb->data, &tcp,
skb              7389 drivers/net/ethernet/neterion/s2io.c 					lro->parent = skb;
skb              7392 drivers/net/ethernet/neterion/s2io.c 					lro_append_pkt(sp, lro, skb, tcp_len);
skb              7395 drivers/net/ethernet/neterion/s2io.c 					lro_append_pkt(sp, lro, skb, tcp_len);
skb              7427 drivers/net/ethernet/neterion/s2io.c 			skb_checksum_none_assert(skb);
skb              7430 drivers/net/ethernet/neterion/s2io.c 		skb_checksum_none_assert(skb);
skb              7432 drivers/net/ethernet/neterion/s2io.c 	swstats->mem_freed += skb->truesize;
skb              7434 drivers/net/ethernet/neterion/s2io.c 	skb_record_rx_queue(skb, ring_no);
skb              7435 drivers/net/ethernet/neterion/s2io.c 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
skb              8465 drivers/net/ethernet/neterion/s2io.c static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
skb              8467 drivers/net/ethernet/neterion/s2io.c 	struct net_device *dev = skb->dev;
skb              8470 drivers/net/ethernet/neterion/s2io.c 	skb->protocol = eth_type_trans(skb, dev);
skb              8472 drivers/net/ethernet/neterion/s2io.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb              8474 drivers/net/ethernet/neterion/s2io.c 		netif_receive_skb(skb);
skb              8476 drivers/net/ethernet/neterion/s2io.c 		netif_rx(skb);
skb              8480 drivers/net/ethernet/neterion/s2io.c 			   struct sk_buff *skb, u32 tcp_len)
skb              8487 drivers/net/ethernet/neterion/s2io.c 	skb_pull(skb, (skb->len - tcp_len));
skb              8489 drivers/net/ethernet/neterion/s2io.c 		lro->last_frag->next = skb;
skb              8491 drivers/net/ethernet/neterion/s2io.c 		skb_shinfo(first)->frag_list = skb;
skb              8492 drivers/net/ethernet/neterion/s2io.c 	first->truesize += skb->truesize;
skb              8493 drivers/net/ethernet/neterion/s2io.c 	lro->last_frag = skb;
skb              1106 drivers/net/ethernet/neterion/s2io.h static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag);
skb              1109 drivers/net/ethernet/neterion/s2io.h 			   struct sk_buff *skb, u32 tcp_len);
skb              1117 drivers/net/ethernet/neterion/s2io.h #define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
skb              1118 drivers/net/ethernet/neterion/s2io.h #define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
skb              1119 drivers/net/ethernet/neterion/s2io.h #define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
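
The lro_append_pkt() lines above (s2io.c:8480-8493) chain aggregated TCP segments off the parent skb's frag_list. A minimal sketch of that chaining together with the len/data_len/truesize bookkeeping the stack expects; the excerpt only shows the truesize update, so treat the other two as an assumption about where the driver does its accounting:

    #include <linux/types.h>
    #include <linux/skbuff.h>

    static void example_lro_append(struct sk_buff *parent,
                                   struct sk_buff **last_frag,
                                   struct sk_buff *skb, u32 tcp_len)
    {
            /* Drop everything in front of the payload we want to keep. */
            skb_pull(skb, skb->len - tcp_len);

            /* First segment hangs off frag_list, later ones chain via
             * ->next of the previous fragment. */
            if (*last_frag)
                    (*last_frag)->next = skb;
            else
                    skb_shinfo(parent)->frag_list = skb;

            parent->len += tcp_len;
            parent->data_len += tcp_len;
            parent->truesize += skb->truesize;
            *last_frag = skb;
    }
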
skb               197 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct sk_buff       *skb;
skb               207 drivers/net/ethernet/neterion/vxge/vxge-main.c 	skb = netdev_alloc_skb(dev, skb_size +
skb               209 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (skb == NULL) {
skb               218 drivers/net/ethernet/neterion/vxge/vxge-main.c 		__func__, __LINE__, skb);
skb               220 drivers/net/ethernet/neterion/vxge/vxge-main.c 	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
skb               222 drivers/net/ethernet/neterion/vxge/vxge-main.c 	rx_priv->skb = skb;
skb               228 drivers/net/ethernet/neterion/vxge/vxge-main.c 	return skb;
skb               243 drivers/net/ethernet/neterion/vxge/vxge-main.c 	rx_priv->skb_data = rx_priv->skb->data;
skb               282 drivers/net/ethernet/neterion/vxge/vxge-main.c 		dev_kfree_skb(rx_priv->skb);
skb               293 drivers/net/ethernet/neterion/vxge/vxge-main.c vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
skb               299 drivers/net/ethernet/neterion/vxge/vxge-main.c 	skb_record_rx_queue(skb, ring->driver_id);
skb               300 drivers/net/ethernet/neterion/vxge/vxge-main.c 	skb->protocol = eth_type_trans(skb, ring->ndev);
skb               306 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (skb->pkt_type == PACKET_MULTICAST)
skb               312 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->ndev->name, __func__, __LINE__, skb->protocol);
skb               316 drivers/net/ethernet/neterion/vxge/vxge-main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
skb               317 drivers/net/ethernet/neterion/vxge/vxge-main.c 	napi_gro_receive(ring->napi_p, skb);
skb               365 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct sk_buff *skb;
skb               377 drivers/net/ethernet/neterion/vxge/vxge-main.c 		skb = rx_priv->skb;
skb               384 drivers/net/ethernet/neterion/vxge/vxge-main.c 			ring->ndev->name, __func__, __LINE__, skb);
skb               398 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vxge_assert(skb);
skb               400 drivers/net/ethernet/neterion/vxge/vxge-main.c 		prefetch((char *)skb + L1_CACHE_BYTES);
skb               426 drivers/net/ethernet/neterion/vxge/vxge-main.c 					skb_put(skb, pkt_length);
skb               435 drivers/net/ethernet/neterion/vxge/vxge-main.c 					dev_kfree_skb(rx_priv->skb);
skb               436 drivers/net/ethernet/neterion/vxge/vxge-main.c 					rx_priv->skb = skb;
skb               468 drivers/net/ethernet/neterion/vxge/vxge-main.c 					__LINE__, skb);
skb               469 drivers/net/ethernet/neterion/vxge/vxge-main.c 				memcpy(skb_up->data, skb->data, pkt_length);
skb               476 drivers/net/ethernet/neterion/vxge/vxge-main.c 				skb = skb_up;
skb               477 drivers/net/ethernet/neterion/vxge/vxge-main.c 				skb_put(skb, pkt_length);
skb               495 drivers/net/ethernet/neterion/vxge/vxge-main.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               497 drivers/net/ethernet/neterion/vxge/vxge-main.c 			skb_checksum_none_assert(skb);
skb               502 drivers/net/ethernet/neterion/vxge/vxge-main.c 			u32 ns = *(u32 *)(skb->head + pkt_length);
skb               504 drivers/net/ethernet/neterion/vxge/vxge-main.c 			skb_hwts = skb_hwtstamps(skb);
skb               513 drivers/net/ethernet/neterion/vxge/vxge-main.c 			skb_set_hash(skb, ext_info.rth_value,
skb               516 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vxge_rx_complete(ring, skb, ext_info.vlan,
skb               551 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct sk_buff *skb, **done_skb = *skb_ptr;
skb               564 drivers/net/ethernet/neterion/vxge/vxge-main.c 		skb = txd_priv->skb;
skb               565 drivers/net/ethernet/neterion/vxge/vxge-main.c 		frg_cnt = skb_shinfo(skb)->nr_frags;
skb               566 drivers/net/ethernet/neterion/vxge/vxge-main.c 		frag = &skb_shinfo(skb)->frags[0];
skb               573 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vxge_assert(skb);
skb               577 drivers/net/ethernet/neterion/vxge/vxge-main.c 			skb, txd_priv, frg_cnt);
skb               589 drivers/net/ethernet/neterion/vxge/vxge-main.c 				skb_headlen(skb), PCI_DMA_TODEVICE);
skb               603 drivers/net/ethernet/neterion/vxge/vxge-main.c 		fifo->stats.tx_bytes += skb->len;
skb               606 drivers/net/ethernet/neterion/vxge/vxge-main.c 		*done_skb++ = skb;
skb               631 drivers/net/ethernet/neterion/vxge/vxge-main.c static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
skb               634 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               638 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ip = ip_hdr(skb);
skb               803 drivers/net/ethernet/neterion/vxge/vxge-main.c vxge_xmit(struct sk_buff *skb, struct net_device *dev)
skb               823 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (unlikely(skb->len <= 0)) {
skb               826 drivers/net/ethernet/neterion/vxge/vxge-main.c 		dev_kfree_skb_any(skb);
skb               835 drivers/net/ethernet/neterion/vxge/vxge-main.c 		dev_kfree_skb_any(skb);
skb               840 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
skb               845 drivers/net/ethernet/neterion/vxge/vxge-main.c 			dev_kfree_skb_any(skb);
skb               851 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vpath_no = skb_get_queue_mapping(skb);
skb               853 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vpath_no = vxge_get_vpath_no(vdev, skb);
skb               893 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (skb_vlan_tag_present(skb)) {
skb               894 drivers/net/ethernet/neterion/vxge/vxge-main.c 		u16 vlan_tag = skb_vlan_tag_get(skb);
skb               898 drivers/net/ethernet/neterion/vxge/vxge-main.c 	first_frg_len = skb_headlen(skb);
skb               900 drivers/net/ethernet/neterion/vxge/vxge-main.c 	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
skb               910 drivers/net/ethernet/neterion/vxge/vxge-main.c 	txdl_priv->skb = skb;
skb               913 drivers/net/ethernet/neterion/vxge/vxge-main.c 	frg_cnt = skb_shinfo(skb)->nr_frags;
skb               917 drivers/net/ethernet/neterion/vxge/vxge-main.c 			__func__, __LINE__, skb, txdl_priv,
skb               923 drivers/net/ethernet/neterion/vxge/vxge-main.c 	frag = &skb_shinfo(skb)->frags[0];
skb               946 drivers/net/ethernet/neterion/vxge/vxge-main.c 	offload_type = vxge_offload_type(skb);
skb               949 drivers/net/ethernet/neterion/vxge/vxge-main.c 		int mss = vxge_tcp_mss(skb);
skb               955 drivers/net/ethernet/neterion/vxge/vxge-main.c 			vxge_assert(skb->len <=
skb               962 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               978 drivers/net/ethernet/neterion/vxge/vxge-main.c 	frag = &skb_shinfo(skb)->frags[0];
skb               981 drivers/net/ethernet/neterion/vxge/vxge-main.c 			skb_headlen(skb), PCI_DMA_TODEVICE);
skb               992 drivers/net/ethernet/neterion/vxge/vxge-main.c 	dev_kfree_skb_any(skb);
skb              1018 drivers/net/ethernet/neterion/vxge/vxge-main.c 	dev_kfree_skb(rx_priv->skb);
skb              1038 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct sk_buff *skb = txd_priv->skb;
skb              1046 drivers/net/ethernet/neterion/vxge/vxge-main.c 	vxge_assert(skb);
skb              1047 drivers/net/ethernet/neterion/vxge/vxge-main.c 	frg_cnt = skb_shinfo(skb)->nr_frags;
skb              1048 drivers/net/ethernet/neterion/vxge/vxge-main.c 	frag = &skb_shinfo(skb)->frags[0];
skb              1052 drivers/net/ethernet/neterion/vxge/vxge-main.c 		skb_headlen(skb), PCI_DMA_TODEVICE);
skb              1060 drivers/net/ethernet/neterion/vxge/vxge-main.c 	dev_kfree_skb(skb);
skb               404 drivers/net/ethernet/neterion/vxge/vxge-main.h 	struct sk_buff		*skb;
skb               411 drivers/net/ethernet/neterion/vxge/vxge-main.h 	struct sk_buff		*skb;
skb               513 drivers/net/ethernet/neterion/vxge/vxge-main.h #define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
skb               514 drivers/net/ethernet/neterion/vxge/vxge-main.h #define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
skb               515 drivers/net/ethernet/neterion/vxge/vxge-main.h #define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
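
vxge_xmit() and its completion path map the linear head with pci_map_single() and each fragment separately, then unmap them in the same order when the TxD completes. A sketch of the unmap side using the modern DMA API; the dma_addrs[] array stands in for whatever per-descriptor bookkeeping the driver keeps:

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    static void example_tx_unmap(struct device *dma_dev, struct sk_buff *skb,
                                 const dma_addr_t *dma_addrs)
    {
            unsigned int frg_cnt = skb_shinfo(skb)->nr_frags;
            unsigned int i;

            /* Entry 0 covers the linear head ... */
            dma_unmap_single(dma_dev, dma_addrs[0], skb_headlen(skb),
                             DMA_TO_DEVICE);

            /* ... the rest cover the page fragments, in order. */
            for (i = 0; i < frg_cnt; i++) {
                    const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                    dma_unmap_page(dma_dev, dma_addrs[i + 1],
                                   skb_frag_size(frag), DMA_TO_DEVICE);
            }
    }
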
skb                20 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	struct sk_buff *skb;
skb                22 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
skb                23 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	skb_put(skb, size);
skb                25 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	return skb;
skb                86 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	struct sk_buff *skb;
skb                90 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
skb                91 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	if (!skb)
skb                94 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req = (void *)skb->data;
skb               101 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
skb               103 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	if (IS_ERR(skb))
skb               104 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 		return PTR_ERR(skb);
skb               106 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	reply = (void *)skb->data;
skb               112 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	dev_consume_skb_any(skb);
skb               116 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	dev_kfree_skb_any(skb);
skb               124 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	struct sk_buff *skb;
skb               127 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
skb               128 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	if (!skb) {
skb               133 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req = (void *)skb->data;
skb               136 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
skb               138 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	if (IS_ERR(skb)) {
skb               143 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	reply = (void *)skb->data;
skb               148 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	dev_consume_skb_any(skb);
skb               263 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 			  struct sk_buff *skb, u32 cache_gen)
skb               283 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 			swap(nfp_map->cache, skb);
skb               288 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	dev_consume_skb_any(skb);
skb               301 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	struct sk_buff *skb;
skb               315 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
skb               316 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	if (!skb) {
skb               321 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	req = (void *)skb->data;
skb               333 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 0);
skb               334 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	if (IS_ERR(skb)) {
skb               335 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 		err = PTR_ERR(skb);
skb               339 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	if (skb->len < sizeof(*reply)) {
skb               341 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 			  op, skb->len);
skb               346 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	reply = (void *)skb->data;
skb               358 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	if (skb->len != nfp_bpf_cmsg_map_reply_size(bpf, reply_entries)) {
skb               360 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 			  op, skb->len, reply_entries);
skb               373 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	nfp_bpf_ctrl_op_cache_put(nfp_map, op, skb, cache_gen);
skb               377 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	dev_kfree_skb_any(skb);
skb               442 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
skb               446 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
skb               447 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
skb               448 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 		dev_kfree_skb_any(skb);
skb               452 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
skb               453 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 		if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
skb               454 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 			dev_consume_skb_any(skb);
skb               456 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 			dev_kfree_skb_any(skb);
skb               459 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c 	nfp_ccm_rx(&bpf->ccm, skb);
skb               609 drivers/net/ethernet/netronome/nfp/bpf/main.h void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
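
The BPF map-alloc/free requests above all follow one shape: build a request at skb->data, exchange it via nfp_ccm_communicate(), then treat the returned skb as the reply, using dev_consume_skb_any() when the reply was handled and dev_kfree_skb_any() on error drops. A minimal sketch of the reply half; struct my_reply and fw_send_and_wait() are hypothetical stand-ins, not the real wire format:

    #include <linux/kernel.h>
    #include <linux/err.h>
    #include <linux/skbuff.h>

    struct my_reply { __be32 rc; };  /* hypothetical wire format */

    extern struct sk_buff *fw_send_and_wait(struct sk_buff *skb); /* hypothetical */

    static int example_roundtrip(struct sk_buff *skb)
    {
            struct my_reply *reply;
            int rc;

            skb = fw_send_and_wait(skb);    /* may return ERR_PTR() */
            if (IS_ERR(skb))
                    return PTR_ERR(skb);

            reply = (void *)skb->data;
            rc = be32_to_cpu(reply->rc);

            /* Reply fully processed: "consume" rather than "free",
             * which keeps drop monitors quiet. */
            dev_consume_skb_any(skb);
            return rc;
    }
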
skb                50 drivers/net/ethernet/netronome/nfp/ccm.c 	struct sk_buff *skb;
skb                52 drivers/net/ethernet/netronome/nfp/ccm.c 	skb_queue_walk(&ccm->replies, skb) {
skb                53 drivers/net/ethernet/netronome/nfp/ccm.c 		msg_tag = nfp_ccm_get_tag(skb);
skb                56 drivers/net/ethernet/netronome/nfp/ccm.c 			__skb_unlink(skb, &ccm->replies);
skb                57 drivers/net/ethernet/netronome/nfp/ccm.c 			return skb;
skb                67 drivers/net/ethernet/netronome/nfp/ccm.c 	struct sk_buff *skb;
skb                70 drivers/net/ethernet/netronome/nfp/ccm.c 	skb = __nfp_ccm_reply(ccm, tag);
skb                73 drivers/net/ethernet/netronome/nfp/ccm.c 	return skb;
skb                79 drivers/net/ethernet/netronome/nfp/ccm.c 	struct sk_buff *skb;
skb                82 drivers/net/ethernet/netronome/nfp/ccm.c 	skb = __nfp_ccm_reply(ccm, tag);
skb                83 drivers/net/ethernet/netronome/nfp/ccm.c 	if (!skb)
skb                87 drivers/net/ethernet/netronome/nfp/ccm.c 	return skb;
skb                94 drivers/net/ethernet/netronome/nfp/ccm.c 	struct sk_buff *skb;
skb                99 drivers/net/ethernet/netronome/nfp/ccm.c 		skb = nfp_ccm_reply(ccm, app, tag);
skb               100 drivers/net/ethernet/netronome/nfp/ccm.c 		if (skb)
skb               101 drivers/net/ethernet/netronome/nfp/ccm.c 			return skb;
skb               105 drivers/net/ethernet/netronome/nfp/ccm.c 					       skb = nfp_ccm_reply(ccm, app,
skb               111 drivers/net/ethernet/netronome/nfp/ccm.c 	if (!skb)
skb               112 drivers/net/ethernet/netronome/nfp/ccm.c 		skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
skb               119 drivers/net/ethernet/netronome/nfp/ccm.c 	if (!skb) {
skb               124 drivers/net/ethernet/netronome/nfp/ccm.c 	return skb;
skb               128 drivers/net/ethernet/netronome/nfp/ccm.c nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
skb               139 drivers/net/ethernet/netronome/nfp/ccm.c 		dev_kfree_skb_any(skb);
skb               143 drivers/net/ethernet/netronome/nfp/ccm.c 	hdr = (void *)skb->data;
skb               148 drivers/net/ethernet/netronome/nfp/ccm.c 	__nfp_app_ctrl_tx(app, skb);
skb               152 drivers/net/ethernet/netronome/nfp/ccm.c 	skb = nfp_ccm_wait_reply(ccm, app, type, tag);
skb               153 drivers/net/ethernet/netronome/nfp/ccm.c 	if (IS_ERR(skb))
skb               154 drivers/net/ethernet/netronome/nfp/ccm.c 		return skb;
skb               156 drivers/net/ethernet/netronome/nfp/ccm.c 	reply_type = nfp_ccm_get_type(skb);
skb               163 drivers/net/ethernet/netronome/nfp/ccm.c 	if (reply_size && skb->len != reply_size) {
skb               165 drivers/net/ethernet/netronome/nfp/ccm.c 			 type, skb->len, reply_size);
skb               169 drivers/net/ethernet/netronome/nfp/ccm.c 	return skb;
skb               171 drivers/net/ethernet/netronome/nfp/ccm.c 	dev_kfree_skb_any(skb);
skb               175 drivers/net/ethernet/netronome/nfp/ccm.c void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
skb               180 drivers/net/ethernet/netronome/nfp/ccm.c 	if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
skb               181 drivers/net/ethernet/netronome/nfp/ccm.c 		ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
skb               187 drivers/net/ethernet/netronome/nfp/ccm.c 	tag = nfp_ccm_get_tag(skb);
skb               194 drivers/net/ethernet/netronome/nfp/ccm.c 	__skb_queue_tail(&ccm->replies, skb);
skb               203 drivers/net/ethernet/netronome/nfp/ccm.c 	dev_kfree_skb_any(skb);
skb                48 drivers/net/ethernet/netronome/nfp/ccm.h static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
skb                52 drivers/net/ethernet/netronome/nfp/ccm.h 	hdr = (struct nfp_ccm_hdr *)skb->data;
skb                57 drivers/net/ethernet/netronome/nfp/ccm.h static inline __be16 __nfp_ccm_get_tag(struct sk_buff *skb)
skb                61 drivers/net/ethernet/netronome/nfp/ccm.h 	hdr = (struct nfp_ccm_hdr *)skb->data;
skb                66 drivers/net/ethernet/netronome/nfp/ccm.h static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
skb                68 drivers/net/ethernet/netronome/nfp/ccm.h 	return be16_to_cpu(__nfp_ccm_get_tag(skb));
skb               108 drivers/net/ethernet/netronome/nfp/ccm.h void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
skb               110 drivers/net/ethernet/netronome/nfp/ccm.h nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
skb               121 drivers/net/ethernet/netronome/nfp/ccm.h int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
skb               125 drivers/net/ethernet/netronome/nfp/ccm.h int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
skb               129 drivers/net/ethernet/netronome/nfp/ccm.h int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
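
__nfp_ccm_reply() above pairs replies with waiters by tag: walk ccm->replies and unlink the skb whose header tag matches. A minimal sketch of that lookup with an illustrative header layout (the real struct nfp_ccm_hdr may differ); the caller is assumed to hold the queue lock, which is why the lockless __skb_unlink() is safe:

    #include <linux/types.h>
    #include <linux/skbuff.h>

    struct example_hdr { u8 type; u8 ver; __be16 tag; };  /* illustrative */

    static u16 example_get_tag(struct sk_buff *skb)
    {
            return be16_to_cpu(((struct example_hdr *)skb->data)->tag);
    }

    static struct sk_buff *example_find_reply(struct sk_buff_head *replies,
                                              u16 tag)
    {
            struct sk_buff *skb;

            skb_queue_walk(replies, skb) {
                    if (example_get_tag(skb) == tag) {
                            __skb_unlink(skb, replies);
                            return skb;
                    }
            }
            return NULL;
    }
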
skb                62 drivers/net/ethernet/netronome/nfp/ccm_mbox.c nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len)
skb                64 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
skb                73 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb)
skb                75 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
skb                80 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static bool nfp_ccm_mbox_done(struct sk_buff *skb)
skb                82 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
skb                87 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static bool nfp_ccm_mbox_in_progress(struct sk_buff *skb)
skb                89 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
skb                95 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static void nfp_ccm_mbox_set_busy(struct sk_buff *skb)
skb                97 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
skb               102 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static bool nfp_ccm_mbox_is_posted(struct sk_buff *skb)
skb               104 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
skb               109 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static void nfp_ccm_mbox_mark_posted(struct sk_buff *skb)
skb               111 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
skb               116 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb)
skb               118 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
skb               121 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct sk_buff *skb)
skb               123 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
skb               131 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct sk_buff *skb;
skb               133 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = skb_peek(&nn->mbox_cmsg.queue);
skb               134 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	if (!skb)
skb               137 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	cb = (void *)skb->cb;
skb               153 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct sk_buff *skb;
skb               159 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = __skb_peek(&nn->mbox_cmsg.queue);
skb               162 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 				       skb->len);
skb               166 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		data = (__be32 *)skb->data;
skb               167 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		cnt = skb->len / 4;
skb               172 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		if (skb->len & 3) {
skb               175 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 			memcpy(&tmp, &data[i], skb->len & 3);
skb               181 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		len = round_up(skb->len, 4);
skb               182 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		reserve = nfp_ccm_mbox_maxlen(skb) - len;
skb               190 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		if (skb == last)
skb               192 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
skb               201 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct sk_buff *skb;
skb               203 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = __skb_peek(&nn->mbox_cmsg.queue);
skb               205 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		if (__nfp_ccm_get_tag(skb) == tag)
skb               206 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 			return skb;
skb               208 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		if (skb == last)
skb               210 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
skb               218 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct sk_buff *skb;
skb               262 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = nfp_ccm_mbox_find_req(nn, hdr.tag, last);
skb               263 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		if (!skb) {
skb               268 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		cb = (void *)skb->cb;
skb               273 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 				   nfp_ccm_get_type(skb));
skb               278 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		if (hdr.type != __NFP_CCM_REPLY(nfp_ccm_get_type(skb))) {
skb               281 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 				   __NFP_CCM_REPLY(nfp_ccm_get_type(skb)));
skb               302 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 			if (length <= skb->len)
skb               303 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 				__skb_trim(skb, length);
skb               305 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 				skb_put(skb, length - skb->len);
skb               311 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 			skb_data = (__be32 *)skb->data;
skb               332 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = __skb_dequeue(&nn->mbox_cmsg.queue);
skb               333 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		cb = (void *)skb->cb;
skb               345 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 					   nfp_ccm_get_type(skb), cb->err);
skb               346 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 			dev_consume_skb_any(skb);
skb               348 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	} while (skb != last);
skb               358 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct sk_buff *skb;
skb               362 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = __skb_dequeue(&nn->mbox_cmsg.queue);
skb               363 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		cb = (void *)skb->cb;
skb               368 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	} while (skb != last);
skb               378 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct sk_buff *skb, *last;
skb               385 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	last = skb = __skb_peek(&nn->mbox_cmsg.queue);
skb               386 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	space -= 4 + nfp_ccm_mbox_maxlen(skb);
skb               389 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = skb_queue_next(&nn->mbox_cmsg.queue, last);
skb               390 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		space -= 4 + nfp_ccm_mbox_maxlen(skb);
skb               393 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		last = skb;
skb               394 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		nfp_ccm_mbox_set_busy(skb);
skb               420 drivers/net/ethernet/netronome/nfp/ccm_mbox.c static int nfp_ccm_mbox_skb_return(struct sk_buff *skb)
skb               422 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
skb               425 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		dev_kfree_skb_any(skb);
skb               434 drivers/net/ethernet/netronome/nfp/ccm_mbox.c nfp_ccm_mbox_unlink_unlock(struct nfp_net *nn, struct sk_buff *skb,
skb               440 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	if (nfp_ccm_mbox_in_progress(skb)) {
skb               443 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb));
skb               445 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		return nfp_ccm_mbox_skb_return(skb);
skb               448 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	was_first = nfp_ccm_mbox_should_run(nn, skb);
skb               449 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	__skb_unlink(skb, &nn->mbox_cmsg.queue);
skb               464 drivers/net/ethernet/netronome/nfp/ccm_mbox.c nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
skb               492 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	undersize = max_reply_size - (skb_end_pointer(skb) - skb->data);
skb               494 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		err = pskb_expand_head(skb, 0, undersize, flags);
skb               503 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	max_len = max(max_reply_size, round_up(skb->len, 4));
skb               507 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 			   skb->len, max_reply_size, mbox_max);
skb               511 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	nfp_ccm_mbox_msg_init(skb, reply_size, max_len);
skb               517 drivers/net/ethernet/netronome/nfp/ccm_mbox.c nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
skb               529 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	hdr = (void *)skb->data;
skb               534 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	__skb_queue_tail(&nn->mbox_cmsg.queue, skb);
skb               539 drivers/net/ethernet/netronome/nfp/ccm_mbox.c int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
skb               546 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	err = nfp_ccm_mbox_msg_prepare(nn, skb, type, reply_size,
skb               553 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, critical);
skb               558 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	if (!nfp_ccm_mbox_is_first(nn, skb)) {
skb               564 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 					 nfp_ccm_mbox_done(skb) ||
skb               565 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 					 nfp_ccm_mbox_should_run(nn, skb),
skb               569 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		if (nfp_ccm_mbox_done(skb)) {
skb               571 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 			return nfp_ccm_mbox_skb_return(skb);
skb               576 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		if (!nfp_ccm_mbox_is_first(nn, skb)) {
skb               579 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 			err = nfp_ccm_mbox_unlink_unlock(nn, skb, type);
skb               588 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	return nfp_ccm_mbox_skb_return(skb);
skb               593 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	dev_kfree_skb_any(skb);
skb               597 drivers/net/ethernet/netronome/nfp/ccm_mbox.c int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
skb               602 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	return __nfp_ccm_mbox_communicate(nn, skb, type, reply_size,
skb               608 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct sk_buff *skb;
skb               615 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = __skb_peek(&nn->mbox_cmsg.queue);
skb               616 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb) ||
skb               617 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		    !nfp_ccm_mbox_should_run(nn, skb))) {
skb               627 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct sk_buff *skb;
skb               633 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = skb_peek(&nn->mbox_cmsg.queue);
skb               634 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb)))
skb               640 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		nfp_ccm_mbox_copy_out(nn, skb);
skb               642 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		nfp_ccm_mbox_mark_all_err(nn, skb, -EIO);
skb               648 drivers/net/ethernet/netronome/nfp/ccm_mbox.c int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
skb               653 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	err = nfp_ccm_mbox_msg_prepare(nn, skb, type, 0, max_reply_size,
skb               658 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	nfp_ccm_mbox_mark_posted(skb);
skb               662 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, false);
skb               666 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	if (nfp_ccm_mbox_is_first(nn, skb)) {
skb               668 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 			nfp_ccm_mbox_copy_in(nn, skb);
skb               685 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	dev_kfree_skb_any(skb);
skb               694 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	struct sk_buff *skb;
skb               702 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = alloc_skb(max_size, flags);
skb               703 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	if (!skb)
skb               706 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb_put(skb, req_size);
skb               708 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	return skb;
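
nfp_ccm_mbox_msg_alloc() at the end of this block sizes one skb for both directions: room for the larger of request and expected reply is allocated up front, only the request is skb_put(), and the copy-out path later grows the skb into the reserved tailroom (the skb_put(skb, length - skb->len) seen around ccm_mbox.c:305). A minimal sketch of that sizing:

    #include <linux/kernel.h>
    #include <linux/skbuff.h>

    static struct sk_buff *example_mbox_alloc(unsigned int req_size,
                                              unsigned int reply_size,
                                              gfp_t flags)
    {
            unsigned int max_size = max(req_size, reply_size);
            struct sk_buff *skb;

            /* One buffer serves both directions. */
            skb = alloc_skb(max_size, flags);
            if (!skb)
                    return NULL;

            /* Expose only the request; the reply can later grow into
             * the remaining tailroom without reallocation. */
            skb_put(skb, req_size);
            return skb;
    }
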
skb               110 drivers/net/ethernet/netronome/nfp/crypto/tls.c nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
skb               116 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	err = __nfp_ccm_mbox_communicate(nn, skb, type,
skb               124 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	reply = (void *)skb->data;
skb               129 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	dev_consume_skb_any(skb);
skb               137 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	struct sk_buff *skb;
skb               139 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
skb               140 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	if (!skb)
skb               143 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req = (void *)skb->data;
skb               147 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	nfp_net_tls_communicate_simple(nn, skb, "delete",
skb               274 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	struct sk_buff *skb;
skb               311 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
skb               312 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	if (!skb) {
skb               317 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	front = (void *)skb->data;
skb               325 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req = (void *)skb->data;
skb               345 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	skb_get(skb);
skb               347 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
skb               349 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	reply = (void *)skb->data;
skb               354 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	if (!WARN_ON_ONCE((u8 *)back < skb->head ||
skb               355 drivers/net/ethernet/netronome/nfp/crypto/tls.c 			  (u8 *)back > skb_end_pointer(skb)) &&
skb               358 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	dev_consume_skb_any(skb); /* the extra ref from skb_get() above */
skb               389 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	dev_consume_skb_any(skb);
skb               401 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	dev_consume_skb_any(skb);
skb               427 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	struct sk_buff *skb;
skb               432 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
skb               433 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	if (!skb)
skb               437 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req = (void *)skb->data;
skb               446 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		err = nfp_net_tls_communicate_simple(nn, skb, "sync",
skb               452 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
skb               468 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	struct sk_buff *skb;
skb               470 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
skb               471 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	if (!skb)
skb               474 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	req = (void *)skb->data;
skb               477 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	return nfp_net_tls_communicate_simple(nn, skb, "reset",
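
The skb_get() at crypto/tls.c:345 is worth calling out: it takes an extra reference before the mailbox call so the caller can still parse the reply buffer afterwards, and the later dev_consume_skb_any() drops only that extra reference. A minimal sketch of the idiom; consumer_takes_ref() and inspect_reply() are hypothetical stand-ins:

    #include <linux/skbuff.h>

    extern void consumer_takes_ref(struct sk_buff *skb);  /* hypothetical */
    extern void inspect_reply(const void *data);          /* hypothetical */

    static void example_hold_across_call(struct sk_buff *skb)
    {
            skb_get(skb);                   /* refcount now 2 */
            consumer_takes_ref(skb);        /* consumer drops one ref */

            /* skb is still valid here thanks to our extra reference. */
            inspect_reply(skb->data);

            dev_consume_skb_any(skb);       /* drop our extra reference */
    }
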
skb                16 drivers/net/ethernet/netronome/nfp/flower/cmsg.c nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
skb                18 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	return (struct nfp_flower_cmsg_hdr *)skb->data;
skb                26 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	struct sk_buff *skb;
skb                30 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	skb = nfp_app_ctrl_msg_alloc(app, size, flag);
skb                31 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	if (!skb)
skb                34 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	ch = nfp_flower_cmsg_get_hdr(skb);
skb                38 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	skb_put(skb, size);
skb                40 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	return skb;
skb                47 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	struct sk_buff *skb;
skb                49 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	skb = nfp_flower_cmsg_alloc(app, struct_size(msg, ports, num_ports),
skb                51 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	if (!skb)
skb                54 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	msg = nfp_flower_cmsg_get_data(skb);
skb                58 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	return skb;
skb                62 drivers/net/ethernet/netronome/nfp/flower/cmsg.c nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
skb                68 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	msg = nfp_flower_cmsg_get_data(skb);
skb                79 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	struct sk_buff *skb;
skb                81 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
skb                83 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	if (!skb)
skb                86 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	msg = nfp_flower_cmsg_get_data(skb);
skb                96 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	nfp_ctrl_tx(repr->app->ctrl, skb);
skb               104 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	struct sk_buff *skb;
skb               106 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
skb               109 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	if (!skb)
skb               112 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	msg = nfp_flower_cmsg_get_data(skb);
skb               117 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	nfp_ctrl_tx(repr->app->ctrl, skb);
skb               123 drivers/net/ethernet/netronome/nfp/flower/cmsg.c nfp_flower_process_mtu_ack(struct nfp_app *app, struct sk_buff *skb)
skb               128 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	msg = nfp_flower_cmsg_get_data(skb);
skb               151 drivers/net/ethernet/netronome/nfp/flower/cmsg.c nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
skb               157 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	msg = nfp_flower_cmsg_get_data(skb);
skb               186 drivers/net/ethernet/netronome/nfp/flower/cmsg.c nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
skb               192 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	msg = nfp_flower_cmsg_get_data(skb);
skb               208 drivers/net/ethernet/netronome/nfp/flower/cmsg.c nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
skb               210 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
skb               215 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	msg = nfp_flower_cmsg_get_data(skb);
skb               252 drivers/net/ethernet/netronome/nfp/flower/cmsg.c nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
skb               259 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
skb               264 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		nfp_flower_cmsg_portmod_rx(app, skb);
skb               268 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 			nfp_flower_cmsg_merge_hint_rx(app, skb);
skb               273 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		nfp_tunnel_request_route(app, skb);
skb               276 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		nfp_tunnel_keep_alive(app, skb);
skb               279 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		nfp_flower_stats_rlim_reply(app, skb);
skb               283 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 			skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
skb               295 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		dev_consume_skb_any(skb);
skb               298 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	dev_kfree_skb_any(skb);
skb               305 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	struct sk_buff *skb;
skb               318 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	while ((skb = __skb_dequeue(&cmsg_joined)))
skb               319 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		nfp_flower_cmsg_process_one_rx(priv->app, skb);
skb               323 drivers/net/ethernet/netronome/nfp/flower/cmsg.c nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
skb               335 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		dev_kfree_skb_any(skb);
skb               339 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	skb_queue_tail(skb_head, skb);
skb               343 drivers/net/ethernet/netronome/nfp/flower/cmsg.c void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
skb               347 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
skb               352 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		dev_kfree_skb_any(skb);
skb               358 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		nfp_flower_rx_flow_stats(app, skb);
skb               359 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		dev_consume_skb_any(skb);
skb               361 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		   nfp_flower_process_mtu_ack(app, skb)) {
skb               363 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		dev_consume_skb_any(skb);
skb               366 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		dev_consume_skb_any(skb);
skb               369 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		nfp_flower_cmsg_portreify_rx(app, skb);
skb               370 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		dev_consume_skb_any(skb);
skb               372 drivers/net/ethernet/netronome/nfp/flower/cmsg.c 		nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
skb               583 drivers/net/ethernet/netronome/nfp/flower/cmsg.h static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
skb               585 drivers/net/ethernet/netronome/nfp/flower/cmsg.h 	return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN;
skb               588 drivers/net/ethernet/netronome/nfp/flower/cmsg.h static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
skb               590 drivers/net/ethernet/netronome/nfp/flower/cmsg.h 	return skb->len - NFP_FLOWER_CMSG_HLEN;
skb               626 drivers/net/ethernet/netronome/nfp/flower/cmsg.h nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
skb               633 drivers/net/ethernet/netronome/nfp/flower/cmsg.h void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
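
The flower control messages above keep a fixed header at skb->data and hand out the payload through nfp_flower_cmsg_get_data()/nfp_flower_cmsg_get_data_len(). A sketch of that accessor pattern with an illustrative header layout (the real struct nfp_flower_cmsg_hdr and NFP_FLOWER_CMSG_HLEN may differ):

    #include <linux/types.h>
    #include <linux/skbuff.h>

    struct example_cmsg_hdr {
            u8 pad;
            u8 version;
            __be16 type;
    };  /* illustrative, not the real layout */

    #define EXAMPLE_CMSG_HLEN sizeof(struct example_cmsg_hdr)

    static inline void *example_cmsg_data(struct sk_buff *skb)
    {
            return (unsigned char *)skb->data + EXAMPLE_CMSG_HLEN;
    }

    static inline int example_cmsg_data_len(struct sk_buff *skb)
    {
            return skb->len - EXAMPLE_CMSG_HLEN;
    }
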
skb               210 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 	struct sk_buff *skb;
skb               214 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 	skb = nfp_flower_cmsg_alloc(priv->app, size,
skb               217 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 	if (!skb)
skb               220 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 	cmsg_payload = nfp_flower_cmsg_get_data(skb);
skb               260 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 	nfp_ctrl_tx(priv->app->ctrl, skb);
skb               383 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
skb               387 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 	cmsg_payload = nfp_flower_cmsg_get_data(skb);
skb               398 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 	__skb_queue_tail(&lag->retrans_skbs, skb);
skb               406 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 	struct sk_buff *skb;
skb               410 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 	while ((skb = __skb_dequeue(&lag->retrans_skbs)))
skb               411 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 		nfp_ctrl_tx(priv->app->ctrl, skb);
skb               414 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
skb               423 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 	cmsg_payload = nfp_flower_cmsg_get_data(skb);
skb               433 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c 		if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
skb               390 drivers/net/ethernet/netronome/nfp/flower/main.h void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
skb               399 drivers/net/ethernet/netronome/nfp/flower/main.h void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
skb               400 drivers/net/ethernet/netronome/nfp/flower/main.h void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
skb               407 drivers/net/ethernet/netronome/nfp/flower/main.h bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
skb               418 drivers/net/ethernet/netronome/nfp/flower/main.h void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
skb               112 drivers/net/ethernet/netronome/nfp/flower/metadata.c void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
skb               114 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
skb               121 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	msg = nfp_flower_cmsg_get_data(skb);
skb                89 drivers/net/ethernet/netronome/nfp/flower/offload.c 	struct sk_buff *skb;
skb               106 drivers/net/ethernet/netronome/nfp/flower/offload.c 	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
skb               107 drivers/net/ethernet/netronome/nfp/flower/offload.c 	if (!skb)
skb               110 drivers/net/ethernet/netronome/nfp/flower/offload.c 	msg = nfp_flower_cmsg_get_data(skb);
skb               124 drivers/net/ethernet/netronome/nfp/flower/offload.c 	nfp_ctrl_tx(app->ctrl, skb);
skb                70 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	struct sk_buff *skb;
skb               111 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
skb               113 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	if (!skb)
skb               116 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	config = nfp_flower_cmsg_get_data(skb);
skb               125 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	nfp_ctrl_tx(repr->app->ctrl, skb);
skb               145 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	struct sk_buff *skb;
skb               162 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
skb               164 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	if (!skb)
skb               173 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	config = nfp_flower_cmsg_get_data(skb);
skb               176 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	nfp_ctrl_tx(repr->app->ctrl, skb);
skb               181 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
skb               192 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	msg = nfp_flower_cmsg_get_data(skb);
skb               227 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	struct sk_buff *skb;
skb               229 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	skb = nfp_flower_cmsg_alloc(fl_priv->app,
skb               233 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	if (!skb)
skb               236 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	head = nfp_flower_cmsg_get_data(skb);
skb               240 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c 	nfp_ctrl_tx(fl_priv->app->ctrl, skb);
skb               168 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
skb               177 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	payload = nfp_flower_cmsg_get_data(skb);
skb               184 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	pay_len = nfp_flower_cmsg_get_data_len(skb);
skb               213 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	struct sk_buff *skb;
skb               216 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
skb               217 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	if (!skb)
skb               220 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	msg = nfp_flower_cmsg_get_data(skb);
skb               221 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));
skb               223 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	nfp_ctrl_tx(app->ctrl, skb);
skb               380 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
skb               389 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 	payload = nfp_flower_cmsg_get_data(skb);
skb               105 drivers/net/ethernet/netronome/nfp/nfp_app.c 	struct sk_buff *skb;
skb               110 drivers/net/ethernet/netronome/nfp/nfp_app.c 	skb = alloc_skb(size, priority);
skb               111 drivers/net/ethernet/netronome/nfp/nfp_app.c 	if (!skb)
skb               115 drivers/net/ethernet/netronome/nfp/nfp_app.c 		skb_reserve(skb, 8);
skb               117 drivers/net/ethernet/netronome/nfp/nfp_app.c 	return skb;
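The nfp_app.c allocator reserves 8 bytes of headroom on control skbs so the TX path can later skb_push() metadata words without reallocating (see the NFP_NET_META_PORTID pushes in nfp_net_common.c further down). A sketch of the same reserve-then-push discipline, assuming plain alloc_skb(); the helper name is hypothetical:

#include <linux/skbuff.h>

#define CTRL_META_HEADROOM 8	/* matches the skb_reserve(skb, 8) above */

static struct sk_buff *ctrl_msg_alloc(unsigned int size, gfp_t priority)
{
	struct sk_buff *skb;

	skb = alloc_skb(size + CTRL_META_HEADROOM, priority);
	if (!skb)
		return NULL;

	/* Leave room so the TX path can skb_push() two 32-bit meta
	 * words without touching skb->head. */
	skb_reserve(skb, CTRL_META_HEADROOM);
	return skb;
}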
skb               129 drivers/net/ethernet/netronome/nfp/nfp_app.h 	void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
skb               179 drivers/net/ethernet/netronome/nfp/nfp_app.h bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
skb               180 drivers/net/ethernet/netronome/nfp/nfp_app.h bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
skb               337 drivers/net/ethernet/netronome/nfp/nfp_app.h static inline bool __nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
skb               340 drivers/net/ethernet/netronome/nfp/nfp_app.h 			    skb->data, skb->len);
skb               342 drivers/net/ethernet/netronome/nfp/nfp_app.h 	return __nfp_ctrl_tx(app->ctrl, skb);
skb               345 drivers/net/ethernet/netronome/nfp/nfp_app.h static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
skb               348 drivers/net/ethernet/netronome/nfp/nfp_app.h 			    skb->data, skb->len);
skb               350 drivers/net/ethernet/netronome/nfp/nfp_app.h 	return nfp_ctrl_tx(app->ctrl, skb);
skb               353 drivers/net/ethernet/netronome/nfp/nfp_app.h static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
skb               356 drivers/net/ethernet/netronome/nfp/nfp_app.h 			    skb->data, skb->len);
skb               358 drivers/net/ethernet/netronome/nfp/nfp_app.h 	app->type->ctrl_msg_rx(app, skb);
skb               160 drivers/net/ethernet/netronome/nfp/nfp_main.h bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
skb               182 drivers/net/ethernet/netronome/nfp/nfp_net.h 		struct sk_buff *skb;
skb               727 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			   struct nfp_net_tx_desc *txd, struct sk_buff *skb,
skb               733 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (!skb_is_gso(skb))
skb               736 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (!skb->encapsulation) {
skb               737 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		l3_offset = skb_network_offset(skb);
skb               738 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		l4_offset = skb_transport_offset(skb);
skb               739 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               741 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		l3_offset = skb_inner_network_offset(skb);
skb               742 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		l4_offset = skb_inner_transport_offset(skb);
skb               743 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		hdrlen = skb_inner_transport_header(skb) - skb->data +
skb               744 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			inner_tcp_hdrlen(skb);
skb               747 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
skb               750 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
skb               776 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			    struct nfp_net_tx_desc *txd, struct sk_buff *skb)
skb               785 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb               789 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (skb->encapsulation)
skb               792 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
skb               793 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
skb               818 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (skb->encapsulation)
skb               827 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	       struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
skb               836 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		return skb;
skb               837 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
skb               838 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		return skb;
skb               840 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
skb               841 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	seq = ntohl(tcp_hdr(skb)->seq);
skb               842 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
skb               843 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	resync_pending = tls_offload_tx_resync_pending(skb->sk);
skb               847 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			return skb;
skb               853 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nskb = tls_encrypt_skb(skb);
skb               861 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (nskb == skb)
skb               862 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			return skb;
skb               883 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (!skb_is_gso(skb))
skb               886 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
skb               893 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	return skb;
skb               896 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
skb               904 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
skb               907 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
skb               908 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	seq = ntohl(tcp_hdr(skb)->seq);
skb               910 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
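nfp_net_tls_tx() and its undo path both key off the same two values: the TCP sequence number of the segment and the payload length past the transport header. A sketch of just that bookkeeping, with hypothetical helper names, assuming a socket with device TLS TX offload:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/tls.h>

/* Only packets belonging to a device-offloaded TLS socket qualify. */
static bool tls_tx_offloadable(const struct sk_buff *skb)
{
	return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
}

/* Mirrors the datalen/seq computation in nfp_net_tls_tx() above. */
static void tls_tx_extract(const struct sk_buff *skb,
			   unsigned int *datalen, u32 *seq)
{
	*datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	*seq = ntohl(tcp_hdr(skb)->seq);
}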
skb               925 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
skb               927 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
skb               942 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (unlikely(skb_cow_head(skb, md_bytes)))
skb               946 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	data = skb_push(skb, md_bytes) + md_bytes;
skb               975 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
skb               992 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	qidx = skb_get_queue_mapping(skb);
skb               996 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              1010 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
skb              1011 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (unlikely(!skb)) {
skb              1016 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
skb              1021 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
skb              1030 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txbuf->skb = skb;
skb              1034 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txbuf->real_len = skb->len;
skb              1039 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txd->dma_len = cpu_to_le16(skb_headlen(skb));
skb              1041 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txd->data_len = cpu_to_le16(skb->len);
skb              1048 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
skb              1049 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
skb              1050 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
skb              1052 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
skb              1063 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			frag = &skb_shinfo(skb)->frags[f];
skb              1072 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			tx_ring->txbufs[wr_idx].skb = skb;
skb              1089 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	skb_tx_timestamp(skb);
skb              1105 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		frag = &skb_shinfo(skb)->frags[f];
skb              1108 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_ring->txbufs[wr_idx].skb = NULL;
skb              1116 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			 skb_headlen(skb), DMA_TO_DEVICE);
skb              1117 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	tx_ring->txbufs[wr_idx].skb = NULL;
skb              1127 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nfp_net_tls_tx_undo(skb, tls_handle);
skb              1128 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	dev_kfree_skb_any(skb);
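The nfp_net_tx() entries show the canonical ndo_start_xmit shape: map the linear area, map each fragment, record the skb in every descriptor's txbuf, and on a mapping failure unmap everything already mapped before freeing the skb with dev_kfree_skb_any(). A condensed sketch of the map-and-unwind discipline (descriptor writes elided, names hypothetical; @addrs must hold nr_frags + 1 entries):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int map_skb_for_tx(struct device *dev, struct sk_buff *skb,
			  dma_addr_t *addrs)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	addrs[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addrs[0]))
		return -ENOMEM;

	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		addrs[f + 1] = skb_frag_dma_map(dev, frag, 0,
						skb_frag_size(frag),
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[f + 1]))
			goto unwind;
	}
	return 0;

unwind:
	/* Unmap the fragments mapped so far, then the linear area. */
	while (f--)
		dma_unmap_page(dev, addrs[f + 1],
			       skb_frag_size(&skb_shinfo(skb)->frags[f]),
			       DMA_TO_DEVICE);
	dma_unmap_single(dev, addrs[0], skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}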
skb              1160 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		struct sk_buff *skb;
skb              1167 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb = tx_buf->skb;
skb              1168 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (!skb)
skb              1171 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nr_frags = skb_shinfo(skb)->nr_frags;
skb              1177 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 					 skb_headlen(skb), DMA_TO_DEVICE);
skb              1183 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			frag = &skb_shinfo(skb)->frags[fidx];
skb              1190 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			napi_consume_skb(skb, budget);
skb              1193 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_buf->skb = NULL;
skb              1278 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		struct sk_buff *skb;
skb              1284 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb = tx_ring->txbufs[idx].skb;
skb              1285 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nr_frags = skb_shinfo(skb)->nr_frags;
skb              1290 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 					 skb_headlen(skb), DMA_TO_DEVICE);
skb              1293 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
skb              1300 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			dev_kfree_skb_any(skb);
skb              1303 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		tx_buf->skb = NULL;
skb              1591 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			    struct nfp_meta_parsed *meta, struct sk_buff *skb)
skb              1593 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	skb_checksum_none_assert(skb);
skb              1599 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb->ip_summed = meta->csum_type;
skb              1600 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb->csum = meta->csum;
skb              1620 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		__skb_incr_checksum_unnecessary(skb);
skb              1628 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		__skb_incr_checksum_unnecessary(skb);
skb              1714 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		struct sk_buff *skb)
skb              1721 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (skb && rxbuf)
skb              1728 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (skb && rxbuf && skb->head == rxbuf->frag)
skb              1732 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (skb)
skb              1733 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		dev_kfree_skb_any(skb);
skb              1810 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct sk_buff *skb;
skb              1974 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb = build_skb(rxbuf->frag, true_bufsz);
skb              1975 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (unlikely(!skb)) {
skb              1981 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
skb              1989 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb_reserve(skb, pkt_off);
skb              1990 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb_put(skb, pkt_len);
skb              1992 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb->mark = meta.mark;
skb              1993 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb_set_hash(skb, meta.hash, meta.hash_type);
skb              1995 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb_record_rx_queue(skb, rx_ring->idx);
skb              1996 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		skb->protocol = eth_type_trans(skb, netdev);
skb              1998 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
skb              2002 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			skb->decrypted = true;
skb              2010 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              2013 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			skb_metadata_set(skb, meta_len_xdp);
skb              2016 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			napi_gro_receive(&rx_ring->r_vec->napi, skb);
skb              2018 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			skb->dev = netdev;
skb              2019 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			skb_reset_network_header(skb);
skb              2020 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			__skb_push(skb, ETH_HLEN);
skb              2021 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			dev_queue_xmit(skb);
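The RX entries above use the build_skb() fast path: wrap the already-DMA'd buffer fragment in an skb without copying the payload, reserve the packet offset, set the length, then hand the packet to GRO. A minimal sketch of that sequence, assuming the fragment was allocated with enough tailroom for struct skb_shared_info; the function name is hypothetical:

#include <linux/skbuff.h>
#include <linux/etherdevice.h>

static void rx_deliver(struct napi_struct *napi, struct net_device *netdev,
		       void *frag, unsigned int truesize,
		       unsigned int pkt_off, unsigned int pkt_len)
{
	struct sk_buff *skb;

	skb = build_skb(frag, truesize);	/* no payload memcpy */
	if (unlikely(!skb))
		return;			/* caller recycles the fragment */

	skb_reserve(skb, pkt_off);	/* skip headroom and metadata prepend */
	skb_put(skb, pkt_len);

	skb->protocol = eth_type_trans(skb, netdev);
	napi_gro_receive(napi, skb);
}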
skb              2068 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		struct sk_buff *skb, bool old)
skb              2070 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	unsigned int real_len = skb->len, meta_len = 0;
skb              2081 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
skb              2091 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			__skb_queue_tail(&r_vec->queue, skb);
skb              2093 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			__skb_queue_head(&r_vec->queue, skb);
skb              2098 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (unlikely(skb_headroom(skb) < 8)) {
skb              2103 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
skb              2104 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
skb              2108 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
skb              2117 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txbuf->skb = skb;
skb              2126 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txd->dma_len = cpu_to_le16(skb_headlen(skb));
skb              2128 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	txd->data_len = cpu_to_le16(skb->len);
skb              2146 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	dev_kfree_skb_any(skb);
skb              2150 drivers/net/ethernet/netronome/nfp/nfp_net_common.c bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
skb              2154 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	return nfp_ctrl_tx_one(nn, r_vec, skb, false);
skb              2157 drivers/net/ethernet/netronome/nfp/nfp_net_common.c bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
skb              2163 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
skb              2171 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct sk_buff *skb;
skb              2173 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	while ((skb = __skb_dequeue(&r_vec->queue)))
skb              2174 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
skb              2204 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	struct sk_buff *skb;
skb              2248 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	skb = build_skb(rxbuf->frag, dp->fl_bufsz);
skb              2249 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (unlikely(!skb)) {
skb              2255 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
skb              2263 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	skb_reserve(skb, pkt_off);
skb              2264 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	skb_put(skb, pkt_len);
skb              2266 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	nfp_app_ctrl_rx(nn->app, skb);
skb              3493 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
skb              3499 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	features &= vlan_features_check(skb, features);
skb              3501 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (!skb->encapsulation)
skb              3505 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (skb_is_gso(skb)) {
skb              3508 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		hdrlen = skb_inner_transport_header(skb) - skb->data +
skb              3509 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			inner_tcp_hdrlen(skb);
skb              3519 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	switch (vlan_get_protocol(skb)) {
skb              3521 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		l4_hdr = ip_hdr(skb)->protocol;
skb              3524 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 		l4_hdr = ipv6_hdr(skb)->nexthdr;
skb              3530 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb              3531 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	    skb->inner_protocol != htons(ETH_P_TEB) ||
skb              3534 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
skb               112 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 			struct sk_buff *skb = READ_ONCE(tx_ring->txbufs[i].skb);
skb               114 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 			if (skb)
skb               116 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c 					   skb->head, skb->data);
skb               184 drivers/net/ethernet/netronome/nfp/nfp_net_repr.c static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
skb               187 drivers/net/ethernet/netronome/nfp/nfp_net_repr.c 	unsigned int len = skb->len;
skb               190 drivers/net/ethernet/netronome/nfp/nfp_net_repr.c 	skb_dst_drop(skb);
skb               192 drivers/net/ethernet/netronome/nfp/nfp_net_repr.c 	skb_dst_set(skb, (struct dst_entry *)repr->dst);
skb               193 drivers/net/ethernet/netronome/nfp/nfp_net_repr.c 	skb->dev = repr->dst->u.port_info.lower_dev;
skb               195 drivers/net/ethernet/netronome/nfp/nfp_net_repr.c 	ret = dev_queue_xmit(skb);
skb               162 drivers/net/ethernet/ni/nixge.c 	struct sk_buff *skb;
skb               249 drivers/net/ethernet/ni/nixge.c 	struct sk_buff *skb;
skb               260 drivers/net/ethernet/ni/nixge.c 		skb = (struct sk_buff *)(uintptr_t)
skb               263 drivers/net/ethernet/ni/nixge.c 		dev_kfree_skb(skb);
skb               285 drivers/net/ethernet/ni/nixge.c 	struct sk_buff *skb;
skb               327 drivers/net/ethernet/ni/nixge.c 		skb = netdev_alloc_skb_ip_align(ndev,
skb               329 drivers/net/ethernet/ni/nixge.c 		if (!skb)
skb               332 drivers/net/ethernet/ni/nixge.c 		nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb);
skb               333 drivers/net/ethernet/ni/nixge.c 		phys = dma_map_single(ndev->dev.parent, skb->data,
skb               453 drivers/net/ethernet/ni/nixge.c 	if (tx_skb->skb) {
skb               454 drivers/net/ethernet/ni/nixge.c 		dev_kfree_skb_any(tx_skb->skb);
skb               455 drivers/net/ethernet/ni/nixge.c 		tx_skb->skb = NULL;
skb               505 drivers/net/ethernet/ni/nixge.c static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               515 drivers/net/ethernet/ni/nixge.c 	num_frag = skb_shinfo(skb)->nr_frags;
skb               525 drivers/net/ethernet/ni/nixge.c 	cur_phys = dma_map_single(ndev->dev.parent, skb->data,
skb               526 drivers/net/ethernet/ni/nixge.c 				  skb_headlen(skb), DMA_TO_DEVICE);
skb               531 drivers/net/ethernet/ni/nixge.c 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
skb               533 drivers/net/ethernet/ni/nixge.c 	tx_skb->skb = NULL;
skb               535 drivers/net/ethernet/ni/nixge.c 	tx_skb->size = skb_headlen(skb);
skb               543 drivers/net/ethernet/ni/nixge.c 		frag = &skb_shinfo(skb)->frags[ii];
skb               554 drivers/net/ethernet/ni/nixge.c 		tx_skb->skb = NULL;
skb               561 drivers/net/ethernet/ni/nixge.c 	tx_skb->skb = skb;
skb               596 drivers/net/ethernet/ni/nixge.c 	struct sk_buff *skb, *new_skb;
skb               610 drivers/net/ethernet/ni/nixge.c 		skb = (struct sk_buff *)(uintptr_t)
skb               622 drivers/net/ethernet/ni/nixge.c 		skb_put(skb, length);
skb               624 drivers/net/ethernet/ni/nixge.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               625 drivers/net/ethernet/ni/nixge.c 		skb_checksum_none_assert(skb);
skb               630 drivers/net/ethernet/ni/nixge.c 		skb->ip_summed = CHECKSUM_NONE;
skb               632 drivers/net/ethernet/ni/nixge.c 		napi_gro_receive(&priv->napi, skb);
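nixge stashes the skb pointer itself in a descriptor field via a uintptr_t cast (nixge_hw_dma_bd_set_offset above) and casts it back on TX completion and RX processing. A sketch of that round trip with a hypothetical descriptor layout:

#include <linux/skbuff.h>
#include <linux/types.h>

struct my_bd {			/* hypothetical buffer descriptor */
	u64 sw_id_offset;	/* holds the skb pointer for software */
};

static void bd_set_skb(struct my_bd *bd, struct sk_buff *skb)
{
	bd->sw_id_offset = (u64)(uintptr_t)skb;
}

static struct sk_buff *bd_get_skb(const struct my_bd *bd)
{
	/* Double cast avoids pointer-size warnings on 32-bit builds. */
	return (struct sk_buff *)(uintptr_t)bd->sw_id_offset;
}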
skb               708 drivers/net/ethernet/nvidia/forcedeth.c 	struct sk_buff *skb;
skb              1837 drivers/net/ethernet/nvidia/forcedeth.c 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
skb              1838 drivers/net/ethernet/nvidia/forcedeth.c 		if (likely(skb)) {
skb              1839 drivers/net/ethernet/nvidia/forcedeth.c 			np->put_rx_ctx->skb = skb;
skb              1841 drivers/net/ethernet/nvidia/forcedeth.c 							     skb->data,
skb              1842 drivers/net/ethernet/nvidia/forcedeth.c 							     skb_tailroom(skb),
skb              1846 drivers/net/ethernet/nvidia/forcedeth.c 				kfree_skb(skb);
skb              1849 drivers/net/ethernet/nvidia/forcedeth.c 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
skb              1878 drivers/net/ethernet/nvidia/forcedeth.c 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
skb              1879 drivers/net/ethernet/nvidia/forcedeth.c 		if (likely(skb)) {
skb              1880 drivers/net/ethernet/nvidia/forcedeth.c 			np->put_rx_ctx->skb = skb;
skb              1882 drivers/net/ethernet/nvidia/forcedeth.c 							     skb->data,
skb              1883 drivers/net/ethernet/nvidia/forcedeth.c 							     skb_tailroom(skb),
skb              1887 drivers/net/ethernet/nvidia/forcedeth.c 				kfree_skb(skb);
skb              1890 drivers/net/ethernet/nvidia/forcedeth.c 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
skb              1945 drivers/net/ethernet/nvidia/forcedeth.c 		np->rx_skb[i].skb = NULL;
skb              1981 drivers/net/ethernet/nvidia/forcedeth.c 		np->tx_skb[i].skb = NULL;
skb              2021 drivers/net/ethernet/nvidia/forcedeth.c 	if (tx_skb->skb) {
skb              2022 drivers/net/ethernet/nvidia/forcedeth.c 		dev_kfree_skb_any(tx_skb->skb);
skb              2023 drivers/net/ethernet/nvidia/forcedeth.c 		tx_skb->skb = NULL;
skb              2076 drivers/net/ethernet/nvidia/forcedeth.c 		if (np->rx_skb[i].skb) {
skb              2078 drivers/net/ethernet/nvidia/forcedeth.c 					 (skb_end_pointer(np->rx_skb[i].skb) -
skb              2079 drivers/net/ethernet/nvidia/forcedeth.c 					 np->rx_skb[i].skb->data),
skb              2081 drivers/net/ethernet/nvidia/forcedeth.c 			dev_kfree_skb(np->rx_skb[i].skb);
skb              2082 drivers/net/ethernet/nvidia/forcedeth.c 			np->rx_skb[i].skb = NULL;
skb              2210 drivers/net/ethernet/nvidia/forcedeth.c static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              2215 drivers/net/ethernet/nvidia/forcedeth.c 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
skb              2219 drivers/net/ethernet/nvidia/forcedeth.c 	u32 size = skb_headlen(skb);
skb              2231 drivers/net/ethernet/nvidia/forcedeth.c 		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              2253 drivers/net/ethernet/nvidia/forcedeth.c 						     skb->data + offset, bcnt,
skb              2258 drivers/net/ethernet/nvidia/forcedeth.c 			dev_kfree_skb_any(skb);
skb              2280 drivers/net/ethernet/nvidia/forcedeth.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2303 drivers/net/ethernet/nvidia/forcedeth.c 				dev_kfree_skb_any(skb);
skb              2339 drivers/net/ethernet/nvidia/forcedeth.c 	prev_tx_ctx->skb = skb;
skb              2341 drivers/net/ethernet/nvidia/forcedeth.c 	if (skb_is_gso(skb))
skb              2342 drivers/net/ethernet/nvidia/forcedeth.c 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
skb              2344 drivers/net/ethernet/nvidia/forcedeth.c 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
skb              2352 drivers/net/ethernet/nvidia/forcedeth.c 	netdev_sent_queue(np->dev, skb->len);
skb              2354 drivers/net/ethernet/nvidia/forcedeth.c 	skb_tx_timestamp(skb);
skb              2364 drivers/net/ethernet/nvidia/forcedeth.c static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
skb              2370 drivers/net/ethernet/nvidia/forcedeth.c 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
skb              2374 drivers/net/ethernet/nvidia/forcedeth.c 	u32 size = skb_headlen(skb);
skb              2387 drivers/net/ethernet/nvidia/forcedeth.c 		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              2410 drivers/net/ethernet/nvidia/forcedeth.c 						     skb->data + offset, bcnt,
skb              2415 drivers/net/ethernet/nvidia/forcedeth.c 			dev_kfree_skb_any(skb);
skb              2438 drivers/net/ethernet/nvidia/forcedeth.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2461 drivers/net/ethernet/nvidia/forcedeth.c 				dev_kfree_skb_any(skb);
skb              2497 drivers/net/ethernet/nvidia/forcedeth.c 	prev_tx_ctx->skb = skb;
skb              2499 drivers/net/ethernet/nvidia/forcedeth.c 	if (skb_is_gso(skb))
skb              2500 drivers/net/ethernet/nvidia/forcedeth.c 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
skb              2502 drivers/net/ethernet/nvidia/forcedeth.c 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
skb              2506 drivers/net/ethernet/nvidia/forcedeth.c 	if (skb_vlan_tag_present(skb))
skb              2508 drivers/net/ethernet/nvidia/forcedeth.c 					skb_vlan_tag_get(skb));
skb              2537 drivers/net/ethernet/nvidia/forcedeth.c 	netdev_sent_queue(np->dev, skb->len);
skb              2539 drivers/net/ethernet/nvidia/forcedeth.c 	skb_tx_timestamp(skb);
skb              2597 drivers/net/ethernet/nvidia/forcedeth.c 					len = np->get_tx_ctx->skb->len;
skb              2601 drivers/net/ethernet/nvidia/forcedeth.c 				bytes_compl += np->get_tx_ctx->skb->len;
skb              2602 drivers/net/ethernet/nvidia/forcedeth.c 				dev_kfree_skb_any(np->get_tx_ctx->skb);
skb              2603 drivers/net/ethernet/nvidia/forcedeth.c 				np->get_tx_ctx->skb = NULL;
skb              2617 drivers/net/ethernet/nvidia/forcedeth.c 					len = np->get_tx_ctx->skb->len;
skb              2621 drivers/net/ethernet/nvidia/forcedeth.c 				bytes_compl += np->get_tx_ctx->skb->len;
skb              2622 drivers/net/ethernet/nvidia/forcedeth.c 				dev_kfree_skb_any(np->get_tx_ctx->skb);
skb              2623 drivers/net/ethernet/nvidia/forcedeth.c 				np->get_tx_ctx->skb = NULL;
skb              2670 drivers/net/ethernet/nvidia/forcedeth.c 				len = np->get_tx_ctx->skb->len;
skb              2675 drivers/net/ethernet/nvidia/forcedeth.c 			bytes_cleaned += np->get_tx_ctx->skb->len;
skb              2676 drivers/net/ethernet/nvidia/forcedeth.c 			dev_kfree_skb_any(np->get_tx_ctx->skb);
skb              2677 drivers/net/ethernet/nvidia/forcedeth.c 			np->get_tx_ctx->skb = NULL;
skb              2862 drivers/net/ethernet/nvidia/forcedeth.c 	struct sk_buff *skb;
skb              2877 drivers/net/ethernet/nvidia/forcedeth.c 		skb = np->get_rx_ctx->skb;
skb              2878 drivers/net/ethernet/nvidia/forcedeth.c 		np->get_rx_ctx->skb = NULL;
skb              2886 drivers/net/ethernet/nvidia/forcedeth.c 						len = nv_getlen(dev, skb->data, len);
skb              2888 drivers/net/ethernet/nvidia/forcedeth.c 							dev_kfree_skb(skb);
skb              2900 drivers/net/ethernet/nvidia/forcedeth.c 						dev_kfree_skb(skb);
skb              2905 drivers/net/ethernet/nvidia/forcedeth.c 				dev_kfree_skb(skb);
skb              2913 drivers/net/ethernet/nvidia/forcedeth.c 						len = nv_getlen(dev, skb->data, len);
skb              2915 drivers/net/ethernet/nvidia/forcedeth.c 							dev_kfree_skb(skb);
skb              2926 drivers/net/ethernet/nvidia/forcedeth.c 						dev_kfree_skb(skb);
skb              2932 drivers/net/ethernet/nvidia/forcedeth.c 					skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2934 drivers/net/ethernet/nvidia/forcedeth.c 				dev_kfree_skb(skb);
skb              2939 drivers/net/ethernet/nvidia/forcedeth.c 		skb_put(skb, len);
skb              2940 drivers/net/ethernet/nvidia/forcedeth.c 		skb->protocol = eth_type_trans(skb, dev);
skb              2941 drivers/net/ethernet/nvidia/forcedeth.c 		napi_gro_receive(&np->napi, skb);
skb              2964 drivers/net/ethernet/nvidia/forcedeth.c 	struct sk_buff *skb;
skb              2979 drivers/net/ethernet/nvidia/forcedeth.c 		skb = np->get_rx_ctx->skb;
skb              2980 drivers/net/ethernet/nvidia/forcedeth.c 		np->get_rx_ctx->skb = NULL;
skb              2987 drivers/net/ethernet/nvidia/forcedeth.c 					len = nv_getlen(dev, skb->data, len);
skb              2989 drivers/net/ethernet/nvidia/forcedeth.c 						dev_kfree_skb(skb);
skb              3000 drivers/net/ethernet/nvidia/forcedeth.c 					dev_kfree_skb(skb);
skb              3007 drivers/net/ethernet/nvidia/forcedeth.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3010 drivers/net/ethernet/nvidia/forcedeth.c 			skb_put(skb, len);
skb              3011 drivers/net/ethernet/nvidia/forcedeth.c 			skb->protocol = eth_type_trans(skb, dev);
skb              3012 drivers/net/ethernet/nvidia/forcedeth.c 			prefetch(skb->data);
skb              3025 drivers/net/ethernet/nvidia/forcedeth.c 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              3027 drivers/net/ethernet/nvidia/forcedeth.c 			napi_gro_receive(&np->napi, skb);
skb              3033 drivers/net/ethernet/nvidia/forcedeth.c 			dev_kfree_skb(skb);
skb              5193 drivers/net/ethernet/nvidia/forcedeth.c 			rx_skb = np->rx_skb[0].skb;
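forcedeth's refill loops allocate a padded skb per ring slot, map skb->data for the device using skb_tailroom() as the DMA length, and keep the skb pointer in the ring context so nv_rx_process() can detach it later. A sketch of one refill step, assuming hypothetical ring-context fields:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct rx_ctx {			/* hypothetical per-slot context */
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
};

static int refill_one(struct net_device *dev, struct device *dmadev,
		      struct rx_ctx *ctx, unsigned int buf_sz)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, buf_sz);

	if (unlikely(!skb))
		return -ENOMEM;

	ctx->dma = dma_map_single(dmadev, skb->data, skb_tailroom(skb),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dmadev, ctx->dma))) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctx->skb = skb;
	ctx->dma_len = skb_tailroom(skb);	/* as in the lines above */
	return 0;
}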
skb               889 drivers/net/ethernet/nxp/lpc_eth.c 	struct sk_buff *skb;
skb               927 drivers/net/ethernet/nxp/lpc_eth.c 			skb = dev_alloc_skb(len);
skb               928 drivers/net/ethernet/nxp/lpc_eth.c 			if (!skb) {
skb               932 drivers/net/ethernet/nxp/lpc_eth.c 				skb_put_data(skb,
skb               937 drivers/net/ethernet/nxp/lpc_eth.c 				skb->protocol = eth_type_trans(skb, ndev);
skb               938 drivers/net/ethernet/nxp/lpc_eth.c 				netif_receive_skb(skb);
skb              1024 drivers/net/ethernet/nxp/lpc_eth.c static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb              1031 drivers/net/ethernet/nxp/lpc_eth.c 	len = skb->len;
skb              1055 drivers/net/ethernet/nxp/lpc_eth.c 	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
skb              1073 drivers/net/ethernet/nxp/lpc_eth.c 	dev_kfree_skb(skb);
skb               424 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h 	struct sk_buff *skb;
skb               106 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
skb               108 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	u8 *data = skb->data;
skb               113 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
skb               118 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
skb               132 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
skb               157 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
skb               162 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	shhwtstamps = skb_hwtstamps(skb);
skb               170 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
skb               178 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	shtx = skb_shinfo(skb);
skb               205 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	skb_tstamp_tx(skb, &shhwtstamps);
skb               894 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (buffer_info->skb) {
skb               895 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		dev_kfree_skb_any(buffer_info->skb);
skb               896 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		buffer_info->skb = NULL;
skb               914 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (buffer_info->skb) {
skb               915 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		dev_kfree_skb_any(buffer_info->skb);
skb               916 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		buffer_info->skb = NULL;
skb              1094 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			      struct sk_buff *skb)
skb              1105 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
skb              1107 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (skb->ip_summed == CHECKSUM_NONE)
skb              1115 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
skb              1118 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb              1119 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			struct iphdr *iph = ip_hdr(skb);
skb              1121 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			offset = skb_transport_offset(skb);
skb              1123 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				skb->csum = 0;
skb              1124 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				tcp_hdr(skb)->check = 0;
skb              1125 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				skb->csum = skb_checksum(skb, offset,
skb              1126 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 							 skb->len - offset, 0);
skb              1127 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				tcp_hdr(skb)->check =
skb              1130 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 							  skb->len - offset,
skb              1132 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 							  skb->csum);
skb              1134 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				skb->csum = 0;
skb              1135 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				udp_hdr(skb)->check = 0;
skb              1136 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				skb->csum =
skb              1137 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 					skb_checksum(skb, offset,
skb              1138 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 						     skb->len - offset, 0);
skb              1139 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				udp_hdr(skb)->check =
skb              1142 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 							  skb->len - offset,
skb              1144 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 							  skb->csum);
skb              1157 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tmp_skb = buffer_info->skb;
skb              1160 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
skb              1163 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	tmp_skb->len = skb->len;
skb              1164 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
skb              1165 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	       (skb->len - ETH_HLEN));
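The pch_gbe_tx_queue() entries work around a hardware limit: for frames shorter than PCH_GBE_SHORT_PKT the controller cannot insert the transport checksum, so the driver computes it in software with skb_checksum() and folds in the pseudo-header via csum_tcpudp_magic(). A standalone sketch of the TCP case, assuming an IPv4 frame with a valid transport header; the function name is hypothetical:

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

static void sw_tcp_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	unsigned int offset = skb_transport_offset(skb);

	/* Zero the field, sum the TCP segment, then fold in the
	 * pseudo-header, exactly as the driver does above. */
	tcp_hdr(skb)->check = 0;
	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
	tcp_hdr(skb)->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset,
						IPPROTO_TCP, skb->csum);
}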
skb              1197 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	pch_tx_timestamp(adapter, skb);
skb              1199 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	dev_kfree_skb_any(skb);
skb              1361 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	struct sk_buff *skb;
skb              1370 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		skb = netdev_alloc_skb(netdev, bufsz);
skb              1371 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		if (unlikely(!skb)) {
skb              1377 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		skb_reserve(skb, NET_IP_ALIGN);
skb              1378 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		buffer_info->skb = skb;
skb              1385 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			dev_kfree_skb(skb);
skb              1386 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			buffer_info->skb = NULL;
skb              1452 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	struct sk_buff *skb;
skb              1462 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		skb = netdev_alloc_skb(adapter->netdev, bufsz);
skb              1463 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		skb_reserve(skb, PCH_GBE_DMA_ALIGN);
skb              1464 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		buffer_info->skb = skb;
skb              1485 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	struct sk_buff *skb;
skb              1532 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		skb = buffer_info->skb;
skb              1553 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			adapter->stats.tx_bytes += skb->len;
skb              1558 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			adapter->stats.tx_bytes += skb->len;
skb              1567 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		if (buffer_info->skb) {
skb              1570 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			skb_trim(buffer_info->skb, 0);
skb              1628 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	struct sk_buff *skb;
skb              1648 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		skb = buffer_info->skb;
skb              1649 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 		buffer_info->skb = NULL;
skb              1682 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			memcpy(skb->data, buffer_info->rx_buffer, length);
skb              1690 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			skb_put(skb, length);
skb              1692 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			pch_rx_timestamp(adapter, skb);
skb              1694 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			skb->protocol = eth_type_trans(skb, netdev);
skb              1696 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1698 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				skb->ip_summed = CHECKSUM_NONE;
skb              1700 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 			napi_gro_receive(&adapter->napi, skb);
skb              1704 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 				   skb->ip_summed, length);
skb              2067 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
skb              2081 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 	pch_gbe_tx_queue(adapter, tx_ring, skb);
skb               553 drivers/net/ethernet/packetengines/hamachi.c static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
skb               997 drivers/net/ethernet/packetengines/hamachi.c 		struct sk_buff *skb;
skb              1002 drivers/net/ethernet/packetengines/hamachi.c 		skb = hmp->tx_skbuff[entry];
skb              1003 drivers/net/ethernet/packetengines/hamachi.c 		if (skb) {
skb              1006 drivers/net/ethernet/packetengines/hamachi.c 				skb->len, PCI_DMA_TODEVICE);
skb              1007 drivers/net/ethernet/packetengines/hamachi.c 			dev_kfree_skb(skb);
skb              1085 drivers/net/ethernet/packetengines/hamachi.c 		struct sk_buff *skb;
skb              1094 drivers/net/ethernet/packetengines/hamachi.c 		skb = hmp->tx_skbuff[i];
skb              1095 drivers/net/ethernet/packetengines/hamachi.c 		if (skb){
skb              1097 drivers/net/ethernet/packetengines/hamachi.c 				skb->len, PCI_DMA_TODEVICE);
skb              1098 drivers/net/ethernet/packetengines/hamachi.c 			dev_kfree_skb(skb);
skb              1115 drivers/net/ethernet/packetengines/hamachi.c 		struct sk_buff *skb = hmp->rx_skbuff[i];
skb              1117 drivers/net/ethernet/packetengines/hamachi.c 		if (skb){
skb              1121 drivers/net/ethernet/packetengines/hamachi.c 			dev_kfree_skb(skb);
skb              1127 drivers/net/ethernet/packetengines/hamachi.c 		struct sk_buff *skb;
skb              1129 drivers/net/ethernet/packetengines/hamachi.c 		skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
skb              1130 drivers/net/ethernet/packetengines/hamachi.c 		hmp->rx_skbuff[i] = skb;
skb              1131 drivers/net/ethernet/packetengines/hamachi.c 		if (skb == NULL)
skb              1135 drivers/net/ethernet/packetengines/hamachi.c 			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
skb              1181 drivers/net/ethernet/packetengines/hamachi.c 		struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
skb              1182 drivers/net/ethernet/packetengines/hamachi.c 		hmp->rx_skbuff[i] = skb;
skb              1183 drivers/net/ethernet/packetengines/hamachi.c 		if (skb == NULL)
skb              1185 drivers/net/ethernet/packetengines/hamachi.c 		skb_reserve(skb, 2); /* 16 byte align the IP header. */
skb              1187 drivers/net/ethernet/packetengines/hamachi.c 			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
skb              1204 drivers/net/ethernet/packetengines/hamachi.c static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
skb              1234 drivers/net/ethernet/packetengines/hamachi.c 	hmp->tx_skbuff[entry] = skb;
skb              1237 drivers/net/ethernet/packetengines/hamachi.c 		skb->data, skb->len, PCI_DMA_TODEVICE));
skb              1250 drivers/net/ethernet/packetengines/hamachi.c 			DescEndPacket | DescEndRing | DescIntr | skb->len);
skb              1253 drivers/net/ethernet/packetengines/hamachi.c 			DescEndPacket | DescIntr | skb->len);
skb              1329 drivers/net/ethernet/packetengines/hamachi.c 					struct sk_buff *skb;
skb              1333 drivers/net/ethernet/packetengines/hamachi.c 					skb = hmp->tx_skbuff[entry];
skb              1335 drivers/net/ethernet/packetengines/hamachi.c 					if (skb){
skb              1338 drivers/net/ethernet/packetengines/hamachi.c 							skb->len,
skb              1340 drivers/net/ethernet/packetengines/hamachi.c 						dev_consume_skb_irq(skb);
skb              1455 drivers/net/ethernet/packetengines/hamachi.c 			struct sk_buff *skb;
skb              1480 drivers/net/ethernet/packetengines/hamachi.c 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb              1485 drivers/net/ethernet/packetengines/hamachi.c 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
skb              1492 drivers/net/ethernet/packetengines/hamachi.c 				skb_copy_to_linear_data(skb,
skb              1494 drivers/net/ethernet/packetengines/hamachi.c 				skb_put(skb, pkt_len);
skb              1496 drivers/net/ethernet/packetengines/hamachi.c 				skb_put_data(skb, hmp->rx_ring_dma
skb              1507 drivers/net/ethernet/packetengines/hamachi.c 				skb_put(skb = hmp->rx_skbuff[entry], pkt_len);
skb              1510 drivers/net/ethernet/packetengines/hamachi.c 			skb->protocol = eth_type_trans(skb, dev);
skb              1516 drivers/net/ethernet/packetengines/hamachi.c 				struct iphdr *ih = (struct iphdr *) skb->data;
skb              1555 drivers/net/ethernet/packetengines/hamachi.c 						skb->csum = ntohs(pfck & 0xffff);
skb              1556 drivers/net/ethernet/packetengines/hamachi.c 						if (skb->csum > crc)
skb              1557 drivers/net/ethernet/packetengines/hamachi.c 							skb->csum -= crc;
skb              1559 drivers/net/ethernet/packetengines/hamachi.c 							skb->csum += (~crc & 0xffff);
skb              1564 drivers/net/ethernet/packetengines/hamachi.c 						skb->ip_summed = CHECKSUM_COMPLETE;
skb              1570 drivers/net/ethernet/packetengines/hamachi.c 			netif_rx(skb);
skb              1583 drivers/net/ethernet/packetengines/hamachi.c 			struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
skb              1585 drivers/net/ethernet/packetengines/hamachi.c 			hmp->rx_skbuff[entry] = skb;
skb              1586 drivers/net/ethernet/packetengines/hamachi.c 			if (skb == NULL)
skb              1588 drivers/net/ethernet/packetengines/hamachi.c 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb              1590 drivers/net/ethernet/packetengines/hamachi.c 				skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
skb              1649 drivers/net/ethernet/packetengines/hamachi.c 	struct sk_buff *skb;
skb              1704 drivers/net/ethernet/packetengines/hamachi.c 		skb = hmp->rx_skbuff[i];
skb              1706 drivers/net/ethernet/packetengines/hamachi.c 		if (skb) {
skb              1710 drivers/net/ethernet/packetengines/hamachi.c 			dev_kfree_skb(skb);
skb              1716 drivers/net/ethernet/packetengines/hamachi.c 		skb = hmp->tx_skbuff[i];
skb              1717 drivers/net/ethernet/packetengines/hamachi.c 		if (skb) {
skb              1720 drivers/net/ethernet/packetengines/hamachi.c 				skb->len, PCI_DMA_TODEVICE);
skb              1721 drivers/net/ethernet/packetengines/hamachi.c 			dev_kfree_skb(skb);
skb               349 drivers/net/ethernet/packetengines/yellowfin.c static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
skb               738 drivers/net/ethernet/packetengines/yellowfin.c 		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
skb               739 drivers/net/ethernet/packetengines/yellowfin.c 		yp->rx_skbuff[i] = skb;
skb               740 drivers/net/ethernet/packetengines/yellowfin.c 		if (skb == NULL)
skb               742 drivers/net/ethernet/packetengines/yellowfin.c 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
skb               744 drivers/net/ethernet/packetengines/yellowfin.c 			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
skb               804 drivers/net/ethernet/packetengines/yellowfin.c static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
skb               809 drivers/net/ethernet/packetengines/yellowfin.c 	int len = skb->len;
skb               820 drivers/net/ethernet/packetengines/yellowfin.c 		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
skb               823 drivers/net/ethernet/packetengines/yellowfin.c 			len = skb->len + 32 - cacheline_end + 1;
skb               824 drivers/net/ethernet/packetengines/yellowfin.c 			if (skb_padto(skb, len)) {
skb               831 drivers/net/ethernet/packetengines/yellowfin.c 	yp->tx_skbuff[entry] = skb;
skb               835 drivers/net/ethernet/packetengines/yellowfin.c 		skb->data, len, PCI_DMA_TODEVICE));
skb               851 drivers/net/ethernet/packetengines/yellowfin.c 		skb->data, len, PCI_DMA_TODEVICE));
skb               918 drivers/net/ethernet/packetengines/yellowfin.c 			struct sk_buff *skb;
skb               922 drivers/net/ethernet/packetengines/yellowfin.c 			skb = yp->tx_skbuff[entry];
skb               924 drivers/net/ethernet/packetengines/yellowfin.c 			dev->stats.tx_bytes += skb->len;
skb               927 drivers/net/ethernet/packetengines/yellowfin.c 				skb->len, PCI_DMA_TODEVICE);
skb               928 drivers/net/ethernet/packetengines/yellowfin.c 			dev_consume_skb_irq(skb);
skb               946 drivers/net/ethernet/packetengines/yellowfin.c 				struct sk_buff *skb;
skb               959 drivers/net/ethernet/packetengines/yellowfin.c 				skb = yp->tx_skbuff[entry];
skb               978 drivers/net/ethernet/packetengines/yellowfin.c 					dev->stats.tx_bytes += skb->len;
skb               984 drivers/net/ethernet/packetengines/yellowfin.c 					yp->tx_ring[entry<<1].addr, skb->len,
skb               986 drivers/net/ethernet/packetengines/yellowfin.c 				dev_consume_skb_irq(skb);
skb              1110 drivers/net/ethernet/packetengines/yellowfin.c 			struct sk_buff *skb;
skb              1123 drivers/net/ethernet/packetengines/yellowfin.c 				skb_put(skb = rx_skb, pkt_len);
skb              1130 drivers/net/ethernet/packetengines/yellowfin.c 				skb = netdev_alloc_skb(dev, pkt_len + 2);
skb              1131 drivers/net/ethernet/packetengines/yellowfin.c 				if (skb == NULL)
skb              1133 drivers/net/ethernet/packetengines/yellowfin.c 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
skb              1134 drivers/net/ethernet/packetengines/yellowfin.c 				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
skb              1135 drivers/net/ethernet/packetengines/yellowfin.c 				skb_put(skb, pkt_len);
skb              1141 drivers/net/ethernet/packetengines/yellowfin.c 			skb->protocol = eth_type_trans(skb, dev);
skb              1142 drivers/net/ethernet/packetengines/yellowfin.c 			netif_rx(skb);
skb              1153 drivers/net/ethernet/packetengines/yellowfin.c 			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
skb              1154 drivers/net/ethernet/packetengines/yellowfin.c 			if (skb == NULL)
skb              1156 drivers/net/ethernet/packetengines/yellowfin.c 			yp->rx_skbuff[entry] = skb;
skb              1157 drivers/net/ethernet/packetengines/yellowfin.c 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb              1159 drivers/net/ethernet/packetengines/yellowfin.c 				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
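Both hamachi and yellowfin implement the classic rx_copybreak optimization: a small packet is copied into a freshly allocated, IP-aligned skb so the large ring buffer stays in place, while a large packet detaches the ring skb directly (the skb_put(skb = rx_skb, pkt_len) idiom above). A sketch of the copy branch; the helper name is hypothetical:

#include <linux/skbuff.h>

static struct sk_buff *rx_copybreak_copy(struct net_device *dev,
					 const struct sk_buff *rx_skb,
					 unsigned int pkt_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + 2);

	if (!skb)
		return NULL;

	skb_reserve(skb, 2);	/* 16-byte align the IP header */
	skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
	skb_put(skb, pkt_len);
	return skb;
}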
skb               114 drivers/net/ethernet/pasemi/pasemi_mac.c static inline void prefetch_skb(const struct sk_buff *skb)
skb               116 drivers/net/ethernet/pasemi/pasemi_mac.c 	const void *d = skb;
skb               244 drivers/net/ethernet/pasemi/pasemi_mac.c 				    struct sk_buff *skb,
skb               250 drivers/net/ethernet/pasemi/pasemi_mac.c 	pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);
skb               253 drivers/net/ethernet/pasemi/pasemi_mac.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb               257 drivers/net/ethernet/pasemi/pasemi_mac.c 	dev_kfree_skb_irq(skb);
skb               525 drivers/net/ethernet/pasemi/pasemi_mac.c 		if (info->dma && info->skb) {
skb               526 drivers/net/ethernet/pasemi/pasemi_mac.c 			nfrags = skb_shinfo(info->skb)->nr_frags;
skb               531 drivers/net/ethernet/pasemi/pasemi_mac.c 							info->skb, dmas);
skb               550 drivers/net/ethernet/pasemi/pasemi_mac.c 		if (info->skb && info->dma) {
skb               553 drivers/net/ethernet/pasemi/pasemi_mac.c 					 info->skb->len,
skb               555 drivers/net/ethernet/pasemi/pasemi_mac.c 			dev_kfree_skb_any(info->skb);
skb               558 drivers/net/ethernet/pasemi/pasemi_mac.c 		info->skb = NULL;
skb               591 drivers/net/ethernet/pasemi/pasemi_mac.c 		struct sk_buff *skb;
skb               597 drivers/net/ethernet/pasemi/pasemi_mac.c 		skb = netdev_alloc_skb(dev, mac->bufsz);
skb               598 drivers/net/ethernet/pasemi/pasemi_mac.c 		skb_reserve(skb, LOCAL_SKB_ALIGN);
skb               600 drivers/net/ethernet/pasemi/pasemi_mac.c 		if (unlikely(!skb))
skb               603 drivers/net/ethernet/pasemi/pasemi_mac.c 		dma = pci_map_single(mac->dma_pdev, skb->data,
skb               608 drivers/net/ethernet/pasemi/pasemi_mac.c 			dev_kfree_skb_irq(info->skb);
skb               612 drivers/net/ethernet/pasemi/pasemi_mac.c 		info->skb = skb;
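Note that the replenish path just listed calls skb_reserve() before testing the allocation result, so a failed netdev_alloc_skb() would be dereferenced. A minimal sketch of the safe ordering (LOCAL_SKB_ALIGN as in the driver; ring bookkeeping elided):

	static struct sk_buff *alloc_rx_skb(struct net_device *dev,
					    unsigned int bufsz)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, bufsz);

		if (unlikely(!skb))
			return NULL;		/* check before touching the skb */
		skb_reserve(skb, LOCAL_SKB_ALIGN);
		return skb;
	}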
skb               702 drivers/net/ethernet/pasemi/pasemi_mac.c 	struct sk_buff *skb;
skb               738 drivers/net/ethernet/pasemi/pasemi_mac.c 		skb = info->skb;
skb               740 drivers/net/ethernet/pasemi/pasemi_mac.c 		prefetch_skb(skb);
skb               755 drivers/net/ethernet/pasemi/pasemi_mac.c 		info->skb = NULL;
skb               759 drivers/net/ethernet/pasemi/pasemi_mac.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               760 drivers/net/ethernet/pasemi/pasemi_mac.c 			skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
skb               763 drivers/net/ethernet/pasemi/pasemi_mac.c 			skb_checksum_none_assert(skb);
skb               770 drivers/net/ethernet/pasemi/pasemi_mac.c 		skb_put(skb, len-4);
skb               772 drivers/net/ethernet/pasemi/pasemi_mac.c 		skb->protocol = eth_type_trans(skb, mac->netdev);
skb               773 drivers/net/ethernet/pasemi/pasemi_mac.c 		napi_gro_receive(&mac->napi, skb);
skb               836 drivers/net/ethernet/pasemi/pasemi_mac.c 	prefetch(&TX_DESC_INFO(txring, start+1).skb);
skb               849 drivers/net/ethernet/pasemi/pasemi_mac.c 		struct sk_buff *skb;
skb               863 drivers/net/ethernet/pasemi/pasemi_mac.c 		skb = TX_DESC_INFO(txring, i+1).skb;
skb               880 drivers/net/ethernet/pasemi/pasemi_mac.c 		skbs[descr_count] = skb;
skb              1336 drivers/net/ethernet/pasemi/pasemi_mac.c static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
skb              1344 drivers/net/ethernet/pasemi/pasemi_mac.c 	const int nh_off = skb_network_offset(skb);
skb              1345 drivers/net/ethernet/pasemi/pasemi_mac.c 	const int nh_len = skb_network_header_len(skb);
skb              1346 drivers/net/ethernet/pasemi/pasemi_mac.c 	const int nfrags = skb_shinfo(skb)->nr_frags;
skb              1352 drivers/net/ethernet/pasemi/pasemi_mac.c 	       XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) |
skb              1355 drivers/net/ethernet/pasemi/pasemi_mac.c 	switch (ip_hdr(skb)->protocol) {
skb              1359 drivers/net/ethernet/pasemi/pasemi_mac.c 		cs_dest = map[0] + skb_transport_offset(skb) + 16;
skb              1364 drivers/net/ethernet/pasemi/pasemi_mac.c 		cs_dest = map[0] + skb_transport_offset(skb) + 6;
skb              1422 drivers/net/ethernet/pasemi/pasemi_mac.c static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
skb              1434 drivers/net/ethernet/pasemi/pasemi_mac.c 	const int nh_off = skb_network_offset(skb);
skb              1435 drivers/net/ethernet/pasemi/pasemi_mac.c 	const int nh_len = skb_network_header_len(skb);
skb              1441 drivers/net/ethernet/pasemi/pasemi_mac.c 	nfrags = skb_shinfo(skb)->nr_frags;
skb              1443 drivers/net/ethernet/pasemi/pasemi_mac.c 	map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
skb              1445 drivers/net/ethernet/pasemi/pasemi_mac.c 	map_size[0] = skb_headlen(skb);
skb              1450 drivers/net/ethernet/pasemi/pasemi_mac.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1461 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) {
skb              1462 drivers/net/ethernet/pasemi/pasemi_mac.c 		switch (ip_hdr(skb)->protocol) {
skb              1478 drivers/net/ethernet/pasemi/pasemi_mac.c 	mactx = dflags | XCT_MACTX_LLEN(skb->len);
skb              1493 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) {
skb              1497 drivers/net/ethernet/pasemi/pasemi_mac.c 		pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);
skb              1504 drivers/net/ethernet/pasemi/pasemi_mac.c 	TX_DESC_INFO(txring, fill).skb = skb;
skb              1521 drivers/net/ethernet/pasemi/pasemi_mac.c 	dev->stats.tx_bytes += skb->len;
skb                90 drivers/net/ethernet/pasemi/pasemi_mac.h 	struct sk_buff *skb;
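The pasemi tx-completion lines above show the unmap-then-free symmetry every DMA tx path needs: the head was mapped with pci_map_single() and each page fragment separately, so teardown walks the same layout from the saved dmas[] array before freeing the skb from hard-irq context. A hedged sketch:

	static void unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				 const dma_addr_t *dmas)
	{
		int f;

		pci_unmap_single(pdev, dmas[0], skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

			pci_unmap_page(pdev, dmas[f + 1], skb_frag_size(frag),
				       PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(skb);		/* safe from interrupt context */
	}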
skb                38 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			     struct sk_buff *skb)
skb                46 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	ionic_rxq_post(q, true, ionic_rx_clean, skb);
skb                50 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			       struct ionic_cq_info *cq_info, struct sk_buff **skb)
skb                77 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	memcpy(new_skb->data, (*skb)->data, clen);
skb                79 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	ionic_rx_recycle(q, desc_info, *skb);
skb                80 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	*skb = new_skb;
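ionic_rx_copybreak() above implements the usual rx copybreak trade-off: below a size threshold it is cheaper to copy the frame into a small fresh skb and recycle the large mapped buffer than to hand the buffer up. A sketch under an illustrative threshold (the caller applies skb_put() afterwards, as in the listing):

	#define RX_COPYBREAK	256		/* illustrative threshold */

	static struct sk_buff *rx_copybreak(struct net_device *netdev,
					    struct sk_buff *big,
					    unsigned int len)
	{
		struct sk_buff *small;

		if (len > RX_COPYBREAK)
			return big;		/* keep the original buffer */

		small = netdev_alloc_skb_ip_align(netdev, len);
		if (!small)
			return big;		/* fall back to the big one */
		memcpy(small->data, big->data, len);
		/* caller recycles 'big' onto the rx ring */
		return small;
	}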
skb                90 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	struct sk_buff *skb = cb_arg;
skb                98 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		ionic_rx_recycle(q, desc_info, skb);
skb               104 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		ionic_rx_recycle(q, desc_info, skb);
skb               111 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	ionic_rx_copybreak(q, desc_info, cq_info, &skb);
skb               113 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	skb_put(skb, le16_to_cpu(comp->len));
skb               114 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	skb->protocol = eth_type_trans(skb, netdev);
skb               116 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	skb_record_rx_queue(skb, q->index);
skb               122 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
skb               129 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
skb               137 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			skb->ip_summed = CHECKSUM_COMPLETE;
skb               138 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			skb->csum = (__wsum)le16_to_cpu(comp->csum);
skb               152 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               156 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	napi_gro_receive(&qcq->napi, skb);
skb               222 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	struct sk_buff *skb;
skb               228 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	skb = netdev_alloc_skb_ip_align(netdev, len);
skb               229 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	if (!skb) {
skb               236 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	*dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
skb               238 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		dev_kfree_skb(skb);
skb               245 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	return skb;
skb               254 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	struct sk_buff *skb;
skb               263 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		skb = ionic_rx_skb_alloc(q, len, &dma_addr);
skb               264 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		if (!skb)
skb               275 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, skb);
skb               401 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		struct sk_buff *skb = cb_arg;
skb               402 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		u32 len = skb->len;
skb               404 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		queue_index = skb_get_queue_mapping(skb);
skb               410 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		dev_kfree_skb_any(skb);
skb               455 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
skb               459 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	err = skb_cow_head(skb, 0);
skb               463 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
skb               464 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		inner_ip_hdr(skb)->check = 0;
skb               465 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		inner_tcp_hdr(skb)->check =
skb               466 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
skb               467 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 					   inner_ip_hdr(skb)->daddr,
skb               469 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
skb               470 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		inner_tcp_hdr(skb)->check =
skb               471 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
skb               472 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 					 &inner_ipv6_hdr(skb)->daddr,
skb               479 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
skb               483 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	err = skb_cow_head(skb, 0);
skb               487 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
skb               488 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		ip_hdr(skb)->check = 0;
skb               489 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		tcp_hdr(skb)->check =
skb               490 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
skb               491 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 					   ip_hdr(skb)->daddr,
skb               493 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
skb               494 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		tcp_hdr(skb)->check =
skb               495 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb               496 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 					 &ipv6_hdr(skb)->daddr,
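The two ionic helpers above seed the TCP pseudo-header checksum before TSO: the IPv4 header checksum is zeroed and tcph->check is preloaded with the complemented pseudo-header sum over a zero length, so the device only folds in payload as it segments. A condensed sketch of the non-tunnel case:

	static int seed_tcp_pseudo_csum(struct sk_buff *skb)
	{
		int err = skb_cow_head(skb, 0);	/* make headers writable */

		if (err)
			return err;

		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   0, IPPROTO_TCP, 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 0;
	}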
skb               504 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			      struct sk_buff *skb,
skb               527 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		skb_tx_timestamp(skb);
skb               528 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
skb               529 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
skb               545 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
skb               575 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	mss = skb_shinfo(skb)->gso_size;
skb               576 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	nfrags = skb_shinfo(skb)->nr_frags;
skb               577 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	len_left = skb->len - skb_headlen(skb);
skb               578 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
skb               579 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
skb               580 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	has_vlan = !!skb_vlan_tag_present(skb);
skb               581 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	vlan_tci = skb_vlan_tag_get(skb);
skb               582 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	encap = skb->encapsulation;
skb               590 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		err = ionic_tx_tcp_inner_pseudo_csum(skb);
skb               592 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		err = ionic_tx_tcp_pseudo_csum(skb);
skb               597 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		hdrlen = skb_inner_transport_header(skb) - skb->data +
skb               598 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 			 inner_tcp_hdrlen(skb);
skb               600 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               603 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	left = skb_headlen(skb);
skb               613 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
skb               623 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		ionic_tx_tso_post(q, desc, skb,
skb               638 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
skb               662 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 				ionic_tx_tso_post(q, desc, skb, desc_addr,
skb               685 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 				ionic_tx_tso_post(q, desc, skb, desc_addr,
skb               714 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
skb               725 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	has_vlan = !!skb_vlan_tag_present(skb);
skb               726 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	encap = skb->encapsulation;
skb               728 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
skb               736 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
skb               738 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	desc->len = cpu_to_le16(skb_headlen(skb));
skb               739 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
skb               740 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
skb               741 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	desc->csum_offset = cpu_to_le16(skb->csum_offset);
skb               743 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	if (skb->csum_not_inet)
skb               751 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
skb               762 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	has_vlan = !!skb_vlan_tag_present(skb);
skb               763 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	encap = skb->encapsulation;
skb               765 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
skb               773 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
skb               775 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	desc->len = cpu_to_le16(skb_headlen(skb));
skb               776 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
skb               783 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
skb               786 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	unsigned int len_left = skb->len - skb_headlen(skb);
skb               794 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
skb               808 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
skb               814 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               815 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		err = ionic_tx_calc_csum(q, skb);
skb               817 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		err = ionic_tx_calc_no_csum(q, skb);
skb               822 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	err = ionic_tx_skb_frags(q, skb);
skb               826 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	skb_tx_timestamp(skb);
skb               828 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	stats->bytes += skb->len;
skb               830 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
skb               831 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
skb               836 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
skb               842 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	if (skb_is_gso(skb))
skb               843 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		return (skb->len / skb_shinfo(skb)->gso_size) + 1;
skb               846 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
skb               850 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	err = skb_linearize(skb);
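ionic_tx_descs_needed() above sizes the ring reservation: roughly one descriptor per MSS-sized chunk for TSO, one for a frame whose fragments fit the SG list, and skb_linearize() as the fallback when they do not. A sketch with an illustrative SG limit:

	#define MAX_SG_ELEMS	8		/* illustrative hardware limit */

	static int tx_descs_needed(struct sk_buff *skb)
	{
		if (skb_is_gso(skb))
			return (skb->len / skb_shinfo(skb)->gso_size) + 1;

		if (skb_shinfo(skb)->nr_frags <= MAX_SG_ELEMS)
			return 1;		/* one descriptor plus its SG list */

		/* too fragmented: collapse into a single linear buffer */
		if (skb_linearize(skb))
			return -ENOMEM;
		return 1;
	}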
skb               880 drivers/net/ethernet/pensando/ionic/ionic_txrx.c netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb               882 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	u16 queue_index = skb_get_queue_mapping(skb);
skb               889 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		dev_kfree_skb(skb);
skb               897 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	ndescs = ionic_tx_descs_needed(q, skb);
skb               904 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	if (skb_is_gso(skb))
skb               905 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		err = ionic_tx_tso(q, skb);
skb               907 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 		err = ionic_tx(q, skb);
skb               923 drivers/net/ethernet/pensando/ionic/ionic_txrx.c 	dev_kfree_skb(skb);
skb                13 drivers/net/ethernet/pensando/ionic/ionic_txrx.h netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
skb               544 drivers/net/ethernet/qlogic/netxen/netxen_nic.h 	struct sk_buff *skb;
skb               552 drivers/net/ethernet/qlogic/netxen/netxen_nic.h 	struct sk_buff *skb;
skb               581 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c 		pbuf->skb = NULL;
skb               109 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			if (rx_buf->skb != NULL)
skb               110 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 				dev_kfree_skb_any(rx_buf->skb);
skb               140 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		if (cmd_buf->skb) {
skb               141 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			dev_kfree_skb_any(cmd_buf->skb);
skb               142 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			cmd_buf->skb = NULL;
skb              1457 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	struct sk_buff *skb;
skb              1461 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
skb              1462 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	if (!buffer->skb)
skb              1465 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	skb = buffer->skb;
skb              1468 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		skb_reserve(skb, 2);
skb              1470 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	dma = pci_map_single(pdev, skb->data,
skb              1474 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		dev_kfree_skb_any(skb);
skb              1475 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		buffer->skb = NULL;
skb              1479 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	buffer->skb = skb;
skb              1490 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	struct sk_buff *skb;
skb              1497 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	skb = buffer->skb;
skb              1498 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	if (!skb)
skb              1504 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1506 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		skb->ip_summed = CHECKSUM_NONE;
skb              1508 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	buffer->skb = NULL;
skb              1511 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	return skb;
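netxen_process_rxbuf() above translates the completion descriptor's checksum status into the stack's terms: CHECKSUM_UNNECESSARY when hardware verified the packet, CHECKSUM_NONE otherwise. A sketch (the STATUS_CKSUM_OK value is illustrative, mirroring the driver's flag):

	#define STATUS_CKSUM_OK	2		/* illustrative status encoding */

	static void set_rx_csum(struct sk_buff *skb, u8 cksum, bool rx_csum_on)
	{
		if (rx_csum_on && cksum == STATUS_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;	/* let the stack verify */
	}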
skb              1522 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	struct sk_buff *skb;
skb              1541 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
skb              1542 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	if (!skb)
skb              1546 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		skb_put(skb, rds_ring->skb_size);
skb              1548 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		skb_put(skb, length);
skb              1552 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		skb_pull(skb, pkt_offset);
skb              1554 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              1556 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	napi_gro_receive(&sds_ring->napi, skb);
skb              1576 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	struct sk_buff *skb;
skb              1605 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
skb              1606 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	if (!skb)
skb              1614 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	skb_put(skb, lro_length + data_offset);
skb              1616 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	skb_pull(skb, l2_hdr_offset);
skb              1617 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              1619 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	if (skb->protocol == htons(ETH_P_8021Q))
skb              1621 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	iph = (struct iphdr *)(skb->data + vhdr_len);
skb              1622 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
skb              1630 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	length = skb->len;
skb              1633 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		skb_shinfo(skb)->gso_size  =  netxen_get_lro_sts_mss(sts_data1);
skb              1635 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	netif_receive_skb(skb);
skb              1755 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		if (buffer->skb) {
skb              1768 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			dev_kfree_skb_any(buffer->skb);
skb              1769 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			buffer->skb = NULL;
skb              1824 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		if (!buffer->skb) {
skb              1884 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		if (!buffer->skb) {
skb              1859 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		struct sk_buff *skb)
skb              1862 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	__be16 protocol = skb->protocol;
skb              1871 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		vh = (struct vlan_ethhdr *)skb->data;
skb              1875 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	} else if (skb_vlan_tag_present(skb)) {
skb              1877 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		vid = skb_vlan_tag_get(skb);
skb              1883 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 			skb_shinfo(skb)->gso_size > 0) {
skb              1885 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1887 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
skb              1901 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1905 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 			l4proto = ip_hdr(skb)->protocol;
skb              1912 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 			l4proto = ipv6_hdr(skb)->nexthdr;
skb              1921 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
skb              1922 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	first_desc->ip_hdr_offset += skb_network_offset(skb);
skb              1939 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		tx_ring->cmd_buf_arr[producer].skb = NULL;
skb              1945 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		skb_copy_from_linear_data(skb, vh, 12);
skb              1948 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		skb_copy_from_linear_data_offset(skb, 12,
skb              1963 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		tx_ring->cmd_buf_arr[producer].skb = NULL;
skb              1965 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		skb_copy_from_linear_data_offset(skb, copied,
skb              1980 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
skb              1987 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              1990 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	map = pci_map_single(pdev, skb->data,
skb              1991 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 			skb_headlen(skb), PCI_DMA_TODEVICE);
skb              1996 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	nf->length = skb_headlen(skb);
skb              1999 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		frag = &skb_shinfo(skb)->frags[i];
skb              2021 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
skb              2036 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
skb              2052 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	frag_count = skb_shinfo(skb)->nr_frags + 1;
skb              2057 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
skb              2060 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 			frag = &skb_shinfo(skb)->frags[i];
skb              2064 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		if (!__pskb_pull_tail(skb, delta))
skb              2067 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		frag_count = 1 + skb_shinfo(skb)->nr_frags;
skb              2084 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	if (netxen_map_tx_skb(pdev, skb, pbuf))
skb              2087 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	pbuf->skb = skb;
skb              2093 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
skb              2105 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 			tx_ring->cmd_buf_arr[producer].skb = NULL;
skb              2129 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	netxen_tso_check(netdev, tx_ring, first_desc, skb);
skb              2131 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	adapter->stats.txbytes += skb->len;
skb              2140 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	dev_kfree_skb_any(skb);
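netxen_nic_xmit_frame() above guards a hardware limit on fragments per packet: for a non-GSO skb with too many frags, __pskb_pull_tail() merges the leading fragments into the linear area until the count fits. A sketch with an illustrative limit:

	#define MAX_FRAGS_PER_TX	14	/* illustrative hardware limit */

	static int limit_tx_frags(struct sk_buff *skb)
	{
		int frag_count = skb_shinfo(skb)->nr_frags + 1;
		int i, delta = 0;

		if (skb_is_gso(skb) || frag_count <= MAX_FRAGS_PER_TX)
			return 0;

		/* pull just enough leading frag bytes into the linear area */
		for (i = 0; i < frag_count - MAX_FRAGS_PER_TX; i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			return -ENOMEM;		/* caller drops the packet */
		return 0;
	}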
skb               100 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	struct sk_buff *skb = cookie;
skb               104 drivers/net/ethernet/qlogic/qed/qed_ll2.c 			 skb_headlen(skb), DMA_TO_DEVICE);
skb               107 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
skb               110 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	dev_kfree_skb_any(skb);
skb               168 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	struct sk_buff *skb;
skb               203 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	skb = build_skb(buffer->data, 0);
skb               204 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	if (!skb) {
skb               211 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	skb_reserve(skb, data->u.placement_offset);
skb               212 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	skb_put(skb, data->length.packet_length);
skb               213 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	skb_checksum_none_assert(skb);
skb               218 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	skb_reset_mac_header(skb);
skb               219 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	skb->protocol = eth_hdr(skb)->h_proto;
skb               224 drivers/net/ethernet/qlogic/qed/qed_ll2.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               226 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
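The qed ll2 rx lines above wrap the device-filled buffer with build_skb() rather than copying: the completion supplies a placement offset (headroom written by hardware) and a packet length. A minimal sketch:

	static struct sk_buff *wrap_rx_buffer(void *data, u16 placement_offset,
					      u16 packet_length)
	{
		struct sk_buff *skb = build_skb(data, 0); /* 0: size via ksize() */

		if (!skb)
			return NULL;
		skb_reserve(skb, placement_offset);	/* skip hw headroom */
		skb_put(skb, packet_length);
		skb_checksum_none_assert(skb);		/* no hw csum result here */
		skb_reset_mac_header(skb);
		return skb;
	}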
skb              2511 drivers/net/ethernet/qlogic/qed/qed_ll2.c static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
skb              2522 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
skb              2530 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              2538 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
skb              2539 drivers/net/ethernet/qlogic/qed/qed_ll2.c 				 skb->len, DMA_TO_DEVICE);
skb              2546 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
skb              2547 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
skb              2550 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	if (skb_vlan_tag_present(skb)) {
skb              2551 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		vlan = skb_vlan_tag_get(skb);
skb              2561 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	pkt.first_frag_len = skb->len;
skb              2562 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	pkt.cookie = skb;
skb              2578 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		frag = &skb_shinfo(skb)->frags[i];
skb              2605 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
skb               323 drivers/net/ethernet/qlogic/qede/qede.h 	struct sk_buff *skb;
skb               385 drivers/net/ethernet/qlogic/qede/qede.h 	struct sk_buff *skb;
skb               490 drivers/net/ethernet/qlogic/qede/qede.h int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb               518 drivers/net/ethernet/qlogic/qede/qede.h netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
skb               519 drivers/net/ethernet/qlogic/qede/qede.h u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               521 drivers/net/ethernet/qlogic/qede/qede.h netdev_features_t qede_features_check(struct sk_buff *skb,
skb              1491 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 					  struct sk_buff *skb)
skb              1515 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	txq->sw_tx_ring.skbs[idx].skb = skb;
skb              1520 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
skb              1525 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	mapping = dma_map_single(&edev->pdev->dev, skb->data,
skb              1526 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 				 skb_headlen(skb), DMA_TO_DEVICE);
skb              1531 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
skb              1562 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	txq->sw_tx_ring.skbs[idx].skb = NULL;
skb              1654 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	struct sk_buff *skb = NULL;
skb              1682 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	skb = netdev_alloc_skb(edev->ndev, pkt_size);
skb              1683 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	if (!skb) {
skb              1688 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	packet = skb_put(skb, pkt_size);
skb              1695 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	rc = qede_selftest_transmit_traffic(edev, skb);
skb              1706 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	dev_kfree_skb(skb);
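The qede ethtool lines above drive a loopback selftest: build a recognizable frame, push it through the driver's own tx path, and free it if the transmit never consumed it. A sketch with hypothetical helpers standing in for the driver internals:

	void build_loopback_frame(u8 *buf, const u8 *mac);	/* hypothetical */
	int selftest_transmit(struct net_device *ndev,
			      struct sk_buff *skb);		/* hypothetical */

	static int selftest_tx_one(struct net_device *ndev, unsigned int pkt_size)
	{
		struct sk_buff *skb = netdev_alloc_skb(ndev, pkt_size);
		u8 *packet;
		int rc;

		if (!skb)
			return -ENOMEM;
		packet = skb_put(skb, pkt_size);
		build_loopback_frame(packet, ndev->dev_addr);
		rc = selftest_transmit(ndev, skb);
		if (rc)
			dev_kfree_skb(skb);	/* tx path did not take ownership */
		return rc;
	}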
skb               389 drivers/net/ethernet/qlogic/qede/qede_filter.c 				 const struct sk_buff *skb)
skb               391 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               392 drivers/net/ethernet/qlogic/qede/qede_filter.c 		if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
skb               393 drivers/net/ethernet/qlogic/qede/qede_filter.c 		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
skb               401 drivers/net/ethernet/qlogic/qede/qede_filter.c 		if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
skb               402 drivers/net/ethernet/qlogic/qede/qede_filter.c 		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
skb               410 drivers/net/ethernet/qlogic/qede/qede_filter.c qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
skb               417 drivers/net/ethernet/qlogic/qede/qede_filter.c 		    tpos->tuple.eth_proto == skb->protocol &&
skb               418 drivers/net/ethernet/qlogic/qede/qede_filter.c 		    qede_compare_ip_addr(tpos, skb) &&
skb               453 drivers/net/ethernet/qlogic/qede/qede_filter.c int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb               464 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (skb->encapsulation)
skb               467 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (skb->protocol != htons(ETH_P_IP) &&
skb               468 drivers/net/ethernet/qlogic/qede/qede_filter.c 	    skb->protocol != htons(ETH_P_IPV6))
skb               471 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               472 drivers/net/ethernet/qlogic/qede/qede_filter.c 		ip_proto = ip_hdr(skb)->protocol;
skb               475 drivers/net/ethernet/qlogic/qede/qede_filter.c 		ip_proto = ipv6_hdr(skb)->nexthdr;
skb               482 drivers/net/ethernet/qlogic/qede/qede_filter.c 	ports = (__be16 *)(skb->data + tp_offset);
skb               483 drivers/net/ethernet/qlogic/qede/qede_filter.c 	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
skb               488 drivers/net/ethernet/qlogic/qede/qede_filter.c 				      skb, ports[0], ports[1], ip_proto);
skb               509 drivers/net/ethernet/qlogic/qede/qede_filter.c 	min_hlen = ETH_HLEN + skb_headlen(skb);
skb               524 drivers/net/ethernet/qlogic/qede/qede_filter.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               525 drivers/net/ethernet/qlogic/qede/qede_filter.c 		n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
skb               526 drivers/net/ethernet/qlogic/qede/qede_filter.c 		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
skb               528 drivers/net/ethernet/qlogic/qede/qede_filter.c 		memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
skb               530 drivers/net/ethernet/qlogic/qede/qede_filter.c 		memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
skb               535 drivers/net/ethernet/qlogic/qede/qede_filter.c 	eth->h_proto = skb->protocol;
skb               536 drivers/net/ethernet/qlogic/qede/qede_filter.c 	n->tuple.eth_proto = skb->protocol;
skb               539 drivers/net/ethernet/qlogic/qede/qede_filter.c 	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
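qede's aRFS code above matches a stored flow node against an incoming skb by L3 addresses (ports and protocol are compared by the caller). A sketch with an illustrative tuple layout:

	struct flow_tuple {			/* illustrative */
		__be32 src_ipv4, dst_ipv4;
		struct in6_addr src_ipv6, dst_ipv6;
	};

	static bool tuple_matches_skb(const struct flow_tuple *t,
				      const struct sk_buff *skb)
	{
		if (skb->protocol == htons(ETH_P_IP))
			return t->src_ipv4 == ip_hdr(skb)->saddr &&
			       t->dst_ipv4 == ip_hdr(skb)->daddr;

		if (skb->protocol == htons(ETH_P_IPV6))
			return ipv6_addr_equal(&t->src_ipv6,
					       &ipv6_hdr(skb)->saddr) &&
			       ipv6_addr_equal(&t->dst_ipv6,
					       &ipv6_hdr(skb)->daddr);

		return false;
	}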
skb               103 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
skb               111 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(!skb)) {
skb               118 drivers/net/ethernet/qlogic/qede/qede_fp.c 	*len = skb->len;
skb               136 drivers/net/ethernet/qlogic/qede/qede_fp.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
skb               147 drivers/net/ethernet/qlogic/qede/qede_fp.c 	dev_kfree_skb_any(skb);
skb               148 drivers/net/ethernet/qlogic/qede/qede_fp.c 	txq->sw_tx_ring.skbs[idx].skb = NULL;
skb               160 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
skb               195 drivers/net/ethernet/qlogic/qede/qede_fp.c 	dev_kfree_skb_any(skb);
skb               196 drivers/net/ethernet/qlogic/qede/qede_fp.c 	txq->sw_tx_ring.skbs[idx].skb = NULL;
skb               200 drivers/net/ethernet/qlogic/qede/qede_fp.c static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
skb               205 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb               208 drivers/net/ethernet/qlogic/qede/qede_fp.c 	l3_proto = vlan_get_protocol(skb);
skb               210 drivers/net/ethernet/qlogic/qede/qede_fp.c 	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
skb               213 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (skb->encapsulation) {
skb               215 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (skb_is_gso(skb)) {
skb               216 drivers/net/ethernet/qlogic/qede/qede_fp.c 			unsigned short gso_type = skb_shinfo(skb)->gso_type;
skb               227 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (skb_is_gso(skb))
skb               233 drivers/net/ethernet/qlogic/qede/qede_fp.c static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
skb               242 drivers/net/ethernet/qlogic/qede/qede_fp.c 	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
skb               249 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
skb               250 drivers/net/ethernet/qlogic/qede/qede_fp.c 		l4_proto = ipv6_hdr(skb)->nexthdr;
skb               252 drivers/net/ethernet/qlogic/qede/qede_fp.c 		l4_proto = ip_hdr(skb)->protocol;
skb               259 drivers/net/ethernet/qlogic/qede/qede_fp.c 			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
skb               284 drivers/net/ethernet/qlogic/qede/qede_fp.c static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
skb               287 drivers/net/ethernet/qlogic/qede/qede_fp.c 		return (skb_inner_transport_header(skb) +
skb               288 drivers/net/ethernet/qlogic/qede/qede_fp.c 			inner_tcp_hdrlen(skb) - skb->data);
skb               290 drivers/net/ethernet/qlogic/qede/qede_fp.c 		return (skb_transport_header(skb) +
skb               291 drivers/net/ethernet/qlogic/qede/qede_fp.c 			tcp_hdrlen(skb) - skb->data);
skb               296 drivers/net/ethernet/qlogic/qede/qede_fp.c static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
skb               303 drivers/net/ethernet/qlogic/qede/qede_fp.c 		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
skb               306 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (skb_headlen(skb) > hlen)
skb               310 drivers/net/ethernet/qlogic/qede/qede_fp.c 	return (skb_shinfo(skb)->nr_frags > allowed_frags);
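qede_pkt_req_lin() above decides when to linearize: LSO hardware bounds the buffer descriptors one wire packet may span, and headers consume some of them, so the fragment count must fit what remains. A sketch with illustrative limits:

	#define MAX_BDS_PER_PKT		18	/* illustrative */
	#define BDS_USED_FOR_HDRS	2	/* illustrative */

	static bool pkt_needs_linearize(const struct sk_buff *skb)
	{
		int allowed_frags = MAX_BDS_PER_PKT - 1;

		if (skb_is_gso(skb))
			allowed_frags -= BDS_USED_FOR_HDRS;

		return skb_shinfo(skb)->nr_frags > allowed_frags;
	}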
skb               585 drivers/net/ethernet/qlogic/qede/qede_fp.c static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
skb               598 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_set_hash(skb, hash, hash_type);
skb               601 drivers/net/ethernet/qlogic/qede/qede_fp.c static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
skb               603 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_checksum_none_assert(skb);
skb               606 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               609 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb->csum_level = 1;
skb               610 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb->encapsulation = 1;
skb               617 drivers/net/ethernet/qlogic/qede/qede_fp.c 				    struct sk_buff *skb, u16 vlan_tag)
skb               620 drivers/net/ethernet/qlogic/qede/qede_fp.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb               622 drivers/net/ethernet/qlogic/qede/qede_fp.c 	napi_gro_receive(&fp->napi, skb);
skb               626 drivers/net/ethernet/qlogic/qede/qede_fp.c 				struct sk_buff *skb,
skb               633 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb               635 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
skb               637 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
skb               648 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct sk_buff *skb = tpa_info->skb;
skb               654 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_fill_page_desc(skb, tpa_info->frag_id++,
skb               669 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb->data_len += len_on_bd;
skb               670 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb->truesize += rxq->rx_buf_seg_size;
skb               671 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb->len += len_on_bd;
skb               720 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct sk_buff *skb;
skb               724 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb = build_skb(buf, rxq->rx_buf_seg_size);
skb               726 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_reserve(skb, pad);
skb               727 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_put(skb, len);
skb               729 drivers/net/ethernet/qlogic/qede/qede_fp.c 	return skb;
skb               738 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct sk_buff *skb;
skb               740 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb = qede_build_skb(rxq, bd, len, pad);
skb               749 drivers/net/ethernet/qlogic/qede/qede_fp.c 			dev_kfree_skb_any(skb);
skb               760 drivers/net/ethernet/qlogic/qede/qede_fp.c 	return skb;
skb               768 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct sk_buff *skb = NULL;
skb               777 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
skb               778 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (unlikely(!skb))
skb               781 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb_reserve(skb, pad);
skb               782 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb_put_data(skb, page_address(bd->data) + offset, len);
skb               787 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb = qede_build_skb(rxq, bd, len, pad);
skb               795 drivers/net/ethernet/qlogic/qede/qede_fp.c 		dev_kfree_skb_any(skb);
skb               802 drivers/net/ethernet/qlogic/qede/qede_fp.c 	return skb;
skb               816 drivers/net/ethernet/qlogic/qede/qede_fp.c 	tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
skb               822 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(!tpa_info->skb)) {
skb               845 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
skb               848 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_set_gro_params(edev, tpa_info->skb, cqe);
skb               863 drivers/net/ethernet/qlogic/qede/qede_fp.c static void qede_gro_ip_csum(struct sk_buff *skb)
skb               865 drivers/net/ethernet/qlogic/qede/qede_fp.c 	const struct iphdr *iph = ip_hdr(skb);
skb               868 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_set_transport_header(skb, sizeof(struct iphdr));
skb               869 drivers/net/ethernet/qlogic/qede/qede_fp.c 	th = tcp_hdr(skb);
skb               871 drivers/net/ethernet/qlogic/qede/qede_fp.c 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
skb               874 drivers/net/ethernet/qlogic/qede/qede_fp.c 	tcp_gro_complete(skb);
skb               877 drivers/net/ethernet/qlogic/qede/qede_fp.c static void qede_gro_ipv6_csum(struct sk_buff *skb)
skb               879 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct ipv6hdr *iph = ipv6_hdr(skb);
skb               882 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
skb               883 drivers/net/ethernet/qlogic/qede/qede_fp.c 	th = tcp_hdr(skb);
skb               885 drivers/net/ethernet/qlogic/qede/qede_fp.c 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
skb               887 drivers/net/ethernet/qlogic/qede/qede_fp.c 	tcp_gro_complete(skb);
skb               893 drivers/net/ethernet/qlogic/qede/qede_fp.c 			     struct sk_buff *skb,
skb               901 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(!skb->data_len)) {
skb               902 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb_shinfo(skb)->gso_type = 0;
skb               903 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb_shinfo(skb)->gso_size = 0;
skb               908 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (skb_shinfo(skb)->gso_size) {
skb               909 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb_reset_network_header(skb);
skb               911 drivers/net/ethernet/qlogic/qede/qede_fp.c 		switch (skb->protocol) {
skb               913 drivers/net/ethernet/qlogic/qede/qede_fp.c 			qede_gro_ip_csum(skb);
skb               916 drivers/net/ethernet/qlogic/qede/qede_fp.c 			qede_gro_ipv6_csum(skb);
skb               921 drivers/net/ethernet/qlogic/qede/qede_fp.c 			       ntohs(skb->protocol));
skb               927 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_record_rx_queue(skb, fp->rxq->rxq_id);
skb               928 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
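The qede GRO lines above finish an aggregated frame before delivery: when a gso_size is present, the TCP checksum is re-seeded as a pseudo-header sum over the new total length and tcp_gro_complete() marks the skb GSO. A hedged sketch of the IPv4 leg (the IPv6 leg is analogous with tcp_v6_check()):

	static void gro_complete_and_deliver(struct napi_struct *napi,
					     struct sk_buff *skb, u16 vlan_tag)
	{
		if (skb_shinfo(skb)->gso_size &&
		    skb->protocol == htons(ETH_P_IP)) {
			const struct iphdr *iph;
			struct tcphdr *th;

			skb_reset_network_header(skb);
			iph = ip_hdr(skb);
			skb_set_transport_header(skb, sizeof(struct iphdr));
			th = tcp_hdr(skb);
			th->check = ~tcp_v4_check(skb->len -
						  skb_transport_offset(skb),
						  iph->saddr, iph->daddr, 0);
			tcp_gro_complete(skb);
		}

		if (vlan_tag)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		napi_gro_receive(napi, skb);
	}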
skb               952 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct sk_buff *skb;
skb               956 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb = tpa_info->skb;
skb               977 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
skb               980 drivers/net/ethernet/qlogic/qede/qede_fp.c 		       le16_to_cpu(cqe->total_packet_len), skb->len);
skb               983 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb->protocol = eth_type_trans(skb, edev->ndev);
skb               984 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               989 drivers/net/ethernet/qlogic/qede/qede_fp.c 	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
skb               991 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
skb              1004 drivers/net/ethernet/qlogic/qede/qede_fp.c 	dev_kfree_skb_any(tpa_info->skb);
skb              1005 drivers/net/ethernet/qlogic/qede/qede_fp.c 	tpa_info->skb = NULL;
skb              1126 drivers/net/ethernet/qlogic/qede/qede_fp.c 			       struct sk_buff *skb,
skb              1163 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
skb              1166 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb->truesize += PAGE_SIZE;
skb              1167 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb->data_len += cur_size;
skb              1168 drivers/net/ethernet/qlogic/qede/qede_fp.c 		skb->len += cur_size;
skb              1211 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct sk_buff *skb;
skb              1263 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
skb              1264 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (!skb) {
skb              1274 drivers/net/ethernet/qlogic/qede/qede_fp.c 		u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
skb              1279 drivers/net/ethernet/qlogic/qede/qede_fp.c 			dev_kfree_skb_any(skb);
skb              1285 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb->protocol = eth_type_trans(skb, edev->ndev);
skb              1286 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
skb              1287 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_set_skb_csum(skb, csum_flag);
skb              1288 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_record_rx_queue(skb, rxq->rxq_id);
skb              1289 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_ptp_record_rx_ts(edev, cqe, skb);
skb              1292 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
skb              1429 drivers/net/ethernet/qlogic/qede/qede_fp.c netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb              1448 drivers/net/ethernet/qlogic/qede/qede_fp.c 	txq_index = skb_get_queue_mapping(skb);
skb              1455 drivers/net/ethernet/qlogic/qede/qede_fp.c 	xmit_type = qede_xmit_type(skb, &ipv6_ext);
skb              1458 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (qede_pkt_req_lin(skb, xmit_type)) {
skb              1459 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (skb_linearize(skb)) {
skb              1462 drivers/net/ethernet/qlogic/qede/qede_fp.c 			dev_kfree_skb_any(skb);
skb              1470 drivers/net/ethernet/qlogic/qede/qede_fp.c 	txq->sw_tx_ring.skbs[idx].skb = skb;
skb              1477 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb              1478 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_ptp_tx_ts(edev, skb);
skb              1481 drivers/net/ethernet/qlogic/qede/qede_fp.c 	mapping = dma_map_single(txq->dev, skb->data,
skb              1482 drivers/net/ethernet/qlogic/qede/qede_fp.c 				 skb_headlen(skb), DMA_TO_DEVICE);
skb              1490 drivers/net/ethernet/qlogic/qede/qede_fp.c 	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
skb              1510 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (skb_vlan_tag_present(skb)) {
skb              1511 drivers/net/ethernet/qlogic/qede/qede_fp.c 		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
skb              1543 drivers/net/ethernet/qlogic/qede/qede_fp.c 			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
skb              1550 drivers/net/ethernet/qlogic/qede/qede_fp.c 			cpu_to_le16(skb_shinfo(skb)->gso_size);
skb              1561 drivers/net/ethernet/qlogic/qede/qede_fp.c 			hlen = qede_get_skb_hlen(skb, true);
skb              1565 drivers/net/ethernet/qlogic/qede/qede_fp.c 			hlen = qede_get_skb_hlen(skb, false);
skb              1575 drivers/net/ethernet/qlogic/qede/qede_fp.c 		if (unlikely(skb_headlen(skb) > hlen)) {
skb              1600 drivers/net/ethernet/qlogic/qede/qede_fp.c 		val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
skb              1608 drivers/net/ethernet/qlogic/qede/qede_fp.c 	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
skb              1610 drivers/net/ethernet/qlogic/qede/qede_fp.c 				    &skb_shinfo(skb)->frags[frag_idx],
skb              1627 drivers/net/ethernet/qlogic/qede/qede_fp.c 	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
skb              1634 drivers/net/ethernet/qlogic/qede/qede_fp.c 				    &skb_shinfo(skb)->frags[frag_idx],
skb              1646 drivers/net/ethernet/qlogic/qede/qede_fp.c 	netdev_tx_sent_queue(netdev_txq, skb->len);
skb              1648 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_tx_timestamp(skb);
skb              1689 drivers/net/ethernet/qlogic/qede/qede_fp.c u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              1698 drivers/net/ethernet/qlogic/qede/qede_fp.c 		netdev_pick_tx(dev, skb, NULL) % total_txq :  0;
skb              1704 drivers/net/ethernet/qlogic/qede/qede_fp.c netdev_features_t qede_features_check(struct sk_buff *skb,
skb              1708 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (skb->encapsulation) {
skb              1711 drivers/net/ethernet/qlogic/qede/qede_fp.c 		switch (vlan_get_protocol(skb)) {
skb              1713 drivers/net/ethernet/qlogic/qede/qede_fp.c 			l4_proto = ip_hdr(skb)->protocol;
skb              1716 drivers/net/ethernet/qlogic/qede/qede_fp.c 			l4_proto = ipv6_hdr(skb)->nexthdr;
skb              1734 drivers/net/ethernet/qlogic/qede/qede_fp.c 			if ((skb_inner_mac_header(skb) -
skb              1735 drivers/net/ethernet/qlogic/qede/qede_fp.c 			     skb_transport_header(skb)) > hdrlen ||
skb              1736 drivers/net/ethernet/qlogic/qede/qede_fp.c 			     (ntohs(udp_hdr(skb)->dest) != vxln_port &&
skb              1737 drivers/net/ethernet/qlogic/qede/qede_fp.c 			      ntohs(udp_hdr(skb)->dest) != gnv_port))
skb               523 drivers/net/ethernet/qlogic/qede/qede_ptp.c void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
skb               549 drivers/net/ethernet/qlogic/qede/qede_ptp.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb               551 drivers/net/ethernet/qlogic/qede/qede_ptp.c 		ptp->tx_skb = skb_get(skb);
skb               557 drivers/net/ethernet/qlogic/qede/qede_ptp.c void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
skb               577 drivers/net/ethernet/qlogic/qede/qede_ptp.c 	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
skb                40 drivers/net/ethernet/qlogic/qede/qede_ptp.h void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
skb                41 drivers/net/ethernet/qlogic/qede/qede_ptp.h void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
skb                49 drivers/net/ethernet/qlogic/qede/qede_ptp.h 					 struct sk_buff *skb)
skb                56 drivers/net/ethernet/qlogic/qede/qede_ptp.h 			qede_ptp_rx_ts(edev, skb);
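The qede PTP lines above show the two halves of hardware tx timestamping: mark the skb in progress and hold a reference until the device reports the timestamp, then complete it with skb_tstamp_tx(). A sketch with an illustrative one-slot state:

	struct my_ptp {				/* illustrative */
		struct sk_buff *tx_skb;
	};

	static void ptp_start_tx_ts(struct my_ptp *ptp, struct sk_buff *skb)
	{
		if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
			return;

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ptp->tx_skb = skb_get(skb);	/* hold until hw reports */
	}

	static void ptp_finish_tx_ts(struct my_ptp *ptp, u64 ns)
	{
		struct skb_shared_hwtstamps hwts = {
			.hwtstamp = ns_to_ktime(ns),
		};

		skb_tstamp_tx(ptp->tx_skb, &hwts);
		dev_kfree_skb_any(ptp->tx_skb);
		ptp->tx_skb = NULL;
	}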
skb               308 drivers/net/ethernet/qlogic/qla3xxx.c 	if (!lrg_buf_cb->skb) {
skb               309 drivers/net/ethernet/qlogic/qla3xxx.c 		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
skb               311 drivers/net/ethernet/qlogic/qla3xxx.c 		if (unlikely(!lrg_buf_cb->skb)) {
skb               318 drivers/net/ethernet/qlogic/qla3xxx.c 			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
skb               320 drivers/net/ethernet/qlogic/qla3xxx.c 					     lrg_buf_cb->skb->data,
skb               329 drivers/net/ethernet/qlogic/qla3xxx.c 				dev_kfree_skb(lrg_buf_cb->skb);
skb               330 drivers/net/ethernet/qlogic/qla3xxx.c 				lrg_buf_cb->skb = NULL;
skb              1792 drivers/net/ethernet/qlogic/qla3xxx.c 		if (!lrg_buf_cb->skb) {
skb              1793 drivers/net/ethernet/qlogic/qla3xxx.c 			lrg_buf_cb->skb =
skb              1796 drivers/net/ethernet/qlogic/qla3xxx.c 			if (unlikely(!lrg_buf_cb->skb)) {
skb              1805 drivers/net/ethernet/qlogic/qla3xxx.c 				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
skb              1807 drivers/net/ethernet/qlogic/qla3xxx.c 						     lrg_buf_cb->skb->data,
skb              1817 drivers/net/ethernet/qlogic/qla3xxx.c 					dev_kfree_skb(lrg_buf_cb->skb);
skb              1818 drivers/net/ethernet/qlogic/qla3xxx.c 					lrg_buf_cb->skb = NULL;
skb              1962 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
skb              1965 drivers/net/ethernet/qlogic/qla3xxx.c 	dev_kfree_skb_irq(tx_cb->skb);
skb              1966 drivers/net/ethernet/qlogic/qla3xxx.c 	tx_cb->skb = NULL;
skb              2006 drivers/net/ethernet/qlogic/qla3xxx.c 	struct sk_buff *skb;
skb              2019 drivers/net/ethernet/qlogic/qla3xxx.c 	skb = lrg_buf_cb2->skb;
skb              2024 drivers/net/ethernet/qlogic/qla3xxx.c 	skb_put(skb, length);
skb              2029 drivers/net/ethernet/qlogic/qla3xxx.c 	prefetch(skb->data);
skb              2030 drivers/net/ethernet/qlogic/qla3xxx.c 	skb_checksum_none_assert(skb);
skb              2031 drivers/net/ethernet/qlogic/qla3xxx.c 	skb->protocol = eth_type_trans(skb, qdev->ndev);
skb              2033 drivers/net/ethernet/qlogic/qla3xxx.c 	napi_gro_receive(&qdev->napi, skb);
skb              2034 drivers/net/ethernet/qlogic/qla3xxx.c 	lrg_buf_cb2->skb = NULL;
skb              2060 drivers/net/ethernet/qlogic/qla3xxx.c 		skb1 = lrg_buf_cb1->skb;
skb              2068 drivers/net/ethernet/qlogic/qla3xxx.c 	skb2 = lrg_buf_cb2->skb;
skb              2106 drivers/net/ethernet/qlogic/qla3xxx.c 	lrg_buf_cb2->skb = NULL;
skb              2283 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_hw_csum_setup(const struct sk_buff *skb,
skb              2286 drivers/net/ethernet/qlogic/qla3xxx.c 	const struct iphdr *ip = ip_hdr(skb);
skb              2288 drivers/net/ethernet/qlogic/qla3xxx.c 	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
skb              2308 drivers/net/ethernet/qlogic/qla3xxx.c 				struct sk_buff *skb)
skb              2312 drivers/net/ethernet/qlogic/qla3xxx.c 	int len = skb_headlen(skb);
skb              2317 drivers/net/ethernet/qlogic/qla3xxx.c 	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
skb              2323 drivers/net/ethernet/qlogic/qla3xxx.c 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
skb              2350 drivers/net/ethernet/qlogic/qla3xxx.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
skb              2461 drivers/net/ethernet/qlogic/qla3xxx.c static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
skb              2468 drivers/net/ethernet/qlogic/qla3xxx.c 	u32 tot_len = skb->len;
skb              2476 drivers/net/ethernet/qlogic/qla3xxx.c 					     skb_shinfo(skb)->nr_frags);
skb              2489 drivers/net/ethernet/qlogic/qla3xxx.c 	tx_cb->skb = skb;
skb              2491 drivers/net/ethernet/qlogic/qla3xxx.c 	    skb->ip_summed == CHECKSUM_PARTIAL)
skb              2492 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_hw_csum_setup(skb, mac_iocb_ptr);
skb              2494 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
skb              2510 drivers/net/ethernet/qlogic/qla3xxx.c 		     qdev->req_producer_index, skb->len);
skb              2721 drivers/net/ethernet/qlogic/qla3xxx.c 		if (lrg_buf_cb->skb) {
skb              2722 drivers/net/ethernet/qlogic/qla3xxx.c 			dev_kfree_skb(lrg_buf_cb->skb);
skb              2754 drivers/net/ethernet/qlogic/qla3xxx.c 	struct sk_buff *skb;
skb              2762 drivers/net/ethernet/qlogic/qla3xxx.c 		skb = netdev_alloc_skb(qdev->ndev,
skb              2764 drivers/net/ethernet/qlogic/qla3xxx.c 		if (unlikely(!skb)) {
skb              2777 drivers/net/ethernet/qlogic/qla3xxx.c 			skb_reserve(skb, QL_HEADER_SPACE);
skb              2779 drivers/net/ethernet/qlogic/qla3xxx.c 					     skb->data,
skb              2789 drivers/net/ethernet/qlogic/qla3xxx.c 				dev_kfree_skb_irq(skb);
skb              2794 drivers/net/ethernet/qlogic/qla3xxx.c 			lrg_buf_cb->skb = skb;
skb              2831 drivers/net/ethernet/qlogic/qla3xxx.c 		tx_cb->skb = NULL;
skb              3642 drivers/net/ethernet/qlogic/qla3xxx.c 			if (tx_cb->skb) {
skb              3658 drivers/net/ethernet/qlogic/qla3xxx.c 				dev_kfree_skb(tx_cb->skb);
skb              3659 drivers/net/ethernet/qlogic/qla3xxx.c 				tx_cb->skb = NULL;
skb              1000 drivers/net/ethernet/qlogic/qla3xxx.h 	struct sk_buff *skb;
skb              1037 drivers/net/ethernet/qlogic/qla3xxx.h 	struct sk_buff *skb;
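All of the qla3xxx.c entries above revolve around one receive-buffer pattern: allocate an skb for a ring slot, reserve hardware-header headroom, DMA-map the data area with the legacy pci_* wrappers the driver uses, and unwind on mapping failure. A minimal sketch of that pattern; the rx_buf_cb layout and names are illustrative stand-ins for the driver's lrg_buf_cb:

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical per-buffer bookkeeping, mirroring lrg_buf_cb. */
struct rx_buf_cb {
	struct sk_buff *skb;
	dma_addr_t dma;
};

static int rx_buf_refill(struct pci_dev *pdev, struct net_device *ndev,
			 struct rx_buf_cb *cb, unsigned int buf_len,
			 unsigned int headroom)
{
	if (cb->skb)
		return 0;	/* slot already populated */

	cb->skb = netdev_alloc_skb(ndev, buf_len + headroom);
	if (unlikely(!cb->skb))
		return -ENOMEM;

	/* Keep headroom so the hardware header can be stripped cheaply. */
	skb_reserve(cb->skb, headroom);

	cb->dma = pci_map_single(pdev, cb->skb->data, buf_len,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, cb->dma)) {
		dev_kfree_skb(cb->skb);
		cb->skb = NULL;
		return -EIO;
	}
	return 0;
}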
skb               361 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	struct sk_buff *skb;
skb               369 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	struct sk_buff *skb;
skb              1046 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	struct sk_buff *skb;
skb              1050 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
skb              1051 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		if (!skb)
skb              1053 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb              1054 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		skb_put(skb, QLCNIC_ILB_PKT_SIZE);
skb              1056 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		qlcnic_xmit_frame(skb, adapter->netdev);
skb              1066 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		dev_kfree_skb_any(skb);
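The qlcnic_ethtool.c entries show the internal-loopback self-test: build a recognizable frame, push it through the driver's own transmit routine, and free it afterwards. A hedged sketch of the transmit half; lb_fill() and LB_PKT_SIZE are stand-ins for qlcnic_create_loopback_buff() and QLCNIC_ILB_PKT_SIZE, and the extra skb_get() reference is what makes the trailing free safe here:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

#define LB_PKT_SIZE 64	/* stand-in for QLCNIC_ILB_PKT_SIZE */

/* Fill a recognizable loopback payload: our MAC twice, then a pattern. */
static void lb_fill(u8 *data, const u8 *mac)
{
	memcpy(data, mac, ETH_ALEN);
	memcpy(data + ETH_ALEN, mac, ETH_ALEN);
	memset(data + 2 * ETH_ALEN, 0x55, LB_PKT_SIZE - 2 * ETH_ALEN);
}

static int lb_send_one(struct net_device *ndev, const u8 *mac)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(ndev, LB_PKT_SIZE);
	if (!skb)
		return -ENOMEM;

	lb_fill(skb->data, mac);
	skb_put(skb, LB_PKT_SIZE);	/* commit the payload length */
	skb_get(skb);			/* extra ref: xmit consumes one */

	ndev->netdev_ops->ndo_start_xmit(skb, ndev);

	dev_kfree_skb_any(skb);		/* drop our extra reference */
	return 0;
}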
skb               417 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c 		pbuf->skb = NULL;
skb                95 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 			if (rx_buf->skb == NULL)
skb               103 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 			dev_kfree_skb_any(rx_buf->skb);
skb               156 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 		if (cmd_buf->skb) {
skb               157 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 			dev_kfree_skb_any(cmd_buf->skb);
skb               158 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 			cmd_buf->skb = NULL;
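The qlcnic_init.c entries are the teardown side: walk each ring and release any skb still parked in a buffer slot, NULLing the pointer so a second pass is harmless. A small sketch with a hypothetical slot layout:

#include <linux/skbuff.h>

/* Hypothetical ring slot; mirrors the rx_buf/cmd_buf entries above. */
struct ring_slot {
	struct sk_buff *skb;
};

static void ring_free_skbs(struct ring_slot *slots, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!slots[i].skb)
			continue;
		dev_kfree_skb_any(slots[i].skb);
		slots[i].skb = NULL;	/* make repeat teardown safe */
	}
}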
skb               192 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 				 struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
skb               194 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
skb               303 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			       struct sk_buff *skb,
skb               306 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
skb               307 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
skb               308 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	u16 protocol = ntohs(skb->protocol);
skb               321 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			vh = (struct vlan_ethhdr *)skb->data;
skb               323 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		} else if (skb_vlan_tag_present(skb)) {
skb               324 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			vlan_id = skb_vlan_tag_get(skb);
skb               372 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			       struct sk_buff *skb,
skb               384 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (skb_is_gso(skb)) {
skb               385 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		inner_hdr_len = skb_inner_transport_header(skb) +
skb               386 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 				inner_tcp_hdrlen(skb) -
skb               387 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 				skb_inner_mac_header(skb);
skb               390 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		outer_hdr_len = skb_transport_offset(skb) + 8 +
skb               396 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
skb               408 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring->cmd_buf_arr[producer].skb = NULL;
skb               409 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			skb_copy_from_linear_data_offset(skb, copied,
skb               425 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               426 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (inner_ip_hdr(skb)->version == 6) {
skb               427 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
skb               430 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
skb               439 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (ip_hdr(skb)->version == 6)
skb               443 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
skb               446 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	encap_descr |= skb_network_offset(skb) << 10;
skb               449 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
skb               450 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 				     skb->data;
skb               451 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
skb               459 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
skb               467 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	u16 protocol = ntohs(skb->protocol);
skb               471 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		vh = (struct vlan_ethhdr *)skb->data;
skb               476 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	} else if (skb_vlan_tag_present(skb)) {
skb               478 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		vlan_tci = skb_vlan_tag_get(skb);
skb               494 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (*(skb->data) & BIT_0) {
skb               496 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
skb               499 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (skb_is_gso(skb)) {
skb               500 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               501 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
skb               521 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring->cmd_buf_arr[producer].skb = NULL;
skb               527 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			skb_copy_from_linear_data(skb, vh, 12);
skb               531 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			skb_copy_from_linear_data_offset(skb, 12,
skb               543 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring->cmd_buf_arr[producer].skb = NULL;
skb               544 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			skb_copy_from_linear_data_offset(skb, copied,
skb               556 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               558 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			l4proto = ip_hdr(skb)->protocol;
skb               565 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			l4proto = ipv6_hdr(skb)->nexthdr;
skb               573 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
skb               574 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	first_desc->ip_hdr_offset += skb_network_offset(skb);
skb               580 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
skb               588 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb               591 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
skb               597 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	nf->length = skb_headlen(skb);
skb               600 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		frag = &skb_shinfo(skb)->frags[i];
skb               620 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
skb               626 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
skb               630 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
skb               638 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
skb               639 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	pbuf->skb = NULL;
skb               649 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
skb               669 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		phdr = (struct ethhdr *)skb->data;
skb               674 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
skb               677 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	frag_count = skb_shinfo(skb)->nr_frags + 1;
skb               682 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
skb               684 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb               686 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (!__pskb_pull_tail(skb, delta))
skb               689 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		frag_count = 1 + skb_shinfo(skb)->nr_frags;
skb               709 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
skb               714 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	pbuf->skb = skb;
skb               717 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
skb               728 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring->cmd_buf_arr[producer].skb = NULL;
skb               752 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	protocol = ntohs(skb->protocol);
skb               754 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
skb               756 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
skb               759 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!skb->encapsulation || !l4_is_udp ||
skb               761 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
skb               766 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 						 skb, tx_ring)))
skb               771 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
skb               773 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	tx_ring->tx_stats.tx_bytes += skb->len;
skb               783 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	qlcnic_unmap_buffers(pdev, skb, pbuf);
skb               786 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	dev_kfree_skb_any(skb);
skb               818 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct sk_buff *skb;
skb               822 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
skb               823 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!skb) {
skb               828 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb_reserve(skb, NET_IP_ALIGN);
skb               829 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	dma = pci_map_single(pdev, skb->data,
skb               834 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		dev_kfree_skb_any(skb);
skb               838 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	buffer->skb = skb;
skb               862 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (!buffer->skb) {
skb               905 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (buffer->skb) {
skb               917 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			dev_kfree_skb_any(buffer->skb);
skb               918 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			buffer->skb = NULL;
skb              1143 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct sk_buff *skb;
skb              1146 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (unlikely(buffer->skb == NULL)) {
skb              1154 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb = buffer->skb;
skb              1158 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1160 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_checksum_none_assert(skb);
skb              1164 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	buffer->skb = NULL;
skb              1166 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	return skb;
skb              1170 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 					  struct sk_buff *skb, u16 *vlan_tag)
skb              1174 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!__vlan_get_tag(skb, vlan_tag)) {
skb              1175 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		eth_hdr = (struct ethhdr *)skb->data;
skb              1176 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
skb              1177 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_pull(skb, VLAN_HLEN);
skb              1201 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct sk_buff *skb;
skb              1220 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
skb              1221 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!skb)
skb              1227 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
skb              1231 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_put(skb, rds_ring->skb_size);
skb              1233 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_put(skb, length);
skb              1236 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_pull(skb, pkt_offset);
skb              1238 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
skb              1240 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		dev_kfree_skb(skb);
skb              1244 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              1247 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              1249 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	napi_gro_receive(&sds_ring->napi, skb);
skb              1268 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct sk_buff *skb;
skb              1296 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
skb              1297 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!skb)
skb              1303 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
skb              1311 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb_put(skb, lro_length + data_offset);
skb              1312 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb_pull(skb, l2_hdr_offset);
skb              1314 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
skb              1316 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		dev_kfree_skb(skb);
skb              1320 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              1322 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (ntohs(skb->protocol) == ETH_P_IPV6) {
skb              1323 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		ipv6h = (struct ipv6hdr *)skb->data;
skb              1324 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
skb              1328 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		iph = (struct iphdr *)skb->data;
skb              1329 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
skb              1337 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	length = skb->len;
skb              1340 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
skb              1341 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (skb->protocol == htons(ETH_P_IPV6))
skb              1342 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb              1344 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
skb              1348 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              1349 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	netif_receive_skb(skb);
skb              1453 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (!buffer->skb) {
skb              1478 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
skb              1487 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 				     skb->data, skb->len, true);
skb              1495 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct sk_buff *skb;
skb              1512 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
skb              1513 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!skb)
skb              1517 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_put(skb, rds_ring->skb_size);
skb              1519 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_put(skb, length);
skb              1522 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_pull(skb, pkt_offset);
skb              1524 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
skb              1527 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		dump_skb(skb, adapter);
skb              1529 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	dev_kfree_skb_any(skb);
skb              1724 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct sk_buff *skb;
skb              1742 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
skb              1743 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!skb)
skb              1747 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_put(skb, rds_ring->skb_size);
skb              1749 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_put(skb, length);
skb              1751 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	err = qlcnic_check_rx_tagging(adapter, skb, &vid);
skb              1755 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
skb              1760 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		dev_kfree_skb(skb);
skb              1764 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              1767 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
skb              1768 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb->csum_level = 1;
skb              1773 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              1775 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	napi_gro_receive(&sds_ring->napi, skb);
skb              1790 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct sk_buff *skb;
skb              1818 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
skb              1819 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!skb)
skb              1827 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb_put(skb, lro_length + data_offset);
skb              1828 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb_pull(skb, l2_hdr_offset);
skb              1830 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	err = qlcnic_check_rx_tagging(adapter, skb, &vid);
skb              1834 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
skb              1839 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		dev_kfree_skb(skb);
skb              1843 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              1844 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (ntohs(skb->protocol) == ETH_P_IPV6) {
skb              1845 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		ipv6h = (struct ipv6hdr *)skb->data;
skb              1846 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
skb              1851 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		iph = (struct iphdr *)skb->data;
skb              1852 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
skb              1859 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	length = skb->len;
skb              1863 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_shinfo(skb)->gso_size = gso_size;
skb              1864 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (skb->protocol == htons(ETH_P_IPV6))
skb              1865 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb              1867 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
skb              1871 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              1873 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	netif_receive_skb(skb);
skb              2178 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	struct sk_buff *skb;
skb              2192 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
skb              2193 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!skb)
skb              2197 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_put(skb, rds_ring->skb_size);
skb              2199 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		skb_put(skb, length);
skb              2201 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
skb              2204 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		dump_skb(skb, adapter);
skb              2206 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	dev_kfree_skb_any(skb);
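The long run of qlcnic_io.c entries repeats one receive-completion shape: detach the skb from its ring slot, mark the checksum state reported by the descriptor, size the skb with skb_put(), resolve the protocol with eth_type_trans(), restore any stripped VLAN tag, and hand the frame to GRO. A condensed sketch, with a hypothetical rx_entry standing in for qlcnic_rx_buffer (0xffff as the no-VLAN sentinel follows the driver's convention):

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical ring-buffer entry. */
struct rx_entry {
	struct sk_buff *skb;
	dma_addr_t dma;
	u16 len;
};

static void rx_complete_one(struct napi_struct *napi, struct net_device *ndev,
			    struct rx_entry *e, bool csum_ok, u16 vid)
{
	struct sk_buff *skb = e->skb;

	e->skb = NULL;				/* detach from the ring */

	if (csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb_put(skb, e->len);			/* length from the descriptor */
	skb->protocol = eth_type_trans(skb, ndev);

	if (vid != 0xffff)			/* 0xffff: no tag present */
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(napi, skb);
}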
skb               431 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
skb               439 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
skb               443 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		err = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
skb               517 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
skb               521 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	features = vlan_features_check(skb, features);
skb               522 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	return vxlan_features_check(skb, features);
skb               210 drivers/net/ethernet/qualcomm/emac/emac-mac.c #define EMAC_SKB_CB(skb)	((struct emac_skb_cb *)(skb)->cb)
skb               604 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		if (tpbuf->skb) {
skb               605 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			dev_kfree_skb_any(tpbuf->skb);
skb               606 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			tpbuf->skb = NULL;
skb               640 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		if (rfbuf->skb) {
skb               641 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			dev_kfree_skb(rfbuf->skb);
skb               642 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			rfbuf->skb = NULL;
skb               876 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		struct sk_buff *skb;
skb               879 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		skb = netdev_alloc_skb_ip_align(adpt->netdev, adpt->rxbuf_size);
skb               880 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		if (!skb)
skb               884 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			dma_map_single(adpt->netdev->dev.parent, skb->data,
skb               890 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			dev_kfree_skb(skb);
skb               893 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		curr_rxbuf->skb = skb;
skb              1062 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		rfbuf[consume_idx].skb = NULL;
skb              1073 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			     struct sk_buff *skb,
skb              1080 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
skb              1083 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	napi_gro_receive(&rx_q->napi, skb);
skb              1095 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	struct sk_buff *skb;
skb              1119 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			skb = rfbuf->skb;
skb              1139 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			dev_kfree_skb(skb);
skb              1143 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		skb_put(skb, RRD_PKT_SIZE(&rrd) - ETH_FCS_LEN);
skb              1144 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		skb->dev = netdev;
skb              1145 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		skb->protocol = eth_type_trans(skb, skb->dev);
skb              1147 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			skb->ip_summed = RRD_L4F(&rrd) ?
skb              1150 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			skb_checksum_none_assert(skb);
skb              1152 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd),
skb              1196 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		if (tpbuf->skb) {
skb              1198 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			bytes_compl += tpbuf->skb->len;
skb              1199 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			dev_consume_skb_irq(tpbuf->skb);
skb              1200 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			tpbuf->skb = NULL;
skb              1247 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			 struct sk_buff *skb,
skb              1253 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	if (skb_is_gso(skb)) {
skb              1254 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		if (skb_header_cloned(skb)) {
skb              1255 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
skb              1260 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb              1261 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			u32 pkt_len = ((unsigned char *)ip_hdr(skb) - skb->data)
skb              1262 drivers/net/ethernet/qualcomm/emac/emac-mac.c 				       + ntohs(ip_hdr(skb)->tot_len);
skb              1263 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			if (skb->len > pkt_len)
skb              1264 drivers/net/ethernet/qualcomm/emac/emac-mac.c 				pskb_trim(skb, pkt_len);
skb              1267 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1268 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		if (unlikely(skb->len == hdr_len)) {
skb              1275 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
skb              1276 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			ip_hdr(skb)->check = 0;
skb              1277 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			tcp_hdr(skb)->check =
skb              1278 drivers/net/ethernet/qualcomm/emac/emac-mac.c 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
skb              1279 drivers/net/ethernet/qualcomm/emac/emac-mac.c 						   ip_hdr(skb)->daddr,
skb              1284 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
skb              1291 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			ipv6_hdr(skb)->payload_len = 0;
skb              1292 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			tcp_hdr(skb)->check =
skb              1293 drivers/net/ethernet/qualcomm/emac/emac-mac.c 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb              1294 drivers/net/ethernet/qualcomm/emac/emac-mac.c 						 &ipv6_hdr(skb)->daddr,
skb              1296 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			TPD_PKT_LEN_SET(&extra_tpd, skb->len);
skb              1304 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		TPD_TCPHDR_OFFSET_SET(tpd, skb_transport_offset(skb));
skb              1305 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		TPD_MSS_SET(tpd, skb_shinfo(skb)->gso_size);
skb              1310 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              1313 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		cso = skb_transport_offset(skb);
skb              1319 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		css = cso + skb->csum_offset;
skb              1331 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			     struct emac_tx_queue *tx_q, struct sk_buff *skb,
skb              1334 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1336 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	unsigned int len = skb_headlen(skb);
skb              1345 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              1350 drivers/net/ethernet/qualcomm/emac/emac-mac.c 					       virt_to_page(skb->data),
skb              1351 drivers/net/ethernet/qualcomm/emac/emac-mac.c 					       offset_in_page(skb->data),
skb              1370 drivers/net/ethernet/qualcomm/emac/emac-mac.c 					       virt_to_page(skb->data +
skb              1372 drivers/net/ethernet/qualcomm/emac/emac-mac.c 					       offset_in_page(skb->data +
skb              1388 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1414 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	tpbuf->skb = skb;
skb              1433 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	dev_kfree_skb(skb);
skb              1438 drivers/net/ethernet/qualcomm/emac/emac-mac.c 			 struct sk_buff *skb)
skb              1445 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	if (emac_tso_csum(adpt, tx_q, skb, &tpd) != 0) {
skb              1446 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		dev_kfree_skb_any(skb);
skb              1450 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	if (skb_vlan_tag_present(skb)) {
skb              1453 drivers/net/ethernet/qualcomm/emac/emac-mac.c 		EMAC_VLAN_TO_TAG(skb_vlan_tag_get(skb), tag);
skb              1458 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	if (skb_network_offset(skb) != ETH_HLEN)
skb              1461 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
skb              1463 drivers/net/ethernet/qualcomm/emac/emac-mac.c 	netdev_sent_queue(adpt->netdev, skb->len);
skb               144 drivers/net/ethernet/qualcomm/emac/emac-mac.h 	struct sk_buff		*skb;		/* socket buffer */
skb               231 drivers/net/ethernet/qualcomm/emac/emac-mac.h 			 struct sk_buff *skb);
skb               118 drivers/net/ethernet/qualcomm/emac/emac.c static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb               122 drivers/net/ethernet/qualcomm/emac/emac.c 	return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
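The emac-mac.c entries around emac_tso_csum() perform the standard TSO preparation: for IPv4 the IP checksum is zeroed and the TCP checksum is seeded with the pseudo-header sum computed over a zero length; for IPv6 the payload length is zeroed and csum_ipv6_magic() provides the seed, since the hardware fills in per-segment lengths. A minimal sketch of just that seeding step:

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>

/* Seed the TCP pseudo-header checksum before handing a GSO skb to
 * hardware TSO; the device computes per-segment lengths, so length
 * is passed as 0 here. */
static void tso_seed_csum(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}
}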
skb                79 drivers/net/ethernet/qualcomm/qca_debug.c 	if (qca->txr.skb[qca->txr.head] == NULL)
skb                81 drivers/net/ethernet/qualcomm/qca_debug.c 	else if (qca->txr.skb[qca->txr.tail])
skb               239 drivers/net/ethernet/qualcomm/qca_spi.c qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb)
skb               246 drivers/net/ethernet/qualcomm/qca_spi.c 	len = skb->len;
skb               260 drivers/net/ethernet/qualcomm/qca_spi.c 						      skb->data + offset,
skb               264 drivers/net/ethernet/qualcomm/qca_spi.c 						     skb->data + offset,
skb               287 drivers/net/ethernet/qualcomm/qca_spi.c 	if (qca->txr.skb[qca->txr.head] == NULL)
skb               300 drivers/net/ethernet/qualcomm/qca_spi.c 	while (qca->txr.skb[qca->txr.head]) {
skb               301 drivers/net/ethernet/qualcomm/qca_spi.c 		pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;
skb               309 drivers/net/ethernet/qualcomm/qca_spi.c 		if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) {
skb               316 drivers/net/ethernet/qualcomm/qca_spi.c 		n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len;
skb               324 drivers/net/ethernet/qualcomm/qca_spi.c 		dev_kfree_skb(qca->txr.skb[qca->txr.head]);
skb               325 drivers/net/ethernet/qualcomm/qca_spi.c 		qca->txr.skb[qca->txr.head] = NULL;
skb               461 drivers/net/ethernet/qualcomm/qca_spi.c 	if (txr->skb[txr->tail])
skb               481 drivers/net/ethernet/qualcomm/qca_spi.c 		if (qca->txr.skb[i]) {
skb               482 drivers/net/ethernet/qualcomm/qca_spi.c 			dev_kfree_skb(qca->txr.skb[i]);
skb               483 drivers/net/ethernet/qualcomm/qca_spi.c 			qca->txr.skb[i] = NULL;
skb               577 drivers/net/ethernet/qualcomm/qca_spi.c 		    (qca->txr.skb[qca->txr.head] == NULL) &&
skb               585 drivers/net/ethernet/qualcomm/qca_spi.c 			   qca->txr.skb[qca->txr.head]);
skb               718 drivers/net/ethernet/qualcomm/qca_spi.c qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
skb               727 drivers/net/ethernet/qualcomm/qca_spi.c 	if (skb->len < QCAFRM_MIN_LEN)
skb               728 drivers/net/ethernet/qualcomm/qca_spi.c 		pad_len = QCAFRM_MIN_LEN - skb->len;
skb               730 drivers/net/ethernet/qualcomm/qca_spi.c 	if (qca->txr.skb[qca->txr.tail]) {
skb               737 drivers/net/ethernet/qualcomm/qca_spi.c 	if ((skb_headroom(skb) < QCAFRM_HEADER_LEN) ||
skb               738 drivers/net/ethernet/qualcomm/qca_spi.c 	    (skb_tailroom(skb) < QCAFRM_FOOTER_LEN + pad_len)) {
skb               739 drivers/net/ethernet/qualcomm/qca_spi.c 		tskb = skb_copy_expand(skb, QCAFRM_HEADER_LEN,
skb               745 drivers/net/ethernet/qualcomm/qca_spi.c 		dev_kfree_skb(skb);
skb               746 drivers/net/ethernet/qualcomm/qca_spi.c 		skb = tskb;
skb               749 drivers/net/ethernet/qualcomm/qca_spi.c 	frame_len = skb->len + pad_len;
skb               751 drivers/net/ethernet/qualcomm/qca_spi.c 	ptmp = skb_push(skb, QCAFRM_HEADER_LEN);
skb               755 drivers/net/ethernet/qualcomm/qca_spi.c 		ptmp = skb_put_zero(skb, pad_len);
skb               758 drivers/net/ethernet/qualcomm/qca_spi.c 	ptmp = skb_put(skb, QCAFRM_FOOTER_LEN);
skb               762 drivers/net/ethernet/qualcomm/qca_spi.c 		   skb->len);
skb               764 drivers/net/ethernet/qualcomm/qca_spi.c 	qca->txr.size += skb->len + QCASPI_HW_PKT_LEN;
skb               770 drivers/net/ethernet/qualcomm/qca_spi.c 	qca->txr.skb[qca->txr.tail] = skb;
skb                57 drivers/net/ethernet/qualcomm/qca_spi.h 	struct sk_buff *skb[TX_RING_MAX_LEN];
skb               199 drivers/net/ethernet/qualcomm/qca_uart.c qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
skb               219 drivers/net/ethernet/qualcomm/qca_uart.c 	if (skb->len < QCAFRM_MIN_LEN)
skb               220 drivers/net/ethernet/qualcomm/qca_uart.c 		pad_len = QCAFRM_MIN_LEN - skb->len;
skb               222 drivers/net/ethernet/qualcomm/qca_uart.c 	pos += qcafrm_create_header(pos, skb->len + pad_len);
skb               224 drivers/net/ethernet/qualcomm/qca_uart.c 	memcpy(pos, skb->data, skb->len);
skb               225 drivers/net/ethernet/qualcomm/qca_uart.c 	pos += skb->len;
skb               247 drivers/net/ethernet/qualcomm/qca_uart.c 	dev_kfree_skb_any(skb);
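Both qca_spi.c and qca_uart.c wrap each Ethernet frame in a serial framing header and footer, padding runt frames up to a minimum length, and fall back to skb_copy_expand() when the skb lacks head- or tailroom. A sketch of that in-place-or-copy framing; the FRM_* constants and filler bytes are stand-ins for QCAFRM_HEADER_LEN, QCAFRM_FOOTER_LEN, QCAFRM_MIN_LEN and the real framing bytes:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

#define FRM_HDR	4	/* stand-in for QCAFRM_HEADER_LEN */
#define FRM_FTR	2	/* stand-in for QCAFRM_FOOTER_LEN */
#define FRM_MIN	60	/* stand-in for QCAFRM_MIN_LEN */

/* Wrap a frame for the wire, in place when the skb has room,
 * otherwise in a reallocated copy. Returns NULL on allocation
 * failure (the original skb is consumed either way). */
static struct sk_buff *frame_for_wire(struct sk_buff *skb)
{
	unsigned int pad = 0;

	if (skb->len < FRM_MIN)
		pad = FRM_MIN - skb->len;

	if (skb_headroom(skb) < FRM_HDR ||
	    skb_tailroom(skb) < FRM_FTR + pad) {
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, FRM_HDR, FRM_FTR + pad,
				       GFP_ATOMIC);
		dev_kfree_skb(skb);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	memset(skb_push(skb, FRM_HDR), 0xAA, FRM_HDR);	/* header bytes */
	if (pad)
		skb_put_zero(skb, pad);			/* pad runt frames */
	memset(skb_put(skb, FRM_FTR), 0x55, FRM_FTR);	/* footer bytes */

	return skb;
}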
skb               338 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb               347 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c 	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
skb               359 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c 	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
skb                22 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c static void rmnet_set_skb_proto(struct sk_buff *skb)
skb                24 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	switch (skb->data[0] & 0xF0) {
skb                26 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		skb->protocol = htons(ETH_P_IP);
skb                29 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		skb->protocol = htons(ETH_P_IPV6);
skb                32 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		skb->protocol = htons(ETH_P_MAP);
skb                40 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c rmnet_deliver_skb(struct sk_buff *skb)
skb                42 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	struct rmnet_priv *priv = netdev_priv(skb->dev);
skb                44 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	skb_reset_transport_header(skb);
skb                45 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	skb_reset_network_header(skb);
skb                46 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	rmnet_vnd_rx_fixup(skb, skb->dev);
skb                48 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	skb->pkt_type = PACKET_HOST;
skb                49 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	skb_set_mac_header(skb, 0);
skb                50 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	gro_cells_receive(&priv->gro_cells, skb);
skb                56 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c __rmnet_map_ingress_handler(struct sk_buff *skb,
skb                63 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	if (RMNET_MAP_GET_CD_BIT(skb)) {
skb                65 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 			return rmnet_map_command(skb, port);
skb                70 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	mux_id = RMNET_MAP_GET_MUX_ID(skb);
skb                71 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	pad = RMNET_MAP_GET_PAD(skb);
skb                72 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	len = RMNET_MAP_GET_LENGTH(skb) - pad;
skb                81 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	skb->dev = ep->egress_dev;
skb                84 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	skb_pull(skb, sizeof(struct rmnet_map_header));
skb                85 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	rmnet_set_skb_proto(skb);
skb                88 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
skb                89 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb                92 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	skb_trim(skb, len);
skb                93 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	rmnet_deliver_skb(skb);
skb                97 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	kfree_skb(skb);
skb               101 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c rmnet_map_ingress_handler(struct sk_buff *skb,
skb               106 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	if (skb->dev->type == ARPHRD_ETHER) {
skb               107 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
skb               108 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 			kfree_skb(skb);
skb               112 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		skb_push(skb, ETH_HLEN);
skb               116 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
skb               119 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		consume_skb(skb);
skb               121 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		__rmnet_map_ingress_handler(skb, port);
skb               125 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c static int rmnet_map_egress_handler(struct sk_buff *skb,
skb               140 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	if (skb_headroom(skb) < required_headroom) {
skb               141 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
skb               146 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		rmnet_map_checksum_uplink_packet(skb, orig_dev);
skb               148 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
skb               154 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	skb->protocol = htons(ETH_P_MAP);
skb               160 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
skb               162 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	if (skb_mac_header_was_set(skb))
skb               163 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		skb_push(skb, skb->mac_len);
skb               166 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		skb->dev = bridge_dev;
skb               167 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		dev_queue_xmit(skb);
skb               179 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	struct sk_buff *skb = *pskb;
skb               183 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	if (!skb)
skb               186 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	if (skb->pkt_type == PACKET_LOOPBACK)
skb               189 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	dev = skb->dev;
skb               194 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		rmnet_map_ingress_handler(skb, port);
skb               197 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 		rmnet_bridge_handler(skb, port->bridge_ep);
skb               209 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c void rmnet_egress_handler(struct sk_buff *skb)
skb               216 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	sk_pacing_shift_update(skb->sk, 8);
skb               218 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	orig_dev = skb->dev;
skb               220 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	skb->dev = priv->real_dev;
skb               223 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	port = rmnet_get_port_rcu(skb->dev);
skb               227 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
skb               230 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	rmnet_vnd_tx_fixup(skb, orig_dev);
skb               232 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	dev_queue_xmit(skb);
skb               237 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	kfree_skb(skb);
skb                12 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h void rmnet_egress_handler(struct sk_buff *skb);
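Deaggregated MAP frames carry no link-layer header, so rmnet_set_skb_proto() above recovers the L3 protocol from the IP version nibble of the first payload byte, defaulting to ETH_P_MAP. A self-contained sketch of the same dispatch:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Classify a headerless MAP payload by its IP version nibble. */
static void set_proto_from_version_nibble(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case 0x40:				/* IPv4 */
		skb->protocol = htons(ETH_P_IP);
		break;
	case 0x60:				/* IPv6 */
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:				/* leave as raw MAP */
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}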
skb                55 drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
skb                57 drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
skb                59 drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
skb                60 drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
skb                61 drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
skb                11 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
skb                20 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 	mux_id = RMNET_MAP_GET_MUX_ID(skb);
skb                23 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 		kfree_skb(skb);
skb                29 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 		kfree_skb(skb);
skb                41 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 		kfree_skb(skb);
skb                48 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c static void rmnet_map_send_ack(struct sk_buff *skb,
skb                53 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 	struct net_device *dev = skb->dev;
skb                56 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 		skb_trim(skb,
skb                57 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 			 skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
skb                59 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 	skb->protocol = htons(ETH_P_MAP);
skb                61 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 	cmd = RMNET_MAP_GET_CMD_START(skb);
skb                65 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 	dev->netdev_ops->ndo_start_xmit(skb, dev);
skb                72 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
skb                78 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 	cmd = RMNET_MAP_GET_CMD_START(skb);
skb                83 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 		rc = rmnet_map_do_flow_control(skb, port, 1);
skb                87 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 		rc = rmnet_map_do_flow_control(skb, port, 0);
skb                92 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 		kfree_skb(skb);
skb                96 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c 		rmnet_map_send_ack(skb, rc, port);
skb                41 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
skb                51 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	ip4h = (struct iphdr *)(skb->data);
skb                58 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	txporthdr = skb->data + ip4h->ihl * 4;
skb               114 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
skb               125 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	ip6h = (struct ipv6hdr *)(skb->data);
skb               127 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	txporthdr = skb->data + sizeof(struct ipv6hdr);
skb               138 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 			       (int)(txporthdr - (void *)(skb->data))));
skb               198 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 			      struct sk_buff *skb)
skb               203 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	offset = htons((__force u16)(skb_transport_header(skb) -
skb               206 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	ul_header->csum_insert_offset = skb->csum_offset;
skb               217 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	skb->ip_summed = CHECKSUM_NONE;
skb               240 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 			      struct sk_buff *skb)
skb               245 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	offset = htons((__force u16)(skb_transport_header(skb) -
skb               248 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	ul_header->csum_insert_offset = skb->csum_offset;
skb               260 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	skb->ip_summed = CHECKSUM_NONE;
skb               270 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
skb               277 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	map_datalen = skb->len - hdrlen;
skb               279 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 			skb_push(skb, sizeof(struct rmnet_map_header));
skb               292 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	if (skb_tailroom(skb) < padding)
skb               295 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	padbytes = (u8 *)skb_put(skb, padding);
skb               311 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
skb               318 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	if (skb->len == 0)
skb               321 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	maph = (struct rmnet_map_header *)skb->data;
skb               327 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	if (((int)skb->len - (int)packet_len) < 0)
skb               340 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	memcpy(skbn->data, skb->data, packet_len);
skb               341 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	skb_pull(skb, packet_len);
skb               352 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
skb               354 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	struct rmnet_priv *priv = netdev_priv(skb->dev);
skb               357 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
skb               362 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
skb               369 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               370 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
skb               371 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               373 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
skb               389 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
skb               397 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
skb               403 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               407 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb               408 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
skb               410 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               412 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
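rmnet_map_add_map_header() above pushes a MAP header in front of the payload and pads the payload out to the aggregation alignment, recording pad length, mux ID, and total length in the header. A simplified sketch; map_hdr is a fixed-layout stand-in for struct rmnet_map_header (the real one uses endian-dependent bitfields), and 4-byte padding is an assumed policy:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Simplified stand-in for struct rmnet_map_header. */
struct map_hdr {
	u8 flags_pad;	/* CD bit and pad length, packed */
	u8 mux_id;
	__be16 pkt_len;	/* payload length including padding */
};

static struct map_hdr *map_push_header(struct sk_buff *skb, u8 mux_id)
{
	u32 datalen = skb->len;
	u32 pad = (4 - (datalen & 3)) & 3;	/* align payload to 4 bytes */
	struct map_hdr *mh;

	if (skb_headroom(skb) < sizeof(*mh) || skb_tailroom(skb) < pad)
		return NULL;

	mh = (struct map_hdr *)skb_push(skb, sizeof(*mh));
	memset(mh, 0, sizeof(*mh));

	if (pad)
		memset(skb_put(skb, pad), 0, pad);

	mh->flags_pad = pad & 0x3f;
	mh->mux_id = mux_id;
	mh->pkt_len = htons(datalen + pad);
	return mh;
}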
skb                18 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
skb                27 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c 	pcpu_ptr->stats.rx_bytes += skb->len;
skb                31 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
skb                40 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c 	pcpu_ptr->stats.tx_bytes += skb->len;
skb                46 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
skb                53 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c 		rmnet_egress_handler(skb);
skb                56 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c 		kfree_skb(skb);
skb                17 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
skb                18 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
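rmnet_vnd_rx_fixup()/rmnet_vnd_tx_fixup() above bump per-cpu byte and packet counters under a u64_stats sequence. A minimal sketch of that counting pattern with a hypothetical counter block:

#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-cpu counter block, like rmnet's pcpu_stats. */
struct pcpu_sw_stats {
	u64 rx_pkts;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

static void rx_count(struct pcpu_sw_stats __percpu *stats,
		     const struct sk_buff *skb)
{
	struct pcpu_sw_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->rx_pkts++;
	s->rx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);
}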
skb               323 drivers/net/ethernet/rdc/r6040.c 	struct sk_buff *skb;
skb               332 drivers/net/ethernet/rdc/r6040.c 		skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
skb               333 drivers/net/ethernet/rdc/r6040.c 		if (!skb) {
skb               337 drivers/net/ethernet/rdc/r6040.c 		desc->skb_ptr = skb;
skb               795 drivers/net/ethernet/rdc/r6040.c static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
skb               803 drivers/net/ethernet/rdc/r6040.c 	if (skb_put_padto(skb, ETH_ZLEN) < 0)
skb               820 drivers/net/ethernet/rdc/r6040.c 	descptr->len = skb->len;
skb               821 drivers/net/ethernet/rdc/r6040.c 	descptr->skb_ptr = skb;
skb               823 drivers/net/ethernet/rdc/r6040.c 		skb->data, skb->len, PCI_DMA_TODEVICE));
skb               826 drivers/net/ethernet/rdc/r6040.c 	skb_tx_timestamp(skb);
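The r6040.c transmit path pads runt frames with skb_put_padto() before building the descriptor; on failure the helper has already freed the skb, so the only correct response is to report the packet as handled. A short sketch of that contract:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Pad runt frames to the Ethernet minimum before queuing them;
 * skb_put_padto() frees the skb itself on failure. */
static netdev_tx_t pad_and_queue(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;	/* skb already freed */

	/* ... fill descriptor, ring doorbell ... */
	skb_tx_timestamp(skb);
	return NETDEV_TX_OK;
}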
skb               418 drivers/net/ethernet/realtek/8139cp.c static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
skb               423 drivers/net/ethernet/realtek/8139cp.c 	skb->protocol = eth_type_trans (skb, cp->dev);
skb               426 drivers/net/ethernet/realtek/8139cp.c 	cp->dev->stats.rx_bytes += skb->len;
skb               429 drivers/net/ethernet/realtek/8139cp.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
skb               431 drivers/net/ethernet/realtek/8139cp.c 	napi_gro_receive(&cp->napi, skb);
skb               475 drivers/net/ethernet/realtek/8139cp.c 		struct sk_buff *skb, *new_skb;
skb               479 drivers/net/ethernet/realtek/8139cp.c 		skb = cp->rx_skb[rx_tail];
skb               480 drivers/net/ethernet/realtek/8139cp.c 		BUG_ON(!skb);
skb               529 drivers/net/ethernet/realtek/8139cp.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               531 drivers/net/ethernet/realtek/8139cp.c 			skb_checksum_none_assert(skb);
skb               533 drivers/net/ethernet/realtek/8139cp.c 		skb_put(skb, len);
skb               537 drivers/net/ethernet/realtek/8139cp.c 		cp_rx_skb(cp, skb, desc);
skb               656 drivers/net/ethernet/realtek/8139cp.c 		struct sk_buff *skb;
skb               664 drivers/net/ethernet/realtek/8139cp.c 		skb = cp->tx_skb[tx_tail];
skb               665 drivers/net/ethernet/realtek/8139cp.c 		BUG_ON(!skb);
skb               688 drivers/net/ethernet/realtek/8139cp.c 				cp->dev->stats.tx_bytes += skb->len;
skb               692 drivers/net/ethernet/realtek/8139cp.c 			bytes_compl += skb->len;
skb               694 drivers/net/ethernet/realtek/8139cp.c 			dev_consume_skb_irq(skb);
skb               709 drivers/net/ethernet/realtek/8139cp.c static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
skb               711 drivers/net/ethernet/realtek/8139cp.c 	return skb_vlan_tag_present(skb) ?
skb               712 drivers/net/ethernet/realtek/8139cp.c 		TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
skb               715 drivers/net/ethernet/realtek/8139cp.c static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
skb               725 drivers/net/ethernet/realtek/8139cp.c 		this_frag = &skb_shinfo(skb)->frags[frag];
skb               731 drivers/net/ethernet/realtek/8139cp.c static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
skb               744 drivers/net/ethernet/realtek/8139cp.c 	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
skb               753 drivers/net/ethernet/realtek/8139cp.c 	mss = skb_shinfo(skb)->gso_size;
skb               761 drivers/net/ethernet/realtek/8139cp.c 	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
skb               765 drivers/net/ethernet/realtek/8139cp.c 	else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               766 drivers/net/ethernet/realtek/8139cp.c 		const struct iphdr *ip = ip_hdr(skb);
skb               778 drivers/net/ethernet/realtek/8139cp.c 	if (skb_shinfo(skb)->nr_frags == 0) {
skb               783 drivers/net/ethernet/realtek/8139cp.c 		len = skb->len;
skb               784 drivers/net/ethernet/realtek/8139cp.c 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
skb               797 drivers/net/ethernet/realtek/8139cp.c 		cp->tx_skb[entry] = skb;
skb               800 drivers/net/ethernet/realtek/8139cp.c 			  entry, skb->len);
skb               811 drivers/net/ethernet/realtek/8139cp.c 		first_len = skb_headlen(skb);
skb               812 drivers/net/ethernet/realtek/8139cp.c 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
skb               817 drivers/net/ethernet/realtek/8139cp.c 		cp->tx_skb[entry] = skb;
skb               819 drivers/net/ethernet/realtek/8139cp.c 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
skb               820 drivers/net/ethernet/realtek/8139cp.c 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
skb               831 drivers/net/ethernet/realtek/8139cp.c 				unwind_tx_frag_mapping(cp, skb, first_entry, entry);
skb               839 drivers/net/ethernet/realtek/8139cp.c 			if (frag == skb_shinfo(skb)->nr_frags - 1)
skb               851 drivers/net/ethernet/realtek/8139cp.c 			cp->tx_skb[entry] = skb;
skb               865 drivers/net/ethernet/realtek/8139cp.c 			  first_entry, entry, skb->len);
skb               869 drivers/net/ethernet/realtek/8139cp.c 	netdev_sent_queue(dev, skb->len);
skb               880 drivers/net/ethernet/realtek/8139cp.c 	dev_kfree_skb_any(skb);
skb              1064 drivers/net/ethernet/realtek/8139cp.c 		struct sk_buff *skb;
skb              1067 drivers/net/ethernet/realtek/8139cp.c 		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
skb              1068 drivers/net/ethernet/realtek/8139cp.c 		if (!skb)
skb              1071 drivers/net/ethernet/realtek/8139cp.c 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
skb              1074 drivers/net/ethernet/realtek/8139cp.c 			kfree_skb(skb);
skb              1077 drivers/net/ethernet/realtek/8139cp.c 		cp->rx_skb[i] = skb;
skb              1149 drivers/net/ethernet/realtek/8139cp.c 			struct sk_buff *skb = cp->tx_skb[i];
skb              1156 drivers/net/ethernet/realtek/8139cp.c 				dev_kfree_skb_any(skb);
skb              1856 drivers/net/ethernet/realtek/8139cp.c static netdev_features_t cp_features_check(struct sk_buff *skb,
skb              1860 drivers/net/ethernet/realtek/8139cp.c 	if (skb_shinfo(skb)->gso_size > MSSMask)
skb              1863 drivers/net/ethernet/realtek/8139cp.c 	return vlan_features_check(skb, features);
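Note: cp_tx_vlan_tag() above encodes the idiom also seen later in r8169: the 802.1Q TCI is host-order in the skb, but the TX descriptor expects it byte-swapped inside a word that is later wrapped in cpu_to_le32(). A sketch with an illustrative flag value (demo_* names are assumptions):

#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/swab.h>

#define DEMO_TX_VLAN_TAG 0x00020000u	/* illustrative descriptor flag */

static inline u32 demo_tx_vlan_tag(const struct sk_buff *skb)
{
	/* Insert the byte-swapped TCI only when a tag was offloaded. */
	return skb_vlan_tag_present(skb) ?
		DEMO_TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb)) : 0;
}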
skb               647 drivers/net/ethernet/realtek/8139too.c static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
skb              1714 drivers/net/ethernet/realtek/8139too.c static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
skb              1720 drivers/net/ethernet/realtek/8139too.c 	unsigned int len = skb->len;
skb              1730 drivers/net/ethernet/realtek/8139too.c 		skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
skb              1731 drivers/net/ethernet/realtek/8139too.c 		dev_kfree_skb_any(skb);
skb              1733 drivers/net/ethernet/realtek/8139too.c 		dev_kfree_skb_any(skb);
skb              1915 drivers/net/ethernet/realtek/8139too.c static inline void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
skb              1921 drivers/net/ethernet/realtek/8139too.c 		skb_copy_to_linear_data(skb, ring + offset, left);
skb              1922 drivers/net/ethernet/realtek/8139too.c 		skb_copy_to_linear_data_offset(skb, left, ring, size - left);
skb              1924 drivers/net/ethernet/realtek/8139too.c 		skb_copy_to_linear_data(skb, ring + offset, size);
skb              1964 drivers/net/ethernet/realtek/8139too.c 		struct sk_buff *skb;
skb              2039 drivers/net/ethernet/realtek/8139too.c 		skb = napi_alloc_skb(&tp->napi, pkt_size);
skb              2040 drivers/net/ethernet/realtek/8139too.c 		if (likely(skb)) {
skb              2042 drivers/net/ethernet/realtek/8139too.c 			wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
skb              2044 drivers/net/ethernet/realtek/8139too.c 			skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
skb              2046 drivers/net/ethernet/realtek/8139too.c 			skb_put (skb, pkt_size);
skb              2048 drivers/net/ethernet/realtek/8139too.c 			skb->protocol = eth_type_trans (skb, dev);
skb              2055 drivers/net/ethernet/realtek/8139too.c 			netif_receive_skb (skb);
skb               200 drivers/net/ethernet/realtek/atp.c static netdev_tx_t atp_send_packet(struct sk_buff *skb,
skb               551 drivers/net/ethernet/realtek/atp.c static netdev_tx_t atp_send_packet(struct sk_buff *skb,
skb               559 drivers/net/ethernet/realtek/atp.c 	length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
skb               571 drivers/net/ethernet/realtek/atp.c 	write_packet(ioaddr, length, skb->data, length-skb->len, dev->if_port);
skb               585 drivers/net/ethernet/realtek/atp.c 	dev_kfree_skb (skb);
skb               783 drivers/net/ethernet/realtek/atp.c 		struct sk_buff *skb;
skb               785 drivers/net/ethernet/realtek/atp.c 		skb = netdev_alloc_skb(dev, pkt_len + 2);
skb               786 drivers/net/ethernet/realtek/atp.c 		if (skb == NULL) {
skb               791 drivers/net/ethernet/realtek/atp.c 		skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb               792 drivers/net/ethernet/realtek/atp.c 		read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
skb               793 drivers/net/ethernet/realtek/atp.c 		skb->protocol = eth_type_trans(skb, dev);
skb               794 drivers/net/ethernet/realtek/atp.c 		netif_rx(skb);
skb               608 drivers/net/ethernet/realtek/r8169_main.c 	struct sk_buff	*skb;
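Note: the atp.c receive lines above are the canonical non-DMA RX sequence: allocate, reserve two bytes so the IP header lands on a 16-byte boundary, copy into skb_put()'d space, classify with eth_type_trans(), and hand off with netif_rx(). A minimal sketch, with demo_read_block() standing in for the device-specific copy routine (read_block() in atp.c):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Placeholder for the hardware copy routine; fills `buf' with `len'
 * bytes from the NIC. */
extern void demo_read_block(struct net_device *dev, void *buf, int len);

static void demo_rx_packet(struct net_device *dev, int pkt_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + 2);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);			/* 16-byte align the IP header */
	demo_read_block(dev, skb_put(skb, pkt_len), pkt_len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}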
skb              1624 drivers/net/ethernet/realtek/r8169_main.c static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
skb              1626 drivers/net/ethernet/realtek/r8169_main.c 	return (skb_vlan_tag_present(skb)) ?
skb              1627 drivers/net/ethernet/realtek/r8169_main.c 		TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
skb              1630 drivers/net/ethernet/realtek/r8169_main.c static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
skb              1635 drivers/net/ethernet/realtek/r8169_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
skb              5638 drivers/net/ethernet/realtek/r8169_main.c 			struct sk_buff *skb = tx_skb->skb;
skb              5642 drivers/net/ethernet/realtek/r8169_main.c 			if (skb) {
skb              5643 drivers/net/ethernet/realtek/r8169_main.c 				dev_consume_skb_any(skb);
skb              5644 drivers/net/ethernet/realtek/r8169_main.c 				tx_skb->skb = NULL;
skb              5696 drivers/net/ethernet/realtek/r8169_main.c static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
skb              5699 drivers/net/ethernet/realtek/r8169_main.c 	struct skb_shared_info *info = skb_shinfo(skb);
skb              5732 drivers/net/ethernet/realtek/r8169_main.c 		tp->tx_skb[entry].skb = skb;
skb              5743 drivers/net/ethernet/realtek/r8169_main.c static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
skb              5745 drivers/net/ethernet/realtek/r8169_main.c 	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
skb              5752 drivers/net/ethernet/realtek/r8169_main.c static int msdn_giant_send_check(struct sk_buff *skb)
skb              5758 drivers/net/ethernet/realtek/r8169_main.c 	ret = skb_cow_head(skb, 0);
skb              5762 drivers/net/ethernet/realtek/r8169_main.c 	ipv6h = ipv6_hdr(skb);
skb              5763 drivers/net/ethernet/realtek/r8169_main.c 	th = tcp_hdr(skb);
skb              5771 drivers/net/ethernet/realtek/r8169_main.c static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
skb              5773 drivers/net/ethernet/realtek/r8169_main.c 	u32 mss = skb_shinfo(skb)->gso_size;
skb              5778 drivers/net/ethernet/realtek/r8169_main.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              5779 drivers/net/ethernet/realtek/r8169_main.c 		const struct iphdr *ip = ip_hdr(skb);
skb              5791 drivers/net/ethernet/realtek/r8169_main.c 				struct sk_buff *skb, u32 *opts)
skb              5793 drivers/net/ethernet/realtek/r8169_main.c 	u32 transport_offset = (u32)skb_transport_offset(skb);
skb              5794 drivers/net/ethernet/realtek/r8169_main.c 	u32 mss = skb_shinfo(skb)->gso_size;
skb              5797 drivers/net/ethernet/realtek/r8169_main.c 		switch (vlan_get_protocol(skb)) {
skb              5803 drivers/net/ethernet/realtek/r8169_main.c 			if (msdn_giant_send_check(skb))
skb              5816 drivers/net/ethernet/realtek/r8169_main.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              5819 drivers/net/ethernet/realtek/r8169_main.c 		switch (vlan_get_protocol(skb)) {
skb              5822 drivers/net/ethernet/realtek/r8169_main.c 			ip_protocol = ip_hdr(skb)->protocol;
skb              5827 drivers/net/ethernet/realtek/r8169_main.c 			ip_protocol = ipv6_hdr(skb)->nexthdr;
skb              5844 drivers/net/ethernet/realtek/r8169_main.c 		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
skb              5845 drivers/net/ethernet/realtek/r8169_main.c 			return !eth_skb_pad(skb);
skb              5880 drivers/net/ethernet/realtek/r8169_main.c static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
skb              5893 drivers/net/ethernet/realtek/r8169_main.c 	if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
skb              5901 drivers/net/ethernet/realtek/r8169_main.c 	opts[1] = rtl8169_tx_vlan_tag(skb);
skb              5905 drivers/net/ethernet/realtek/r8169_main.c 		if (!rtl8169_tso_csum_v2(tp, skb, opts))
skb              5908 drivers/net/ethernet/realtek/r8169_main.c 		rtl8169_tso_csum_v1(skb, opts);
skb              5911 drivers/net/ethernet/realtek/r8169_main.c 	len = skb_headlen(skb);
skb              5912 drivers/net/ethernet/realtek/r8169_main.c 	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
skb              5922 drivers/net/ethernet/realtek/r8169_main.c 	frags = rtl8169_xmit_frags(tp, skb, opts);
skb              5929 drivers/net/ethernet/realtek/r8169_main.c 		tp->tx_skb[entry].skb = skb;
skb              5934 drivers/net/ethernet/realtek/r8169_main.c 	skb_tx_timestamp(skb);
skb              5939 drivers/net/ethernet/realtek/r8169_main.c 	door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
skb              5979 drivers/net/ethernet/realtek/r8169_main.c 	dev_kfree_skb_any(skb);
skb              5989 drivers/net/ethernet/realtek/r8169_main.c static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
skb              5993 drivers/net/ethernet/realtek/r8169_main.c 	int transport_offset = skb_transport_offset(skb);
skb              5996 drivers/net/ethernet/realtek/r8169_main.c 	if (skb_is_gso(skb)) {
skb              6000 drivers/net/ethernet/realtek/r8169_main.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              6001 drivers/net/ethernet/realtek/r8169_main.c 		if (skb->len < ETH_ZLEN) {
skb              6019 drivers/net/ethernet/realtek/r8169_main.c 	return vlan_features_check(skb, features);
skb              6083 drivers/net/ethernet/realtek/r8169_main.c 		if (tx_skb->skb) {
skb              6085 drivers/net/ethernet/realtek/r8169_main.c 			bytes_compl += tx_skb->skb->len;
skb              6086 drivers/net/ethernet/realtek/r8169_main.c 			napi_consume_skb(tx_skb->skb, budget);
skb              6087 drivers/net/ethernet/realtek/r8169_main.c 			tx_skb->skb = NULL;
skb              6130 drivers/net/ethernet/realtek/r8169_main.c static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
skb              6136 drivers/net/ethernet/realtek/r8169_main.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              6138 drivers/net/ethernet/realtek/r8169_main.c 		skb_checksum_none_assert(skb);
skb              6178 drivers/net/ethernet/realtek/r8169_main.c 			struct sk_buff *skb;
skb              6195 drivers/net/ethernet/realtek/r8169_main.c 			skb = napi_alloc_skb(&tp->napi, pkt_size);
skb              6196 drivers/net/ethernet/realtek/r8169_main.c 			if (unlikely(!skb)) {
skb              6205 drivers/net/ethernet/realtek/r8169_main.c 			skb_copy_to_linear_data(skb, rx_buf, pkt_size);
skb              6206 drivers/net/ethernet/realtek/r8169_main.c 			skb->tail += pkt_size;
skb              6207 drivers/net/ethernet/realtek/r8169_main.c 			skb->len = pkt_size;
skb              6213 drivers/net/ethernet/realtek/r8169_main.c 			rtl8169_rx_csum(skb, status);
skb              6214 drivers/net/ethernet/realtek/r8169_main.c 			skb->protocol = eth_type_trans(skb, dev);
skb              6216 drivers/net/ethernet/realtek/r8169_main.c 			rtl8169_rx_vlan_tag(desc, skb);
skb              6218 drivers/net/ethernet/realtek/r8169_main.c 			if (skb->pkt_type == PACKET_MULTICAST)
skb              6221 drivers/net/ethernet/realtek/r8169_main.c 			napi_gro_receive(&tp->napi, skb);
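Note: rtl8169_tso_csum_v2() above keys the offload descriptor bits off vlan_get_protocol(), which sees through an accelerated VLAN tag in front of the IP header. A condensed sketch of that dispatch; the DEMO_* flag values are illustrative, not the chip's:

#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

#define DEMO_CSUM_IPV4	0x1	/* illustrative descriptor bits */
#define DEMO_CSUM_IPV6	0x2
#define DEMO_CSUM_TCP	0x4
#define DEMO_CSUM_UDP	0x8

static u32 demo_csum_opts(struct sk_buff *skb)
{
	u32 opts = 0;
	u8 ip_protocol;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		opts |= DEMO_CSUM_IPV4;
		ip_protocol = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		opts |= DEMO_CSUM_IPV6;
		ip_protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;	/* cannot offload; caller falls back */
	}
	if (ip_protocol == IPPROTO_TCP)
		opts |= DEMO_CSUM_TCP;
	else if (ip_protocol == IPPROTO_UDP)
		opts |= DEMO_CSUM_UDP;
	return opts;
}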
skb               966 drivers/net/ethernet/renesas/ravb.h 	struct sk_buff *skb;
skb                99 drivers/net/ethernet/renesas/ravb_main.c static void ravb_set_buffer_align(struct sk_buff *skb)
skb               101 drivers/net/ethernet/renesas/ravb_main.c 	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
skb               104 drivers/net/ethernet/renesas/ravb_main.c 		skb_reserve(skb, RAVB_ALIGN - reserve);
skb               341 drivers/net/ethernet/renesas/ravb_main.c 	struct sk_buff *skb;
skb               354 drivers/net/ethernet/renesas/ravb_main.c 		skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
skb               355 drivers/net/ethernet/renesas/ravb_main.c 		if (!skb)
skb               357 drivers/net/ethernet/renesas/ravb_main.c 		ravb_set_buffer_align(skb);
skb               358 drivers/net/ethernet/renesas/ravb_main.c 		priv->rx_skb[q][i] = skb;
skb               484 drivers/net/ethernet/renesas/ravb_main.c 	struct sk_buff *skb;
skb               501 drivers/net/ethernet/renesas/ravb_main.c 			skb = ts_skb->skb;
skb               506 drivers/net/ethernet/renesas/ravb_main.c 				skb_tstamp_tx(skb, &shhwtstamps);
skb               507 drivers/net/ethernet/renesas/ravb_main.c 				dev_consume_skb_any(skb);
skb               510 drivers/net/ethernet/renesas/ravb_main.c 				dev_kfree_skb_any(skb);
skb               517 drivers/net/ethernet/renesas/ravb_main.c static void ravb_rx_csum(struct sk_buff *skb)
skb               524 drivers/net/ethernet/renesas/ravb_main.c 	if (unlikely(skb->len < sizeof(__sum16)))
skb               526 drivers/net/ethernet/renesas/ravb_main.c 	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
skb               527 drivers/net/ethernet/renesas/ravb_main.c 	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
skb               528 drivers/net/ethernet/renesas/ravb_main.c 	skb->ip_summed = CHECKSUM_COMPLETE;
skb               529 drivers/net/ethernet/renesas/ravb_main.c 	skb_trim(skb, skb->len - sizeof(__sum16));
skb               541 drivers/net/ethernet/renesas/ravb_main.c 	struct sk_buff *skb;
skb               581 drivers/net/ethernet/renesas/ravb_main.c 			skb = priv->rx_skb[q][entry];
skb               592 drivers/net/ethernet/renesas/ravb_main.c 				shhwtstamps = skb_hwtstamps(skb);
skb               600 drivers/net/ethernet/renesas/ravb_main.c 			skb_put(skb, pkt_len);
skb               601 drivers/net/ethernet/renesas/ravb_main.c 			skb->protocol = eth_type_trans(skb, ndev);
skb               603 drivers/net/ethernet/renesas/ravb_main.c 				ravb_rx_csum(skb);
skb               604 drivers/net/ethernet/renesas/ravb_main.c 			napi_gro_receive(&priv->napi[q], skb);
skb               620 drivers/net/ethernet/renesas/ravb_main.c 			skb = netdev_alloc_skb(ndev,
skb               623 drivers/net/ethernet/renesas/ravb_main.c 			if (!skb)
skb               625 drivers/net/ethernet/renesas/ravb_main.c 			ravb_set_buffer_align(skb);
skb               626 drivers/net/ethernet/renesas/ravb_main.c 			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
skb               629 drivers/net/ethernet/renesas/ravb_main.c 			skb_checksum_none_assert(skb);
skb               636 drivers/net/ethernet/renesas/ravb_main.c 			priv->rx_skb[q][entry] = skb;
skb              1472 drivers/net/ethernet/renesas/ravb_main.c static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb              1476 drivers/net/ethernet/renesas/ravb_main.c 	u16 q = skb_get_queue_mapping(skb);
skb              1495 drivers/net/ethernet/renesas/ravb_main.c 	if (skb_put_padto(skb, ETH_ZLEN))
skb              1499 drivers/net/ethernet/renesas/ravb_main.c 	priv->tx_skb[q][entry / num_tx_desc] = skb;
skb              1504 drivers/net/ethernet/renesas/ravb_main.c 		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
skb              1521 drivers/net/ethernet/renesas/ravb_main.c 		memcpy(buffer, skb->data, len);
skb              1531 drivers/net/ethernet/renesas/ravb_main.c 		buffer = skb->data + len;
skb              1532 drivers/net/ethernet/renesas/ravb_main.c 		len = skb->len - len;
skb              1541 drivers/net/ethernet/renesas/ravb_main.c 		len = skb->len;
skb              1542 drivers/net/ethernet/renesas/ravb_main.c 		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
skb              1561 drivers/net/ethernet/renesas/ravb_main.c 		ts_skb->skb = skb_get(skb);
skb              1567 drivers/net/ethernet/renesas/ravb_main.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              1572 drivers/net/ethernet/renesas/ravb_main.c 	skb_tx_timestamp(skb);
skb              1598 drivers/net/ethernet/renesas/ravb_main.c 	dev_kfree_skb_any(skb);
skb              1603 drivers/net/ethernet/renesas/ravb_main.c static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
skb              1607 drivers/net/ethernet/renesas/ravb_main.c 	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
skb              1683 drivers/net/ethernet/renesas/ravb_main.c 		kfree_skb(ts_skb->skb);
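Note: the ravb lines above illustrate the TX hardware-timestamp handshake: at transmit time the driver takes an extra reference (skb_get()) and sets SKBTX_IN_PROGRESS; when the completion interrupt supplies the time, skb_tstamp_tx() delivers it to the socket and the reference is dropped. Sketched with hypothetical demo_* helpers:

#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* At xmit: keep the skb alive for the completion handler. */
static struct sk_buff *demo_tx_tstamp_start(struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	return skb_get(skb);	/* extra ref held until completion */
}

/* At completion: report the hardware time, then drop the reference. */
static void demo_tx_tstamp_complete(struct sk_buff *skb, ktime_t hwtime)
{
	struct skb_shared_hwtstamps shhwtstamps = { .hwtstamp = hwtime };

	skb_tstamp_tx(skb, &shhwtstamps);
	dev_consume_skb_any(skb);
}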
skb              1163 drivers/net/ethernet/renesas/sh_eth.c static void sh_eth_set_receive_align(struct sk_buff *skb)
skb              1165 drivers/net/ethernet/renesas/sh_eth.c 	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
skb              1168 drivers/net/ethernet/renesas/sh_eth.c 		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
skb              1357 drivers/net/ethernet/renesas/sh_eth.c 	struct sk_buff *skb;
skb              1377 drivers/net/ethernet/renesas/sh_eth.c 		skb = netdev_alloc_skb(ndev, skbuff_size);
skb              1378 drivers/net/ethernet/renesas/sh_eth.c 		if (skb == NULL)
skb              1380 drivers/net/ethernet/renesas/sh_eth.c 		sh_eth_set_receive_align(skb);
skb              1384 drivers/net/ethernet/renesas/sh_eth.c 		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
skb              1387 drivers/net/ethernet/renesas/sh_eth.c 			kfree_skb(skb);
skb              1390 drivers/net/ethernet/renesas/sh_eth.c 		mdp->rx_skbuff[i] = skb;
skb              1605 drivers/net/ethernet/renesas/sh_eth.c static void sh_eth_rx_csum(struct sk_buff *skb)
skb              1610 drivers/net/ethernet/renesas/sh_eth.c 	if (unlikely(skb->len < sizeof(__sum16)))
skb              1612 drivers/net/ethernet/renesas/sh_eth.c 	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
skb              1613 drivers/net/ethernet/renesas/sh_eth.c 	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
skb              1614 drivers/net/ethernet/renesas/sh_eth.c 	skb->ip_summed = CHECKSUM_COMPLETE;
skb              1615 drivers/net/ethernet/renesas/sh_eth.c 	skb_trim(skb, skb->len - sizeof(__sum16));
skb              1627 drivers/net/ethernet/renesas/sh_eth.c 	struct sk_buff *skb;
skb              1662 drivers/net/ethernet/renesas/sh_eth.c 		skb = mdp->rx_skbuff[entry];
skb              1678 drivers/net/ethernet/renesas/sh_eth.c 		} else	if (skb) {
skb              1686 drivers/net/ethernet/renesas/sh_eth.c 				skb_reserve(skb, NET_IP_ALIGN);
skb              1690 drivers/net/ethernet/renesas/sh_eth.c 			skb_put(skb, pkt_len);
skb              1691 drivers/net/ethernet/renesas/sh_eth.c 			skb->protocol = eth_type_trans(skb, ndev);
skb              1693 drivers/net/ethernet/renesas/sh_eth.c 				sh_eth_rx_csum(skb);
skb              1694 drivers/net/ethernet/renesas/sh_eth.c 			netif_receive_skb(skb);
skb              1713 drivers/net/ethernet/renesas/sh_eth.c 			skb = netdev_alloc_skb(ndev, skbuff_size);
skb              1714 drivers/net/ethernet/renesas/sh_eth.c 			if (skb == NULL)
skb              1716 drivers/net/ethernet/renesas/sh_eth.c 			sh_eth_set_receive_align(skb);
skb              1717 drivers/net/ethernet/renesas/sh_eth.c 			dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
skb              1720 drivers/net/ethernet/renesas/sh_eth.c 				kfree_skb(skb);
skb              1723 drivers/net/ethernet/renesas/sh_eth.c 			mdp->rx_skbuff[entry] = skb;
skb              1725 drivers/net/ethernet/renesas/sh_eth.c 			skb_checksum_none_assert(skb);
skb              2520 drivers/net/ethernet/renesas/sh_eth.c static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb              2539 drivers/net/ethernet/renesas/sh_eth.c 	if (skb_put_padto(skb, ETH_ZLEN))
skb              2543 drivers/net/ethernet/renesas/sh_eth.c 	mdp->tx_skbuff[entry] = skb;
skb              2547 drivers/net/ethernet/renesas/sh_eth.c 		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
skb              2548 drivers/net/ethernet/renesas/sh_eth.c 	dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
skb              2551 drivers/net/ethernet/renesas/sh_eth.c 		kfree_skb(skb);
skb              2555 drivers/net/ethernet/renesas/sh_eth.c 	txdesc->len  = cpu_to_le32(skb->len << 16);
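Note: ravb_rx_csum() and sh_eth_rx_csum() above implement the same trailer scheme: the MAC appends the 16-bit one's-complement sum after the frame; the driver lifts it into skb->csum, marks CHECKSUM_COMPLETE so the stack can verify L4 checksums against it, and trims the trailer off. Restated as one self-contained helper:

#include <linux/skbuff.h>
#include <net/checksum.h>
#include <asm/unaligned.h>

static void demo_rx_trailer_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* Too short to even hold the 2-byte trailer. */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));	/* drop the trailer */
}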
skb               672 drivers/net/ethernet/rocker/rocker_main.c 				      struct sk_buff *skb, size_t buf_len)
skb               678 drivers/net/ethernet/rocker/rocker_main.c 	dma_handle = pci_map_single(pdev, skb->data, buf_len,
skb               703 drivers/net/ethernet/rocker/rocker_main.c 	struct sk_buff *skb;
skb               713 drivers/net/ethernet/rocker/rocker_main.c 	skb = netdev_alloc_skb_ip_align(dev, buf_len);
skb               714 drivers/net/ethernet/rocker/rocker_main.c 	if (!skb)
skb               716 drivers/net/ethernet/rocker/rocker_main.c 	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
skb               718 drivers/net/ethernet/rocker/rocker_main.c 		dev_kfree_skb_any(skb);
skb               721 drivers/net/ethernet/rocker/rocker_main.c 	rocker_desc_cookie_ptr_set(desc_info, skb);
skb               744 drivers/net/ethernet/rocker/rocker_main.c 	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
skb               746 drivers/net/ethernet/rocker/rocker_main.c 	if (!skb)
skb               750 drivers/net/ethernet/rocker/rocker_main.c 	dev_kfree_skb_any(skb);
skb              1899 drivers/net/ethernet/rocker/rocker_main.c static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1915 drivers/net/ethernet/rocker/rocker_main.c 	rocker_desc_cookie_ptr_set(desc_info, skb);
skb              1921 drivers/net/ethernet/rocker/rocker_main.c 					  skb->data, skb_headlen(skb));
skb              1924 drivers/net/ethernet/rocker/rocker_main.c 	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
skb              1925 drivers/net/ethernet/rocker/rocker_main.c 		err = skb_linearize(skb);
skb              1930 drivers/net/ethernet/rocker/rocker_main.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1931 drivers/net/ethernet/rocker/rocker_main.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1955 drivers/net/ethernet/rocker/rocker_main.c 	dev_kfree_skb(skb);
skb              2427 drivers/net/ethernet/rocker/rocker_main.c 		struct sk_buff *skb;
skb              2435 drivers/net/ethernet/rocker/rocker_main.c 		skb = rocker_desc_cookie_ptr_get(desc_info);
skb              2438 drivers/net/ethernet/rocker/rocker_main.c 			rocker_port->dev->stats.tx_bytes += skb->len;
skb              2443 drivers/net/ethernet/rocker/rocker_main.c 		dev_kfree_skb_any(skb);
skb              2461 drivers/net/ethernet/rocker/rocker_main.c 	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
skb              2465 drivers/net/ethernet/rocker/rocker_main.c 	if (!skb)
skb              2477 drivers/net/ethernet/rocker/rocker_main.c 	skb_put(skb, rx_len);
skb              2478 drivers/net/ethernet/rocker/rocker_main.c 	skb->protocol = eth_type_trans(skb, rocker_port->dev);
skb              2481 drivers/net/ethernet/rocker/rocker_main.c 		skb->offload_fwd_mark = 1;
skb              2484 drivers/net/ethernet/rocker/rocker_main.c 	rocker_port->dev->stats.rx_bytes += skb->len;
skb              2486 drivers/net/ethernet/rocker/rocker_main.c 	netif_receive_skb(skb);
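Note: rocker_port_xmit() above shows the standard fallback when an skb carries more page fragments than one TX descriptor can describe: skb_linearize() collapses it into a single contiguous buffer, at the cost of a copy and a possible allocation failure. As a sketch (demo_* name is illustrative):

#include <linux/skbuff.h>

/* Returns 0 on success; skb_linearize() can fail under memory pressure,
 * in which case the caller should drop the packet. */
static int demo_fit_tx_frags(struct sk_buff *skb, int max_frags)
{
	if (skb_shinfo(skb)->nr_frags > max_frags)
		return skb_linearize(skb);
	return 0;
}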
skb               343 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	struct sk_buff *skb;
skb               345 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
skb               346 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	if (!skb)
skb               349 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	rx_ring->rx_skbuff[i] = skb;
skb               350 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
skb               355 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		dev_kfree_skb_any(skb);
skb               744 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		struct sk_buff *skb = tqueue->tx_skbuff[entry];
skb               765 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		if (likely(skb)) {
skb               766 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			dev_kfree_skb(skb);
skb              1239 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			      struct sk_buff *skb)
skb              1244 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tcp_hdr_len = tcp_hdrlen(skb);
skb              1245 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;
skb              1247 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	first_desc->tdes01 = dma_map_single(priv->device, skb->data,
skb              1255 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 					   skb->len - total_hdr_len);
skb              1266 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1271 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	unsigned txq_index = skb_get_queue_mapping(skb);
skb              1277 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1278 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	int no_pagedlen = skb_headlen(skb);
skb              1280 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	u16 cur_mss = skb_shinfo(skb)->gso_size;
skb              1286 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
skb              1289 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	if (unlikely(skb_vlan_tag_present(skb) ||
skb              1290 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		     ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              1314 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	tqueue->tx_skbuff[entry] = skb;
skb              1317 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		if (likely(skb_is_gso(skb))) {
skb              1336 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			sxgbe_tso_prepare(priv, first_desc, skb);
skb              1339 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 							 skb->data, no_pagedlen, DMA_TO_DEVICE);
skb              1350 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
skb              1407 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	dev->stats.tx_bytes += skb->len;
skb              1409 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              1412 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              1416 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	skb_tx_timestamp(skb);
skb              1443 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			struct sk_buff *skb;
skb              1445 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
skb              1447 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			if (unlikely(skb == NULL))
skb              1450 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			priv->rxq[qnum]->rx_skbuff[entry] = skb;
skb              1452 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 				dma_map_single(priv->device, skb->data, bfsize,
skb              1487 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		struct sk_buff *skb;
skb              1513 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		skb = priv->rxq[qnum]->rx_skbuff[entry];
skb              1515 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		if (unlikely(!skb))
skb              1518 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		prefetch(skb->data - NET_IP_ALIGN);
skb              1523 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		skb_put(skb, frame_len);
skb              1525 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		skb->ip_summed = checksum;
skb              1527 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			netif_receive_skb(skb);
skb              1529 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			napi_gro_receive(&priv->napi, skb);
skb                77 drivers/net/ethernet/seeq/ether3.c static netdev_tx_t	ether3_sendpacket(struct sk_buff *skb,
skb               483 drivers/net/ethernet/seeq/ether3.c ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
skb               486 drivers/net/ethernet/seeq/ether3.c 	unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
skb               490 drivers/net/ethernet/seeq/ether3.c 		dev_kfree_skb(skb);
skb               497 drivers/net/ethernet/seeq/ether3.c 	if (length != skb->len) {
skb               498 drivers/net/ethernet/seeq/ether3.c 		if (skb_padto(skb, length))
skb               521 drivers/net/ethernet/seeq/ether3.c 	ether3_writebuffer(dev, skb->data, length);
skb               537 drivers/net/ethernet/seeq/ether3.c 	dev_kfree_skb(skb);
skb               632 drivers/net/ethernet/seeq/ether3.c 			struct sk_buff *skb;
skb               637 drivers/net/ethernet/seeq/ether3.c 			skb = netdev_alloc_skb(dev, length + 2);
skb               638 drivers/net/ethernet/seeq/ether3.c 			if (skb) {
skb               641 drivers/net/ethernet/seeq/ether3.c 				skb_reserve(skb, 2);
skb               642 drivers/net/ethernet/seeq/ether3.c 				buf = skb_put(skb, length);
skb               649 drivers/net/ethernet/seeq/ether3.c 				skb->protocol = eth_type_trans(skb, dev);
skb               650 drivers/net/ethernet/seeq/ether3.c 				netif_rx(skb);
skb                71 drivers/net/ethernet/seeq/sgiseeq.c 	struct sk_buff *skb;
skb                77 drivers/net/ethernet/seeq/sgiseeq.c 	struct sk_buff *skb;
skb               198 drivers/net/ethernet/seeq/sgiseeq.c 		if (!sp->rx_desc[i].skb) {
skb               200 drivers/net/ethernet/seeq/sgiseeq.c 			struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
skb               202 drivers/net/ethernet/seeq/sgiseeq.c 			if (skb == NULL)
skb               204 drivers/net/ethernet/seeq/sgiseeq.c 			skb_reserve(skb, 2);
skb               206 drivers/net/ethernet/seeq/sgiseeq.c 						  skb->data - 2,
skb               208 drivers/net/ethernet/seeq/sgiseeq.c 			sp->rx_desc[i].skb = skb;
skb               226 drivers/net/ethernet/seeq/sgiseeq.c 		if (sp->tx_desc[i].skb) {
skb               227 drivers/net/ethernet/seeq/sgiseeq.c 			dev_kfree_skb(sp->tx_desc[i].skb);
skb               228 drivers/net/ethernet/seeq/sgiseeq.c 			sp->tx_desc[i].skb = NULL;
skb               234 drivers/net/ethernet/seeq/sgiseeq.c 		if (sp->rx_desc[i].skb) {
skb               235 drivers/net/ethernet/seeq/sgiseeq.c 			dev_kfree_skb(sp->rx_desc[i].skb);
skb               236 drivers/net/ethernet/seeq/sgiseeq.c 			sp->rx_desc[i].skb = NULL;
skb               342 drivers/net/ethernet/seeq/sgiseeq.c 	struct sk_buff *skb = NULL;
skb               355 drivers/net/ethernet/seeq/sgiseeq.c 		pkt_status = rd->skb->data[len];
skb               359 drivers/net/ethernet/seeq/sgiseeq.c 			if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
skb               361 drivers/net/ethernet/seeq/sgiseeq.c 					skb = rd->skb;
skb               364 drivers/net/ethernet/seeq/sgiseeq.c 						newskb = skb;
skb               365 drivers/net/ethernet/seeq/sgiseeq.c 						skb = NULL;
skb               370 drivers/net/ethernet/seeq/sgiseeq.c 					skb = netdev_alloc_skb_ip_align(dev, len);
skb               371 drivers/net/ethernet/seeq/sgiseeq.c 					if (skb)
skb               372 drivers/net/ethernet/seeq/sgiseeq.c 						skb_copy_to_linear_data(skb, rd->skb->data, len);
skb               374 drivers/net/ethernet/seeq/sgiseeq.c 					newskb = rd->skb;
skb               377 drivers/net/ethernet/seeq/sgiseeq.c 				if (skb) {
skb               378 drivers/net/ethernet/seeq/sgiseeq.c 					skb_put(skb, len);
skb               379 drivers/net/ethernet/seeq/sgiseeq.c 					skb->protocol = eth_type_trans(skb, dev);
skb               380 drivers/net/ethernet/seeq/sgiseeq.c 					netif_rx(skb);
skb               388 drivers/net/ethernet/seeq/sgiseeq.c 				newskb = rd->skb;
skb               392 drivers/net/ethernet/seeq/sgiseeq.c 			newskb = rd->skb;
skb               394 drivers/net/ethernet/seeq/sgiseeq.c 		rd->skb = newskb;
skb               489 drivers/net/ethernet/seeq/sgiseeq.c 		if (td->skb) {
skb               490 drivers/net/ethernet/seeq/sgiseeq.c 			dev_kfree_skb_any(td->skb);
skb               491 drivers/net/ethernet/seeq/sgiseeq.c 			td->skb = NULL;
skb               583 drivers/net/ethernet/seeq/sgiseeq.c sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               594 drivers/net/ethernet/seeq/sgiseeq.c 	len = skb->len;
skb               596 drivers/net/ethernet/seeq/sgiseeq.c 		if (skb_padto(skb, ETH_ZLEN)) {
skb               621 drivers/net/ethernet/seeq/sgiseeq.c 	td->skb = skb;
skb               622 drivers/net/ethernet/seeq/sgiseeq.c 	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
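Note: sgiseeq_rx() above uses copy-break: short frames are duplicated into a fresh, IP-aligned skb so the large ring buffer stays put, while long frames are passed up whole and the ring slot gets a newly allocated replacement. A sketch of the small-frame side; the threshold and demo_* name are illustrative only:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define DEMO_RX_COPYBREAK 256	/* illustrative; sgiseeq uses rx_copybreak */

/* Copy a short frame out of the ring buffer; the ring skb is left in
 * place for reuse. Returns NULL on allocation failure. */
static struct sk_buff *demo_rx_copy_small(struct net_device *dev,
					  const struct sk_buff *ring_skb,
					  int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (skb)
		skb_copy_to_linear_data(skb, ring_skb->data, len);
	return skb;
}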
skb              2323 drivers/net/ethernet/sfc/ef10.c 				struct sk_buff *skb,
skb              2336 drivers/net/ethernet/sfc/ef10.c 	mss = skb_shinfo(skb)->gso_size;
skb              2343 drivers/net/ethernet/sfc/ef10.c 	ip = ip_hdr(skb);
skb              2351 drivers/net/ethernet/sfc/ef10.c 		struct ipv6hdr *ipv6 = ipv6_hdr(skb);
skb              2357 drivers/net/ethernet/sfc/ef10.c 	tcp = tcp_hdr(skb);
skb                23 drivers/net/ethernet/sfc/efx.h netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
skb                25 drivers/net/ethernet/sfc/efx.h netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
skb               167 drivers/net/ethernet/sfc/efx.h int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
skb                28 drivers/net/ethernet/sfc/falcon/efx.h netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
skb                30 drivers/net/ethernet/sfc/falcon/efx.h netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
skb               164 drivers/net/ethernet/sfc/falcon/efx.h int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
skb               145 drivers/net/ethernet/sfc/falcon/net_driver.h 	const struct sk_buff *skb;
skb               428 drivers/net/ethernet/sfc/falcon/rx.c 	struct sk_buff *skb;
skb               430 drivers/net/ethernet/sfc/falcon/rx.c 	skb = napi_get_frags(napi);
skb               431 drivers/net/ethernet/sfc/falcon/rx.c 	if (unlikely(!skb)) {
skb               440 drivers/net/ethernet/sfc/falcon/rx.c 		skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
skb               442 drivers/net/ethernet/sfc/falcon/rx.c 	skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
skb               446 drivers/net/ethernet/sfc/falcon/rx.c 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
skb               450 drivers/net/ethernet/sfc/falcon/rx.c 		skb->len += rx_buf->len;
skb               451 drivers/net/ethernet/sfc/falcon/rx.c 		if (skb_shinfo(skb)->nr_frags == n_frags)
skb               457 drivers/net/ethernet/sfc/falcon/rx.c 	skb->data_len = skb->len;
skb               458 drivers/net/ethernet/sfc/falcon/rx.c 	skb->truesize += n_frags * efx->rx_buffer_truesize;
skb               460 drivers/net/ethernet/sfc/falcon/rx.c 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
skb               472 drivers/net/ethernet/sfc/falcon/rx.c 	struct sk_buff *skb;
skb               475 drivers/net/ethernet/sfc/falcon/rx.c 	skb = netdev_alloc_skb(efx->net_dev,
skb               478 drivers/net/ethernet/sfc/falcon/rx.c 	if (unlikely(skb == NULL)) {
skb               485 drivers/net/ethernet/sfc/falcon/rx.c 	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
skb               487 drivers/net/ethernet/sfc/falcon/rx.c 	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
skb               488 drivers/net/ethernet/sfc/falcon/rx.c 	__skb_put(skb, hdr_len);
skb               496 drivers/net/ethernet/sfc/falcon/rx.c 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
skb               500 drivers/net/ethernet/sfc/falcon/rx.c 			skb->len += rx_buf->len;
skb               501 drivers/net/ethernet/sfc/falcon/rx.c 			skb->data_len += rx_buf->len;
skb               502 drivers/net/ethernet/sfc/falcon/rx.c 			if (skb_shinfo(skb)->nr_frags == n_frags)
skb               513 drivers/net/ethernet/sfc/falcon/rx.c 	skb->truesize += n_frags * efx->rx_buffer_truesize;
skb               516 drivers/net/ethernet/sfc/falcon/rx.c 	skb->protocol = eth_type_trans(skb, efx->net_dev);
skb               518 drivers/net/ethernet/sfc/falcon/rx.c 	skb_mark_napi_id(skb, &channel->napi_str);
skb               520 drivers/net/ethernet/sfc/falcon/rx.c 	return skb;
skb               614 drivers/net/ethernet/sfc/falcon/rx.c 	struct sk_buff *skb;
skb               617 drivers/net/ethernet/sfc/falcon/rx.c 	skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
skb               618 drivers/net/ethernet/sfc/falcon/rx.c 	if (unlikely(skb == NULL)) {
skb               625 drivers/net/ethernet/sfc/falcon/rx.c 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
skb               628 drivers/net/ethernet/sfc/falcon/rx.c 	skb_checksum_none_assert(skb);
skb               630 drivers/net/ethernet/sfc/falcon/rx.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               633 drivers/net/ethernet/sfc/falcon/rx.c 		if (channel->type->receive_skb(channel, skb))
skb               637 drivers/net/ethernet/sfc/falcon/rx.c 	netif_receive_skb(skb);
skb               830 drivers/net/ethernet/sfc/falcon/rx.c int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
skb               842 drivers/net/ethernet/sfc/falcon/rx.c 	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
skb               415 drivers/net/ethernet/sfc/falcon/selftest.c 	struct sk_buff *skb;
skb               423 drivers/net/ethernet/sfc/falcon/selftest.c 		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
skb               424 drivers/net/ethernet/sfc/falcon/selftest.c 		if (!skb)
skb               426 drivers/net/ethernet/sfc/falcon/selftest.c 		state->skbs[i] = skb;
skb               427 drivers/net/ethernet/sfc/falcon/selftest.c 		skb_get(skb);
skb               431 drivers/net/ethernet/sfc/falcon/selftest.c 		payload = skb_put(skb, sizeof(state->payload));
skb               440 drivers/net/ethernet/sfc/falcon/selftest.c 		rc = ef4_enqueue_skb(tx_queue, skb);
skb               451 drivers/net/ethernet/sfc/falcon/selftest.c 			kfree_skb(skb);
skb               471 drivers/net/ethernet/sfc/falcon/selftest.c 	struct sk_buff *skb;
skb               480 drivers/net/ethernet/sfc/falcon/selftest.c 		skb = state->skbs[i];
skb               481 drivers/net/ethernet/sfc/falcon/selftest.c 		if (skb && !skb_shared(skb))
skb               483 drivers/net/ethernet/sfc/falcon/selftest.c 		dev_kfree_skb(skb);
skb                70 drivers/net/ethernet/sfc/falcon/tx.c 		(*bytes_compl) += buffer->skb->len;
skb                71 drivers/net/ethernet/sfc/falcon/tx.c 		dev_consume_skb_any((struct sk_buff *)buffer->skb);
skb               148 drivers/net/ethernet/sfc/falcon/tx.c 				struct sk_buff *skb)
skb               151 drivers/net/ethernet/sfc/falcon/tx.c 	unsigned int copy_len = skb->len;
skb               164 drivers/net/ethernet/sfc/falcon/tx.c 	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
skb               173 drivers/net/ethernet/sfc/falcon/tx.c 	buffer->skb = skb;
skb               206 drivers/net/ethernet/sfc/falcon/tx.c static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
skb               215 drivers/net/ethernet/sfc/falcon/tx.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb               219 drivers/net/ethernet/sfc/falcon/tx.c 	len = skb_headlen(skb);
skb               220 drivers/net/ethernet/sfc/falcon/tx.c 	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
skb               246 drivers/net/ethernet/sfc/falcon/tx.c 			buffer->skb = skb;
skb               252 drivers/net/ethernet/sfc/falcon/tx.c 		fragment = &skb_shinfo(skb)->frags[frag_index++];
skb               296 drivers/net/ethernet/sfc/falcon/tx.c netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
skb               301 drivers/net/ethernet/sfc/falcon/tx.c 	skb_len = skb->len;
skb               302 drivers/net/ethernet/sfc/falcon/tx.c 	EF4_WARN_ON_PARANOID(skb_is_gso(skb));
skb               305 drivers/net/ethernet/sfc/falcon/tx.c 			(skb->data_len && skb_len <= EF4_TX_CB_SIZE)) {
skb               307 drivers/net/ethernet/sfc/falcon/tx.c 		if (ef4_enqueue_skb_copy(tx_queue, skb))
skb               314 drivers/net/ethernet/sfc/falcon/tx.c 	if (!data_mapped && (ef4_tx_map_data(tx_queue, skb)))
skb               345 drivers/net/ethernet/sfc/falcon/tx.c 	dev_kfree_skb_any(skb);
skb               393 drivers/net/ethernet/sfc/falcon/tx.c netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
skb               402 drivers/net/ethernet/sfc/falcon/tx.c 	index = skb_get_queue_mapping(skb);
skb               403 drivers/net/ethernet/sfc/falcon/tx.c 	type = skb->ip_summed == CHECKSUM_PARTIAL ? EF4_TXQ_TYPE_OFFLOAD : 0;
skb               410 drivers/net/ethernet/sfc/falcon/tx.c 	return ef4_enqueue_skb(tx_queue, skb);
skb                21 drivers/net/ethernet/sfc/falcon/tx.h int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
skb               149 drivers/net/ethernet/sfc/net_driver.h 	const struct sk_buff *skb;
skb              1577 drivers/net/ethernet/sfc/net_driver.h static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
skb              1579 drivers/net/ethernet/sfc/net_driver.h 	return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
skb              1581 drivers/net/ethernet/sfc/net_driver.h static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
skb              1583 drivers/net/ethernet/sfc/net_driver.h 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb               454 drivers/net/ethernet/sfc/nic.h bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
skb               458 drivers/net/ethernet/sfc/nic.h int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
skb               464 drivers/net/ethernet/sfc/nic.h 				   struct sk_buff *skb);
skb               466 drivers/net/ethernet/sfc/nic.h 					       struct sk_buff *skb)
skb               469 drivers/net/ethernet/sfc/nic.h 		__efx_rx_skb_attach_timestamp(channel, skb);
skb               342 drivers/net/ethernet/sfc/ptp.c 	void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb);
skb               837 drivers/net/ethernet/sfc/ptp.c 	struct sk_buff *skb;
skb               839 drivers/net/ethernet/sfc/ptp.c 	while ((skb = skb_dequeue(q))) {
skb               841 drivers/net/ethernet/sfc/ptp.c 		netif_receive_skb(skb);
skb              1088 drivers/net/ethernet/sfc/ptp.c static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
skb              1092 drivers/net/ethernet/sfc/ptp.c 	u8 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
skb              1096 drivers/net/ethernet/sfc/ptp.c 		efx_enqueue_skb(tx_queue, skb);
skb              1099 drivers/net/ethernet/sfc/ptp.c 		dev_kfree_skb_any(skb);
skb              1104 drivers/net/ethernet/sfc/ptp.c static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
skb              1114 drivers/net/ethernet/sfc/ptp.c 	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
skb              1115 drivers/net/ethernet/sfc/ptp.c 	if (skb_shinfo(skb)->nr_frags != 0) {
skb              1116 drivers/net/ethernet/sfc/ptp.c 		rc = skb_linearize(skb);
skb              1121 drivers/net/ethernet/sfc/ptp.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1122 drivers/net/ethernet/sfc/ptp.c 		rc = skb_checksum_help(skb);
skb              1126 drivers/net/ethernet/sfc/ptp.c 	skb_copy_from_linear_data(skb,
skb              1129 drivers/net/ethernet/sfc/ptp.c 				  skb->len);
skb              1131 drivers/net/ethernet/sfc/ptp.c 			  ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
skb              1142 drivers/net/ethernet/sfc/ptp.c 	skb_tstamp_tx(skb, &timestamps);
skb              1147 drivers/net/ethernet/sfc/ptp.c 	dev_kfree_skb_any(skb);
skb              1180 drivers/net/ethernet/sfc/ptp.c 					      struct sk_buff *skb)
skb              1198 drivers/net/ethernet/sfc/ptp.c 	match = (struct efx_ptp_match *)skb->cb;
skb              1210 drivers/net/ethernet/sfc/ptp.c 			timestamps = skb_hwtstamps(skb);
skb              1231 drivers/net/ethernet/sfc/ptp.c 	struct sk_buff *skb;
skb              1233 drivers/net/ethernet/sfc/ptp.c 	while ((skb = skb_dequeue(&ptp->rxq))) {
skb              1236 drivers/net/ethernet/sfc/ptp.c 		match = (struct efx_ptp_match *)skb->cb;
skb              1238 drivers/net/ethernet/sfc/ptp.c 			__skb_queue_tail(q, skb);
skb              1239 drivers/net/ethernet/sfc/ptp.c 		} else if (efx_ptp_match_rx(efx, skb) ==
skb              1241 drivers/net/ethernet/sfc/ptp.c 			__skb_queue_tail(q, skb);
skb              1245 drivers/net/ethernet/sfc/ptp.c 			__skb_queue_tail(q, skb);
skb              1248 drivers/net/ethernet/sfc/ptp.c 			skb_queue_head(&ptp->rxq, skb);
skb              1255 drivers/net/ethernet/sfc/ptp.c static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
skb              1258 drivers/net/ethernet/sfc/ptp.c 	netif_receive_skb(skb);
skb              1404 drivers/net/ethernet/sfc/ptp.c 	struct sk_buff *skb;
skb              1418 drivers/net/ethernet/sfc/ptp.c 	while ((skb = skb_dequeue(&ptp_data->txq)))
skb              1419 drivers/net/ethernet/sfc/ptp.c 		ptp_data->xmit_skb(efx, skb);
skb              1421 drivers/net/ethernet/sfc/ptp.c 	while ((skb = __skb_dequeue(&tempq)))
skb              1422 drivers/net/ethernet/sfc/ptp.c 		efx_ptp_process_rx(efx, skb);
skb              1598 drivers/net/ethernet/sfc/ptp.c bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
skb              1602 drivers/net/ethernet/sfc/ptp.c 		skb->len >= PTP_MIN_LENGTH &&
skb              1603 drivers/net/ethernet/sfc/ptp.c 		skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
skb              1604 drivers/net/ethernet/sfc/ptp.c 		likely(skb->protocol == htons(ETH_P_IP)) &&
skb              1605 drivers/net/ethernet/sfc/ptp.c 		skb_transport_header_was_set(skb) &&
skb              1606 drivers/net/ethernet/sfc/ptp.c 		skb_network_header_len(skb) >= sizeof(struct iphdr) &&
skb              1607 drivers/net/ethernet/sfc/ptp.c 		ip_hdr(skb)->protocol == IPPROTO_UDP &&
skb              1608 drivers/net/ethernet/sfc/ptp.c 		skb_headlen(skb) >=
skb              1609 drivers/net/ethernet/sfc/ptp.c 		skb_transport_offset(skb) + sizeof(struct udphdr) &&
skb              1610 drivers/net/ethernet/sfc/ptp.c 		udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
skb              1617 drivers/net/ethernet/sfc/ptp.c static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
skb              1621 drivers/net/ethernet/sfc/ptp.c 	struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
skb              1630 drivers/net/ethernet/sfc/ptp.c 		if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
skb              1633 drivers/net/ethernet/sfc/ptp.c 		data = skb->data;
skb              1645 drivers/net/ethernet/sfc/ptp.c 		if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
skb              1648 drivers/net/ethernet/sfc/ptp.c 		data = skb->data;
skb              1694 drivers/net/ethernet/sfc/ptp.c 	skb_queue_tail(&ptp->rxq, skb);
skb              1704 drivers/net/ethernet/sfc/ptp.c int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
skb              1708 drivers/net/ethernet/sfc/ptp.c 	skb_queue_tail(&ptp->txq, skb);
skb              1710 drivers/net/ethernet/sfc/ptp.c 	if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) &&
skb              1711 drivers/net/ethernet/sfc/ptp.c 	    (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM))
skb              1712 drivers/net/ethernet/sfc/ptp.c 		efx_xmit_hwtstamp_pending(skb);
skb              2005 drivers/net/ethernet/sfc/ptp.c 				   struct sk_buff *skb)
skb              2016 drivers/net/ethernet/sfc/ptp.c 	pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, skb_mac_header(skb));
skb              2054 drivers/net/ethernet/sfc/ptp.c 	timestamps = skb_hwtstamps(skb);
skb               416 drivers/net/ethernet/sfc/rx.c 	struct sk_buff *skb;
skb               418 drivers/net/ethernet/sfc/rx.c 	skb = napi_get_frags(napi);
skb               419 drivers/net/ethernet/sfc/rx.c 	if (unlikely(!skb)) {
skb               428 drivers/net/ethernet/sfc/rx.c 		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
skb               430 drivers/net/ethernet/sfc/rx.c 	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
skb               432 drivers/net/ethernet/sfc/rx.c 	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
skb               435 drivers/net/ethernet/sfc/rx.c 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
skb               439 drivers/net/ethernet/sfc/rx.c 		skb->len += rx_buf->len;
skb               440 drivers/net/ethernet/sfc/rx.c 		if (skb_shinfo(skb)->nr_frags == n_frags)
skb               446 drivers/net/ethernet/sfc/rx.c 	skb->data_len = skb->len;
skb               447 drivers/net/ethernet/sfc/rx.c 	skb->truesize += n_frags * efx->rx_buffer_truesize;
skb               449 drivers/net/ethernet/sfc/rx.c 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
skb               461 drivers/net/ethernet/sfc/rx.c 	struct sk_buff *skb;
skb               464 drivers/net/ethernet/sfc/rx.c 	skb = netdev_alloc_skb(efx->net_dev,
skb               467 drivers/net/ethernet/sfc/rx.c 	if (unlikely(skb == NULL)) {
skb               474 drivers/net/ethernet/sfc/rx.c 	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
skb               476 drivers/net/ethernet/sfc/rx.c 	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
skb               477 drivers/net/ethernet/sfc/rx.c 	__skb_put(skb, hdr_len);
skb               485 drivers/net/ethernet/sfc/rx.c 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
skb               489 drivers/net/ethernet/sfc/rx.c 			skb->len += rx_buf->len;
skb               490 drivers/net/ethernet/sfc/rx.c 			skb->data_len += rx_buf->len;
skb               491 drivers/net/ethernet/sfc/rx.c 			if (skb_shinfo(skb)->nr_frags == n_frags)
skb               502 drivers/net/ethernet/sfc/rx.c 	skb->truesize += n_frags * efx->rx_buffer_truesize;
skb               505 drivers/net/ethernet/sfc/rx.c 	skb->protocol = eth_type_trans(skb, efx->net_dev);
skb               507 drivers/net/ethernet/sfc/rx.c 	skb_mark_napi_id(skb, &channel->napi_str);
skb               509 drivers/net/ethernet/sfc/rx.c 	return skb;
skb               603 drivers/net/ethernet/sfc/rx.c 	struct sk_buff *skb;
skb               606 drivers/net/ethernet/sfc/rx.c 	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
skb               607 drivers/net/ethernet/sfc/rx.c 	if (unlikely(skb == NULL)) {
skb               614 drivers/net/ethernet/sfc/rx.c 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
skb               617 drivers/net/ethernet/sfc/rx.c 	skb_checksum_none_assert(skb);
skb               619 drivers/net/ethernet/sfc/rx.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               620 drivers/net/ethernet/sfc/rx.c 		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
skb               623 drivers/net/ethernet/sfc/rx.c 	efx_rx_skb_attach_timestamp(channel, skb);
skb               626 drivers/net/ethernet/sfc/rx.c 		if (channel->type->receive_skb(channel, skb))
skb               632 drivers/net/ethernet/sfc/rx.c 		list_add_tail(&skb->list, channel->rx_list);
skb               635 drivers/net/ethernet/sfc/rx.c 		netif_receive_skb(skb);
skb               890 drivers/net/ethernet/sfc/rx.c int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
skb               913 drivers/net/ethernet/sfc/rx.c 	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
skb               415 drivers/net/ethernet/sfc/selftest.c 	struct sk_buff *skb;
skb               423 drivers/net/ethernet/sfc/selftest.c 		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
skb               424 drivers/net/ethernet/sfc/selftest.c 		if (!skb)
skb               426 drivers/net/ethernet/sfc/selftest.c 		state->skbs[i] = skb;
skb               427 drivers/net/ethernet/sfc/selftest.c 		skb_get(skb);
skb               431 drivers/net/ethernet/sfc/selftest.c 		payload = skb_put(skb, sizeof(state->payload));
skb               440 drivers/net/ethernet/sfc/selftest.c 		rc = efx_enqueue_skb(tx_queue, skb);
skb               451 drivers/net/ethernet/sfc/selftest.c 			kfree_skb(skb);
skb               471 drivers/net/ethernet/sfc/selftest.c 	struct sk_buff *skb;
skb               480 drivers/net/ethernet/sfc/selftest.c 		skb = state->skbs[i];
skb               481 drivers/net/ethernet/sfc/selftest.c 		if (skb && !skb_shared(skb))
skb               483 drivers/net/ethernet/sfc/selftest.c 		dev_kfree_skb(skb);
skb                77 drivers/net/ethernet/sfc/tx.c 		struct sk_buff *skb = (struct sk_buff *)buffer->skb;
skb                81 drivers/net/ethernet/sfc/tx.c 		(*bytes_compl) += skb->len;
skb                89 drivers/net/ethernet/sfc/tx.c 			skb_tstamp_tx(skb, &hwtstamp);
skb                94 drivers/net/ethernet/sfc/tx.c 		dev_consume_skb_any((struct sk_buff *)buffer->skb);
skb               165 drivers/net/ethernet/sfc/tx.c 				struct sk_buff *skb)
skb               167 drivers/net/ethernet/sfc/tx.c 	unsigned int copy_len = skb->len;
skb               180 drivers/net/ethernet/sfc/tx.c 	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
skb               184 drivers/net/ethernet/sfc/tx.c 	buffer->skb = skb;
skb               262 drivers/net/ethernet/sfc/tx.c static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
skb               268 drivers/net/ethernet/sfc/tx.c 	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
skb               271 drivers/net/ethernet/sfc/tx.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
skb               272 drivers/net/ethernet/sfc/tx.c 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
skb               282 drivers/net/ethernet/sfc/tx.c 	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
skb               286 drivers/net/ethernet/sfc/tx.c 			       struct sk_buff *skb)
skb               297 drivers/net/ethernet/sfc/tx.c 	if (skb_shinfo(skb)->nr_frags) {
skb               305 drivers/net/ethernet/sfc/tx.c 		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
skb               315 drivers/net/ethernet/sfc/tx.c 		__iowrite64_copy(tx_queue->piobuf, skb->data,
skb               316 drivers/net/ethernet/sfc/tx.c 				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
skb               319 drivers/net/ethernet/sfc/tx.c 	buffer->skb = skb;
skb               326 drivers/net/ethernet/sfc/tx.c 			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
skb               360 drivers/net/ethernet/sfc/tx.c static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
skb               370 drivers/net/ethernet/sfc/tx.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb               374 drivers/net/ethernet/sfc/tx.c 	len = skb_headlen(skb);
skb               375 drivers/net/ethernet/sfc/tx.c 	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
skb               387 drivers/net/ethernet/sfc/tx.c 		size_t header_len = skb_transport_header(skb) - skb->data +
skb               388 drivers/net/ethernet/sfc/tx.c 				(tcp_hdr(skb)->doff << 2u);
skb               416 drivers/net/ethernet/sfc/tx.c 			buffer->skb = skb;
skb               422 drivers/net/ethernet/sfc/tx.c 		fragment = &skb_shinfo(skb)->frags[frag_index++];
skb               463 drivers/net/ethernet/sfc/tx.c 			       struct sk_buff *skb)
skb               467 drivers/net/ethernet/sfc/tx.c 	segments = skb_gso_segment(skb, 0);
skb               471 drivers/net/ethernet/sfc/tx.c 	dev_consume_skb_any(skb);
skb               472 drivers/net/ethernet/sfc/tx.c 	skb = segments;
skb               474 drivers/net/ethernet/sfc/tx.c 	while (skb) {
skb               475 drivers/net/ethernet/sfc/tx.c 		next = skb->next;
skb               476 drivers/net/ethernet/sfc/tx.c 		skb->next = NULL;
skb               478 drivers/net/ethernet/sfc/tx.c 		efx_enqueue_skb(tx_queue, skb);
skb               479 drivers/net/ethernet/sfc/tx.c 		skb = next;
skb               501 drivers/net/ethernet/sfc/tx.c netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
skb               510 drivers/net/ethernet/sfc/tx.c 	skb_len = skb->len;
skb               511 drivers/net/ethernet/sfc/tx.c 	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
skb               521 drivers/net/ethernet/sfc/tx.c 		rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
skb               523 drivers/net/ethernet/sfc/tx.c 			rc = efx_tx_tso_fallback(tx_queue, skb);
skb               534 drivers/net/ethernet/sfc/tx.c 		if (efx_enqueue_skb_pio(tx_queue, skb))
skb               539 drivers/net/ethernet/sfc/tx.c 	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
skb               541 drivers/net/ethernet/sfc/tx.c 		if (efx_enqueue_skb_copy(tx_queue, skb))
skb               548 drivers/net/ethernet/sfc/tx.c 	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
skb               582 drivers/net/ethernet/sfc/tx.c 	dev_kfree_skb_any(skb);
skb               644 drivers/net/ethernet/sfc/tx.c netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
skb               654 drivers/net/ethernet/sfc/tx.c 	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
skb               655 drivers/net/ethernet/sfc/tx.c 	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
skb               656 drivers/net/ethernet/sfc/tx.c 		return efx_ptp_tx(efx, skb);
skb               659 drivers/net/ethernet/sfc/tx.c 	index = skb_get_queue_mapping(skb);
skb               660 drivers/net/ethernet/sfc/tx.c 	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
skb               667 drivers/net/ethernet/sfc/tx.c 	return efx_enqueue_skb(tx_queue, skb);
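
efx_tx_map_data() above maps the linear header with dma_map_single() and then walks skb_shinfo(skb)->frags for the paged data. A minimal sketch of that head-then-fragments loop, assuming a hypothetical post_desc() writes the hardware descriptor (error unwinding is elided):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int map_skb_for_tx(struct device *dma_dev, struct sk_buff *skb)
{
	unsigned int i;
	dma_addr_t addr;

	/* linear header first */
	addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
			      DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, addr))
		return -ENOMEM;
	/* post_desc(addr, skb_headlen(skb)); */

	/* then each paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addr = skb_frag_dma_map(dma_dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, addr))
			return -ENOMEM;	/* real code unmaps earlier buffers */
		/* post_desc(addr, skb_frag_size(frag)); */
	}
	return 0;
}
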
skb                21 drivers/net/ethernet/sfc/tx.h int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
skb               143 drivers/net/ethernet/sfc/tx_tso.c static __be16 efx_tso_check_protocol(struct sk_buff *skb)
skb               145 drivers/net/ethernet/sfc/tx_tso.c 	__be16 protocol = skb->protocol;
skb               147 drivers/net/ethernet/sfc/tx_tso.c 	EFX_WARN_ON_ONCE_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
skb               150 drivers/net/ethernet/sfc/tx_tso.c 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
skb               156 drivers/net/ethernet/sfc/tx_tso.c 		EFX_WARN_ON_ONCE_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
skb               159 drivers/net/ethernet/sfc/tx_tso.c 		EFX_WARN_ON_ONCE_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
skb               161 drivers/net/ethernet/sfc/tx_tso.c 	EFX_WARN_ON_ONCE_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) +
skb               162 drivers/net/ethernet/sfc/tx_tso.c 				   (tcp_hdr(skb)->doff << 2u)) >
skb               163 drivers/net/ethernet/sfc/tx_tso.c 				  skb_headlen(skb));
skb               171 drivers/net/ethernet/sfc/tx_tso.c 		     const struct sk_buff *skb)
skb               177 drivers/net/ethernet/sfc/tx_tso.c 	st->ip_off = skb_network_header(skb) - skb->data;
skb               178 drivers/net/ethernet/sfc/tx_tso.c 	st->tcp_off = skb_transport_header(skb) - skb->data;
skb               179 drivers/net/ethernet/sfc/tx_tso.c 	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
skb               180 drivers/net/ethernet/sfc/tx_tso.c 	in_len = skb_headlen(skb) - header_len;
skb               185 drivers/net/ethernet/sfc/tx_tso.c 		st->ipv4_id = ntohs(ip_hdr(skb)->id);
skb               190 drivers/net/ethernet/sfc/tx_tso.c 	st->seqnum = ntohl(tcp_hdr(skb)->seq);
skb               192 drivers/net/ethernet/sfc/tx_tso.c 	EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->urg);
skb               193 drivers/net/ethernet/sfc/tx_tso.c 	EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->syn);
skb               194 drivers/net/ethernet/sfc/tx_tso.c 	EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->rst);
skb               196 drivers/net/ethernet/sfc/tx_tso.c 	st->out_len = skb->len - header_len;
skb               198 drivers/net/ethernet/sfc/tx_tso.c 	dma_addr = dma_map_single(dma_dev, skb->data,
skb               199 drivers/net/ethernet/sfc/tx_tso.c 				  skb_headlen(skb), DMA_TO_DEVICE);
skb               201 drivers/net/ethernet/sfc/tx_tso.c 	st->header_unmap_len = skb_headlen(skb);
skb               233 drivers/net/ethernet/sfc/tx_tso.c 					  const struct sk_buff *skb,
skb               257 drivers/net/ethernet/sfc/tx_tso.c 		buffer->skb = skb;
skb               286 drivers/net/ethernet/sfc/tx_tso.c 				const struct sk_buff *skb,
skb               291 drivers/net/ethernet/sfc/tx_tso.c 	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
skb               295 drivers/net/ethernet/sfc/tx_tso.c 		st->packet_space = skb_shinfo(skb)->gso_size;
skb               307 drivers/net/ethernet/sfc/tx_tso.c 	tcp_flags = ((u8 *)tcp_hdr(skb))[TCP_FLAGS_OFFSET] & ~tcp_flags_mask;
skb               341 drivers/net/ethernet/sfc/tx_tso.c 	st->seqnum += skb_shinfo(skb)->gso_size;
skb               363 drivers/net/ethernet/sfc/tx_tso.c 			struct sk_buff *skb,
skb               373 drivers/net/ethernet/sfc/tx_tso.c 	prefetch(skb->data);
skb               376 drivers/net/ethernet/sfc/tx_tso.c 	state.protocol = efx_tso_check_protocol(skb);
skb               380 drivers/net/ethernet/sfc/tx_tso.c 	rc = tso_start(&state, efx, tx_queue, skb);
skb               386 drivers/net/ethernet/sfc/tx_tso.c 		EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1);
skb               389 drivers/net/ethernet/sfc/tx_tso.c 				      skb_shinfo(skb)->frags + frag_i);
skb               397 drivers/net/ethernet/sfc/tx_tso.c 	rc = tso_start_new_packet(tx_queue, skb, &state);
skb               404 drivers/net/ethernet/sfc/tx_tso.c 		tso_fill_packet_with_fragment(tx_queue, skb, &state);
skb               408 drivers/net/ethernet/sfc/tx_tso.c 			if (++frag_i >= skb_shinfo(skb)->nr_frags)
skb               412 drivers/net/ethernet/sfc/tx_tso.c 					      skb_shinfo(skb)->frags + frag_i);
skb               419 drivers/net/ethernet/sfc/tx_tso.c 			rc = tso_start_new_packet(tx_queue, skb, &state);
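
tso_start() above derives the protocol header length as the transport-header offset plus the TCP data offset (doff, counted in 32-bit words); everything past that point is payload to be resegmented. The same arithmetic, isolated as a hedged sketch (tcp_hdrlen() is the in-tree equivalent of doff << 2):

#include <linux/skbuff.h>
#include <linux/tcp.h>

static unsigned int tso_header_len(const struct sk_buff *skb)
{
	/* bytes from start of frame to end of TCP header */
	return skb_transport_offset(skb) + (tcp_hdr(skb)->doff << 2);
}

static unsigned int tso_payload_len(const struct sk_buff *skb)
{
	return skb->len - tso_header_len(skb);	/* out_len in tso_start() */
}
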
skb               115 drivers/net/ethernet/sgi/ioc3-eth.c static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb               134 drivers/net/ethernet/sgi/ioc3-eth.c static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb,
skb               160 drivers/net/ethernet/sgi/ioc3-eth.c 	*skb = new_skb;
skb               472 drivers/net/ethernet/sgi/ioc3-eth.c static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len)
skb               474 drivers/net/ethernet/sgi/ioc3-eth.c 	struct ethhdr *eh = eth_hdr(skb);
skb               537 drivers/net/ethernet/sgi/ioc3-eth.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               543 drivers/net/ethernet/sgi/ioc3-eth.c 	struct sk_buff *skb, *new_skb;
skb               554 drivers/net/ethernet/sgi/ioc3-eth.c 	skb = ip->rx_skbs[rx_entry];
skb               555 drivers/net/ethernet/sgi/ioc3-eth.c 	rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
skb               562 drivers/net/ethernet/sgi/ioc3-eth.c 			skb_put(skb, len);
skb               563 drivers/net/ethernet/sgi/ioc3-eth.c 			skb->protocol = eth_type_trans(skb, dev);
skb               570 drivers/net/ethernet/sgi/ioc3-eth.c 				new_skb = skb;
skb               576 drivers/net/ethernet/sgi/ioc3-eth.c 				ioc3_tcpudp_checksum(skb,
skb               583 drivers/net/ethernet/sgi/ioc3-eth.c 			netif_rx(skb);
skb               594 drivers/net/ethernet/sgi/ioc3-eth.c 			new_skb = skb;
skb               611 drivers/net/ethernet/sgi/ioc3-eth.c 		skb = ip->rx_skbs[rx_entry];
skb               612 drivers/net/ethernet/sgi/ioc3-eth.c 		rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
skb               626 drivers/net/ethernet/sgi/ioc3-eth.c 	struct sk_buff *skb;
skb               639 drivers/net/ethernet/sgi/ioc3-eth.c 		skb = ip->tx_skbs[o_entry];
skb               640 drivers/net/ethernet/sgi/ioc3-eth.c 		bytes += skb->len;
skb               641 drivers/net/ethernet/sgi/ioc3-eth.c 		dev_consume_skb_irq(skb);
skb               824 drivers/net/ethernet/sgi/ioc3-eth.c 	struct sk_buff *skb;
skb               828 drivers/net/ethernet/sgi/ioc3-eth.c 		skb = ip->tx_skbs[i];
skb               829 drivers/net/ethernet/sgi/ioc3-eth.c 		if (skb) {
skb               832 drivers/net/ethernet/sgi/ioc3-eth.c 			dev_kfree_skb_any(skb);
skb               843 drivers/net/ethernet/sgi/ioc3-eth.c 	struct sk_buff *skb;
skb               849 drivers/net/ethernet/sgi/ioc3-eth.c 		skb = ip->rx_skbs[n_entry];
skb               850 drivers/net/ethernet/sgi/ioc3-eth.c 		if (skb) {
skb               854 drivers/net/ethernet/sgi/ioc3-eth.c 			dev_kfree_skb_any(skb);
skb              1367 drivers/net/ethernet/sgi/ioc3-eth.c static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1383 drivers/net/ethernet/sgi/ioc3-eth.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1384 drivers/net/ethernet/sgi/ioc3-eth.c 		const struct iphdr *ih = ip_hdr(skb);
skb              1393 drivers/net/ethernet/sgi/ioc3-eth.c 		eh = (u16 *)skb->data;
skb              1411 drivers/net/ethernet/sgi/ioc3-eth.c 			udp_hdr(skb)->check = csum;
skb              1415 drivers/net/ethernet/sgi/ioc3-eth.c 			tcp_hdr(skb)->check = csum;
skb              1423 drivers/net/ethernet/sgi/ioc3-eth.c 	data = (unsigned long)skb->data;
skb              1424 drivers/net/ethernet/sgi/ioc3-eth.c 	len = skb->len;
skb              1431 drivers/net/ethernet/sgi/ioc3-eth.c 		skb_copy_from_linear_data(skb, desc->data, skb->len);
skb              1449 drivers/net/ethernet/sgi/ioc3-eth.c 		d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE);
skb              1465 drivers/net/ethernet/sgi/ioc3-eth.c 		d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE);
skb              1473 drivers/net/ethernet/sgi/ioc3-eth.c 	ip->tx_skbs[produce] = skb;			/* Remember skb */
skb              1488 drivers/net/ethernet/sgi/ioc3-eth.c 	dev_kfree_skb_any(skb);
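
ioc3_start_xmit() above takes two routes: a frame short enough to fit in the descriptor's inline data area is copied with skb_copy_from_linear_data(), anything larger is DMA-mapped. A hedged sketch of that split; DESC_DATA_LEN and the descriptor writes are illustrative:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#define DESC_DATA_LEN 104	/* illustrative inline-data capacity */

static int xmit_copy_or_map(struct device *dma_dev, struct sk_buff *skb,
			    void *inline_buf)
{
	dma_addr_t d;

	if (skb->len <= DESC_DATA_LEN) {
		/* tiny frame: copy straight into the descriptor */
		skb_copy_from_linear_data(skb, inline_buf, skb->len);
		return 0;
	}

	d = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, d))
		return -ENOMEM;
	/* write (d, skb->len) into the hardware descriptor here */
	return 0;
}
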
skb               379 drivers/net/ethernet/sgi/meth.c 	struct sk_buff *skb;
skb               411 drivers/net/ethernet/sgi/meth.c 				skb = priv->rx_skbs[priv->rx_write];
skb               413 drivers/net/ethernet/sgi/meth.c 				skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC);
skb               414 drivers/net/ethernet/sgi/meth.c 				if (!skb) {
skb               418 drivers/net/ethernet/sgi/meth.c 					skb = priv->rx_skbs[priv->rx_write];
skb               423 drivers/net/ethernet/sgi/meth.c 					skb_reserve(skb, METH_RX_HEAD);
skb               426 drivers/net/ethernet/sgi/meth.c 					priv->rx_skbs[priv->rx_write] = skb;
skb               435 drivers/net/ethernet/sgi/meth.c 			skb = priv->rx_skbs[priv->rx_write];
skb               452 drivers/net/ethernet/sgi/meth.c 		priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head;
skb               480 drivers/net/ethernet/sgi/meth.c 	struct sk_buff *skb;
skb               490 drivers/net/ethernet/sgi/meth.c 		skb = priv->tx_skbs[priv->tx_read];
skb               499 drivers/net/ethernet/sgi/meth.c 				dev->stats.tx_bytes += skb->len;
skb               523 drivers/net/ethernet/sgi/meth.c 		dev_consume_skb_irq(skb);
skb               608 drivers/net/ethernet/sgi/meth.c 				  struct sk_buff *skb)
skb               611 drivers/net/ethernet/sgi/meth.c 	int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
skb               615 drivers/net/ethernet/sgi/meth.c 	skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len);
skb               616 drivers/net/ethernet/sgi/meth.c 	if (skb->len < len)
skb               617 drivers/net/ethernet/sgi/meth.c 		memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len);
skb               621 drivers/net/ethernet/sgi/meth.c 				  struct sk_buff *skb)
skb               624 drivers/net/ethernet/sgi/meth.c 	void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7);
skb               625 drivers/net/ethernet/sgi/meth.c 	int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data);
skb               626 drivers/net/ethernet/sgi/meth.c 	int buffer_len = skb->len - unaligned_len;
skb               629 drivers/net/ethernet/sgi/meth.c 	desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1);
skb               633 drivers/net/ethernet/sgi/meth.c 		skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
skb               646 drivers/net/ethernet/sgi/meth.c 				  struct sk_buff *skb)
skb               649 drivers/net/ethernet/sgi/meth.c 	void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7);
skb               650 drivers/net/ethernet/sgi/meth.c 	void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data);
skb               651 drivers/net/ethernet/sgi/meth.c 	int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data);
skb               653 drivers/net/ethernet/sgi/meth.c 	int buffer2_len = skb->len - buffer1_len - unaligned_len;
skb               656 drivers/net/ethernet/sgi/meth.c 	desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2 | (skb->len - 1);
skb               659 drivers/net/ethernet/sgi/meth.c 		skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
skb               676 drivers/net/ethernet/sgi/meth.c static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
skb               679 drivers/net/ethernet/sgi/meth.c 	priv->tx_skbs[priv->tx_write] = skb;
skb               680 drivers/net/ethernet/sgi/meth.c 	if (skb->len <= 120) {
skb               682 drivers/net/ethernet/sgi/meth.c 		meth_tx_short_prepare(priv, skb);
skb               683 drivers/net/ethernet/sgi/meth.c 	} else if (PAGE_ALIGN((unsigned long)skb->data) !=
skb               684 drivers/net/ethernet/sgi/meth.c 		   PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) {
skb               686 drivers/net/ethernet/sgi/meth.c 		meth_tx_2page_prepare(priv, skb);
skb               689 drivers/net/ethernet/sgi/meth.c 		meth_tx_1page_prepare(priv, skb);
skb               699 drivers/net/ethernet/sgi/meth.c static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
skb               709 drivers/net/ethernet/sgi/meth.c 	meth_add_to_tx_ring(priv, skb);
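
meth_tx_1page_prepare() and meth_tx_2page_prepare() above round skb->data up to an 8-byte boundary, copy the misaligned prefix into the descriptor, and DMA only the aligned remainder. The alignment arithmetic as a hedged sketch:

#include <linux/skbuff.h>

static void split_for_dma(const struct sk_buff *skb, void **aligned,
			  int *copy_len, int *dma_len)
{
	/* round the start address up to the next 8-byte boundary */
	*aligned = (void *)(((unsigned long)skb->data + 7) & ~7UL);
	/* bytes before the boundary must be copied by the CPU */
	*copy_len = (int)((unsigned long)*aligned - (unsigned long)skb->data);
	/* the aligned tail can be handed to the DMA engine */
	*dma_len = skb->len - *copy_len;
}
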
skb               760 drivers/net/ethernet/silan/sc92031.c 		struct sk_buff *skb;
skb               787 drivers/net/ethernet/silan/sc92031.c 		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
skb               788 drivers/net/ethernet/silan/sc92031.c 		if (unlikely(!skb)) {
skb               796 drivers/net/ethernet/silan/sc92031.c 			skb_put_data(skb, rx_ring + rx_ring_offset,
skb               798 drivers/net/ethernet/silan/sc92031.c 			skb_put_data(skb, rx_ring,
skb               801 drivers/net/ethernet/silan/sc92031.c 			skb_put_data(skb, rx_ring + rx_ring_offset, pkt_size);
skb               804 drivers/net/ethernet/silan/sc92031.c 		skb->protocol = eth_type_trans(skb, dev);
skb               805 drivers/net/ethernet/silan/sc92031.c 		netif_rx(skb);
skb               931 drivers/net/ethernet/silan/sc92031.c static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
skb               940 drivers/net/ethernet/silan/sc92031.c 	if (unlikely(skb->len > TX_BUF_SIZE)) {
skb               956 drivers/net/ethernet/silan/sc92031.c 	skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
skb               958 drivers/net/ethernet/silan/sc92031.c 	len = skb->len;
skb               985 drivers/net/ethernet/silan/sc92031.c 	dev_consume_skb_any(skb);
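
sc92031_start_xmit() above never maps the skb at all: skb_copy_and_csum_dev() copies the frame into a fixed per-entry bounce buffer, completing any deferred checksum on the way, so the skb can be consumed immediately. A hedged sketch of that bounce-buffer transmit:

#include <linux/skbuff.h>

static void tx_bounce(struct sk_buff *skb, u8 *tx_buf, unsigned int *hw_len)
{
	*hw_len = skb->len;			/* length to program into hw */
	skb_copy_and_csum_dev(skb, tx_buf);	/* copy + finish checksum */
	dev_consume_skb_any(skb);		/* transmitted: consume, not drop */
}
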
skb               491 drivers/net/ethernet/sis/sis190.c 	struct sk_buff *skb;
skb               494 drivers/net/ethernet/sis/sis190.c 	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
skb               495 drivers/net/ethernet/sis/sis190.c 	if (unlikely(!skb))
skb               497 drivers/net/ethernet/sis/sis190.c 	mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
skb               503 drivers/net/ethernet/sis/sis190.c 	return skb;
skb               506 drivers/net/ethernet/sis/sis190.c 	dev_kfree_skb_any(skb);
skb               535 drivers/net/ethernet/sis/sis190.c 	struct sk_buff *skb;
skb               541 drivers/net/ethernet/sis/sis190.c 	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
skb               542 drivers/net/ethernet/sis/sis190.c 	if (!skb)
skb               547 drivers/net/ethernet/sis/sis190.c 	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
skb               548 drivers/net/ethernet/sis/sis190.c 	*sk_buff = skb;
skb               599 drivers/net/ethernet/sis/sis190.c 			struct sk_buff *skb = tp->Rx_skbuff[entry];
skb               614 drivers/net/ethernet/sis/sis190.c 			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
skb               625 drivers/net/ethernet/sis/sis190.c 			skb_put(skb, pkt_size);
skb               626 drivers/net/ethernet/sis/sis190.c 			skb->protocol = eth_type_trans(skb, dev);
skb               628 drivers/net/ethernet/sis/sis190.c 			sis190_rx_skb(skb);
skb               650 drivers/net/ethernet/sis/sis190.c static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
skb               655 drivers/net/ethernet/sis/sis190.c 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
skb               702 drivers/net/ethernet/sis/sis190.c 		struct sk_buff *skb;
skb               707 drivers/net/ethernet/sis/sis190.c 		skb = tp->Tx_skbuff[entry];
skb               711 drivers/net/ethernet/sis/sis190.c 			stats->tx_bytes += skb->len;
skb               715 drivers/net/ethernet/sis/sis190.c 		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
skb               717 drivers/net/ethernet/sis/sis190.c 		dev_consume_skb_irq(skb);
skb              1109 drivers/net/ethernet/sis/sis190.c 		struct sk_buff *skb = tp->Tx_skbuff[i];
skb              1111 drivers/net/ethernet/sis/sis190.c 		if (!skb)
skb              1114 drivers/net/ethernet/sis/sis190.c 		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
skb              1116 drivers/net/ethernet/sis/sis190.c 		dev_kfree_skb(skb);
skb              1171 drivers/net/ethernet/sis/sis190.c static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
skb              1180 drivers/net/ethernet/sis/sis190.c 	if (unlikely(skb->len < ETH_ZLEN)) {
skb              1181 drivers/net/ethernet/sis/sis190.c 		if (skb_padto(skb, ETH_ZLEN)) {
skb              1187 drivers/net/ethernet/sis/sis190.c 		len = skb->len;
skb              1200 drivers/net/ethernet/sis/sis190.c 	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
skb              1207 drivers/net/ethernet/sis/sis190.c 	tp->Tx_skbuff[entry] = skb;
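
sis190_start_xmit() above pads runt frames before mapping them: skb_padto() extends the buffer to ETH_ZLEN and, importantly, frees the skb itself on failure, so the caller simply returns. A hedged sketch of that guard:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t tx_pad_short(struct sk_buff *skb)
{
	unsigned int len = skb->len;

	if (unlikely(len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;	/* skb already freed */
		len = ETH_ZLEN;
	}
	/* dma_map_single(..., skb->data, len, DMA_TO_DEVICE) follows */
	return NETDEV_TX_OK;
}
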
skb               228 drivers/net/ethernet/sis/sis900.c static netdev_tx_t sis900_start_xmit(struct sk_buff *skb,
skb              1180 drivers/net/ethernet/sis/sis900.c 		struct sk_buff *skb;
skb              1182 drivers/net/ethernet/sis/sis900.c 		if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
skb              1189 drivers/net/ethernet/sis/sis900.c 		sis_priv->rx_skbuff[i] = skb;
skb              1192 drivers/net/ethernet/sis/sis900.c 				skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
skb              1195 drivers/net/ethernet/sis/sis900.c 			dev_kfree_skb(skb);
skb              1561 drivers/net/ethernet/sis/sis900.c 		struct sk_buff *skb = sis_priv->tx_skbuff[i];
skb              1563 drivers/net/ethernet/sis/sis900.c 		if (skb) {
skb              1565 drivers/net/ethernet/sis/sis900.c 				sis_priv->tx_ring[i].bufptr, skb->len,
skb              1567 drivers/net/ethernet/sis/sis900.c 			dev_kfree_skb_irq(skb);
skb              1599 drivers/net/ethernet/sis/sis900.c sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
skb              1612 drivers/net/ethernet/sis/sis900.c 	sis_priv->tx_skbuff[entry] = skb;
skb              1616 drivers/net/ethernet/sis/sis900.c 		skb->data, skb->len, PCI_DMA_TODEVICE);
skb              1619 drivers/net/ethernet/sis/sis900.c 			dev_kfree_skb_any(skb);
skb              1625 drivers/net/ethernet/sis/sis900.c 	sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
skb              1653 drivers/net/ethernet/sis/sis900.c 		       net_dev->name, skb->data, (int)skb->len, entry);
skb              1778 drivers/net/ethernet/sis/sis900.c 			struct sk_buff *skb;
skb              1787 drivers/net/ethernet/sis/sis900.c 			if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
skb              1794 drivers/net/ethernet/sis/sis900.c 				skb = sis_priv->rx_skbuff[entry];
skb              1809 drivers/net/ethernet/sis/sis900.c 				dev_kfree_skb(skb);
skb              1826 drivers/net/ethernet/sis/sis900.c 			sis_priv->rx_skbuff[entry] = skb;
skb              1829 drivers/net/ethernet/sis/sis900.c 				pci_map_single(sis_priv->pci_dev, skb->data,
skb              1833 drivers/net/ethernet/sis/sis900.c 				dev_kfree_skb_irq(skb);
skb              1846 drivers/net/ethernet/sis/sis900.c 		struct sk_buff *skb;
skb              1851 drivers/net/ethernet/sis/sis900.c 			skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
skb              1852 drivers/net/ethernet/sis/sis900.c 			if (skb == NULL) {
skb              1860 drivers/net/ethernet/sis/sis900.c 			sis_priv->rx_skbuff[entry] = skb;
skb              1863 drivers/net/ethernet/sis/sis900.c 				pci_map_single(sis_priv->pci_dev, skb->data,
skb              1867 drivers/net/ethernet/sis/sis900.c 				dev_kfree_skb_irq(skb);
skb              1894 drivers/net/ethernet/sis/sis900.c 		struct sk_buff *skb;
skb              1930 drivers/net/ethernet/sis/sis900.c 		skb = sis_priv->tx_skbuff[entry];
skb              1932 drivers/net/ethernet/sis/sis900.c 			sis_priv->tx_ring[entry].bufptr, skb->len,
skb              1934 drivers/net/ethernet/sis/sis900.c 		dev_consume_skb_irq(skb);
skb              1962 drivers/net/ethernet/sis/sis900.c 	struct sk_buff *skb;
skb              1980 drivers/net/ethernet/sis/sis900.c 		skb = sis_priv->rx_skbuff[i];
skb              1981 drivers/net/ethernet/sis/sis900.c 		if (skb) {
skb              1984 drivers/net/ethernet/sis/sis900.c 			dev_kfree_skb(skb);
skb              1989 drivers/net/ethernet/sis/sis900.c 		skb = sis_priv->tx_skbuff[i];
skb              1990 drivers/net/ethernet/sis/sis900.c 		if (skb) {
skb              1992 drivers/net/ethernet/sis/sis900.c 					 skb->len, PCI_DMA_TODEVICE);
skb              1993 drivers/net/ethernet/sis/sis900.c 			dev_kfree_skb(skb);
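
The sis900 RX path above replaces every consumed ring buffer with a freshly allocated, freshly mapped one before rearming the descriptor. A hedged sketch of one refill step, using the modern dma_map_single() in place of the pci_map_single() wrapper the driver calls (RX_BUF_SIZE is illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RX_BUF_SIZE 1536	/* illustrative buffer size */

static int rx_refill_one(struct net_device *dev, struct device *dma_dev,
			 struct sk_buff **slot, dma_addr_t *bufptr)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, RX_BUF_SIZE);

	if (!skb)
		return -ENOMEM;		/* leave the slot empty, retry later */

	*bufptr = dma_map_single(dma_dev, skb->data, RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, *bufptr)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	*slot = skb;			/* remember for later unmap/free */
	return 0;
}
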
skb               296 drivers/net/ethernet/smsc/epic100.c static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
skb               915 drivers/net/ethernet/smsc/epic100.c 		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
skb               916 drivers/net/ethernet/smsc/epic100.c 		ep->rx_skbuff[i] = skb;
skb               917 drivers/net/ethernet/smsc/epic100.c 		if (skb == NULL)
skb               919 drivers/net/ethernet/smsc/epic100.c 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
skb               921 drivers/net/ethernet/smsc/epic100.c 			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb               937 drivers/net/ethernet/smsc/epic100.c static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               945 drivers/net/ethernet/smsc/epic100.c 	if (skb_padto(skb, ETH_ZLEN))
skb               956 drivers/net/ethernet/smsc/epic100.c 	ep->tx_skbuff[entry] = skb;
skb               957 drivers/net/ethernet/smsc/epic100.c 	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
skb               958 drivers/net/ethernet/smsc/epic100.c 		 			            skb->len, PCI_DMA_TODEVICE);
skb               970 drivers/net/ethernet/smsc/epic100.c 	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
skb               972 drivers/net/ethernet/smsc/epic100.c 		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
skb               985 drivers/net/ethernet/smsc/epic100.c 			   skb->len, entry, ctrl_word, er32(TxSTAT));
skb              1022 drivers/net/ethernet/smsc/epic100.c 		struct sk_buff *skb;
skb              1037 drivers/net/ethernet/smsc/epic100.c 		skb = ep->tx_skbuff[entry];
skb              1039 drivers/net/ethernet/smsc/epic100.c 				 skb->len, PCI_DMA_TODEVICE);
skb              1040 drivers/net/ethernet/smsc/epic100.c 		dev_consume_skb_irq(skb);
skb              1168 drivers/net/ethernet/smsc/epic100.c 			struct sk_buff *skb;
skb              1178 drivers/net/ethernet/smsc/epic100.c 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb              1179 drivers/net/ethernet/smsc/epic100.c 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
skb              1184 drivers/net/ethernet/smsc/epic100.c 				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
skb              1185 drivers/net/ethernet/smsc/epic100.c 				skb_put(skb, pkt_len);
skb              1194 drivers/net/ethernet/smsc/epic100.c 				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
skb              1197 drivers/net/ethernet/smsc/epic100.c 			skb->protocol = eth_type_trans(skb, dev);
skb              1198 drivers/net/ethernet/smsc/epic100.c 			netif_receive_skb(skb);
skb              1210 drivers/net/ethernet/smsc/epic100.c 			struct sk_buff *skb;
skb              1211 drivers/net/ethernet/smsc/epic100.c 			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
skb              1212 drivers/net/ethernet/smsc/epic100.c 			if (skb == NULL)
skb              1214 drivers/net/ethernet/smsc/epic100.c 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
skb              1216 drivers/net/ethernet/smsc/epic100.c 				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb              1271 drivers/net/ethernet/smsc/epic100.c 	struct sk_buff *skb;
skb              1291 drivers/net/ethernet/smsc/epic100.c 		skb = ep->rx_skbuff[i];
skb              1295 drivers/net/ethernet/smsc/epic100.c 		if (skb) {
skb              1298 drivers/net/ethernet/smsc/epic100.c 			dev_kfree_skb(skb);
skb              1303 drivers/net/ethernet/smsc/epic100.c 		skb = ep->tx_skbuff[i];
skb              1305 drivers/net/ethernet/smsc/epic100.c 		if (!skb)
skb              1307 drivers/net/ethernet/smsc/epic100.c 		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
skb              1309 drivers/net/ethernet/smsc/epic100.c 		dev_kfree_skb(skb);
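
The epic100 RX loop above implements the classic copybreak: packets below a threshold are copied into a small, freshly allocated skb so the large ring buffer never leaves the ring; larger packets surrender the ring buffer and a replacement is allocated. A hedged sketch of the decision (the threshold value is illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RX_COPYBREAK 200	/* illustrative copy-vs-hand-off threshold */

static struct sk_buff *rx_copybreak(struct net_device *dev,
				    struct sk_buff *ring_skb, int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len < RX_COPYBREAK &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* 16-byte align the IP header */
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		skb_put(skb, pkt_len);
		return skb;		/* ring_skb stays in the ring */
	}
	skb_put(ring_skb, pkt_len);	/* hand the ring buffer up instead */
	return ring_skb;
}
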
skb               370 drivers/net/ethernet/smsc/smc911x.c 	struct sk_buff *skb;
skb               395 drivers/net/ethernet/smsc/smc911x.c 		skb = netdev_alloc_skb(dev, pkt_len+32);
skb               396 drivers/net/ethernet/smsc/smc911x.c 		if (unlikely(skb == NULL)) {
skb               406 drivers/net/ethernet/smsc/smc911x.c 		data = skb->data;
skb               407 drivers/net/ethernet/smsc/smc911x.c 		skb_reserve(skb, 2);
skb               408 drivers/net/ethernet/smsc/smc911x.c 		skb_put(skb, pkt_len - 4);
skb               421 drivers/net/ethernet/smsc/smc911x.c 		lp->current_rx_skb = skb;
skb               431 drivers/net/ethernet/smsc/smc911x.c 		skb->protocol = eth_type_trans(skb, dev);
skb               432 drivers/net/ethernet/smsc/smc911x.c 		netif_rx(skb);
skb               445 drivers/net/ethernet/smsc/smc911x.c 	struct sk_buff *skb;
skb               452 drivers/net/ethernet/smsc/smc911x.c 	skb = lp->pending_tx_skb;
skb               459 drivers/net/ethernet/smsc/smc911x.c 	buf = (char *)((u32)(skb->data) & ~0xF);
skb               460 drivers/net/ethernet/smsc/smc911x.c 	len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
skb               461 drivers/net/ethernet/smsc/smc911x.c 	cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
skb               463 drivers/net/ethernet/smsc/smc911x.c 			skb->len;
skb               465 drivers/net/ethernet/smsc/smc911x.c 	buf = (char *)((u32)skb->data & ~0x3);
skb               466 drivers/net/ethernet/smsc/smc911x.c 	len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
skb               467 drivers/net/ethernet/smsc/smc911x.c 	cmdA = (((u32)skb->data & 0x3) << 16) |
skb               469 drivers/net/ethernet/smsc/smc911x.c 			skb->len;
skb               472 drivers/net/ethernet/smsc/smc911x.c 	cmdB = (skb->len << 16) | (skb->len & 0x7FF);
skb               484 drivers/net/ethernet/smsc/smc911x.c 	lp->current_tx_skb = skb;
skb               490 drivers/net/ethernet/smsc/smc911x.c 	dev_kfree_skb_irq(skb);
skb               505 drivers/net/ethernet/smsc/smc911x.c smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               538 drivers/net/ethernet/smsc/smc911x.c 	if (unlikely(free < (skb->len + 8 + 15 + 15))) {
skb               540 drivers/net/ethernet/smsc/smc911x.c 			    free, skb->len);
skb               545 drivers/net/ethernet/smsc/smc911x.c 		dev_kfree_skb_any(skb);
skb               556 drivers/net/ethernet/smsc/smc911x.c 			lp->pending_tx_skb = skb;
skb               566 drivers/net/ethernet/smsc/smc911x.c 	lp->pending_tx_skb = skb;
skb              1173 drivers/net/ethernet/smsc/smc911x.c 	struct sk_buff *skb = lp->current_tx_skb;
skb              1179 drivers/net/ethernet/smsc/smc911x.c 	BUG_ON(skb == NULL);
skb              1182 drivers/net/ethernet/smsc/smc911x.c 	dev_kfree_skb_irq(skb);
skb              1205 drivers/net/ethernet/smsc/smc911x.c 	struct sk_buff *skb = lp->current_rx_skb;
skb              1212 drivers/net/ethernet/smsc/smc911x.c 	BUG_ON(skb == NULL);
skb              1214 drivers/net/ethernet/smsc/smc911x.c 	PRINT_PKT(skb->data, skb->len);
skb              1215 drivers/net/ethernet/smsc/smc911x.c 	skb->protocol = eth_type_trans(skb, dev);
skb              1217 drivers/net/ethernet/smsc/smc911x.c 	dev->stats.rx_bytes += skb->len;
skb              1218 drivers/net/ethernet/smsc/smc911x.c 	netif_rx(skb);
skb               286 drivers/net/ethernet/smsc/smc9194.c static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
skb               467 drivers/net/ethernet/smsc/smc9194.c static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
skb               486 drivers/net/ethernet/smsc/smc9194.c 	lp->saved_skb = skb;
skb               488 drivers/net/ethernet/smsc/smc9194.c 	length = skb->len;
skb               491 drivers/net/ethernet/smsc/smc9194.c 		if (skb_padto(skb, ETH_ZLEN)) {
skb               511 drivers/net/ethernet/smsc/smc9194.c 		dev_kfree_skb(skb);
skb               582 drivers/net/ethernet/smsc/smc9194.c 	struct sk_buff *skb = lp->saved_skb;
skb               589 drivers/net/ethernet/smsc/smc9194.c 	if (!skb) {
skb               593 drivers/net/ethernet/smsc/smc9194.c 	length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
skb               594 drivers/net/ethernet/smsc/smc9194.c 	buf = skb->data;
skb               601 drivers/net/ethernet/smsc/smc9194.c 		dev_kfree_skb_any(skb);
skb               664 drivers/net/ethernet/smsc/smc9194.c 	dev_kfree_skb_any(skb);
skb              1160 drivers/net/ethernet/smsc/smc9194.c 		struct sk_buff *skb;
skb              1171 drivers/net/ethernet/smsc/smc9194.c 		skb = netdev_alloc_skb(dev, packet_length + 5);
skb              1172 drivers/net/ethernet/smsc/smc9194.c 		if (skb == NULL) {
skb              1182 drivers/net/ethernet/smsc/smc9194.c 		skb_reserve(skb, 2);	/* 16 bit alignment */
skb              1184 drivers/net/ethernet/smsc/smc9194.c 		data = skb_put(skb, packet_length);
skb              1210 drivers/net/ethernet/smsc/smc9194.c 		skb->protocol = eth_type_trans(skb, dev);
skb              1211 drivers/net/ethernet/smsc/smc9194.c 		netif_rx(skb);
skb               275 drivers/net/ethernet/smsc/smc91c92_cs.c static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
skb              1121 drivers/net/ethernet/smsc/smc91c92_cs.c     struct sk_buff *skb = smc->saved_skb;
skb              1125 drivers/net/ethernet/smsc/smc91c92_cs.c     if (!skb) {
skb              1136 drivers/net/ethernet/smsc/smc91c92_cs.c 	dev_kfree_skb_irq(skb);
skb              1142 drivers/net/ethernet/smsc/smc91c92_cs.c     dev->stats.tx_bytes += skb->len;
skb              1151 drivers/net/ethernet/smsc/smc91c92_cs.c 	u_char *buf = skb->data;
skb              1152 drivers/net/ethernet/smsc/smc91c92_cs.c 	u_int length = skb->len; /* The chip will pad to ethernet min. */
skb              1174 drivers/net/ethernet/smsc/smc91c92_cs.c     dev_kfree_skb_irq(skb);
skb              1195 drivers/net/ethernet/smsc/smc91c92_cs.c static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
skb              1207 drivers/net/ethernet/smsc/smc91c92_cs.c 	       skb->len, inw(ioaddr + 2));
skb              1215 drivers/net/ethernet/smsc/smc91c92_cs.c     smc->saved_skb = skb;
skb              1217 drivers/net/ethernet/smsc/smc91c92_cs.c     num_pages = skb->len >> 8;
skb              1221 drivers/net/ethernet/smsc/smc91c92_cs.c 	dev_kfree_skb(skb);
skb              1494 drivers/net/ethernet/smsc/smc91c92_cs.c 	struct sk_buff *skb;
skb              1498 drivers/net/ethernet/smsc/smc91c92_cs.c 	skb = netdev_alloc_skb(dev, packet_length+2);
skb              1500 drivers/net/ethernet/smsc/smc91c92_cs.c 	if (skb == NULL) {
skb              1508 drivers/net/ethernet/smsc/smc91c92_cs.c 	skb_reserve(skb, 2);
skb              1509 drivers/net/ethernet/smsc/smc91c92_cs.c 	insw(ioaddr+DATA_1, skb_put(skb, packet_length),
skb              1511 drivers/net/ethernet/smsc/smc91c92_cs.c 	skb->protocol = eth_type_trans(skb, dev);
skb              1513 drivers/net/ethernet/smsc/smc91c92_cs.c 	netif_rx(skb);
skb               444 drivers/net/ethernet/smsc/smc91x.c 		struct sk_buff *skb;
skb               460 drivers/net/ethernet/smsc/smc91x.c 		skb = netdev_alloc_skb(dev, packet_len);
skb               461 drivers/net/ethernet/smsc/smc91x.c 		if (unlikely(skb == NULL)) {
skb               469 drivers/net/ethernet/smsc/smc91x.c 		skb_reserve(skb, 2);
skb               481 drivers/net/ethernet/smsc/smc91x.c 		data = skb_put(skb, data_len);
skb               489 drivers/net/ethernet/smsc/smc91x.c 		skb->protocol = eth_type_trans(skb, dev);
skb               490 drivers/net/ethernet/smsc/smc91x.c 		netif_rx(skb);
skb               543 drivers/net/ethernet/smsc/smc91x.c 	struct sk_buff *skb;
skb               556 drivers/net/ethernet/smsc/smc91x.c 	skb = lp->pending_tx_skb;
skb               557 drivers/net/ethernet/smsc/smc91x.c 	if (unlikely(!skb)) {
skb               576 drivers/net/ethernet/smsc/smc91x.c 	buf = skb->data;
skb               577 drivers/net/ethernet/smsc/smc91x.c 	len = skb->len;
skb               619 drivers/net/ethernet/smsc/smc91x.c 	dev_consume_skb_any(skb);
skb               629 drivers/net/ethernet/smsc/smc91x.c smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               651 drivers/net/ethernet/smsc/smc91x.c 	numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
skb               656 drivers/net/ethernet/smsc/smc91x.c 		dev_kfree_skb_any(skb);
skb               680 drivers/net/ethernet/smsc/smc91x.c 	lp->pending_tx_skb = skb;
skb              1216 drivers/net/ethernet/smsc/smsc911x.c 		struct sk_buff *skb;
skb              1249 drivers/net/ethernet/smsc/smsc911x.c 		skb = netdev_alloc_skb(dev, pktwords << 2);
skb              1250 drivers/net/ethernet/smsc/smsc911x.c 		if (unlikely(!skb)) {
skb              1260 drivers/net/ethernet/smsc/smsc911x.c 				 (unsigned int *)skb->data, pktwords);
skb              1263 drivers/net/ethernet/smsc/smsc911x.c 		skb_reserve(skb, NET_IP_ALIGN);
skb              1264 drivers/net/ethernet/smsc/smsc911x.c 		skb_put(skb, pktlength - 4);
skb              1265 drivers/net/ethernet/smsc/smsc911x.c 		skb->protocol = eth_type_trans(skb, dev);
skb              1266 drivers/net/ethernet/smsc/smsc911x.c 		skb_checksum_none_assert(skb);
skb              1267 drivers/net/ethernet/smsc/smsc911x.c 		netif_receive_skb(skb);
skb              1777 drivers/net/ethernet/smsc/smsc911x.c smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1794 drivers/net/ethernet/smsc/smsc911x.c 	tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16;
skb              1796 drivers/net/ethernet/smsc/smsc911x.c 	tx_cmd_a |= (unsigned int)skb->len;
skb              1798 drivers/net/ethernet/smsc/smsc911x.c 	tx_cmd_b = ((unsigned int)skb->len) << 16;
skb              1799 drivers/net/ethernet/smsc/smsc911x.c 	tx_cmd_b |= (unsigned int)skb->len;
skb              1804 drivers/net/ethernet/smsc/smsc911x.c 	bufp = (ulong)skb->data & (~0x3);
skb              1805 drivers/net/ethernet/smsc/smsc911x.c 	wrsz = (u32)skb->len + 3;
skb              1806 drivers/net/ethernet/smsc/smsc911x.c 	wrsz += (u32)((ulong)skb->data & 0x3);
skb              1810 drivers/net/ethernet/smsc/smsc911x.c 	freespace -= (skb->len + 32);
skb              1811 drivers/net/ethernet/smsc/smsc911x.c 	skb_tx_timestamp(skb);
skb              1812 drivers/net/ethernet/smsc/smsc911x.c 	dev_consume_skb_any(skb);
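
smsc911x_hard_start_xmit() above writes each frame into the TX FIFO with 32-bit PIO, so it rounds the start address down and the byte count up to whole words, passing the true offset and length to the hardware in the command words. The word-count arithmetic as a hedged sketch:

#include <linux/skbuff.h>

static u32 tx_fifo_words(const struct sk_buff *skb, unsigned long *start)
{
	u32 wrsz = (u32)skb->len + 3;		/* round the length up ... */

	*start = (unsigned long)skb->data & ~0x3UL;	/* ... address down */
	wrsz += (u32)((unsigned long)skb->data & 0x3);	/* leading slack */
	return wrsz >> 2;			/* bytes -> 32-bit words */
}
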
skb                40 drivers/net/ethernet/smsc/smsc9420.c 	struct sk_buff *skb;
skb               505 drivers/net/ethernet/smsc/smsc9420.c 		struct sk_buff *skb = pd->tx_buffers[i].skb;
skb               507 drivers/net/ethernet/smsc/smsc9420.c 		if (skb) {
skb               510 drivers/net/ethernet/smsc/smsc9420.c 					 skb->len, PCI_DMA_TODEVICE);
skb               511 drivers/net/ethernet/smsc/smsc9420.c 			dev_kfree_skb_any(skb);
skb               538 drivers/net/ethernet/smsc/smsc9420.c 		if (pd->rx_buffers[i].skb)
skb               539 drivers/net/ethernet/smsc/smsc9420.c 			dev_kfree_skb_any(pd->rx_buffers[i].skb);
skb               748 drivers/net/ethernet/smsc/smsc9420.c 	struct sk_buff *skb;
skb               765 drivers/net/ethernet/smsc/smsc9420.c 	skb = pd->rx_buffers[index].skb;
skb               766 drivers/net/ethernet/smsc/smsc9420.c 	pd->rx_buffers[index].skb = NULL;
skb               769 drivers/net/ethernet/smsc/smsc9420.c 		u16 hw_csum = get_unaligned_le16(skb_tail_pointer(skb) +
skb               771 drivers/net/ethernet/smsc/smsc9420.c 		put_unaligned_le16(hw_csum, &skb->csum);
skb               772 drivers/net/ethernet/smsc/smsc9420.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb               775 drivers/net/ethernet/smsc/smsc9420.c 	skb_reserve(skb, NET_IP_ALIGN);
skb               776 drivers/net/ethernet/smsc/smsc9420.c 	skb_put(skb, packet_length);
skb               778 drivers/net/ethernet/smsc/smsc9420.c 	skb->protocol = eth_type_trans(skb, dev);
skb               780 drivers/net/ethernet/smsc/smsc9420.c 	netif_receive_skb(skb);
skb               785 drivers/net/ethernet/smsc/smsc9420.c 	struct sk_buff *skb = netdev_alloc_skb(pd->dev, PKT_BUF_SZ);
skb               788 drivers/net/ethernet/smsc/smsc9420.c 	BUG_ON(pd->rx_buffers[index].skb);
skb               791 drivers/net/ethernet/smsc/smsc9420.c 	if (unlikely(!skb))
skb               794 drivers/net/ethernet/smsc/smsc9420.c 	mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
skb               797 drivers/net/ethernet/smsc/smsc9420.c 		dev_kfree_skb_any(skb);
skb               802 drivers/net/ethernet/smsc/smsc9420.c 	pd->rx_buffers[index].skb = skb;
skb               910 drivers/net/ethernet/smsc/smsc9420.c 		BUG_ON(!pd->tx_buffers[index].skb);
skb               914 drivers/net/ethernet/smsc/smsc9420.c 			pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE);
skb               917 drivers/net/ethernet/smsc/smsc9420.c 		dev_kfree_skb_any(pd->tx_buffers[index].skb);
skb               918 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_buffers[index].skb = NULL;
skb               927 drivers/net/ethernet/smsc/smsc9420.c static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
skb               941 drivers/net/ethernet/smsc/smsc9420.c 	BUG_ON(pd->tx_buffers[index].skb);
skb               944 drivers/net/ethernet/smsc/smsc9420.c 	mapping = pci_map_single(pd->pdev, skb->data,
skb               945 drivers/net/ethernet/smsc/smsc9420.c 				 skb->len, PCI_DMA_TODEVICE);
skb               952 drivers/net/ethernet/smsc/smsc9420.c 	pd->tx_buffers[index].skb = skb;
skb               955 drivers/net/ethernet/smsc/smsc9420.c 	tmp_desc1 = (TDES1_LS_ | ((u32)skb->len & 0x7FF));
skb               976 drivers/net/ethernet/smsc/smsc9420.c 	skb_tx_timestamp(skb);
skb              1193 drivers/net/ethernet/smsc/smsc9420.c 		pd->tx_buffers[i].skb = NULL;
skb              1229 drivers/net/ethernet/smsc/smsc9420.c 		pd->rx_buffers[i].skb = NULL;
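
smsc9420 reports CHECKSUM_COMPLETE rather than CHECKSUM_UNNECESSARY: the hardware appends the raw ones-complement sum after the payload, and the driver above copies it into skb->csum so the stack can verify any protocol. A hedged sketch of that hand-off (the offset to the appended checksum is illustrative):

#include <asm/unaligned.h>
#include <linux/skbuff.h>

static void rx_set_hw_csum(struct sk_buff *skb, int csum_offset)
{
	/* hardware appended the 16-bit sum just past the packet data */
	u16 hw_csum = get_unaligned_le16(skb_tail_pointer(skb) + csum_offset);

	put_unaligned_le16(hw_csum, &skb->csum);
	skb->ip_summed = CHECKSUM_COMPLETE;
}
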
skb               270 drivers/net/ethernet/socionext/netsec.c 		struct sk_buff *skb;
skb               661 drivers/net/ethernet/socionext/netsec.c 			bytes += desc->skb->len;
skb               662 drivers/net/ethernet/socionext/netsec.c 			dev_kfree_skb(desc->skb);
skb               803 drivers/net/ethernet/socionext/netsec.c 		dring->desc[idx].skb = buf;
skb               945 drivers/net/ethernet/socionext/netsec.c 		struct sk_buff *skb = NULL;
skb              1008 drivers/net/ethernet/socionext/netsec.c 		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);
skb              1010 drivers/net/ethernet/socionext/netsec.c 		if (unlikely(!skb)) {
skb              1023 drivers/net/ethernet/socionext/netsec.c 		skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb              1024 drivers/net/ethernet/socionext/netsec.c 		skb_put(skb, xdp.data_end - xdp.data);
skb              1025 drivers/net/ethernet/socionext/netsec.c 		skb->protocol = eth_type_trans(skb, priv->ndev);
skb              1029 drivers/net/ethernet/socionext/netsec.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1032 drivers/net/ethernet/socionext/netsec.c 		if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
skb              1111 drivers/net/ethernet/socionext/netsec.c static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
skb              1130 drivers/net/ethernet/socionext/netsec.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1133 drivers/net/ethernet/socionext/netsec.c 	if (skb_is_gso(skb))
skb              1134 drivers/net/ethernet/socionext/netsec.c 		tso_seg_len = skb_shinfo(skb)->gso_size;
skb              1137 drivers/net/ethernet/socionext/netsec.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb              1138 drivers/net/ethernet/socionext/netsec.c 			ip_hdr(skb)->tot_len = 0;
skb              1139 drivers/net/ethernet/socionext/netsec.c 			tcp_hdr(skb)->check =
skb              1140 drivers/net/ethernet/socionext/netsec.c 				~tcp_v4_check(0, ip_hdr(skb)->saddr,
skb              1141 drivers/net/ethernet/socionext/netsec.c 					      ip_hdr(skb)->daddr, 0);
skb              1143 drivers/net/ethernet/socionext/netsec.c 			ipv6_hdr(skb)->payload_len = 0;
skb              1144 drivers/net/ethernet/socionext/netsec.c 			tcp_hdr(skb)->check =
skb              1145 drivers/net/ethernet/socionext/netsec.c 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb              1146 drivers/net/ethernet/socionext/netsec.c 						 &ipv6_hdr(skb)->daddr,
skb              1154 drivers/net/ethernet/socionext/netsec.c 	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
skb              1155 drivers/net/ethernet/socionext/netsec.c 					  skb_headlen(skb), DMA_TO_DEVICE);
skb              1161 drivers/net/ethernet/socionext/netsec.c 		dev_kfree_skb_any(skb);
skb              1164 drivers/net/ethernet/socionext/netsec.c 	tx_desc.addr = skb->data;
skb              1165 drivers/net/ethernet/socionext/netsec.c 	tx_desc.len = skb_headlen(skb);
skb              1168 drivers/net/ethernet/socionext/netsec.c 	skb_tx_timestamp(skb);
skb              1169 drivers/net/ethernet/socionext/netsec.c 	netdev_sent_queue(priv->ndev, skb->len);
skb              1171 drivers/net/ethernet/socionext/netsec.c 	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
skb              1198 drivers/net/ethernet/socionext/netsec.c 			dev_kfree_skb(desc->skb);
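
After XDP has run, the netsec RX path above wraps the page data in an skb with build_skb(), then uses skb_reserve()/skb_put() to describe where the packet actually starts and ends inside the buffer. A hedged sketch of that conversion (truesize handling simplified):

#include <linux/skbuff.h>

static struct sk_buff *buf_to_skb(void *buf_addr, unsigned int truesize,
				  void *pkt_start, unsigned int pkt_len)
{
	struct sk_buff *skb = build_skb(buf_addr, truesize);

	if (unlikely(!skb))
		return NULL;		/* caller recycles the buffer */
	skb_reserve(skb, pkt_start - buf_addr);	/* headroom before packet */
	skb_put(skb, pkt_len);			/* mark the packet length */
	return skb;
}
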
skb               581 drivers/net/ethernet/socionext/sni_ave.c 	struct sk_buff *skb;
skb               585 drivers/net/ethernet/socionext/sni_ave.c 	skb = priv->rx.desc[entry].skbs;
skb               586 drivers/net/ethernet/socionext/sni_ave.c 	if (!skb) {
skb               587 drivers/net/ethernet/socionext/sni_ave.c 		skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
skb               588 drivers/net/ethernet/socionext/sni_ave.c 		if (!skb) {
skb               592 drivers/net/ethernet/socionext/sni_ave.c 		skb->data += AVE_FRAME_HEADROOM;
skb               593 drivers/net/ethernet/socionext/sni_ave.c 		skb->tail += AVE_FRAME_HEADROOM;
skb               610 drivers/net/ethernet/socionext/sni_ave.c 			  skb->data - AVE_FRAME_HEADROOM,
skb               615 drivers/net/ethernet/socionext/sni_ave.c 		dev_kfree_skb_any(skb);
skb               618 drivers/net/ethernet/socionext/sni_ave.c 	priv->rx.desc[entry].skbs = skb;
skb               746 drivers/net/ethernet/socionext/sni_ave.c 	struct sk_buff *skb;
skb               776 drivers/net/ethernet/socionext/sni_ave.c 		skb = priv->rx.desc[proc_idx].skbs;
skb               781 drivers/net/ethernet/socionext/sni_ave.c 		skb->dev = ndev;
skb               782 drivers/net/ethernet/socionext/sni_ave.c 		skb_put(skb, pktlen);
skb               783 drivers/net/ethernet/socionext/sni_ave.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               786 drivers/net/ethernet/socionext/sni_ave.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               791 drivers/net/ethernet/socionext/sni_ave.c 		netif_receive_skb(skb);
skb              1397 drivers/net/ethernet/socionext/sni_ave.c static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb              1416 drivers/net/ethernet/socionext/sni_ave.c 	if (skb_put_padto(skb, ETH_ZLEN)) {
skb              1425 drivers/net/ethernet/socionext/sni_ave.c 			  skb->data, skb->len, DMA_TO_DEVICE, &paddr);
skb              1427 drivers/net/ethernet/socionext/sni_ave.c 		dev_kfree_skb_any(skb);
skb              1432 drivers/net/ethernet/socionext/sni_ave.c 	priv->tx.desc[proc_idx].skbs = skb;
skb              1437 drivers/net/ethernet/socionext/sni_ave.c 		(skb->len & AVE_STS_PKTLEN_TX_MASK);
skb              1444 drivers/net/ethernet/socionext/sni_ave.c 	if (skb->ip_summed == CHECKSUM_NONE ||
skb              1445 drivers/net/ethernet/socionext/sni_ave.c 	    skb->ip_summed == CHECKSUM_UNNECESSARY)
skb                17 drivers/net/ethernet/stmicro/stmmac/chain_mode.c static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
skb                20 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 	unsigned int nopaged_len = skb_headlen(skb);
skb                36 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 	des2 = dma_map_single(priv->device, skb->data,
skb                45 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 			0, false, skb->len);
skb                54 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 					      (skb->data + bmax * i),
skb                62 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 					STMMAC_CHAIN_MODE, 1, false, skb->len);
skb                67 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 					      (skb->data + bmax * i), len,
skb                76 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 					STMMAC_CHAIN_MODE, 1, true, skb->len);
skb               491 drivers/net/ethernet/stmicro/stmmac/hwif.h 	int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
skb                17 drivers/net/ethernet/stmicro/stmmac/ring_mode.c static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
skb                20 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 	unsigned int nopaged_len = skb_headlen(skb);
skb                40 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 		des2 = dma_map_single(priv->device, skb->data, bmax,
skb                52 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 				STMMAC_RING_MODE, 0, false, skb->len);
skb                61 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 		des2 = dma_map_single(priv->device, skb->data + bmax, len,
skb                72 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 				STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
skb                73 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 				skb->len);
skb                75 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 		des2 = dma_map_single(priv->device, skb->data,
skb                85 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 				STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
skb                86 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 				skb->len);
skb                81 drivers/net/ethernet/stmicro/stmmac/stmmac.h 		struct sk_buff *skb;
skb               433 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				   struct dma_desc *p, struct sk_buff *skb)
skb               443 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
skb               460 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		skb_tstamp_tx(skb, &shhwtstamp);
skb               474 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				   struct dma_desc *np, struct sk_buff *skb)
skb               490 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		shhwtstamp = skb_hwtstamps(skb);
skb              1882 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
skb              1913 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			stmmac_get_tx_hwtstamp(priv, p, skb);
skb              1937 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (likely(skb != NULL)) {
skb              1939 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			bytes_compl += skb->len;
skb              1940 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			dev_consume_skb_any(skb);
skb              2811 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
skb              2820 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (!skb_vlan_tag_present(skb))
skb              2822 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
skb              2823 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		inner_tag = skb_vlan_tag_get(skb);
skb              2827 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	tag = skb_vlan_tag_get(skb);
skb              2911 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb              2915 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	int nfrags = skb_shinfo(skb)->nr_frags;
skb              2916 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue = skb_get_queue_mapping(skb);
skb              2929 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              2933 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
skb              2945 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
skb              2947 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	mss = skb_shinfo(skb)->gso_size;
skb              2960 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
skb              2961 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
skb              2962 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb->data_len);
skb              2966 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
skb              2978 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
skb              2984 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
skb              3006 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              3025 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
skb              3030 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	    !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              3053 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	dev->stats.tx_bytes += skb->len;
skb              3060 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	skb_tx_timestamp(skb);
skb              3062 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              3065 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              3074 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
skb              3101 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		print_pkt(skb->data, skb_headlen(skb));
skb              3104 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
skb              3114 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	dev_kfree_skb(skb);
skb              3127 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
skb              3130 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	unsigned int nopaged_len = skb_headlen(skb);
skb              3132 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue = skb_get_queue_mapping(skb);
skb              3133 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	int nfrags = skb_shinfo(skb)->nr_frags;
skb              3148 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (skb_is_gso(skb) && priv->tso) {
skb              3149 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
skb              3150 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			return stmmac_tso_xmit(skb, dev);
skb              3166 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
skb              3172 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
skb              3187 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
skb              3190 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
skb              3196 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              3223 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				priv->mode, 1, last_segment, skb->len);
skb              3227 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	tx_q->tx_skbuff[entry] = skb;
skb              3236 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	    !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              3274 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		print_pkt(skb->data, skb->len);
skb              3283 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	dev->stats.tx_bytes += skb->len;
skb              3288 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	skb_tx_timestamp(skb);
skb              3297 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		des = dma_map_single(priv->device, skb->data,
skb              3309 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
skb              3312 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              3319 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				skb->len);
skb              3330 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
skb              3342 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	dev_kfree_skb(skb);
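
The stmmac_xmit() entries show the usual ndo_start_xmit shape: map the linear head, map each page fragment, stamp and count, and free the skb only on a mapping failure (on success it is released at TX-completion time). A hedged sketch of that skeleton; descriptor writes and the unwind of earlier mappings are deliberately omitted:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>

static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *dev,
			       struct device *dma_dev)
{
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
	dma_addr_t des;
	int i;

	des = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, des))
		goto drop;

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(dma_dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, des))
			goto drop;	/* real drivers also unwind prior maps */
	}

	skb_tx_timestamp(skb);		/* just before handing to hardware */
	return NETDEV_TX_OK;

drop:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
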
skb              3347 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
skb              3353 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	veth = (struct vlan_ethhdr *)skb->data;
skb              3362 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
skb              3363 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		skb_pull(skb, VLAN_HLEN);
skb              3364 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
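
The stmmac_rx_vlan() entries lift an in-band 802.1Q tag out of the payload and into skb metadata. A minimal sketch of the same pattern, assuming a linear skb that still carries the full VLAN Ethernet header:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void rx_vlan_untag_sketch(struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
	__be16 proto = veth->h_vlan_proto;
	u16 tci;

	if (proto != htons(ETH_P_8021Q) && proto != htons(ETH_P_8021AD))
		return;

	tci = ntohs(veth->h_vlan_TCI);	/* read before the tag is overwritten */
	/* slide the MAC addresses over the 4-byte tag, then drop the tag */
	memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
	skb_pull(skb, VLAN_HLEN);
	__vlan_hwaccel_put_tag(skb, proto, tci);
}
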
skb              3463 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct sk_buff *skb = NULL;
skb              3486 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb = rx_q->state.skb;
skb              3491 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb = NULL;
skb              3541 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			dev_kfree_skb(skb);
skb              3566 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (!skb) {
skb              3579 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb = napi_alloc_skb(&ch->rx_napi, len);
skb              3580 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			if (!skb) {
skb              3588 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb_copy_to_linear_data(skb, page_address(buf->page),
skb              3590 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb_put(skb, len);
skb              3603 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb              3615 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb              3631 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
skb              3632 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_rx_vlan(priv->dev, skb);
skb              3633 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		skb->protocol = eth_type_trans(skb, priv->dev);
skb              3636 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb_checksum_none_assert(skb);
skb              3638 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3641 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			skb_set_hash(skb, hash, hash_type);
skb              3643 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		skb_record_rx_queue(skb, queue);
skb              3644 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		napi_gro_receive(&ch->rx_napi, skb);
skb              3653 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		rx_q->state.skb = skb;
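
The stmmac_rx() entries build receive skbs from page-pool buffers: copy only the leading bytes into a small linear skb, attach the remainder of the page as a fragment, then hand the result to GRO. A sketch of that assembly under assumed hdr_len/frag_len/truesize inputs (the real driver derives them from the descriptor):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/mm.h>

static void rx_build_sketch(struct napi_struct *napi, struct net_device *dev,
			    struct page *page, unsigned int hdr_len,
			    unsigned int frag_len, unsigned int truesize,
			    u16 queue)
{
	struct sk_buff *skb = napi_alloc_skb(napi, hdr_len);

	if (!skb)
		return;		/* the real driver recycles the buffer */

	skb_copy_to_linear_data(skb, page_address(page), hdr_len);
	skb_put(skb, hdr_len);
	/* payload beyond the copied header stays in the page */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			hdr_len, frag_len, truesize);

	skb->protocol = eth_type_trans(skb, dev);
	skb_record_rx_queue(skb, queue);
	napi_gro_receive(napi, skb);
}
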
skb              4003 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              4006 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
skb              4016 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
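
The stmmac_select_queue() entries pin TSO-capable flows to one queue and defer everything else to the core. A short sketch of that .ndo_select_queue shape:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 select_queue_sketch(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	/* TSO packets go through queue 0 here */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
		return 0;

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
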
skb                60 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	struct sk_buff *skb = NULL;
skb                83 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb = netdev_alloc_skb(priv->dev, size);
skb                84 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (!skb)
skb                87 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	prefetchw(skb->data);
skb                90 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		ehdr = skb_push(skb, ETH_HLEN + 8);
skb                92 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		ehdr = skb_push(skb, ETH_HLEN + 4);
skb                94 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		ehdr = skb_push(skb, ETH_HLEN - 6);
skb                96 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		ehdr = skb_push(skb, ETH_HLEN);
skb                97 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb_reset_mac_header(skb);
skb                99 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb_set_network_header(skb, skb->len);
skb               100 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ihdr = skb_put(skb, sizeof(*ihdr));
skb               102 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb_set_transport_header(skb, skb->len);
skb               104 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		thdr = skb_put(skb, sizeof(*thdr));
skb               106 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		uhdr = skb_put(skb, sizeof(*uhdr));
skb               186 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	shdr = skb_put(skb, sizeof(*shdr));
skb               193 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		skb_put(skb, attr->size);
skb               194 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (attr->max_size && (attr->max_size > skb->len))
skb               195 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		skb_put(skb, attr->max_size - skb->len);
skb               197 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb->csum = 0;
skb               198 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb               200 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
skb               201 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		skb->csum_start = skb_transport_header(skb) - skb->head;
skb               202 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		skb->csum_offset = offsetof(struct tcphdr, check);
skb               204 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
skb               207 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb->protocol = htons(ETH_P_IP);
skb               208 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb->pkt_type = PACKET_HOST;
skb               209 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb->dev = priv->dev;
skb               211 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	return skb;
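
The stmmac_test_get_udp_skb() entries hand-build a selftest frame front to back with skb_push()/skb_put() and set up checksumming before it is queued. A simplified sketch of the same construction, assuming plain Ethernet + IPv4 + UDP; addresses, ports, and payload size are placeholders, and the UDP checksum is left zero (valid for IPv4) rather than using CHECKSUM_PARTIAL as the test does:

#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/skbuff.h>
#include <net/ip.h>

static struct sk_buff *udp_test_skb_sketch(struct net_device *dev,
					   __be32 saddr, __be32 daddr,
					   unsigned int payload)
{
	unsigned int len = ETH_HLEN + sizeof(struct iphdr) +
			   sizeof(struct udphdr) + payload;
	struct sk_buff *skb;
	struct ethhdr *ehdr;
	struct iphdr *ihdr;
	struct udphdr *uhdr;

	skb = netdev_alloc_skb(dev, len);
	if (!skb)
		return NULL;

	ehdr = skb_put(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	eth_broadcast_addr(ehdr->h_dest);	/* placeholder addressing */
	ether_addr_copy(ehdr->h_source, dev->dev_addr);
	ehdr->h_proto = htons(ETH_P_IP);

	skb_set_network_header(skb, skb->len);
	ihdr = skb_put_zero(skb, sizeof(*ihdr));
	ihdr->version = 4;
	ihdr->ihl = 5;
	ihdr->ttl = 32;
	ihdr->protocol = IPPROTO_UDP;
	ihdr->saddr = saddr;
	ihdr->daddr = daddr;

	skb_set_transport_header(skb, skb->len);
	uhdr = skb_put_zero(skb, sizeof(*uhdr));
	uhdr->source = htons(10000);		/* placeholder ports */
	uhdr->dest = htons(10001);
	uhdr->len = htons(sizeof(*uhdr) + payload);

	skb_put_zero(skb, payload);		/* dummy test payload */

	ihdr->tot_len = htons(skb->len - ETH_HLEN);
	ip_send_check(ihdr);	/* IPv4 header checksum; UDP csum left 0 */

	skb->protocol = htons(ETH_P_IP);
	skb->dev = dev;
	return skb;
}
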
skb               219 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	struct sk_buff *skb = NULL;
skb               221 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
skb               223 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (!skb)
skb               226 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb->pkt_type = PACKET_HOST;
skb               227 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb->dev = priv->dev;
skb               229 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	return skb;
skb               241 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c static int stmmac_test_loopback_validate(struct sk_buff *skb,
skb               255 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb = skb_unshare(skb, GFP_ATOMIC);
skb               256 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (!skb)
skb               259 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (skb_linearize(skb))
skb               261 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
skb               264 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ehdr = (struct ethhdr *)skb_mac_header(skb);
skb               277 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ihdr = ip_hdr(skb);
skb               279 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
skb               303 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (tpriv->packet->exp_hash && !skb->hash)
skb               311 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	kfree_skb(skb);
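
The stmmac_test_loopback_validate() entries run inside a temporary packet_type hook and normalise the skb (skb_unshare + skb_linearize) before inspecting headers. A hedged sketch of that hook shape; the completion and the minimum-length check stand in for the test's private state:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/completion.h>

static struct completion done;

static int loopback_validate_sketch(struct sk_buff *skb,
				    struct net_device *dev,
				    struct packet_type *pt,
				    struct net_device *orig_dev)
{
	/* get a private, linear copy we are allowed to scribble on */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;
	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < ETH_ZLEN - ETH_HLEN)	/* placeholder bound */
		goto out;

	/* ... header checks against the transmitted template ... */
	complete(&done);
out:
	kfree_skb(skb);
	return 0;
}
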
skb               319 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	struct sk_buff *skb = NULL;
skb               338 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb = stmmac_test_get_udp_skb(priv, attr);
skb               339 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (!skb) {
skb               344 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb_set_queue_mapping(skb, attr->queue_mapping);
skb               345 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ret = dev_queue_xmit(skb);
skb               710 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
skb               718 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ehdr = (struct ethhdr *)skb_mac_header(skb);
skb               727 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	kfree_skb(skb);
skb               831 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c static int stmmac_test_vlan_validate(struct sk_buff *skb,
skb               845 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb = skb_unshare(skb, GFP_ATOMIC);
skb               846 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (!skb)
skb               849 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (skb_linearize(skb))
skb               851 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
skb               854 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		if (skb->vlan_proto != htons(proto))
skb               856 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		if (skb->vlan_tci != tpriv->vlan_id) {
skb               864 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ehdr = (struct ethhdr *)skb_mac_header(skb);
skb               868 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ihdr = ip_hdr(skb);
skb               870 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
skb               886 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	kfree_skb(skb);
skb               894 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	struct sk_buff *skb = NULL;
skb               932 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		skb = stmmac_test_get_udp_skb(priv, &attr);
skb               933 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		if (!skb) {
skb               938 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		skb_set_queue_mapping(skb, 0);
skb               939 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		ret = dev_queue_xmit(skb);
skb               969 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	struct sk_buff *skb = NULL;
skb              1008 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		skb = stmmac_test_get_udp_skb(priv, &attr);
skb              1009 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		if (!skb) {
skb              1014 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		skb_set_queue_mapping(skb, 0);
skb              1015 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		ret = dev_queue_xmit(skb);
skb              1223 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	struct sk_buff *skb = NULL;
skb              1254 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb = stmmac_test_get_udp_skb(priv, &attr);
skb              1255 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (!skb) {
skb              1260 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
skb              1261 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb->protocol = htons(proto);
skb              1263 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb_set_queue_mapping(skb, 0);
skb              1264 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ret = dev_queue_xmit(skb);
skb              1553 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c static int stmmac_test_arp_validate(struct sk_buff *skb,
skb              1562 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ehdr = (struct ethhdr *)skb_mac_header(skb);
skb              1566 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ahdr = arp_hdr(skb);
skb              1573 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	kfree_skb(skb);
skb              1583 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	struct sk_buff *skb = NULL;
skb              1610 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb = stmmac_test_get_arp_skb(priv, &attr);
skb              1611 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	if (!skb) {
skb              1624 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	skb_set_queue_mapping(skb, 0);
skb              1625 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	ret = dev_queue_xmit(skb);
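
The ARP selftest entries lean on the stock arp_create() helper rather than building the frame by hand, then queue it like any other packet. A short sketch of that send path (error handling beyond allocation is omitted):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_packet.h>
#include <net/arp.h>

static int arp_test_send_sketch(struct net_device *dev,
				__be32 ip_src, __be32 ip_dst)
{
	struct sk_buff *skb;

	skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, dev, ip_src,
			 NULL, dev->dev_addr, NULL);
	if (!skb)
		return -ENOMEM;

	skb->pkt_type = PACKET_HOST;
	skb_set_queue_mapping(skb, 0);
	return dev_queue_xmit(skb);
}
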
skb              1403 drivers/net/ethernet/sun/cassini.c 		struct sk_buff *skb;
skb              1404 drivers/net/ethernet/sun/cassini.c 		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
skb              1405 drivers/net/ethernet/sun/cassini.c 			cas_skb_release(skb);
skb              1857 drivers/net/ethernet/sun/cassini.c 		struct sk_buff *skb = skbs[entry];
skb              1862 drivers/net/ethernet/sun/cassini.c 		if (!skb) {
skb              1869 drivers/net/ethernet/sun/cassini.c 		count -= skb_shinfo(skb)->nr_frags +
skb              1880 drivers/net/ethernet/sun/cassini.c 		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
skb              1899 drivers/net/ethernet/sun/cassini.c 		cp->net_stats[ring].tx_bytes += skb->len;
skb              1901 drivers/net/ethernet/sun/cassini.c 		dev_consume_skb_irq(skb);
skb              1948 drivers/net/ethernet/sun/cassini.c 	struct sk_buff *skb;
skb              1962 drivers/net/ethernet/sun/cassini.c 	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
skb              1963 drivers/net/ethernet/sun/cassini.c 	if (skb == NULL)
skb              1966 drivers/net/ethernet/sun/cassini.c 	*skbref = skb;
skb              1967 drivers/net/ethernet/sun/cassini.c 	skb_reserve(skb, swivel);
skb              1969 drivers/net/ethernet/sun/cassini.c 	p = skb->data;
skb              1994 drivers/net/ethernet/sun/cassini.c 		skb_frag_t *frag = skb_shinfo(skb)->frags;
skb              2005 drivers/net/ethernet/sun/cassini.c 			dev_kfree_skb_irq(skb);
skb              2016 drivers/net/ethernet/sun/cassini.c 		if (p == (char *) skb->data) { /* not split */
skb              2028 drivers/net/ethernet/sun/cassini.c 		skb_put(skb, alloclen);
skb              2030 drivers/net/ethernet/sun/cassini.c 		skb_shinfo(skb)->nr_frags++;
skb              2031 drivers/net/ethernet/sun/cassini.c 		skb->data_len += hlen - swivel;
skb              2032 drivers/net/ethernet/sun/cassini.c 		skb->truesize += hlen - swivel;
skb              2033 drivers/net/ethernet/sun/cassini.c 		skb->len      += hlen - swivel;
skb              2054 drivers/net/ethernet/sun/cassini.c 			skb_shinfo(skb)->nr_frags++;
skb              2055 drivers/net/ethernet/sun/cassini.c 			skb->data_len += hlen;
skb              2056 drivers/net/ethernet/sun/cassini.c 			skb->len      += hlen;
skb              2083 drivers/net/ethernet/sun/cassini.c 			dev_kfree_skb_irq(skb);
skb              2096 drivers/net/ethernet/sun/cassini.c 		if (p == (char *) skb->data) /* not split */
skb              2120 drivers/net/ethernet/sun/cassini.c 			crcaddr = skb->data + alloclen;
skb              2122 drivers/net/ethernet/sun/cassini.c 		skb_put(skb, alloclen);
skb              2133 drivers/net/ethernet/sun/cassini.c 	skb->protocol = eth_type_trans(skb, cp->dev);
skb              2134 drivers/net/ethernet/sun/cassini.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb              2135 drivers/net/ethernet/sun/cassini.c 		skb->csum = csum_unfold(~csum);
skb              2136 drivers/net/ethernet/sun/cassini.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb              2138 drivers/net/ethernet/sun/cassini.c 		skb_checksum_none_assert(skb);
skb              2158 drivers/net/ethernet/sun/cassini.c 				   struct sk_buff *skb)
skb              2167 drivers/net/ethernet/sun/cassini.c 	__skb_queue_tail(flow, skb);
skb              2169 drivers/net/ethernet/sun/cassini.c 		while ((skb = __skb_dequeue(flow))) {
skb              2170 drivers/net/ethernet/sun/cassini.c 			cas_skb_release(skb);
skb              2294 drivers/net/ethernet/sun/cassini.c 		struct sk_buff *uninitialized_var(skb);
skb              2332 drivers/net/ethernet/sun/cassini.c 		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
skb              2343 drivers/net/ethernet/sun/cassini.c 			cas_skb_release(skb);
skb              2345 drivers/net/ethernet/sun/cassini.c 			cas_rx_flow_pkt(cp, words, skb);
skb              2754 drivers/net/ethernet/sun/cassini.c 				    struct sk_buff *skb)
skb              2767 drivers/net/ethernet/sun/cassini.c 	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
skb              2775 drivers/net/ethernet/sun/cassini.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              2776 drivers/net/ethernet/sun/cassini.c 		const u64 csum_start_off = skb_checksum_start_offset(skb);
skb              2777 drivers/net/ethernet/sun/cassini.c 		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
skb              2785 drivers/net/ethernet/sun/cassini.c 	cp->tx_skbs[ring][entry] = skb;
skb              2787 drivers/net/ethernet/sun/cassini.c 	nr_frags = skb_shinfo(skb)->nr_frags;
skb              2788 drivers/net/ethernet/sun/cassini.c 	len = skb_headlen(skb);
skb              2789 drivers/net/ethernet/sun/cassini.c 	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
skb              2790 drivers/net/ethernet/sun/cassini.c 			       offset_in_page(skb->data), len,
skb              2794 drivers/net/ethernet/sun/cassini.c 	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
skb              2801 drivers/net/ethernet/sun/cassini.c 		skb_copy_from_linear_data_offset(skb, len - tabort,
skb              2813 drivers/net/ethernet/sun/cassini.c 		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
skb              2848 drivers/net/ethernet/sun/cassini.c 		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
skb              2854 drivers/net/ethernet/sun/cassini.c static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              2863 drivers/net/ethernet/sun/cassini.c 	if (skb_padto(skb, cp->min_frame_size))
skb              2869 drivers/net/ethernet/sun/cassini.c 	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
skb              3882 drivers/net/ethernet/sun/cassini.c 	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
skb              3893 drivers/net/ethernet/sun/cassini.c 		skb = skbs[i];
skb              3896 drivers/net/ethernet/sun/cassini.c 		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
skb              3908 drivers/net/ethernet/sun/cassini.c 			if (frag != skb_shinfo(skb)->nr_frags) {
skb              3919 drivers/net/ethernet/sun/cassini.c 		dev_kfree_skb_any(skb);
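
The cassini.c completion and cleanup entries (and the sungem/sunhme ones further down) all walk one mapping for the linear head plus one per fragment before releasing the skb. A sketch of that unmap loop, with the driver's ring bookkeeping reduced to parallel address/length arrays:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static void tx_unmap_sketch(struct device *dma_dev, struct sk_buff *skb,
			    dma_addr_t *addrs, unsigned int *lens)
{
	int f;

	/* entry 0 is the linear head, entries 1..nr_frags the page frags */
	for (f = 0; f <= skb_shinfo(skb)->nr_frags; f++)
		dma_unmap_page(dma_dev, addrs[f], lens[f], DMA_TO_DEVICE);

	dev_kfree_skb_any(skb);	/* safe from both IRQ and process context */
}
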
skb                95 drivers/net/ethernet/sun/ldmvsw.c static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
skb               103 drivers/net/ethernet/sun/ldmvsw.c static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               115 drivers/net/ethernet/sun/ldmvsw.c static netdev_tx_t vsw_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               117 drivers/net/ethernet/sun/ldmvsw.c 	return sunvnet_start_xmit_common(skb, dev, vsw_tx_port_find);
skb              3260 drivers/net/ethernet/sun/niu.c static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
skb              3263 drivers/net/ethernet/sun/niu.c 	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
skb              3265 drivers/net/ethernet/sun/niu.c 	skb->len += size;
skb              3266 drivers/net/ethernet/sun/niu.c 	skb->data_len += size;
skb              3267 drivers/net/ethernet/sun/niu.c 	skb->truesize += truesize;
skb              3409 drivers/net/ethernet/sun/niu.c 	struct sk_buff *skb;
skb              3412 drivers/net/ethernet/sun/niu.c 	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
skb              3413 drivers/net/ethernet/sun/niu.c 	if (unlikely(!skb))
skb              3446 drivers/net/ethernet/sun/niu.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3448 drivers/net/ethernet/sun/niu.c 				skb_checksum_none_assert(skb);
skb              3450 drivers/net/ethernet/sun/niu.c 			append_size = append_size - skb->len;
skb              3452 drivers/net/ethernet/sun/niu.c 		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
skb              3472 drivers/net/ethernet/sun/niu.c 	__pskb_pull_tail(skb, len);
skb              3474 drivers/net/ethernet/sun/niu.c 	rh = (struct rx_pkt_hdr1 *) skb->data;
skb              3476 drivers/net/ethernet/sun/niu.c 		skb_set_hash(skb,
skb              3482 drivers/net/ethernet/sun/niu.c 	skb_pull(skb, sizeof(*rh));
skb              3485 drivers/net/ethernet/sun/niu.c 	rp->rx_bytes += skb->len;
skb              3487 drivers/net/ethernet/sun/niu.c 	skb->protocol = eth_type_trans(skb, np->dev);
skb              3488 drivers/net/ethernet/sun/niu.c 	skb_record_rx_queue(skb, rp->rx_channel);
skb              3489 drivers/net/ethernet/sun/niu.c 	napi_gro_receive(napi, skb);
skb              3543 drivers/net/ethernet/sun/niu.c 	struct sk_buff *skb = tb->skb;
skb              3548 drivers/net/ethernet/sun/niu.c 	tp = (struct tx_pkt_hdr *) skb->data;
skb              3555 drivers/net/ethernet/sun/niu.c 	len = skb_headlen(skb);
skb              3562 drivers/net/ethernet/sun/niu.c 	tb->skb = NULL;
skb              3568 drivers/net/ethernet/sun/niu.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              3570 drivers/net/ethernet/sun/niu.c 		BUG_ON(tb->skb != NULL);
skb              3572 drivers/net/ethernet/sun/niu.c 				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb              3577 drivers/net/ethernet/sun/niu.c 	dev_kfree_skb(skb);
skb              4280 drivers/net/ethernet/sun/niu.c 			if (rp->tx_buffs[i].skb)
skb              6470 drivers/net/ethernet/sun/niu.c 				if (rp->tx_buffs[j].skb)
skb              6542 drivers/net/ethernet/sun/niu.c static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
skb              6560 drivers/net/ethernet/sun/niu.c 	switch (skb->protocol) {
skb              6562 drivers/net/ethernet/sun/niu.c 		ip_proto = ip_hdr(skb)->protocol;
skb              6563 drivers/net/ethernet/sun/niu.c 		ihl = ip_hdr(skb)->ihl;
skb              6566 drivers/net/ethernet/sun/niu.c 		ip_proto = ipv6_hdr(skb)->nexthdr;
skb              6576 drivers/net/ethernet/sun/niu.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              6584 drivers/net/ethernet/sun/niu.c 		start = skb_checksum_start_offset(skb) -
skb              6586 drivers/net/ethernet/sun/niu.c 		stuff = start + skb->csum_offset;
skb              6592 drivers/net/ethernet/sun/niu.c 	l3off = skb_network_offset(skb) -
skb              6607 drivers/net/ethernet/sun/niu.c static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
skb              6620 drivers/net/ethernet/sun/niu.c 	i = skb_get_queue_mapping(skb);
skb              6624 drivers/net/ethernet/sun/niu.c 	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
skb              6631 drivers/net/ethernet/sun/niu.c 	if (eth_skb_pad(skb))
skb              6635 drivers/net/ethernet/sun/niu.c 	if (skb_headroom(skb) < len) {
skb              6638 drivers/net/ethernet/sun/niu.c 		skb_new = skb_realloc_headroom(skb, len);
skb              6641 drivers/net/ethernet/sun/niu.c 		kfree_skb(skb);
skb              6642 drivers/net/ethernet/sun/niu.c 		skb = skb_new;
skb              6644 drivers/net/ethernet/sun/niu.c 		skb_orphan(skb);
skb              6646 drivers/net/ethernet/sun/niu.c 	align = ((unsigned long) skb->data & (16 - 1));
skb              6649 drivers/net/ethernet/sun/niu.c 	ehdr = (struct ethhdr *) skb->data;
skb              6650 drivers/net/ethernet/sun/niu.c 	tp = skb_push(skb, headroom);
skb              6652 drivers/net/ethernet/sun/niu.c 	len = skb->len - sizeof(struct tx_pkt_hdr);
skb              6653 drivers/net/ethernet/sun/niu.c 	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
skb              6656 drivers/net/ethernet/sun/niu.c 	len = skb_headlen(skb);
skb              6657 drivers/net/ethernet/sun/niu.c 	mapping = np->ops->map_single(np->device, skb->data,
skb              6662 drivers/net/ethernet/sun/niu.c 	rp->tx_buffs[prod].skb = skb;
skb              6673 drivers/net/ethernet/sun/niu.c 	nfg = skb_shinfo(skb)->nr_frags;
skb              6693 drivers/net/ethernet/sun/niu.c 	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
skb              6694 drivers/net/ethernet/sun/niu.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              6701 drivers/net/ethernet/sun/niu.c 		rp->tx_buffs[prod].skb = NULL;
skb              6726 drivers/net/ethernet/sun/niu.c 	kfree_skb(skb);
skb              2835 drivers/net/ethernet/sun/niu.h 	struct sk_buff *skb;
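
niu_compute_tx_flags() above, like the CHECKSUM_PARTIAL branches in cassini, sungem, and sunhme, programs hardware checksum offload from two offsets: where the sum starts and where the 16-bit result must be written ("stuff"). A minimal sketch of deriving them (relative to skb->data; niu additionally rebases them onto its internal header):

#include <linux/skbuff.h>
#include <linux/printk.h>

static void tx_csum_offsets_sketch(struct sk_buff *skb)
{
	u32 start, stuff;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return;

	start = skb_checksum_start_offset(skb);	/* bytes from skb->data */
	stuff = start + skb->csum_offset;	/* where the result lands */

	pr_debug("csum offload: start=%u stuff=%u\n", start, stuff);
}
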
skb               228 drivers/net/ethernet/sun/sunbmac.c 		struct sk_buff *skb;
skb               230 drivers/net/ethernet/sun/sunbmac.c 		skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
skb               231 drivers/net/ethernet/sun/sunbmac.c 		if (!skb)
skb               234 drivers/net/ethernet/sun/sunbmac.c 		bp->rx_skbs[i] = skb;
skb               237 drivers/net/ethernet/sun/sunbmac.c 		skb_put(skb, ETH_FRAME_LEN);
skb               238 drivers/net/ethernet/sun/sunbmac.c 		skb_reserve(skb, 34);
skb               242 drivers/net/ethernet/sun/sunbmac.c 				       skb->data,
skb               767 drivers/net/ethernet/sun/sunbmac.c 		struct sk_buff *skb;
skb               775 drivers/net/ethernet/sun/sunbmac.c 		skb = bp->tx_skbs[elem];
skb               777 drivers/net/ethernet/sun/sunbmac.c 		dev->stats.tx_bytes += skb->len;
skb               779 drivers/net/ethernet/sun/sunbmac.c 				 this->tx_addr, skb->len,
skb               782 drivers/net/ethernet/sun/sunbmac.c 		DTX(("skb(%p) ", skb));
skb               784 drivers/net/ethernet/sun/sunbmac.c 		dev_consume_skb_irq(skb);
skb               808 drivers/net/ethernet/sun/sunbmac.c 		struct sk_buff *skb;
skb               823 drivers/net/ethernet/sun/sunbmac.c 		skb = bp->rx_skbs[elem];
skb               849 drivers/net/ethernet/sun/sunbmac.c 			skb_trim(skb, len);
skb               862 drivers/net/ethernet/sun/sunbmac.c 			skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
skb               871 drivers/net/ethernet/sun/sunbmac.c 			skb = copy_skb;
skb               875 drivers/net/ethernet/sun/sunbmac.c 		skb->protocol = eth_type_trans(skb, bp->dev);
skb               876 drivers/net/ethernet/sun/sunbmac.c 		netif_rx(skb);
skb               954 drivers/net/ethernet/sun/sunbmac.c bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               960 drivers/net/ethernet/sun/sunbmac.c 	len = skb->len;
skb               961 drivers/net/ethernet/sun/sunbmac.c 	mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
skb               969 drivers/net/ethernet/sun/sunbmac.c 	bp->tx_skbs[entry] = skb;
skb               326 drivers/net/ethernet/sun/sunbmac.h 	struct sk_buff *skb;
skb               328 drivers/net/ethernet/sun/sunbmac.h 	skb = alloc_skb(length + 64, gfp_flags);
skb               329 drivers/net/ethernet/sun/sunbmac.h 	if(skb) {
skb               330 drivers/net/ethernet/sun/sunbmac.h 		int offset = ALIGNED_RX_SKB_ADDR(skb->data);
skb               333 drivers/net/ethernet/sun/sunbmac.h 			skb_reserve(skb, offset);
skb               335 drivers/net/ethernet/sun/sunbmac.h 	return skb;
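
big_mac_alloc_skb() above (and gem_alloc_skb() below) over-allocate and then skb_reserve() up to the next aligned boundary so skb->data meets the chip's RX buffer alignment. A sketch of the trick; the 64-byte alignment is a placeholder for the device's actual requirement:

#include <linux/skbuff.h>

#define RX_ALIGN	64	/* placeholder DMA alignment */

static struct sk_buff *aligned_rx_skb_sketch(unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(length + RX_ALIGN, gfp);

	if (skb) {
		unsigned long off = (unsigned long)skb->data & (RX_ALIGN - 1);

		if (off)
			skb_reserve(skb, RX_ALIGN - off);
	}
	return skb;
}
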
skb               650 drivers/net/ethernet/sun/sungem.c 		struct sk_buff *skb;
skb               659 drivers/net/ethernet/sun/sungem.c 		skb = gp->tx_skbs[entry];
skb               660 drivers/net/ethernet/sun/sungem.c 		if (skb_shinfo(skb)->nr_frags) {
skb               661 drivers/net/ethernet/sun/sungem.c 			int last = entry + skb_shinfo(skb)->nr_frags;
skb               677 drivers/net/ethernet/sun/sungem.c 		dev->stats.tx_bytes += skb->len;
skb               679 drivers/net/ethernet/sun/sungem.c 		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
skb               690 drivers/net/ethernet/sun/sungem.c 		dev_consume_skb_any(skb);
skb               748 drivers/net/ethernet/sun/sungem.c 	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
skb               750 drivers/net/ethernet/sun/sungem.c 	if (likely(skb)) {
skb               751 drivers/net/ethernet/sun/sungem.c 		unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
skb               752 drivers/net/ethernet/sun/sungem.c 		skb_reserve(skb, offset);
skb               754 drivers/net/ethernet/sun/sungem.c 	return skb;
skb               772 drivers/net/ethernet/sun/sungem.c 		struct sk_buff *skb;
skb               799 drivers/net/ethernet/sun/sungem.c 		skb = gp->rx_skbs[entry];
skb               837 drivers/net/ethernet/sun/sungem.c 			skb_trim(skb, len);
skb               849 drivers/net/ethernet/sun/sungem.c 			skb_copy_from_linear_data(skb, copy_skb->data, len);
skb               853 drivers/net/ethernet/sun/sungem.c 			skb = copy_skb;
skb               860 drivers/net/ethernet/sun/sungem.c 			skb->csum = csum_unfold(csum);
skb               861 drivers/net/ethernet/sun/sungem.c 			skb->ip_summed = CHECKSUM_COMPLETE;
skb               863 drivers/net/ethernet/sun/sungem.c 		skb->protocol = eth_type_trans(skb, gp->dev);
skb               865 drivers/net/ethernet/sun/sungem.c 		napi_gro_receive(&gp->napi, skb);
skb              1000 drivers/net/ethernet/sun/sungem.c static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
skb              1008 drivers/net/ethernet/sun/sungem.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1009 drivers/net/ethernet/sun/sungem.c 		const u64 csum_start_off = skb_checksum_start_offset(skb);
skb              1010 drivers/net/ethernet/sun/sungem.c 		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
skb              1017 drivers/net/ethernet/sun/sungem.c 	if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
skb              1027 drivers/net/ethernet/sun/sungem.c 	gp->tx_skbs[entry] = skb;
skb              1029 drivers/net/ethernet/sun/sungem.c 	if (skb_shinfo(skb)->nr_frags == 0) {
skb              1034 drivers/net/ethernet/sun/sungem.c 		len = skb->len;
skb              1036 drivers/net/ethernet/sun/sungem.c 				       virt_to_page(skb->data),
skb              1037 drivers/net/ethernet/sun/sungem.c 				       offset_in_page(skb->data),
skb              1060 drivers/net/ethernet/sun/sungem.c 		first_len = skb_headlen(skb);
skb              1061 drivers/net/ethernet/sun/sungem.c 		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
skb              1062 drivers/net/ethernet/sun/sungem.c 					     offset_in_page(skb->data),
skb              1066 drivers/net/ethernet/sun/sungem.c 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
skb              1067 drivers/net/ethernet/sun/sungem.c 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
skb              1076 drivers/net/ethernet/sun/sungem.c 			if (frag == skb_shinfo(skb)->nr_frags - 1)
skb              1111 drivers/net/ethernet/sun/sungem.c 		       dev->name, entry, skb->len);
skb              1578 drivers/net/ethernet/sun/sungem.c 	struct sk_buff *skb;
skb              1587 drivers/net/ethernet/sun/sungem.c 			skb = gp->rx_skbs[i];
skb              1592 drivers/net/ethernet/sun/sungem.c 			dev_kfree_skb_any(skb);
skb              1605 drivers/net/ethernet/sun/sungem.c 			skb = gp->tx_skbs[i];
skb              1608 drivers/net/ethernet/sun/sungem.c 			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
skb              1617 drivers/net/ethernet/sun/sungem.c 				if (frag != skb_shinfo(skb)->nr_frags)
skb              1620 drivers/net/ethernet/sun/sungem.c 			dev_kfree_skb_any(skb);
skb              1640 drivers/net/ethernet/sun/sungem.c 		struct sk_buff *skb;
skb              1643 drivers/net/ethernet/sun/sungem.c 		skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
skb              1644 drivers/net/ethernet/sun/sungem.c 		if (!skb) {
skb              1650 drivers/net/ethernet/sun/sungem.c 		gp->rx_skbs[i] = skb;
skb              1651 drivers/net/ethernet/sun/sungem.c 		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
skb              1653 drivers/net/ethernet/sun/sungem.c 					virt_to_page(skb->data),
skb              1654 drivers/net/ethernet/sun/sungem.c 					offset_in_page(skb->data),
skb              1660 drivers/net/ethernet/sun/sungem.c 		skb_reserve(skb, RX_OFFSET);
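
The sungem and sunhme RX loops use the classic copybreak pattern: small packets are copied into a fresh skb so the large, already-mapped ring buffer can be reused, while big packets give up their buffer. A sketch under an assumed threshold:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define COPYBREAK	256	/* threshold is a placeholder */

static struct sk_buff *rx_copybreak_sketch(struct net_device *dev,
					   struct sk_buff *ring_skb,
					   unsigned int len)
{
	struct sk_buff *copy_skb;

	if (len >= COPYBREAK)
		return NULL;	/* caller swaps a new skb into the ring */

	copy_skb = netdev_alloc_skb(dev, len + 2);
	if (!copy_skb)
		return NULL;

	skb_reserve(copy_skb, 2);	/* align the IP header */
	skb_put(copy_skb, len);
	skb_copy_from_linear_data(ring_skb, copy_skb->data, len);
	return copy_skb;		/* ring_skb stays in the ring */
}
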
skb              1204 drivers/net/ethernet/sun/sunhme.c 			struct sk_buff *skb = hp->rx_skbs[i];
skb              1212 drivers/net/ethernet/sun/sunhme.c 			dev_kfree_skb_any(skb);
skb              1219 drivers/net/ethernet/sun/sunhme.c 			struct sk_buff *skb = hp->tx_skbs[i];
skb              1226 drivers/net/ethernet/sun/sunhme.c 			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
skb              1240 drivers/net/ethernet/sun/sunhme.c 				if (frag != skb_shinfo(skb)->nr_frags)
skb              1244 drivers/net/ethernet/sun/sunhme.c 			dev_kfree_skb_any(skb);
skb              1265 drivers/net/ethernet/sun/sunhme.c 		struct sk_buff *skb;
skb              1268 drivers/net/ethernet/sun/sunhme.c 		skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
skb              1269 drivers/net/ethernet/sun/sunhme.c 		if (!skb) {
skb              1273 drivers/net/ethernet/sun/sunhme.c 		hp->rx_skbs[i] = skb;
skb              1276 drivers/net/ethernet/sun/sunhme.c 		skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
skb              1277 drivers/net/ethernet/sun/sunhme.c 		mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
skb              1280 drivers/net/ethernet/sun/sunhme.c 			dev_kfree_skb_any(skb);
skb              1287 drivers/net/ethernet/sun/sunhme.c 		skb_reserve(skb, RX_OFFSET);
skb              1929 drivers/net/ethernet/sun/sunhme.c 		struct sk_buff *skb;
skb              1938 drivers/net/ethernet/sun/sunhme.c 		skb = hp->tx_skbs[elem];
skb              1939 drivers/net/ethernet/sun/sunhme.c 		if (skb_shinfo(skb)->nr_frags) {
skb              1942 drivers/net/ethernet/sun/sunhme.c 			last = elem + skb_shinfo(skb)->nr_frags;
skb              1949 drivers/net/ethernet/sun/sunhme.c 		dev->stats.tx_bytes += skb->len;
skb              1951 drivers/net/ethernet/sun/sunhme.c 		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
skb              1965 drivers/net/ethernet/sun/sunhme.c 		dev_consume_skb_irq(skb);
skb              2001 drivers/net/ethernet/sun/sunhme.c 		struct sk_buff *skb;
skb              2027 drivers/net/ethernet/sun/sunhme.c 		skb = hp->rx_skbs[elem];
skb              2056 drivers/net/ethernet/sun/sunhme.c 			skb_trim(skb, len);
skb              2068 drivers/net/ethernet/sun/sunhme.c 			skb_copy_from_linear_data(skb, copy_skb->data, len);
skb              2075 drivers/net/ethernet/sun/sunhme.c 			skb = copy_skb;
skb              2079 drivers/net/ethernet/sun/sunhme.c 		skb->csum = csum_unfold(~(__force __sum16)htons(csum));
skb              2080 drivers/net/ethernet/sun/sunhme.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb              2083 drivers/net/ethernet/sun/sunhme.c 		skb->protocol = eth_type_trans(skb, dev);
skb              2084 drivers/net/ethernet/sun/sunhme.c 		netif_rx(skb);
skb              2286 drivers/net/ethernet/sun/sunhme.c static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
skb              2294 drivers/net/ethernet/sun/sunhme.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              2295 drivers/net/ethernet/sun/sunhme.c 		const u32 csum_start_off = skb_checksum_start_offset(skb);
skb              2296 drivers/net/ethernet/sun/sunhme.c 		const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
skb              2305 drivers/net/ethernet/sun/sunhme.c  	if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
skb              2315 drivers/net/ethernet/sun/sunhme.c 	hp->tx_skbs[entry] = skb;
skb              2317 drivers/net/ethernet/sun/sunhme.c 	if (skb_shinfo(skb)->nr_frags == 0) {
skb              2320 drivers/net/ethernet/sun/sunhme.c 		len = skb->len;
skb              2321 drivers/net/ethernet/sun/sunhme.c 		mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
skb              2336 drivers/net/ethernet/sun/sunhme.c 		first_len = skb_headlen(skb);
skb              2337 drivers/net/ethernet/sun/sunhme.c 		first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
skb              2343 drivers/net/ethernet/sun/sunhme.c 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
skb              2344 drivers/net/ethernet/sun/sunhme.c 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
skb              2356 drivers/net/ethernet/sun/sunhme.c 			if (frag == skb_shinfo(skb)->nr_frags - 1)
skb              2385 drivers/net/ethernet/sun/sunhme.c 	dev_kfree_skb_any(skb);
skb               424 drivers/net/ethernet/sun/sunqe.c 		struct sk_buff *skb;
skb               439 drivers/net/ethernet/sun/sunqe.c 			skb = netdev_alloc_skb(dev, len + 2);
skb               440 drivers/net/ethernet/sun/sunqe.c 			if (skb == NULL) {
skb               443 drivers/net/ethernet/sun/sunqe.c 				skb_reserve(skb, 2);
skb               444 drivers/net/ethernet/sun/sunqe.c 				skb_put(skb, len);
skb               445 drivers/net/ethernet/sun/sunqe.c 				skb_copy_to_linear_data(skb, this_qbuf,
skb               447 drivers/net/ethernet/sun/sunqe.c 				skb->protocol = eth_type_trans(skb, qep->dev);
skb               448 drivers/net/ethernet/sun/sunqe.c 				netif_rx(skb);
skb               573 drivers/net/ethernet/sun/sunqe.c static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               585 drivers/net/ethernet/sun/sunqe.c 	len = skb->len;
skb               595 drivers/net/ethernet/sun/sunqe.c 	skb_copy_from_linear_data(skb, txbuf, len);
skb               619 drivers/net/ethernet/sun/sunqe.c 	dev_kfree_skb(skb);
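
qe_start_xmit() above takes the opposite approach to the scatter-gather drivers: the whole packet is copied into a preallocated DMA buffer and the skb freed immediately, so there is no per-packet mapping and no TX-completion unmap. A sketch, assuming a linear skb (the hardware has no scatter-gather) and with the descriptor kick elided:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static netdev_tx_t copy_xmit_sketch(struct sk_buff *skb,
				    struct net_device *dev, void *txbuf)
{
	unsigned int len = skb->len;

	skb_copy_from_linear_data(skb, txbuf, len);
	/* ... write len into the descriptor and kick the chip ... */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	dev_kfree_skb(skb);	/* data already copied out */
	return NETDEV_TX_OK;
}
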
skb               205 drivers/net/ethernet/sun/sunvnet.c static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
skb               207 drivers/net/ethernet/sun/sunvnet.c 	unsigned int hash = vnet_hashfn(skb->data);
skb               214 drivers/net/ethernet/sun/sunvnet.c 		if (ether_addr_equal(port->raddr, skb->data))
skb               228 drivers/net/ethernet/sun/sunvnet.c static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
skb               233 drivers/net/ethernet/sun/sunvnet.c 	return __tx_port_find(vp, skb);
skb               236 drivers/net/ethernet/sun/sunvnet.c static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               240 drivers/net/ethernet/sun/sunvnet.c 	struct vnet_port *port = __tx_port_find(vp, skb);
skb               249 drivers/net/ethernet/sun/sunvnet.c static netdev_tx_t vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               251 drivers/net/ethernet/sun/sunvnet.c 	return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find);
skb               292 drivers/net/ethernet/sun/sunvnet_common.c 	struct sk_buff *skb;
skb               295 drivers/net/ethernet/sun/sunvnet_common.c 	skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
skb               296 drivers/net/ethernet/sun/sunvnet_common.c 	if (unlikely(!skb))
skb               299 drivers/net/ethernet/sun/sunvnet_common.c 	addr = (unsigned long)skb->data;
skb               302 drivers/net/ethernet/sun/sunvnet_common.c 		skb_reserve(skb, off);
skb               304 drivers/net/ethernet/sun/sunvnet_common.c 	return skb;
skb               307 drivers/net/ethernet/sun/sunvnet_common.c static inline void vnet_fullcsum_ipv4(struct sk_buff *skb)
skb               309 drivers/net/ethernet/sun/sunvnet_common.c 	struct iphdr *iph = ip_hdr(skb);
skb               310 drivers/net/ethernet/sun/sunvnet_common.c 	int offset = skb_transport_offset(skb);
skb               312 drivers/net/ethernet/sun/sunvnet_common.c 	if (skb->protocol != htons(ETH_P_IP))
skb               317 drivers/net/ethernet/sun/sunvnet_common.c 	skb->ip_summed = CHECKSUM_NONE;
skb               318 drivers/net/ethernet/sun/sunvnet_common.c 	skb->csum_level = 1;
skb               319 drivers/net/ethernet/sun/sunvnet_common.c 	skb->csum = 0;
skb               321 drivers/net/ethernet/sun/sunvnet_common.c 		struct tcphdr *ptcp = tcp_hdr(skb);
skb               324 drivers/net/ethernet/sun/sunvnet_common.c 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb               326 drivers/net/ethernet/sun/sunvnet_common.c 						skb->len - offset, IPPROTO_TCP,
skb               327 drivers/net/ethernet/sun/sunvnet_common.c 						skb->csum);
skb               329 drivers/net/ethernet/sun/sunvnet_common.c 		struct udphdr *pudp = udp_hdr(skb);
skb               332 drivers/net/ethernet/sun/sunvnet_common.c 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb               334 drivers/net/ethernet/sun/sunvnet_common.c 						skb->len - offset, IPPROTO_UDP,
skb               335 drivers/net/ethernet/sun/sunvnet_common.c 						skb->csum);
skb               340 drivers/net/ethernet/sun/sunvnet_common.c static inline void vnet_fullcsum_ipv6(struct sk_buff *skb)
skb               342 drivers/net/ethernet/sun/sunvnet_common.c 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               343 drivers/net/ethernet/sun/sunvnet_common.c 	int offset = skb_transport_offset(skb);
skb               345 drivers/net/ethernet/sun/sunvnet_common.c 	if (skb->protocol != htons(ETH_P_IPV6))
skb               350 drivers/net/ethernet/sun/sunvnet_common.c 	skb->ip_summed = CHECKSUM_NONE;
skb               351 drivers/net/ethernet/sun/sunvnet_common.c 	skb->csum_level = 1;
skb               352 drivers/net/ethernet/sun/sunvnet_common.c 	skb->csum = 0;
skb               354 drivers/net/ethernet/sun/sunvnet_common.c 		struct tcphdr *ptcp = tcp_hdr(skb);
skb               357 drivers/net/ethernet/sun/sunvnet_common.c 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb               359 drivers/net/ethernet/sun/sunvnet_common.c 					      skb->len - offset, IPPROTO_TCP,
skb               360 drivers/net/ethernet/sun/sunvnet_common.c 					      skb->csum);
skb               362 drivers/net/ethernet/sun/sunvnet_common.c 		struct udphdr *pudp = udp_hdr(skb);
skb               365 drivers/net/ethernet/sun/sunvnet_common.c 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb               367 drivers/net/ethernet/sun/sunvnet_common.c 					      skb->len - offset, IPPROTO_UDP,
skb               368 drivers/net/ethernet/sun/sunvnet_common.c 					      skb->csum);
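
vnet_fullcsum_ipv4()/vnet_fullcsum_ipv6() above compute a transport checksum entirely in software: zero the checksum field, sum the payload with skb_checksum(), then fold in the pseudo-header. A sketch of the IPv4 TCP case, assuming the transport header offset is already set:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

static void fullcsum_tcp_sketch(struct sk_buff *skb, __be32 saddr,
				__be32 daddr)
{
	int offset = skb_transport_offset(skb);
	struct tcphdr *th = tcp_hdr(skb);

	skb->ip_summed = CHECKSUM_NONE;
	th->check = 0;
	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
	th->check = csum_tcpudp_magic(saddr, daddr, skb->len - offset,
				      IPPROTO_TCP, skb->csum);
}
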
skb               378 drivers/net/ethernet/sun/sunvnet_common.c 	struct sk_buff *skb;
skb               392 drivers/net/ethernet/sun/sunvnet_common.c 	skb = alloc_and_align_skb(dev, len);
skb               394 drivers/net/ethernet/sun/sunvnet_common.c 	if (unlikely(!skb)) {
skb               400 drivers/net/ethernet/sun/sunvnet_common.c 	skb_put(skb, copy_len);
skb               402 drivers/net/ethernet/sun/sunvnet_common.c 		       skb->data, copy_len, 0,
skb               409 drivers/net/ethernet/sun/sunvnet_common.c 	skb_pull(skb, VNET_PACKET_SKIP);
skb               410 drivers/net/ethernet/sun/sunvnet_common.c 	skb_trim(skb, len);
skb               411 drivers/net/ethernet/sun/sunvnet_common.c 	skb->protocol = eth_type_trans(skb, dev);
skb               416 drivers/net/ethernet/sun/sunvnet_common.c 		skb_reset_network_header(skb);
skb               419 drivers/net/ethernet/sun/sunvnet_common.c 			if (skb->protocol == ETH_P_IP) {
skb               420 drivers/net/ethernet/sun/sunvnet_common.c 				struct iphdr *iph = ip_hdr(skb);
skb               427 drivers/net/ethernet/sun/sunvnet_common.c 		    skb->ip_summed == CHECKSUM_NONE) {
skb               428 drivers/net/ethernet/sun/sunvnet_common.c 			if (skb->protocol == htons(ETH_P_IP)) {
skb               429 drivers/net/ethernet/sun/sunvnet_common.c 				struct iphdr *iph = ip_hdr(skb);
skb               432 drivers/net/ethernet/sun/sunvnet_common.c 				skb_set_transport_header(skb, ihl);
skb               433 drivers/net/ethernet/sun/sunvnet_common.c 				vnet_fullcsum_ipv4(skb);
skb               435 drivers/net/ethernet/sun/sunvnet_common.c 			} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               436 drivers/net/ethernet/sun/sunvnet_common.c 				skb_set_transport_header(skb,
skb               438 drivers/net/ethernet/sun/sunvnet_common.c 				vnet_fullcsum_ipv6(skb);
skb               443 drivers/net/ethernet/sun/sunvnet_common.c 			skb->ip_summed = CHECKSUM_PARTIAL;
skb               444 drivers/net/ethernet/sun/sunvnet_common.c 			skb->csum_level = 0;
skb               446 drivers/net/ethernet/sun/sunvnet_common.c 				skb->csum_level = 1;
skb               450 drivers/net/ethernet/sun/sunvnet_common.c 	skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
skb               452 drivers/net/ethernet/sun/sunvnet_common.c 	if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest)))
skb               458 drivers/net/ethernet/sun/sunvnet_common.c 	napi_gro_receive(&port->napi, skb);
skb               462 drivers/net/ethernet/sun/sunvnet_common.c 	kfree_skb(skb);
skb               992 drivers/net/ethernet/sun/sunvnet_common.c 	struct sk_buff *skb = NULL;
skb              1011 drivers/net/ethernet/sun/sunvnet_common.c 		if (port->tx_bufs[txi].skb) {
skb              1015 drivers/net/ethernet/sun/sunvnet_common.c 			BUG_ON(port->tx_bufs[txi].skb->next);
skb              1017 drivers/net/ethernet/sun/sunvnet_common.c 			port->tx_bufs[txi].skb->next = skb;
skb              1018 drivers/net/ethernet/sun/sunvnet_common.c 			skb = port->tx_bufs[txi].skb;
skb              1019 drivers/net/ethernet/sun/sunvnet_common.c 			port->tx_bufs[txi].skb = NULL;
skb              1029 drivers/net/ethernet/sun/sunvnet_common.c 	return skb;
skb              1032 drivers/net/ethernet/sun/sunvnet_common.c static inline void vnet_free_skbs(struct sk_buff *skb)
skb              1036 drivers/net/ethernet/sun/sunvnet_common.c 	while (skb) {
skb              1037 drivers/net/ethernet/sun/sunvnet_common.c 		next = skb->next;
skb              1038 drivers/net/ethernet/sun/sunvnet_common.c 		skb->next = NULL;
skb              1039 drivers/net/ethernet/sun/sunvnet_common.c 		dev_kfree_skb(skb);
skb              1040 drivers/net/ethernet/sun/sunvnet_common.c 		skb = next;
skb              1064 drivers/net/ethernet/sun/sunvnet_common.c static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
skb              1071 drivers/net/ethernet/sun/sunvnet_common.c 	blen = skb_headlen(skb);
skb              1077 drivers/net/ethernet/sun/sunvnet_common.c 	err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
skb              1083 drivers/net/ethernet/sun/sunvnet_common.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1084 drivers/net/ethernet/sun/sunvnet_common.c 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
skb              1108 drivers/net/ethernet/sun/sunvnet_common.c static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
skb              1113 drivers/net/ethernet/sun/sunvnet_common.c 	len = skb->len;
skb              1116 drivers/net/ethernet/sun/sunvnet_common.c 		pad += ETH_ZLEN - skb->len;
skb              1123 drivers/net/ethernet/sun/sunvnet_common.c 	docopy = skb_shinfo(skb)->nr_frags >= ncookies;
skb              1124 drivers/net/ethernet/sun/sunvnet_common.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1125 drivers/net/ethernet/sun/sunvnet_common.c 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
skb              1129 drivers/net/ethernet/sun/sunvnet_common.c 	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
skb              1130 drivers/net/ethernet/sun/sunvnet_common.c 	    skb_tailroom(skb) < pad ||
skb              1131 drivers/net/ethernet/sun/sunvnet_common.c 	    skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
skb              1135 drivers/net/ethernet/sun/sunvnet_common.c 		len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
skb              1136 drivers/net/ethernet/sun/sunvnet_common.c 		nskb = alloc_and_align_skb(skb->dev, len);
skb              1138 drivers/net/ethernet/sun/sunvnet_common.c 			dev_kfree_skb(skb);
skb              1143 drivers/net/ethernet/sun/sunvnet_common.c 		nskb->protocol = skb->protocol;
skb              1144 drivers/net/ethernet/sun/sunvnet_common.c 		offset = skb_mac_header(skb) - skb->data;
skb              1146 drivers/net/ethernet/sun/sunvnet_common.c 		offset = skb_network_header(skb) - skb->data;
skb              1148 drivers/net/ethernet/sun/sunvnet_common.c 		offset = skb_transport_header(skb) - skb->data;
skb              1152 drivers/net/ethernet/sun/sunvnet_common.c 		nskb->csum_offset = skb->csum_offset;
skb              1153 drivers/net/ethernet/sun/sunvnet_common.c 		nskb->ip_summed = skb->ip_summed;
skb              1155 drivers/net/ethernet/sun/sunvnet_common.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1156 drivers/net/ethernet/sun/sunvnet_common.c 			start = skb_checksum_start_offset(skb);
skb              1161 drivers/net/ethernet/sun/sunvnet_common.c 			if (skb_copy_bits(skb, 0, nskb->data, start)) {
skb              1163 drivers/net/ethernet/sun/sunvnet_common.c 				dev_kfree_skb(skb);
skb              1168 drivers/net/ethernet/sun/sunvnet_common.c 			*(__sum16 *)(skb->data + offset) = 0;
skb              1169 drivers/net/ethernet/sun/sunvnet_common.c 			csum = skb_copy_and_csum_bits(skb, start,
skb              1171 drivers/net/ethernet/sun/sunvnet_common.c 						      skb->len - start, 0);
skb              1174 drivers/net/ethernet/sun/sunvnet_common.c 			if (skb->protocol == htons(ETH_P_IP)) {
skb              1181 drivers/net/ethernet/sun/sunvnet_common.c 								 skb->len - start,
skb              1185 drivers/net/ethernet/sun/sunvnet_common.c 			} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb              1192 drivers/net/ethernet/sun/sunvnet_common.c 							       skb->len - start,
skb              1202 drivers/net/ethernet/sun/sunvnet_common.c 		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
skb              1204 drivers/net/ethernet/sun/sunvnet_common.c 			dev_kfree_skb(skb);
skb              1207 drivers/net/ethernet/sun/sunvnet_common.c 		(void)skb_put(nskb, skb->len);
skb              1208 drivers/net/ethernet/sun/sunvnet_common.c 		if (skb_is_gso(skb)) {
skb              1209 drivers/net/ethernet/sun/sunvnet_common.c 			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
skb              1210 drivers/net/ethernet/sun/sunvnet_common.c 			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
skb              1212 drivers/net/ethernet/sun/sunvnet_common.c 		nskb->queue_mapping = skb->queue_mapping;
skb              1213 drivers/net/ethernet/sun/sunvnet_common.c 		dev_kfree_skb(skb);
skb              1214 drivers/net/ethernet/sun/sunvnet_common.c 		skb = nskb;
skb              1216 drivers/net/ethernet/sun/sunvnet_common.c 	return skb;
skb              1220 drivers/net/ethernet/sun/sunvnet_common.c vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
skb              1230 drivers/net/ethernet/sun/sunvnet_common.c 	int hlen = skb_transport_header(skb) - skb_mac_header(skb);
skb              1233 drivers/net/ethernet/sun/sunvnet_common.c 	if (skb->protocol == htons(ETH_P_IP))
skb              1234 drivers/net/ethernet/sun/sunvnet_common.c 		proto = ip_hdr(skb)->protocol;
skb              1235 drivers/net/ethernet/sun/sunvnet_common.c 	else if (skb->protocol == htons(ETH_P_IPV6))
skb              1236 drivers/net/ethernet/sun/sunvnet_common.c 		proto = ipv6_hdr(skb)->nexthdr;
skb              1239 drivers/net/ethernet/sun/sunvnet_common.c 		hlen += tcp_hdr(skb)->doff * 4;
skb              1244 drivers/net/ethernet/sun/sunvnet_common.c 		       "protocol %d tproto %d\n", skb->protocol, proto);
skb              1249 drivers/net/ethernet/sun/sunvnet_common.c 	gso_size = skb_shinfo(skb)->gso_size;
skb              1250 drivers/net/ethernet/sun/sunvnet_common.c 	gso_type = skb_shinfo(skb)->gso_type;
skb              1251 drivers/net/ethernet/sun/sunvnet_common.c 	gso_segs = skb_shinfo(skb)->gso_segs;
skb              1254 drivers/net/ethernet/sun/sunvnet_common.c 		gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);
skb              1261 drivers/net/ethernet/sun/sunvnet_common.c 		if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
skb              1266 drivers/net/ethernet/sun/sunvnet_common.c 	maclen = skb_network_header(skb) - skb_mac_header(skb);
skb              1267 drivers/net/ethernet/sun/sunvnet_common.c 	skb_pull(skb, maclen);
skb              1270 drivers/net/ethernet/sun/sunvnet_common.c 		if (skb_unclone(skb, GFP_ATOMIC))
skb              1274 drivers/net/ethernet/sun/sunvnet_common.c 		skb_shinfo(skb)->gso_size = datalen;
skb              1275 drivers/net/ethernet/sun/sunvnet_common.c 		skb_shinfo(skb)->gso_segs = gso_segs;
skb              1277 drivers/net/ethernet/sun/sunvnet_common.c 	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
skb              1281 drivers/net/ethernet/sun/sunvnet_common.c 	skb_push(skb, maclen);
skb              1282 drivers/net/ethernet/sun/sunvnet_common.c 	skb_reset_mac_header(skb);
skb              1301 drivers/net/ethernet/sun/sunvnet_common.c 		memcpy(skb_mac_header(curr), skb_mac_header(skb),
skb              1317 drivers/net/ethernet/sun/sunvnet_common.c 		dev_kfree_skb_any(skb);
skb              1321 drivers/net/ethernet/sun/sunvnet_common.c 	dev_kfree_skb_any(skb);
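
The core of vnet_handle_offloads() above is software GSO: when the peer cannot take a full GSO packet, segment it with TSO masked out of the feature set and transmit each resulting skb. A sketch of that step; in this era the segments come back as a ->next-linked list, and the actual per-segment transmit is reduced to a placeholder free:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/err.h>

static int gso_segment_sketch(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs, *curr;

	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		curr = segs;
		segs = segs->next;
		curr->next = NULL;
		/* ... hand 'curr' to the normal transmit path ... */
		dev_kfree_skb_any(curr);	/* placeholder for xmit */
	}

	dev_kfree_skb_any(skb);	/* the original GSO skb is now redundant */
	return 0;
}
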
skb              1326 drivers/net/ethernet/sun/sunvnet_common.c sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
skb              1340 drivers/net/ethernet/sun/sunvnet_common.c 	port = vnet_tx_port(skb, dev);
skb              1344 drivers/net/ethernet/sun/sunvnet_common.c 	if (skb_is_gso(skb) && skb->len > port->tsolen) {
skb              1345 drivers/net/ethernet/sun/sunvnet_common.c 		err = vnet_handle_offloads(port, skb, vnet_tx_port);
skb              1350 drivers/net/ethernet/sun/sunvnet_common.c 	if (!skb_is_gso(skb) && skb->len > port->rmtu) {
skb              1356 drivers/net/ethernet/sun/sunvnet_common.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb              1362 drivers/net/ethernet/sun/sunvnet_common.c 			fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
skb              1363 drivers/net/ethernet/sun/sunvnet_common.c 			fl4.daddr = ip_hdr(skb)->daddr;
skb              1364 drivers/net/ethernet/sun/sunvnet_common.c 			fl4.saddr = ip_hdr(skb)->saddr;
skb              1368 drivers/net/ethernet/sun/sunvnet_common.c 				skb_dst_set(skb, &rt->dst);
skb              1369 drivers/net/ethernet/sun/sunvnet_common.c 				icmp_send(skb, ICMP_DEST_UNREACH,
skb              1375 drivers/net/ethernet/sun/sunvnet_common.c 		else if (skb->protocol == htons(ETH_P_IPV6))
skb              1376 drivers/net/ethernet/sun/sunvnet_common.c 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
skb              1381 drivers/net/ethernet/sun/sunvnet_common.c 	skb = vnet_skb_shape(skb, 2);
skb              1383 drivers/net/ethernet/sun/sunvnet_common.c 	if (unlikely(!skb))
skb              1386 drivers/net/ethernet/sun/sunvnet_common.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1387 drivers/net/ethernet/sun/sunvnet_common.c 		if (skb->protocol == htons(ETH_P_IP))
skb              1388 drivers/net/ethernet/sun/sunvnet_common.c 			vnet_fullcsum_ipv4(skb);
skb              1390 drivers/net/ethernet/sun/sunvnet_common.c 		else if (skb->protocol == htons(ETH_P_IPV6))
skb              1391 drivers/net/ethernet/sun/sunvnet_common.c 			vnet_fullcsum_ipv6(skb);
skb              1396 drivers/net/ethernet/sun/sunvnet_common.c 	i = skb_get_queue_mapping(skb);
skb              1416 drivers/net/ethernet/sun/sunvnet_common.c 	BUG_ON(port->tx_bufs[txi].skb);
skb              1418 drivers/net/ethernet/sun/sunvnet_common.c 	len = skb->len;
skb              1422 drivers/net/ethernet/sun/sunvnet_common.c 	err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
skb              1429 drivers/net/ethernet/sun/sunvnet_common.c 	port->tx_bufs[txi].skb = skb;
skb              1430 drivers/net/ethernet/sun/sunvnet_common.c 	skb = NULL;
skb              1450 drivers/net/ethernet/sun/sunvnet_common.c 		if (skb_is_gso(port->tx_bufs[txi].skb)) {
skb              1451 drivers/net/ethernet/sun/sunvnet_common.c 			dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
skb              1499 drivers/net/ethernet/sun/sunvnet_common.c 		skb = port->tx_bufs[txi].skb;
skb              1500 drivers/net/ethernet/sun/sunvnet_common.c 		port->tx_bufs[txi].skb = NULL;
skb              1509 drivers/net/ethernet/sun/sunvnet_common.c 	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
skb              1511 drivers/net/ethernet/sun/sunvnet_common.c 	port->stats.tx_bytes += port->tx_bufs[txi].skb->len;
skb              1535 drivers/net/ethernet/sun/sunvnet_common.c 	dev_kfree_skb(skb);
skb              1700 drivers/net/ethernet/sun/sunvnet_common.c 		void *skb = port->tx_bufs[i].skb;
skb              1702 drivers/net/ethernet/sun/sunvnet_common.c 		if (!skb)
skb              1710 drivers/net/ethernet/sun/sunvnet_common.c 		dev_kfree_skb(skb);
skb              1711 drivers/net/ethernet/sun/sunvnet_common.c 		port->tx_bufs[i].skb = NULL;
skb                32 drivers/net/ethernet/sun/sunvnet_common.h 	struct sk_buff		*skb;
skb               140 drivers/net/ethernet/sun/sunvnet_common.h sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
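The sunvnet_common.c entries above (vnet_handle_offloads) segment an oversized TSO frame in software by calling skb_gso_segment() with NETIF_F_TSO masked out of the device features, then walking the returned segment list. A minimal sketch of that fallback, assuming a hypothetical per-segment transmit helper xmit_one():

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int sw_gso_fallback(struct sk_buff *skb, struct net_device *dev,
			   int (*xmit_one)(struct sk_buff *, struct net_device *))
{
	struct sk_buff *segs, *curr, *next;

	/* Ask the stack to split the frame as if the NIC had no TSO. */
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	for (curr = segs; curr; curr = next) {
		next = curr->next;
		curr->next = NULL;		/* detach before handing off */
		xmit_one(curr, dev);
	}
	dev_kfree_skb_any(skb);			/* original skb is now redundant */
	return 0;
}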
skb               333 drivers/net/ethernet/synopsys/dwc-xlgmac-common.c 		      struct sk_buff *skb, bool tx_rx)
skb               335 drivers/net/ethernet/synopsys/dwc-xlgmac-common.c 	struct ethhdr *eth = (struct ethhdr *)skb->data;
skb               342 drivers/net/ethernet/synopsys/dwc-xlgmac-common.c 		   (tx_rx ? "TX" : "RX"), skb->len);
skb               348 drivers/net/ethernet/synopsys/dwc-xlgmac-common.c 	for (i = 0; i < skb->len; i += 32) {
skb               349 drivers/net/ethernet/synopsys/dwc-xlgmac-common.c 		unsigned int len = min(skb->len - i, 32U);
skb               351 drivers/net/ethernet/synopsys/dwc-xlgmac-common.c 		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
skb                36 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (desc_data->skb) {
skb                37 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		dev_kfree_skb_any(desc_data->skb);
skb                38 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		desc_data->skb = NULL;
skb                68 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		desc_data->state.skb = NULL;
skb               498 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			     struct sk_buff *skb)
skb               534 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		skb_dma = dma_map_single(pdata->dev, skb->data,
skb               555 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	for (datalen = skb_headlen(skb) - offset; datalen; ) {
skb               558 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
skb               579 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               583 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		frag = &skb_shinfo(skb)->frags[i];
skb               619 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	desc_data->skb = skb;
skb                63 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c static void xlgmac_prep_vlan(struct sk_buff *skb,
skb                66 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (skb_vlan_tag_present(skb))
skb                67 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
skb                70 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c static int xlgmac_prep_tso(struct sk_buff *skb,
skb                80 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	ret = skb_cow_head(skb, 0);
skb                84 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb                85 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	pkt_info->tcp_header_len = tcp_hdrlen(skb);
skb                86 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
skb                87 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	pkt_info->mss = skb_shinfo(skb)->gso_size;
skb                97 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
skb               103 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c static int xlgmac_is_tso(struct sk_buff *skb)
skb               105 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb               108 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (!skb_is_gso(skb))
skb               116 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			       struct sk_buff *skb,
skb               124 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	pkt_info->skb = skb;
skb               130 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	pkt_info->tx_bytes = skb->len;
skb               132 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (xlgmac_is_tso(skb)) {
skb               134 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
skb               152 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               159 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (skb_vlan_tag_present(skb)) {
skb               161 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
skb               175 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	for (len = skb_headlen(skb); len;) {
skb               180 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               181 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		frag = &skb_shinfo(skb)->frags[i];
skb               700 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
skb               714 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	XLGMAC_PR("skb->len = %d\n", skb->len);
skb               716 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	channel = pdata->channel_head + skb->queue_mapping;
skb               721 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (skb->len == 0) {
skb               724 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		dev_kfree_skb_any(skb);
skb               730 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);
skb               738 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	ret = xlgmac_prep_tso(skb, tx_pkt_info);
skb               742 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		dev_kfree_skb_any(skb);
skb               745 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	xlgmac_prep_vlan(skb, tx_pkt_info);
skb               747 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (!desc_ops->map_tx_skb(channel, skb)) {
skb               748 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		dev_kfree_skb_any(skb);
skb               759 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		xlgmac_print_pkt(netdev, skb, true);
skb               993 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	struct sk_buff *skb;
skb               996 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
skb               997 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (!skb)
skb              1012 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	skb_copy_to_linear_data(skb, packet, copy_len);
skb              1013 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	skb_put(skb, copy_len);
skb              1024 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb              1031 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	return skb;
skb              1120 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	struct sk_buff *skb;
skb              1139 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			skb = desc_data->state.skb;
skb              1144 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			skb = NULL;
skb              1182 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			dev_kfree_skb(skb);
skb              1191 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			if (dma_desc_len && !skb) {
skb              1192 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 				skb = xlgmac_create_skb(pdata, napi, desc_data,
skb              1194 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 				if (!skb)
skb              1205 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 					skb, skb_shinfo(skb)->nr_frags,
skb              1217 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (!skb)
skb              1223 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		    (skb->protocol == htons(ETH_P_8021Q)))
skb              1226 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (skb->len > max_len) {
skb              1229 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			dev_kfree_skb(skb);
skb              1234 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			xlgmac_print_pkt(netdev, skb, false);
skb              1236 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		skb_checksum_none_assert(skb);
skb              1240 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1245 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              1253 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			skb_set_hash(skb, pkt_info->rss_hash,
skb              1256 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		skb->dev = netdev;
skb              1257 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		skb->protocol = eth_type_trans(skb, netdev);
skb              1258 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		skb_record_rx_queue(skb, channel->queue_index);
skb              1260 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		napi_gro_receive(napi, skb);
skb              1270 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		desc_data->state.skb = skb;
skb               207 drivers/net/ethernet/synopsys/dwc-xlgmac.h 	struct sk_buff *skb;
skb               255 drivers/net/ethernet/synopsys/dwc-xlgmac.h 	struct sk_buff *skb;
skb               292 drivers/net/ethernet/synopsys/dwc-xlgmac.h 	struct sk_buff *skb;
skb               309 drivers/net/ethernet/synopsys/dwc-xlgmac.h 		struct sk_buff *skb;
skb               385 drivers/net/ethernet/synopsys/dwc-xlgmac.h 			  struct sk_buff *skb);
skb               645 drivers/net/ethernet/synopsys/dwc-xlgmac.h 		      struct sk_buff *skb, bool tx_rx);
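xlgmac_prep_tso() above derives the per-segment header length from skb_transport_offset() + tcp_hdrlen() after making the headers writable with skb_cow_head(). A condensed sketch of that computation; the tso_info struct is illustrative, not the driver's pkt_info:

#include <linux/skbuff.h>
#include <linux/tcp.h>

struct tso_info {
	unsigned int header_len;	/* L2+L3+L4 headers, repeated per segment */
	unsigned int payload_len;
	unsigned int mss;
};

static int prep_tso(struct sk_buff *skb, struct tso_info *info)
{
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	/* Headers get rewritten for every segment; make them writable. */
	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	info->payload_len = skb->len - info->header_len;
	info->mss = skb_shinfo(skb)->gso_size;
	return 0;
}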
skb              1039 drivers/net/ethernet/tehuti/tehuti.c 			dev_kfree_skb(dm->skb);
skb              1083 drivers/net/ethernet/tehuti/tehuti.c 	struct sk_buff *skb;
skb              1092 drivers/net/ethernet/tehuti/tehuti.c 		skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
skb              1093 drivers/net/ethernet/tehuti/tehuti.c 		if (!skb)
skb              1096 drivers/net/ethernet/tehuti/tehuti.c 		skb_reserve(skb, NET_IP_ALIGN);
skb              1101 drivers/net/ethernet/tehuti/tehuti.c 					 skb->data, f->m.pktsz,
skb              1103 drivers/net/ethernet/tehuti/tehuti.c 		dm->skb = skb;
skb              1130 drivers/net/ethernet/tehuti/tehuti.c 	     struct sk_buff *skb)
skb              1139 drivers/net/ethernet/tehuti/tehuti.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), GET_RXD_VLAN_TCI(rxd_vlan));
skb              1141 drivers/net/ethernet/tehuti/tehuti.c 	netif_receive_skb(skb);
skb              1195 drivers/net/ethernet/tehuti/tehuti.c 	struct sk_buff *skb, *skb2;
skb              1256 drivers/net/ethernet/tehuti/tehuti.c 		skb = dm->skb;
skb              1265 drivers/net/ethernet/tehuti/tehuti.c 			memcpy(skb2->data, skb->data, len);
skb              1267 drivers/net/ethernet/tehuti/tehuti.c 			skb = skb2;
skb              1277 drivers/net/ethernet/tehuti/tehuti.c 		skb_put(skb, len);
skb              1278 drivers/net/ethernet/tehuti/tehuti.c 		skb->protocol = eth_type_trans(skb, ndev);
skb              1282 drivers/net/ethernet/tehuti/tehuti.c 			skb_checksum_none_assert(skb);
skb              1284 drivers/net/ethernet/tehuti/tehuti.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1286 drivers/net/ethernet/tehuti/tehuti.c 		NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);
skb              1484 drivers/net/ethernet/tehuti/tehuti.c bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
skb              1489 drivers/net/ethernet/tehuti/tehuti.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1492 drivers/net/ethernet/tehuti/tehuti.c 	db->wptr->len = skb_headlen(skb);
skb              1493 drivers/net/ethernet/tehuti/tehuti.c 	db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
skb              1506 drivers/net/ethernet/tehuti/tehuti.c 		frag = &skb_shinfo(skb)->frags[i];
skb              1521 drivers/net/ethernet/tehuti/tehuti.c 	db->wptr->addr.skb = skb;
skb              1600 drivers/net/ethernet/tehuti/tehuti.c static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
skb              1611 drivers/net/ethernet/tehuti/tehuti.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1623 drivers/net/ethernet/tehuti/tehuti.c 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
skb              1626 drivers/net/ethernet/tehuti/tehuti.c 	if (skb_shinfo(skb)->gso_size) {
skb              1627 drivers/net/ethernet/tehuti/tehuti.c 		txd_mss = skb_shinfo(skb)->gso_size;
skb              1629 drivers/net/ethernet/tehuti/tehuti.c 		DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
skb              1633 drivers/net/ethernet/tehuti/tehuti.c 	if (skb_vlan_tag_present(skb)) {
skb              1635 drivers/net/ethernet/tehuti/tehuti.c 		txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
skb              1639 drivers/net/ethernet/tehuti/tehuti.c 	txdd->length = CPU_CHIP_SWAP16(skb->len);
skb              1649 drivers/net/ethernet/tehuti/tehuti.c 	bdx_tx_map_skb(priv, skb, txdd);
skb              1693 drivers/net/ethernet/tehuti/tehuti.c 	ndev->stats.tx_bytes += skb->len;
skb              1738 drivers/net/ethernet/tehuti/tehuti.c 		dev_consume_skb_irq(db->rptr->addr.skb);
skb              1783 drivers/net/ethernet/tehuti/tehuti.c 			dev_kfree_skb(db->rptr->addr.skb);
skb               173 drivers/net/ethernet/tehuti/tehuti.h 	struct sk_buff *skb;
skb               185 drivers/net/ethernet/tehuti/tehuti.h 	struct sk_buff *skb;
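The tehuti RX refill above over-allocates by NET_IP_ALIGN and then skb_reserve()s the same amount, so the IP header that follows the 14-byte Ethernet header lands on a 4-byte boundary. The same pattern in isolation:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_alloc_aligned(struct net_device *ndev, int pktsz)
{
	struct sk_buff *skb = netdev_alloc_skb(ndev, pktsz + NET_IP_ALIGN);

	if (!skb)
		return NULL;
	/* Shift skb->data so the IP header is 4-byte aligned. */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}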
skb               180 drivers/net/ethernet/ti/cpmac.c 	struct sk_buff *skb;
skb               249 drivers/net/ethernet/ti/cpmac.c static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
skb               253 drivers/net/ethernet/ti/cpmac.c 	printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
skb               254 drivers/net/ethernet/ti/cpmac.c 	for (i = 0; i < skb->len; i++) {
skb               258 drivers/net/ethernet/ti/cpmac.c 			printk("%s: data[%p]:", dev->name, skb->data + i);
skb               260 drivers/net/ethernet/ti/cpmac.c 		printk(" %02x", ((u8 *)skb->data)[i]);
skb               356 drivers/net/ethernet/ti/cpmac.c 	struct sk_buff *skb, *result = NULL;
skb               368 drivers/net/ethernet/ti/cpmac.c 	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
skb               369 drivers/net/ethernet/ti/cpmac.c 	if (likely(skb)) {
skb               370 drivers/net/ethernet/ti/cpmac.c 		skb_put(desc->skb, desc->datalen);
skb               371 drivers/net/ethernet/ti/cpmac.c 		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
skb               372 drivers/net/ethernet/ti/cpmac.c 		skb_checksum_none_assert(desc->skb);
skb               375 drivers/net/ethernet/ti/cpmac.c 		result = desc->skb;
skb               378 drivers/net/ethernet/ti/cpmac.c 		desc->skb = skb;
skb               379 drivers/net/ethernet/ti/cpmac.c 		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
skb               403 drivers/net/ethernet/ti/cpmac.c 	struct sk_buff *skb;
skb               440 drivers/net/ethernet/ti/cpmac.c 		skb = cpmac_rx_one(priv, desc);
skb               441 drivers/net/ethernet/ti/cpmac.c 		if (likely(skb)) {
skb               442 drivers/net/ethernet/ti/cpmac.c 			netif_receive_skb(skb);
skb               535 drivers/net/ethernet/ti/cpmac.c static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               545 drivers/net/ethernet/ti/cpmac.c 	if (unlikely(skb_padto(skb, ETH_ZLEN)))
skb               548 drivers/net/ethernet/ti/cpmac.c 	len = max_t(unsigned int, skb->len, ETH_ZLEN);
skb               549 drivers/net/ethernet/ti/cpmac.c 	queue = skb_get_queue_mapping(skb);
skb               563 drivers/net/ethernet/ti/cpmac.c 	desc->skb = skb;
skb               564 drivers/net/ethernet/ti/cpmac.c 	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
skb               570 drivers/net/ethernet/ti/cpmac.c 		netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
skb               574 drivers/net/ethernet/ti/cpmac.c 		cpmac_dump_skb(dev, skb);
skb               587 drivers/net/ethernet/ti/cpmac.c 	if (likely(desc->skb)) {
skb               590 drivers/net/ethernet/ti/cpmac.c 		dev->stats.tx_bytes += desc->skb->len;
skb               592 drivers/net/ethernet/ti/cpmac.c 		dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
skb               597 drivers/net/ethernet/ti/cpmac.c 				   desc->skb, desc->skb->len);
skb               599 drivers/net/ethernet/ti/cpmac.c 		dev_consume_skb_irq(desc->skb);
skb               600 drivers/net/ethernet/ti/cpmac.c 		desc->skb = NULL;
skb               708 drivers/net/ethernet/ti/cpmac.c 		if (priv->desc_ring[i].skb) {
skb               709 drivers/net/ethernet/ti/cpmac.c 			dev_kfree_skb_any(priv->desc_ring[i].skb);
skb               710 drivers/net/ethernet/ti/cpmac.c 			priv->desc_ring[i].skb = NULL;
skb               915 drivers/net/ethernet/ti/cpmac.c 	struct sk_buff *skb;
skb               950 drivers/net/ethernet/ti/cpmac.c 		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
skb               951 drivers/net/ethernet/ti/cpmac.c 		if (unlikely(!skb)) {
skb               955 drivers/net/ethernet/ti/cpmac.c 		desc->skb = skb;
skb               956 drivers/net/ethernet/ti/cpmac.c 		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
skb               989 drivers/net/ethernet/ti/cpmac.c 		if (priv->rx_head[i].skb) {
skb               994 drivers/net/ethernet/ti/cpmac.c 			kfree_skb(priv->rx_head[i].skb);
skb              1035 drivers/net/ethernet/ti/cpmac.c 		if (priv->rx_head[i].skb) {
skb              1040 drivers/net/ethernet/ti/cpmac.c 			kfree_skb(priv->rx_head[i].skb);
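cpmac_start_xmit() above pads short frames with skb_padto() before DMA-mapping max(skb->len, ETH_ZLEN) bytes. A sketch of that guard; note that skb_padto() frees the skb itself on failure, so the error path must not touch it again:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t xmit_min_len(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;	/* skb already freed by skb_padto() */

	len = max_t(unsigned int, skb->len, ETH_ZLEN);
	netdev_dbg(dev, "queueing %u bytes\n", len);
	/* ... map `len` bytes and post the TX descriptor ... */
	return NETDEV_TX_OK;
}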
skb               379 drivers/net/ethernet/ti/cpsw.c 	struct sk_buff		*skb;
skb               389 drivers/net/ethernet/ti/cpsw.c 		skb = token;
skb               390 drivers/net/ethernet/ti/cpsw.c 		ndev = skb->dev;
skb               391 drivers/net/ethernet/ti/cpsw.c 		ch = skb_get_queue_mapping(skb);
skb               392 drivers/net/ethernet/ti/cpsw.c 		cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
skb               393 drivers/net/ethernet/ti/cpsw.c 		dev_kfree_skb_any(skb);
skb               407 drivers/net/ethernet/ti/cpsw.c static void cpsw_rx_vlan_encap(struct sk_buff *skb)
skb               409 drivers/net/ethernet/ti/cpsw.c 	struct cpsw_priv *priv = netdev_priv(skb->dev);
skb               411 drivers/net/ethernet/ti/cpsw.c 	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
skb               415 drivers/net/ethernet/ti/cpsw.c 	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
skb               441 drivers/net/ethernet/ti/cpsw.c 	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
skb               445 drivers/net/ethernet/ti/cpsw.c 		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
skb               446 drivers/net/ethernet/ti/cpsw.c 		skb_pull(skb, VLAN_HLEN);
skb               684 drivers/net/ethernet/ti/cpsw.c 	struct sk_buff		*skb;
skb               751 drivers/net/ethernet/ti/cpsw.c 	skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
skb               752 drivers/net/ethernet/ti/cpsw.c 	if (!skb) {
skb               758 drivers/net/ethernet/ti/cpsw.c 	skb_reserve(skb, headroom);
skb               759 drivers/net/ethernet/ti/cpsw.c 	skb_put(skb, len);
skb               760 drivers/net/ethernet/ti/cpsw.c 	skb->dev = ndev;
skb               762 drivers/net/ethernet/ti/cpsw.c 		cpsw_rx_vlan_encap(skb);
skb               764 drivers/net/ethernet/ti/cpsw.c 		cpts_rx_timestamp(cpsw->cpts, skb);
skb               765 drivers/net/ethernet/ti/cpsw.c 	skb->protocol = eth_type_trans(skb, ndev);
skb               769 drivers/net/ethernet/ti/cpsw.c 	netif_receive_skb(skb);
skb              1795 drivers/net/ethernet/ti/cpsw.c static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
skb              1805 drivers/net/ethernet/ti/cpsw.c 	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
skb              1811 drivers/net/ethernet/ti/cpsw.c 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
skb              1812 drivers/net/ethernet/ti/cpsw.c 	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
skb              1813 drivers/net/ethernet/ti/cpsw.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              1815 drivers/net/ethernet/ti/cpsw.c 	q_idx = skb_get_queue_mapping(skb);
skb              1821 drivers/net/ethernet/ti/cpsw.c 	skb_tx_timestamp(skb);
skb              1822 drivers/net/ethernet/ti/cpsw.c 	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
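cpsw_rx_vlan_encap() above strips the hardware's encapsulation header and reinjects the tag through __vlan_hwaccel_put_tag(), which is how RX VLAN acceleration hands a tag to the stack without leaving it in the packet data. A sketch, with prio/vid assumed to come from a descriptor:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static void rx_put_vlan(struct sk_buff *skb, u16 prio, u16 vid)
{
	u16 vtag = (prio << VLAN_PRIO_SHIFT) | (vid & VLAN_VID_MASK);

	/* Tag travels in skb metadata, not in skb->data. */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
}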
skb                32 drivers/net/ethernet/ti/cpts.c static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
skb                81 drivers/net/ethernet/ti/cpts.c 	struct sk_buff *skb, *tmp;
skb                84 drivers/net/ethernet/ti/cpts.c 	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
skb                85 drivers/net/ethernet/ti/cpts.c 		skb_cb = (struct cpts_skb_cb_data *)skb->cb;
skb                87 drivers/net/ethernet/ti/cpts.c 			__skb_unlink(skb, &cpts->txq);
skb                88 drivers/net/ethernet/ti/cpts.c 			dev_consume_skb_any(skb);
skb                99 drivers/net/ethernet/ti/cpts.c 	struct sk_buff *skb, *tmp;
skb               108 drivers/net/ethernet/ti/cpts.c 	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
skb               110 drivers/net/ethernet/ti/cpts.c 		unsigned int class = ptp_classify_raw(skb);
skb               112 drivers/net/ethernet/ti/cpts.c 					(struct cpts_skb_cb_data *)skb->cb;
skb               114 drivers/net/ethernet/ti/cpts.c 		if (cpts_match(skb, class, seqid, mtype)) {
skb               119 drivers/net/ethernet/ti/cpts.c 			skb_tstamp_tx(skb, &ssh);
skb               121 drivers/net/ethernet/ti/cpts.c 			__skb_unlink(skb, &cpts->txq);
skb               122 drivers/net/ethernet/ti/cpts.c 			dev_consume_skb_any(skb);
skb               131 drivers/net/ethernet/ti/cpts.c 			__skb_unlink(skb, &cpts->txq);
skb               132 drivers/net/ethernet/ti/cpts.c 			dev_consume_skb_any(skb);
skb               330 drivers/net/ethernet/ti/cpts.c static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
skb               335 drivers/net/ethernet/ti/cpts.c 	u8 *msgtype, *data = skb->data;
skb               354 drivers/net/ethernet/ti/cpts.c 	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
skb               367 drivers/net/ethernet/ti/cpts.c static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
skb               372 drivers/net/ethernet/ti/cpts.c 	unsigned int class = ptp_classify_raw(skb);
skb               392 drivers/net/ethernet/ti/cpts.c 		    cpts_match(skb, class, seqid, mtype)) {
skb               402 drivers/net/ethernet/ti/cpts.c 				(struct cpts_skb_cb_data *)skb->cb;
skb               406 drivers/net/ethernet/ti/cpts.c 		skb_get(skb);
skb               409 drivers/net/ethernet/ti/cpts.c 		__skb_queue_tail(&cpts->txq, skb);
skb               417 drivers/net/ethernet/ti/cpts.c void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
skb               422 drivers/net/ethernet/ti/cpts.c 	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
skb               425 drivers/net/ethernet/ti/cpts.c 	ssh = skb_hwtstamps(skb);
skb               431 drivers/net/ethernet/ti/cpts.c void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
skb               436 drivers/net/ethernet/ti/cpts.c 	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
skb               438 drivers/net/ethernet/ti/cpts.c 	ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
skb               443 drivers/net/ethernet/ti/cpts.c 	skb_tstamp_tx(skb, &ssh);
skb               119 drivers/net/ethernet/ti/cpts.h void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
skb               120 drivers/net/ethernet/ti/cpts.h void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
skb               127 drivers/net/ethernet/ti/cpts.h static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
skb               129 drivers/net/ethernet/ti/cpts.h 	unsigned int class = ptp_classify_raw(skb);
skb               140 drivers/net/ethernet/ti/cpts.h static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
skb               143 drivers/net/ethernet/ti/cpts.h static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
skb               168 drivers/net/ethernet/ti/cpts.h static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
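The cpts entries trace the standard two-phase TX hardware-timestamping handshake: the xmit path sets SKBTX_IN_PROGRESS when SKBTX_HW_TSTAMP was requested, and the completion path calls skb_tstamp_tx() once the MAC's clock value is known. A minimal sketch, with ns standing in for the hardware reading:

#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void tx_hw_timestamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps ssh;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;		/* no hardware stamp was requested */

	memset(&ssh, 0, sizeof(ssh));
	ssh.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &ssh);
}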
skb               856 drivers/net/ethernet/ti/davinci_emac.c 	struct sk_buff *skb = netdev_alloc_skb(priv->ndev, priv->rx_buf_size);
skb               857 drivers/net/ethernet/ti/davinci_emac.c 	if (WARN_ON(!skb))
skb               859 drivers/net/ethernet/ti/davinci_emac.c 	skb_reserve(skb, NET_IP_ALIGN);
skb               860 drivers/net/ethernet/ti/davinci_emac.c 	return skb;
skb               865 drivers/net/ethernet/ti/davinci_emac.c 	struct sk_buff		*skb = token;
skb               866 drivers/net/ethernet/ti/davinci_emac.c 	struct net_device	*ndev = skb->dev;
skb               873 drivers/net/ethernet/ti/davinci_emac.c 		dev_kfree_skb_any(skb);
skb               884 drivers/net/ethernet/ti/davinci_emac.c 	skb_put(skb, len);
skb               885 drivers/net/ethernet/ti/davinci_emac.c 	skb->protocol = eth_type_trans(skb, ndev);
skb               886 drivers/net/ethernet/ti/davinci_emac.c 	netif_receive_skb(skb);
skb               891 drivers/net/ethernet/ti/davinci_emac.c 	skb = emac_rx_alloc(priv);
skb               892 drivers/net/ethernet/ti/davinci_emac.c 	if (!skb) {
skb               899 drivers/net/ethernet/ti/davinci_emac.c 	ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
skb               900 drivers/net/ethernet/ti/davinci_emac.c 			skb_tailroom(skb), 0);
skb               904 drivers/net/ethernet/ti/davinci_emac.c 		dev_kfree_skb_any(skb);
skb               909 drivers/net/ethernet/ti/davinci_emac.c 	struct sk_buff		*skb = token;
skb               910 drivers/net/ethernet/ti/davinci_emac.c 	struct net_device	*ndev = skb->dev;
skb               919 drivers/net/ethernet/ti/davinci_emac.c 	dev_kfree_skb_any(skb);
skb               932 drivers/net/ethernet/ti/davinci_emac.c static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               945 drivers/net/ethernet/ti/davinci_emac.c 	ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
skb               952 drivers/net/ethernet/ti/davinci_emac.c 	skb_tx_timestamp(skb);
skb               954 drivers/net/ethernet/ti/davinci_emac.c 	ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
skb              1426 drivers/net/ethernet/ti/davinci_emac.c 		struct sk_buff *skb = emac_rx_alloc(priv);
skb              1428 drivers/net/ethernet/ti/davinci_emac.c 		if (!skb)
skb              1431 drivers/net/ethernet/ti/davinci_emac.c 		ret = cpdma_chan_idle_submit(priv->rxchan, skb, skb->data,
skb              1432 drivers/net/ethernet/ti/davinci_emac.c 					     skb_tailroom(skb), 0);
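The davinci_emac RX handler above gets the skb back from cpdma as the opaque completion token, fixes up the length, and passes it to the stack. The token pattern reduced to its core:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void rx_complete(void *token, int len, struct net_device *ndev)
{
	struct sk_buff *skb = token;	/* the skb we handed to the DMA layer */

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb(skb);
}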
skb               129 drivers/net/ethernet/ti/netcp.h 	struct sk_buff		*skb;
skb               139 drivers/net/ethernet/ti/netcp.h 	void (*txtstamp)(void *ctx, struct sk_buff *skb);
skb                97 drivers/net/ethernet/ti/netcp_core.c 	void	(*txtstamp)(void *context, struct sk_buff *skb);
skb               649 drivers/net/ethernet/ti/netcp_core.c 	struct sk_buff *skb;
skb               680 drivers/net/ethernet/ti/netcp_core.c 	skb = build_skb(org_buf_ptr, org_buf_len);
skb               681 drivers/net/ethernet/ti/netcp_core.c 	if (unlikely(!skb)) {
skb               687 drivers/net/ethernet/ti/netcp_core.c 	skb_reserve(skb, NETCP_SOP_OFFSET);
skb               688 drivers/net/ethernet/ti/netcp_core.c 	__skb_put(skb, buf_len);
skb               715 drivers/net/ethernet/ti/netcp_core.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
skb               733 drivers/net/ethernet/ti/netcp_core.c 		__pskb_trim(skb, skb->len - ETH_FCS_LEN);
skb               736 drivers/net/ethernet/ti/netcp_core.c 	p_info.skb = skb;
skb               737 drivers/net/ethernet/ti/netcp_core.c 	skb->dev = netcp->ndev;
skb               755 drivers/net/ethernet/ti/netcp_core.c 			dev_kfree_skb(skb);
skb               764 drivers/net/ethernet/ti/netcp_core.c 	rx_stats->rx_bytes += skb->len;
skb               768 drivers/net/ethernet/ti/netcp_core.c 	skb->protocol = eth_type_trans(skb, netcp->ndev);
skb               769 drivers/net/ethernet/ti/netcp_core.c 	netif_receive_skb(skb);
skb              1008 drivers/net/ethernet/ti/netcp_core.c 	struct sk_buff *skb;
skb              1027 drivers/net/ethernet/ti/netcp_core.c 		skb = (struct sk_buff *)GET_SW_DATA0(desc);
skb              1029 drivers/net/ethernet/ti/netcp_core.c 		if (!skb) {
skb              1035 drivers/net/ethernet/ti/netcp_core.c 		tx_cb = (struct netcp_tx_cb *)skb->cb;
skb              1037 drivers/net/ethernet/ti/netcp_core.c 			tx_cb->txtstamp(tx_cb->ts_context, skb);
skb              1039 drivers/net/ethernet/ti/netcp_core.c 		if (netif_subqueue_stopped(netcp->ndev, skb) &&
skb              1043 drivers/net/ethernet/ti/netcp_core.c 			u16 subqueue = skb_get_queue_mapping(skb);
skb              1050 drivers/net/ethernet/ti/netcp_core.c 		tx_stats->tx_bytes += skb->len;
skb              1052 drivers/net/ethernet/ti/netcp_core.c 		dev_kfree_skb(skb);
skb              1082 drivers/net/ethernet/ti/netcp_core.c netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
skb              1085 drivers/net/ethernet/ti/netcp_core.c 	unsigned int pkt_len = skb_headlen(skb);
skb              1092 drivers/net/ethernet/ti/netcp_core.c 	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
skb              1106 drivers/net/ethernet/ti/netcp_core.c 	if (skb_is_nonlinear(skb)) {
skb              1107 drivers/net/ethernet/ti/netcp_core.c 		prefetchw(skb_shinfo(skb));
skb              1116 drivers/net/ethernet/ti/netcp_core.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1117 drivers/net/ethernet/ti/netcp_core.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1153 drivers/net/ethernet/ti/netcp_core.c 	if (skb_shinfo(skb)->frag_list) {
skb              1159 drivers/net/ethernet/ti/netcp_core.c 	WARN_ON(pkt_len != skb->len);
skb              1171 drivers/net/ethernet/ti/netcp_core.c 			       struct sk_buff *skb,
skb              1184 drivers/net/ethernet/ti/netcp_core.c 	p_info.skb = skb;
skb              1213 drivers/net/ethernet/ti/netcp_core.c 	tx_cb = (struct netcp_tx_cb *)skb->cb;
skb              1242 drivers/net/ethernet/ti/netcp_core.c 	SET_SW_DATA0((u32)skb, desc);
skb              1257 drivers/net/ethernet/ti/netcp_core.c 	skb_tx_timestamp(skb);
skb              1265 drivers/net/ethernet/ti/netcp_core.c static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb              1269 drivers/net/ethernet/ti/netcp_core.c 	int subqueue = skb_get_queue_mapping(skb);
skb              1273 drivers/net/ethernet/ti/netcp_core.c 	if (unlikely(skb->len <= 0)) {
skb              1274 drivers/net/ethernet/ti/netcp_core.c 		dev_kfree_skb(skb);
skb              1278 drivers/net/ethernet/ti/netcp_core.c 	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
skb              1279 drivers/net/ethernet/ti/netcp_core.c 		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
skb              1287 drivers/net/ethernet/ti/netcp_core.c 		skb->len = NETCP_MIN_PACKET_SIZE;
skb              1290 drivers/net/ethernet/ti/netcp_core.c 	desc = netcp_tx_map_skb(skb, netcp);
skb              1297 drivers/net/ethernet/ti/netcp_core.c 	ret = netcp_tx_submit_skb(netcp, skb, desc);
skb              1313 drivers/net/ethernet/ti/netcp_core.c 	dev_kfree_skb(skb);
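netcp_tx_map_skb() above maps the linear head with dma_map_single() and each page fragment with skb_frag_dma_map(). A sketch of that split with descriptor bookkeeping elided; real code must unwind earlier mappings on failure:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int map_tx_skb(struct device *dev, struct sk_buff *skb)
{
	dma_addr_t dma;
	int i;

	/* Linear part first. */
	dma = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* Then each page fragment of a non-linear skb. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;	/* caller must unmap prior pieces */
	}
	return 0;
}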
skb              2538 drivers/net/ethernet/ti/netcp_ethss.c static void gbe_txtstamp(void *context, struct sk_buff *skb)
skb              2543 drivers/net/ethernet/ti/netcp_ethss.c 	cpts_tx_timestamp(gbe_dev->cpts, skb);
skb              2549 drivers/net/ethernet/ti/netcp_ethss.c 	struct sk_buff *skb = p_info->skb;
skb              2551 drivers/net/ethernet/ti/netcp_ethss.c 	return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
skb              2557 drivers/net/ethernet/ti/netcp_ethss.c 	struct phy_device *phydev = p_info->skb->dev->phydev;
skb              2560 drivers/net/ethernet/ti/netcp_ethss.c 	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
skb              2569 drivers/net/ethernet/ti/netcp_ethss.c 		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              2576 drivers/net/ethernet/ti/netcp_ethss.c 		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              2584 drivers/net/ethernet/ti/netcp_ethss.c 	struct phy_device *phydev = p_info->skb->dev->phydev;
skb              2596 drivers/net/ethernet/ti/netcp_ethss.c 		cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
skb               216 drivers/net/ethernet/ti/tlan.c tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
skb               218 drivers/net/ethernet/ti/tlan.c 	unsigned long addr = (unsigned long)skb;
skb              1057 drivers/net/ethernet/ti/tlan.c static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
skb              1068 drivers/net/ethernet/ti/tlan.c 		dev_kfree_skb_any(skb);
skb              1072 drivers/net/ethernet/ti/tlan.c 	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
skb              1074 drivers/net/ethernet/ti/tlan.c 	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
skb              1092 drivers/net/ethernet/ti/tlan.c 						      skb->data, txlen,
skb              1094 drivers/net/ethernet/ti/tlan.c 	tlan_store_skb(tail_list, skb);
skb              1384 drivers/net/ethernet/ti/tlan.c 		struct sk_buff *skb = tlan_get_skb(head_list);
skb              1388 drivers/net/ethernet/ti/tlan.c 				 max(skb->len,
skb              1391 drivers/net/ethernet/ti/tlan.c 		dev_kfree_skb_any(skb);
skb              1506 drivers/net/ethernet/ti/tlan.c 	struct sk_buff	*skb;
skb              1532 drivers/net/ethernet/ti/tlan.c 		skb = tlan_get_skb(head_list);
skb              1535 drivers/net/ethernet/ti/tlan.c 		skb_put(skb, frame_size);
skb              1539 drivers/net/ethernet/ti/tlan.c 		skb->protocol = eth_type_trans(skb, dev);
skb              1540 drivers/net/ethernet/ti/tlan.c 		netif_rx(skb);
skb              1919 drivers/net/ethernet/ti/tlan.c 	struct sk_buff	*skb;
skb              1941 drivers/net/ethernet/ti/tlan.c 		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
skb              1942 drivers/net/ethernet/ti/tlan.c 		if (!skb)
skb              1946 drivers/net/ethernet/ti/tlan.c 							 skb->data,
skb              1949 drivers/net/ethernet/ti/tlan.c 		tlan_store_skb(list, skb);
skb              1970 drivers/net/ethernet/ti/tlan.c 	struct sk_buff	*skb;
skb              1974 drivers/net/ethernet/ti/tlan.c 		skb = tlan_get_skb(list);
skb              1975 drivers/net/ethernet/ti/tlan.c 		if (skb) {
skb              1979 drivers/net/ethernet/ti/tlan.c 				max(skb->len,
skb              1982 drivers/net/ethernet/ti/tlan.c 			dev_kfree_skb_any(skb);
skb              1990 drivers/net/ethernet/ti/tlan.c 		skb = tlan_get_skb(list);
skb              1991 drivers/net/ethernet/ti/tlan.c 		if (skb) {
skb              1996 drivers/net/ethernet/ti/tlan.c 			dev_kfree_skb_any(skb);
skb               382 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
skb               383 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	if (!descr->skb) {
skb               395 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	offset = ((unsigned long)descr->skb->data) &
skb               398 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
skb               401 drivers/net/ethernet/toshiba/ps3_gelic_net.c 						     descr->skb->data,
skb               405 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		dev_kfree_skb_any(descr->skb);
skb               406 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		descr->skb = NULL;
skb               427 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		if (descr->skb) {
skb               430 drivers/net/ethernet/toshiba/ps3_gelic_net.c 					 descr->skb->len,
skb               433 drivers/net/ethernet/toshiba/ps3_gelic_net.c 			dev_kfree_skb_any(descr->skb);
skb               434 drivers/net/ethernet/toshiba/ps3_gelic_net.c 			descr->skb = NULL;
skb               456 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		if (!descr->skb) {
skb               496 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	struct sk_buff *skb = descr->skb;
skb               500 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), skb->len,
skb               502 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	dev_kfree_skb_any(skb);
skb               511 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	descr->skb = NULL;
skb               549 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		netdev = tx_chain->tail->skb->dev;
skb               563 drivers/net/ethernet/toshiba/ps3_gelic_net.c 			if (tx_chain->tail->skb) {
skb               566 drivers/net/ethernet/toshiba/ps3_gelic_net.c 					tx_chain->tail->skb->len;
skb               700 drivers/net/ethernet/toshiba/ps3_gelic_net.c 				       struct sk_buff *skb)
skb               702 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb               709 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb               710 drivers/net/ethernet/toshiba/ps3_gelic_net.c 			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
skb               715 drivers/net/ethernet/toshiba/ps3_gelic_net.c 			else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
skb               730 drivers/net/ethernet/toshiba/ps3_gelic_net.c static struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
skb               736 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	if (skb_headroom(skb) < VLAN_HLEN) {
skb               737 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		struct sk_buff *sk_tmp = skb;
skb               738 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		pr_debug("%s: hd=%d c=%ud\n", __func__, skb_headroom(skb), c);
skb               738 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	pr_debug("%s: hd=%d c=%u\n", __func__, skb_headroom(skb), c);
skb               739 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN);
skb               740 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		if (!skb)
skb               744 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	veth = skb_push(skb, VLAN_HLEN);
skb               747 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
skb               752 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	return skb;
skb               766 drivers/net/ethernet/toshiba/ps3_gelic_net.c 				  struct sk_buff *skb)
skb               774 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		type = netdev_port(skb->dev)->type;
skb               775 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		skb_tmp = gelic_put_vlan_tag(skb,
skb               779 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		skb = skb_tmp;
skb               782 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
skb               787 drivers/net/ethernet/toshiba/ps3_gelic_net.c 			skb->data, skb->len);
skb               792 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	descr->buf_size = cpu_to_be32(skb->len);
skb               793 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	descr->skb = skb;
skb               796 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	gelic_descr_set_tx_cmdstat(descr, skb);
skb               837 drivers/net/ethernet/toshiba/ps3_gelic_net.c netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
skb               858 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	result = gelic_descr_prepare_tx(card, descr, skb);
skb               865 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		dev_kfree_skb_any(skb);
skb               913 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	struct sk_buff *skb = descr->skb;
skb               923 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	skb_put(skb, be32_to_cpu(descr->valid_size)?
skb               932 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	descr->skb = NULL;
skb               937 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	skb_pull(skb, 2);
skb               938 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	skb->protocol = eth_type_trans(skb, netdev);
skb               944 drivers/net/ethernet/toshiba/ps3_gelic_net.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               946 drivers/net/ethernet/toshiba/ps3_gelic_net.c 			skb_checksum_none_assert(skb);
skb               948 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		skb_checksum_none_assert(skb);
skb               952 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	netdev->stats.rx_bytes += skb->len;
skb               955 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	netif_receive_skb(skb);
skb               989 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		vid = *(u16 *)(descr->skb->data) & VLAN_VID_MASK;
skb               244 drivers/net/ethernet/toshiba/ps3_gelic_net.h 	struct sk_buff *skb;
skb               360 drivers/net/ethernet/toshiba/ps3_gelic_net.h netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
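gelic_put_vlan_tag() above inserts an 802.1Q tag in software: make VLAN_HLEN of headroom, push, slide the two MAC addresses forward, then write the TPID and TCI. A condensed sketch using skb_cow_head() in place of the driver's explicit headroom-reallocation dance; tag is illustrative:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *put_vlan_tag(struct sk_buff *skb, u16 tag)
{
	struct vlan_ethhdr *veth;

	if (skb_cow_head(skb, VLAN_HLEN))
		return NULL;

	veth = skb_push(skb, VLAN_HLEN);
	/* Move dest + source MAC (2 * ETH_ALEN bytes) to the new front. */
	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
	veth->h_vlan_proto = htons(ETH_P_8021Q);
	veth->h_vlan_TCI = htons(tag);
	return skb;
}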
skb               357 drivers/net/ethernet/toshiba/spider_net.c 		if (descr->skb) {
skb               361 drivers/net/ethernet/toshiba/spider_net.c 			dev_kfree_skb(descr->skb);
skb               362 drivers/net/ethernet/toshiba/spider_net.c 			descr->skb = NULL;
skb               394 drivers/net/ethernet/toshiba/spider_net.c 	descr->skb = netdev_alloc_skb(card->netdev,
skb               396 drivers/net/ethernet/toshiba/spider_net.c 	if (!descr->skb) {
skb               409 drivers/net/ethernet/toshiba/spider_net.c 	offset = ((unsigned long)descr->skb->data) &
skb               412 drivers/net/ethernet/toshiba/spider_net.c 		skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
skb               414 drivers/net/ethernet/toshiba/spider_net.c 	buf = pci_map_single(card->pdev, descr->skb->data,
skb               417 drivers/net/ethernet/toshiba/spider_net.c 		dev_kfree_skb_any(descr->skb);
skb               418 drivers/net/ethernet/toshiba/spider_net.c 		descr->skb = NULL;
skb               644 drivers/net/ethernet/toshiba/spider_net.c 			    struct sk_buff *skb)
skb               652 drivers/net/ethernet/toshiba/spider_net.c 	buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
skb               656 drivers/net/ethernet/toshiba/spider_net.c 				  "Dropping packet\n", skb->data, skb->len);
skb               665 drivers/net/ethernet/toshiba/spider_net.c 		pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
skb               671 drivers/net/ethernet/toshiba/spider_net.c 	descr->skb = skb;
skb               673 drivers/net/ethernet/toshiba/spider_net.c 	hwdescr->buf_size = skb->len;
skb               681 drivers/net/ethernet/toshiba/spider_net.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               682 drivers/net/ethernet/toshiba/spider_net.c 		switch (ip_hdr(skb)->protocol) {
skb               761 drivers/net/ethernet/toshiba/spider_net.c 	struct sk_buff *skb;
skb               779 drivers/net/ethernet/toshiba/spider_net.c 			dev->stats.tx_bytes += descr->skb->len;
skb               812 drivers/net/ethernet/toshiba/spider_net.c 		skb = descr->skb;
skb               813 drivers/net/ethernet/toshiba/spider_net.c 		descr->skb = NULL;
skb               818 drivers/net/ethernet/toshiba/spider_net.c 		if (skb) {
skb               819 drivers/net/ethernet/toshiba/spider_net.c 			pci_unmap_single(card->pdev, buf_addr, skb->len,
skb               821 drivers/net/ethernet/toshiba/spider_net.c 			dev_consume_skb_any(skb);
skb               874 drivers/net/ethernet/toshiba/spider_net.c spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
skb               881 drivers/net/ethernet/toshiba/spider_net.c 	if (spider_net_prepare_tx_descr(card, skb) != 0) {
skb               944 drivers/net/ethernet/toshiba/spider_net.c 	struct sk_buff *skb = descr->skb;
skb               949 drivers/net/ethernet/toshiba/spider_net.c 	skb_put(skb, hwdescr->valid_size);
skb               954 drivers/net/ethernet/toshiba/spider_net.c 	skb_pull(skb, SPIDER_MISALIGN);
skb               955 drivers/net/ethernet/toshiba/spider_net.c 	skb->protocol = eth_type_trans(skb, netdev);
skb               958 drivers/net/ethernet/toshiba/spider_net.c 	skb_checksum_none_assert(skb);
skb               963 drivers/net/ethernet/toshiba/spider_net.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               972 drivers/net/ethernet/toshiba/spider_net.c 	netdev->stats.rx_bytes += skb->len;
skb               975 drivers/net/ethernet/toshiba/spider_net.c 	netif_receive_skb(skb);
skb              1049 drivers/net/ethernet/toshiba/spider_net.c 		         cnt, status, descr->skb);
skb              1210 drivers/net/ethernet/toshiba/spider_net.c 	descr->skb = NULL;
skb              1217 drivers/net/ethernet/toshiba/spider_net.c 	dev_kfree_skb_irq(descr->skb);
skb              1218 drivers/net/ethernet/toshiba/spider_net.c 	descr->skb = NULL;
skb               386 drivers/net/ethernet/toshiba/spider_net.h 	struct sk_buff *skb;
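spider_net_prepare_tx_descr() above enables TX checksum offload per L4 protocol, and only for CHECKSUM_PARTIAL IPv4 frames. A sketch of that decision; the CMD_* values are hypothetical stand-ins for the hardware descriptor bits:

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

enum { CMD_NOCSUM, CMD_TCPCSUM, CMD_UDPCSUM };

static int tx_csum_cmd(const struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL ||
	    skb->protocol != htons(ETH_P_IP))
		return CMD_NOCSUM;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_TCP:
		return CMD_TCPCSUM;
	case IPPROTO_UDP:
		return CMD_UDPCSUM;
	default:
		return CMD_NOCSUM;
	}
}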
skb               432 drivers/net/ethernet/toshiba/tc35815.c 		struct sk_buff *skb;
skb               453 drivers/net/ethernet/toshiba/tc35815.c 	struct sk_buff *skb;
skb               454 drivers/net/ethernet/toshiba/tc35815.c 	skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
skb               455 drivers/net/ethernet/toshiba/tc35815.c 	if (!skb)
skb               457 drivers/net/ethernet/toshiba/tc35815.c 	*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
skb               460 drivers/net/ethernet/toshiba/tc35815.c 		dev_kfree_skb_any(skb);
skb               463 drivers/net/ethernet/toshiba/tc35815.c 	skb_reserve(skb, 2);	/* make IP header 4byte aligned */
skb               464 drivers/net/ethernet/toshiba/tc35815.c 	return skb;
skb               467 drivers/net/ethernet/toshiba/tc35815.c static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle)
skb               471 drivers/net/ethernet/toshiba/tc35815.c 	dev_kfree_skb_any(skb);
skb               477 drivers/net/ethernet/toshiba/tc35815.c static netdev_tx_t	tc35815_send_packet(struct sk_buff *skb,
skb               886 drivers/net/ethernet/toshiba/tc35815.c 			lp->rx_skbs[i].skb =
skb               889 drivers/net/ethernet/toshiba/tc35815.c 			if (!lp->rx_skbs[i].skb) {
skb               892 drivers/net/ethernet/toshiba/tc35815.c 						       lp->rx_skbs[i].skb,
skb               894 drivers/net/ethernet/toshiba/tc35815.c 					lp->rx_skbs[i].skb = NULL;
skb               945 drivers/net/ethernet/toshiba/tc35815.c 		if (lp->rx_skbs[i].skb) {
skb               947 drivers/net/ethernet/toshiba/tc35815.c 				lp->rx_skbs[lp->fbl_count].skb =
skb               948 drivers/net/ethernet/toshiba/tc35815.c 					lp->rx_skbs[i].skb;
skb               982 drivers/net/ethernet/toshiba/tc35815.c 		struct sk_buff *skb =
skb               984 drivers/net/ethernet/toshiba/tc35815.c 			lp->tx_skbs[fdsystem].skb : NULL;
skb               986 drivers/net/ethernet/toshiba/tc35815.c 		if (lp->tx_skbs[i].skb != skb) {
skb               991 drivers/net/ethernet/toshiba/tc35815.c 		BUG_ON(lp->tx_skbs[i].skb != skb);
skb               993 drivers/net/ethernet/toshiba/tc35815.c 		if (skb) {
skb               994 drivers/net/ethernet/toshiba/tc35815.c 			pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
skb               995 drivers/net/ethernet/toshiba/tc35815.c 			lp->tx_skbs[i].skb = NULL;
skb               997 drivers/net/ethernet/toshiba/tc35815.c 			dev_kfree_skb_any(skb);
skb              1014 drivers/net/ethernet/toshiba/tc35815.c 			struct sk_buff *skb =
skb              1016 drivers/net/ethernet/toshiba/tc35815.c 				lp->tx_skbs[fdsystem].skb : NULL;
skb              1018 drivers/net/ethernet/toshiba/tc35815.c 			if (lp->tx_skbs[i].skb != skb) {
skb              1023 drivers/net/ethernet/toshiba/tc35815.c 			BUG_ON(lp->tx_skbs[i].skb != skb);
skb              1025 drivers/net/ethernet/toshiba/tc35815.c 			if (skb) {
skb              1026 drivers/net/ethernet/toshiba/tc35815.c 				pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
skb              1027 drivers/net/ethernet/toshiba/tc35815.c 				dev_kfree_skb(skb);
skb              1028 drivers/net/ethernet/toshiba/tc35815.c 				lp->tx_skbs[i].skb = NULL;
skb              1041 drivers/net/ethernet/toshiba/tc35815.c 		if (lp->rx_skbs[i].skb) {
skb              1042 drivers/net/ethernet/toshiba/tc35815.c 			free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
skb              1044 drivers/net/ethernet/toshiba/tc35815.c 			lp->rx_skbs[i].skb = NULL;
skb              1258 drivers/net/ethernet/toshiba/tc35815.c tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
skb              1286 drivers/net/ethernet/toshiba/tc35815.c 		print_eth(skb->data);
skb              1288 drivers/net/ethernet/toshiba/tc35815.c 	if (lp->tx_skbs[lp->tfd_start].skb) {
skb              1293 drivers/net/ethernet/toshiba/tc35815.c 	BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
skb              1295 drivers/net/ethernet/toshiba/tc35815.c 	lp->tx_skbs[lp->tfd_start].skb = skb;
skb              1296 drivers/net/ethernet/toshiba/tc35815.c 	lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
skb              1301 drivers/net/ethernet/toshiba/tc35815.c 	txfd->bd.BDCtl = cpu_to_le32(skb->len);
skb              1478 drivers/net/ethernet/toshiba/tc35815.c 			struct sk_buff *skb;
skb              1494 drivers/net/ethernet/toshiba/tc35815.c 			if (!lp->rx_skbs[cur_bd].skb) {
skb              1501 drivers/net/ethernet/toshiba/tc35815.c 			skb = lp->rx_skbs[cur_bd].skb;
skb              1502 drivers/net/ethernet/toshiba/tc35815.c 			prefetch(skb->data);
skb              1503 drivers/net/ethernet/toshiba/tc35815.c 			lp->rx_skbs[cur_bd].skb = NULL;
skb              1508 drivers/net/ethernet/toshiba/tc35815.c 				memmove(skb->data, skb->data - NET_IP_ALIGN,
skb              1510 drivers/net/ethernet/toshiba/tc35815.c 			data = skb_put(skb, pkt_len);
skb              1513 drivers/net/ethernet/toshiba/tc35815.c 			skb->protocol = eth_type_trans(skb, dev);
skb              1514 drivers/net/ethernet/toshiba/tc35815.c 			netif_receive_skb(skb);
skb              1567 drivers/net/ethernet/toshiba/tc35815.c 				if (!lp->rx_skbs[curid].skb) {
skb              1568 drivers/net/ethernet/toshiba/tc35815.c 					lp->rx_skbs[curid].skb =
skb              1572 drivers/net/ethernet/toshiba/tc35815.c 					if (!lp->rx_skbs[curid].skb)
skb              1738 drivers/net/ethernet/toshiba/tc35815.c 		struct sk_buff *skb;
skb              1748 drivers/net/ethernet/toshiba/tc35815.c 		skb = fdsystem != 0xffffffff ?
skb              1749 drivers/net/ethernet/toshiba/tc35815.c 			lp->tx_skbs[fdsystem].skb : NULL;
skb              1751 drivers/net/ethernet/toshiba/tc35815.c 		if (lp->tx_skbs[lp->tfd_end].skb != skb) {
skb              1756 drivers/net/ethernet/toshiba/tc35815.c 		BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
skb              1758 drivers/net/ethernet/toshiba/tc35815.c 		if (skb) {
skb              1759 drivers/net/ethernet/toshiba/tc35815.c 			dev->stats.tx_bytes += skb->len;
skb              1760 drivers/net/ethernet/toshiba/tc35815.c 			pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
skb              1761 drivers/net/ethernet/toshiba/tc35815.c 			lp->tx_skbs[lp->tfd_end].skb = NULL;
skb              1763 drivers/net/ethernet/toshiba/tc35815.c 			dev_kfree_skb_any(skb);
skb               615 drivers/net/ethernet/tundra/tsi108_eth.c 	struct sk_buff *skb;
skb               624 drivers/net/ethernet/tundra/tsi108_eth.c 		skb = data->txskbs[tx];
skb               634 drivers/net/ethernet/tundra/tsi108_eth.c 			dev_kfree_skb_any(skb);
skb               645 drivers/net/ethernet/tundra/tsi108_eth.c static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
skb               648 drivers/net/ethernet/tundra/tsi108_eth.c 	int frags = skb_shinfo(skb)->nr_frags + 1;
skb               694 drivers/net/ethernet/tundra/tsi108_eth.c 		data->txskbs[tx] = skb;
skb               698 drivers/net/ethernet/tundra/tsi108_eth.c 					skb->data, skb_headlen(skb),
skb               700 drivers/net/ethernet/tundra/tsi108_eth.c 			data->txring[tx].len = skb_headlen(skb);
skb               703 drivers/net/ethernet/tundra/tsi108_eth.c 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
skb               718 drivers/net/ethernet/tundra/tsi108_eth.c 			       skb->len);
skb               719 drivers/net/ethernet/tundra/tsi108_eth.c 			for (i = 0; i < skb->len; i++)
skb               720 drivers/net/ethernet/tundra/tsi108_eth.c 				printk(" %2.2x", skb->data[i]);
skb               749 drivers/net/ethernet/tundra/tsi108_eth.c 		struct sk_buff *skb;
skb               754 drivers/net/ethernet/tundra/tsi108_eth.c 		skb = data->rxskbs[rx];
skb               769 drivers/net/ethernet/tundra/tsi108_eth.c 			dev_kfree_skb_any(skb);
skb               777 drivers/net/ethernet/tundra/tsi108_eth.c 				printk(" %2.2x", skb->data[i]);
skb               781 drivers/net/ethernet/tundra/tsi108_eth.c 		skb_put(skb, data->rxring[rx].len);
skb               782 drivers/net/ethernet/tundra/tsi108_eth.c 		skb->protocol = eth_type_trans(skb, dev);
skb               783 drivers/net/ethernet/tundra/tsi108_eth.c 		netif_receive_skb(skb);
skb               796 drivers/net/ethernet/tundra/tsi108_eth.c 		struct sk_buff *skb;
skb               798 drivers/net/ethernet/tundra/tsi108_eth.c 		skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
skb               799 drivers/net/ethernet/tundra/tsi108_eth.c 		data->rxskbs[rx] = skb;
skb               800 drivers/net/ethernet/tundra/tsi108_eth.c 		if (!skb)
skb               804 drivers/net/ethernet/tundra/tsi108_eth.c 				skb->data, TSI108_RX_SKB_SIZE,
skb              1328 drivers/net/ethernet/tundra/tsi108_eth.c 		struct sk_buff *skb;
skb              1330 drivers/net/ethernet/tundra/tsi108_eth.c 		skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
skb              1331 drivers/net/ethernet/tundra/tsi108_eth.c 		if (!skb) {
skb              1343 drivers/net/ethernet/tundra/tsi108_eth.c 		data->rxskbs[i] = skb;
skb              1402 drivers/net/ethernet/tundra/tsi108_eth.c 		struct sk_buff *skb;
skb              1403 drivers/net/ethernet/tundra/tsi108_eth.c 		skb = data->txskbs[tx];
skb              1406 drivers/net/ethernet/tundra/tsi108_eth.c 		dev_kfree_skb(skb);
skb              1415 drivers/net/ethernet/tundra/tsi108_eth.c 		struct sk_buff *skb;
skb              1417 drivers/net/ethernet/tundra/tsi108_eth.c 		skb = data->rxskbs[rx];
skb              1420 drivers/net/ethernet/tundra/tsi108_eth.c 		dev_kfree_skb(skb);
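/*
 * Annotation — RX ring refill (tsi108_eth lines above): allocate an
 * IP-aligned receive skb, map it for device writes, and park it in the
 * ring slot. A sketch assuming caller-provided slot pointers; only the
 * alloc/map/free calls are the real APIs the driver uses.
 */
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>

static int my_rx_refill_one(struct net_device *dev, struct device *dmadev,
			    struct sk_buff **skb_slot, dma_addr_t *dma_slot,
			    unsigned int buf_size)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, buf_size);

	if (!skb)
		return -ENOMEM;		/* ring stays short; retry later */

	*dma_slot = dma_map_single(dmadev, skb->data, buf_size,
				   DMA_FROM_DEVICE);
	if (dma_mapping_error(dmadev, *dma_slot)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}
	*skb_slot = skb;
	return 0;
}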
skb               510 drivers/net/ethernet/via/via-rhine.c static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
skb              1210 drivers/net/ethernet/via/via-rhine.c 	struct sk_buff *skb;
skb              1221 drivers/net/ethernet/via/via-rhine.c 	sd->skb = netdev_alloc_skb(dev, size);
skb              1222 drivers/net/ethernet/via/via-rhine.c 	if (!sd->skb)
skb              1225 drivers/net/ethernet/via/via-rhine.c 	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
skb              1228 drivers/net/ethernet/via/via-rhine.c 		dev_kfree_skb_any(sd->skb);
skb              1249 drivers/net/ethernet/via/via-rhine.c 	rp->rx_skbuff[entry] = sd->skb;
skb              1781 drivers/net/ethernet/via/via-rhine.c static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
skb              1795 drivers/net/ethernet/via/via-rhine.c 	if (skb_padto(skb, ETH_ZLEN))
skb              1798 drivers/net/ethernet/via/via-rhine.c 	rp->tx_skbuff[entry] = skb;
skb              1801 drivers/net/ethernet/via/via-rhine.c 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              1803 drivers/net/ethernet/via/via-rhine.c 		if (skb->len > PKT_BUF_SZ) {
skb              1805 drivers/net/ethernet/via/via-rhine.c 			dev_kfree_skb_any(skb);
skb              1812 drivers/net/ethernet/via/via-rhine.c 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
skb              1813 drivers/net/ethernet/via/via-rhine.c 		if (skb->len < ETH_ZLEN)
skb              1814 drivers/net/ethernet/via/via-rhine.c 			memset(rp->tx_buf[entry] + skb->len, 0,
skb              1815 drivers/net/ethernet/via/via-rhine.c 			       ETH_ZLEN - skb->len);
skb              1822 drivers/net/ethernet/via/via-rhine.c 			dma_map_single(hwdev, skb->data, skb->len,
skb              1825 drivers/net/ethernet/via/via-rhine.c 			dev_kfree_skb_any(skb);
skb              1834 drivers/net/ethernet/via/via-rhine.c 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
skb              1836 drivers/net/ethernet/via/via-rhine.c 	if (unlikely(skb_vlan_tag_present(skb))) {
skb              1837 drivers/net/ethernet/via/via-rhine.c 		u16 vid_pcp = skb_vlan_tag_get(skb);
skb              1849 drivers/net/ethernet/via/via-rhine.c 	netdev_sent_queue(dev, skb->len);
skb              1865 drivers/net/ethernet/via/via-rhine.c 	if (skb_vlan_tag_present(skb))
skb              1931 drivers/net/ethernet/via/via-rhine.c 	struct sk_buff *skb;
skb              1950 drivers/net/ethernet/via/via-rhine.c 		skb = rp->tx_skbuff[entry];
skb              1979 drivers/net/ethernet/via/via-rhine.c 			rp->tx_stats.bytes += skb->len;
skb              1987 drivers/net/ethernet/via/via-rhine.c 					 skb->len,
skb              1990 drivers/net/ethernet/via/via-rhine.c 		bytes_compl += skb->len;
skb              1992 drivers/net/ethernet/via/via-rhine.c 		dev_consume_skb_any(skb);
skb              2022 drivers/net/ethernet/via/via-rhine.c static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
skb              2024 drivers/net/ethernet/via/via-rhine.c 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
skb              2028 drivers/net/ethernet/via/via-rhine.c static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
skb              2035 drivers/net/ethernet/via/via-rhine.c 		vlan_tci = rhine_get_vlan_tci(skb, data_size);
skb              2036 drivers/net/ethernet/via/via-rhine.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
skb              2093 drivers/net/ethernet/via/via-rhine.c 			struct sk_buff *skb;
skb              2098 drivers/net/ethernet/via/via-rhine.c 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
skb              2099 drivers/net/ethernet/via/via-rhine.c 				if (unlikely(!skb))
skb              2107 drivers/net/ethernet/via/via-rhine.c 				skb_copy_to_linear_data(skb,
skb              2121 drivers/net/ethernet/via/via-rhine.c 				skb = rp->rx_skbuff[entry];
skb              2130 drivers/net/ethernet/via/via-rhine.c 			skb_put(skb, pkt_len);
skb              2132 drivers/net/ethernet/via/via-rhine.c 			rhine_rx_vlan_tag(skb, desc, data_size);
skb              2134 drivers/net/ethernet/via/via-rhine.c 			skb->protocol = eth_type_trans(skb, dev);
skb              2136 drivers/net/ethernet/via/via-rhine.c 			netif_receive_skb(skb);
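/*
 * Annotation — copybreak receive (via-rhine lines above): frames
 * shorter than a threshold are copied into a fresh IP-aligned skb so
 * the large ring buffer can be handed straight back to the NIC. A
 * hedged sketch; the threshold name and the caller contract are
 * assumptions, not via-rhine's exact code.
 */
#include <linux/netdevice.h>

static struct sk_buff *my_rx_copybreak(struct net_device *dev,
				       const struct sk_buff *ring_skb,
				       unsigned int pkt_len,
				       unsigned int copybreak)
{
	struct sk_buff *skb;

	if (pkt_len >= copybreak)
		return NULL;	/* caller passes the ring skb up instead */

	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
	if (!skb)
		return NULL;	/* caller drops; ring skb is reused */

	skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
	skb_put(skb, pkt_len);
	return skb;		/* ring buffer stays mapped for reuse */
}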
skb              1519 drivers/net/ethernet/via/via-velocity.c 	rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
skb              1520 drivers/net/ethernet/via/via-velocity.c 	if (rd_info->skb == NULL)
skb              1527 drivers/net/ethernet/via/via-velocity.c 	skb_reserve(rd_info->skb,
skb              1528 drivers/net/ethernet/via/via-velocity.c 			64 - ((unsigned long) rd_info->skb->data & 63));
skb              1529 drivers/net/ethernet/via/via-velocity.c 	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
skb              1555 drivers/net/ethernet/via/via-velocity.c 		if (!vptr->rx.info[dirty].skb) {
skb              1591 drivers/net/ethernet/via/via-velocity.c 		if (!rd_info->skb)
skb              1597 drivers/net/ethernet/via/via-velocity.c 		dev_kfree_skb(rd_info->skb);
skb              1598 drivers/net/ethernet/via/via-velocity.c 		rd_info->skb = NULL;
skb              1716 drivers/net/ethernet/via/via-velocity.c 	struct sk_buff *skb = tdinfo->skb;
skb              1723 drivers/net/ethernet/via/via-velocity.c 		size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
skb              1726 drivers/net/ethernet/via/via-velocity.c 		if (skb_shinfo(skb)->nr_frags > 0)
skb              1733 drivers/net/ethernet/via/via-velocity.c 	dev_consume_skb_irq(skb);
skb              1734 drivers/net/ethernet/via/via-velocity.c 	tdinfo->skb = NULL;
skb              1749 drivers/net/ethernet/via/via-velocity.c 	if (td_info->skb) {
skb              1753 drivers/net/ethernet/via/via-velocity.c 					td_info->skb->len, DMA_TO_DEVICE);
skb              1757 drivers/net/ethernet/via/via-velocity.c 		dev_kfree_skb(td_info->skb);
skb              1758 drivers/net/ethernet/via/via-velocity.c 		td_info->skb = NULL;
skb              1927 drivers/net/ethernet/via/via-velocity.c 				stats->tx_bytes += tdinfo->skb->len;
skb              1956 drivers/net/ethernet/via/via-velocity.c static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
skb              1958 drivers/net/ethernet/via/via-velocity.c 	skb_checksum_none_assert(skb);
skb              1967 drivers/net/ethernet/via/via-velocity.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2013 drivers/net/ethernet/via/via-velocity.c 					struct sk_buff *skb, int pkt_size)
skb              2016 drivers/net/ethernet/via/via-velocity.c 		memmove(skb->data + 2, skb->data, pkt_size);
skb              2017 drivers/net/ethernet/via/via-velocity.c 		skb_reserve(skb, 2);
skb              2035 drivers/net/ethernet/via/via-velocity.c 	struct sk_buff *skb;
skb              2047 drivers/net/ethernet/via/via-velocity.c 	skb = rd_info->skb;
skb              2052 drivers/net/ethernet/via/via-velocity.c 	velocity_rx_csum(rd, skb);
skb              2054 drivers/net/ethernet/via/via-velocity.c 	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
skb              2055 drivers/net/ethernet/via/via-velocity.c 		velocity_iph_realign(vptr, skb, pkt_len);
skb              2056 drivers/net/ethernet/via/via-velocity.c 		rd_info->skb = NULL;
skb              2064 drivers/net/ethernet/via/via-velocity.c 	skb_put(skb, pkt_len - 4);
skb              2065 drivers/net/ethernet/via/via-velocity.c 	skb->protocol = eth_type_trans(skb, vptr->netdev);
skb              2070 drivers/net/ethernet/via/via-velocity.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
skb              2072 drivers/net/ethernet/via/via-velocity.c 	netif_receive_skb(skb);
skb              2097 drivers/net/ethernet/via/via-velocity.c 		if (!vptr->rx.info[rd_curr].skb)
skb              2512 drivers/net/ethernet/via/via-velocity.c static netdev_tx_t velocity_xmit(struct sk_buff *skb,
skb              2524 drivers/net/ethernet/via/via-velocity.c 	if (skb_padto(skb, ETH_ZLEN))
skb              2529 drivers/net/ethernet/via/via-velocity.c 	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
skb              2530 drivers/net/ethernet/via/via-velocity.c 		dev_kfree_skb_any(skb);
skb              2534 drivers/net/ethernet/via/via-velocity.c 	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
skb              2535 drivers/net/ethernet/via/via-velocity.c 			max_t(unsigned int, skb->len, ETH_ZLEN) :
skb              2536 drivers/net/ethernet/via/via-velocity.c 				skb_headlen(skb);
skb              2551 drivers/net/ethernet/via/via-velocity.c 	tdinfo->skb = skb;
skb              2552 drivers/net/ethernet/via/via-velocity.c 	tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
skb              2560 drivers/net/ethernet/via/via-velocity.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2561 drivers/net/ethernet/via/via-velocity.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2576 drivers/net/ethernet/via/via-velocity.c 	if (skb_vlan_tag_present(skb)) {
skb              2577 drivers/net/ethernet/via/via-velocity.c 		td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
skb              2584 drivers/net/ethernet/via/via-velocity.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              2585 drivers/net/ethernet/via/via-velocity.c 		const struct iphdr *ip = ip_hdr(skb);
skb               221 drivers/net/ethernet/via/via-velocity.h 	struct sk_buff *skb;
skb               230 drivers/net/ethernet/via/via-velocity.h 	struct sk_buff *skb;
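/*
 * Annotation — per-descriptor software state (via-velocity.h lines
 * above): each hardware RX/TX descriptor is shadowed by a small struct
 * holding the skb pointer and its DMA handle(s), so the completion path
 * can unmap and free. Illustrative shapes only; the field counts below
 * are assumptions, not the driver's exact layout.
 */
struct my_td_info {
	struct sk_buff *skb;	/* owned until TX completion */
	dma_addr_t skb_dma[7];	/* head + up to 6 fragments */
	int nskb_dma;
};

struct my_rd_info {
	struct sk_buff *skb;	/* buffer currently posted to the NIC */
	dma_addr_t skb_dma;
};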
skb               803 drivers/net/ethernet/wiznet/w5100.c static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
skb               809 drivers/net/ethernet/wiznet/w5100.c 	w5100_writebuf(priv, offset, skb->data, skb->len);
skb               810 drivers/net/ethernet/wiznet/w5100.c 	w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len);
skb               811 drivers/net/ethernet/wiznet/w5100.c 	ndev->stats.tx_bytes += skb->len;
skb               813 drivers/net/ethernet/wiznet/w5100.c 	dev_kfree_skb(skb);
skb               822 drivers/net/ethernet/wiznet/w5100.c 	struct sk_buff *skb = priv->tx_skb;
skb               826 drivers/net/ethernet/wiznet/w5100.c 	if (WARN_ON(!skb))
skb               828 drivers/net/ethernet/wiznet/w5100.c 	w5100_tx_skb(priv->ndev, skb);
skb               831 drivers/net/ethernet/wiznet/w5100.c static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
skb               839 drivers/net/ethernet/wiznet/w5100.c 		priv->tx_skb = skb;
skb               842 drivers/net/ethernet/wiznet/w5100.c 		w5100_tx_skb(ndev, skb);
skb               851 drivers/net/ethernet/wiznet/w5100.c 	struct sk_buff *skb;
skb               864 drivers/net/ethernet/wiznet/w5100.c 	skb = netdev_alloc_skb_ip_align(ndev, rx_len);
skb               865 drivers/net/ethernet/wiznet/w5100.c 	if (unlikely(!skb)) {
skb               872 drivers/net/ethernet/wiznet/w5100.c 	skb_put(skb, rx_len);
skb               873 drivers/net/ethernet/wiznet/w5100.c 	w5100_readbuf(priv, offset + 2, skb->data, rx_len);
skb               876 drivers/net/ethernet/wiznet/w5100.c 	skb->protocol = eth_type_trans(skb, ndev);
skb               881 drivers/net/ethernet/wiznet/w5100.c 	return skb;
skb               888 drivers/net/ethernet/wiznet/w5100.c 	struct sk_buff *skb;
skb               890 drivers/net/ethernet/wiznet/w5100.c 	while ((skb = w5100_rx_skb(priv->ndev)))
skb               891 drivers/net/ethernet/wiznet/w5100.c 		netif_rx_ni(skb);
skb               902 drivers/net/ethernet/wiznet/w5100.c 		struct sk_buff *skb = w5100_rx_skb(priv->ndev);
skb               904 drivers/net/ethernet/wiznet/w5100.c 		if (skb)
skb               905 drivers/net/ethernet/wiznet/w5100.c 			netif_receive_skb(skb);
skb               356 drivers/net/ethernet/wiznet/w5300.c static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
skb               362 drivers/net/ethernet/wiznet/w5300.c 	w5300_write_frame(priv, skb->data, skb->len);
skb               364 drivers/net/ethernet/wiznet/w5300.c 	ndev->stats.tx_bytes += skb->len;
skb               365 drivers/net/ethernet/wiznet/w5300.c 	dev_kfree_skb(skb);
skb               377 drivers/net/ethernet/wiznet/w5300.c 	struct sk_buff *skb;
skb               388 drivers/net/ethernet/wiznet/w5300.c 		skb = netdev_alloc_skb_ip_align(ndev, roundup(rx_len, 2));
skb               389 drivers/net/ethernet/wiznet/w5300.c 		if (unlikely(!skb)) {
skb               397 drivers/net/ethernet/wiznet/w5300.c 		skb_put(skb, rx_len);
skb               398 drivers/net/ethernet/wiznet/w5300.c 		w5300_read_frame(priv, skb->data, rx_len);
skb               399 drivers/net/ethernet/wiznet/w5300.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               401 drivers/net/ethernet/wiznet/w5300.c 		netif_receive_skb(skb);
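/*
 * Annotation — PIO transmit (WIZnet w5100/w5300 lines above): these
 * parts have on-chip buffer RAM, so xmit copies skb->data into the chip
 * and frees the skb immediately; nothing must outlive the call, unlike
 * the DMA drivers above. Sketch with a hypothetical my_write_frame().
 */
#include <linux/netdevice.h>

void my_write_frame(struct net_device *ndev, const void *buf,
		    unsigned int len);	/* hypothetical: copy to chip RAM */

static netdev_tx_t my_pio_start_tx(struct sk_buff *skb,
				   struct net_device *ndev)
{
	my_write_frame(ndev, skb->data, skb->len);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);	/* no DMA in flight; done with the skb */

	return NETDEV_TX_OK;
}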
skb               329 drivers/net/ethernet/xilinx/ll_temac_main.c 	struct sk_buff *skb;
skb               361 drivers/net/ethernet/xilinx/ll_temac_main.c 		skb = netdev_alloc_skb_ip_align(ndev,
skb               363 drivers/net/ethernet/xilinx/ll_temac_main.c 		if (!skb)
skb               366 drivers/net/ethernet/xilinx/ll_temac_main.c 		lp->rx_skb[i] = skb;
skb               368 drivers/net/ethernet/xilinx/ll_temac_main.c 		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
skb               767 drivers/net/ethernet/xilinx/ll_temac_main.c 	struct sk_buff *skb;
skb               775 drivers/net/ethernet/xilinx/ll_temac_main.c 		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
skb               776 drivers/net/ethernet/xilinx/ll_temac_main.c 		if (skb)
skb               777 drivers/net/ethernet/xilinx/ll_temac_main.c 			dev_consume_skb_irq(skb);
skb               825 drivers/net/ethernet/xilinx/ll_temac_main.c temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               834 drivers/net/ethernet/xilinx/ll_temac_main.c 	num_frag = skb_shinfo(skb)->nr_frags;
skb               835 drivers/net/ethernet/xilinx/ll_temac_main.c 	frag = &skb_shinfo(skb)->frags[0];
skb               856 drivers/net/ethernet/xilinx/ll_temac_main.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               857 drivers/net/ethernet/xilinx/ll_temac_main.c 		unsigned int csum_start_off = skb_checksum_start_offset(skb);
skb               858 drivers/net/ethernet/xilinx/ll_temac_main.c 		unsigned int csum_index_off = csum_start_off + skb->csum_offset;
skb               867 drivers/net/ethernet/xilinx/ll_temac_main.c 	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
skb               868 drivers/net/ethernet/xilinx/ll_temac_main.c 				      skb_headlen(skb), DMA_TO_DEVICE);
skb               869 drivers/net/ethernet/xilinx/ll_temac_main.c 	cur_p->len = cpu_to_be32(skb_headlen(skb));
skb               871 drivers/net/ethernet/xilinx/ll_temac_main.c 		dev_kfree_skb_any(skb);
skb               876 drivers/net/ethernet/xilinx/ll_temac_main.c 	ptr_to_txbd((void *)skb, cur_p);
skb               903 drivers/net/ethernet/xilinx/ll_temac_main.c 					 skb_headlen(skb), DMA_TO_DEVICE);
skb               904 drivers/net/ethernet/xilinx/ll_temac_main.c 			dev_kfree_skb_any(skb);
skb               920 drivers/net/ethernet/xilinx/ll_temac_main.c 	skb_tx_timestamp(skb);
skb               957 drivers/net/ethernet/xilinx/ll_temac_main.c 		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
skb               965 drivers/net/ethernet/xilinx/ll_temac_main.c 		if (!skb)
skb               979 drivers/net/ethernet/xilinx/ll_temac_main.c 		skb_put(skb, length);
skb               980 drivers/net/ethernet/xilinx/ll_temac_main.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               981 drivers/net/ethernet/xilinx/ll_temac_main.c 		skb_checksum_none_assert(skb);
skb               985 drivers/net/ethernet/xilinx/ll_temac_main.c 		    (skb->protocol == htons(ETH_P_IP)) &&
skb               986 drivers/net/ethernet/xilinx/ll_temac_main.c 		    (skb->len > 64)) {
skb               993 drivers/net/ethernet/xilinx/ll_temac_main.c 			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
skb               994 drivers/net/ethernet/xilinx/ll_temac_main.c 			skb->ip_summed = CHECKSUM_COMPLETE;
skb               997 drivers/net/ethernet/xilinx/ll_temac_main.c 		if (!skb_defer_rx_timestamp(skb))
skb               998 drivers/net/ethernet/xilinx/ll_temac_main.c 			netif_rx(skb);
skb              1032 drivers/net/ethernet/xilinx/ll_temac_main.c 		struct sk_buff *skb;
skb              1044 drivers/net/ethernet/xilinx/ll_temac_main.c 		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
skb              1045 drivers/net/ethernet/xilinx/ll_temac_main.c 		if (!skb) {
skb              1050 drivers/net/ethernet/xilinx/ll_temac_main.c 		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
skb              1055 drivers/net/ethernet/xilinx/ll_temac_main.c 			dev_kfree_skb_any(skb);
skb              1062 drivers/net/ethernet/xilinx/ll_temac_main.c 		lp->rx_skb[rx_bd] = skb;
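/*
 * Annotation — TX checksum offload setup (ll_temac lines above): for a
 * CHECKSUM_PARTIAL skb the driver derives two byte offsets, where the
 * hardware should start summing and where it must write the result,
 * and programs them into the descriptor. Sketch; write_csum_regs() is
 * a hypothetical stand-in for the descriptor fields.
 */
#include <linux/skbuff.h>

void write_csum_regs(unsigned int start, unsigned int insert); /* hypothetical */

static void my_tx_csum_setup(struct sk_buff *skb)
{
	unsigned int start, insert;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return;

	start = skb_checksum_start_offset(skb);	/* e.g. TCP header start */
	insert = start + skb->csum_offset;	/* where the sum is stored */
	write_csum_regs(start, insert);
}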
skb               377 drivers/net/ethernet/xilinx/xilinx_axienet.h 	struct sk_buff *skb;
skb               166 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		dev_kfree_skb(lp->rx_bd_v[i].skb);
skb               197 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 	struct sk_buff *skb;
skb               229 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
skb               230 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		if (!skb)
skb               233 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		lp->rx_bd_v[i].skb = skb;
skb               235 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 						     skb->data,
skb               548 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		if (cur_p->skb)
skb               549 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 			dev_consume_skb_irq(cur_p->skb);
skb               556 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		cur_p->skb = NULL;
skb               613 drivers/net/ethernet/xilinx/xilinx_axienet_main.c axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               624 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 	num_frag = skb_shinfo(skb)->nr_frags;
skb               643 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               648 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 			csum_start_off = skb_transport_offset(skb);
skb               649 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 			csum_index_off = csum_start_off + skb->csum_offset;
skb               654 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
skb               658 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
skb               659 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
skb               660 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 				     skb_headlen(skb), DMA_TO_DEVICE);
skb               666 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		frag = &skb_shinfo(skb)->frags[ii];
skb               675 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 	cur_p->skb = skb;
skb               703 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 	struct sk_buff *skb, *new_skb;
skb               715 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		skb = cur_p->skb;
skb               716 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		cur_p->skb = NULL;
skb               719 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		skb_put(skb, length);
skb               720 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               722 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		skb->ip_summed = CHECKSUM_NONE;
skb               730 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               733 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 			   skb->protocol == htons(ETH_P_IP) &&
skb               734 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 			   skb->len > 64) {
skb               735 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
skb               736 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 			skb->ip_summed = CHECKSUM_COMPLETE;
skb               739 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		netif_rx(skb);
skb               753 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		cur_p->skb = new_skb;
skb              1543 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		if (cur_p->skb)
skb              1544 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 			dev_kfree_skb_irq(cur_p->skb);
skb              1553 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 		cur_p->skb = NULL;
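/*
 * Annotation — RX checksum reporting (axienet lines above): with full
 * offload the driver asserts CHECKSUM_UNNECESSARY; with partial offload
 * it hands the device's raw 16-bit sum to the stack as
 * CHECKSUM_COMPLETE and lets the core finish validation. A sketch;
 * csum_raw stands in for the descriptor field (app3 in axienet).
 */
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static void my_rx_csum(struct sk_buff *skb, bool full_offload, u32 csum_raw)
{
	skb->ip_summed = CHECKSUM_NONE;		/* default: stack verifies */

	if (full_offload) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->protocol == htons(ETH_P_IP) && skb->len > 64) {
		skb->csum = csum_raw & 0xFFFF;	/* device-computed sum */
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}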
skb               598 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	struct sk_buff *skb;
skb               603 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	skb = netdev_alloc_skb(dev, len + ALIGNMENT);
skb               604 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	if (!skb) {
skb               616 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	align = BUFFER_ALIGN(skb->data);
skb               618 drivers/net/ethernet/xilinx/xilinx_emaclite.c 		skb_reserve(skb, align);
skb               620 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	skb_reserve(skb, 2);
skb               622 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
skb               626 drivers/net/ethernet/xilinx/xilinx_emaclite.c 		dev_kfree_skb_irq(skb);
skb               630 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	skb_put(skb, len);	/* Tell the skb how much data we got */
skb               632 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	skb->protocol = eth_type_trans(skb, dev);
skb               633 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	skb_checksum_none_assert(skb);
skb               638 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	if (!skb_defer_rx_timestamp(skb))
skb               639 drivers/net/ethernet/xilinx/xilinx_emaclite.c 		netif_rx(skb);	/* Send the packet upstream */
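/*
 * Annotation — data alignment via headroom (xilinx_emaclite lines
 * above, same idea as defxx's my_skb_align() below): over-allocate,
 * then skb_reserve() the few bytes needed to align skb->data for the
 * DMA or copy engine. Generic sketch, not either driver's arithmetic.
 */
#include <linux/netdevice.h>

static struct sk_buff *my_alloc_aligned_skb(struct net_device *dev,
					    unsigned int len,
					    unsigned int align)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + align);
	unsigned long off;

	if (!skb)
		return NULL;

	off = (unsigned long)skb->data % align;
	if (off)
		skb_reserve(skb, align - off);	/* trade headroom for alignment */
	return skb;
}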
skb               289 drivers/net/ethernet/xircom/xirc2ps_cs.c static netdev_tx_t do_start_xmit(struct sk_buff *skb,
skb              1032 drivers/net/ethernet/xircom/xirc2ps_cs.c 	    struct sk_buff *skb;
skb              1040 drivers/net/ethernet/xircom/xirc2ps_cs.c 	    skb = netdev_alloc_skb(dev, pktlen + 3);
skb              1041 drivers/net/ethernet/xircom/xirc2ps_cs.c 	    if (!skb) {
skb              1044 drivers/net/ethernet/xircom/xirc2ps_cs.c 		skb_reserve(skb, 2);
skb              1056 drivers/net/ethernet/xircom/xirc2ps_cs.c 			u_char *buf = skb_put(skb, pktlen);
skb              1066 drivers/net/ethernet/xircom/xirc2ps_cs.c 				skb_put(skb, pktlen), (pktlen+1)>>1);
skb              1080 drivers/net/ethernet/xircom/xirc2ps_cs.c 		    u_long *p = skb_put(skb, pktlen);
skb              1093 drivers/net/ethernet/xircom/xirc2ps_cs.c 		    insw(ioaddr+XIRCREG_EDP, skb_put(skb, pktlen),
skb              1096 drivers/net/ethernet/xircom/xirc2ps_cs.c 		skb->protocol = eth_type_trans(skb, dev);
skb              1097 drivers/net/ethernet/xircom/xirc2ps_cs.c 		netif_rx(skb);
skb              1215 drivers/net/ethernet/xircom/xirc2ps_cs.c do_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1221 drivers/net/ethernet/xircom/xirc2ps_cs.c     unsigned pktlen = skb->len;
skb              1224 drivers/net/ethernet/xircom/xirc2ps_cs.c 	  skb, dev, pktlen);
skb              1236 drivers/net/ethernet/xircom/xirc2ps_cs.c         if (skb_padto(skb, ETH_ZLEN))
skb              1256 drivers/net/ethernet/xircom/xirc2ps_cs.c     outsw(ioaddr+XIRCREG_EDP, skb->data, pktlen>>1);
skb              1258 drivers/net/ethernet/xircom/xirc2ps_cs.c 	PutByte(XIRCREG_EDP, skb->data[pktlen-1]);
skb              1263 drivers/net/ethernet/xircom/xirc2ps_cs.c     dev_kfree_skb (skb);
skb               255 drivers/net/ethernet/xscale/ixp4xx_eth.c static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
skb               257 drivers/net/ethernet/xscale/ixp4xx_eth.c 	u8 *data = skb->data;
skb               262 drivers/net/ethernet/xscale/ixp4xx_eth.c 	if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
skb               267 drivers/net/ethernet/xscale/ixp4xx_eth.c 	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
skb               280 drivers/net/ethernet/xscale/ixp4xx_eth.c static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
skb               306 drivers/net/ethernet/xscale/ixp4xx_eth.c 	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
skb               315 drivers/net/ethernet/xscale/ixp4xx_eth.c 	shhwtstamps = skb_hwtstamps(skb);
skb               322 drivers/net/ethernet/xscale/ixp4xx_eth.c static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
skb               330 drivers/net/ethernet/xscale/ixp4xx_eth.c 	shtx = skb_shinfo(skb);
skb               363 drivers/net/ethernet/xscale/ixp4xx_eth.c 	skb_tstamp_tx(skb, &shhwtstamps);
skb               679 drivers/net/ethernet/xscale/ixp4xx_eth.c 		struct sk_buff *skb;
skb               713 drivers/net/ethernet/xscale/ixp4xx_eth.c 		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
skb               714 drivers/net/ethernet/xscale/ixp4xx_eth.c 			phys = dma_map_single(&dev->dev, skb->data,
skb               717 drivers/net/ethernet/xscale/ixp4xx_eth.c 				dev_kfree_skb(skb);
skb               718 drivers/net/ethernet/xscale/ixp4xx_eth.c 				skb = NULL;
skb               722 drivers/net/ethernet/xscale/ixp4xx_eth.c 		skb = netdev_alloc_skb(dev,
skb               726 drivers/net/ethernet/xscale/ixp4xx_eth.c 		if (!skb) {
skb               737 drivers/net/ethernet/xscale/ixp4xx_eth.c 		temp = skb;
skb               738 drivers/net/ethernet/xscale/ixp4xx_eth.c 		skb = port->rx_buff_tab[n];
skb               744 drivers/net/ethernet/xscale/ixp4xx_eth.c 		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
skb               747 drivers/net/ethernet/xscale/ixp4xx_eth.c 		skb_reserve(skb, NET_IP_ALIGN);
skb               748 drivers/net/ethernet/xscale/ixp4xx_eth.c 		skb_put(skb, desc->pkt_len);
skb               750 drivers/net/ethernet/xscale/ixp4xx_eth.c 		debug_pkt(dev, "eth_poll", skb->data, skb->len);
skb               752 drivers/net/ethernet/xscale/ixp4xx_eth.c 		ixp_rx_timestamp(port, skb);
skb               753 drivers/net/ethernet/xscale/ixp4xx_eth.c 		skb->protocol = eth_type_trans(skb, dev);
skb               755 drivers/net/ethernet/xscale/ixp4xx_eth.c 		dev->stats.rx_bytes += skb->len;
skb               756 drivers/net/ethernet/xscale/ixp4xx_eth.c 		netif_receive_skb(skb);
skb               824 drivers/net/ethernet/xscale/ixp4xx_eth.c static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
skb               837 drivers/net/ethernet/xscale/ixp4xx_eth.c 	if (unlikely(skb->len > MAX_MRU)) {
skb               838 drivers/net/ethernet/xscale/ixp4xx_eth.c 		dev_kfree_skb(skb);
skb               843 drivers/net/ethernet/xscale/ixp4xx_eth.c 	debug_pkt(dev, "eth_xmit", skb->data, skb->len);
skb               845 drivers/net/ethernet/xscale/ixp4xx_eth.c 	len = skb->len;
skb               849 drivers/net/ethernet/xscale/ixp4xx_eth.c 	mem = skb->data;
skb               851 drivers/net/ethernet/xscale/ixp4xx_eth.c 	offset = (int)skb->data & 3; /* keep 32-bit alignment */
skb               854 drivers/net/ethernet/xscale/ixp4xx_eth.c 		dev_kfree_skb(skb);
skb               858 drivers/net/ethernet/xscale/ixp4xx_eth.c 	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
skb               863 drivers/net/ethernet/xscale/ixp4xx_eth.c 		dev_kfree_skb(skb);
skb               876 drivers/net/ethernet/xscale/ixp4xx_eth.c 	port->tx_buff_tab[n] = skb;
skb               907 drivers/net/ethernet/xscale/ixp4xx_eth.c 	ixp_tx_timestamp(port, skb);
skb               908 drivers/net/ethernet/xscale/ixp4xx_eth.c 	skb_tx_timestamp(skb);
skb               911 drivers/net/ethernet/xscale/ixp4xx_eth.c 	dev_kfree_skb(skb);
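/*
 * Annotation — hardware TX timestamping (ixp4xx lines above): when the
 * stack set SKBTX_HW_TSTAMP, the driver reads the captured time from
 * its timestamp registers, fills skb_shared_hwtstamps, and reports it
 * with skb_tstamp_tx(). Sketch; hw_ns is assumed to come from a
 * register read, and the real driver also matches PTP fields first.
 */
#include <linux/skbuff.h>
#include <linux/ktime.h>

static void my_tx_timestamp(struct sk_buff *skb, u64 hw_ns)
{
	struct skb_shared_hwtstamps shhwtstamps;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(hw_ns);
	skb_tstamp_tx(skb, &shhwtstamps);	/* clones to the socket error queue */
}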
skb               301 drivers/net/fddi/defxx.c static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
skb              2930 drivers/net/fddi/defxx.c static void my_skb_align(struct sk_buff *skb, int n)
skb              2932 drivers/net/fddi/defxx.c 	unsigned long x = (unsigned long)skb->data;
skb              2937 drivers/net/fddi/defxx.c 	skb_reserve(skb, v - x);
skb              3098 drivers/net/fddi/defxx.c 	struct sk_buff		*skb = NULL;			/* pointer to a sk_buff to hold incoming packet data */
skb              3165 drivers/net/fddi/defxx.c 						skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
skb              3170 drivers/net/fddi/defxx.c 						skb_reserve(skb, RCV_BUFF_K_PADDING);
skb              3179 drivers/net/fddi/defxx.c 					skb = netdev_alloc_skb(bp->dev,
skb              3181 drivers/net/fddi/defxx.c 				if (skb == NULL)
skb              3197 drivers/net/fddi/defxx.c 						skb_copy_to_linear_data(skb,
skb              3202 drivers/net/fddi/defxx.c 					skb_reserve(skb,3);		/* adjust data field so that it points to FC byte */
skb              3203 drivers/net/fddi/defxx.c 					skb_put(skb, pkt_len);		/* pass up packet length, NOT including CRC */
skb              3204 drivers/net/fddi/defxx.c 					skb->protocol = fddi_type_trans(skb, bp->dev);
skb              3205 drivers/net/fddi/defxx.c 					bp->rcv_total_bytes += skb->len;
skb              3206 drivers/net/fddi/defxx.c 					netif_rx(skb);
skb              3291 drivers/net/fddi/defxx.c static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
skb              3312 drivers/net/fddi/defxx.c 	if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
skb              3315 drivers/net/fddi/defxx.c 			dev->name, skb->len);
skb              3318 drivers/net/fddi/defxx.c 		dev_kfree_skb(skb);
skb              3340 drivers/net/fddi/defxx.c 			dev_kfree_skb(skb);		/* free sk_buff now */
skb              3348 drivers/net/fddi/defxx.c 	skb_push(skb, 3);
skb              3349 drivers/net/fddi/defxx.c 	skb->data[0] = DFX_PRH0_BYTE;	/* these byte values are defined */
skb              3350 drivers/net/fddi/defxx.c 	skb->data[1] = DFX_PRH1_BYTE;	/* in the Motorola FDDI MAC chip */
skb              3351 drivers/net/fddi/defxx.c 	skb->data[2] = DFX_PRH2_BYTE;	/* specification */
skb              3353 drivers/net/fddi/defxx.c 	dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
skb              3356 drivers/net/fddi/defxx.c 		skb_pull(skb, 3);
skb              3407 drivers/net/fddi/defxx.c 	p_xmt_descr->long_0	= (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
skb              3423 drivers/net/fddi/defxx.c 		skb_pull(skb,3);
skb              3444 drivers/net/fddi/defxx.c 	p_xmt_drv_descr->p_skb = skb;
skb              3569 drivers/net/fddi/defxx.c 			struct sk_buff *skb;
skb              3570 drivers/net/fddi/defxx.c 			skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
skb              3571 drivers/net/fddi/defxx.c 			if (skb) {
skb              3576 drivers/net/fddi/defxx.c 				dev_kfree_skb(skb);
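/*
 * Annotation — reversible header push (defxx lines above): the driver
 * prepends a 3-byte Packet Request Header with skb_push() and, if the
 * DMA mapping fails, restores the frame with skb_pull() before bouncing
 * it back to the stack. Sketch; the PRH byte values are placeholders,
 * and the caller is assumed to have guaranteed 3 bytes of headroom.
 */
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int my_push_prh(struct sk_buff *skb, struct device *dmadev,
		       dma_addr_t *dma)
{
	skb_push(skb, 3);
	skb->data[0] = 0x20;	/* placeholder PRH bytes */
	skb->data[1] = 0x28;
	skb->data[2] = 0x00;

	*dma = dma_map_single(dmadev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dmadev, *dma)) {
		skb_pull(skb, 3);	/* hand back an unmodified frame */
		return -ENOMEM;
	}
	return 0;
}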
skb               107 drivers/net/fddi/defza.c static inline void fza_skb_align(struct sk_buff *skb, unsigned int v)
skb               111 drivers/net/fddi/defza.c 	x = (unsigned long)skb->data;
skb               114 drivers/net/fddi/defza.c 	skb_reserve(skb, y - x);
skb               678 drivers/net/fddi/defza.c 	struct sk_buff *skb, *newskb;
skb               692 drivers/net/fddi/defza.c 		skb = fp->rx_skbuff[i];
skb               700 drivers/net/fddi/defza.c 		frame = (struct fza_fddihdr *)skb->data;
skb               741 drivers/net/fddi/defza.c 						    skb->data, len, rmc,
skb               750 drivers/net/fddi/defza.c 			skb_reserve(skb, 3);	/* Skip over P and SD. */
skb               751 drivers/net/fddi/defza.c 			skb_put(skb, pkt_len);	/* And cut off FCS. */
skb               752 drivers/net/fddi/defza.c 			skb->protocol = fddi_type_trans(skb, dev);
skb               754 drivers/net/fddi/defza.c 			rx_stat = netif_rx(skb);
skb               764 drivers/net/fddi/defza.c 			skb = newskb;
skb               766 drivers/net/fddi/defza.c 			fp->rx_skbuff[i] = skb;
skb               804 drivers/net/fddi/defza.c 				struct sk_buff *skb;
skb               809 drivers/net/fddi/defza.c 				skb = fza_alloc_skb_irq(dev, (len + 3) & ~3);
skb               810 drivers/net/fddi/defza.c 				if (!skb)
skb               814 drivers/net/fddi/defza.c 					       skb->data;
skb               818 drivers/net/fddi/defza.c 				skb->dev = dev;
skb               819 drivers/net/fddi/defza.c 				skb_reserve(skb, 3);	/* Skip over PRH. */
skb               820 drivers/net/fddi/defza.c 				skb_put(skb, len - 3);
skb               821 drivers/net/fddi/defza.c 				skb_reset_network_header(skb);
skb               823 drivers/net/fddi/defza.c 				dev_queue_xmit_nit(skb, dev);
skb               825 drivers/net/fddi/defza.c 				dev_kfree_skb_irq(skb);
skb              1078 drivers/net/fddi/defza.c static netdev_tx_t fza_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1085 drivers/net/fddi/defza.c 	skb_push(skb, 3);			/* Make room for PRH. */
skb              1088 drivers/net/fddi/defza.c 	fc = skb->data[3];
skb              1089 drivers/net/fddi/defza.c 	skb->data[0] = 0;
skb              1090 drivers/net/fddi/defza.c 	skb->data[1] = 0;
skb              1091 drivers/net/fddi/defza.c 	skb->data[2] = FZA_PRH2_NORMAL;
skb              1093 drivers/net/fddi/defza.c 		skb->data[0] |= FZA_PRH0_FRAME_SYNC;
skb              1098 drivers/net/fddi/defza.c 			skb->data[0] |= FZA_PRH0_TKN_TYPE_IMM;
skb              1099 drivers/net/fddi/defza.c 			skb->data[1] |= FZA_PRH1_TKN_SEND_NONE;
skb              1102 drivers/net/fddi/defza.c 			skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
skb              1103 drivers/net/fddi/defza.c 			skb->data[1] |= FZA_PRH1_TKN_SEND_UNR;
skb              1105 drivers/net/fddi/defza.c 		skb->data[1] |= FZA_PRH1_CRC_NORMAL;
skb              1109 drivers/net/fddi/defza.c 		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
skb              1110 drivers/net/fddi/defza.c 		skb->data[1] |= FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR;
skb              1113 drivers/net/fddi/defza.c 		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
skb              1114 drivers/net/fddi/defza.c 		skb->data[1] |= FZA_PRH1_TKN_SEND_ORIG;
skb              1128 drivers/net/fddi/defza.c 			  { .data_ptr = (struct fza_buffer_tx *)skb->data },
skb              1129 drivers/net/fddi/defza.c 			  skb->len, dev, 0);
skb              1142 drivers/net/fddi/defza.c 	dev_kfree_skb(skb);
skb              1151 drivers/net/fddi/defza.c 	struct sk_buff *skb;
skb              1160 drivers/net/fddi/defza.c 		skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
skb              1161 drivers/net/fddi/defza.c 		if (skb) {
skb              1162 drivers/net/fddi/defza.c 			fza_skb_align(skb, 512);
skb              1163 drivers/net/fddi/defza.c 			dma = dma_map_single(fp->bdev, skb->data,
skb              1167 drivers/net/fddi/defza.c 				dev_kfree_skb(skb);
skb              1168 drivers/net/fddi/defza.c 				skb = NULL;
skb              1171 drivers/net/fddi/defza.c 		if (!skb) {
skb              1182 drivers/net/fddi/defza.c 		fp->rx_skbuff[i] = skb;
skb               105 drivers/net/fddi/skfp/h/osdef1st.h 	struct sk_buff *skb;
skb               110 drivers/net/fddi/skfp/h/osdef1st.h 	struct sk_buff *skb;
skb               107 drivers/net/fddi/skfp/skfddi.c static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
skb              1037 drivers/net/fddi/skfp/skfddi.c static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
skb              1054 drivers/net/fddi/skfp/skfddi.c 	if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
skb              1058 drivers/net/fddi/skfp/skfddi.c 		dev_kfree_skb(skb);
skb              1067 drivers/net/fddi/skfp/skfddi.c 	skb_queue_tail(&bp->SendSkbQueue, skb);
skb              1103 drivers/net/fddi/skfp/skfddi.c 	struct sk_buff *skb;
skb              1115 drivers/net/fddi/skfp/skfddi.c 		skb = skb_dequeue(&bp->SendSkbQueue);
skb              1117 drivers/net/fddi/skfp/skfddi.c 		if (!skb) {
skb              1123 drivers/net/fddi/skfp/skfddi.c 		fc = skb->data[0];
skb              1142 drivers/net/fddi/skfp/skfddi.c 		frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
skb              1159 drivers/net/fddi/skfp/skfddi.c 			skb_queue_head(&bp->SendSkbQueue, skb);
skb              1168 drivers/net/fddi/skfp/skfddi.c 		CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
skb              1172 drivers/net/fddi/skfp/skfddi.c 		dma_address = pci_map_single(&bp->pdev, skb->data,
skb              1173 drivers/net/fddi/skfp/skfddi.c 					     skb->len, PCI_DMA_TODEVICE);
skb              1175 drivers/net/fddi/skfp/skfddi.c 			txd->txd_os.skb = skb;			// save skb
skb              1178 drivers/net/fddi/skfp/skfddi.c 		hwm_tx_frag(smc, skb->data, dma_address, skb->len,
skb              1183 drivers/net/fddi/skfp/skfddi.c 					 skb->len, PCI_DMA_TODEVICE);
skb              1184 drivers/net/fddi/skfp/skfddi.c 			dev_kfree_skb_irq(skb);
skb              1462 drivers/net/fddi/skfp/skfddi.c 		if (r->rxd_os.skb && r->rxd_os.dma_addr) {
skb              1489 drivers/net/fddi/skfp/skfddi.c 	struct sk_buff *skb;
skb              1494 drivers/net/fddi/skfp/skfddi.c 	if (!(skb = txd->txd_os.skb)) {
skb              1498 drivers/net/fddi/skfp/skfddi.c 	txd->txd_os.skb = NULL;
skb              1502 drivers/net/fddi/skfp/skfddi.c 			 skb->len, PCI_DMA_TODEVICE);
skb              1506 drivers/net/fddi/skfp/skfddi.c 	smc->os.MacStat.gen.tx_bytes+=skb->len;	// Count bytes
skb              1509 drivers/net/fddi/skfp/skfddi.c 	dev_kfree_skb_irq(skb);
skb              1573 drivers/net/fddi/skfp/skfddi.c 	struct sk_buff *skb;
skb              1585 drivers/net/fddi/skfp/skfddi.c 	skb = rxd->rxd_os.skb;
skb              1586 drivers/net/fddi/skfp/skfddi.c 	if (!skb) {
skb              1591 drivers/net/fddi/skfp/skfddi.c 	virt = skb->data;
skb              1595 drivers/net/fddi/skfp/skfddi.c 	dump_data(skb->data, len);
skb              1635 drivers/net/fddi/skfp/skfddi.c 		skb_pull(skb, RifLength);
skb              1652 drivers/net/fddi/skfp/skfddi.c 	rxd->rxd_os.skb = NULL;
skb              1653 drivers/net/fddi/skfp/skfddi.c 	skb_trim(skb, len);
skb              1654 drivers/net/fddi/skfp/skfddi.c 	skb->protocol = fddi_type_trans(skb, bp->dev);
skb              1656 drivers/net/fddi/skfp/skfddi.c 	netif_rx(skb);
skb              1693 drivers/net/fddi/skfp/skfddi.c 	struct sk_buff *skb;
skb              1708 drivers/net/fddi/skfp/skfddi.c 		skb = src_rxd->rxd_os.skb;
skb              1709 drivers/net/fddi/skfp/skfddi.c 		if (skb == NULL) {	// this should not happen
skb              1712 drivers/net/fddi/skfp/skfddi.c 			skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
skb              1713 drivers/net/fddi/skfp/skfddi.c 			if (skb) {
skb              1715 drivers/net/fddi/skfp/skfddi.c 				rxd->rxd_os.skb = skb;
skb              1716 drivers/net/fddi/skfp/skfddi.c 				skb_reserve(skb, 3);
skb              1717 drivers/net/fddi/skfp/skfddi.c 				skb_put(skb, MaxFrameSize);
skb              1718 drivers/net/fddi/skfp/skfddi.c 				v_addr = skb->data;
skb              1727 drivers/net/fddi/skfp/skfddi.c 				rxd->rxd_os.skb = NULL;
skb              1733 drivers/net/fddi/skfp/skfddi.c 			rxd->rxd_os.skb = skb;
skb              1734 drivers/net/fddi/skfp/skfddi.c 			v_addr = skb->data;
skb              1770 drivers/net/fddi/skfp/skfddi.c 	struct sk_buff *skb;
skb              1784 drivers/net/fddi/skfp/skfddi.c 		skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
skb              1785 drivers/net/fddi/skfp/skfddi.c 		if (skb) {
skb              1787 drivers/net/fddi/skfp/skfddi.c 			skb_reserve(skb, 3);
skb              1788 drivers/net/fddi/skfp/skfddi.c 			skb_put(skb, MaxFrameSize);
skb              1789 drivers/net/fddi/skfp/skfddi.c 			v_addr = skb->data;
skb              1806 drivers/net/fddi/skfp/skfddi.c 		rxd->rxd_os.skb = skb;
skb              1836 drivers/net/fddi/skfp/skfddi.c 	struct sk_buff *skb;
skb              1845 drivers/net/fddi/skfp/skfddi.c 		skb = rxd->rxd_os.skb;
skb              1846 drivers/net/fddi/skfp/skfddi.c 		if (skb != NULL) {
skb              1853 drivers/net/fddi/skfp/skfddi.c 			dev_kfree_skb(skb);
skb              1854 drivers/net/fddi/skfp/skfddi.c 			rxd->rxd_os.skb = NULL;
skb              1892 drivers/net/fddi/skfp/skfddi.c 	struct sk_buff *skb;
skb              1904 drivers/net/fddi/skfp/skfddi.c 	skb = alloc_skb(len + 3, GFP_ATOMIC);
skb              1905 drivers/net/fddi/skfp/skfddi.c 	if (!skb) {
skb              1909 drivers/net/fddi/skfp/skfddi.c 	skb_reserve(skb, 3);
skb              1910 drivers/net/fddi/skfp/skfddi.c 	skb_put(skb, len);
skb              1911 drivers/net/fddi/skfp/skfddi.c 	skb_copy_to_linear_data(skb, look_ahead, len);
skb              1914 drivers/net/fddi/skfp/skfddi.c 	skb->protocol = fddi_type_trans(skb, smc->os.dev);
skb              1915 drivers/net/fddi/skfp/skfddi.c 	netif_rx(skb);
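/*
 * Annotation — deferred transmit queue (skfp lines above): xmit only
 * appends to an skb_queue and a separate drain routine talks to the
 * hardware, re-queueing at the head with skb_queue_head() when
 * descriptors run out. Sketch; my_drain_tx() is hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

void my_drain_tx(struct net_device *dev); /* hypothetical: dequeue to HW */

static netdev_tx_t my_send_pkt(struct sk_buff *skb, struct net_device *dev,
			       struct sk_buff_head *txq)
{
	skb_queue_tail(txq, skb);	/* spinlock-protected FIFO */
	my_drain_tx(dev);
	return NETDEV_TX_OK;
}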
skb               626 drivers/net/fjes/fjes_main.c fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
skb               647 drivers/net/fjes/fjes_main.c 	eth = (struct ethhdr *)skb->data;
skb               650 drivers/net/fjes/fjes_main.c 	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
skb               652 drivers/net/fjes/fjes_main.c 	data = skb->data;
skb               653 drivers/net/fjes/fjes_main.c 	len = skb->len;
skb               730 drivers/net/fjes/fjes_main.c 				memcpy(shortpkt, skb->data, skb->len);
skb               786 drivers/net/fjes/fjes_main.c 		dev_kfree_skb(skb);
skb              1114 drivers/net/fjes/fjes_main.c 	struct sk_buff *skb;
skb              1138 drivers/net/fjes/fjes_main.c 			skb = napi_alloc_skb(napi, frame_len);
skb              1139 drivers/net/fjes/fjes_main.c 			if (!skb) {
skb              1147 drivers/net/fjes/fjes_main.c 				skb_put_data(skb, frame, frame_len);
skb              1148 drivers/net/fjes/fjes_main.c 				skb->protocol = eth_type_trans(skb, netdev);
skb              1149 drivers/net/fjes/fjes_main.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1151 drivers/net/fjes/fjes_main.c 				netif_receive_skb(skb);
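/*
 * Annotation — NAPI receive delivery (fjes lines above): allocate from
 * the per-NAPI cache, copy the frame in, classify with
 * eth_type_trans(), and deliver via netif_receive_skb() from poll
 * context. A minimal sketch of that sequence.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void my_napi_rx(struct napi_struct *napi, struct net_device *ndev,
		       const void *frame, unsigned int len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, len);

	if (!skb) {
		ndev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, frame, len);
	skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb(skb);
}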
skb               162 drivers/net/geneve.c static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
skb               164 drivers/net/geneve.c 	return (struct genevehdr *)(udp_hdr(skb) + 1);
skb               168 drivers/net/geneve.c 					    struct sk_buff *skb)
skb               177 drivers/net/geneve.c 		iph = ip_hdr(skb); /* outer IP header... */
skb               183 drivers/net/geneve.c 			vni = geneve_hdr(skb)->vni;
skb               194 drivers/net/geneve.c 		ip6h = ipv6_hdr(skb); /* outer IPv6 header... */
skb               200 drivers/net/geneve.c 			vni = geneve_hdr(skb)->vni;
skb               212 drivers/net/geneve.c 		      struct sk_buff *skb)
skb               214 drivers/net/geneve.c 	struct genevehdr *gnvh = geneve_hdr(skb);
skb               228 drivers/net/geneve.c 		tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
skb               250 drivers/net/geneve.c 	skb_reset_mac_header(skb);
skb               251 drivers/net/geneve.c 	skb->protocol = eth_type_trans(skb, geneve->dev);
skb               252 drivers/net/geneve.c 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
skb               255 drivers/net/geneve.c 		skb_dst_set(skb, &tun_dst->dst);
skb               258 drivers/net/geneve.c 	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
skb               263 drivers/net/geneve.c 	oiph = skb_network_header(skb);
skb               264 drivers/net/geneve.c 	skb_reset_network_header(skb);
skb               267 drivers/net/geneve.c 		err = IP_ECN_decapsulate(oiph, skb);
skb               270 drivers/net/geneve.c 		err = IP6_ECN_decapsulate(oiph, skb);
skb               293 drivers/net/geneve.c 	len = skb->len;
skb               294 drivers/net/geneve.c 	err = gro_cells_receive(&geneve->gro_cells, skb);
skb               305 drivers/net/geneve.c 	kfree_skb(skb);
skb               343 drivers/net/geneve.c static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb               351 drivers/net/geneve.c 	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
skb               355 drivers/net/geneve.c 	geneveh = geneve_hdr(skb);
skb               366 drivers/net/geneve.c 	geneve = geneve_lookup_skb(gs, skb);
skb               371 drivers/net/geneve.c 	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
skb               378 drivers/net/geneve.c 	geneve_rx(geneve, gs, skb);
skb               383 drivers/net/geneve.c 	kfree_skb(skb);
skb               388 drivers/net/geneve.c static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb)
skb               395 drivers/net/geneve.c 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + GENEVE_BASE_HLEN))
skb               398 drivers/net/geneve.c 	geneveh = geneve_hdr(skb);
skb               410 drivers/net/geneve.c 		struct iphdr *iph = ip_hdr(skb);
skb               414 drivers/net/geneve.c 			vni = geneve_hdr(skb)->vni;
skb               423 drivers/net/geneve.c 		struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               429 drivers/net/geneve.c 			vni = geneve_hdr(skb)->vni;
skb               475 drivers/net/geneve.c 					  struct sk_buff *skb)
skb               485 drivers/net/geneve.c 	off_gnv = skb_gro_offset(skb);
skb               487 drivers/net/geneve.c 	gh = skb_gro_header_fast(skb, off_gnv);
skb               488 drivers/net/geneve.c 	if (skb_gro_header_hard(skb, hlen)) {
skb               489 drivers/net/geneve.c 		gh = skb_gro_header_slow(skb, hlen, off_gnv);
skb               499 drivers/net/geneve.c 	if (skb_gro_header_hard(skb, hlen)) {
skb               500 drivers/net/geneve.c 		gh = skb_gro_header_slow(skb, hlen, off_gnv);
skb               524 drivers/net/geneve.c 	skb_gro_pull(skb, gh_len);
skb               525 drivers/net/geneve.c 	skb_gro_postpull_rcsum(skb, gh, gh_len);
skb               526 drivers/net/geneve.c 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
skb               532 drivers/net/geneve.c 	skb_gro_flush_final(skb, pp, flush);
skb               537 drivers/net/geneve.c static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
skb               546 drivers/net/geneve.c 	gh = (struct genevehdr *)(skb->data + nhoff);
skb               553 drivers/net/geneve.c 		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
skb               557 drivers/net/geneve.c 	skb_set_inner_mac_header(skb, nhoff + gh_len);
skb               740 drivers/net/geneve.c static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
skb               749 drivers/net/geneve.c 	skb_reset_mac_header(skb);
skb               750 drivers/net/geneve.c 	skb_scrub_packet(skb, xnet);
skb               754 drivers/net/geneve.c 	err = skb_cow_head(skb, min_headroom);
skb               758 drivers/net/geneve.c 	err = udp_tunnel_handle_offloads(skb, udp_sum);
skb               762 drivers/net/geneve.c 	gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len);
skb               764 drivers/net/geneve.c 	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
skb               772 drivers/net/geneve.c static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
skb               778 drivers/net/geneve.c 	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
skb               788 drivers/net/geneve.c 	fl4->flowi4_mark = skb->mark;
skb               795 drivers/net/geneve.c 		tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
skb               822 drivers/net/geneve.c static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
skb               828 drivers/net/geneve.c 	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
skb               838 drivers/net/geneve.c 	fl6->flowi6_mark = skb->mark;
skb               844 drivers/net/geneve.c 		prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
skb               874 drivers/net/geneve.c static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
skb               888 drivers/net/geneve.c 	rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
skb               892 drivers/net/geneve.c 	skb_tunnel_check_pmtu(skb, &rt->dst,
skb               895 drivers/net/geneve.c 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
skb               897 drivers/net/geneve.c 		tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
skb               902 drivers/net/geneve.c 		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
skb               904 drivers/net/geneve.c 			ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
skb               912 drivers/net/geneve.c 			struct ethhdr *eth = eth_hdr(skb);
skb               917 drivers/net/geneve.c 				struct iphdr *iph = ip_hdr(skb);
skb               925 drivers/net/geneve.c 	err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
skb               929 drivers/net/geneve.c 	udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
skb               937 drivers/net/geneve.c static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
skb               950 drivers/net/geneve.c 	dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
skb               954 drivers/net/geneve.c 	skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
skb               956 drivers/net/geneve.c 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
skb               958 drivers/net/geneve.c 		prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
skb               962 drivers/net/geneve.c 					   ip_hdr(skb), skb);
skb               964 drivers/net/geneve.c 			ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
skb               969 drivers/net/geneve.c 	err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
skb               973 drivers/net/geneve.c 	udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
skb               981 drivers/net/geneve.c static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
skb               988 drivers/net/geneve.c 		info = skb_tunnel_info(skb);
skb              1001 drivers/net/geneve.c 		err = geneve6_xmit_skb(skb, dev, geneve, info);
skb              1004 drivers/net/geneve.c 		err = geneve_xmit_skb(skb, dev, geneve, info);
skb              1010 drivers/net/geneve.c 	dev_kfree_skb(skb);
skb              1032 drivers/net/geneve.c static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
skb              1034 drivers/net/geneve.c 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
skb              1042 drivers/net/geneve.c 		rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
skb              1054 drivers/net/geneve.c 		dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
skb              1065 drivers/net/geneve.c 	info->key.tp_src = udp_flow_src_port(geneve->net, skb,
skb              1681 drivers/net/geneve.c static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              1692 drivers/net/geneve.c 	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
skb              1696 drivers/net/geneve.c 		if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
skb              1699 drivers/net/geneve.c 		if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
skb              1705 drivers/net/geneve.c 		if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
skb              1708 drivers/net/geneve.c 		if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
skb              1714 drivers/net/geneve.c 	if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
skb              1715 drivers/net/geneve.c 	    nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
skb              1716 drivers/net/geneve.c 	    nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label))
skb              1719 drivers/net/geneve.c 	if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->df))
skb              1722 drivers/net/geneve.c 	if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
skb              1725 drivers/net/geneve.c 	if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
skb              1729 drivers/net/geneve.c 	if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
skb              1734 drivers/net/geneve.c 	if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit))
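/*
 * Annotation — netlink attribute fill (geneve_fill_info() lines above):
 * the standard ->fill_info shape is a run of nla_put_*() calls that all
 * branch to a single -EMSGSIZE exit. Sketch with hypothetical attribute
 * IDs; only nla_put_u32()/nla_put_u8() are the real helpers.
 */
#include <net/netlink.h>

#define MY_ATTR_ID	1	/* hypothetical attribute types */
#define MY_ATTR_TTL	2

static int my_fill_info(struct sk_buff *skb, u32 id, u8 ttl)
{
	if (nla_put_u32(skb, MY_ATTR_ID, id))
		goto nla_put_failure;
	if (nla_put_u8(skb, MY_ATTR_TTL, ttl))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}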
skb               153 drivers/net/gtp.c static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
skb               158 drivers/net/gtp.c 	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
skb               161 drivers/net/gtp.c 	iph = (struct iphdr *)(skb->data + hdrlen);
skb               172 drivers/net/gtp.c static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
skb               175 drivers/net/gtp.c 	switch (ntohs(skb->protocol)) {
skb               177 drivers/net/gtp.c 		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
skb               182 drivers/net/gtp.c static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
skb               187 drivers/net/gtp.c 	if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
skb               193 drivers/net/gtp.c 	if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
skb               203 drivers/net/gtp.c 	skb_reset_network_header(skb);
skb               205 drivers/net/gtp.c 	skb->dev = pctx->dev;
skb               210 drivers/net/gtp.c 	stats->rx_bytes += skb->len;
skb               213 drivers/net/gtp.c 	netif_rx(skb);
skb               218 drivers/net/gtp.c static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
skb               225 drivers/net/gtp.c 	if (!pskb_may_pull(skb, hdrlen))
skb               228 drivers/net/gtp.c 	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
skb               238 drivers/net/gtp.c 		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
skb               242 drivers/net/gtp.c 	return gtp_rx(pctx, skb, hdrlen, gtp->role);
skb               245 drivers/net/gtp.c static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
skb               252 drivers/net/gtp.c 	if (!pskb_may_pull(skb, hdrlen))
skb               255 drivers/net/gtp.c 	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
skb               273 drivers/net/gtp.c 	if (!pskb_may_pull(skb, hdrlen))
skb               276 drivers/net/gtp.c 	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
skb               280 drivers/net/gtp.c 		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
skb               284 drivers/net/gtp.c 	return gtp_rx(pctx, skb, hdrlen, gtp->role);
skb               329 drivers/net/gtp.c static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb               343 drivers/net/gtp.c 		ret = gtp0_udp_encap_recv(gtp, skb);
skb               347 drivers/net/gtp.c 		ret = gtp1u_udp_encap_recv(gtp, skb);
skb               361 drivers/net/gtp.c 		kfree_skb(skb);
skb               404 drivers/net/gtp.c static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
skb               406 drivers/net/gtp.c 	int payload_len = skb->len;
skb               409 drivers/net/gtp.c 	gtp0 = skb_push(skb, sizeof(*gtp0));
skb               421 drivers/net/gtp.c static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
skb               423 drivers/net/gtp.c 	int payload_len = skb->len;
skb               426 drivers/net/gtp.c 	gtp1 = skb_push(skb, sizeof(*gtp1));
skb               454 drivers/net/gtp.c static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
skb               459 drivers/net/gtp.c 		gtp0_push_header(skb, pktinfo->pctx);
skb               463 drivers/net/gtp.c 		gtp1_push_header(skb, pktinfo->pctx);
skb               482 drivers/net/gtp.c static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
skb               496 drivers/net/gtp.c 	iph = ip_hdr(skb);
skb               524 drivers/net/gtp.c 	skb_dst_drop(skb);
skb               543 drivers/net/gtp.c 	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
skb               545 drivers/net/gtp.c 	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
skb               548 drivers/net/gtp.c 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
skb               549 drivers/net/gtp.c 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
skb               555 drivers/net/gtp.c 	gtp_push_header(skb, pktinfo);
skb               564 drivers/net/gtp.c static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb               566 drivers/net/gtp.c 	unsigned int proto = ntohs(skb->protocol);
skb               571 drivers/net/gtp.c 	if (skb_cow_head(skb, dev->needed_headroom))
skb               574 drivers/net/gtp.c 	skb_reset_inner_headers(skb);
skb               580 drivers/net/gtp.c 		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
skb               595 drivers/net/gtp.c 		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
skb               608 drivers/net/gtp.c 	dev_kfree_skb(skb);
skb               739 drivers/net/gtp.c static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb               743 drivers/net/gtp.c 	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
skb              1042 drivers/net/gtp.c static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
skb              1076 drivers/net/gtp.c 	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
skb              1142 drivers/net/gtp.c static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
skb              1152 drivers/net/gtp.c 	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
skb              1174 drivers/net/gtp.c static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
skb              1179 drivers/net/gtp.c 	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
skb              1184 drivers/net/gtp.c 	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
skb              1185 drivers/net/gtp.c 	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
skb              1186 drivers/net/gtp.c 	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
skb              1191 drivers/net/gtp.c 		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
skb              1192 drivers/net/gtp.c 		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
skb              1196 drivers/net/gtp.c 		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
skb              1197 drivers/net/gtp.c 		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
skb              1201 drivers/net/gtp.c 	genlmsg_end(skb, genlh);
skb              1206 drivers/net/gtp.c 	genlmsg_cancel(skb, genlh);
skb              1210 drivers/net/gtp.c static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
skb              1221 drivers/net/gtp.c 	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
skb              1233 drivers/net/gtp.c 	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
skb              1248 drivers/net/gtp.c static int gtp_genl_dump_pdp(struct sk_buff *skb,
skb              1253 drivers/net/gtp.c 	struct net *net = sock_net(skb->sk);
skb              1274 drivers/net/gtp.c 				    gtp_genl_fill_info(skb,
skb              1275 drivers/net/gtp.c 					    NETLINK_CB(cb->skb).portid,
skb              1293 drivers/net/gtp.c 	return skb->len;
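
Note: the gtp.c receive entries above all follow the same validate-then-parse shape: pskb_may_pull() guarantees the UDP plus GTP headers sit in the linear area before skb->data is dereferenced at a fixed offset past the UDP header. A minimal sketch of that idiom, using a simplified, hypothetical GTPv1-U header struct (not the driver's real layout):

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/udp.h>

	/* Hypothetical, trimmed-down GTPv1-U header, for illustration only. */
	struct demo_gtp1_hdr {
		u8	flags;
		u8	type;
		__be16	length;
		__be32	tid;		/* tunnel endpoint identifier */
	} __packed;

	static int demo_gtp1u_pull(struct sk_buff *skb)
	{
		unsigned int hdrlen = sizeof(struct udphdr) +
				      sizeof(struct demo_gtp1_hdr);
		struct demo_gtp1_hdr *gtp1;

		/* Never read the header before making sure it is linear. */
		if (!pskb_may_pull(skb, hdrlen))
			return -EINVAL;

		gtp1 = (struct demo_gtp1_hdr *)(skb->data + sizeof(struct udphdr));
		return gtp1->type;	/* caller dispatches on the message type */
	}
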
skb               246 drivers/net/hamradio/6pack.c static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev)
skb               250 drivers/net/hamradio/6pack.c 	if (skb->protocol == htons(ETH_P_IP))
skb               251 drivers/net/hamradio/6pack.c 		return ax25_ip_xmit(skb);
skb               256 drivers/net/hamradio/6pack.c 	dev->stats.tx_bytes += skb->len;
skb               257 drivers/net/hamradio/6pack.c 	sp_encaps(sp, skb->data, skb->len);
skb               260 drivers/net/hamradio/6pack.c 	dev_kfree_skb(skb);
skb               339 drivers/net/hamradio/6pack.c 	struct sk_buff *skb;
skb               347 drivers/net/hamradio/6pack.c 	if ((skb = dev_alloc_skb(count + 1)) == NULL)
skb               350 drivers/net/hamradio/6pack.c 	ptr = skb_put(skb, count + 1);
skb               354 drivers/net/hamradio/6pack.c 	skb->protocol = ax25_type_trans(skb, sp->dev);
skb               355 drivers/net/hamradio/6pack.c 	netif_rx(skb);
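
Note: sixpack's receive path is the canonical hamradio pattern repeated by most of the drivers that follow (baycom_epp, dmascc, hdlcdrv, mkiss, scc, yam): allocate, prepend the KISS data byte, copy the payload, let ax25_type_trans() set skb->protocol, then hand off to netif_rx(). A hedged sketch, with stats accounting mostly omitted:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <net/ax25.h>

	static void demo_ax25_rx(struct net_device *dev, const u8 *buf, int count)
	{
		struct sk_buff *skb = dev_alloc_skb(count + 1);

		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_put_u8(skb, 0);		/* KISS command byte: data frame */
		skb_put_data(skb, buf, count);
		skb->protocol = ax25_type_trans(skb, dev);
		netif_rx(skb);
	}
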
skb               193 drivers/net/hamradio/baycom_epp.c 	struct sk_buff *skb;  /* next transmit packet  */
skb               360 drivers/net/hamradio/baycom_epp.c 	struct sk_buff *skb;
skb               369 drivers/net/hamradio/baycom_epp.c 	skb = bc->skb;
skb               370 drivers/net/hamradio/baycom_epp.c 	if (!skb)
skb               372 drivers/net/hamradio/baycom_epp.c 	bc->skb = NULL;
skb               373 drivers/net/hamradio/baycom_epp.c 	pkt_len = skb->len-1; /* strip KISS byte */
skb               375 drivers/net/hamradio/baycom_epp.c 	bp = skb->data+1;
skb               414 drivers/net/hamradio/baycom_epp.c 	dev_kfree_skb(skb);
skb               529 drivers/net/hamradio/baycom_epp.c 	struct sk_buff *skb;
skb               538 drivers/net/hamradio/baycom_epp.c 	if (!(skb = dev_alloc_skb(pktlen))) {
skb               543 drivers/net/hamradio/baycom_epp.c 	cp = skb_put(skb, pktlen);
skb               546 drivers/net/hamradio/baycom_epp.c 	skb->protocol = ax25_type_trans(skb, dev);
skb               547 drivers/net/hamradio/baycom_epp.c 	netif_rx(skb);
skb               749 drivers/net/hamradio/baycom_epp.c 	if (!bc->skb)
skb               761 drivers/net/hamradio/baycom_epp.c static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
skb               765 drivers/net/hamradio/baycom_epp.c 	if (skb->protocol == htons(ETH_P_IP))
skb               766 drivers/net/hamradio/baycom_epp.c 		return ax25_ip_xmit(skb);
skb               768 drivers/net/hamradio/baycom_epp.c 	if (skb->data[0] != 0) {
skb               769 drivers/net/hamradio/baycom_epp.c 		do_kiss_params(bc, skb->data, skb->len);
skb               770 drivers/net/hamradio/baycom_epp.c 		dev_kfree_skb(skb);
skb               773 drivers/net/hamradio/baycom_epp.c 	if (bc->skb) {
skb               774 drivers/net/hamradio/baycom_epp.c 		dev_kfree_skb(skb);
skb               778 drivers/net/hamradio/baycom_epp.c 	if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) {
skb               779 drivers/net/hamradio/baycom_epp.c 		dev_kfree_skb(skb);
skb               783 drivers/net/hamradio/baycom_epp.c 	bc->skb = skb;
skb               964 drivers/net/hamradio/baycom_epp.c 	dev_kfree_skb(bc->skb);
skb               965 drivers/net/hamradio/baycom_epp.c 	bc->skb = NULL;
skb              1151 drivers/net/hamradio/baycom_epp.c 	bc->skb = NULL;
skb               149 drivers/net/hamradio/bpqether.c static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
skb               159 drivers/net/hamradio/bpqether.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
skb               162 drivers/net/hamradio/bpqether.c 	if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
skb               178 drivers/net/hamradio/bpqether.c 	eth = eth_hdr(skb);
skb               184 drivers/net/hamradio/bpqether.c 	if (skb_cow(skb, sizeof(struct ethhdr)))
skb               187 drivers/net/hamradio/bpqether.c 	len = skb->data[0] + skb->data[1] * 256 - 5;
skb               189 drivers/net/hamradio/bpqether.c 	skb_pull(skb, 2);	/* Remove the length bytes */
skb               190 drivers/net/hamradio/bpqether.c 	skb_trim(skb, len);	/* Set the length of the data */
skb               195 drivers/net/hamradio/bpqether.c 	ptr = skb_push(skb, 1);
skb               198 drivers/net/hamradio/bpqether.c 	skb->protocol = ax25_type_trans(skb, dev);
skb               199 drivers/net/hamradio/bpqether.c 	netif_rx(skb);
skb               206 drivers/net/hamradio/bpqether.c 	kfree_skb(skb);
skb               210 drivers/net/hamradio/bpqether.c 	kfree_skb(skb);
skb               217 drivers/net/hamradio/bpqether.c static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
skb               224 drivers/net/hamradio/bpqether.c 	if (skb->protocol == htons(ETH_P_IP))
skb               225 drivers/net/hamradio/bpqether.c 		return ax25_ip_xmit(skb);
skb               232 drivers/net/hamradio/bpqether.c 		kfree_skb(skb);
skb               236 drivers/net/hamradio/bpqether.c 	skb_pull(skb, 1);			/* Drop KISS byte */
skb               237 drivers/net/hamradio/bpqether.c 	size = skb->len;
skb               244 drivers/net/hamradio/bpqether.c 	if (skb_cow(skb, AX25_BPQ_HEADER_LEN)) {
skb               247 drivers/net/hamradio/bpqether.c 		kfree_skb(skb);
skb               252 drivers/net/hamradio/bpqether.c 	ptr = skb_push(skb, 2);			/* Make space for length */
skb               262 drivers/net/hamradio/bpqether.c 		kfree_skb(skb);
skb               266 drivers/net/hamradio/bpqether.c 	skb->protocol = ax25_type_trans(skb, dev);
skb               267 drivers/net/hamradio/bpqether.c 	skb_reset_network_header(skb);
skb               268 drivers/net/hamradio/bpqether.c 	dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
skb               270 drivers/net/hamradio/bpqether.c 	dev->stats.tx_bytes+=skb->len;
skb               272 drivers/net/hamradio/bpqether.c 	dev_queue_xmit(skb);
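
Note: bpq_xmit() above shows the AX.25-over-Ethernet framing: strip the KISS byte, prepend a 16-bit little-endian length (biased by 5, matching the "- 5" on the receive side at line 187), then build the Ethernet header with dev_hard_header() and ETH_P_BPQ. Condensed sketch, assuming skb_cow() has already guaranteed the needed headroom:

	#include <linux/if_ether.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void demo_bpq_frame(struct sk_buff *skb, struct net_device *ethdev,
				   const unsigned char *dest)
	{
		unsigned char *ptr;
		int size;

		skb_pull(skb, 1);		/* drop the KISS command byte */
		size = skb->len;

		ptr = skb_push(skb, 2);		/* little-endian length, biased by 5 */
		*ptr++ = (size + 5) % 256;
		*ptr++ = (size + 5) / 256;

		dev_hard_header(skb, ethdev, ETH_P_BPQ, dest, NULL, 0);
		dev_queue_xmit(skb);
	}
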
skb               229 drivers/net/hamradio/dmascc.c static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
skb               911 drivers/net/hamradio/dmascc.c static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
skb               917 drivers/net/hamradio/dmascc.c 	if (skb->protocol == htons(ETH_P_IP))
skb               918 drivers/net/hamradio/dmascc.c 		return ax25_ip_xmit(skb);
skb               925 drivers/net/hamradio/dmascc.c 	skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
skb               926 drivers/net/hamradio/dmascc.c 	priv->tx_len[i] = skb->len - 1;
skb               953 drivers/net/hamradio/dmascc.c 	dev_kfree_skb(skb);
skb              1254 drivers/net/hamradio/dmascc.c 	struct sk_buff *skb;
skb              1262 drivers/net/hamradio/dmascc.c 		skb = dev_alloc_skb(cb + 1);
skb              1263 drivers/net/hamradio/dmascc.c 		if (skb == NULL) {
skb              1268 drivers/net/hamradio/dmascc.c 			data = skb_put(skb, cb + 1);
skb              1271 drivers/net/hamradio/dmascc.c 			skb->protocol = ax25_type_trans(skb, priv->dev);
skb              1272 drivers/net/hamradio/dmascc.c 			netif_rx(skb);
skb               133 drivers/net/hamradio/hdlcdrv.c 	struct sk_buff *skb;
skb               142 drivers/net/hamradio/hdlcdrv.c 	if (!(skb = dev_alloc_skb(pkt_len))) {
skb               147 drivers/net/hamradio/hdlcdrv.c 	cp = skb_put(skb, pkt_len);
skb               150 drivers/net/hamradio/hdlcdrv.c 	skb->protocol = ax25_type_trans(skb, dev);
skb               151 drivers/net/hamradio/hdlcdrv.c 	netif_rx(skb);
skb               256 drivers/net/hamradio/hdlcdrv.c 	struct sk_buff *skb;
skb               290 drivers/net/hamradio/hdlcdrv.c 			if (!(skb = s->skb)) {
skb               298 drivers/net/hamradio/hdlcdrv.c 			s->skb = NULL;
skb               300 drivers/net/hamradio/hdlcdrv.c 			pkt_len = skb->len-1; /* strip KISS byte */
skb               304 drivers/net/hamradio/hdlcdrv.c 				dev_kfree_skb_irq(skb);
skb               307 drivers/net/hamradio/hdlcdrv.c 			skb_copy_from_linear_data_offset(skb, 1,
skb               310 drivers/net/hamradio/hdlcdrv.c 			dev_kfree_skb_irq(skb);
skb               366 drivers/net/hamradio/hdlcdrv.c 	if (!s || s->magic != HDLCDRV_MAGIC || s->hdlctx.ptt || !s->skb) 
skb               389 drivers/net/hamradio/hdlcdrv.c static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb,
skb               394 drivers/net/hamradio/hdlcdrv.c 	if (skb->protocol == htons(ETH_P_IP))
skb               395 drivers/net/hamradio/hdlcdrv.c 		return ax25_ip_xmit(skb);
skb               397 drivers/net/hamradio/hdlcdrv.c 	if (skb->data[0] != 0) {
skb               398 drivers/net/hamradio/hdlcdrv.c 		do_kiss_params(sm, skb->data, skb->len);
skb               399 drivers/net/hamradio/hdlcdrv.c 		dev_kfree_skb(skb);
skb               402 drivers/net/hamradio/hdlcdrv.c 	if (sm->skb) {
skb               403 drivers/net/hamradio/hdlcdrv.c 		dev_kfree_skb(skb);
skb               407 drivers/net/hamradio/hdlcdrv.c 	sm->skb = skb;
skb               478 drivers/net/hamradio/hdlcdrv.c 	dev_kfree_skb(s->skb);
skb               479 drivers/net/hamradio/hdlcdrv.c 	s->skb = NULL;
skb               666 drivers/net/hamradio/hdlcdrv.c 	s->skb = NULL;
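
Note: hdlcdrv_send_packet() here and baycom_send_packet() above both dispatch on the leading KISS command byte: zero means a data frame, anything else is a parameter frame the driver consumes without transmitting. Sketch of that dispatch; demo_kiss_params() and demo_encaps() are hypothetical stand-ins for the per-driver handlers:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void demo_kiss_params(const u8 *data, int len);	/* hypothetical */
	static void demo_encaps(struct net_device *dev, const u8 *data, int len);

	static netdev_tx_t demo_kiss_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (skb->data[0] != 0) {
			/* Parameter frame: consumed locally, never put on air. */
			demo_kiss_params(skb->data, skb->len);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* Data frame: everything after the command byte is transmitted. */
		demo_encaps(dev, skb->data + 1, skb->len - 1);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
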
skb               237 drivers/net/hamradio/mkiss.c 	struct sk_buff *skb;
skb               283 drivers/net/hamradio/mkiss.c 	if ((skb = dev_alloc_skb(count)) == NULL) {
skb               291 drivers/net/hamradio/mkiss.c 	skb_put_data(skb, ax->rbuff, count);
skb               292 drivers/net/hamradio/mkiss.c 	skb->protocol = ax25_type_trans(skb, ax->dev);
skb               293 drivers/net/hamradio/mkiss.c 	netif_rx(skb);
skb               518 drivers/net/hamradio/mkiss.c static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
skb               522 drivers/net/hamradio/mkiss.c 	if (skb->protocol == htons(ETH_P_IP))
skb               523 drivers/net/hamradio/mkiss.c 		return ax25_ip_xmit(skb);
skb               551 drivers/net/hamradio/mkiss.c 	ax_encaps(dev, skb->data, skb->len);
skb               552 drivers/net/hamradio/mkiss.c 	kfree_skb(skb);
skb               210 drivers/net/hamradio/scc.c static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb);
skb               211 drivers/net/hamradio/scc.c static netdev_tx_t scc_net_tx(struct sk_buff *skb,
skb               324 drivers/net/hamradio/scc.c 	struct sk_buff *skb;
skb               330 drivers/net/hamradio/scc.c 	skb = dev_alloc_skb(2);
skb               331 drivers/net/hamradio/scc.c 	if (skb != NULL)
skb               333 drivers/net/hamradio/scc.c 		bp = skb_put(skb, 2);
skb               336 drivers/net/hamradio/scc.c 		scc_net_rx(scc, skb);
skb               369 drivers/net/hamradio/scc.c 	struct sk_buff *skb;
skb               372 drivers/net/hamradio/scc.c 	skb = scc->tx_buff;
skb               376 drivers/net/hamradio/scc.c 	if (skb == NULL)
skb               378 drivers/net/hamradio/scc.c 		skb = skb_dequeue(&scc->tx_queue);
skb               379 drivers/net/hamradio/scc.c 		scc->tx_buff = skb;
skb               382 drivers/net/hamradio/scc.c 		if (skb == NULL)
skb               389 drivers/net/hamradio/scc.c 		if (skb->len == 0)		/* Paranoia... */
skb               391 drivers/net/hamradio/scc.c 			dev_kfree_skb_irq(skb);
skb               403 drivers/net/hamradio/scc.c 		Outb(scc->data,*skb->data);	/* send byte */
skb               404 drivers/net/hamradio/scc.c 		skb_pull(skb, 1);
skb               413 drivers/net/hamradio/scc.c 	if (skb->len == 0)
skb               417 drivers/net/hamradio/scc.c 		dev_kfree_skb_irq(skb);
skb               425 drivers/net/hamradio/scc.c 	Outb(scc->data,*skb->data);		
skb               426 drivers/net/hamradio/scc.c 	skb_pull(skb, 1);
skb               518 drivers/net/hamradio/scc.c 	struct sk_buff *skb;
skb               529 drivers/net/hamradio/scc.c 	skb = scc->rx_buff;
skb               531 drivers/net/hamradio/scc.c 	if (skb == NULL)
skb               533 drivers/net/hamradio/scc.c 		skb = dev_alloc_skb(scc->stat.bufsize);
skb               534 drivers/net/hamradio/scc.c 		if (skb == NULL)
skb               543 drivers/net/hamradio/scc.c 		scc->rx_buff = skb;
skb               544 drivers/net/hamradio/scc.c 		skb_put_u8(skb, 0);	/* KISS data */
skb               547 drivers/net/hamradio/scc.c 	if (skb->len >= scc->stat.bufsize)
skb               552 drivers/net/hamradio/scc.c 		dev_kfree_skb_irq(skb);
skb               559 drivers/net/hamradio/scc.c 	skb_put_u8(skb, Inb(scc->data));
skb               567 drivers/net/hamradio/scc.c 	struct sk_buff *skb;
skb               574 drivers/net/hamradio/scc.c 	skb = scc->rx_buff;
skb               581 drivers/net/hamradio/scc.c 		if (skb != NULL) 
skb               582 drivers/net/hamradio/scc.c 			dev_kfree_skb_irq(skb);
skb               583 drivers/net/hamradio/scc.c 		scc->rx_buff = skb = NULL;
skb               586 drivers/net/hamradio/scc.c 	if(status & END_FR && skb != NULL)	/* end of frame */
skb               590 drivers/net/hamradio/scc.c 		if (!(status & CRC_ERR) && (status & 0xe) == RES8 && skb->len > 0)
skb               593 drivers/net/hamradio/scc.c 			skb_trim(skb, skb->len-1);
skb               594 drivers/net/hamradio/scc.c 			scc_net_rx(scc, skb);
skb               598 drivers/net/hamradio/scc.c 			dev_kfree_skb_irq(skb);
skb              1620 drivers/net/hamradio/scc.c static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
skb              1622 drivers/net/hamradio/scc.c 	if (skb->len == 0) {
skb              1623 drivers/net/hamradio/scc.c 		dev_kfree_skb_irq(skb);
skb              1628 drivers/net/hamradio/scc.c 	scc->dev_stat.rx_bytes += skb->len;
skb              1630 drivers/net/hamradio/scc.c 	skb->protocol = ax25_type_trans(skb, scc->dev);
skb              1632 drivers/net/hamradio/scc.c 	netif_rx(skb);
skb              1637 drivers/net/hamradio/scc.c static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
skb              1643 drivers/net/hamradio/scc.c 	if (skb->protocol == htons(ETH_P_IP))
skb              1644 drivers/net/hamradio/scc.c 		return ax25_ip_xmit(skb);
skb              1646 drivers/net/hamradio/scc.c 	if (skb->len > scc->stat.bufsize || skb->len < 2) {
skb              1648 drivers/net/hamradio/scc.c 		dev_kfree_skb(skb);
skb              1653 drivers/net/hamradio/scc.c 	scc->dev_stat.tx_bytes += skb->len;
skb              1656 drivers/net/hamradio/scc.c 	kisscmd = *skb->data & 0x1f;
skb              1657 drivers/net/hamradio/scc.c 	skb_pull(skb, 1);
skb              1660 drivers/net/hamradio/scc.c 		scc_set_param(scc, kisscmd, *skb->data);
skb              1661 drivers/net/hamradio/scc.c 		dev_kfree_skb(skb);
skb              1672 drivers/net/hamradio/scc.c 	skb_queue_tail(&scc->tx_queue, skb);
skb               532 drivers/net/hamradio/yam.c 		struct sk_buff *skb;
skb               537 drivers/net/hamradio/yam.c 			if (!(skb = dev_alloc_skb(pkt_len))) {
skb               542 drivers/net/hamradio/yam.c 				cp = skb_put(skb, pkt_len);
skb               545 drivers/net/hamradio/yam.c 				skb->protocol = ax25_type_trans(skb, dev);
skb               546 drivers/net/hamradio/yam.c 				netif_rx(skb);
skb               580 drivers/net/hamradio/yam.c static netdev_tx_t yam_send_packet(struct sk_buff *skb,
skb               585 drivers/net/hamradio/yam.c 	if (skb->protocol == htons(ETH_P_IP))
skb               586 drivers/net/hamradio/yam.c 		return ax25_ip_xmit(skb);
skb               588 drivers/net/hamradio/yam.c 	skb_queue_tail(&yp->send_queue, skb);
skb               650 drivers/net/hamradio/yam.c 	struct sk_buff *skb;
skb               658 drivers/net/hamradio/yam.c 			if (!(skb = skb_dequeue(&yp->send_queue))) {
skb               664 drivers/net/hamradio/yam.c 			if (skb->data[0] != 0) {
skb               666 drivers/net/hamradio/yam.c 				dev_kfree_skb_any(skb);
skb               669 drivers/net/hamradio/yam.c 			yp->tx_len = skb->len - 1;	/* strip KISS byte */
skb               671 drivers/net/hamradio/yam.c 				dev_kfree_skb_any(skb);
skb               674 drivers/net/hamradio/yam.c 			skb_copy_from_linear_data_offset(skb, 1,
skb               677 drivers/net/hamradio/yam.c 			dev_kfree_skb_any(skb);
skb               898 drivers/net/hamradio/yam.c 	struct sk_buff *skb;
skb               913 drivers/net/hamradio/yam.c 	while ((skb = skb_dequeue(&yp->send_queue)))
skb               914 drivers/net/hamradio/yam.c 		dev_kfree_skb(skb);
skb               639 drivers/net/hippi/rrunner.c 		struct sk_buff *skb;
skb               643 drivers/net/hippi/rrunner.c 		skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
skb               644 drivers/net/hippi/rrunner.c 		if (!skb) {
skb               650 drivers/net/hippi/rrunner.c 		rrpriv->rx_skbuff[i] = skb;
skb               651 drivers/net/hippi/rrunner.c 	        addr = pci_map_single(rrpriv->pci_dev, skb->data,
skb               657 drivers/net/hippi/rrunner.c 		if ((((unsigned long)skb->data) & 0xfff) > ~65320)
skb               699 drivers/net/hippi/rrunner.c 		struct sk_buff *skb = rrpriv->rx_skbuff[i];
skb               701 drivers/net/hippi/rrunner.c 		if (skb) {
skb               708 drivers/net/hippi/rrunner.c 			dev_kfree_skb(skb);
skb               945 drivers/net/hippi/rrunner.c 			struct sk_buff *skb, *rx_skb;
skb               950 drivers/net/hippi/rrunner.c 				skb = alloc_skb(pkt_len, GFP_ATOMIC);
skb               951 drivers/net/hippi/rrunner.c 				if (skb == NULL){
skb               961 drivers/net/hippi/rrunner.c 					skb_put_data(skb, rx_skb->data,
skb               980 drivers/net/hippi/rrunner.c 					skb = rx_skb;
skb               981 drivers/net/hippi/rrunner.c 					skb_put(skb, pkt_len);
skb               995 drivers/net/hippi/rrunner.c 			skb->protocol = hippi_type_trans(skb, dev);
skb               997 drivers/net/hippi/rrunner.c 			netif_rx(skb);		/* send it up */
skb              1063 drivers/net/hippi/rrunner.c 				struct sk_buff *skb;
skb              1066 drivers/net/hippi/rrunner.c 				skb = rrpriv->tx_skbuff[txcon];
skb              1069 drivers/net/hippi/rrunner.c 				dev->stats.tx_bytes += skb->len;
skb              1072 drivers/net/hippi/rrunner.c 						 desc->addr.addrlo, skb->len,
skb              1074 drivers/net/hippi/rrunner.c 				dev_kfree_skb_irq(skb);
skb              1108 drivers/net/hippi/rrunner.c 		struct sk_buff *skb = rrpriv->tx_skbuff[i];
skb              1110 drivers/net/hippi/rrunner.c 		if (skb) {
skb              1114 drivers/net/hippi/rrunner.c 				skb->len, PCI_DMA_TODEVICE);
skb              1117 drivers/net/hippi/rrunner.c 			dev_kfree_skb(skb);
skb              1130 drivers/net/hippi/rrunner.c 		struct sk_buff *skb = rrpriv->rx_skbuff[i];
skb              1132 drivers/net/hippi/rrunner.c 		if (skb) {
skb              1139 drivers/net/hippi/rrunner.c 			dev_kfree_skb(skb);
skb              1383 drivers/net/hippi/rrunner.c static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
skb              1388 drivers/net/hippi/rrunner.c 	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
skb              1391 drivers/net/hippi/rrunner.c 	u32 index, len = skb->len;
skb              1403 drivers/net/hippi/rrunner.c 	if (skb_headroom(skb) < 8){
skb              1406 drivers/net/hippi/rrunner.c 			dev_kfree_skb(skb);
skb              1412 drivers/net/hippi/rrunner.c 		skb_copy_from_linear_data(skb, new_skb->data, len);
skb              1413 drivers/net/hippi/rrunner.c 		dev_kfree_skb(skb);
skb              1414 drivers/net/hippi/rrunner.c 		skb = new_skb;
skb              1417 drivers/net/hippi/rrunner.c 	ifield = skb_push(skb, 8);
skb              1432 drivers/net/hippi/rrunner.c 	rrpriv->tx_skbuff[index] = skb;
skb              1434 drivers/net/hippi/rrunner.c 		rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE));
skb               835 drivers/net/hippi/rrunner.h static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
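
Note: rr_start_xmit() needs 8 bytes of headroom for the HIPPI ifield it pushes, and the lines above show the classic fallback when skb_headroom() comes up short: allocate a replacement, reserve the headroom, copy the linear data, free the original. A sketch of that idiom for linear skbs (modern drivers usually reach for skb_cow_head() instead):

	#include <linux/skbuff.h>

	static struct sk_buff *demo_ensure_headroom(struct sk_buff *skb, int need)
	{
		struct sk_buff *new_skb;

		if (skb_headroom(skb) >= need)
			return skb;		/* already enough room to push */

		new_skb = alloc_skb(skb->len + need, GFP_ATOMIC);
		if (!new_skb) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(new_skb, need);
		skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
		dev_kfree_skb(skb);
		return new_skb;
	}
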
skb               192 drivers/net/hyperv/hyperv_net.h 		struct sk_buff *skb);
skb               835 drivers/net/hyperv/hyperv_net.h 	struct sk_buff *skb; /* skb containing the pkt */
skb               677 drivers/net/hyperv/netvsc.c 	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
skb               683 drivers/net/hyperv/netvsc.c 	if (likely(skb)) {
skb               685 drivers/net/hyperv/netvsc.c 			= (struct hv_netvsc_packet *)skb->cb;
skb               700 drivers/net/hyperv/netvsc.c 		napi_consume_skb(skb, budget);
skb               808 drivers/net/hyperv/netvsc.c 	struct sk_buff *skb)
skb               824 drivers/net/hyperv/netvsc.c 	if (skb)
skb               835 drivers/net/hyperv/netvsc.c 	req_id = (ulong)skb;
skb               891 drivers/net/hyperv/netvsc.c 	*msd_skb = msdp->skb;
skb               893 drivers/net/hyperv/netvsc.c 	msdp->skb = NULL;
skb               903 drivers/net/hyperv/netvsc.c 		struct sk_buff *skb)
skb               929 drivers/net/hyperv/netvsc.c 	if (!skb)
skb               930 drivers/net/hyperv/netvsc.c 		return netvsc_send_pkt(device, packet, net_device, pb, skb);
skb               985 drivers/net/hyperv/netvsc.c 		if (msdp->skb)
skb               986 drivers/net/hyperv/netvsc.c 			dev_consume_skb_any(msdp->skb);
skb               989 drivers/net/hyperv/netvsc.c 			msdp->skb = skb;
skb               994 drivers/net/hyperv/netvsc.c 			msdp->skb = NULL;
skb              1015 drivers/net/hyperv/netvsc.c 		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
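
Note: netvsc.c above uses the skb pointer itself as the VMBus transaction id (req_id = (ulong)skb on send, cast back from desc->trans_id on completion), so no lookup table is needed to find the skb to complete. The round trip, spelled out:

	#include <linux/skbuff.h>

	static u64 demo_pack_id(struct sk_buff *skb)
	{
		return (unsigned long)skb;	/* the pointer doubles as the id */
	}

	static struct sk_buff *demo_unpack_id(u64 trans_id)
	{
		/* Valid because the id field is at least pointer-sized. */
		return (struct sk_buff *)(unsigned long)trans_id;
	}
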
skb               250 drivers/net/hyperv/netvsc_drv.c 	struct sk_buff *skb,
skb               259 drivers/net/hyperv/netvsc_drv.c 	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
skb               281 drivers/net/hyperv/netvsc_drv.c 		return skb_get_hash(skb);
skb               290 drivers/net/hyperv/netvsc_drv.c 		__skb_set_sw_hash(skb, hash, false);
skb               297 drivers/net/hyperv/netvsc_drv.c 				      struct sk_buff *skb, int old_idx)
skb               300 drivers/net/hyperv/netvsc_drv.c 	struct sock *sk = skb->sk;
skb               303 drivers/net/hyperv/netvsc_drv.c 	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
skb               325 drivers/net/hyperv/netvsc_drv.c static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
skb               327 drivers/net/hyperv/netvsc_drv.c 	int q_idx = sk_tx_queue_get(skb->sk);
skb               329 drivers/net/hyperv/netvsc_drv.c 	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
skb               333 drivers/net/hyperv/netvsc_drv.c 		if (skb_rx_queue_recorded(skb))
skb               334 drivers/net/hyperv/netvsc_drv.c 			q_idx = skb_get_rx_queue(skb);
skb               336 drivers/net/hyperv/netvsc_drv.c 			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
skb               342 drivers/net/hyperv/netvsc_drv.c static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
skb               355 drivers/net/hyperv/netvsc_drv.c 			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
skb               357 drivers/net/hyperv/netvsc_drv.c 			txq = netdev_pick_tx(vf_netdev, skb, NULL);
skb               363 drivers/net/hyperv/netvsc_drv.c 		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
skb               365 drivers/net/hyperv/netvsc_drv.c 		txq = netvsc_pick_tx(ndev, skb);
skb               409 drivers/net/hyperv/netvsc_drv.c static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
skb               414 drivers/net/hyperv/netvsc_drv.c 	char *data = skb->data;
skb               415 drivers/net/hyperv/netvsc_drv.c 	int frags = skb_shinfo(skb)->nr_frags;
skb               432 drivers/net/hyperv/netvsc_drv.c 				skb_headlen(skb), &pb[slots_used]);
skb               435 drivers/net/hyperv/netvsc_drv.c 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
skb               444 drivers/net/hyperv/netvsc_drv.c static int count_skb_frag_slots(struct sk_buff *skb)
skb               446 drivers/net/hyperv/netvsc_drv.c 	int i, frags = skb_shinfo(skb)->nr_frags;
skb               450 drivers/net/hyperv/netvsc_drv.c 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
skb               461 drivers/net/hyperv/netvsc_drv.c static int netvsc_get_slots(struct sk_buff *skb)
skb               463 drivers/net/hyperv/netvsc_drv.c 	char *data = skb->data;
skb               465 drivers/net/hyperv/netvsc_drv.c 	unsigned int len = skb_headlen(skb);
skb               470 drivers/net/hyperv/netvsc_drv.c 	frag_slots = count_skb_frag_slots(skb);
skb               474 drivers/net/hyperv/netvsc_drv.c static u32 net_checksum_info(struct sk_buff *skb)
skb               476 drivers/net/hyperv/netvsc_drv.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               477 drivers/net/hyperv/netvsc_drv.c 		struct iphdr *ip = ip_hdr(skb);
skb               484 drivers/net/hyperv/netvsc_drv.c 		struct ipv6hdr *ip6 = ipv6_hdr(skb);
skb               497 drivers/net/hyperv/netvsc_drv.c 			  struct sk_buff *skb)
skb               500 drivers/net/hyperv/netvsc_drv.c 	unsigned int len = skb->len;
skb               503 drivers/net/hyperv/netvsc_drv.c 	skb->dev = vf_netdev;
skb               504 drivers/net/hyperv/netvsc_drv.c 	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
skb               506 drivers/net/hyperv/netvsc_drv.c 	rc = dev_queue_xmit(skb);
skb               522 drivers/net/hyperv/netvsc_drv.c static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
skb               540 drivers/net/hyperv/netvsc_drv.c 		return netvsc_vf_xmit(net, vf_netdev, skb);
skb               548 drivers/net/hyperv/netvsc_drv.c 	num_data_pgs = netvsc_get_slots(skb) + 2;
skb               553 drivers/net/hyperv/netvsc_drv.c 		if (skb_linearize(skb))
skb               556 drivers/net/hyperv/netvsc_drv.c 		num_data_pgs = netvsc_get_slots(skb) + 2;
skb               568 drivers/net/hyperv/netvsc_drv.c 	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
skb               575 drivers/net/hyperv/netvsc_drv.c 	packet = (struct hv_netvsc_packet *)skb->cb;
skb               577 drivers/net/hyperv/netvsc_drv.c 	packet->q_idx = skb_get_queue_mapping(skb);
skb               579 drivers/net/hyperv/netvsc_drv.c 	packet->total_data_buflen = skb->len;
skb               580 drivers/net/hyperv/netvsc_drv.c 	packet->total_bytes = skb->len;
skb               583 drivers/net/hyperv/netvsc_drv.c 	rndis_msg = (struct rndis_message *)skb->head;
skb               597 drivers/net/hyperv/netvsc_drv.c 	hash = skb_get_hash_raw(skb);
skb               607 drivers/net/hyperv/netvsc_drv.c 	if (skb_vlan_tag_present(skb)) {
skb               615 drivers/net/hyperv/netvsc_drv.c 		vlan->vlanid = skb_vlan_tag_get_id(skb);
skb               616 drivers/net/hyperv/netvsc_drv.c 		vlan->cfi = skb_vlan_tag_get_cfi(skb);
skb               617 drivers/net/hyperv/netvsc_drv.c 		vlan->pri = skb_vlan_tag_get_prio(skb);
skb               620 drivers/net/hyperv/netvsc_drv.c 	if (skb_is_gso(skb)) {
skb               629 drivers/net/hyperv/netvsc_drv.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb               632 drivers/net/hyperv/netvsc_drv.c 			ip_hdr(skb)->tot_len = 0;
skb               633 drivers/net/hyperv/netvsc_drv.c 			ip_hdr(skb)->check = 0;
skb               634 drivers/net/hyperv/netvsc_drv.c 			tcp_hdr(skb)->check =
skb               635 drivers/net/hyperv/netvsc_drv.c 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
skb               636 drivers/net/hyperv/netvsc_drv.c 						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
skb               640 drivers/net/hyperv/netvsc_drv.c 			ipv6_hdr(skb)->payload_len = 0;
skb               641 drivers/net/hyperv/netvsc_drv.c 			tcp_hdr(skb)->check =
skb               642 drivers/net/hyperv/netvsc_drv.c 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb               643 drivers/net/hyperv/netvsc_drv.c 						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
skb               645 drivers/net/hyperv/netvsc_drv.c 		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
skb               646 drivers/net/hyperv/netvsc_drv.c 		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
skb               647 drivers/net/hyperv/netvsc_drv.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               648 drivers/net/hyperv/netvsc_drv.c 		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
skb               656 drivers/net/hyperv/netvsc_drv.c 			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
skb               658 drivers/net/hyperv/netvsc_drv.c 			if (skb->protocol == htons(ETH_P_IP)) {
skb               661 drivers/net/hyperv/netvsc_drv.c 				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
skb               668 drivers/net/hyperv/netvsc_drv.c 				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
skb               675 drivers/net/hyperv/netvsc_drv.c 			if (skb_checksum_help(skb))
skb               684 drivers/net/hyperv/netvsc_drv.c 					       skb, packet, pb);
skb               687 drivers/net/hyperv/netvsc_drv.c 	skb_tx_timestamp(skb);
skb               689 drivers/net/hyperv/netvsc_drv.c 	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
skb               702 drivers/net/hyperv/netvsc_drv.c 	dev_kfree_skb_any(skb);
skb               754 drivers/net/hyperv/netvsc_drv.c static void netvsc_comp_ipcsum(struct sk_buff *skb)
skb               756 drivers/net/hyperv/netvsc_drv.c 	struct iphdr *iph = (struct iphdr *)skb->data;
skb               769 drivers/net/hyperv/netvsc_drv.c 	struct sk_buff *skb;
skb               772 drivers/net/hyperv/netvsc_drv.c 	skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
skb               773 drivers/net/hyperv/netvsc_drv.c 	if (!skb)
skb               774 drivers/net/hyperv/netvsc_drv.c 		return skb;
skb               781 drivers/net/hyperv/netvsc_drv.c 		skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]);
skb               783 drivers/net/hyperv/netvsc_drv.c 	skb->protocol = eth_type_trans(skb, net);
skb               786 drivers/net/hyperv/netvsc_drv.c 	skb_checksum_none_assert(skb);
skb               795 drivers/net/hyperv/netvsc_drv.c 	    skb->protocol == htons(ETH_P_IP))
skb               796 drivers/net/hyperv/netvsc_drv.c 		netvsc_comp_ipcsum(skb);
skb               802 drivers/net/hyperv/netvsc_drv.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               809 drivers/net/hyperv/netvsc_drv.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb               813 drivers/net/hyperv/netvsc_drv.c 	return skb;
skb               827 drivers/net/hyperv/netvsc_drv.c 	struct sk_buff *skb;
skb               834 drivers/net/hyperv/netvsc_drv.c 	skb = netvsc_alloc_recv_skb(net, nvchan);
skb               836 drivers/net/hyperv/netvsc_drv.c 	if (unlikely(!skb)) {
skb               841 drivers/net/hyperv/netvsc_drv.c 	skb_record_rx_queue(skb, q_idx);
skb               853 drivers/net/hyperv/netvsc_drv.c 	if (skb->pkt_type == PACKET_BROADCAST)
skb               855 drivers/net/hyperv/netvsc_drv.c 	else if (skb->pkt_type == PACKET_MULTICAST)
skb               859 drivers/net/hyperv/netvsc_drv.c 	napi_gro_receive(&nvchan->napi, skb);
skb              2013 drivers/net/hyperv/netvsc_drv.c 	struct sk_buff *skb = *pskb;
skb              2014 drivers/net/hyperv/netvsc_drv.c 	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
skb              2019 drivers/net/hyperv/netvsc_drv.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb              2020 drivers/net/hyperv/netvsc_drv.c 	if (unlikely(!skb))
skb              2023 drivers/net/hyperv/netvsc_drv.c 	*pskb = skb;
skb              2025 drivers/net/hyperv/netvsc_drv.c 	skb->dev = ndev;
skb              2029 drivers/net/hyperv/netvsc_drv.c 	pcpu_stats->rx_bytes += skb->len;
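
Note: the GSO branch of netvsc_start_xmit() above shows the standard TSO preparation: zero the IPv4 total length and checksum, then seed the TCP checksum with only the pseudo-header so the hardware can finish it per segment. Isolated as a helper:

	#include <linux/skbuff.h>
	#include <net/checksum.h>
	#include <net/ip.h>
	#include <net/tcp.h>

	static void demo_prime_tso_ipv4(struct sk_buff *skb)
	{
		/* Length fields are recomputed per segment by the hardware. */
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;

		/* Pseudo-header sum only; payload len = 0, proto = TCP. */
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	}
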
skb               830 drivers/net/ieee802154/adf7242.c static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
skb               843 drivers/net/ieee802154/adf7242.c 	ret = adf7242_write_fbuf(lp, skb->data, skb->len);
skb               880 drivers/net/ieee802154/adf7242.c 	struct sk_buff *skb;
skb               895 drivers/net/ieee802154/adf7242.c 	skb = dev_alloc_skb(len);
skb               896 drivers/net/ieee802154/adf7242.c 	if (!skb) {
skb               901 drivers/net/ieee802154/adf7242.c 	data = skb_put(skb, len);
skb               904 drivers/net/ieee802154/adf7242.c 		kfree_skb(skb);
skb               914 drivers/net/ieee802154/adf7242.c 	skb_trim(skb, len - 2);	/* Don't put RSSI/LQI or CRC into the frame */
skb               916 drivers/net/ieee802154/adf7242.c 	ieee802154_rx_irqsafe(lp->hw, skb, lqi);
skb               701 drivers/net/ieee802154/at86rf230.c 	struct sk_buff *skb;
skb               711 drivers/net/ieee802154/at86rf230.c 	skb = dev_alloc_skb(IEEE802154_MTU);
skb               712 drivers/net/ieee802154/at86rf230.c 	if (!skb) {
skb               718 drivers/net/ieee802154/at86rf230.c 	skb_put_data(skb, buf + 2, len);
skb               719 drivers/net/ieee802154/at86rf230.c 	ieee802154_rx_irqsafe(lp->hw, skb, lqi);
skb               861 drivers/net/ieee802154/at86rf230.c 	struct sk_buff *skb = lp->tx_skb;
skb               868 drivers/net/ieee802154/at86rf230.c 	buf[1] = skb->len + 2;
skb               869 drivers/net/ieee802154/at86rf230.c 	memcpy(buf + 2, skb->data, skb->len);
skb               870 drivers/net/ieee802154/at86rf230.c 	ctx->trx.len = skb->len + 2;
skb               905 drivers/net/ieee802154/at86rf230.c at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
skb               910 drivers/net/ieee802154/at86rf230.c 	lp->tx_skb = skb;
skb               198 drivers/net/ieee802154/atusb.c #define SKB_ATUSB(skb)	(*(struct atusb **)(skb)->cb)
skb               205 drivers/net/ieee802154/atusb.c 	struct sk_buff *skb = urb->context;
skb               208 drivers/net/ieee802154/atusb.c 	if (!skb) {
skb               209 drivers/net/ieee802154/atusb.c 		skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL);
skb               210 drivers/net/ieee802154/atusb.c 		if (!skb) {
skb               215 drivers/net/ieee802154/atusb.c 		skb_put(skb, MAX_RX_XFER);
skb               216 drivers/net/ieee802154/atusb.c 		SKB_ATUSB(skb) = atusb;
skb               220 drivers/net/ieee802154/atusb.c 			  skb->data, MAX_RX_XFER, atusb_in, skb);
skb               226 drivers/net/ieee802154/atusb.c 		kfree_skb(skb);
skb               283 drivers/net/ieee802154/atusb.c 	struct sk_buff *skb = urb->context;
skb               284 drivers/net/ieee802154/atusb.c 	struct atusb *atusb = SKB_ATUSB(skb);
skb               292 drivers/net/ieee802154/atusb.c 	len = *skb->data;
skb               310 drivers/net/ieee802154/atusb.c 	lqi = skb->data[len + 1];
skb               312 drivers/net/ieee802154/atusb.c 	skb_pull(skb, 1);	/* remove PHR */
skb               313 drivers/net/ieee802154/atusb.c 	skb_trim(skb, len);	/* get payload only */
skb               314 drivers/net/ieee802154/atusb.c 	ieee802154_rx_irqsafe(atusb->hw, skb, lqi);
skb               321 drivers/net/ieee802154/atusb.c 	struct sk_buff *skb = urb->context;
skb               322 drivers/net/ieee802154/atusb.c 	struct atusb *atusb = SKB_ATUSB(skb);
skb               328 drivers/net/ieee802154/atusb.c 			kfree_skb(skb);
skb               380 drivers/net/ieee802154/atusb.c static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
skb               386 drivers/net/ieee802154/atusb.c 	dev_dbg(&usb_dev->dev, "%s (%d)\n", __func__, skb->len);
skb               387 drivers/net/ieee802154/atusb.c 	atusb->tx_skb = skb;
skb               390 drivers/net/ieee802154/atusb.c 	atusb->tx_dr.wLength = cpu_to_le16(skb->len);
skb               394 drivers/net/ieee802154/atusb.c 			     (unsigned char *)&atusb->tx_dr, skb->data,
skb               395 drivers/net/ieee802154/atusb.c 			     skb->len, atusb_xmit_complete, NULL);
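
Note: the SKB_ATUSB() macro at the top of the atusb block parks the driver context in skb->cb, the 48-byte control buffer that the current owner of an skb may use as scratch. The same trick with a compile-time size check; struct demo_priv is hypothetical:

	#include <linux/bug.h>
	#include <linux/skbuff.h>

	struct demo_priv { int chan; };		/* hypothetical context */

	#define DEMO_CB(skb)	(*(struct demo_priv **)(skb)->cb)

	static void demo_stash(struct sk_buff *skb, struct demo_priv *priv)
	{
		BUILD_BUG_ON(sizeof(struct demo_priv *) > sizeof(skb->cb));
		DEMO_CB(skb) = priv;	/* recovered later via DEMO_CB() */
	}
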
skb              1805 drivers/net/ieee802154/ca8210.c 	struct sk_buff *skb;
skb              1809 drivers/net/ieee802154/ca8210.c 	skb = dev_alloc_skb(IEEE802154_MTU + sizeof(hdr));
skb              1810 drivers/net/ieee802154/ca8210.c 	if (!skb)
skb              1813 drivers/net/ieee802154/ca8210.c 	skb_reserve(skb, sizeof(hdr));
skb              1821 drivers/net/ieee802154/ca8210.c 		kfree_skb(skb);
skb              1862 drivers/net/ieee802154/ca8210.c 	hlen = ieee802154_hdr_push(skb, &hdr);
skb              1866 drivers/net/ieee802154/ca8210.c 		kfree_skb(skb);
skb              1870 drivers/net/ieee802154/ca8210.c 	skb_reset_mac_header(skb);
skb              1871 drivers/net/ieee802154/ca8210.c 	skb->mac_len = hlen;
skb              1876 drivers/net/ieee802154/ca8210.c 	skb_put_data(skb, &data_ind[29], msdulen);
skb              1878 drivers/net/ieee802154/ca8210.c 	ieee802154_rx_irqsafe(hw, skb, mpdulinkquality);
skb              1941 drivers/net/ieee802154/ca8210.c 	struct sk_buff      *skb,
skb              1956 drivers/net/ieee802154/ca8210.c 	mac_len = ieee802154_hdr_peek_addrs(skb, &header);
skb              1972 drivers/net/ieee802154/ca8210.c 		skb->len - mac_len,
skb              1973 drivers/net/ieee802154/ca8210.c 		&skb->data[mac_len],
skb              2049 drivers/net/ieee802154/ca8210.c static int ca8210_xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb)
skb              2056 drivers/net/ieee802154/ca8210.c 	priv->tx_skb = skb;
skb              2058 drivers/net/ieee802154/ca8210.c 	status = ca8210_skb_tx(skb, priv->nextmsduhandle, priv);
skb               477 drivers/net/ieee802154/cc2520.c cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
skb               489 drivers/net/ieee802154/cc2520.c 		u16 crc = crc_ccitt(0, skb->data, skb->len);
skb               491 drivers/net/ieee802154/cc2520.c 		put_unaligned_le16(crc, skb_put(skb, 2));
skb               492 drivers/net/ieee802154/cc2520.c 		pkt_len = skb->len;
skb               494 drivers/net/ieee802154/cc2520.c 		pkt_len = skb->len + 2;
skb               501 drivers/net/ieee802154/cc2520.c 	rc = cc2520_write_txfifo(priv, pkt_len, skb->data, skb->len);
skb               542 drivers/net/ieee802154/cc2520.c 	struct sk_buff *skb;
skb               555 drivers/net/ieee802154/cc2520.c 	skb = dev_alloc_skb(len);
skb               556 drivers/net/ieee802154/cc2520.c 	if (!skb)
skb               559 drivers/net/ieee802154/cc2520.c 	if (cc2520_read_rxfifo(priv, skb_put(skb, len), len)) {
skb               561 drivers/net/ieee802154/cc2520.c 		kfree_skb(skb);
skb               577 drivers/net/ieee802154/cc2520.c 		crc_ok = skb->data[len - 1] & BIT(7);
skb               582 drivers/net/ieee802154/cc2520.c 			kfree_skb(skb);
skb               593 drivers/net/ieee802154/cc2520.c 		lqi = skb->data[len - 1] & 0x7f;
skb               601 drivers/net/ieee802154/cc2520.c 	ieee802154_rx_irqsafe(priv->hw, skb, lqi);
skb                61 drivers/net/ieee802154/fakelb.c static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
skb                73 drivers/net/ieee802154/fakelb.c 			struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC);
skb                81 drivers/net/ieee802154/fakelb.c 	ieee802154_xmit_complete(hw, skb, false);
skb               109 drivers/net/ieee802154/mac802154_hwsim.c static int hwsim_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
skb               132 drivers/net/ieee802154/mac802154_hwsim.c 			struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC);
skb               142 drivers/net/ieee802154/mac802154_hwsim.c 	ieee802154_xmit_complete(hw, skb, false);
skb               205 drivers/net/ieee802154/mac802154_hwsim.c static int append_radio_msg(struct sk_buff *skb, struct hwsim_phy *phy)
skb               212 drivers/net/ieee802154/mac802154_hwsim.c 	ret = nla_put_u32(skb, MAC802154_HWSIM_ATTR_RADIO_ID, phy->idx);
skb               222 drivers/net/ieee802154/mac802154_hwsim.c 	nl_edges = nla_nest_start_noflag(skb,
skb               230 drivers/net/ieee802154/mac802154_hwsim.c 		nl_edge = nla_nest_start_noflag(skb,
skb               234 drivers/net/ieee802154/mac802154_hwsim.c 			nla_nest_cancel(skb, nl_edges);
skb               238 drivers/net/ieee802154/mac802154_hwsim.c 		ret = nla_put_u32(skb, MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID,
skb               242 drivers/net/ieee802154/mac802154_hwsim.c 			nla_nest_cancel(skb, nl_edge);
skb               243 drivers/net/ieee802154/mac802154_hwsim.c 			nla_nest_cancel(skb, nl_edges);
skb               248 drivers/net/ieee802154/mac802154_hwsim.c 		ret = nla_put_u8(skb, MAC802154_HWSIM_EDGE_ATTR_LQI,
skb               252 drivers/net/ieee802154/mac802154_hwsim.c 			nla_nest_cancel(skb, nl_edge);
skb               253 drivers/net/ieee802154/mac802154_hwsim.c 			nla_nest_cancel(skb, nl_edges);
skb               257 drivers/net/ieee802154/mac802154_hwsim.c 		nla_nest_end(skb, nl_edge);
skb               261 drivers/net/ieee802154/mac802154_hwsim.c 	nla_nest_end(skb, nl_edges);
skb               266 drivers/net/ieee802154/mac802154_hwsim.c static int hwsim_get_radio(struct sk_buff *skb, struct hwsim_phy *phy,
skb               273 drivers/net/ieee802154/mac802154_hwsim.c 	hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags,
skb               281 drivers/net/ieee802154/mac802154_hwsim.c 	res = append_radio_msg(skb, phy);
skb               285 drivers/net/ieee802154/mac802154_hwsim.c 	genlmsg_end(skb, hdr);
skb               289 drivers/net/ieee802154/mac802154_hwsim.c 	genlmsg_cancel(skb, hdr);
skb               296 drivers/net/ieee802154/mac802154_hwsim.c 	struct sk_buff *skb;
skb               308 drivers/net/ieee802154/mac802154_hwsim.c 		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb               309 drivers/net/ieee802154/mac802154_hwsim.c 		if (!skb) {
skb               314 drivers/net/ieee802154/mac802154_hwsim.c 		res = hwsim_get_radio(skb, phy, info->snd_portid,
skb               317 drivers/net/ieee802154/mac802154_hwsim.c 			nlmsg_free(skb);
skb               321 drivers/net/ieee802154/mac802154_hwsim.c 		res = genlmsg_reply(skb, info);
skb               331 drivers/net/ieee802154/mac802154_hwsim.c static int hwsim_dump_radio_nl(struct sk_buff *skb,
skb               347 drivers/net/ieee802154/mac802154_hwsim.c 		res = hwsim_get_radio(skb, phy, NETLINK_CB(cb->skb).portid,
skb               359 drivers/net/ieee802154/mac802154_hwsim.c 	return skb->len;
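
Note: hwsim_get_radio() here and gtp_genl_fill_info() earlier in this section share the standard generic-netlink fill shape: genlmsg_put() opens the message, nla_put_*()/nla_nest_*() add attributes, genlmsg_cancel() unwinds on failure, and genlmsg_end() commits. Minimal sketch; demo_family and DEMO_ATTR_ID are assumptions, not a real family:

	#include <net/genetlink.h>

	static struct genl_family demo_family;	  /* assumed, registered elsewhere */
	enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_ID };  /* hypothetical attribute */

	static int demo_fill(struct sk_buff *skb, u32 portid, u32 seq, u32 idx)
	{
		void *hdr = genlmsg_put(skb, portid, seq, &demo_family, 0, 0);

		if (!hdr)
			return -EMSGSIZE;

		if (nla_put_u32(skb, DEMO_ATTR_ID, idx)) {
			genlmsg_cancel(skb, hdr);  /* unwind the partial message */
			return -EMSGSIZE;
		}

		genlmsg_end(skb, hdr);
		return 0;
	}
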
skb               463 drivers/net/ieee802154/mcr20a.c mcr20a_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
skb               469 drivers/net/ieee802154/mcr20a.c 	lp->tx_skb = skb;
skb               472 drivers/net/ieee802154/mcr20a.c 			     skb->data, skb->len, 0);
skb               786 drivers/net/ieee802154/mcr20a.c 	struct sk_buff *skb;
skb               799 drivers/net/ieee802154/mcr20a.c 	skb = dev_alloc_skb(len);
skb               800 drivers/net/ieee802154/mcr20a.c 	if (!skb)
skb               803 drivers/net/ieee802154/mcr20a.c 	__skb_put_data(skb, lp->rx_buf, len);
skb               804 drivers/net/ieee802154/mcr20a.c 	ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);
skb               592 drivers/net/ieee802154/mrf24j40.c static int mrf24j40_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
skb               596 drivers/net/ieee802154/mrf24j40.c 	dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len);
skb               597 drivers/net/ieee802154/mrf24j40.c 	devrec->tx_skb = skb;
skb               599 drivers/net/ieee802154/mrf24j40.c 	return write_tx_buf(devrec, 0x000, skb->data, skb->len);
skb               757 drivers/net/ieee802154/mrf24j40.c 	struct sk_buff *skb;
skb               762 drivers/net/ieee802154/mrf24j40.c 	skb = dev_alloc_skb(IEEE802154_MTU);
skb               763 drivers/net/ieee802154/mrf24j40.c 	if (!skb) {
skb               768 drivers/net/ieee802154/mrf24j40.c 	skb_put_data(skb, rx_local_buf, len);
skb               769 drivers/net/ieee802154/mrf24j40.c 	ieee802154_rx_irqsafe(devrec->hw, skb, 0);
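
Note: every IEEE 802.15.4 driver in this block (adf7242, at86rf230, atusb, ca8210, cc2520, mcr20a, mrf24j40) finishes its receive path the same way: copy the MPDU into a fresh skb and pass it to ieee802154_rx_irqsafe() together with the link-quality indicator. Sketch, with the LQI already extracted by the caller:

	#include <linux/ieee802154.h>
	#include <linux/skbuff.h>
	#include <net/mac802154.h>

	static void demo_802154_rx(struct ieee802154_hw *hw, const u8 *buf,
				   u8 len, u8 lqi)
	{
		struct sk_buff *skb = dev_alloc_skb(IEEE802154_MTU);

		if (!skb)
			return;		/* drop; stats omitted in this sketch */

		skb_put_data(skb, buf, len);
		ieee802154_rx_irqsafe(hw, skb, lqi);	/* safe from IRQ context */
	}
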
skb                58 drivers/net/ifb.c static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
skb                66 drivers/net/ifb.c 	struct sk_buff *skb;
skb                69 drivers/net/ifb.c 	skb = skb_peek(&txp->tq);
skb                70 drivers/net/ifb.c 	if (!skb) {
skb                77 drivers/net/ifb.c 	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
skb                78 drivers/net/ifb.c 		skb->redirected = 0;
skb                79 drivers/net/ifb.c 		skb->tc_skip_classify = 1;
skb                83 drivers/net/ifb.c 		txp->tx_bytes += skb->len;
skb                87 drivers/net/ifb.c 		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
skb                88 drivers/net/ifb.c 		if (!skb->dev) {
skb                90 drivers/net/ifb.c 			dev_kfree_skb(skb);
skb                97 drivers/net/ifb.c 		skb->skb_iif = txp->dev->ifindex;
skb                99 drivers/net/ifb.c 		if (!skb->from_ingress) {
skb               100 drivers/net/ifb.c 			dev_queue_xmit(skb);
skb               102 drivers/net/ifb.c 			skb_pull_rcsum(skb, skb->mac_len);
skb               103 drivers/net/ifb.c 			netif_receive_skb(skb);
skb               108 drivers/net/ifb.c 		skb = skb_peek(&txp->rq);
skb               109 drivers/net/ifb.c 		if (!skb) {
skb               236 drivers/net/ifb.c static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
skb               239 drivers/net/ifb.c 	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
skb               243 drivers/net/ifb.c 	txp->rx_bytes += skb->len;
skb               246 drivers/net/ifb.c 	if (!skb->redirected || !skb->skb_iif) {
skb               247 drivers/net/ifb.c 		dev_kfree_skb(skb);
skb               255 drivers/net/ifb.c 	__skb_queue_tail(&txp->rq, skb);
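
Note: the ifb worker above restores a redirected packet's original device from skb->skb_iif and then either re-transmits or re-receives it, depending on which side of the stack it was stolen from. The branch, condensed (must run under rcu_read_lock() for dev_get_by_index_rcu(); the redirected/from_ingress fields are CONFIG_NET_REDIRECT-gated):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void demo_ifb_bounce(struct net *net, struct sk_buff *skb)
	{
		skb->redirected = 0;
		skb->tc_skip_classify = 1;	/* don't classify this frame again */

		skb->dev = dev_get_by_index_rcu(net, skb->skb_iif);
		if (!skb->dev) {
			dev_kfree_skb(skb);	/* original device disappeared */
			return;
		}

		if (!skb->from_ingress) {
			dev_queue_xmit(skb);	/* redirected from egress: send it */
		} else {
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);	/* redirected from ingress */
		}
	}
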
skb               157 drivers/net/ipvlan/ipvlan.h int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
skb               165 drivers/net/ipvlan/ipvlan.h void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type);
skb               136 drivers/net/ipvlan/ipvlan_core.c void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
skb               140 drivers/net/ipvlan/ipvlan_core.c 	switch (skb->protocol) {
skb               144 drivers/net/ipvlan/ipvlan_core.c 		if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
skb               147 drivers/net/ipvlan/ipvlan_core.c 		arph = arp_hdr(skb);
skb               156 drivers/net/ipvlan/ipvlan_core.c 		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
skb               159 drivers/net/ipvlan/ipvlan_core.c 		ip4h = ip_hdr(skb);
skb               163 drivers/net/ipvlan/ipvlan_core.c 		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
skb               174 drivers/net/ipvlan/ipvlan_core.c 		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
skb               177 drivers/net/ipvlan/ipvlan_core.c 		ip6h = ipv6_hdr(skb);
skb               188 drivers/net/ipvlan/ipvlan_core.c 			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
skb               191 drivers/net/ipvlan/ipvlan_core.c 			ip6h = ipv6_hdr(skb);
skb               196 drivers/net/ipvlan/ipvlan_core.c 				if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
skb               200 drivers/net/ipvlan/ipvlan_core.c 				ip6h = ipv6_hdr(skb);
skb               230 drivers/net/ipvlan/ipvlan_core.c 	struct sk_buff *skb, *nskb;
skb               244 drivers/net/ipvlan/ipvlan_core.c 	while ((skb = __skb_dequeue(&list)) != NULL) {
skb               245 drivers/net/ipvlan/ipvlan_core.c 		struct net_device *dev = skb->dev;
skb               248 drivers/net/ipvlan/ipvlan_core.c 		ethh = eth_hdr(skb);
skb               249 drivers/net/ipvlan/ipvlan_core.c 		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
skb               259 drivers/net/ipvlan/ipvlan_core.c 			if (tx_pkt && (ipvlan->dev == skb->dev))
skb               266 drivers/net/ipvlan/ipvlan_core.c 			len = skb->len + ETH_HLEN;
skb               267 drivers/net/ipvlan/ipvlan_core.c 			nskb = skb_clone(skb, GFP_ATOMIC);
skb               285 drivers/net/ipvlan/ipvlan_core.c 			skb->dev = port->dev;
skb               286 drivers/net/ipvlan/ipvlan_core.c 			skb->pkt_type = pkt_type;
skb               287 drivers/net/ipvlan/ipvlan_core.c 			dev_queue_xmit(skb);
skb               290 drivers/net/ipvlan/ipvlan_core.c 				consume_skb(skb);
skb               292 drivers/net/ipvlan/ipvlan_core.c 				kfree_skb(skb);
skb               300 drivers/net/ipvlan/ipvlan_core.c static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
skb               305 drivers/net/ipvlan/ipvlan_core.c 		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));
skb               307 drivers/net/ipvlan/ipvlan_core.c 	skb_scrub_packet(skb, xnet);
skb               309 drivers/net/ipvlan/ipvlan_core.c 		skb->dev = dev;
skb               320 drivers/net/ipvlan/ipvlan_core.c 	struct sk_buff *skb = *pskb;
skb               322 drivers/net/ipvlan/ipvlan_core.c 	len = skb->len + ETH_HLEN;
skb               328 drivers/net/ipvlan/ipvlan_core.c 			kfree_skb(skb);
skb               332 drivers/net/ipvlan/ipvlan_core.c 		skb = skb_share_check(skb, GFP_ATOMIC);
skb               333 drivers/net/ipvlan/ipvlan_core.c 		if (!skb)
skb               336 drivers/net/ipvlan/ipvlan_core.c 		*pskb = skb;
skb               340 drivers/net/ipvlan/ipvlan_core.c 		skb->pkt_type = PACKET_HOST;
skb               341 drivers/net/ipvlan/ipvlan_core.c 		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
skb               344 drivers/net/ipvlan/ipvlan_core.c 		skb->dev = dev;
skb               415 drivers/net/ipvlan/ipvlan_core.c static int ipvlan_process_v4_outbound(struct sk_buff *skb)
skb               417 drivers/net/ipvlan/ipvlan_core.c 	const struct iphdr *ip4h = ip_hdr(skb);
skb               418 drivers/net/ipvlan/ipvlan_core.c 	struct net_device *dev = skb->dev;
skb               426 drivers/net/ipvlan/ipvlan_core.c 		.flowi4_mark = skb->mark,
skb               439 drivers/net/ipvlan/ipvlan_core.c 	skb_dst_set(skb, &rt->dst);
skb               440 drivers/net/ipvlan/ipvlan_core.c 	err = ip_local_out(net, skb->sk, skb);
skb               448 drivers/net/ipvlan/ipvlan_core.c 	kfree_skb(skb);
skb               454 drivers/net/ipvlan/ipvlan_core.c static int ipvlan_process_v6_outbound(struct sk_buff *skb)
skb               456 drivers/net/ipvlan/ipvlan_core.c 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               457 drivers/net/ipvlan/ipvlan_core.c 	struct net_device *dev = skb->dev;
skb               467 drivers/net/ipvlan/ipvlan_core.c 		.flowi6_mark = skb->mark,
skb               477 drivers/net/ipvlan/ipvlan_core.c 	skb_dst_set(skb, dst);
skb               478 drivers/net/ipvlan/ipvlan_core.c 	err = ip6_local_out(net, skb->sk, skb);
skb               486 drivers/net/ipvlan/ipvlan_core.c 	kfree_skb(skb);
skb               491 drivers/net/ipvlan/ipvlan_core.c static int ipvlan_process_v6_outbound(struct sk_buff *skb)
skb               497 drivers/net/ipvlan/ipvlan_core.c static int ipvlan_process_outbound(struct sk_buff *skb)
skb               499 drivers/net/ipvlan/ipvlan_core.c 	struct ethhdr *ethh = eth_hdr(skb);
skb               506 drivers/net/ipvlan/ipvlan_core.c 	if (skb_mac_header_was_set(skb)) {
skb               512 drivers/net/ipvlan/ipvlan_core.c 				ntohs(skb->protocol));
skb               513 drivers/net/ipvlan/ipvlan_core.c 			kfree_skb(skb);
skb               517 drivers/net/ipvlan/ipvlan_core.c 		skb_pull(skb, sizeof(*ethh));
skb               518 drivers/net/ipvlan/ipvlan_core.c 		skb->mac_header = (typeof(skb->mac_header))~0U;
skb               519 drivers/net/ipvlan/ipvlan_core.c 		skb_reset_network_header(skb);
skb               522 drivers/net/ipvlan/ipvlan_core.c 	if (skb->protocol == htons(ETH_P_IPV6))
skb               523 drivers/net/ipvlan/ipvlan_core.c 		ret = ipvlan_process_v6_outbound(skb);
skb               524 drivers/net/ipvlan/ipvlan_core.c 	else if (skb->protocol == htons(ETH_P_IP))
skb               525 drivers/net/ipvlan/ipvlan_core.c 		ret = ipvlan_process_v4_outbound(skb);
skb               528 drivers/net/ipvlan/ipvlan_core.c 				    ntohs(skb->protocol));
skb               529 drivers/net/ipvlan/ipvlan_core.c 		kfree_skb(skb);
skb               536 drivers/net/ipvlan/ipvlan_core.c 				     struct sk_buff *skb, bool tx_pkt)
skb               538 drivers/net/ipvlan/ipvlan_core.c 	if (skb->protocol == htons(ETH_P_PAUSE)) {
skb               539 drivers/net/ipvlan/ipvlan_core.c 		kfree_skb(skb);
skb               548 drivers/net/ipvlan/ipvlan_core.c 	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;
skb               552 drivers/net/ipvlan/ipvlan_core.c 		if (skb->dev)
skb               553 drivers/net/ipvlan/ipvlan_core.c 			dev_hold(skb->dev);
skb               554 drivers/net/ipvlan/ipvlan_core.c 		__skb_queue_tail(&port->backlog, skb);
skb               559 drivers/net/ipvlan/ipvlan_core.c 		atomic_long_inc(&skb->dev->rx_dropped);
skb               560 drivers/net/ipvlan/ipvlan_core.c 		kfree_skb(skb);
skb               564 drivers/net/ipvlan/ipvlan_core.c static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
skb               571 drivers/net/ipvlan/ipvlan_core.c 	lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
skb               579 drivers/net/ipvlan/ipvlan_core.c 				consume_skb(skb);
skb               582 drivers/net/ipvlan/ipvlan_core.c 			return ipvlan_rcv_frame(addr, &skb, true);
skb               586 drivers/net/ipvlan/ipvlan_core.c 	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
skb               587 drivers/net/ipvlan/ipvlan_core.c 	return ipvlan_process_outbound(skb);
skb               590 drivers/net/ipvlan/ipvlan_core.c static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
skb               593 drivers/net/ipvlan/ipvlan_core.c 	struct ethhdr *eth = eth_hdr(skb);
skb               600 drivers/net/ipvlan/ipvlan_core.c 		lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
skb               605 drivers/net/ipvlan/ipvlan_core.c 					consume_skb(skb);
skb               608 drivers/net/ipvlan/ipvlan_core.c 				return ipvlan_rcv_frame(addr, &skb, true);
skb               611 drivers/net/ipvlan/ipvlan_core.c 		skb = skb_share_check(skb, GFP_ATOMIC);
skb               612 drivers/net/ipvlan/ipvlan_core.c 		if (!skb)
skb               620 drivers/net/ipvlan/ipvlan_core.c 		return dev_forward_skb(ipvlan->phy_dev, skb);
skb               623 drivers/net/ipvlan/ipvlan_core.c 		ipvlan_skb_crossing_ns(skb, NULL);
skb               624 drivers/net/ipvlan/ipvlan_core.c 		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
skb               628 drivers/net/ipvlan/ipvlan_core.c 	skb->dev = ipvlan->phy_dev;
skb               629 drivers/net/ipvlan/ipvlan_core.c 	return dev_queue_xmit(skb);
skb               632 drivers/net/ipvlan/ipvlan_core.c int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
skb               640 drivers/net/ipvlan/ipvlan_core.c 	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
skb               645 drivers/net/ipvlan/ipvlan_core.c 		return ipvlan_xmit_mode_l2(skb, dev);
skb               650 drivers/net/ipvlan/ipvlan_core.c 		return ipvlan_xmit_mode_l3(skb, dev);
skb               657 drivers/net/ipvlan/ipvlan_core.c 	kfree_skb(skb);
skb               661 drivers/net/ipvlan/ipvlan_core.c static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
skb               663 drivers/net/ipvlan/ipvlan_core.c 	struct ethhdr *eth = eth_hdr(skb);
skb               668 drivers/net/ipvlan/ipvlan_core.c 	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
skb               669 drivers/net/ipvlan/ipvlan_core.c 		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
skb               687 drivers/net/ipvlan/ipvlan_core.c 	struct sk_buff *skb = *pskb;
skb               690 drivers/net/ipvlan/ipvlan_core.c 	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
skb               705 drivers/net/ipvlan/ipvlan_core.c 	struct sk_buff *skb = *pskb;
skb               706 drivers/net/ipvlan/ipvlan_core.c 	struct ethhdr *eth = eth_hdr(skb);
skb               710 drivers/net/ipvlan/ipvlan_core.c 		if (ipvlan_external_frame(skb, port)) {
skb               711 drivers/net/ipvlan/ipvlan_core.c 			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
skb               734 drivers/net/ipvlan/ipvlan_core.c 	struct sk_buff *skb = *pskb;
skb               735 drivers/net/ipvlan/ipvlan_core.c 	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);
skb               754 drivers/net/ipvlan/ipvlan_core.c 	kfree_skb(skb);
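The ipvlan_core.c entries above revolve around one convention worth spelling out: an rx_handler receives a pointer to the skb pointer so it can substitute the buffer (for example after skb_share_check()) and reports its decision through rx_handler_result_t. A minimal sketch with a hypothetical handler name; the return codes and helpers are the real kernel API:

    #include <linux/netdevice.h>
    #include <linux/if_ether.h>

    static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
    {
            struct sk_buff *skb = *pskb;

            /* Make the buffer private before touching it. */
            skb = skb_share_check(skb, GFP_ATOMIC);
            if (!skb)
                    return RX_HANDLER_CONSUMED;     /* already freed */
            *pskb = skb;                            /* caller must see the new skb */

            if (skb->protocol == htons(ETH_P_PAUSE)) {
                    kfree_skb(skb);                 /* flow-control frames are dropped */
                    return RX_HANDLER_CONSUMED;
            }

            return RX_HANDLER_PASS;                 /* let the stack continue */
    }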
skb                13 drivers/net/ipvlan/ipvlan_l3s.c static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
skb                28 drivers/net/ipvlan/ipvlan_l3s.c 	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
skb                38 drivers/net/ipvlan/ipvlan_l3s.c 				     struct sk_buff *skb, u16 proto)
skb                43 drivers/net/ipvlan/ipvlan_l3s.c 	addr = ipvlan_skb_to_addr(skb, dev);
skb                51 drivers/net/ipvlan/ipvlan_l3s.c 		struct iphdr *ip4h = ip_hdr(skb);
skb                54 drivers/net/ipvlan/ipvlan_l3s.c 		err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
skb                64 drivers/net/ipvlan/ipvlan_l3s.c 		struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb                71 drivers/net/ipvlan/ipvlan_l3s.c 			.flowi6_mark  = skb->mark,
skb                75 drivers/net/ipvlan/ipvlan_l3s.c 		skb_dst_drop(skb);
skb                77 drivers/net/ipvlan/ipvlan_l3s.c 					     skb, flags);
skb                78 drivers/net/ipvlan/ipvlan_l3s.c 		skb_dst_set(skb, dst);
skb                86 drivers/net/ipvlan/ipvlan_l3s.c 	return skb;
skb                93 drivers/net/ipvlan/ipvlan_l3s.c static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
skb                99 drivers/net/ipvlan/ipvlan_l3s.c 	addr = ipvlan_skb_to_addr(skb, skb->dev);
skb               103 drivers/net/ipvlan/ipvlan_l3s.c 	skb->dev = addr->master->dev;
skb               104 drivers/net/ipvlan/ipvlan_l3s.c 	len = skb->len + ETH_HLEN;
skb                94 drivers/net/ipvlan/ipvlan_main.c 	struct sk_buff *skb;
skb               100 drivers/net/ipvlan/ipvlan_main.c 	while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
skb               101 drivers/net/ipvlan/ipvlan_main.c 		if (skb->dev)
skb               102 drivers/net/ipvlan/ipvlan_main.c 			dev_put(skb->dev);
skb               103 drivers/net/ipvlan/ipvlan_main.c 		kfree_skb(skb);
skb               200 drivers/net/ipvlan/ipvlan_main.c static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
skb               204 drivers/net/ipvlan/ipvlan_main.c 	int skblen = skb->len;
skb               207 drivers/net/ipvlan/ipvlan_main.c 	ret = ipvlan_queue_xmit(skb, dev);
skb               347 drivers/net/ipvlan/ipvlan_main.c static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
skb               358 drivers/net/ipvlan/ipvlan_main.c 	return dev_hard_header(skb, phy_dev, type, daddr,
skb               488 drivers/net/ipvlan/ipvlan_main.c static int ipvlan_nl_fillinfo(struct sk_buff *skb,
skb               499 drivers/net/ipvlan/ipvlan_main.c 	if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode))
skb               501 drivers/net/ipvlan/ipvlan_main.c 	if (nla_put_u16(skb, IFLA_IPVLAN_FLAGS, port->flags))
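Between the ipvlan_core.c enqueue entries and the ipvlan_main.c flush entries above, a reference-counting pairing is visible: dev_hold() is taken while an skb sits on the backlog and dev_put() is released when the queue is drained. A minimal sketch assuming the caller serializes queue access (the real driver holds the backlog spinlock); function names are illustrative:

    static void example_backlog_enqueue(struct sk_buff_head *backlog,
                                        struct sk_buff *skb)
    {
            if (skb->dev)
                    dev_hold(skb->dev);     /* keep the device alive while queued */
            __skb_queue_tail(backlog, skb);
    }

    static void example_backlog_flush(struct sk_buff_head *backlog)
    {
            struct sk_buff *skb;

            while ((skb = __skb_dequeue(backlog)) != NULL) {
                    if (skb->dev)
                            dev_put(skb->dev);      /* drop the paired reference */
                    kfree_skb(skb);
            }
    }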
skb                68 drivers/net/loopback.c static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb                74 drivers/net/loopback.c 	skb_tx_timestamp(skb);
skb                77 drivers/net/loopback.c 	skb->tstamp = 0;
skb                79 drivers/net/loopback.c 	skb_orphan(skb);
skb                84 drivers/net/loopback.c 	skb_dst_force(skb);
skb                86 drivers/net/loopback.c 	skb->protocol = eth_type_trans(skb, dev);
skb                91 drivers/net/loopback.c 	len = skb->len;
skb                92 drivers/net/loopback.c 	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
skb               236 drivers/net/loopback.c static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
skb               239 drivers/net/loopback.c 	kfree_skb(skb);
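The loopback.c entries above show the canonical shape of a transmit function that immediately re-injects the frame into the receive path. A condensed sketch of that sequence (illustrative only; the real driver additionally keeps per-CPU statistics):

    static netdev_tx_t example_loopback_xmit(struct sk_buff *skb,
                                             struct net_device *dev)
    {
            int len;

            skb_tx_timestamp(skb);          /* software TX timestamp, if requested */
            skb_orphan(skb);                /* detach from the sending socket */
            skb_dst_force(skb);             /* pin the route before re-injection */

            skb->protocol = eth_type_trans(skb, dev);       /* back into RX shape */

            len = skb->len;
            if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
                    dev->stats.rx_packets++;
                    dev->stats.rx_bytes += len;
            }
            return NETDEV_TX_OK;
    }

Note that len is saved before netif_rx(): once the skb is handed over it may be freed, so its fields must not be read afterwards.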
skb               382 drivers/net/macsec.c static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
skb               384 drivers/net/macsec.c 	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
skb               385 drivers/net/macsec.c 	return (struct macsec_cb *)skb->cb;
skb               485 drivers/net/macsec.c static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
skb               487 drivers/net/macsec.c 	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
skb               488 drivers/net/macsec.c 	int len = skb->len - 2 * ETH_ALEN;
skb               492 drivers/net/macsec.c 	if (skb->len <= 16)
skb               531 drivers/net/macsec.c static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
skb               533 drivers/net/macsec.c 	return (struct macsec_eth_header *)skb_mac_header(skb);
skb               555 drivers/net/macsec.c static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
skb               559 drivers/net/macsec.c 	skb->dev = macsec->real_dev;
skb               560 drivers/net/macsec.c 	skb_reset_mac_header(skb);
skb               561 drivers/net/macsec.c 	skb->protocol = eth_hdr(skb)->h_proto;
skb               564 drivers/net/macsec.c static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
skb               571 drivers/net/macsec.c 		txsc_stats->stats.OutOctetsEncrypted += skb->len;
skb               575 drivers/net/macsec.c 		txsc_stats->stats.OutOctetsProtected += skb->len;
skb               596 drivers/net/macsec.c 	struct sk_buff *skb = base->data;
skb               597 drivers/net/macsec.c 	struct net_device *dev = skb->dev;
skb               599 drivers/net/macsec.c 	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
skb               602 drivers/net/macsec.c 	aead_request_free(macsec_skb_cb(skb)->req);
skb               605 drivers/net/macsec.c 	macsec_encrypt_finish(skb, dev);
skb               606 drivers/net/macsec.c 	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
skb               607 drivers/net/macsec.c 	len = skb->len;
skb               608 drivers/net/macsec.c 	ret = dev_queue_xmit(skb);
skb               646 drivers/net/macsec.c static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
skb               671 drivers/net/macsec.c 		kfree_skb(skb);
skb               675 drivers/net/macsec.c 	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
skb               676 drivers/net/macsec.c 		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
skb               677 drivers/net/macsec.c 		struct sk_buff *nskb = skb_copy_expand(skb,
skb               682 drivers/net/macsec.c 			consume_skb(skb);
skb               683 drivers/net/macsec.c 			skb = nskb;
skb               686 drivers/net/macsec.c 			kfree_skb(skb);
skb               690 drivers/net/macsec.c 		skb = skb_unshare(skb, GFP_ATOMIC);
skb               691 drivers/net/macsec.c 		if (!skb) {
skb               697 drivers/net/macsec.c 	unprotected_len = skb->len;
skb               698 drivers/net/macsec.c 	eth = eth_hdr(skb);
skb               700 drivers/net/macsec.c 	hh = skb_push(skb, macsec_extra_len(sci_present));
skb               706 drivers/net/macsec.c 		kfree_skb(skb);
skb               712 drivers/net/macsec.c 	skb_put(skb, secy->icv_len);
skb               714 drivers/net/macsec.c 	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
skb               722 drivers/net/macsec.c 		kfree_skb(skb);
skb               726 drivers/net/macsec.c 	ret = skb_cow_data(skb, 0, &trailer);
skb               729 drivers/net/macsec.c 		kfree_skb(skb);
skb               736 drivers/net/macsec.c 		kfree_skb(skb);
skb               743 drivers/net/macsec.c 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
skb               747 drivers/net/macsec.c 		kfree_skb(skb);
skb               752 drivers/net/macsec.c 		int len = skb->len - macsec_hdr_len(sci_present) -
skb               758 drivers/net/macsec.c 		aead_request_set_ad(req, skb->len - secy->icv_len);
skb               761 drivers/net/macsec.c 	macsec_skb_cb(skb)->req = req;
skb               762 drivers/net/macsec.c 	macsec_skb_cb(skb)->tx_sa = tx_sa;
skb               763 drivers/net/macsec.c 	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
skb               765 drivers/net/macsec.c 	dev_hold(skb->dev);
skb               770 drivers/net/macsec.c 		dev_put(skb->dev);
skb               771 drivers/net/macsec.c 		kfree_skb(skb);
skb               777 drivers/net/macsec.c 	dev_put(skb->dev);
skb               781 drivers/net/macsec.c 	return skb;
skb               784 drivers/net/macsec.c static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
skb               786 drivers/net/macsec.c 	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
skb               788 drivers/net/macsec.c 	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
skb               809 drivers/net/macsec.c 			rxsc_stats->stats.InOctetsDecrypted += skb->len;
skb               811 drivers/net/macsec.c 			rxsc_stats->stats.InOctetsValidated += skb->len;
skb               815 drivers/net/macsec.c 	if (!macsec_skb_cb(skb)->valid) {
skb               855 drivers/net/macsec.c static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
skb               857 drivers/net/macsec.c 	skb->pkt_type = PACKET_HOST;
skb               858 drivers/net/macsec.c 	skb->protocol = eth_type_trans(skb, dev);
skb               860 drivers/net/macsec.c 	skb_reset_network_header(skb);
skb               861 drivers/net/macsec.c 	if (!skb_transport_header_was_set(skb))
skb               862 drivers/net/macsec.c 		skb_reset_transport_header(skb);
skb               863 drivers/net/macsec.c 	skb_reset_mac_len(skb);
skb               866 drivers/net/macsec.c static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
skb               868 drivers/net/macsec.c 	skb->ip_summed = CHECKSUM_NONE;
skb               869 drivers/net/macsec.c 	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
skb               870 drivers/net/macsec.c 	skb_pull(skb, hdr_len);
skb               871 drivers/net/macsec.c 	pskb_trim_unique(skb, skb->len - icv_len);
skb               886 drivers/net/macsec.c 	struct sk_buff *skb = base->data;
skb               887 drivers/net/macsec.c 	struct net_device *dev = skb->dev;
skb               889 drivers/net/macsec.c 	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
skb               894 drivers/net/macsec.c 	aead_request_free(macsec_skb_cb(skb)->req);
skb               897 drivers/net/macsec.c 		macsec_skb_cb(skb)->valid = true;
skb               900 drivers/net/macsec.c 	pn = ntohl(macsec_ethhdr(skb)->packet_number);
skb               901 drivers/net/macsec.c 	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
skb               903 drivers/net/macsec.c 		kfree_skb(skb);
skb               907 drivers/net/macsec.c 	macsec_finalize_skb(skb, macsec->secy.icv_len,
skb               908 drivers/net/macsec.c 			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
skb               909 drivers/net/macsec.c 	macsec_reset_skb(skb, macsec->secy.netdev);
skb               911 drivers/net/macsec.c 	len = skb->len;
skb               912 drivers/net/macsec.c 	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
skb               923 drivers/net/macsec.c static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
skb               937 drivers/net/macsec.c 	macsec_skb_cb(skb)->valid = false;
skb               938 drivers/net/macsec.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               939 drivers/net/macsec.c 	if (!skb)
skb               942 drivers/net/macsec.c 	ret = skb_cow_data(skb, 0, &trailer);
skb               944 drivers/net/macsec.c 		kfree_skb(skb);
skb               949 drivers/net/macsec.c 		kfree_skb(skb);
skb               953 drivers/net/macsec.c 	hdr = (struct macsec_eth_header *)skb->data;
skb               957 drivers/net/macsec.c 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
skb               960 drivers/net/macsec.c 		kfree_skb(skb);
skb               968 drivers/net/macsec.c 		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
skb               971 drivers/net/macsec.c 		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
skb               972 drivers/net/macsec.c 		skb = skb_unshare(skb, GFP_ATOMIC);
skb               973 drivers/net/macsec.c 		if (!skb) {
skb               980 drivers/net/macsec.c 		aead_request_set_ad(req, skb->len - icv_len);
skb               983 drivers/net/macsec.c 	macsec_skb_cb(skb)->req = req;
skb               984 drivers/net/macsec.c 	skb->dev = dev;
skb               985 drivers/net/macsec.c 	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
skb               996 drivers/net/macsec.c 			kfree_skb(skb);
skb               997 drivers/net/macsec.c 			skb = ERR_PTR(ret);
skb              1000 drivers/net/macsec.c 		macsec_skb_cb(skb)->valid = true;
skb              1006 drivers/net/macsec.c 	return skb;
skb              1033 drivers/net/macsec.c static void handle_not_macsec(struct sk_buff *skb)
skb              1039 drivers/net/macsec.c 	rxd = macsec_data_rcu(skb->dev);
skb              1057 drivers/net/macsec.c 		nskb = skb_clone(skb, GFP_ATOMIC);
skb              1075 drivers/net/macsec.c 	struct sk_buff *skb = *pskb;
skb              1076 drivers/net/macsec.c 	struct net_device *dev = skb->dev;
skb              1091 drivers/net/macsec.c 	if (skb_headroom(skb) < ETH_HLEN)
skb              1094 drivers/net/macsec.c 	hdr = macsec_ethhdr(skb);
skb              1096 drivers/net/macsec.c 		handle_not_macsec(skb);
skb              1102 drivers/net/macsec.c 	skb = skb_unshare(skb, GFP_ATOMIC);
skb              1103 drivers/net/macsec.c 	*pskb = skb;
skb              1104 drivers/net/macsec.c 	if (!skb)
skb              1107 drivers/net/macsec.c 	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
skb              1109 drivers/net/macsec.c 		if (!pskb_may_pull(skb, macsec_extra_len(false)))
skb              1113 drivers/net/macsec.c 	hdr = macsec_ethhdr(skb);
skb              1130 drivers/net/macsec.c 	skb_push(skb, ETH_HLEN);
skb              1132 drivers/net/macsec.c 	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
skb              1133 drivers/net/macsec.c 	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
skb              1134 drivers/net/macsec.c 	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
skb              1137 drivers/net/macsec.c 	rxd = macsec_data_rcu(skb->dev);
skb              1159 drivers/net/macsec.c 	if (!macsec_validate_skb(skb, secy->icv_len)) {
skb              1166 drivers/net/macsec.c 	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
skb              1208 drivers/net/macsec.c 	macsec_skb_cb(skb)->rx_sa = rx_sa;
skb              1213 drivers/net/macsec.c 		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
skb              1215 drivers/net/macsec.c 	if (IS_ERR(skb)) {
skb              1217 drivers/net/macsec.c 		if (PTR_ERR(skb) != -EINPROGRESS) {
skb              1226 drivers/net/macsec.c 	if (!macsec_post_decrypt(skb, secy, pn))
skb              1230 drivers/net/macsec.c 	macsec_finalize_skb(skb, secy->icv_len,
skb              1231 drivers/net/macsec.c 			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
skb              1232 drivers/net/macsec.c 	macsec_reset_skb(skb, secy->netdev);
skb              1238 drivers/net/macsec.c 	skb_orphan(skb);
skb              1239 drivers/net/macsec.c 	ret = gro_cells_receive(&macsec->gro_cells, skb);
skb              1241 drivers/net/macsec.c 		count_rx(dev, skb->len);
skb              1256 drivers/net/macsec.c 	kfree_skb(skb);
skb              1264 drivers/net/macsec.c 		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
skb              1265 drivers/net/macsec.c 				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
skb              1286 drivers/net/macsec.c 		nskb = skb_clone(skb, GFP_ATOMIC);
skb              1303 drivers/net/macsec.c 	*pskb = skb;
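A convention that recurs in the macsec.c entries above: macsec_encrypt() and macsec_decrypt() fold three outcomes into one pointer return. IS_ERR() distinguishes failure from success, and the special value ERR_PTR(-EINPROGRESS) means the AEAD request was queued and the crypto completion callback now owns the skb. A caller-side sketch with hypothetical helper names:

    #include <linux/err.h>

    static struct sk_buff *example_transform(struct sk_buff *skb);
    static int example_consume(struct sk_buff *skb);

    static int example_caller(struct sk_buff *skb)
    {
            skb = example_transform(skb);   /* may complete asynchronously */
            if (IS_ERR(skb)) {
                    if (PTR_ERR(skb) == -EINPROGRESS)
                            return 0;       /* callback will finish and free */
                    return PTR_ERR(skb);    /* hard failure; skb already freed */
            }

            /* Synchronous success: keep processing the transformed skb. */
            return example_consume(skb);
    }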
skb              1480 drivers/net/macsec.c static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
skb              1483 drivers/net/macsec.c 	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
skb              1653 drivers/net/macsec.c static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
skb              1744 drivers/net/macsec.c static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
skb              1809 drivers/net/macsec.c static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
skb              1887 drivers/net/macsec.c static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
skb              1928 drivers/net/macsec.c static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
skb              1968 drivers/net/macsec.c static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
skb              2026 drivers/net/macsec.c static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
skb              2070 drivers/net/macsec.c static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
skb              2114 drivers/net/macsec.c static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
skb              2152 drivers/net/macsec.c static int copy_tx_sa_stats(struct sk_buff *skb,
skb              2165 drivers/net/macsec.c 	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
skb              2166 drivers/net/macsec.c 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
skb              2173 drivers/net/macsec.c copy_rx_sa_stats(struct sk_buff *skb,
skb              2189 drivers/net/macsec.c 	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
skb              2190 drivers/net/macsec.c 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
skb              2191 drivers/net/macsec.c 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
skb              2192 drivers/net/macsec.c 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
skb              2193 drivers/net/macsec.c 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
skb              2200 drivers/net/macsec.c copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
skb              2228 drivers/net/macsec.c 	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
skb              2231 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
skb              2234 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
skb              2237 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
skb              2240 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
skb              2243 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
skb              2246 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
skb              2249 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
skb              2252 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
skb              2255 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
skb              2264 drivers/net/macsec.c copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
skb              2286 drivers/net/macsec.c 	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
skb              2289 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
skb              2292 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
skb              2295 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
skb              2304 drivers/net/macsec.c copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
skb              2330 drivers/net/macsec.c 	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
skb              2333 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
skb              2336 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
skb              2339 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
skb              2342 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
skb              2345 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
skb              2348 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
skb              2351 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
skb              2359 drivers/net/macsec.c static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
skb              2362 drivers/net/macsec.c 	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
skb              2380 drivers/net/macsec.c 	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
skb              2382 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
skb              2384 drivers/net/macsec.c 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
skb              2385 drivers/net/macsec.c 	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
skb              2386 drivers/net/macsec.c 	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
skb              2387 drivers/net/macsec.c 	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
skb              2388 drivers/net/macsec.c 	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
skb              2389 drivers/net/macsec.c 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
skb              2390 drivers/net/macsec.c 	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
skb              2391 drivers/net/macsec.c 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
skb              2392 drivers/net/macsec.c 	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
skb              2393 drivers/net/macsec.c 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
skb              2397 drivers/net/macsec.c 		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
skb              2401 drivers/net/macsec.c 	nla_nest_end(skb, secy_nest);
skb              2405 drivers/net/macsec.c 	nla_nest_cancel(skb, secy_nest);
skb              2411 drivers/net/macsec.c 	  struct sk_buff *skb, struct netlink_callback *cb)
skb              2420 drivers/net/macsec.c 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              2427 drivers/net/macsec.c 	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
skb              2430 drivers/net/macsec.c 	if (nla_put_secy(secy, skb))
skb              2433 drivers/net/macsec.c 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
skb              2436 drivers/net/macsec.c 	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
skb              2437 drivers/net/macsec.c 		nla_nest_cancel(skb, attr);
skb              2440 drivers/net/macsec.c 	nla_nest_end(skb, attr);
skb              2442 drivers/net/macsec.c 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
skb              2445 drivers/net/macsec.c 	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
skb              2446 drivers/net/macsec.c 		nla_nest_cancel(skb, attr);
skb              2449 drivers/net/macsec.c 	nla_nest_end(skb, attr);
skb              2451 drivers/net/macsec.c 	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
skb              2461 drivers/net/macsec.c 		txsa_nest = nla_nest_start_noflag(skb, j++);
skb              2463 drivers/net/macsec.c 			nla_nest_cancel(skb, txsa_list);
skb              2467 drivers/net/macsec.c 		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
skb              2468 drivers/net/macsec.c 		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
skb              2469 drivers/net/macsec.c 		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
skb              2470 drivers/net/macsec.c 		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
skb              2471 drivers/net/macsec.c 			nla_nest_cancel(skb, txsa_nest);
skb              2472 drivers/net/macsec.c 			nla_nest_cancel(skb, txsa_list);
skb              2476 drivers/net/macsec.c 		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
skb              2478 drivers/net/macsec.c 			nla_nest_cancel(skb, txsa_nest);
skb              2479 drivers/net/macsec.c 			nla_nest_cancel(skb, txsa_list);
skb              2482 drivers/net/macsec.c 		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
skb              2483 drivers/net/macsec.c 			nla_nest_cancel(skb, attr);
skb              2484 drivers/net/macsec.c 			nla_nest_cancel(skb, txsa_nest);
skb              2485 drivers/net/macsec.c 			nla_nest_cancel(skb, txsa_list);
skb              2488 drivers/net/macsec.c 		nla_nest_end(skb, attr);
skb              2490 drivers/net/macsec.c 		nla_nest_end(skb, txsa_nest);
skb              2492 drivers/net/macsec.c 	nla_nest_end(skb, txsa_list);
skb              2494 drivers/net/macsec.c 	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
skb              2502 drivers/net/macsec.c 		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
skb              2505 drivers/net/macsec.c 			nla_nest_cancel(skb, rxsc_list);
skb              2509 drivers/net/macsec.c 		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
skb              2510 drivers/net/macsec.c 		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
skb              2512 drivers/net/macsec.c 			nla_nest_cancel(skb, rxsc_nest);
skb              2513 drivers/net/macsec.c 			nla_nest_cancel(skb, rxsc_list);
skb              2517 drivers/net/macsec.c 		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
skb              2519 drivers/net/macsec.c 			nla_nest_cancel(skb, rxsc_nest);
skb              2520 drivers/net/macsec.c 			nla_nest_cancel(skb, rxsc_list);
skb              2523 drivers/net/macsec.c 		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
skb              2524 drivers/net/macsec.c 			nla_nest_cancel(skb, attr);
skb              2525 drivers/net/macsec.c 			nla_nest_cancel(skb, rxsc_nest);
skb              2526 drivers/net/macsec.c 			nla_nest_cancel(skb, rxsc_list);
skb              2529 drivers/net/macsec.c 		nla_nest_end(skb, attr);
skb              2531 drivers/net/macsec.c 		rxsa_list = nla_nest_start_noflag(skb,
skb              2534 drivers/net/macsec.c 			nla_nest_cancel(skb, rxsc_nest);
skb              2535 drivers/net/macsec.c 			nla_nest_cancel(skb, rxsc_list);
skb              2546 drivers/net/macsec.c 			rxsa_nest = nla_nest_start_noflag(skb, k++);
skb              2548 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsa_list);
skb              2549 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsc_nest);
skb              2550 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsc_list);
skb              2554 drivers/net/macsec.c 			attr = nla_nest_start_noflag(skb,
skb              2557 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsa_list);
skb              2558 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsc_nest);
skb              2559 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsc_list);
skb              2562 drivers/net/macsec.c 			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
skb              2563 drivers/net/macsec.c 				nla_nest_cancel(skb, attr);
skb              2564 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsa_list);
skb              2565 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsc_nest);
skb              2566 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsc_list);
skb              2569 drivers/net/macsec.c 			nla_nest_end(skb, attr);
skb              2571 drivers/net/macsec.c 			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
skb              2572 drivers/net/macsec.c 			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
skb              2573 drivers/net/macsec.c 			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
skb              2574 drivers/net/macsec.c 			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
skb              2575 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsa_nest);
skb              2576 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsc_nest);
skb              2577 drivers/net/macsec.c 				nla_nest_cancel(skb, rxsc_list);
skb              2580 drivers/net/macsec.c 			nla_nest_end(skb, rxsa_nest);
skb              2583 drivers/net/macsec.c 		nla_nest_end(skb, rxsa_list);
skb              2584 drivers/net/macsec.c 		nla_nest_end(skb, rxsc_nest);
skb              2587 drivers/net/macsec.c 	nla_nest_end(skb, rxsc_list);
skb              2589 drivers/net/macsec.c 	genlmsg_end(skb, hdr);
skb              2594 drivers/net/macsec.c 	genlmsg_cancel(skb, hdr);
skb              2600 drivers/net/macsec.c static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
skb              2602 drivers/net/macsec.c 	struct net *net = sock_net(skb->sk);
skb              2623 drivers/net/macsec.c 		if (dump_secy(secy, dev, skb, cb) < 0)
skb              2632 drivers/net/macsec.c 	return skb->len;
skb              2709 drivers/net/macsec.c static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
skb              2723 drivers/net/macsec.c 		skb->dev = macsec->real_dev;
skb              2724 drivers/net/macsec.c 		len = skb->len;
skb              2725 drivers/net/macsec.c 		ret = dev_queue_xmit(skb);
skb              2731 drivers/net/macsec.c 		kfree_skb(skb);
skb              2736 drivers/net/macsec.c 	skb = macsec_encrypt(skb, dev);
skb              2737 drivers/net/macsec.c 	if (IS_ERR(skb)) {
skb              2738 drivers/net/macsec.c 		if (PTR_ERR(skb) != -EINPROGRESS)
skb              2743 drivers/net/macsec.c 	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
skb              2745 drivers/net/macsec.c 	macsec_encrypt_finish(skb, dev);
skb              2746 drivers/net/macsec.c 	len = skb->len;
skb              2747 drivers/net/macsec.c 	ret = dev_queue_xmit(skb);
skb              3409 drivers/net/macsec.c static int macsec_fill_info(struct sk_buff *skb,
skb              3427 drivers/net/macsec.c 	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
skb              3429 drivers/net/macsec.c 	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
skb              3430 drivers/net/macsec.c 	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
skb              3432 drivers/net/macsec.c 	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
skb              3433 drivers/net/macsec.c 	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
skb              3434 drivers/net/macsec.c 	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
skb              3435 drivers/net/macsec.c 	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
skb              3436 drivers/net/macsec.c 	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
skb              3437 drivers/net/macsec.c 	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
skb              3438 drivers/net/macsec.c 	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
skb              3439 drivers/net/macsec.c 	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
skb              3444 drivers/net/macsec.c 		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
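Most of the macsec.c netlink code above is an exercise in one discipline: every nla_nest_start_noflag() must be matched by nla_nest_end() on success and by nla_nest_cancel() on each failure path, so a partially written message is rolled back rather than sent. A minimal sketch with placeholder attribute numbers and values:

    #include <net/netlink.h>

    static int example_fill_stats(struct sk_buff *skb)
    {
            struct nlattr *nest;

            nest = nla_nest_start_noflag(skb, 1);   /* 1: hypothetical attr type */
            if (!nest)
                    return -EMSGSIZE;

            if (nla_put_u32(skb, 2, 42)) {          /* 2: hypothetical attr, 42: dummy */
                    nla_nest_cancel(skb, nest);     /* roll back the partial nest */
                    return -EMSGSIZE;
            }

            nla_nest_end(skb, nest);
            return 0;
    }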
skb               224 drivers/net/macvlan.c static int macvlan_broadcast_one(struct sk_buff *skb,
skb               231 drivers/net/macvlan.c 		return __dev_forward_skb(dev, skb);
skb               233 drivers/net/macvlan.c 	skb->dev = dev;
skb               235 drivers/net/macvlan.c 		skb->pkt_type = PACKET_BROADCAST;
skb               237 drivers/net/macvlan.c 		skb->pkt_type = PACKET_MULTICAST;
skb               257 drivers/net/macvlan.c static void macvlan_broadcast(struct sk_buff *skb,
skb               262 drivers/net/macvlan.c 	const struct ethhdr *eth = eth_hdr(skb);
skb               269 drivers/net/macvlan.c 	if (skb->protocol == htons(ETH_P_PAUSE))
skb               282 drivers/net/macvlan.c 			nskb = skb_clone(skb, GFP_ATOMIC);
skb               288 drivers/net/macvlan.c 			macvlan_count_rx(vlan, skb->len + ETH_HLEN,
skb               298 drivers/net/macvlan.c 	struct sk_buff *skb;
skb               307 drivers/net/macvlan.c 	while ((skb = __skb_dequeue(&list))) {
skb               308 drivers/net/macvlan.c 		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
skb               314 drivers/net/macvlan.c 			macvlan_broadcast(skb, port, NULL,
skb               321 drivers/net/macvlan.c 			macvlan_broadcast(skb, port, src->dev,
skb               329 drivers/net/macvlan.c 			macvlan_broadcast(skb, port, src->dev,
skb               336 drivers/net/macvlan.c 		consume_skb(skb);
skb               344 drivers/net/macvlan.c 				      struct sk_buff *skb)
skb               349 drivers/net/macvlan.c 	nskb = skb_clone(skb, GFP_ATOMIC);
skb               374 drivers/net/macvlan.c 	atomic_long_inc(&skb->dev->rx_dropped);
skb               397 drivers/net/macvlan.c static void macvlan_forward_source_one(struct sk_buff *skb,
skb               409 drivers/net/macvlan.c 	nskb = skb_clone(skb, GFP_ATOMIC);
skb               416 drivers/net/macvlan.c 	if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr))
skb               423 drivers/net/macvlan.c static void macvlan_forward_source(struct sk_buff *skb,
skb               433 drivers/net/macvlan.c 			macvlan_forward_source_one(skb, entry->vlan);
skb               441 drivers/net/macvlan.c 	struct sk_buff *skb = *pskb;
skb               442 drivers/net/macvlan.c 	const struct ethhdr *eth = eth_hdr(skb);
skb               450 drivers/net/macvlan.c 	port = macvlan_port_get_rcu(skb->dev);
skb               454 drivers/net/macvlan.c 		skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
skb               455 drivers/net/macvlan.c 		if (!skb)
skb               457 drivers/net/macvlan.c 		*pskb = skb;
skb               458 drivers/net/macvlan.c 		eth = eth_hdr(skb);
skb               459 drivers/net/macvlan.c 		macvlan_forward_source(skb, port, eth->h_source);
skb               465 drivers/net/macvlan.c 			ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
skb               466 drivers/net/macvlan.c 			      netif_rx(skb);
skb               473 drivers/net/macvlan.c 			macvlan_broadcast_enqueue(port, src, skb);
skb               478 drivers/net/macvlan.c 	macvlan_forward_source(skb, port, eth->h_source);
skb               489 drivers/net/macvlan.c 		kfree_skb(skb);
skb               492 drivers/net/macvlan.c 	len = skb->len + ETH_HLEN;
skb               493 drivers/net/macvlan.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               494 drivers/net/macvlan.c 	if (!skb) {
skb               500 drivers/net/macvlan.c 	*pskb = skb;
skb               501 drivers/net/macvlan.c 	skb->dev = dev;
skb               502 drivers/net/macvlan.c 	skb->pkt_type = PACKET_HOST;
skb               511 drivers/net/macvlan.c static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
skb               518 drivers/net/macvlan.c 		const struct ethhdr *eth = skb_eth_hdr(skb);
skb               522 drivers/net/macvlan.c 			skb_reset_mac_header(skb);
skb               523 drivers/net/macvlan.c 			macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
skb               530 drivers/net/macvlan.c 			dev_forward_skb(vlan->lowerdev, skb);
skb               536 drivers/net/macvlan.c 	skb->dev = vlan->lowerdev;
skb               537 drivers/net/macvlan.c 	return dev_queue_xmit_accel(skb,
skb               541 drivers/net/macvlan.c static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
skb               545 drivers/net/macvlan.c 		netpoll_send_skb(vlan->netpoll, skb);
skb               552 drivers/net/macvlan.c static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
skb               556 drivers/net/macvlan.c 	unsigned int len = skb->len;
skb               560 drivers/net/macvlan.c 		return macvlan_netpoll_send_skb(vlan, skb);
skb               562 drivers/net/macvlan.c 	ret = macvlan_queue_xmit(skb, dev);
skb               578 drivers/net/macvlan.c static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
skb               585 drivers/net/macvlan.c 	return dev_hard_header(skb, lowerdev, type, daddr,
skb              1221 drivers/net/macvlan.c 	struct sk_buff *skb;
skb              1231 drivers/net/macvlan.c 	while ((skb = __skb_dequeue(&port->bc_queue))) {
skb              1232 drivers/net/macvlan.c 		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
skb              1237 drivers/net/macvlan.c 		kfree_skb(skb);
skb              1581 drivers/net/macvlan.c static int macvlan_fill_info_macaddr(struct sk_buff *skb,
skb              1591 drivers/net/macvlan.c 		if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
skb              1597 drivers/net/macvlan.c static int macvlan_fill_info(struct sk_buff *skb,
skb              1604 drivers/net/macvlan.c 	if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
skb              1606 drivers/net/macvlan.c 	if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
skb              1608 drivers/net/macvlan.c 	if (nla_put_u32(skb, IFLA_MACVLAN_MACADDR_COUNT, vlan->macaddr_count))
skb              1611 drivers/net/macvlan.c 		nest = nla_nest_start_noflag(skb, IFLA_MACVLAN_MACADDR_DATA);
skb              1616 drivers/net/macvlan.c 			if (macvlan_fill_info_macaddr(skb, vlan, i))
skb              1619 drivers/net/macvlan.c 		nla_nest_end(skb, nest);
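The macvlan.c broadcast entries above follow a clone-per-receiver rule: the original skb is never delivered to a receiver directly; each interested device gets its own skb_clone(), and a failed clone is simply accounted as a drop. A minimal sketch (names illustrative):

    static void example_broadcast_one(struct sk_buff *skb,
                                      struct net_device *dev, bool is_bcast)
    {
            struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

            if (!nskb)
                    return;         /* clone failure counts as an RX drop */

            nskb->dev = dev;
            nskb->pkt_type = is_bcast ? PACKET_BROADCAST : PACKET_MULTICAST;
            netif_rx(nskb);
    }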
skb                89 drivers/net/net_failover.c static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb,
skb                93 drivers/net/net_failover.c 	dev_kfree_skb_any(skb);
skb                97 drivers/net/net_failover.c static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
skb               108 drivers/net/net_failover.c 			return net_failover_drop_xmit(skb, dev);
skb               111 drivers/net/net_failover.c 	skb->dev = xmit_dev;
skb               112 drivers/net/net_failover.c 	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
skb               114 drivers/net/net_failover.c 	return dev_queue_xmit(skb);
skb               118 drivers/net/net_failover.c 				     struct sk_buff *skb,
skb               130 drivers/net/net_failover.c 			txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
skb               132 drivers/net/net_failover.c 			txq = netdev_pick_tx(primary_dev, skb, NULL);
skb               134 drivers/net/net_failover.c 		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
skb               139 drivers/net/net_failover.c 	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
skb               142 drivers/net/net_failover.c 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
skb               364 drivers/net/net_failover.c 	struct sk_buff *skb = *pskb;
skb               365 drivers/net/net_failover.c 	struct net_device *dev = rcu_dereference(skb->dev->rx_handler_data);
skb               372 drivers/net/net_failover.c 	if (primary_dev && skb->dev == standby_dev)
skb               375 drivers/net/net_failover.c 	skb->dev = dev;
skb               367 drivers/net/netdevsim/dev.c 	struct sk_buff *skb;
skb               372 drivers/net/netdevsim/dev.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
skb               373 drivers/net/netdevsim/dev.c 	if (!skb)
skb               377 drivers/net/netdevsim/dev.c 	skb_reset_mac_header(skb);
skb               378 drivers/net/netdevsim/dev.c 	eth = skb_put(skb, sizeof(struct ethhdr));
skb               382 drivers/net/netdevsim/dev.c 	skb->protocol = htons(ETH_P_IP);
skb               384 drivers/net/netdevsim/dev.c 	skb_set_network_header(skb, skb->len);
skb               385 drivers/net/netdevsim/dev.c 	iph = skb_put(skb, sizeof(struct iphdr));
skb               397 drivers/net/netdevsim/dev.c 	skb_set_transport_header(skb, skb->len);
skb               398 drivers/net/netdevsim/dev.c 	udph = skb_put_zero(skb, sizeof(struct udphdr) + data_len);
skb               403 drivers/net/netdevsim/dev.c 	return skb;
skb               418 drivers/net/netdevsim/dev.c 		struct sk_buff *skb;
skb               424 drivers/net/netdevsim/dev.c 		skb = nsim_dev_trap_skb_build();
skb               425 drivers/net/netdevsim/dev.c 		if (!skb)
skb               427 drivers/net/netdevsim/dev.c 		skb->dev = nsim_dev_port->ns->netdev;
skb               435 drivers/net/netdevsim/dev.c 		devlink_trap_report(devlink, skb, nsim_trap_item->trap_ctx,
skb               438 drivers/net/netdevsim/dev.c 		consume_skb(skb);
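The netdevsim dev.c entries above build a test packet from nothing; the key point is that each header is appended with skb_put() and the corresponding header offset is recorded while skb->len still points at it. A compressed sketch (field values are placeholders, not meaningful):

    #include <linux/etherdevice.h>
    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/netlink.h>

    static struct sk_buff *example_build_skb(void)
    {
            struct sk_buff *skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
            struct ethhdr *eth;
            struct iphdr *iph;

            if (!skb)
                    return NULL;

            skb_reset_mac_header(skb);              /* MAC header starts at data */
            eth = skb_put(skb, sizeof(*eth));
            eth_zero_addr(eth->h_source);
            eth_zero_addr(eth->h_dest);
            eth->h_proto = htons(ETH_P_IP);
            skb->protocol = htons(ETH_P_IP);

            skb_set_network_header(skb, skb->len);  /* IP begins where we are now */
            iph = skb_put_zero(skb, sizeof(*iph));
            iph->version = 4;
            iph->ihl = 5;
            iph->protocol = IPPROTO_UDP;

            return skb;
    }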
skb               212 drivers/net/netdevsim/ipsec.c static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
skb               228 drivers/net/netdevsim/ipsec.c bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
skb               230 drivers/net/netdevsim/ipsec.c 	struct sec_path *sp = skb_sec_path(skb);
skb               246 drivers/net/netdevsim/ipsec.c 	xs = xfrm_input_state(skb);
skb                28 drivers/net/netdevsim/netdev.c static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb                32 drivers/net/netdevsim/netdev.c 	if (!nsim_ipsec_tx(ns, skb))
skb                37 drivers/net/netdevsim/netdev.c 	ns->tx_bytes += skb->len;
skb                41 drivers/net/netdevsim/netdev.c 	dev_kfree_skb(skb);
skb               185 drivers/net/netdevsim/netdevsim.h bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb);
skb               195 drivers/net/netdevsim/netdevsim.h static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
skb                10 drivers/net/nlmon.c static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
skb                12 drivers/net/nlmon.c 	int len = skb->len;
skb                20 drivers/net/nlmon.c 	dev_kfree_skb(skb);
skb               103 drivers/net/ntb_netdev.c 	struct sk_buff *skb;
skb               106 drivers/net/ntb_netdev.c 	skb = data;
skb               107 drivers/net/ntb_netdev.c 	if (!skb)
skb               118 drivers/net/ntb_netdev.c 	skb_put(skb, len);
skb               119 drivers/net/ntb_netdev.c 	skb->protocol = eth_type_trans(skb, ndev);
skb               120 drivers/net/ntb_netdev.c 	skb->ip_summed = CHECKSUM_NONE;
skb               122 drivers/net/ntb_netdev.c 	if (netif_rx(skb) == NET_RX_DROP) {
skb               130 drivers/net/ntb_netdev.c 	skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
skb               131 drivers/net/ntb_netdev.c 	if (!skb) {
skb               138 drivers/net/ntb_netdev.c 	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
skb               140 drivers/net/ntb_netdev.c 		dev_kfree_skb(skb);
skb               180 drivers/net/ntb_netdev.c 	struct sk_buff *skb;
skb               183 drivers/net/ntb_netdev.c 	skb = data;
skb               184 drivers/net/ntb_netdev.c 	if (!skb || !ndev)
skb               189 drivers/net/ntb_netdev.c 		ndev->stats.tx_bytes += skb->len;
skb               195 drivers/net/ntb_netdev.c 	dev_kfree_skb(skb);
skb               207 drivers/net/ntb_netdev.c static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
skb               215 drivers/net/ntb_netdev.c 	rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
skb               250 drivers/net/ntb_netdev.c 	struct sk_buff *skb;
skb               255 drivers/net/ntb_netdev.c 		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
skb               256 drivers/net/ntb_netdev.c 		if (!skb) {
skb               261 drivers/net/ntb_netdev.c 		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
skb               264 drivers/net/ntb_netdev.c 			dev_kfree_skb(skb);
skb               278 drivers/net/ntb_netdev.c 	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
skb               279 drivers/net/ntb_netdev.c 		dev_kfree_skb(skb);
skb               286 drivers/net/ntb_netdev.c 	struct sk_buff *skb;
skb               291 drivers/net/ntb_netdev.c 	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
skb               292 drivers/net/ntb_netdev.c 		dev_kfree_skb(skb);
skb               302 drivers/net/ntb_netdev.c 	struct sk_buff *skb;
skb               319 drivers/net/ntb_netdev.c 		for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
skb               320 drivers/net/ntb_netdev.c 			dev_kfree_skb(skb);
skb               323 drivers/net/ntb_netdev.c 			skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
skb               324 drivers/net/ntb_netdev.c 			if (!skb) {
skb               329 drivers/net/ntb_netdev.c 			rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
skb               332 drivers/net/ntb_netdev.c 				dev_kfree_skb(skb);
skb               347 drivers/net/ntb_netdev.c 	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
skb               348 drivers/net/ntb_netdev.c 		dev_kfree_skb(skb);
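The ntb_netdev.c entries above repeat one refill idiom: keep posting MTU-sized receive buffers to the transport, and free any buffer the transport refuses. ntb_transport_rx_enqueue() is the real API; the wrapper below is an illustrative sketch:

    static int example_refill_rx(struct ntb_transport_qp *qp,
                                 struct net_device *ndev, int count)
    {
            struct sk_buff *skb;
            int i, rc;

            for (i = 0; i < count; i++) {
                    skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
                    if (!skb)
                            return -ENOMEM;

                    rc = ntb_transport_rx_enqueue(qp, skb, skb->data,
                                                  ndev->mtu + ETH_HLEN);
                    if (rc) {
                            dev_kfree_skb(skb);     /* transport did not take it */
                            return rc;
                    }
            }
            return 0;
    }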
skb               573 drivers/net/phy/dp83640.c static bool is_status_frame(struct sk_buff *skb, int type)
skb               575 drivers/net/phy/dp83640.c 	struct ethhdr *h = eth_hdr(skb);
skb               803 drivers/net/phy/dp83640.c static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
skb               807 drivers/net/phy/dp83640.c 	u8 *msgtype, *data = skb_mac_header(skb);
skb               828 drivers/net/phy/dp83640.c 	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
skb               855 drivers/net/phy/dp83640.c 	struct sk_buff *skb;
skb               876 drivers/net/phy/dp83640.c 	skb_queue_walk(&dp83640->rx_queue, skb) {
skb               879 drivers/net/phy/dp83640.c 		skb_info = (struct dp83640_skb_info *)skb->cb;
skb               880 drivers/net/phy/dp83640.c 		if (match(skb, skb_info->ptp_type, rxts)) {
skb               881 drivers/net/phy/dp83640.c 			__skb_unlink(skb, &dp83640->rx_queue);
skb               882 drivers/net/phy/dp83640.c 			shhwtstamps = skb_hwtstamps(skb);
skb               897 drivers/net/phy/dp83640.c 		netif_rx_ni(skb);
skb               905 drivers/net/phy/dp83640.c 	struct sk_buff *skb;
skb               911 drivers/net/phy/dp83640.c 	skb = skb_dequeue(&dp83640->tx_queue);
skb               912 drivers/net/phy/dp83640.c 	if (!skb) {
skb               920 drivers/net/phy/dp83640.c 		while (skb) {
skb               921 drivers/net/phy/dp83640.c 			kfree_skb(skb);
skb               922 drivers/net/phy/dp83640.c 			skb = skb_dequeue(&dp83640->tx_queue);
skb               926 drivers/net/phy/dp83640.c 	skb_info = (struct dp83640_skb_info *)skb->cb;
skb               928 drivers/net/phy/dp83640.c 		kfree_skb(skb);
skb               935 drivers/net/phy/dp83640.c 	skb_complete_tx_timestamp(skb, &shhwtstamps);
skb               939 drivers/net/phy/dp83640.c 				struct sk_buff *skb)
skb               947 drivers/net/phy/dp83640.c 	ptr = skb->data + 2;
skb               949 drivers/net/phy/dp83640.c 	for (len = skb_headlen(skb) - 2; len > sizeof(type); len -= size) {
skb               981 drivers/net/phy/dp83640.c static int is_sync(struct sk_buff *skb, int type)
skb               983 drivers/net/phy/dp83640.c 	u8 *data = skb->data, *msgtype;
skb              1006 drivers/net/phy/dp83640.c 	if (skb->len < offset + 1)
skb              1412 drivers/net/phy/dp83640.c 	struct sk_buff *skb;
skb              1415 drivers/net/phy/dp83640.c 	while ((skb = skb_dequeue(&dp83640->rx_queue))) {
skb              1418 drivers/net/phy/dp83640.c 		skb_info = (struct dp83640_skb_info *)skb->cb;
skb              1420 drivers/net/phy/dp83640.c 			skb_queue_head(&dp83640->rx_queue, skb);
skb              1424 drivers/net/phy/dp83640.c 		netif_rx_ni(skb);
skb              1432 drivers/net/phy/dp83640.c 			     struct sk_buff *skb, int type)
skb              1435 drivers/net/phy/dp83640.c 	struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
skb              1441 drivers/net/phy/dp83640.c 	if (is_status_frame(skb, type)) {
skb              1442 drivers/net/phy/dp83640.c 		decode_status_frame(dp83640, skb);
skb              1443 drivers/net/phy/dp83640.c 		kfree_skb(skb);
skb              1457 drivers/net/phy/dp83640.c 		if (match(skb, type, rxts)) {
skb              1458 drivers/net/phy/dp83640.c 			shhwtstamps = skb_hwtstamps(skb);
skb              1471 drivers/net/phy/dp83640.c 		skb_queue_tail(&dp83640->rx_queue, skb);
skb              1474 drivers/net/phy/dp83640.c 		netif_rx_ni(skb);
skb              1481 drivers/net/phy/dp83640.c 			     struct sk_buff *skb, int type)
skb              1483 drivers/net/phy/dp83640.c 	struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
skb              1489 drivers/net/phy/dp83640.c 		if (is_sync(skb, type)) {
skb              1490 drivers/net/phy/dp83640.c 			kfree_skb(skb);
skb              1495 drivers/net/phy/dp83640.c 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb              1497 drivers/net/phy/dp83640.c 		skb_queue_tail(&dp83640->tx_queue, skb);
skb              1502 drivers/net/phy/dp83640.c 		kfree_skb(skb);
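The dp83640.c entries above implement deferred receive timestamping: if the hardware timestamp for a PTP frame has not arrived yet, the frame is parked on a queue and delivered later, when the matching timestamp (or a timeout) shows up. A stripped-down sketch of the two outcomes, with made-up names:

    static void example_defer_rx(struct sk_buff_head *rx_queue,
                                 struct sk_buff *skb, bool ts_ready,
                                 ktime_t hwts)
    {
            if (ts_ready) {
                    /* Timestamp already known: fill it in and deliver now. */
                    skb_hwtstamps(skb)->hwtstamp = hwts;
                    netif_rx_ni(skb);
            } else {
                    /* Park the frame; a later status frame releases it. */
                    skb_queue_tail(rx_queue, skb);
            }
    }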
skb               145 drivers/net/plip/plip.c static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
skb               146 drivers/net/plip/plip.c static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
skb               200 drivers/net/plip/plip.c 	struct sk_buff *skb;
skb               446 drivers/net/plip/plip.c 	if (rcv->skb) {
skb               447 drivers/net/plip/plip.c 		kfree_skb(rcv->skb);
skb               448 drivers/net/plip/plip.c 		rcv->skb = NULL;
skb               451 drivers/net/plip/plip.c 	if (snd->skb) {
skb               452 drivers/net/plip/plip.c 		dev_kfree_skb(snd->skb);
skb               453 drivers/net/plip/plip.c 		snd->skb = NULL;
skb               537 drivers/net/plip/plip.c static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
skb               542 drivers/net/plip/plip.c 	skb_reset_mac_header(skb);
skb               543 drivers/net/plip/plip.c 	skb_pull(skb,dev->hard_header_len);
skb               544 drivers/net/plip/plip.c 	eth = eth_hdr(skb);
skb               549 drivers/net/plip/plip.c 			skb->pkt_type=PACKET_BROADCAST;
skb               551 drivers/net/plip/plip.c 			skb->pkt_type=PACKET_MULTICAST;
skb               562 drivers/net/plip/plip.c 	rawp = skb->data;
skb               630 drivers/net/plip/plip.c 		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
skb               631 drivers/net/plip/plip.c 		if (rcv->skb == NULL) {
skb               635 drivers/net/plip/plip.c 		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
skb               636 drivers/net/plip/plip.c 		skb_put(rcv->skb,rcv->length.h);
skb               637 drivers/net/plip/plip.c 		rcv->skb->dev = dev;
skb               644 drivers/net/plip/plip.c 		lbuf = rcv->skb->data;
skb               671 drivers/net/plip/plip.c 		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
skb               672 drivers/net/plip/plip.c 		netif_rx_ni(rcv->skb);
skb               675 drivers/net/plip/plip.c 		rcv->skb = NULL;
skb               757 drivers/net/plip/plip.c 	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
skb               760 drivers/net/plip/plip.c 		snd->skb = NULL;
skb               845 drivers/net/plip/plip.c 		dev->stats.tx_bytes += snd->skb->len;
skb               846 drivers/net/plip/plip.c 		dev_kfree_skb(snd->skb);
skb               854 drivers/net/plip/plip.c 		snd->skb = NULL;
skb               962 drivers/net/plip/plip.c plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
skb               979 drivers/net/plip/plip.c 	if (skb->len > dev->mtu + dev->hard_header_len) {
skb               980 drivers/net/plip/plip.c 		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
skb               989 drivers/net/plip/plip.c 	snd->skb = skb;
skb               990 drivers/net/plip/plip.c 	snd->length.h = skb->len;
skb              1022 drivers/net/plip/plip.c plip_hard_header(struct sk_buff *skb, struct net_device *dev,
skb              1028 drivers/net/plip/plip.c 	ret = eth_header(skb, dev, type, daddr, saddr, len);
skb              1030 drivers/net/plip/plip.c 		plip_rewrite_address (dev, (struct ethhdr *)skb->data);
skb              1085 drivers/net/plip/plip.c 	nl->rcv_data.skb = nl->snd_data.skb = NULL;
skb              1147 drivers/net/plip/plip.c 	if (snd->skb) {
skb              1148 drivers/net/plip/plip.c 		dev_kfree_skb(snd->skb);
skb              1149 drivers/net/plip/plip.c 		snd->skb = NULL;
skb              1152 drivers/net/plip/plip.c 	if (rcv->skb) {
skb              1153 drivers/net/plip/plip.c 		kfree_skb(rcv->skb);
skb              1154 drivers/net/plip/plip.c 		rcv->skb = NULL;
skb                97 drivers/net/ppp/ppp_async.c static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
skb               494 drivers/net/ppp/ppp_async.c 	struct sk_buff *skb;
skb               497 drivers/net/ppp/ppp_async.c 	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
skb               498 drivers/net/ppp/ppp_async.c 		if (skb->cb[0])
skb               500 drivers/net/ppp/ppp_async.c 		ppp_input(&ap->chan, skb);
skb               628 drivers/net/ppp/ppp_async.c ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
skb               636 drivers/net/ppp/ppp_async.c 	ap->tpkt = skb;
skb               767 drivers/net/ppp/ppp_async.c 	struct sk_buff *skb;
skb               771 drivers/net/ppp/ppp_async.c 	skb = ap->rpkt;
skb               775 drivers/net/ppp/ppp_async.c 	if (skb == NULL)
skb               779 drivers/net/ppp/ppp_async.c 	p = skb->data;
skb               780 drivers/net/ppp/ppp_async.c 	len = skb->len;
skb               788 drivers/net/ppp/ppp_async.c 	skb_trim(skb, skb->len - 2);
skb               791 drivers/net/ppp/ppp_async.c 	p = skb->data;
skb               794 drivers/net/ppp/ppp_async.c 		if (p[1] != PPP_UI || skb->len < 3)
skb               796 drivers/net/ppp/ppp_async.c 		p = skb_pull(skb, 2);
skb               803 drivers/net/ppp/ppp_async.c 		if (skb->len < 2)
skb               807 drivers/net/ppp/ppp_async.c 			async_lcp_peek(ap, p, skb->len, 1);
skb               811 drivers/net/ppp/ppp_async.c 	skb->cb[0] = ap->state;
skb               812 drivers/net/ppp/ppp_async.c 	skb_queue_tail(&ap->rqueue, skb);
skb               820 drivers/net/ppp/ppp_async.c 	if (skb) {
skb               822 drivers/net/ppp/ppp_async.c 		skb_trim(skb, 0);
skb               823 drivers/net/ppp/ppp_async.c 		skb_reserve(skb, - skb_headroom(skb));
skb               834 drivers/net/ppp/ppp_async.c 	struct sk_buff *skb;
skb               872 drivers/net/ppp/ppp_async.c 			skb = ap->rpkt;
skb               873 drivers/net/ppp/ppp_async.c 			if (!skb) {
skb               874 drivers/net/ppp/ppp_async.c 				skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
skb               875 drivers/net/ppp/ppp_async.c 				if (!skb)
skb               877 drivers/net/ppp/ppp_async.c 				ap->rpkt = skb;
skb               879 drivers/net/ppp/ppp_async.c 			if (skb->len == 0) {
skb               887 drivers/net/ppp/ppp_async.c 					skb_reserve(skb, 2 + (buf[0] & 1));
skb               889 drivers/net/ppp/ppp_async.c 			if (n > skb_tailroom(skb)) {
skb               893 drivers/net/ppp/ppp_async.c 				sp = skb_put_data(skb, buf, n);
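
Note: ppp_async.c reassembles HDLC-framed bytes into a persistent ap->rpkt and, when a frame completes, stashes the channel state in skb->cb[0] before queueing it on ap->rqueue; a flush routine later drains that queue into ppp_input(). A sketch of the cb[]-plus-queue handoff under assumed names (my_chan, my_*):

	#include <linux/printk.h>
	#include <linux/skbuff.h>

	struct my_chan {
		struct sk_buff_head rqueue;	/* IRQ-side producer, later consumer */
	};

	static void my_frame_complete(struct my_chan *ap, struct sk_buff *skb, u8 err)
	{
		skb->cb[0] = err;		/* cb[] belongs to the current skb owner */
		skb_queue_tail(&ap->rqueue, skb);
	}

	static void my_flush_input(struct my_chan *ap)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
			if (skb->cb[0])	/* producer flagged input errors */
				pr_debug("frame had input errors\n");
			/* deliver: the real driver calls ppp_input(&ap->chan, skb) */
			kfree_skb(skb);	/* this sketch just consumes the frame */
		}
	}
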
skb               232 drivers/net/ppp/ppp_generic.c #define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)
skb               256 drivers/net/ppp/ppp_generic.c static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
skb               257 drivers/net/ppp/ppp_generic.c static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
skb               260 drivers/net/ppp/ppp_generic.c static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
skb               263 drivers/net/ppp/ppp_generic.c static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
skb               265 drivers/net/ppp/ppp_generic.c 					    struct sk_buff *skb);
skb               267 drivers/net/ppp/ppp_generic.c static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
skb               269 drivers/net/ppp/ppp_generic.c static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
skb               271 drivers/net/ppp/ppp_generic.c static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
skb               274 drivers/net/ppp/ppp_generic.c static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
skb               428 drivers/net/ppp/ppp_generic.c 	struct sk_buff *skb = NULL;
skb               439 drivers/net/ppp/ppp_generic.c 		skb = skb_dequeue(&pf->rq);
skb               440 drivers/net/ppp/ppp_generic.c 		if (skb)
skb               472 drivers/net/ppp/ppp_generic.c 	if (!skb)
skb               476 drivers/net/ppp/ppp_generic.c 	if (skb->len > count)
skb               482 drivers/net/ppp/ppp_generic.c 	if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
skb               484 drivers/net/ppp/ppp_generic.c 	ret = skb->len;
skb               487 drivers/net/ppp/ppp_generic.c 	kfree_skb(skb);
skb               496 drivers/net/ppp/ppp_generic.c 	struct sk_buff *skb;
skb               502 drivers/net/ppp/ppp_generic.c 	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
skb               503 drivers/net/ppp/ppp_generic.c 	if (!skb)
skb               505 drivers/net/ppp/ppp_generic.c 	skb_reserve(skb, pf->hdrlen);
skb               507 drivers/net/ppp/ppp_generic.c 	if (copy_from_user(skb_put(skb, count), buf, count)) {
skb               508 drivers/net/ppp/ppp_generic.c 		kfree_skb(skb);
skb               514 drivers/net/ppp/ppp_generic.c 		ppp_xmit_process(PF_TO_PPP(pf), skb);
skb               517 drivers/net/ppp/ppp_generic.c 		skb_queue_tail(&pf->xq, skb);
skb              1136 drivers/net/ppp/ppp_generic.c static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              1215 drivers/net/ppp/ppp_generic.c ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1221 drivers/net/ppp/ppp_generic.c 	npi = ethertype_to_npindex(ntohs(skb->protocol));
skb              1240 drivers/net/ppp/ppp_generic.c 	if (skb_cow_head(skb, PPP_HDRLEN))
skb              1243 drivers/net/ppp/ppp_generic.c 	pp = skb_push(skb, 2);
skb              1247 drivers/net/ppp/ppp_generic.c 	skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
skb              1248 drivers/net/ppp/ppp_generic.c 	ppp_xmit_process(ppp, skb);
skb              1253 drivers/net/ppp/ppp_generic.c 	kfree_skb(skb);
skb              1399 drivers/net/ppp/ppp_generic.c static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
skb              1405 drivers/net/ppp/ppp_generic.c 		if (skb)
skb              1406 drivers/net/ppp/ppp_generic.c 			skb_queue_tail(&ppp->file.xq, skb);
skb              1408 drivers/net/ppp/ppp_generic.c 		       (skb = skb_dequeue(&ppp->file.xq)))
skb              1409 drivers/net/ppp/ppp_generic.c 			ppp_send_frame(ppp, skb);
skb              1417 drivers/net/ppp/ppp_generic.c 		kfree_skb(skb);
skb              1422 drivers/net/ppp/ppp_generic.c static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
skb              1430 drivers/net/ppp/ppp_generic.c 	__ppp_xmit_process(ppp, skb);
skb              1440 drivers/net/ppp/ppp_generic.c 	kfree_skb(skb);
skb              1447 drivers/net/ppp/ppp_generic.c pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
skb              1466 drivers/net/ppp/ppp_generic.c 	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
skb              1467 drivers/net/ppp/ppp_generic.c 				   new_skb->data, skb->len + 2,
skb              1470 drivers/net/ppp/ppp_generic.c 		consume_skb(skb);
skb              1471 drivers/net/ppp/ppp_generic.c 		skb = new_skb;
skb              1472 drivers/net/ppp/ppp_generic.c 		skb_put(skb, len);
skb              1473 drivers/net/ppp/ppp_generic.c 		skb_pull(skb, 2);	/* pull off A/C bytes */
skb              1477 drivers/net/ppp/ppp_generic.c 		new_skb = skb;
skb              1489 drivers/net/ppp/ppp_generic.c 		kfree_skb(skb);
skb              1502 drivers/net/ppp/ppp_generic.c ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
skb              1504 drivers/net/ppp/ppp_generic.c 	int proto = PPP_PROTO(skb);
skb              1514 drivers/net/ppp/ppp_generic.c 		*(u8 *)skb_push(skb, 2) = 1;
skb              1516 drivers/net/ppp/ppp_generic.c 		    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
skb              1521 drivers/net/ppp/ppp_generic.c 			kfree_skb(skb);
skb              1526 drivers/net/ppp/ppp_generic.c 		      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
skb              1528 drivers/net/ppp/ppp_generic.c 		skb_pull(skb, 2);
skb              1536 drivers/net/ppp/ppp_generic.c 	ppp->stats64.tx_bytes += skb->len - 2;
skb              1543 drivers/net/ppp/ppp_generic.c 		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
skb              1550 drivers/net/ppp/ppp_generic.c 		cp = skb->data + 2;
skb              1551 drivers/net/ppp/ppp_generic.c 		len = slhc_compress(ppp->vj, cp, skb->len - 2,
skb              1554 drivers/net/ppp/ppp_generic.c 		if (cp == skb->data + 2) {
skb              1563 drivers/net/ppp/ppp_generic.c 				cp[0] = skb->data[2];
skb              1565 drivers/net/ppp/ppp_generic.c 			consume_skb(skb);
skb              1566 drivers/net/ppp/ppp_generic.c 			skb = new_skb;
skb              1567 drivers/net/ppp/ppp_generic.c 			cp = skb_put(skb, len + 2);
skb              1575 drivers/net/ppp/ppp_generic.c 		ppp_ccp_peek(ppp, skb, 0);
skb              1589 drivers/net/ppp/ppp_generic.c 		skb = pad_compress_skb(ppp, skb);
skb              1590 drivers/net/ppp/ppp_generic.c 		if (!skb)
skb              1601 drivers/net/ppp/ppp_generic.c 		skb_queue_tail(&ppp->file.rq, skb);
skb              1606 drivers/net/ppp/ppp_generic.c 	ppp->xmit_pending = skb;
skb              1611 drivers/net/ppp/ppp_generic.c 	kfree_skb(skb);
skb              1624 drivers/net/ppp/ppp_generic.c 	struct sk_buff *skb = ppp->xmit_pending;
skb              1626 drivers/net/ppp/ppp_generic.c 	if (!skb)
skb              1633 drivers/net/ppp/ppp_generic.c 		kfree_skb(skb);
skb              1644 drivers/net/ppp/ppp_generic.c 			if (pch->chan->ops->start_xmit(pch->chan, skb))
skb              1648 drivers/net/ppp/ppp_generic.c 			kfree_skb(skb);
skb              1658 drivers/net/ppp/ppp_generic.c 	if (!ppp_mp_explode(ppp, skb))
skb              1663 drivers/net/ppp/ppp_generic.c 	kfree_skb(skb);
skb              1676 drivers/net/ppp/ppp_generic.c static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
skb              1735 drivers/net/ppp/ppp_generic.c 	p = skb->data;
skb              1736 drivers/net/ppp/ppp_generic.c 	len = skb->len;
skb              1904 drivers/net/ppp/ppp_generic.c 	struct sk_buff *skb;
skb              1910 drivers/net/ppp/ppp_generic.c 			skb = skb_dequeue(&pch->file.xq);
skb              1911 drivers/net/ppp/ppp_generic.c 			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
skb              1913 drivers/net/ppp/ppp_generic.c 				skb_queue_head(&pch->file.xq, skb);
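
Note: on the transmit side of ppp_generic.c, ppp_start_xmit() prepends the 16-bit PPP protocol number with skb_push() once skb_cow_head() has guaranteed writable headroom, and ppp_xmit_process() serializes frames through file.xq. A sketch of just the header-push step; the function name is illustrative, PPP_HDRLEN comes from <linux/ppp_defs.h>:

	#include <linux/errno.h>
	#include <linux/ppp_defs.h>
	#include <linux/skbuff.h>

	static int my_push_ppp_proto(struct sk_buff *skb, u16 proto)
	{
		unsigned char *pp;

		if (skb_cow_head(skb, PPP_HDRLEN))	/* unshare + ensure headroom */
			return -ENOMEM;

		pp = skb_push(skb, 2);			/* 2-byte protocol field */
		pp[0] = proto >> 8;
		pp[1] = proto & 0xff;
		return 0;
	}
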
skb              1951 drivers/net/ppp/ppp_generic.c #define PPP_MP_CB(skb)	((struct ppp_mp_skb_parm *)((skb)->cb))
skb              1954 drivers/net/ppp/ppp_generic.c ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
skb              1958 drivers/net/ppp/ppp_generic.c 		ppp_receive_frame(ppp, skb, pch);
skb              1960 drivers/net/ppp/ppp_generic.c 		kfree_skb(skb);
skb              1974 drivers/net/ppp/ppp_generic.c static void __ppp_decompress_proto(struct sk_buff *skb)
skb              1976 drivers/net/ppp/ppp_generic.c 	if (skb->data[0] & 0x01)
skb              1977 drivers/net/ppp/ppp_generic.c 		*(u8 *)skb_push(skb, 1) = 0x00;
skb              1992 drivers/net/ppp/ppp_generic.c static bool ppp_decompress_proto(struct sk_buff *skb)
skb              1995 drivers/net/ppp/ppp_generic.c 	if (!pskb_may_pull(skb, 1))
skb              1998 drivers/net/ppp/ppp_generic.c 	__ppp_decompress_proto(skb);
skb              2001 drivers/net/ppp/ppp_generic.c 	return pskb_may_pull(skb, 2);
skb              2005 drivers/net/ppp/ppp_generic.c ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
skb              2011 drivers/net/ppp/ppp_generic.c 		kfree_skb(skb);
skb              2016 drivers/net/ppp/ppp_generic.c 	if (!ppp_decompress_proto(skb)) {
skb              2017 drivers/net/ppp/ppp_generic.c 		kfree_skb(skb);
skb              2025 drivers/net/ppp/ppp_generic.c 	proto = PPP_PROTO(skb);
skb              2028 drivers/net/ppp/ppp_generic.c 		skb_queue_tail(&pch->file.rq, skb);
skb              2031 drivers/net/ppp/ppp_generic.c 		       (skb = skb_dequeue(&pch->file.rq)))
skb              2032 drivers/net/ppp/ppp_generic.c 			kfree_skb(skb);
skb              2035 drivers/net/ppp/ppp_generic.c 		ppp_do_recv(pch->ppp, skb, pch);
skb              2047 drivers/net/ppp/ppp_generic.c 	struct sk_buff *skb;
skb              2054 drivers/net/ppp/ppp_generic.c 		skb = alloc_skb(0, GFP_ATOMIC);
skb              2055 drivers/net/ppp/ppp_generic.c 		if (skb) {
skb              2056 drivers/net/ppp/ppp_generic.c 			skb->len = 0;		/* probably unnecessary */
skb              2057 drivers/net/ppp/ppp_generic.c 			skb->cb[0] = code;
skb              2058 drivers/net/ppp/ppp_generic.c 			ppp_do_recv(pch->ppp, skb, pch);
skb              2069 drivers/net/ppp/ppp_generic.c ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
skb              2072 drivers/net/ppp/ppp_generic.c 	if (skb->len > 0) {
skb              2073 drivers/net/ppp/ppp_generic.c 		skb_checksum_complete_unset(skb);
skb              2076 drivers/net/ppp/ppp_generic.c 		if (PPP_PROTO(skb) == PPP_MP)
skb              2077 drivers/net/ppp/ppp_generic.c 			ppp_receive_mp_frame(ppp, skb, pch);
skb              2080 drivers/net/ppp/ppp_generic.c 			ppp_receive_nonmp_frame(ppp, skb);
skb              2082 drivers/net/ppp/ppp_generic.c 		kfree_skb(skb);
skb              2096 drivers/net/ppp/ppp_generic.c ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
skb              2108 drivers/net/ppp/ppp_generic.c 		skb = ppp_decompress_frame(ppp, skb);
skb              2116 drivers/net/ppp/ppp_generic.c 	proto = PPP_PROTO(skb);
skb              2123 drivers/net/ppp/ppp_generic.c 		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
skb              2125 drivers/net/ppp/ppp_generic.c 			ns = dev_alloc_skb(skb->len + 128);
skb              2132 drivers/net/ppp/ppp_generic.c 			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
skb              2133 drivers/net/ppp/ppp_generic.c 			consume_skb(skb);
skb              2134 drivers/net/ppp/ppp_generic.c 			skb = ns;
skb              2137 drivers/net/ppp/ppp_generic.c 			skb->ip_summed = CHECKSUM_NONE;
skb              2139 drivers/net/ppp/ppp_generic.c 		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
skb              2146 drivers/net/ppp/ppp_generic.c 		if (len > skb->len)
skb              2147 drivers/net/ppp/ppp_generic.c 			skb_put(skb, len - skb->len);
skb              2148 drivers/net/ppp/ppp_generic.c 		else if (len < skb->len)
skb              2149 drivers/net/ppp/ppp_generic.c 			skb_trim(skb, len);
skb              2160 drivers/net/ppp/ppp_generic.c 		if (!pskb_may_pull(skb, skb->len))
skb              2163 drivers/net/ppp/ppp_generic.c 		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
skb              2171 drivers/net/ppp/ppp_generic.c 		ppp_ccp_peek(ppp, skb, 1);
skb              2176 drivers/net/ppp/ppp_generic.c 	ppp->stats64.rx_bytes += skb->len - 2;
skb              2181 drivers/net/ppp/ppp_generic.c 		skb_queue_tail(&ppp->file.rq, skb);
skb              2184 drivers/net/ppp/ppp_generic.c 		       (skb = skb_dequeue(&ppp->file.rq)))
skb              2185 drivers/net/ppp/ppp_generic.c 			kfree_skb(skb);
skb              2197 drivers/net/ppp/ppp_generic.c 			if (skb_unclone(skb, GFP_ATOMIC))
skb              2200 drivers/net/ppp/ppp_generic.c 			*(u8 *)skb_push(skb, 2) = 0;
skb              2202 drivers/net/ppp/ppp_generic.c 			    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
skb              2207 drivers/net/ppp/ppp_generic.c 				kfree_skb(skb);
skb              2211 drivers/net/ppp/ppp_generic.c 			      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
skb              2213 drivers/net/ppp/ppp_generic.c 			__skb_pull(skb, 2);
skb              2220 drivers/net/ppp/ppp_generic.c 			kfree_skb(skb);
skb              2223 drivers/net/ppp/ppp_generic.c 			skb_pull_rcsum(skb, 2);
skb              2224 drivers/net/ppp/ppp_generic.c 			skb->dev = ppp->dev;
skb              2225 drivers/net/ppp/ppp_generic.c 			skb->protocol = htons(npindex_to_ethertype[npi]);
skb              2226 drivers/net/ppp/ppp_generic.c 			skb_reset_mac_header(skb);
skb              2227 drivers/net/ppp/ppp_generic.c 			skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
skb              2229 drivers/net/ppp/ppp_generic.c 			netif_rx(skb);
skb              2235 drivers/net/ppp/ppp_generic.c 	kfree_skb(skb);
skb              2240 drivers/net/ppp/ppp_generic.c ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
skb              2242 drivers/net/ppp/ppp_generic.c 	int proto = PPP_PROTO(skb);
skb              2249 drivers/net/ppp/ppp_generic.c 	if (!pskb_may_pull(skb, skb->len))
skb              2271 drivers/net/ppp/ppp_generic.c 		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
skb              2272 drivers/net/ppp/ppp_generic.c 				skb->len + 2, ns->data, obuff_size);
skb              2282 drivers/net/ppp/ppp_generic.c 		consume_skb(skb);
skb              2283 drivers/net/ppp/ppp_generic.c 		skb = ns;
skb              2284 drivers/net/ppp/ppp_generic.c 		skb_put(skb, len);
skb              2285 drivers/net/ppp/ppp_generic.c 		skb_pull(skb, 2);	/* pull off the A/C bytes */
skb              2294 drivers/net/ppp/ppp_generic.c 			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
skb              2295 drivers/net/ppp/ppp_generic.c 					   skb->len + 2);
skb              2298 drivers/net/ppp/ppp_generic.c 	return skb;
skb              2303 drivers/net/ppp/ppp_generic.c 	return skb;
skb              2313 drivers/net/ppp/ppp_generic.c ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
skb              2319 drivers/net/ppp/ppp_generic.c 	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
skb              2324 drivers/net/ppp/ppp_generic.c 		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
skb              2327 drivers/net/ppp/ppp_generic.c 		seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
skb              2330 drivers/net/ppp/ppp_generic.c 	PPP_MP_CB(skb)->BEbits = skb->data[2];
skb              2331 drivers/net/ppp/ppp_generic.c 	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */
skb              2338 drivers/net/ppp/ppp_generic.c 	if (PPP_MP_CB(skb)->BEbits & B)
skb              2339 drivers/net/ppp/ppp_generic.c 		__ppp_decompress_proto(skb);
skb              2350 drivers/net/ppp/ppp_generic.c 	PPP_MP_CB(skb)->sequence = seq;
skb              2358 drivers/net/ppp/ppp_generic.c 		kfree_skb(skb);
skb              2380 drivers/net/ppp/ppp_generic.c 	ppp_mp_insert(ppp, skb);
skb              2391 drivers/net/ppp/ppp_generic.c 	while ((skb = ppp_mp_reconstruct(ppp))) {
skb              2392 drivers/net/ppp/ppp_generic.c 		if (pskb_may_pull(skb, 2))
skb              2393 drivers/net/ppp/ppp_generic.c 			ppp_receive_nonmp_frame(ppp, skb);
skb              2396 drivers/net/ppp/ppp_generic.c 			kfree_skb(skb);
skb              2404 drivers/net/ppp/ppp_generic.c 	kfree_skb(skb);
skb              2413 drivers/net/ppp/ppp_generic.c ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
skb              2417 drivers/net/ppp/ppp_generic.c 	u32 seq = PPP_MP_CB(skb)->sequence;
skb              2425 drivers/net/ppp/ppp_generic.c 	__skb_queue_before(list, p, skb);
skb              2442 drivers/net/ppp/ppp_generic.c 	struct sk_buff *skb = NULL;
skb              2560 drivers/net/ppp/ppp_generic.c 		skb = head;
skb              2562 drivers/net/ppp/ppp_generic.c 			struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
skb              2564 drivers/net/ppp/ppp_generic.c 			__skb_unlink(skb, list);
skb              2571 drivers/net/ppp/ppp_generic.c 				skb->len += p->len;
skb              2572 drivers/net/ppp/ppp_generic.c 				skb->data_len += p->len;
skb              2573 drivers/net/ppp/ppp_generic.c 				skb->truesize += p->truesize;
skb              2579 drivers/net/ppp/ppp_generic.c 			__skb_unlink(skb, list);
skb              2585 drivers/net/ppp/ppp_generic.c 	return skb;
skb              2810 drivers/net/ppp/ppp_generic.c ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
skb              2815 drivers/net/ppp/ppp_generic.c 	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
skb              2817 drivers/net/ppp/ppp_generic.c 	dp = skb->data + 2;
skb              2851 drivers/net/ppp/ppp_generic.c 		if (!pskb_may_pull(skb, len + 2))
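
Note: the receive half above leans on two guards visible in the index: pskb_may_pull() before any header byte is read, and protocol-field decompression, where an odd leading byte means the 0x00 high byte of the 16-bit protocol was elided on the wire (RFC 1661 PFC). Reconstructed from the __ppp_decompress_proto()/ppp_decompress_proto() entries above; the channel drivers guarantee the one byte of headroom the push needs:

	#include <linux/skbuff.h>

	static bool my_decompress_proto(struct sk_buff *skb)
	{
		if (!pskb_may_pull(skb, 1))	/* first byte must be linear */
			return false;

		if (skb->data[0] & 0x01)	/* odd => compressed protocol field */
			*(u8 *)skb_push(skb, 1) = 0x00;

		/* PPP_PROTO() reads 16 bits, so two linear bytes are required */
		return pskb_may_pull(skb, 2);
	}
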
skb                90 drivers/net/ppp/ppp_synctty.c static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
skb               486 drivers/net/ppp/ppp_synctty.c 	struct sk_buff *skb;
skb               489 drivers/net/ppp/ppp_synctty.c 	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
skb               490 drivers/net/ppp/ppp_synctty.c 		if (skb->len == 0) {
skb               493 drivers/net/ppp/ppp_synctty.c 			kfree_skb(skb);
skb               496 drivers/net/ppp/ppp_synctty.c 			ppp_input(&ap->chan, skb);
skb               509 drivers/net/ppp/ppp_synctty.c ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
skb               515 drivers/net/ppp/ppp_synctty.c 	data  = skb->data;
skb               526 drivers/net/ppp/ppp_synctty.c 		skb_pull(skb,1);
skb               530 drivers/net/ppp/ppp_synctty.c 		if (skb_headroom(skb) < 2) {
skb               531 drivers/net/ppp/ppp_synctty.c 			struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
skb               533 drivers/net/ppp/ppp_synctty.c 				kfree_skb(skb);
skb               537 drivers/net/ppp/ppp_synctty.c 			skb_copy_from_linear_data(skb,
skb               538 drivers/net/ppp/ppp_synctty.c 				      skb_put(npkt, skb->len), skb->len);
skb               539 drivers/net/ppp/ppp_synctty.c 			consume_skb(skb);
skb               540 drivers/net/ppp/ppp_synctty.c 			skb = npkt;
skb               542 drivers/net/ppp/ppp_synctty.c 		skb_push(skb,2);
skb               543 drivers/net/ppp/ppp_synctty.c 		skb->data[0] = PPP_ALLSTATIONS;
skb               544 drivers/net/ppp/ppp_synctty.c 		skb->data[1] = PPP_UI;
skb               549 drivers/net/ppp/ppp_synctty.c 	if (skb && ap->flags & SC_LOG_OUTPKT)
skb               550 drivers/net/ppp/ppp_synctty.c 		ppp_print_buffer ("send buffer", skb->data, skb->len);
skb               552 drivers/net/ppp/ppp_synctty.c 	return skb;
skb               566 drivers/net/ppp/ppp_synctty.c ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
skb               574 drivers/net/ppp/ppp_synctty.c 	skb = ppp_sync_txmunge(ap, skb);
skb               575 drivers/net/ppp/ppp_synctty.c 	if (skb != NULL)
skb               576 drivers/net/ppp/ppp_synctty.c 		ap->tpkt = skb;
skb               670 drivers/net/ppp/ppp_synctty.c 	struct sk_buff *skb;
skb               680 drivers/net/ppp/ppp_synctty.c 	skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
skb               681 drivers/net/ppp/ppp_synctty.c 	if (!skb) {
skb               687 drivers/net/ppp/ppp_synctty.c 		skb_reserve(skb, 2 + (buf[0] & 1));
skb               692 drivers/net/ppp/ppp_synctty.c 	} else if (count > skb_tailroom(skb)) {
skb               697 drivers/net/ppp/ppp_synctty.c 	skb_put_data(skb, buf, count);
skb               700 drivers/net/ppp/ppp_synctty.c 	p = skb->data;
skb               703 drivers/net/ppp/ppp_synctty.c 		if (skb->len < 3)
skb               705 drivers/net/ppp/ppp_synctty.c 		p = skb_pull(skb, 2);
skb               711 drivers/net/ppp/ppp_synctty.c 	if (!(p[0] & 0x01) && skb->len < 2)
skb               715 drivers/net/ppp/ppp_synctty.c 	skb_queue_tail(&ap->rqueue, skb);
skb               720 drivers/net/ppp/ppp_synctty.c 	if (skb || (skb = dev_alloc_skb(0))) {
skb               721 drivers/net/ppp/ppp_synctty.c 		skb_trim(skb, 0);
skb               722 drivers/net/ppp/ppp_synctty.c 		skb_queue_tail(&ap->rqueue, skb);
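
Note: ppp_sync_txmunge() in ppp_synctty.c shows the standard copy-on-insufficient-headroom dance before prepending the HDLC address/control pair. A sketch assembled from the indexed lines; PPP_ALLSTATIONS (0xff) and PPP_UI (0x03) are the real constants from <linux/ppp_defs.h>:

	#include <linux/netdevice.h>
	#include <linux/ppp_defs.h>
	#include <linux/skbuff.h>

	static struct sk_buff *my_prepend_ac(struct sk_buff *skb)
	{
		if (skb_headroom(skb) < 2) {
			struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);

			if (!npkt) {
				kfree_skb(skb);		/* drop: no memory for a copy */
				return NULL;
			}
			skb_reserve(npkt, 2);
			skb_copy_from_linear_data(skb, skb_put(npkt, skb->len),
						  skb->len);
			consume_skb(skb);		/* not an error path, so not kfree_skb */
			skb = npkt;
		}
		skb_push(skb, 2);
		skb->data[0] = PPP_ALLSTATIONS;		/* HDLC address */
		skb->data[1] = PPP_UI;			/* HDLC control */
		return skb;
	}
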
skb                87 drivers/net/ppp/pppoe.c static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
skb               367 drivers/net/ppp/pppoe.c static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
skb               377 drivers/net/ppp/pppoe.c 	if (skb->pkt_type == PACKET_OTHERHOST)
skb               381 drivers/net/ppp/pppoe.c 		ppp_input(&po->chan, skb);
skb               391 drivers/net/ppp/pppoe.c 		if (!__pppoe_xmit(sk_pppox(relay_po), skb))
skb               396 drivers/net/ppp/pppoe.c 		if (sock_queue_rcv_skb(sk, skb))
skb               406 drivers/net/ppp/pppoe.c 	kfree_skb(skb);
skb               415 drivers/net/ppp/pppoe.c static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
skb               423 drivers/net/ppp/pppoe.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               424 drivers/net/ppp/pppoe.c 	if (!skb)
skb               427 drivers/net/ppp/pppoe.c 	if (skb_mac_header_len(skb) < ETH_HLEN)
skb               430 drivers/net/ppp/pppoe.c 	if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
skb               433 drivers/net/ppp/pppoe.c 	ph = pppoe_hdr(skb);
skb               436 drivers/net/ppp/pppoe.c 	skb_pull_rcsum(skb, sizeof(*ph));
skb               437 drivers/net/ppp/pppoe.c 	if (skb->len < len)
skb               440 drivers/net/ppp/pppoe.c 	if (pskb_trim_rcsum(skb, len))
skb               443 drivers/net/ppp/pppoe.c 	ph = pppoe_hdr(skb);
skb               449 drivers/net/ppp/pppoe.c 	po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
skb               453 drivers/net/ppp/pppoe.c 	return sk_receive_skb(sk_pppox(po), skb, 0);
skb               456 drivers/net/ppp/pppoe.c 	kfree_skb(skb);
skb               483 drivers/net/ppp/pppoe.c static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
skb               491 drivers/net/ppp/pppoe.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               492 drivers/net/ppp/pppoe.c 	if (!skb)
skb               495 drivers/net/ppp/pppoe.c 	if (skb->pkt_type != PACKET_HOST)
skb               498 drivers/net/ppp/pppoe.c 	if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
skb               501 drivers/net/ppp/pppoe.c 	ph = pppoe_hdr(skb);
skb               506 drivers/net/ppp/pppoe.c 	po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
skb               512 drivers/net/ppp/pppoe.c 	kfree_skb(skb);
skb               841 drivers/net/ppp/pppoe.c 	struct sk_buff *skb;
skb               869 drivers/net/ppp/pppoe.c 	skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
skb               871 drivers/net/ppp/pppoe.c 	if (!skb) {
skb               877 drivers/net/ppp/pppoe.c 	skb_reserve(skb, hlen);
skb               878 drivers/net/ppp/pppoe.c 	skb_reset_network_header(skb);
skb               880 drivers/net/ppp/pppoe.c 	skb->dev = dev;
skb               882 drivers/net/ppp/pppoe.c 	skb->priority = sk->sk_priority;
skb               883 drivers/net/ppp/pppoe.c 	skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
skb               885 drivers/net/ppp/pppoe.c 	ph = skb_put(skb, total_len + sizeof(struct pppoe_hdr));
skb               890 drivers/net/ppp/pppoe.c 		kfree_skb(skb);
skb               895 drivers/net/ppp/pppoe.c 	dev_hard_header(skb, dev, ETH_P_PPP_SES,
skb               902 drivers/net/ppp/pppoe.c 	dev_queue_xmit(skb);
skb               914 drivers/net/ppp/pppoe.c static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
skb               919 drivers/net/ppp/pppoe.c 	int data_len = skb->len;
skb               938 drivers/net/ppp/pppoe.c 	if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
skb               941 drivers/net/ppp/pppoe.c 	__skb_push(skb, sizeof(*ph));
skb               942 drivers/net/ppp/pppoe.c 	skb_reset_network_header(skb);
skb               944 drivers/net/ppp/pppoe.c 	ph = pppoe_hdr(skb);
skb               951 drivers/net/ppp/pppoe.c 	skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
skb               952 drivers/net/ppp/pppoe.c 	skb->dev = dev;
skb               954 drivers/net/ppp/pppoe.c 	dev_hard_header(skb, dev, ETH_P_PPP_SES,
skb               957 drivers/net/ppp/pppoe.c 	dev_queue_xmit(skb);
skb               961 drivers/net/ppp/pppoe.c 	kfree_skb(skb);
skb               971 drivers/net/ppp/pppoe.c static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
skb               974 drivers/net/ppp/pppoe.c 	return __pppoe_xmit(sk, skb);
skb               985 drivers/net/ppp/pppoe.c 	struct sk_buff *skb;
skb               993 drivers/net/ppp/pppoe.c 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
skb               998 drivers/net/ppp/pppoe.c 	if (skb) {
skb               999 drivers/net/ppp/pppoe.c 		total_len = min_t(size_t, total_len, skb->len);
skb              1000 drivers/net/ppp/pppoe.c 		error = skb_copy_datagram_msg(skb, 0, m, total_len);
skb              1002 drivers/net/ppp/pppoe.c 			consume_skb(skb);
skb              1007 drivers/net/ppp/pppoe.c 	kfree_skb(skb);
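
Note: pppoe_rcv() demonstrates checksum-aware header removal: the fixed six-byte PPPoE header is validated with pskb_may_pull(), pulled with skb_pull_rcsum() so CHECKSUM_COMPLETE stays valid, and the frame is trimmed to the advertised payload length with pskb_trim_rcsum() to shed Ethernet padding. A condensed sketch of that sequence, with session lookup and the drop path elided:

	#include <linux/errno.h>
	#include <linux/if_pppox.h>
	#include <linux/skbuff.h>

	static int my_pppoe_strip_header(struct sk_buff *skb)
	{
		struct pppoe_hdr *ph;
		int len;

		if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
			return -EINVAL;

		ph = pppoe_hdr(skb);			/* header sits at the network offset */
		len = ntohs(ph->length);

		skb_pull_rcsum(skb, sizeof(*ph));	/* keeps CHECKSUM_COMPLETE coherent */
		if (skb->len < len)			/* truncated frame */
			return -EINVAL;
		return pskb_trim_rcsum(skb, len);	/* drop trailing pad bytes */
	}
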
skb               131 drivers/net/ppp/pptp.c static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
skb               166 drivers/net/ppp/pptp.c 	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
skb               167 drivers/net/ppp/pptp.c 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
skb               172 drivers/net/ppp/pptp.c 		if (skb->sk)
skb               173 drivers/net/ppp/pptp.c 			skb_set_owner_w(new_skb, skb->sk);
skb               174 drivers/net/ppp/pptp.c 		consume_skb(skb);
skb               175 drivers/net/ppp/pptp.c 		skb = new_skb;
skb               178 drivers/net/ppp/pptp.c 	data = skb->data;
skb               183 drivers/net/ppp/pptp.c 		skb_pull(skb, 1);
skb               187 drivers/net/ppp/pptp.c 		data = skb_push(skb, 2);
skb               192 drivers/net/ppp/pptp.c 	len = skb->len;
skb               200 drivers/net/ppp/pptp.c 	skb_push(skb, header_len);
skb               201 drivers/net/ppp/pptp.c 	hdr = (struct pptp_gre_header *)(skb->data);
skb               218 drivers/net/ppp/pptp.c 	skb_reset_transport_header(skb);
skb               219 drivers/net/ppp/pptp.c 	skb_push(skb, sizeof(*iph));
skb               220 drivers/net/ppp/pptp.c 	skb_reset_network_header(skb);
skb               221 drivers/net/ppp/pptp.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb               222 drivers/net/ppp/pptp.c 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
skb               224 drivers/net/ppp/pptp.c 	iph =	ip_hdr(skb);
skb               236 drivers/net/ppp/pptp.c 	iph->tot_len  = htons(skb->len);
skb               238 drivers/net/ppp/pptp.c 	skb_dst_drop(skb);
skb               239 drivers/net/ppp/pptp.c 	skb_dst_set(skb, &rt->dst);
skb               241 drivers/net/ppp/pptp.c 	nf_reset_ct(skb);
skb               243 drivers/net/ppp/pptp.c 	skb->ip_summed = CHECKSUM_NONE;
skb               244 drivers/net/ppp/pptp.c 	ip_select_ident(net, skb, NULL);
skb               247 drivers/net/ppp/pptp.c 	ip_local_out(net, skb->sk, skb);
skb               251 drivers/net/ppp/pptp.c 	kfree_skb(skb);
skb               255 drivers/net/ppp/pptp.c static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
skb               264 drivers/net/ppp/pptp.c 		if (sock_queue_rcv_skb(sk, skb))
skb               269 drivers/net/ppp/pptp.c 	header = (struct pptp_gre_header *)(skb->data);
skb               276 drivers/net/ppp/pptp.c 		if (!pskb_may_pull(skb, headersize))
skb               278 drivers/net/ppp/pptp.c 		header = (struct pptp_gre_header *)(skb->data);
skb               301 drivers/net/ppp/pptp.c 	if (!pskb_may_pull(skb, headersize + payload_len))
skb               304 drivers/net/ppp/pptp.c 	payload = skb->data + headersize;
skb               314 drivers/net/ppp/pptp.c 		skb_pull(skb, headersize);
skb               318 drivers/net/ppp/pptp.c 			if (skb->len < 3)
skb               320 drivers/net/ppp/pptp.c 			skb_pull(skb, 2);
skb               323 drivers/net/ppp/pptp.c 		skb->ip_summed = CHECKSUM_NONE;
skb               324 drivers/net/ppp/pptp.c 		skb_set_network_header(skb, skb->head-skb->data);
skb               325 drivers/net/ppp/pptp.c 		ppp_input(&po->chan, skb);
skb               330 drivers/net/ppp/pptp.c 	kfree_skb(skb);
skb               334 drivers/net/ppp/pptp.c static int pptp_rcv(struct sk_buff *skb)
skb               340 drivers/net/ppp/pptp.c 	if (skb->pkt_type != PACKET_HOST)
skb               343 drivers/net/ppp/pptp.c 	if (!pskb_may_pull(skb, 12))
skb               346 drivers/net/ppp/pptp.c 	iph = ip_hdr(skb);
skb               348 drivers/net/ppp/pptp.c 	header = (struct pptp_gre_header *)skb->data;
skb               360 drivers/net/ppp/pptp.c 		skb_dst_drop(skb);
skb               361 drivers/net/ppp/pptp.c 		nf_reset_ct(skb);
skb               362 drivers/net/ppp/pptp.c 		return sk_receive_skb(sk_pppox(po), skb, 0);
skb               365 drivers/net/ppp/pptp.c 	kfree_skb(skb);
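
Note: pptp_xmit() builds the GRE-in-IP encapsulation by pushing headers in reverse order, but first guarantees an exclusive skb with enough headroom; the skb_realloc_headroom()/skb_set_owner_w() entries above are that guard. A sketch under an assumed precomputed max_headroom:

	#include <linux/skbuff.h>
	#include <net/sock.h>

	static struct sk_buff *my_make_writable(struct sk_buff *skb, int max_headroom)
	{
		if (skb_headroom(skb) < max_headroom ||
		    skb_cloned(skb) || skb_shared(skb)) {
			struct sk_buff *new_skb =
				skb_realloc_headroom(skb, max_headroom);

			if (!new_skb) {
				kfree_skb(skb);
				return NULL;
			}
			if (skb->sk)	/* keep socket send-buffer accounting intact */
				skb_set_owner_w(new_skb, skb->sk);
			consume_skb(skb);
			skb = new_skb;
		}
		return skb;	/* now safe to skb_push() GRE and IP headers */
	}
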
skb               145 drivers/net/rionet.c static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
skb               150 drivers/net/rionet.c 	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
skb               151 drivers/net/rionet.c 	rnet->tx_skb[rnet->tx_slot] = skb;
skb               154 drivers/net/rionet.c 	ndev->stats.tx_bytes += skb->len;
skb               164 drivers/net/rionet.c 		       skb->len);
skb               169 drivers/net/rionet.c static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               173 drivers/net/rionet.c 	struct ethhdr *eth = (struct ethhdr *)skb->data;
skb               197 drivers/net/rionet.c 				rionet_queue_tx_msg(skb, ndev,
skb               200 drivers/net/rionet.c 					refcount_inc(&skb->users);
skb               206 drivers/net/rionet.c 			rionet_queue_tx_msg(skb, ndev,
skb               216 drivers/net/rionet.c 			ndev->stats.tx_bytes += skb->len;
skb               217 drivers/net/rionet.c 			dev_kfree_skb_any(skb);
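
Note: rionet_start_xmit() may queue one skb to several RapidIO peers for a broadcast frame; the refcount_inc(&skb->users) entry above is what makes the shared submission safe, since every TX completion drops exactly one reference. A schematic of that accounting, with peer iteration left abstract:

	#include <linux/skbuff.h>

	static void my_broadcast(struct sk_buff *skb, unsigned int npeers)
	{
		unsigned int i;

		for (i = 0; i < npeers; i++) {
			if (i + 1 < npeers)	/* extra reference per extra queueing */
				refcount_inc(&skb->users);
			/* ... enqueue skb to peer i; its TX completion will
			 *     call dev_kfree_skb() exactly once ... */
		}
	}
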
skb                82 drivers/net/sb1000.c static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb,
skb               745 drivers/net/sb1000.c 	struct sk_buff *skb;
skb               791 drivers/net/sb1000.c 	skb = lp->rx_skb[ns];
skb               795 drivers/net/sb1000.c 			skb ? session_id : session_id | 0x40, frame_id);
skb               796 drivers/net/sb1000.c 	if (skb) {
skb               797 drivers/net/sb1000.c 		dev_kfree_skb(skb);
skb               798 drivers/net/sb1000.c 		skb = NULL;
skb               832 drivers/net/sb1000.c 		if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) {
skb               840 drivers/net/sb1000.c 		skb->dev = dev;
skb               841 drivers/net/sb1000.c 		skb_reset_mac_header(skb);
skb               842 drivers/net/sb1000.c 		skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16];
skb               843 drivers/net/sb1000.c 		insw(ioaddr, skb_put(skb, NewDatagramDataSize),
skb               845 drivers/net/sb1000.c 		lp->rx_skb[ns] = skb;
skb               858 drivers/net/sb1000.c 		skb = lp->rx_skb[ns];
skb               859 drivers/net/sb1000.c 		insw(ioaddr, skb_put(skb, ContDatagramDataSize),
skb               863 drivers/net/sb1000.c 	if (skb->len < dlen + TrailerSize) {
skb               869 drivers/net/sb1000.c 	skb_trim(skb, dlen);
skb               870 drivers/net/sb1000.c 	netif_rx(skb);
skb               889 drivers/net/sb1000.c 		if ((skb = lp->rx_skb[ns])) {
skb               890 drivers/net/sb1000.c 			dev_kfree_skb(skb);
skb              1076 drivers/net/sb1000.c sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1080 drivers/net/sb1000.c 	dev_kfree_skb(skb);
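
Note: sb1000.c reassembles a datagram across "new" and "continuation" hardware frames held in lp->rx_skb[ns], appending with skb_put() and finally trimming the trailer with skb_trim() before netif_rx(). A compressed sketch of the append-and-finish steps; the insw() port I/O that sources the bytes is abstracted away:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void my_append_fragment(struct sk_buff *skb, const void *buf,
				       unsigned int len)
	{
		skb_put_data(skb, buf, len);	/* skb_put() plus memcpy in one call */
	}

	static void my_finish_datagram(struct net_device *dev, struct sk_buff *skb,
				       unsigned int dlen)
	{
		skb_trim(skb, dlen);		/* cut the hardware trailer */
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += dlen;
	}
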
skb               322 drivers/net/slip/slip.c 	struct sk_buff *skb;
skb               360 drivers/net/slip/slip.c 	skb = dev_alloc_skb(count);
skb               361 drivers/net/slip/slip.c 	if (skb == NULL) {
skb               366 drivers/net/slip/slip.c 	skb->dev = dev;
skb               367 drivers/net/slip/slip.c 	skb_put_data(skb, sl->rbuff, count);
skb               368 drivers/net/slip/slip.c 	skb_reset_mac_header(skb);
skb               369 drivers/net/slip/slip.c 	skb->protocol = htons(ETH_P_IP);
skb               370 drivers/net/slip/slip.c 	netif_rx_ni(skb);
skb               501 drivers/net/slip/slip.c sl_xmit(struct sk_buff *skb, struct net_device *dev)
skb               509 drivers/net/slip/slip.c 		dev_kfree_skb(skb);
skb               514 drivers/net/slip/slip.c 		dev_kfree_skb(skb);
skb               519 drivers/net/slip/slip.c 	dev->stats.tx_bytes += skb->len;
skb               520 drivers/net/slip/slip.c 	sl_encaps(sl, skb->data, skb->len);
skb               523 drivers/net/slip/slip.c 	dev_kfree_skb(skb);
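
Note: sl_bump() in slip.c is the simplest receive path in this group: SLIP carries bare IP with no link-layer type field, so the protocol is hard-coded to ETH_P_IP. A sketch matching the indexed lines, with the decoded buffer passed in:

	#include <linux/if_ether.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void my_slip_bump(struct net_device *dev, const u8 *rbuff, int count)
	{
		struct sk_buff *skb = dev_alloc_skb(count);

		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb->dev = dev;
		skb_put_data(skb, rbuff, count);
		skb_reset_mac_header(skb);		/* there is no MAC header */
		skb->protocol = htons(ETH_P_IP);	/* SLIP frames are always IP */
		netif_rx_ni(skb);			/* tty delivery, process context */
	}
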
skb               253 drivers/net/tap.c 				       struct sk_buff *skb)
skb               271 drivers/net/tap.c 	rxq = skb_get_hash(skb);
skb               277 drivers/net/tap.c 	if (likely(skb_rx_queue_recorded(skb))) {
skb               278 drivers/net/tap.c 		rxq = skb_get_rx_queue(skb);
skb               320 drivers/net/tap.c 	struct sk_buff *skb = *pskb;
skb               321 drivers/net/tap.c 	struct net_device *dev = skb->dev;
skb               330 drivers/net/tap.c 	q = tap_get_queue(tap, skb);
skb               334 drivers/net/tap.c 	skb_push(skb, ETH_HLEN);
skb               342 drivers/net/tap.c 	if (netif_needs_gso(skb, features)) {
skb               343 drivers/net/tap.c 		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
skb               349 drivers/net/tap.c 			if (ptr_ring_produce(&q->ring, skb))
skb               354 drivers/net/tap.c 		consume_skb(skb);
skb               372 drivers/net/tap.c 		if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb               374 drivers/net/tap.c 		    skb_checksum_help(skb))
skb               376 drivers/net/tap.c 		if (ptr_ring_produce(&q->ring, skb))
skb               388 drivers/net/tap.c 	kfree_skb(skb);
skb               599 drivers/net/tap.c 	struct sk_buff *skb;
skb               605 drivers/net/tap.c 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
skb               607 drivers/net/tap.c 	if (!skb)
skb               610 drivers/net/tap.c 	skb_reserve(skb, prepad);
skb               611 drivers/net/tap.c 	skb_put(skb, linear);
skb               612 drivers/net/tap.c 	skb->data_len = len - linear;
skb               613 drivers/net/tap.c 	skb->len += len - linear;
skb               615 drivers/net/tap.c 	return skb;
skb               626 drivers/net/tap.c 	struct sk_buff *skb;
skb               691 drivers/net/tap.c 	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
skb               693 drivers/net/tap.c 	if (!skb)
skb               697 drivers/net/tap.c 		err = zerocopy_sg_from_iter(skb, from);
skb               699 drivers/net/tap.c 		err = skb_copy_datagram_from_iter(skb, 0, from, len);
skb               704 drivers/net/tap.c 	skb_set_network_header(skb, ETH_HLEN);
skb               705 drivers/net/tap.c 	skb_reset_mac_header(skb);
skb               706 drivers/net/tap.c 	skb->protocol = eth_hdr(skb)->h_proto;
skb               709 drivers/net/tap.c 		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
skb               715 drivers/net/tap.c 	skb_probe_transport_header(skb);
skb               718 drivers/net/tap.c 	if ((skb->protocol == htons(ETH_P_8021Q) ||
skb               719 drivers/net/tap.c 	     skb->protocol == htons(ETH_P_8021AD)) &&
skb               720 drivers/net/tap.c 	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb               721 drivers/net/tap.c 		skb_set_network_header(skb, depth);
skb               727 drivers/net/tap.c 		skb_shinfo(skb)->destructor_arg = msg_control;
skb               728 drivers/net/tap.c 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
skb               729 drivers/net/tap.c 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
skb               736 drivers/net/tap.c 		skb->dev = tap->dev;
skb               737 drivers/net/tap.c 		dev_queue_xmit(skb);
skb               739 drivers/net/tap.c 		kfree_skb(skb);
skb               746 drivers/net/tap.c 	kfree_skb(skb);
skb               768 drivers/net/tap.c 			    const struct sk_buff *skb,
skb               777 drivers/net/tap.c 		int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
skb               784 drivers/net/tap.c 		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
skb               796 drivers/net/tap.c 	total += skb->len;
skb               798 drivers/net/tap.c 	if (skb_vlan_tag_present(skb)) {
skb               803 drivers/net/tap.c 		veth.h_vlan_proto = skb->vlan_proto;
skb               804 drivers/net/tap.c 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
skb               809 drivers/net/tap.c 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
skb               818 drivers/net/tap.c 	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
skb               819 drivers/net/tap.c 				     skb->len - vlan_offset);
skb               827 drivers/net/tap.c 			   int noblock, struct sk_buff *skb)
skb               833 drivers/net/tap.c 		kfree_skb(skb);
skb               837 drivers/net/tap.c 	if (skb)
skb               846 drivers/net/tap.c 		skb = ptr_ring_consume(&q->ring);
skb               847 drivers/net/tap.c 		if (skb)
skb               864 drivers/net/tap.c 	if (skb) {
skb               865 drivers/net/tap.c 		ret = tap_put_user(q, skb, to);
skb               867 drivers/net/tap.c 			kfree_skb(skb);
skb               869 drivers/net/tap.c 			consume_skb(skb);
skb              1155 drivers/net/tap.c 	struct sk_buff *skb;
skb              1161 drivers/net/tap.c 	skb = build_skb(xdp->data_hard_start, buflen);
skb              1162 drivers/net/tap.c 	if (!skb) {
skb              1167 drivers/net/tap.c 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb              1168 drivers/net/tap.c 	skb_put(skb, xdp->data_end - xdp->data);
skb              1170 drivers/net/tap.c 	skb_set_network_header(skb, ETH_HLEN);
skb              1171 drivers/net/tap.c 	skb_reset_mac_header(skb);
skb              1172 drivers/net/tap.c 	skb->protocol = eth_hdr(skb)->h_proto;
skb              1175 drivers/net/tap.c 		err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
skb              1181 drivers/net/tap.c 	if ((skb->protocol == htons(ETH_P_8021Q) ||
skb              1182 drivers/net/tap.c 	     skb->protocol == htons(ETH_P_8021AD)) &&
skb              1183 drivers/net/tap.c 	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb              1184 drivers/net/tap.c 		skb_set_network_header(skb, depth);
skb              1189 drivers/net/tap.c 		skb->dev = tap->dev;
skb              1190 drivers/net/tap.c 		skb_probe_transport_header(skb);
skb              1191 drivers/net/tap.c 		dev_queue_xmit(skb);
skb              1193 drivers/net/tap.c 		kfree_skb(skb);
skb              1200 drivers/net/tap.c 	kfree_skb(skb);
skb              1234 drivers/net/tap.c 	struct sk_buff *skb = m->msg_control;
skb              1237 drivers/net/tap.c 		kfree_skb(skb);
skb              1240 drivers/net/tap.c 	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
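
Note: tap_put_user() has to re-materialize a VLAN tag that the stack keeps out-of-band: it copies the frame up to the tag insertion point, emits a synthesized proto/TCI pair, then copies the remainder. A sketch of that splice, assuming the iterator already points at userspace memory; the local struct mirrors the indexed veth usage:

	#include <linux/errno.h>
	#include <linux/if_vlan.h>
	#include <linux/skbuff.h>
	#include <linux/uio.h>

	static ssize_t my_copy_with_vlan(const struct sk_buff *skb,
					 struct iov_iter *iter)
	{
		int vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		int ret;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret)
			return ret;
		if (copy_to_iter(&veth, sizeof(veth), iter) != sizeof(veth))
			return -EFAULT;			/* short copy to userspace */
		ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
					     skb->len - vlan_offset);
		return ret ? ret : skb->len + VLAN_HLEN;
	}
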
skb               498 drivers/net/team/team.c static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
skb               500 drivers/net/team/team.c 	dev_kfree_skb_any(skb);
skb               506 drivers/net/team/team.c 					      struct sk_buff *skb)
skb               722 drivers/net/team/team.c 	struct sk_buff *skb = *pskb;
skb               727 drivers/net/team/team.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               728 drivers/net/team/team.c 	if (!skb)
skb               731 drivers/net/team/team.c 	*pskb = skb;
skb               733 drivers/net/team/team.c 	port = team_port_get_rcu(skb->dev);
skb               739 drivers/net/team/team.c 		res = team->ops.receive(team, port, skb);
skb               747 drivers/net/team/team.c 		pcpu_stats->rx_bytes += skb->len;
skb               748 drivers/net/team/team.c 		if (skb->pkt_type == PACKET_MULTICAST)
skb               752 drivers/net/team/team.c 		skb->dev = team->dev;
skb               798 drivers/net/team/team.c static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
skb               803 drivers/net/team/team.c 	if (!team->queue_override_enabled || !skb->queue_mapping)
skb               805 drivers/net/team/team.c 	qom_list = __team_get_qom_list(team, skb->queue_mapping);
skb               807 drivers/net/team/team.c 		if (!team_dev_queue_xmit(team, port, skb))
skb              1703 drivers/net/team/team.c static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1707 drivers/net/team/team.c 	unsigned int len = skb->len;
skb              1709 drivers/net/team/team.c 	tx_success = team_queue_override_transmit(team, skb);
skb              1711 drivers/net/team/team.c 		tx_success = team->ops.transmit(team, skb);
skb              1727 drivers/net/team/team.c static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              1736 drivers/net/team/team.c 	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
skb              1741 drivers/net/team/team.c 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
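
Note: team.c's datapath hinges on the rx_handler contract: the handler may replace the skb (so it writes *pskb back after skb_share_check()) and re-parents accepted frames onto the aggregate device before returning RX_HANDLER_ANOTHER. A skeletal sketch; my_team_dev stands in for the master device the real handler derives from the port:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static struct net_device *my_team_dev;	/* assumed: set at handler registration */

	static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
	{
		struct sk_buff *skb = skb_share_check(*pskb, GFP_ATOMIC);

		if (!skb)
			return RX_HANDLER_CONSUMED;	/* clone failed, frame gone */
		*pskb = skb;				/* caller must see any new skb */

		/* ... the mode's receive() hook would inspect or veto here ... */

		skb->dev = my_team_dev;			/* re-parent onto the team device */
		return RX_HANDLER_ANOTHER;		/* reinject as if received on it */
	}
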
skb              2251 drivers/net/team/team.c static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
skb              2311 drivers/net/team/team.c typedef int team_nl_send_func_t(struct sk_buff *skb,
skb              2314 drivers/net/team/team.c static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
skb              2316 drivers/net/team/team.c 	return genlmsg_unicast(dev_net(team->dev), skb, portid);
skb              2319 drivers/net/team/team.c static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
skb              2333 drivers/net/team/team.c 	option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
skb              2337 drivers/net/team/team.c 	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
skb              2340 drivers/net/team/team.c 	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
skb              2344 drivers/net/team/team.c 	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
skb              2350 drivers/net/team/team.c 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
skb              2352 drivers/net/team/team.c 		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
skb              2356 drivers/net/team/team.c 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
skb              2358 drivers/net/team/team.c 		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
skb              2363 drivers/net/team/team.c 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
skb              2365 drivers/net/team/team.c 		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
skb              2370 drivers/net/team/team.c 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
skb              2373 drivers/net/team/team.c 		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
skb              2377 drivers/net/team/team.c 		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
skb              2379 drivers/net/team/team.c 		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
skb              2385 drivers/net/team/team.c 	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
skb              2388 drivers/net/team/team.c 		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
skb              2392 drivers/net/team/team.c 	nla_nest_end(skb, option_item);
skb              2396 drivers/net/team/team.c 	nla_nest_cancel(skb, option_item);
skb              2426 drivers/net/team/team.c 	struct sk_buff *skb = NULL;
skb              2434 drivers/net/team/team.c 	err = __send_and_alloc_skb(&skb, team, portid, send_func);
skb              2438 drivers/net/team/team.c 	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
skb              2441 drivers/net/team/team.c 		nlmsg_free(skb);
skb              2445 drivers/net/team/team.c 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
skb              2447 drivers/net/team/team.c 	option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
skb              2454 drivers/net/team/team.c 		err = team_nl_fill_one_option_get(skb, team, opt_inst);
skb              2467 drivers/net/team/team.c 	nla_nest_end(skb, option_list);
skb              2468 drivers/net/team/team.c 	genlmsg_end(skb, hdr);
skb              2473 drivers/net/team/team.c 	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
skb              2475 drivers/net/team/team.c 		err = __send_and_alloc_skb(&skb, team, portid, send_func);
skb              2481 drivers/net/team/team.c 	return send_func(skb, team, portid);
skb              2486 drivers/net/team/team.c 	nlmsg_free(skb);
skb              2490 drivers/net/team/team.c static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
skb              2515 drivers/net/team/team.c static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
skb              2666 drivers/net/team/team.c static int team_nl_fill_one_port_get(struct sk_buff *skb,
skb              2671 drivers/net/team/team.c 	port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
skb              2674 drivers/net/team/team.c 	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
skb              2677 drivers/net/team/team.c 		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
skb              2682 drivers/net/team/team.c 	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
skb              2684 drivers/net/team/team.c 	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
skb              2685 drivers/net/team/team.c 	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
skb              2686 drivers/net/team/team.c 	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
skb              2688 drivers/net/team/team.c 	nla_nest_end(skb, port_item);
skb              2692 drivers/net/team/team.c 	nla_nest_cancel(skb, port_item);
skb              2705 drivers/net/team/team.c 	struct sk_buff *skb = NULL;
skb              2713 drivers/net/team/team.c 	err = __send_and_alloc_skb(&skb, team, portid, send_func);
skb              2717 drivers/net/team/team.c 	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
skb              2720 drivers/net/team/team.c 		nlmsg_free(skb);
skb              2724 drivers/net/team/team.c 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
skb              2726 drivers/net/team/team.c 	port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
skb              2737 drivers/net/team/team.c 		err = team_nl_fill_one_port_get(skb, one_port);
skb              2742 drivers/net/team/team.c 			err = team_nl_fill_one_port_get(skb, port);
skb              2756 drivers/net/team/team.c 	nla_nest_end(skb, port_list);
skb              2757 drivers/net/team/team.c 	genlmsg_end(skb, hdr);
skb              2762 drivers/net/team/team.c 	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
skb              2764 drivers/net/team/team.c 		err = __send_and_alloc_skb(&skb, team, portid, send_func);
skb              2770 drivers/net/team/team.c 	return send_func(skb, team, portid);
skb              2775 drivers/net/team/team.c 	nlmsg_free(skb);
skb              2779 drivers/net/team/team.c static int team_nl_cmd_port_list_get(struct sk_buff *skb,
skb              2840 drivers/net/team/team.c static int team_nl_send_multicast(struct sk_buff *skb,
skb              2844 drivers/net/team/team.c 				       skb, 0, 0, GFP_KERNEL);
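
Note: the generic-netlink dump code above is a textbook use of nested attributes: open a nest, emit attributes, close it, and on any failure cancel the whole nest so the partially built message stays parseable. A minimal sketch using the driver's real TEAM_ATTR_* IDs from the if_team uapi header:

	#include <linux/errno.h>
	#include <linux/if_team.h>
	#include <linux/netdevice.h>
	#include <net/netlink.h>

	static int my_fill_port(struct sk_buff *skb, const struct net_device *pdev)
	{
		struct nlattr *port_item;

		port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
		if (!port_item)
			return -EMSGSIZE;
		if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, pdev->ifindex))
			goto nest_cancel;
		nla_nest_end(skb, port_item);		/* patches the nest length */
		return 0;

	nest_cancel:
		nla_nest_cancel(skb, port_item);	/* unwinds everything since start */
		return -EMSGSIZE;
	}
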
skb                27 drivers/net/team/team_mode_activebackup.c 				      struct sk_buff *skb) {
skb                36 drivers/net/team/team_mode_activebackup.c static bool ab_transmit(struct team *team, struct sk_buff *skb)
skb                43 drivers/net/team/team_mode_activebackup.c 	if (team_dev_queue_xmit(team, active_port, skb))
skb                48 drivers/net/team/team_mode_activebackup.c 	dev_kfree_skb_any(skb);
skb                15 drivers/net/team/team_mode_broadcast.c static bool bc_transmit(struct team *team, struct sk_buff *skb)
skb                26 drivers/net/team/team_mode_broadcast.c 				skb2 = skb_clone(skb, GFP_ATOMIC);
skb                38 drivers/net/team/team_mode_broadcast.c 		ret = !team_dev_queue_xmit(team, last, skb);
skb                18 drivers/net/team/team_mode_loadbalance.c 				      struct sk_buff *skb)
skb                20 drivers/net/team/team_mode_loadbalance.c 	if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
skb                22 drivers/net/team/team_mode_loadbalance.c 		const unsigned char *dest = eth_hdr(skb)->h_dest;
skb               122 drivers/net/team/team_mode_loadbalance.c 						struct sk_buff *skb,
skb               133 drivers/net/team/team_mode_loadbalance.c 						struct sk_buff *skb,
skb               142 drivers/net/team/team_mode_loadbalance.c 	return lb_hash_select_tx_port(team, lb_priv, skb, hash);
skb               191 drivers/net/team/team_mode_loadbalance.c 				    struct sk_buff *skb)
skb               200 drivers/net/team/team_mode_loadbalance.c 	lhash = BPF_PROG_RUN(fp, skb);
skb               222 drivers/net/team/team_mode_loadbalance.c static bool lb_transmit(struct team *team, struct sk_buff *skb)
skb               228 drivers/net/team/team_mode_loadbalance.c 	unsigned int tx_bytes = skb->len;
skb               230 drivers/net/team/team_mode_loadbalance.c 	hash = lb_get_skb_hash(lb_priv, skb);
skb               232 drivers/net/team/team_mode_loadbalance.c 	port = select_tx_port_func(team, lb_priv, skb, hash);
skb               235 drivers/net/team/team_mode_loadbalance.c 	if (team_dev_queue_xmit(team, port, skb))
skb               241 drivers/net/team/team_mode_loadbalance.c 	dev_kfree_skb_any(skb);
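
Note: team_mode_loadbalance picks the egress port from a hash computed by a user-supplied (c)BPF program run over the skb, folded down to eight bits to index the tx-hash table. A sketch of the hashing step; BPF_PROG_RUN is this tree's vintage of today's bpf_prog_run(), and the byte fold is a reconstruction of the driver's xor of the four result bytes:

	#include <linux/filter.h>
	#include <linux/skbuff.h>

	static u8 my_lb_hash(struct bpf_prog *fp, struct sk_buff *skb)
	{
		u32 lhash;

		if (!fp)
			return 0;			/* no classifier loaded */
		lhash = BPF_PROG_RUN(fp, skb);		/* run the attached program */
		/* fold the 32-bit result into the 8-bit table index */
		return (u8)((lhash >> 24) ^ (lhash >> 16) ^ (lhash >> 8) ^ lhash);
	}
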
skb                14 drivers/net/team/team_mode_random.c static bool rnd_transmit(struct team *team, struct sk_buff *skb)
skb                26 drivers/net/team/team_mode_random.c 	if (team_dev_queue_xmit(team, port, skb))
skb                31 drivers/net/team/team_mode_random.c 	dev_kfree_skb_any(skb);
skb                24 drivers/net/team/team_mode_roundrobin.c static bool rr_transmit(struct team *team, struct sk_buff *skb)
skb                37 drivers/net/team/team_mode_roundrobin.c 	if (team_dev_queue_xmit(team, port, skb))
skb                42 drivers/net/team/team_mode_roundrobin.c 	dev_kfree_skb_any(skb);
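
Note: the activebackup, broadcast, random, and roundrobin modes above all implement the same transmit() contract: return true once the skb has been consumed successfully, and free it with dev_kfree_skb_any() on any drop, since the hook can run in hard-IRQ context. A minimal shape with port selection stubbed out (my_pick_port is an assumed selector):

	#include <linux/if_team.h>
	#include <linux/skbuff.h>

	static struct team_port *my_pick_port(struct team *team);	/* assumed */

	static bool my_mode_transmit(struct team *team, struct sk_buff *skb)
	{
		struct team_port *port = my_pick_port(team);

		if (!port)
			goto drop;
		if (team_dev_queue_xmit(team, port, skb))
			return false;	/* xmit path consumed (and freed) the skb */
		return true;

	drop:
		dev_kfree_skb_any(skb);	/* safe from any context, unlike kfree_skb */
		return false;
	}
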
skb               183 drivers/net/thunderbolt.c 	struct sk_buff *skb;
skb               703 drivers/net/thunderbolt.c 	if (net->skb && net->rx_hdr.frame_count) {
skb               719 drivers/net/thunderbolt.c 		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
skb               750 drivers/net/thunderbolt.c 		struct sk_buff *skb = NULL;
skb               783 drivers/net/thunderbolt.c 			dev_kfree_skb_any(net->skb);
skb               784 drivers/net/thunderbolt.c 			net->skb = NULL;
skb               790 drivers/net/thunderbolt.c 		skb = net->skb;
skb               791 drivers/net/thunderbolt.c 		if (!skb) {
skb               792 drivers/net/thunderbolt.c 			skb = build_skb(page_address(page),
skb               794 drivers/net/thunderbolt.c 			if (!skb) {
skb               800 drivers/net/thunderbolt.c 			skb_reserve(skb, hdr_size);
skb               801 drivers/net/thunderbolt.c 			skb_put(skb, frame_size);
skb               803 drivers/net/thunderbolt.c 			net->skb = skb;
skb               805 drivers/net/thunderbolt.c 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb               820 drivers/net/thunderbolt.c 			skb->protocol = eth_type_trans(skb, net->dev);
skb               821 drivers/net/thunderbolt.c 			napi_gro_receive(&net->napi, skb);
skb               822 drivers/net/thunderbolt.c 			net->skb = NULL;
skb               902 drivers/net/thunderbolt.c static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
skb               907 drivers/net/thunderbolt.c 	__wsum wsum = htonl(skb->len - skb_transport_offset(skb));
skb               908 drivers/net/thunderbolt.c 	unsigned int i, len, offset = skb_transport_offset(skb);
skb               909 drivers/net/thunderbolt.c 	__be16 protocol = skb->protocol;
skb               910 drivers/net/thunderbolt.c 	void *data = skb->data;
skb               914 drivers/net/thunderbolt.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               932 drivers/net/thunderbolt.c 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
skb               945 drivers/net/thunderbolt.c 		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);
skb               948 drivers/net/thunderbolt.c 		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
skb               949 drivers/net/thunderbolt.c 				      ip_hdr(skb)->ihl);
skb               951 drivers/net/thunderbolt.c 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
skb               952 drivers/net/thunderbolt.c 			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
skb               953 drivers/net/thunderbolt.c 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
skb               954 drivers/net/thunderbolt.c 			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
skb               958 drivers/net/thunderbolt.c 		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
skb               959 drivers/net/thunderbolt.c 					    ip_hdr(skb)->daddr, 0,
skb               960 drivers/net/thunderbolt.c 					    ip_hdr(skb)->protocol, 0);
skb               961 drivers/net/thunderbolt.c 	} else if (skb_is_gso_v6(skb)) {
skb               962 drivers/net/thunderbolt.c 		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
skb               963 drivers/net/thunderbolt.c 		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb               964 drivers/net/thunderbolt.c 					  &ipv6_hdr(skb)->daddr, 0,
skb               968 drivers/net/thunderbolt.c 		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
skb               969 drivers/net/thunderbolt.c 		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb               970 drivers/net/thunderbolt.c 					  &ipv6_hdr(skb)->daddr, 0,
skb               971 drivers/net/thunderbolt.c 					  ipv6_hdr(skb)->nexthdr, 0);
skb              1002 drivers/net/thunderbolt.c static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
skb              1005 drivers/net/thunderbolt.c 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
skb              1011 drivers/net/thunderbolt.c static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
skb              1018 drivers/net/thunderbolt.c 	unsigned int len = skb_headlen(skb);
skb              1019 drivers/net/thunderbolt.c 	unsigned int data_len = skb->len;
skb              1022 drivers/net/thunderbolt.c 	void *src = skb->data;
skb              1071 drivers/net/thunderbolt.c 			if (frag < skb_shinfo(skb)->nr_frags) {
skb              1073 drivers/net/thunderbolt.c 				src = tbnet_kmap_frag(skb, frag++, &len);
skb              1108 drivers/net/thunderbolt.c 		if (frag < skb_shinfo(skb)->nr_frags) {
skb              1109 drivers/net/thunderbolt.c 			src = tbnet_kmap_frag(skb, frag++, &len);
skb              1121 drivers/net/thunderbolt.c 	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
skb              1131 drivers/net/thunderbolt.c 	net->stats.tx_bytes += skb->len;
skb              1133 drivers/net/thunderbolt.c 	dev_consume_skb_any(skb);
skb              1141 drivers/net/thunderbolt.c 	dev_kfree_skb_any(skb);
skb               275 drivers/net/tun.c 	struct sk_buff *skb;
skb               284 drivers/net/tun.c 	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
skb               285 drivers/net/tun.c 		napi_gro_receive(napi, skb);
skb               565 drivers/net/tun.c static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
skb               573 drivers/net/tun.c 	txq = __skb_get_hash_symmetric(skb);
skb               586 drivers/net/tun.c static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
skb               598 drivers/net/tun.c 		ret = bpf_prog_run_clear_cb(prog->prog, skb);
skb               603 drivers/net/tun.c static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               611 drivers/net/tun.c 		ret = tun_ebpf_select_queue(tun, skb);
skb               613 drivers/net/tun.c 		ret = tun_automq_select_queue(tun, skb);
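
tun_select_queue() above dispatches to either an eBPF steering program or the automq path, which maps a symmetric flow hash onto the live queue count. A reduced sketch of that mapping follows; numqueues is an assumed snapshot (tun reads it under RCU because queues detach at runtime) and the flow-tracking table is omitted.

/* Sketch: hash-based tx queue selection, flow table omitted. */
#include <linux/kernel.h>
#include <linux/skbuff.h>

static u16 pick_txq(struct sk_buff *skb, u32 numqueues)
{
	u32 hash;

	if (!numqueues)
		return 0;

	/* Symmetric hash: both directions of a flow pick one queue. */
	hash = __skb_get_hash_symmetric(skb);
	return (u16)reciprocal_scale(hash, numqueues);
}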
skb               974 drivers/net/tun.c static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
skb               978 drivers/net/tun.c 	struct ethhdr *eh = (struct ethhdr *) skb->data;
skb               997 drivers/net/tun.c static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
skb              1002 drivers/net/tun.c 	return run_filter(filter, skb);
skb              1031 drivers/net/tun.c static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
skb              1041 drivers/net/tun.c 		rxhash = __skb_get_hash_symmetric(skb);
skb              1050 drivers/net/tun.c 				    struct sk_buff *skb,
skb              1056 drivers/net/tun.c 		len = bpf_prog_run_clear_cb(prog->prog, skb);
skb              1062 drivers/net/tun.c static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1065 drivers/net/tun.c 	int txq = skb->queue_mapping;
skb              1067 drivers/net/tun.c 	int len = skb->len;
skb              1077 drivers/net/tun.c 		tun_automq_xmit(tun, skb);
skb              1079 drivers/net/tun.c 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
skb              1086 drivers/net/tun.c 	if (!check_filter(&tun->txflt, skb))
skb              1090 drivers/net/tun.c 	    sk_filter(tfile->socket.sk, skb))
skb              1093 drivers/net/tun.c 	len = run_ebpf_filter(tun, skb, len);
skb              1094 drivers/net/tun.c 	if (len == 0 || pskb_trim(skb, len))
skb              1097 drivers/net/tun.c 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
skb              1100 drivers/net/tun.c 	skb_tx_timestamp(skb);
skb              1105 drivers/net/tun.c 	skb_orphan(skb);
skb              1107 drivers/net/tun.c 	nf_reset_ct(skb);
skb              1109 drivers/net/tun.c 	if (ptr_ring_produce(&tfile->tx_ring, skb))
skb              1122 drivers/net/tun.c 	skb_tx_error(skb);
skb              1123 drivers/net/tun.c 	kfree_skb(skb);
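
The tail of tun_net_xmit() above shows the drop-path convention: the skb is always consumed, tx_dropped accounted, and skb_tx_error() notifies zerocopy senders. A stripped sketch of that shape follows; some_filter_rejects() is a hypothetical stand-in for the tap/socket/eBPF filters.

/* Sketch of the ndo_start_xmit drop path; filter hook is hypothetical. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool some_filter_rejects(struct sk_buff *skb)
{
	return false;	/* placeholder for tap/socket/eBPF filter checks */
}

static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (some_filter_rejects(skb))
		goto drop;

	skb_tx_timestamp(skb);		/* after filtering, before queueing */
	/* ... hand the skb to the backend ring here ... */
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	skb_tx_error(skb);		/* tell zerocopy senders about the loss */
	kfree_skb(skb);
	return NET_XMIT_DROP;
}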
skb              1466 drivers/net/tun.c 	struct sk_buff *skb;
skb              1475 drivers/net/tun.c 	skb = napi_get_frags(&tfile->napi);
skb              1477 drivers/net/tun.c 	if (!skb)
skb              1481 drivers/net/tun.c 	err = __skb_grow(skb, linear);
skb              1485 drivers/net/tun.c 	skb->len = len;
skb              1486 drivers/net/tun.c 	skb->data_len = len - linear;
skb              1487 drivers/net/tun.c 	skb->truesize += skb->data_len;
skb              1504 drivers/net/tun.c 		skb_fill_page_desc(skb, i - 1, page,
skb              1508 drivers/net/tun.c 	return skb;
skb              1522 drivers/net/tun.c 	struct sk_buff *skb;
skb              1529 drivers/net/tun.c 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
skb              1531 drivers/net/tun.c 	if (!skb)
skb              1534 drivers/net/tun.c 	skb_reserve(skb, prepad);
skb              1535 drivers/net/tun.c 	skb_put(skb, linear);
skb              1536 drivers/net/tun.c 	skb->data_len = len - linear;
skb              1537 drivers/net/tun.c 	skb->len += len - linear;
skb              1539 drivers/net/tun.c 	return skb;
skb              1543 drivers/net/tun.c 			   struct sk_buff *skb, int more)
skb              1552 drivers/net/tun.c 		skb_record_rx_queue(skb, tfile->queue_index);
skb              1553 drivers/net/tun.c 		netif_receive_skb(skb);
skb              1564 drivers/net/tun.c 		__skb_queue_tail(queue, skb);
skb              1576 drivers/net/tun.c 		skb_record_rx_queue(skb, tfile->queue_index);
skb              1577 drivers/net/tun.c 		netif_receive_skb(skb);
skb              1608 drivers/net/tun.c 	struct sk_buff *skb = build_skb(buf, buflen);
skb              1610 drivers/net/tun.c 	if (!skb)
skb              1613 drivers/net/tun.c 	skb_reserve(skb, pad);
skb              1614 drivers/net/tun.c 	skb_put(skb, len);
skb              1615 drivers/net/tun.c 	skb_set_owner_w(skb, tfile->socket.sk);
skb              1620 drivers/net/tun.c 	return skb;
skb              1749 drivers/net/tun.c 	struct sk_buff *skb;
skb              1820 drivers/net/tun.c 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
skb              1821 drivers/net/tun.c 		if (IS_ERR(skb)) {
skb              1823 drivers/net/tun.c 			return PTR_ERR(skb);
skb              1825 drivers/net/tun.c 		if (!skb)
skb              1838 drivers/net/tun.c 			skb = tun_napi_alloc_frags(tfile, copylen, from);
skb              1845 drivers/net/tun.c 			skb = tun_alloc_skb(tfile, align, copylen, linear,
skb              1849 drivers/net/tun.c 		if (IS_ERR(skb)) {
skb              1850 drivers/net/tun.c 			if (PTR_ERR(skb) != -EAGAIN)
skb              1854 drivers/net/tun.c 			return PTR_ERR(skb);
skb              1858 drivers/net/tun.c 			err = zerocopy_sg_from_iter(skb, from);
skb              1860 drivers/net/tun.c 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
skb              1866 drivers/net/tun.c 			kfree_skb(skb);
skb              1868 drivers/net/tun.c 				tfile->napi.skb = NULL;
skb              1876 drivers/net/tun.c 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
skb              1878 drivers/net/tun.c 		kfree_skb(skb);
skb              1880 drivers/net/tun.c 			tfile->napi.skb = NULL;
skb              1890 drivers/net/tun.c 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
skb              1901 drivers/net/tun.c 				kfree_skb(skb);
skb              1906 drivers/net/tun.c 		skb_reset_mac_header(skb);
skb              1907 drivers/net/tun.c 		skb->protocol = pi.proto;
skb              1908 drivers/net/tun.c 		skb->dev = tun->dev;
skb              1911 drivers/net/tun.c 		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
skb              1915 drivers/net/tun.c 		skb->protocol = eth_type_trans(skb, tun->dev);
skb              1921 drivers/net/tun.c 		skb_shinfo(skb)->destructor_arg = msg_control;
skb              1922 drivers/net/tun.c 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
skb              1923 drivers/net/tun.c 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
skb              1929 drivers/net/tun.c 	skb_reset_network_header(skb);
skb              1930 drivers/net/tun.c 	skb_probe_transport_header(skb);
skb              1931 drivers/net/tun.c 	skb_record_rx_queue(skb, tfile->queue_index);
skb              1941 drivers/net/tun.c 			ret = do_xdp_generic(xdp_prog, skb);
skb              1946 drivers/net/tun.c 					tfile->napi.skb = NULL;
skb              1962 drivers/net/tun.c 		rxhash = __skb_get_hash_symmetric(skb);
skb              1975 drivers/net/tun.c 		skb_push(skb, ETH_HLEN);
skb              1976 drivers/net/tun.c 		headlen = eth_get_headlen(tun->dev, skb->data,
skb              1977 drivers/net/tun.c 					  skb_headlen(skb));
skb              1979 drivers/net/tun.c 		if (unlikely(headlen > skb_headlen(skb))) {
skb              1997 drivers/net/tun.c 		__skb_queue_tail(queue, skb);
skb              2006 drivers/net/tun.c 		tun_rx_batched(tun, tfile, skb, more);
skb              2008 drivers/net/tun.c 		netif_rx_ni(skb);
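
In the IFF_TUN branch of tun_get_user() above there is no Ethernet header and (without packet info) no protocol field, so the protocol is guessed from the IP version nibble of the first payload byte. That check in isolation:

/* Sketch: derive skb->protocol from the IP version nibble. */
#include <linux/if_ether.h>
#include <linux/skbuff.h>

static __be16 guess_l3_proto(const struct sk_buff *skb)
{
	u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

	switch (ip_version) {
	case 4:
		return htons(ETH_P_IP);
	case 6:
		return htons(ETH_P_IPV6);
	default:
		return 0;	/* caller drops the frame */
	}
}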
skb              2079 drivers/net/tun.c 			    struct sk_buff *skb,
skb              2082 drivers/net/tun.c 	struct tun_pi pi = { 0, skb->protocol };
skb              2089 drivers/net/tun.c 	if (skb_vlan_tag_present(skb))
skb              2095 drivers/net/tun.c 	total = skb->len + vlan_hlen + vnet_hdr_sz;
skb              2117 drivers/net/tun.c 		if (virtio_net_hdr_from_skb(skb, &gso,
skb              2120 drivers/net/tun.c 			struct skb_shared_info *sinfo = skb_shinfo(skb);
skb              2127 drivers/net/tun.c 				       16, 1, skb->head,
skb              2143 drivers/net/tun.c 		veth.h_vlan_proto = skb->vlan_proto;
skb              2144 drivers/net/tun.c 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
skb              2148 drivers/net/tun.c 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
skb              2157 drivers/net/tun.c 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
skb              2164 drivers/net/tun.c 	stats->tx_bytes += skb->len + vlan_hlen;
skb              2239 drivers/net/tun.c 		struct sk_buff *skb = ptr;
skb              2241 drivers/net/tun.c 		ret = tun_put_user(tun, tfile, skb, to);
skb              2243 drivers/net/tun.c 			kfree_skb(skb);
skb              2245 drivers/net/tun.c 			consume_skb(skb);
skb              2356 drivers/net/tun.c static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              2360 drivers/net/tun.c 	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
skb              2363 drivers/net/tun.c 	    nla_put_u32(skb, IFLA_TUN_OWNER,
skb              2367 drivers/net/tun.c 	    nla_put_u32(skb, IFLA_TUN_GROUP,
skb              2370 drivers/net/tun.c 	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
skb              2372 drivers/net/tun.c 	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
skb              2374 drivers/net/tun.c 	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
skb              2376 drivers/net/tun.c 	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
skb              2380 drivers/net/tun.c 		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
skb              2382 drivers/net/tun.c 		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
skb              2438 drivers/net/tun.c 	struct sk_buff *skb = NULL;
skb              2483 drivers/net/tun.c 	skb = build_skb(xdp->data_hard_start, buflen);
skb              2484 drivers/net/tun.c 	if (!skb) {
skb              2489 drivers/net/tun.c 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb              2490 drivers/net/tun.c 	skb_put(skb, xdp->data_end - xdp->data);
skb              2492 drivers/net/tun.c 	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
skb              2494 drivers/net/tun.c 		kfree_skb(skb);
skb              2499 drivers/net/tun.c 	skb->protocol = eth_type_trans(skb, tun->dev);
skb              2500 drivers/net/tun.c 	skb_reset_network_header(skb);
skb              2501 drivers/net/tun.c 	skb_probe_transport_header(skb);
skb              2502 drivers/net/tun.c 	skb_record_rx_queue(skb, tfile->queue_index);
skb              2505 drivers/net/tun.c 		err = do_xdp_generic(xdp_prog, skb);
skb              2512 drivers/net/tun.c 		rxhash = __skb_get_hash_symmetric(skb);
skb              2514 drivers/net/tun.c 	netif_receive_skb(skb);
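
The tun xdp path above rebuilds an sk_buff around an already-filled XDP buffer: build_skb() over data_hard_start, then the XDP headroom becomes skb headroom and data..data_end becomes the linear area. Just that conversion, sketched; buflen must already include tailroom for skb_shared_info, which is an assumption here.

/* Sketch: xdp_buff -> sk_buff conversion, mirroring the lines above. */
#include <linux/skbuff.h>
#include <net/xdp.h>

static struct sk_buff *xdp_to_skb(struct xdp_buff *xdp, unsigned int buflen)
{
	struct sk_buff *skb = build_skb(xdp->data_hard_start, buflen);

	if (!skb)
		return NULL;

	/* Headroom the XDP program left (or created) becomes skb headroom. */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);
	return skb;
}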
skb              1056 drivers/net/usb/aqc111.c static void aqc111_rx_checksum(struct sk_buff *skb, u64 pkt_desc)
skb              1060 drivers/net/usb/aqc111.c 	skb->ip_summed = CHECKSUM_NONE;
skb              1068 drivers/net/usb/aqc111.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1071 drivers/net/usb/aqc111.c static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb              1084 drivers/net/usb/aqc111.c 	if (!skb)
skb              1087 drivers/net/usb/aqc111.c 	if (skb->len == 0)
skb              1090 drivers/net/usb/aqc111.c 	skb_len = skb->len;
skb              1092 drivers/net/usb/aqc111.c 	skb_trim(skb, skb->len - sizeof(desc_hdr));
skb              1093 drivers/net/usb/aqc111.c 	desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb));
skb              1113 drivers/net/usb/aqc111.c 	pkt_desc_ptr = (u64 *)(skb->data + desc_offset);
skb              1133 drivers/net/usb/aqc111.c 			skb_pull(skb, pkt_len_with_padd);
skb              1140 drivers/net/usb/aqc111.c 		new_skb = skb_clone(skb, GFP_ATOMIC);
skb              1163 drivers/net/usb/aqc111.c 		skb_pull(skb, pkt_len_with_padd);
skb              1177 drivers/net/usb/aqc111.c static struct sk_buff *aqc111_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb              1190 drivers/net/usb/aqc111.c 	tx_desc |= skb->len & AQ_TX_DESC_LEN_MASK;
skb              1193 drivers/net/usb/aqc111.c 	tx_desc |= ((u64)(skb_shinfo(skb)->gso_size & AQ_TX_DESC_MSS_MASK)) <<
skb              1196 drivers/net/usb/aqc111.c 	headroom = (skb->len + sizeof(tx_desc)) % 8;
skb              1200 drivers/net/usb/aqc111.c 	if (((skb->len + sizeof(tx_desc) + padding_size) % frame_size) == 0) {
skb              1206 drivers/net/usb/aqc111.c 	if (vlan_get_tag(skb, &tci) >= 0) {
skb              1213 drivers/net/usb/aqc111.c 	    skb_linearize(skb))
skb              1216 drivers/net/usb/aqc111.c 	headroom = skb_headroom(skb);
skb              1217 drivers/net/usb/aqc111.c 	tailroom = skb_tailroom(skb);
skb              1220 drivers/net/usb/aqc111.c 		new_skb = skb_copy_expand(skb, sizeof(tx_desc),
skb              1222 drivers/net/usb/aqc111.c 		dev_kfree_skb_any(skb);
skb              1223 drivers/net/usb/aqc111.c 		skb = new_skb;
skb              1224 drivers/net/usb/aqc111.c 		if (!skb)
skb              1228 drivers/net/usb/aqc111.c 		skb_put_zero(skb, padding_size);
skb              1230 drivers/net/usb/aqc111.c 	tx_desc_ptr = skb_push(skb, sizeof(tx_desc));
skb              1233 drivers/net/usb/aqc111.c 	usbnet_set_skb_tx_stats(skb, 1, 0);
skb              1235 drivers/net/usb/aqc111.c 	return skb;
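
aqc111_tx_fixup() above is an instance of the usbnet tx_fixup idiom: guarantee headroom for a device descriptor, skb_push() it on, and return the (possibly reallocated) skb. A reduced sketch follows; DESC_LEN and the descriptor contents are placeholders, not the aqc111 layout.

/* Sketch: generic tx_fixup descriptor prepend; layout is hypothetical. */
#include <linux/skbuff.h>
#include <asm/unaligned.h>

#define DESC_LEN 8	/* hypothetical descriptor size */

static struct sk_buff *tx_prepend_desc(struct sk_buff *skb)
{
	u64 desc = skb->len;	/* placeholder contents: just the length */

	/* Reallocate the head if there is no room (or it is shared). */
	if (skb_cow_head(skb, DESC_LEN)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	put_unaligned_le64(desc, skb_push(skb, DESC_LEN));
	return skb;
}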
skb               197 drivers/net/usb/asix.h int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
skb               199 drivers/net/usb/asix.h int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
skb               202 drivers/net/usb/asix.h struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb                87 drivers/net/usb/asix_common.c int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
skb               102 drivers/net/usb/asix_common.c 	if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
skb               104 drivers/net/usb/asix_common.c 		rx->header = get_unaligned_le32(skb->data + offset);
skb               115 drivers/net/usb/asix_common.c 	while (offset + sizeof(u16) <= skb->len) {
skb               119 drivers/net/usb/asix_common.c 			if (skb->len - offset == sizeof(u16)) {
skb               121 drivers/net/usb/asix_common.c 						skb->data + offset);
skb               129 drivers/net/usb/asix_common.c 						skb->data + offset) << 16);
skb               133 drivers/net/usb/asix_common.c 				rx->header = get_unaligned_le32(skb->data +
skb               163 drivers/net/usb/asix_common.c 		if (rx->remaining > skb->len - offset) {
skb               164 drivers/net/usb/asix_common.c 			copy_length = skb->len - offset;
skb               172 drivers/net/usb/asix_common.c 			skb_put_data(rx->ax_skb, skb->data + offset,
skb               183 drivers/net/usb/asix_common.c 	if (skb->len != offset) {
skb               185 drivers/net/usb/asix_common.c 			   skb->len, offset);
skb               193 drivers/net/usb/asix_common.c int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
skb               198 drivers/net/usb/asix_common.c 	return asix_rx_fixup_internal(dev, skb, rx);
skb               216 drivers/net/usb/asix_common.c struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb               220 drivers/net/usb/asix_common.c 	int headroom = skb_headroom(skb);
skb               221 drivers/net/usb/asix_common.c 	int tailroom = skb_tailroom(skb);
skb               226 drivers/net/usb/asix_common.c 	padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
skb               240 drivers/net/usb/asix_common.c 	if (!skb_header_cloned(skb) &&
skb               241 drivers/net/usb/asix_common.c 	    !(padlen && skb_cloned(skb)) &&
skb               246 drivers/net/usb/asix_common.c 			skb->data = memmove(skb->head + 4, skb->data, skb->len);
skb               247 drivers/net/usb/asix_common.c 			skb_set_tail_pointer(skb, skb->len);
skb               252 drivers/net/usb/asix_common.c 		skb2 = skb_copy_expand(skb, 4, padlen, flags);
skb               253 drivers/net/usb/asix_common.c 		dev_kfree_skb_any(skb);
skb               254 drivers/net/usb/asix_common.c 		skb = skb2;
skb               255 drivers/net/usb/asix_common.c 		if (!skb)
skb               259 drivers/net/usb/asix_common.c 	packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;
skb               260 drivers/net/usb/asix_common.c 	ptr = skb_push(skb, 4);
skb               264 drivers/net/usb/asix_common.c 		put_unaligned_le32(padbytes, skb_tail_pointer(skb));
skb               265 drivers/net/usb/asix_common.c 		skb_put(skb, sizeof(padbytes));
skb               268 drivers/net/usb/asix_common.c 	usbnet_set_skb_tx_stats(skb, 1, 0);
skb               269 drivers/net/usb/asix_common.c 	return skb;
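
The 32-bit framing word built near the end of asix_tx_fixup() above stores the frame length in the low half and its one's complement in the high half, so the rx side can validate packet boundaries. That arithmetic in isolation (the caller must have arranged 4 bytes of headroom, as the excerpt's memmove/skb_copy_expand paths do):

/* Sketch: ASIX-style length/complement header push. */
#include <linux/skbuff.h>
#include <asm/unaligned.h>

static void push_asix_header(struct sk_buff *skb)
{
	/* low 16 bits: length; high 16 bits: ~length */
	u32 packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;

	put_unaligned_le32(packet_len, skb_push(skb, 4));
}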
skb               356 drivers/net/usb/ax88172a.c static int ax88172a_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               361 drivers/net/usb/ax88172a.c 	return asix_rx_fixup_internal(dev, skb, rx);
skb              1341 drivers/net/usb/ax88179_178a.c ax88179_rx_checksum(struct sk_buff *skb, u32 *pkt_hdr)
skb              1343 drivers/net/usb/ax88179_178a.c 	skb->ip_summed = CHECKSUM_NONE;
skb              1353 drivers/net/usb/ax88179_178a.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1356 drivers/net/usb/ax88179_178a.c static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb              1365 drivers/net/usb/ax88179_178a.c 	if (skb->len < dev->net->hard_header_len)
skb              1368 drivers/net/usb/ax88179_178a.c 	skb_trim(skb, skb->len - 4);
skb              1369 drivers/net/usb/ax88179_178a.c 	rx_hdr = get_unaligned_le32(skb_tail_pointer(skb));
skb              1373 drivers/net/usb/ax88179_178a.c 	pkt_hdr = (u32 *)(skb->data + hdr_off);
skb              1384 drivers/net/usb/ax88179_178a.c 			skb_pull(skb, (pkt_len + 7) & 0xFFF8);
skb              1391 drivers/net/usb/ax88179_178a.c 			skb_pull(skb, 2);
skb              1392 drivers/net/usb/ax88179_178a.c 			skb->len = pkt_len;
skb              1393 drivers/net/usb/ax88179_178a.c 			skb_set_tail_pointer(skb, pkt_len);
skb              1394 drivers/net/usb/ax88179_178a.c 			skb->truesize = pkt_len + sizeof(struct sk_buff);
skb              1395 drivers/net/usb/ax88179_178a.c 			ax88179_rx_checksum(skb, pkt_hdr);
skb              1399 drivers/net/usb/ax88179_178a.c 		ax_skb = skb_clone(skb, GFP_ATOMIC);
skb              1402 drivers/net/usb/ax88179_178a.c 			ax_skb->data = skb->data + 2;
skb              1411 drivers/net/usb/ax88179_178a.c 		skb_pull(skb, (pkt_len + 7) & 0xFFF8);
skb              1418 drivers/net/usb/ax88179_178a.c ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
skb              1422 drivers/net/usb/ax88179_178a.c 	int mss = skb_shinfo(skb)->gso_size;
skb              1426 drivers/net/usb/ax88179_178a.c 	tx_hdr1 = skb->len;
skb              1428 drivers/net/usb/ax88179_178a.c 	if (((skb->len + 8) % frame_size) == 0)
skb              1431 drivers/net/usb/ax88179_178a.c 	headroom = skb_headroom(skb) - 8;
skb              1433 drivers/net/usb/ax88179_178a.c 	if ((skb_header_cloned(skb) || headroom < 0) &&
skb              1434 drivers/net/usb/ax88179_178a.c 	    pskb_expand_head(skb, headroom < 0 ? 8 : 0, 0, GFP_ATOMIC)) {
skb              1435 drivers/net/usb/ax88179_178a.c 		dev_kfree_skb_any(skb);
skb              1439 drivers/net/usb/ax88179_178a.c 	ptr = skb_push(skb, 8);
skb              1443 drivers/net/usb/ax88179_178a.c 	return skb;
skb               216 drivers/net/usb/catc.c 	struct sk_buff *skb;
skb               243 drivers/net/usb/catc.c 		if (!(skb = dev_alloc_skb(pkt_len)))
skb               246 drivers/net/usb/catc.c 		skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
skb               247 drivers/net/usb/catc.c 		skb_put(skb, pkt_len);
skb               249 drivers/net/usb/catc.c 		skb->protocol = eth_type_trans(skb, catc->netdev);
skb               250 drivers/net/usb/catc.c 		netif_rx(skb);
skb               409 drivers/net/usb/catc.c static netdev_tx_t catc_start_xmit(struct sk_buff *skb,
skb               422 drivers/net/usb/catc.c 		*(__be16 *)tx_buf = cpu_to_be16(skb->len);
skb               424 drivers/net/usb/catc.c 		*(__le16 *)tx_buf = cpu_to_le16(skb->len);
skb               425 drivers/net/usb/catc.c 	skb_copy_from_linear_data(skb, tx_buf + 2, skb->len);
skb               426 drivers/net/usb/catc.c 	catc->tx_ptr += skb->len + 2;
skb               441 drivers/net/usb/catc.c 		catc->netdev->stats.tx_bytes += skb->len;
skb               445 drivers/net/usb/catc.c 	dev_kfree_skb(skb);
skb                48 drivers/net/usb/cdc-phonet.c static netdev_tx_t usbpn_xmit(struct sk_buff *skb, struct net_device *dev)
skb                55 drivers/net/usb/cdc-phonet.c 	if (skb->protocol != htons(ETH_P_PHONET))
skb                61 drivers/net/usb/cdc-phonet.c 	usb_fill_bulk_urb(req, pnd->usb, pnd->tx_pipe, skb->data, skb->len,
skb                62 drivers/net/usb/cdc-phonet.c 				tx_complete, skb);
skb                78 drivers/net/usb/cdc-phonet.c 	dev_kfree_skb(skb);
skb                85 drivers/net/usb/cdc-phonet.c 	struct sk_buff *skb = req->context;
skb                86 drivers/net/usb/cdc-phonet.c 	struct net_device *dev = skb->dev;
skb                93 drivers/net/usb/cdc-phonet.c 		dev->stats.tx_bytes += skb->len;
skb               112 drivers/net/usb/cdc-phonet.c 	dev_kfree_skb_any(skb);
skb               142 drivers/net/usb/cdc-phonet.c 	struct sk_buff *skb;
skb               149 drivers/net/usb/cdc-phonet.c 		skb = pnd->rx_skb;
skb               150 drivers/net/usb/cdc-phonet.c 		if (!skb) {
skb               151 drivers/net/usb/cdc-phonet.c 			skb = pnd->rx_skb = netdev_alloc_skb(dev, 12);
skb               152 drivers/net/usb/cdc-phonet.c 			if (likely(skb)) {
skb               154 drivers/net/usb/cdc-phonet.c 				skb_put_data(skb, page_address(page), 1);
skb               155 drivers/net/usb/cdc-phonet.c 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb               161 drivers/net/usb/cdc-phonet.c 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb               169 drivers/net/usb/cdc-phonet.c 			skb = NULL;
skb               171 drivers/net/usb/cdc-phonet.c 		if (skb) {
skb               172 drivers/net/usb/cdc-phonet.c 			skb->protocol = htons(ETH_P_PHONET);
skb               173 drivers/net/usb/cdc-phonet.c 			skb_reset_mac_header(skb);
skb               174 drivers/net/usb/cdc-phonet.c 			__skb_pull(skb, 1);
skb               175 drivers/net/usb/cdc-phonet.c 			skb->dev = dev;
skb               177 drivers/net/usb/cdc-phonet.c 			dev->stats.rx_bytes += skb->len;
skb               179 drivers/net/usb/cdc-phonet.c 			netif_rx(skb);
skb                47 drivers/net/usb/cdc_eem.c static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb)
skb                57 drivers/net/usb/cdc_eem.c 			skb->data, skb->len, eem_linkcmd_complete, skb);
skb                63 drivers/net/usb/cdc_eem.c 		dev_kfree_skb(skb);
skb                89 drivers/net/usb/cdc_eem.c static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb                93 drivers/net/usb/cdc_eem.c 	u16		len = skb->len;
skb               106 drivers/net/usb/cdc_eem.c 	if (!skb_cloned(skb)) {
skb               107 drivers/net/usb/cdc_eem.c 		int	headroom = skb_headroom(skb);
skb               108 drivers/net/usb/cdc_eem.c 		int	tailroom = skb_tailroom(skb);
skb               116 drivers/net/usb/cdc_eem.c 			skb->data = memmove(skb->head +
skb               118 drivers/net/usb/cdc_eem.c 					skb->data,
skb               119 drivers/net/usb/cdc_eem.c 					skb->len);
skb               120 drivers/net/usb/cdc_eem.c 			skb_set_tail_pointer(skb, len);
skb               125 drivers/net/usb/cdc_eem.c 	skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
skb               129 drivers/net/usb/cdc_eem.c 	dev_kfree_skb_any(skb);
skb               130 drivers/net/usb/cdc_eem.c 	skb = skb2;
skb               134 drivers/net/usb/cdc_eem.c 	crc = crc32_le(~0, skb->data, skb->len);
skb               137 drivers/net/usb/cdc_eem.c 	put_unaligned_le32(crc, skb_put(skb, 4));
skb               144 drivers/net/usb/cdc_eem.c 	len = skb->len;
skb               145 drivers/net/usb/cdc_eem.c 	put_unaligned_le16(BIT(14) | len, skb_push(skb, 2));
skb               149 drivers/net/usb/cdc_eem.c 		put_unaligned_le16(0, skb_put(skb, 2));
skb               151 drivers/net/usb/cdc_eem.c 	return skb;
skb               154 drivers/net/usb/cdc_eem.c static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               173 drivers/net/usb/cdc_eem.c 		if (skb->len < EEM_HEAD)
skb               181 drivers/net/usb/cdc_eem.c 		header = get_unaligned_le16(skb->data);
skb               182 drivers/net/usb/cdc_eem.c 		skb_pull(skb, EEM_HEAD);
skb               214 drivers/net/usb/cdc_eem.c 				if (skb->len < len)
skb               217 drivers/net/usb/cdc_eem.c 				skb2 = skb_clone(skb, GFP_ATOMIC);
skb               272 drivers/net/usb/cdc_eem.c 			if (skb->len < len)
skb               286 drivers/net/usb/cdc_eem.c 			is_last = (len == skb->len);
skb               288 drivers/net/usb/cdc_eem.c 				skb2 = skb;
skb               290 drivers/net/usb/cdc_eem.c 				skb2 = skb_clone(skb, GFP_ATOMIC);
skb               324 drivers/net/usb/cdc_eem.c 		skb_pull(skb, len);
skb               325 drivers/net/usb/cdc_eem.c 	} while (skb->len);
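
eem_tx_fixup() above appends a CRC-32 of the Ethernet frame, computed with crc32_le() seeded with ~0 (EEM can alternatively carry a sentinel when the CRC is deferred). A sketch of the software-CRC variant; the final inversion matching the Ethernet FCS convention is an assumption of this sketch.

/* Sketch: append an Ethernet-FCS-style CRC-32 trailer. */
#include <linux/crc32.h>
#include <linux/skbuff.h>
#include <asm/unaligned.h>

static void eem_append_crc(struct sk_buff *skb)
{
	u32 crc = ~crc32_le(~0, skb->data, skb->len);	/* assumed finalize */

	put_unaligned_le32(crc, skb_put(skb, 4));
}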
skb               470 drivers/net/usb/cdc_ether.c static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               472 drivers/net/usb/cdc_ether.c 	if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02))
skb               475 drivers/net/usb/cdc_ether.c 	skb_reset_mac_header(skb);
skb               476 drivers/net/usb/cdc_ether.c 	ether_addr_copy(eth_hdr(skb)->h_dest, dev->net->dev_addr);
skb               219 drivers/net/usb/cdc_mbim.c static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
skb               232 drivers/net/usb/cdc_mbim.c 	if (skb) {
skb               233 drivers/net/usb/cdc_mbim.c 		if (skb->len <= ETH_HLEN)
skb               242 drivers/net/usb/cdc_mbim.c 		skb_reset_mac_header(skb);
skb               243 drivers/net/usb/cdc_mbim.c 		if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
skb               244 drivers/net/usb/cdc_mbim.c 		    __vlan_get_tag(skb, &tci) == 0) {
skb               245 drivers/net/usb/cdc_mbim.c 			is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
skb               246 drivers/net/usb/cdc_mbim.c 			skb_pull(skb, VLAN_ETH_HLEN);
skb               248 drivers/net/usb/cdc_mbim.c 			is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
skb               249 drivers/net/usb/cdc_mbim.c 			skb_pull(skb, ETH_HLEN);
skb               292 drivers/net/usb/cdc_mbim.c 	skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
skb               297 drivers/net/usb/cdc_mbim.c 	if (skb)
skb               298 drivers/net/usb/cdc_mbim.c 		dev_kfree_skb_any(skb);
skb               368 drivers/net/usb/cdc_mbim.c 	struct sk_buff *skb = NULL;
skb               388 drivers/net/usb/cdc_mbim.c 	skb = netdev_alloc_skb_ip_align(dev->net,  len + ETH_HLEN);
skb               389 drivers/net/usb/cdc_mbim.c 	if (!skb)
skb               393 drivers/net/usb/cdc_mbim.c 	skb_put(skb, ETH_HLEN);
skb               394 drivers/net/usb/cdc_mbim.c 	skb_reset_mac_header(skb);
skb               395 drivers/net/usb/cdc_mbim.c 	eth_hdr(skb)->h_proto = proto;
skb               396 drivers/net/usb/cdc_mbim.c 	eth_zero_addr(eth_hdr(skb)->h_source);
skb               397 drivers/net/usb/cdc_mbim.c 	memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
skb               400 drivers/net/usb/cdc_mbim.c 	skb_put_data(skb, buf, len);
skb               404 drivers/net/usb/cdc_mbim.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
skb               406 drivers/net/usb/cdc_mbim.c 	return skb;
skb               411 drivers/net/usb/cdc_mbim.c 	struct sk_buff *skb;
skb               481 drivers/net/usb/cdc_mbim.c 			skb = cdc_mbim_process_dgram(dev, skb_in->data + offset, len, tci);
skb               482 drivers/net/usb/cdc_mbim.c 			if (!skb)
skb               484 drivers/net/usb/cdc_mbim.c 			usbnet_skb_return(dev, skb);
skb              1045 drivers/net/usb/cdc_ncm.c static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
skb              1047 drivers/net/usb/cdc_ncm.c 	size_t align = ALIGN(skb->len, modulus) - skb->len + remainder;
skb              1049 drivers/net/usb/cdc_ncm.c 	if (skb->len + align > max)
skb              1050 drivers/net/usb/cdc_ncm.c 		align = max - skb->len;
skb              1051 drivers/net/usb/cdc_ncm.c 	if (align && skb_tailroom(skb) >= align)
skb              1052 drivers/net/usb/cdc_ncm.c 		skb_put_zero(skb, align);
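
Worked example of cdc_ncm_align_tail() above: with skb->len == 30, modulus == 4, remainder == 0 and sufficient tailroom, ALIGN(30, 4) - 30 + 0 == 2, so two zero bytes are appended via skb_put_zero() and skb->len becomes 32; if the pad would push past max, it is clipped to max - skb->len instead.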
skb              1058 drivers/net/usb/cdc_ncm.c static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
skb              1061 drivers/net/usb/cdc_ncm.c 	struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
skb              1082 drivers/net/usb/cdc_ncm.c 		ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
skb              1090 drivers/net/usb/cdc_ncm.c 		cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
skb              1093 drivers/net/usb/cdc_ncm.c 	if ((ctx->tx_curr_size - skb->len - reserve) < ctx->max_ndp_size)
skb              1098 drivers/net/usb/cdc_ncm.c 		ndp16->wNextNdpIndex = cpu_to_le16(skb->len);
skb              1100 drivers/net/usb/cdc_ncm.c 		nth16->wNdpIndex = cpu_to_le16(skb->len);
skb              1104 drivers/net/usb/cdc_ncm.c 		ndp16 = skb_put_zero(skb, ctx->max_ndp_size);
skb              1114 drivers/net/usb/cdc_ncm.c cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
skb              1134 drivers/net/usb/cdc_ncm.c 	if (skb != NULL) {
skb              1135 drivers/net/usb/cdc_ncm.c 		swap(skb, ctx->tx_rem_skb);
skb              1165 drivers/net/usb/cdc_ncm.c 			if (skb)
skb              1166 drivers/net/usb/cdc_ncm.c 				ctx->tx_curr_size = max(skb->len,
skb              1174 drivers/net/usb/cdc_ncm.c 				if (skb != NULL) {
skb              1175 drivers/net/usb/cdc_ncm.c 					dev_kfree_skb_any(skb);
skb              1197 drivers/net/usb/cdc_ncm.c 		if (skb == NULL) {
skb              1198 drivers/net/usb/cdc_ncm.c 			skb = ctx->tx_rem_skb;
skb              1203 drivers/net/usb/cdc_ncm.c 			if (skb == NULL)
skb              1208 drivers/net/usb/cdc_ncm.c 		ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
skb              1214 drivers/net/usb/cdc_ncm.c 		if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_curr_size) {
skb              1217 drivers/net/usb/cdc_ncm.c 				dev_kfree_skb_any(skb);
skb              1218 drivers/net/usb/cdc_ncm.c 				skb = NULL;
skb              1226 drivers/net/usb/cdc_ncm.c 				ctx->tx_rem_skb = skb;
skb              1228 drivers/net/usb/cdc_ncm.c 				skb = NULL;
skb              1240 drivers/net/usb/cdc_ncm.c 		ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
skb              1243 drivers/net/usb/cdc_ncm.c 		skb_put_data(skb_out, skb->data, skb->len);
skb              1244 drivers/net/usb/cdc_ncm.c 		ctx->tx_curr_frame_payload += skb->len;	/* count real tx payload data */
skb              1245 drivers/net/usb/cdc_ncm.c 		dev_kfree_skb_any(skb);
skb              1246 drivers/net/usb/cdc_ncm.c 		skb = NULL;
skb              1257 drivers/net/usb/cdc_ncm.c 	if (skb != NULL) {
skb              1258 drivers/net/usb/cdc_ncm.c 		dev_kfree_skb_any(skb);
skb              1259 drivers/net/usb/cdc_ncm.c 		skb = NULL;
skb              1385 drivers/net/usb/cdc_ncm.c cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
skb              1401 drivers/net/usb/cdc_ncm.c 	skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
skb              1406 drivers/net/usb/cdc_ncm.c 	if (skb != NULL)
skb              1407 drivers/net/usb/cdc_ncm.c 		dev_kfree_skb_any(skb);
skb              1500 drivers/net/usb/cdc_ncm.c 	struct sk_buff *skb;
skb              1557 drivers/net/usb/cdc_ncm.c 			skb = netdev_alloc_skb_ip_align(dev->net,  len);
skb              1558 drivers/net/usb/cdc_ncm.c 			if (!skb)
skb              1560 drivers/net/usb/cdc_ncm.c 			skb_put_data(skb, skb_in->data + offset, len);
skb              1561 drivers/net/usb/cdc_ncm.c 			usbnet_skb_return(dev, skb);
skb               247 drivers/net/usb/ch9200.c static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb               256 drivers/net/usb/ch9200.c 	len = skb->len;
skb               257 drivers/net/usb/ch9200.c 	if (skb_cow_head(skb, tx_overhead)) {
skb               258 drivers/net/usb/ch9200.c 		dev_kfree_skb_any(skb);
skb               262 drivers/net/usb/ch9200.c 	__skb_push(skb, tx_overhead);
skb               266 drivers/net/usb/ch9200.c 	if ((skb->len % dev->maxpacket) == 0)
skb               269 drivers/net/usb/ch9200.c 	skb->data[0] = len;
skb               270 drivers/net/usb/ch9200.c 	skb->data[1] = len >> 8;
skb               271 drivers/net/usb/ch9200.c 	skb->data[2] = 0x00;
skb               272 drivers/net/usb/ch9200.c 	skb->data[3] = 0x80;
skb               275 drivers/net/usb/ch9200.c 		skb->data[i] = 0x00;
skb               277 drivers/net/usb/ch9200.c 	skb->data[48] = len;
skb               278 drivers/net/usb/ch9200.c 	skb->data[49] = len >> 8;
skb               279 drivers/net/usb/ch9200.c 	skb->data[50] = 0x00;
skb               280 drivers/net/usb/ch9200.c 	skb->data[51] = 0x80;
skb               283 drivers/net/usb/ch9200.c 		skb->data[i] = 0x00;
skb               285 drivers/net/usb/ch9200.c 	return skb;
skb               288 drivers/net/usb/ch9200.c static int ch9200_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               295 drivers/net/usb/ch9200.c 	if (unlikely(skb->len < rx_overhead)) {
skb               300 drivers/net/usb/ch9200.c 	len = (skb->data[skb->len - 16] | skb->data[skb->len - 15] << 8);
skb               301 drivers/net/usb/ch9200.c 	skb_trim(skb, len);
skb               210 drivers/net/usb/cx82310_eth.c static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               227 drivers/net/usb/cx82310_eth.c 		memcpy(skb2->data + dev->partial_len, skb->data,
skb               230 drivers/net/usb/cx82310_eth.c 		skb_pull(skb, (dev->partial_rem + 1) & ~1);
skb               232 drivers/net/usb/cx82310_eth.c 		if (skb->len < 2)
skb               237 drivers/net/usb/cx82310_eth.c 	while (skb->len > 1) {
skb               239 drivers/net/usb/cx82310_eth.c 		len = skb->data[0] | (skb->data[1] << 8);
skb               240 drivers/net/usb/cx82310_eth.c 		skb_pull(skb, 2);
skb               243 drivers/net/usb/cx82310_eth.c 		if (len == skb->len || len + 1 == skb->len) {
skb               244 drivers/net/usb/cx82310_eth.c 			skb_trim(skb, len);
skb               255 drivers/net/usb/cx82310_eth.c 		if (len > skb->len) {
skb               256 drivers/net/usb/cx82310_eth.c 			dev->partial_len = skb->len;
skb               257 drivers/net/usb/cx82310_eth.c 			dev->partial_rem = len - skb->len;
skb               258 drivers/net/usb/cx82310_eth.c 			memcpy((void *)dev->partial_data, skb->data,
skb               260 drivers/net/usb/cx82310_eth.c 			skb_pull(skb, skb->len);
skb               268 drivers/net/usb/cx82310_eth.c 		memcpy(skb2->data, skb->data, len);
skb               272 drivers/net/usb/cx82310_eth.c 		skb_pull(skb, (len + 1) & ~1);
skb               280 drivers/net/usb/cx82310_eth.c static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb               283 drivers/net/usb/cx82310_eth.c 	int len = skb->len;
skb               285 drivers/net/usb/cx82310_eth.c 	if (skb_cow_head(skb, 2)) {
skb               286 drivers/net/usb/cx82310_eth.c 		dev_kfree_skb_any(skb);
skb               289 drivers/net/usb/cx82310_eth.c 	skb_push(skb, 2);
skb               291 drivers/net/usb/cx82310_eth.c 	skb->data[0] = len;
skb               292 drivers/net/usb/cx82310_eth.c 	skb->data[1] = len >> 8;
skb               294 drivers/net/usb/cx82310_eth.c 	return skb;
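
cx82310_tx_fixup() above prepends a two-byte little-endian length (dm9601 below uses the same low/high byte split plus padding rules). The prefix step in isolation:

/* Sketch: 2-byte little-endian length prefix, as in the excerpt. */
#include <linux/skbuff.h>

static struct sk_buff *prepend_le16_len(struct sk_buff *skb)
{
	int len = skb->len;

	if (skb_cow_head(skb, 2)) {	/* ensure 2 bytes of headroom */
		dev_kfree_skb_any(skb);
		return NULL;
	}
	skb_push(skb, 2);
	skb->data[0] = len;		/* low byte */
	skb->data[1] = len >> 8;	/* high byte */
	return skb;
}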
skb               436 drivers/net/usb/dm9601.c static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               449 drivers/net/usb/dm9601.c 	if (unlikely(skb->len < DM_RX_OVERHEAD)) {
skb               454 drivers/net/usb/dm9601.c 	status = skb->data[0];
skb               455 drivers/net/usb/dm9601.c 	len = (skb->data[1] | (skb->data[2] << 8)) - 4;
skb               466 drivers/net/usb/dm9601.c 	skb_pull(skb, 3);
skb               467 drivers/net/usb/dm9601.c 	skb_trim(skb, len);
skb               472 drivers/net/usb/dm9601.c static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb               483 drivers/net/usb/dm9601.c 	len = skb->len + DM_TX_OVERHEAD;
skb               494 drivers/net/usb/dm9601.c 	pad = len - skb->len;
skb               496 drivers/net/usb/dm9601.c 	if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
skb               499 drivers/net/usb/dm9601.c 		skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
skb               500 drivers/net/usb/dm9601.c 		dev_kfree_skb_any(skb);
skb               501 drivers/net/usb/dm9601.c 		skb = skb2;
skb               502 drivers/net/usb/dm9601.c 		if (!skb)
skb               506 drivers/net/usb/dm9601.c 	__skb_push(skb, DM_TX_OVERHEAD);
skb               509 drivers/net/usb/dm9601.c 		memset(skb->data + skb->len, 0, pad);
skb               510 drivers/net/usb/dm9601.c 		__skb_put(skb, pad);
skb               513 drivers/net/usb/dm9601.c 	skb->data[0] = len;
skb               514 drivers/net/usb/dm9601.c 	skb->data[1] = len >> 8;
skb               516 drivers/net/usb/dm9601.c 	return skb;
skb                67 drivers/net/usb/gl620a.c static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb                76 drivers/net/usb/gl620a.c 	if (skb->len < dev->net->hard_header_len)
skb                79 drivers/net/usb/gl620a.c 	header = (struct gl_header *) skb->data;
skb                94 drivers/net/usb/gl620a.c 	skb_pull(skb, 4);
skb               121 drivers/net/usb/gl620a.c 		skb_pull(skb, size + 4);
skb               125 drivers/net/usb/gl620a.c 	skb_pull(skb, 4);
skb               127 drivers/net/usb/gl620a.c 	if (skb->len > GL_MAX_PACKET_LEN) {
skb               129 drivers/net/usb/gl620a.c 			   skb->len);
skb               136 drivers/net/usb/gl620a.c genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
skb               139 drivers/net/usb/gl620a.c 	int	length = skb->len;
skb               140 drivers/net/usb/gl620a.c 	int	headroom = skb_headroom(skb);
skb               141 drivers/net/usb/gl620a.c 	int	tailroom = skb_tailroom(skb);
skb               146 drivers/net/usb/gl620a.c 	padlen = ((skb->len + (4 + 4*1)) % 64) ? 0 : 1;
skb               148 drivers/net/usb/gl620a.c 	if ((!skb_cloned(skb))
skb               151 drivers/net/usb/gl620a.c 			skb->data = memmove(skb->head + (4 + 4*1),
skb               152 drivers/net/usb/gl620a.c 					     skb->data, skb->len);
skb               153 drivers/net/usb/gl620a.c 			skb_set_tail_pointer(skb, skb->len);
skb               157 drivers/net/usb/gl620a.c 		skb2 = skb_copy_expand(skb, (4 + 4*1) , padlen, flags);
skb               158 drivers/net/usb/gl620a.c 		dev_kfree_skb_any(skb);
skb               159 drivers/net/usb/gl620a.c 		skb = skb2;
skb               160 drivers/net/usb/gl620a.c 		if (!skb)
skb               165 drivers/net/usb/gl620a.c 	packet_count = skb_push(skb, (4 + 4 * 1));
skb               172 drivers/net/usb/gl620a.c 	if ((skb->len % dev->maxpacket) == 0)
skb               173 drivers/net/usb/gl620a.c 		skb_put(skb, 1);
skb               175 drivers/net/usb/gl620a.c 	return skb;
skb               771 drivers/net/usb/hso.c static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
skb               780 drivers/net/usb/hso.c 		odev->skb_tx_buf = skb;
skb               785 drivers/net/usb/hso.c 	DUMP1(skb->data, skb->len);
skb               787 drivers/net/usb/hso.c 	memcpy(odev->mux_bulk_tx_buf, skb->data, skb->len);
skb               788 drivers/net/usb/hso.c 	hso_dbg(0x1, "len: %d/%d\n", skb->len, MUX_BULK_TX_BUF_SIZE);
skb               796 drivers/net/usb/hso.c 			  odev->mux_bulk_tx_buf, skb->len, write_bulk_callback,
skb               811 drivers/net/usb/hso.c 		net->stats.tx_bytes += skb->len;
skb               813 drivers/net/usb/hso.c 	dev_kfree_skb(skb);
skb                38 drivers/net/usb/int51x1.c static int int51x1_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb                42 drivers/net/usb/int51x1.c 	if (!(pskb_may_pull(skb, INT51X1_HEADER_SIZE))) {
skb                47 drivers/net/usb/int51x1.c 	len = le16_to_cpu(*(__le16 *)&skb->data[skb->len - 2]);
skb                49 drivers/net/usb/int51x1.c 	skb_trim(skb, len);
skb                55 drivers/net/usb/int51x1.c 		struct sk_buff *skb, gfp_t flags)
skb                57 drivers/net/usb/int51x1.c 	int pack_len = skb->len;
skb                59 drivers/net/usb/int51x1.c 	int headroom = skb_headroom(skb);
skb                60 drivers/net/usb/int51x1.c 	int tailroom = skb_tailroom(skb);
skb                75 drivers/net/usb/int51x1.c 	if (!skb_cloned(skb) &&
skb                78 drivers/net/usb/int51x1.c 			skb->data = memmove(skb->head + INT51X1_HEADER_SIZE,
skb                79 drivers/net/usb/int51x1.c 					skb->data, skb->len);
skb                80 drivers/net/usb/int51x1.c 			skb_set_tail_pointer(skb, skb->len);
skb                85 drivers/net/usb/int51x1.c 		skb2 = skb_copy_expand(skb,
skb                89 drivers/net/usb/int51x1.c 		dev_kfree_skb_any(skb);
skb                92 drivers/net/usb/int51x1.c 		skb = skb2;
skb                98 drivers/net/usb/int51x1.c 	len = __skb_push(skb, INT51X1_HEADER_SIZE);
skb               102 drivers/net/usb/int51x1.c 		__skb_put_zero(skb, need_tail);
skb               104 drivers/net/usb/int51x1.c 	return skb;
skb               166 drivers/net/usb/ipheth.c 	struct sk_buff *skb;
skb               197 drivers/net/usb/ipheth.c 	skb = dev_alloc_skb(len);
skb               198 drivers/net/usb/ipheth.c 	if (!skb) {
skb               205 drivers/net/usb/ipheth.c 	skb_put_data(skb, buf, len);
skb               206 drivers/net/usb/ipheth.c 	skb->dev = dev->net;
skb               207 drivers/net/usb/ipheth.c 	skb->protocol = eth_type_trans(skb, dev->net);
skb               212 drivers/net/usb/ipheth.c 	netif_rx(skb);
skb               361 drivers/net/usb/ipheth.c static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
skb               368 drivers/net/usb/ipheth.c 	if (skb->len > IPHETH_BUF_SIZE) {
skb               369 drivers/net/usb/ipheth.c 		WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
skb               371 drivers/net/usb/ipheth.c 		dev_kfree_skb_any(skb);
skb               375 drivers/net/usb/ipheth.c 	memcpy(dev->tx_buf, skb->data, skb->len);
skb               376 drivers/net/usb/ipheth.c 	if (skb->len < IPHETH_BUF_SIZE)
skb               377 drivers/net/usb/ipheth.c 		memset(dev->tx_buf + skb->len, 0, IPHETH_BUF_SIZE - skb->len);
skb               392 drivers/net/usb/ipheth.c 		dev_kfree_skb_any(skb);
skb               396 drivers/net/usb/ipheth.c 		dev->net->stats.tx_bytes += skb->len;
skb               397 drivers/net/usb/ipheth.c 		dev_consume_skb_any(skb);
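
catc, hso and ipheth above all share a copy-TX pattern: bound-check the frame, memcpy it into a preallocated URB buffer, zero-pad to the fixed size, then submit the URB and free the skb. Just the copy step, sketched; BUF_SIZE and tx_buf are assumptions of the sketch.

/* Sketch: copy an skb into a fixed-size URB buffer with zero padding. */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>

#define BUF_SIZE 1516	/* hypothetical fixed URB buffer size */

static int copy_for_urb(struct sk_buff *skb, u8 *tx_buf)
{
	if (skb->len > BUF_SIZE)
		return -EMSGSIZE;

	memcpy(tx_buf, skb->data, skb->len);
	if (skb->len < BUF_SIZE)
		memset(tx_buf + skb->len, 0, BUF_SIZE - skb->len);
	return 0;
}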
skb               158 drivers/net/usb/kalmia.c kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
skb               166 drivers/net/usb/kalmia.c 	if (!skb_cloned(skb)) {
skb               167 drivers/net/usb/kalmia.c 		int headroom = skb_headroom(skb);
skb               168 drivers/net/usb/kalmia.c 		int tailroom = skb_tailroom(skb);
skb               176 drivers/net/usb/kalmia.c 			skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH,
skb               177 drivers/net/usb/kalmia.c 				skb->data, skb->len);
skb               178 drivers/net/usb/kalmia.c 			skb_set_tail_pointer(skb, skb->len);
skb               183 drivers/net/usb/kalmia.c 	skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH,
skb               188 drivers/net/usb/kalmia.c 	dev_kfree_skb_any(skb);
skb               189 drivers/net/usb/kalmia.c 	skb = skb2;
skb               192 drivers/net/usb/kalmia.c 	header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
skb               202 drivers/net/usb/kalmia.c 	content_len = skb->len - KALMIA_HEADER_LENGTH;
skb               209 drivers/net/usb/kalmia.c 	remainder = skb->len % KALMIA_ALIGN_SIZE;
skb               212 drivers/net/usb/kalmia.c 		skb_put_zero(skb, padlen);
skb               219 drivers/net/usb/kalmia.c 	return skb;
skb               223 drivers/net/usb/kalmia.c kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               238 drivers/net/usb/kalmia.c 	if (skb->len < KALMIA_HEADER_LENGTH)
skb               247 drivers/net/usb/kalmia.c 		header_start = skb->data;
skb               257 drivers/net/usb/kalmia.c 					skb->len - KALMIA_HEADER_LENGTH);
skb               263 drivers/net/usb/kalmia.c 					skb->len - KALMIA_HEADER_LENGTH);
skb               270 drivers/net/usb/kalmia.c 				header_start, skb->len - KALMIA_HEADER_LENGTH);
skb               273 drivers/net/usb/kalmia.c 		usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
skb               275 drivers/net/usb/kalmia.c 		skb_pull(skb, KALMIA_HEADER_LENGTH);
skb               287 drivers/net/usb/kalmia.c 			is_last = (memcmp(skb->data + ether_packet_length,
skb               291 drivers/net/usb/kalmia.c 				header_start = skb->data + ether_packet_length;
skb               295 drivers/net/usb/kalmia.c 					skb->len - KALMIA_HEADER_LENGTH);
skb               300 drivers/net/usb/kalmia.c 			skb2 = skb;
skb               303 drivers/net/usb/kalmia.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
skb               315 drivers/net/usb/kalmia.c 			skb_pull(skb, ether_packet_length);
skb               320 drivers/net/usb/kalmia.c 	while (skb->len);
skb               584 drivers/net/usb/kaweth.c 	struct sk_buff *skb;
skb               637 drivers/net/usb/kaweth.c 		if(!(skb = dev_alloc_skb(pkt_len+2))) {
skb               642 drivers/net/usb/kaweth.c 		skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
skb               644 drivers/net/usb/kaweth.c 		skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len);
skb               646 drivers/net/usb/kaweth.c 		skb_put(skb, pkt_len);
skb               648 drivers/net/usb/kaweth.c 		skb->protocol = eth_type_trans(skb, net);
skb               650 drivers/net/usb/kaweth.c 		netif_rx(skb);
skb               760 drivers/net/usb/kaweth.c 	struct sk_buff *skb = kaweth->tx_skb;
skb               769 drivers/net/usb/kaweth.c 	dev_kfree_skb_irq(skb);
skb               775 drivers/net/usb/kaweth.c static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
skb               792 drivers/net/usb/kaweth.c 	if (skb_cow_head(skb, 2)) {
skb               796 drivers/net/usb/kaweth.c 		dev_kfree_skb_any(skb);
skb               800 drivers/net/usb/kaweth.c 	private_header = __skb_push(skb, 2);
skb               801 drivers/net/usb/kaweth.c 	*private_header = cpu_to_le16(skb->len-2);
skb               802 drivers/net/usb/kaweth.c 	kaweth->tx_skb = skb;
skb               808 drivers/net/usb/kaweth.c 		      skb->len,
skb               820 drivers/net/usb/kaweth.c 		dev_kfree_skb_irq(skb);
skb               825 drivers/net/usb/kaweth.c 		net->stats.tx_bytes += skb->len;
skb              2233 drivers/net/usb/lan78xx.c 	struct sk_buff *skb;
skb              2243 drivers/net/usb/lan78xx.c 		skb_queue_walk(q, skb) {
skb              2244 drivers/net/usb/lan78xx.c 			entry = (struct skb_data *)skb->cb;
skb              2729 drivers/net/usb/lan78xx.c 				       struct sk_buff *skb, gfp_t flags)
skb              2734 drivers/net/usb/lan78xx.c 	if (skb_cow_head(skb, TX_OVERHEAD)) {
skb              2735 drivers/net/usb/lan78xx.c 		dev_kfree_skb_any(skb);
skb              2739 drivers/net/usb/lan78xx.c 	if (skb_linearize(skb)) {
skb              2740 drivers/net/usb/lan78xx.c 		dev_kfree_skb_any(skb);
skb              2744 drivers/net/usb/lan78xx.c 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
skb              2746 drivers/net/usb/lan78xx.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              2750 drivers/net/usb/lan78xx.c 	if (skb_is_gso(skb)) {
skb              2751 drivers/net/usb/lan78xx.c 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
skb              2758 drivers/net/usb/lan78xx.c 	if (skb_vlan_tag_present(skb)) {
skb              2760 drivers/net/usb/lan78xx.c 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
skb              2763 drivers/net/usb/lan78xx.c 	ptr = skb_push(skb, 8);
skb              2767 drivers/net/usb/lan78xx.c 	return skb;
skb              2770 drivers/net/usb/lan78xx.c static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
skb              2775 drivers/net/usb/lan78xx.c 	struct skb_data *entry = (struct skb_data *)skb->cb;
skb              2781 drivers/net/usb/lan78xx.c 	__skb_unlink(skb, list);
skb              2785 drivers/net/usb/lan78xx.c 	__skb_queue_tail(&dev->done, skb);
skb              2795 drivers/net/usb/lan78xx.c 	struct sk_buff *skb = (struct sk_buff *)urb->context;
skb              2796 drivers/net/usb/lan78xx.c 	struct skb_data *entry = (struct skb_data *)skb->cb;
skb              2829 drivers/net/usb/lan78xx.c 	defer_bh(dev, skb, &dev->txq, tx_done);
skb              2842 drivers/net/usb/lan78xx.c lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
skb              2847 drivers/net/usb/lan78xx.c 	if (skb) {
skb              2848 drivers/net/usb/lan78xx.c 		skb_tx_timestamp(skb);
skb              2849 drivers/net/usb/lan78xx.c 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
skb              3041 drivers/net/usb/lan78xx.c 				    struct sk_buff *skb,
skb              3051 drivers/net/usb/lan78xx.c 		skb->ip_summed = CHECKSUM_NONE;
skb              3053 drivers/net/usb/lan78xx.c 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
skb              3054 drivers/net/usb/lan78xx.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb              3059 drivers/net/usb/lan78xx.c 				    struct sk_buff *skb,
skb              3064 drivers/net/usb/lan78xx.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              3068 drivers/net/usb/lan78xx.c static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
skb              3073 drivers/net/usb/lan78xx.c 		skb_queue_tail(&dev->rxq_pause, skb);
skb              3078 drivers/net/usb/lan78xx.c 	dev->net->stats.rx_bytes += skb->len;
skb              3080 drivers/net/usb/lan78xx.c 	skb->protocol = eth_type_trans(skb, dev->net);
skb              3083 drivers/net/usb/lan78xx.c 		  skb->len + sizeof(struct ethhdr), skb->protocol);
skb              3084 drivers/net/usb/lan78xx.c 	memset(skb->cb, 0, sizeof(struct skb_data));
skb              3086 drivers/net/usb/lan78xx.c 	if (skb_defer_rx_timestamp(skb))
skb              3089 drivers/net/usb/lan78xx.c 	status = netif_rx(skb);
skb              3095 drivers/net/usb/lan78xx.c static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
skb              3097 drivers/net/usb/lan78xx.c 	if (skb->len < dev->net->hard_header_len)
skb              3100 drivers/net/usb/lan78xx.c 	while (skb->len > 0) {
skb              3106 drivers/net/usb/lan78xx.c 		rx_cmd_a = get_unaligned_le32(skb->data);
skb              3107 drivers/net/usb/lan78xx.c 		skb_pull(skb, sizeof(rx_cmd_a));
skb              3109 drivers/net/usb/lan78xx.c 		rx_cmd_b = get_unaligned_le32(skb->data);
skb              3110 drivers/net/usb/lan78xx.c 		skb_pull(skb, sizeof(rx_cmd_b));
skb              3112 drivers/net/usb/lan78xx.c 		rx_cmd_c = get_unaligned_le16(skb->data);
skb              3113 drivers/net/usb/lan78xx.c 		skb_pull(skb, sizeof(rx_cmd_c));
skb              3115 drivers/net/usb/lan78xx.c 		packet = skb->data;
skb              3126 drivers/net/usb/lan78xx.c 			if (skb->len == size) {
skb              3127 drivers/net/usb/lan78xx.c 				lan78xx_rx_csum_offload(dev, skb,
skb              3129 drivers/net/usb/lan78xx.c 				lan78xx_rx_vlan_offload(dev, skb,
skb              3132 drivers/net/usb/lan78xx.c 				skb_trim(skb, skb->len - 4); /* remove fcs */
skb              3133 drivers/net/usb/lan78xx.c 				skb->truesize = size + sizeof(struct sk_buff);
skb              3138 drivers/net/usb/lan78xx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
skb              3157 drivers/net/usb/lan78xx.c 		skb_pull(skb, size);
skb              3160 drivers/net/usb/lan78xx.c 		if (skb->len)
skb              3161 drivers/net/usb/lan78xx.c 			skb_pull(skb, align_count);
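
lan78xx_rx() above peels three little-endian command words off the front of each packet with get_unaligned_le*() plus skb_pull() before handling the payload. That parsing step isolated (field meanings of the command words are left to the caller):

/* Sketch: pull the per-packet rx command words, as the loop above does. */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <asm/unaligned.h>

static int pull_rx_cmd_words(struct sk_buff *skb, u32 *cmd_a, u32 *cmd_b,
			     u16 *cmd_c)
{
	if (skb->len < 2 * sizeof(u32) + sizeof(u16))
		return -EINVAL;

	*cmd_a = get_unaligned_le32(skb->data);
	skb_pull(skb, sizeof(u32));
	*cmd_b = get_unaligned_le32(skb->data);
	skb_pull(skb, sizeof(u32));
	*cmd_c = get_unaligned_le16(skb->data);
	skb_pull(skb, sizeof(u16));
	return 0;
}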
skb              3167 drivers/net/usb/lan78xx.c static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
skb              3169 drivers/net/usb/lan78xx.c 	if (!lan78xx_rx(dev, skb)) {
skb              3174 drivers/net/usb/lan78xx.c 	if (skb->len) {
skb              3175 drivers/net/usb/lan78xx.c 		lan78xx_skb_return(dev, skb);
skb              3182 drivers/net/usb/lan78xx.c 	skb_queue_tail(&dev->done, skb);
skb              3189 drivers/net/usb/lan78xx.c 	struct sk_buff *skb;
skb              3195 drivers/net/usb/lan78xx.c 	skb = netdev_alloc_skb_ip_align(dev->net, size);
skb              3196 drivers/net/usb/lan78xx.c 	if (!skb) {
skb              3201 drivers/net/usb/lan78xx.c 	entry = (struct skb_data *)skb->cb;
skb              3207 drivers/net/usb/lan78xx.c 			  skb->data, size, rx_complete, skb);
skb              3218 drivers/net/usb/lan78xx.c 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
skb              3241 drivers/net/usb/lan78xx.c 		dev_kfree_skb_any(skb);
skb              3249 drivers/net/usb/lan78xx.c 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
skb              3250 drivers/net/usb/lan78xx.c 	struct skb_data	*entry = (struct skb_data *)skb->cb;
skb              3255 drivers/net/usb/lan78xx.c 	skb_put(skb, urb->actual_length);
skb              3261 drivers/net/usb/lan78xx.c 		if (skb->len < dev->net->hard_header_len) {
skb              3266 drivers/net/usb/lan78xx.c 				  "rx length %d\n", skb->len);
skb              3303 drivers/net/usb/lan78xx.c 	state = defer_bh(dev, skb, &dev->rxq, state);
skb              3324 drivers/net/usb/lan78xx.c 	struct sk_buff *skb, *skb2;
skb              3334 drivers/net/usb/lan78xx.c 	skb_queue_walk(tqp, skb) {
skb              3335 drivers/net/usb/lan78xx.c 		if (skb_is_gso(skb)) {
skb              3336 drivers/net/usb/lan78xx.c 			if (!skb_queue_is_first(tqp, skb)) {
skb              3341 drivers/net/usb/lan78xx.c 			length = skb->len - TX_OVERHEAD;
skb              3342 drivers/net/usb/lan78xx.c 			__skb_unlink(skb, tqp);
skb              3347 drivers/net/usb/lan78xx.c 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
skb              3349 drivers/net/usb/lan78xx.c 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
skb              3355 drivers/net/usb/lan78xx.c 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
skb              3356 drivers/net/usb/lan78xx.c 	if (!skb)
skb              3359 drivers/net/usb/lan78xx.c 	skb_put(skb, skb_totallen);
skb              3365 drivers/net/usb/lan78xx.c 			memcpy(skb->data + pos, skb2->data, skb2->len);
skb              3376 drivers/net/usb/lan78xx.c 	entry = (struct skb_data *)skb->cb;
skb              3390 drivers/net/usb/lan78xx.c 			  skb->data, skb->len, tx_complete, skb);
skb              3415 drivers/net/usb/lan78xx.c 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
skb              3437 drivers/net/usb/lan78xx.c 		if (skb)
skb              3438 drivers/net/usb/lan78xx.c 			dev_kfree_skb_any(skb);
skb              3442 drivers/net/usb/lan78xx.c 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
skb              3470 drivers/net/usb/lan78xx.c 	struct sk_buff *skb;
skb              3473 drivers/net/usb/lan78xx.c 	while ((skb = skb_dequeue(&dev->done))) {
skb              3474 drivers/net/usb/lan78xx.c 		entry = (struct skb_data *)(skb->cb);
skb              3478 drivers/net/usb/lan78xx.c 			rx_process(dev, skb);
skb              3482 drivers/net/usb/lan78xx.c 			dev_kfree_skb(skb);
skb              3486 drivers/net/usb/lan78xx.c 			dev_kfree_skb(skb);
skb              3672 drivers/net/usb/lan78xx.c static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
skb              3676 drivers/net/usb/lan78xx.c 	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
skb              3679 drivers/net/usb/lan78xx.c 	features = vlan_features_check(skb, features);
skb              3680 drivers/net/usb/lan78xx.c 	features = vxlan_features_check(skb, features);
skb              4105 drivers/net/usb/lan78xx.c 	struct sk_buff *skb;
skb              4123 drivers/net/usb/lan78xx.c 			skb = (struct sk_buff *)res->context;
skb              4126 drivers/net/usb/lan78xx.c 				dev_kfree_skb_any(skb);
skb              4131 drivers/net/usb/lan78xx.c 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
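
The lan78xx receive path above follows the pattern most USB NICs in this directory share: one bulk-in URB carries several frames, each behind a small command/length header, and every frame but the last is handed up as a clone so the payload is never copied. A minimal sketch of that loop, assuming a hypothetical 4-byte little-endian length prefix and the usbnet rx_fixup convention (return 1 to keep the skb, 0 to drop it) used by the neighbouring drivers:

	#include <asm/unaligned.h>
	#include <linux/skbuff.h>
	#include <linux/usb/usbnet.h>

	static int demo_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
	{
		while (skb->len > sizeof(u32)) {
			u32 len = get_unaligned_le32(skb->data);
			struct sk_buff *frame;

			skb_pull(skb, sizeof(u32));	/* consume the header */
			if (len > skb->len)
				return 0;		/* malformed URB: drop it */
			if (skb->len == len)
				return 1;		/* last frame: reuse skb itself */

			frame = skb_clone(skb, GFP_ATOMIC);
			if (!frame)
				return 0;
			skb_trim(frame, len);		/* clone covers just this frame */
			usbnet_skb_return(dev, frame);	/* hand it to the stack */
			skb_pull(skb, len);		/* advance past the frame */
		}
		return 1;
	}
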
skb                96 drivers/net/usb/lg-vl600.c static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               102 drivers/net/usb/lg-vl600.c 	struct sk_buff *buf = skb;
skb               116 drivers/net/usb/lg-vl600.c 		if (skb->len + s->current_rx_buf->len >
skb               124 drivers/net/usb/lg-vl600.c 		skb_put_data(buf, skb->data, skb->len);
skb               125 drivers/net/usb/lg-vl600.c 	} else if (skb->len < 4) {
skb               144 drivers/net/usb/lg-vl600.c 		s->current_rx_buf = skb_copy_expand(skb, 0,
skb               175 drivers/net/usb/lg-vl600.c 		ethhdr = (struct ethhdr *) skb->data;
skb               231 drivers/net/usb/lg-vl600.c 		struct sk_buff *skb, gfp_t flags)
skb               237 drivers/net/usb/lg-vl600.c 	int orig_len = skb->len - sizeof(struct ethhdr);
skb               238 drivers/net/usb/lg-vl600.c 	int full_len = (skb->len + sizeof(struct vl600_frame_hdr) + 3) & ~3;
skb               240 drivers/net/usb/lg-vl600.c 	frame = (struct vl600_frame_hdr *) skb->data;
skb               241 drivers/net/usb/lg-vl600.c 	if (skb->len > sizeof(*frame) && skb->len == le32_to_cpup(&frame->len))
skb               242 drivers/net/usb/lg-vl600.c 		return skb; /* Already encapsulated? */
skb               244 drivers/net/usb/lg-vl600.c 	if (skb->len < sizeof(struct ethhdr))
skb               248 drivers/net/usb/lg-vl600.c 	if (!skb_cloned(skb)) {
skb               249 drivers/net/usb/lg-vl600.c 		int headroom = skb_headroom(skb);
skb               250 drivers/net/usb/lg-vl600.c 		int tailroom = skb_tailroom(skb);
skb               252 drivers/net/usb/lg-vl600.c 		if (tailroom >= full_len - skb->len - sizeof(*frame) &&
skb               257 drivers/net/usb/lg-vl600.c 		if (headroom + tailroom + skb->len >= full_len) {
skb               259 drivers/net/usb/lg-vl600.c 			skb->data = memmove(skb->head + sizeof(*frame),
skb               260 drivers/net/usb/lg-vl600.c 					skb->data, skb->len);
skb               261 drivers/net/usb/lg-vl600.c 			skb_set_tail_pointer(skb, skb->len);
skb               267 drivers/net/usb/lg-vl600.c 	ret = skb_copy_expand(skb, sizeof(struct vl600_frame_hdr), full_len -
skb               268 drivers/net/usb/lg-vl600.c 			skb->len - sizeof(struct vl600_frame_hdr), flags);
skb               269 drivers/net/usb/lg-vl600.c 	dev_kfree_skb_any(skb);
skb               272 drivers/net/usb/lg-vl600.c 	skb = ret;
skb               280 drivers/net/usb/lg-vl600.c 	packet = (struct vl600_pkt_hdr *) skb->data;
skb               289 drivers/net/usb/lg-vl600.c 	frame = skb_push(skb, sizeof(*frame));
skb               295 drivers/net/usb/lg-vl600.c 	if (skb->len < full_len) /* Pad */
skb               296 drivers/net/usb/lg-vl600.c 		skb_put(skb, full_len - skb->len);
skb               298 drivers/net/usb/lg-vl600.c 	return skb;
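
vl600_tx_fixup above is the tx-side counterpart: prepend a device header in place when the skb is private and has headroom, shuffle the data with memmove() when head plus tail room suffice, and fall back to skb_copy_expand() otherwise. A condensed sketch with a hypothetical framing header (the memmove optimization omitted):

	#include <linux/skbuff.h>

	struct demo_hdr {
		__le32 len;			/* hypothetical device framing */
	};

	static struct sk_buff *demo_tx_fixup(struct sk_buff *skb, gfp_t flags)
	{
		struct demo_hdr *hdr;

		if (skb_cloned(skb) || skb_headroom(skb) < sizeof(*hdr)) {
			struct sk_buff *skb2 =
				skb_copy_expand(skb, sizeof(*hdr), 0, flags);

			dev_kfree_skb_any(skb);	/* old skb goes either way */
			if (!skb2)
				return NULL;
			skb = skb2;
		}

		hdr = skb_push(skb, sizeof(*hdr));
		hdr->len = cpu_to_le32(skb->len - sizeof(*hdr));
		return skb;
	}
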
skb               512 drivers/net/usb/mcs7830.c static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               517 drivers/net/usb/mcs7830.c 	if (skb->len < dev->net->hard_header_len) {
skb               522 drivers/net/usb/mcs7830.c 	skb_trim(skb, skb->len - 1);
skb               523 drivers/net/usb/mcs7830.c 	status = skb->data[skb->len];
skb               542 drivers/net/usb/mcs7830.c 	return skb->len > 0;
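
Note the trim-then-peek idiom in mcs7830_rx_fixup above: skb_trim() shortens skb->len first, so skb->data[skb->len] then reads the byte just past the new tail, which is the device's trailing status octet and is still present in the buffer. In isolation:

	#include <linux/skbuff.h>

	static u8 demo_pop_trailing_status(struct sk_buff *skb)
	{
		skb_trim(skb, skb->len - 1);	/* drop status from the payload */
		return skb->data[skb->len];	/* ...but the byte is still there */
	}
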
skb               340 drivers/net/usb/net1080.c static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               347 drivers/net/usb/net1080.c 	if (skb->len < dev->net->hard_header_len)
skb               350 drivers/net/usb/net1080.c 	if (!(skb->len & 0x01)) {
skb               352 drivers/net/usb/net1080.c 			   skb->len, dev->net->hard_header_len, dev->hard_mtu,
skb               359 drivers/net/usb/net1080.c 	header = (struct nc_header *) skb->data;
skb               378 drivers/net/usb/net1080.c 	skb_pull(skb, hdr_len);
skb               381 drivers/net/usb/net1080.c 		(skb->data + skb->len - sizeof *trailer);
skb               382 drivers/net/usb/net1080.c 	skb_trim(skb, skb->len - sizeof *trailer);
skb               385 drivers/net/usb/net1080.c 		if (skb->data [packet_len] != PAD_BYTE) {
skb               390 drivers/net/usb/net1080.c 		skb_trim(skb, skb->len - 1);
skb               392 drivers/net/usb/net1080.c 	if (skb->len != packet_len) {
skb               395 drivers/net/usb/net1080.c 			   skb->len, packet_len);
skb               415 drivers/net/usb/net1080.c net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
skb               421 drivers/net/usb/net1080.c 	int			len = skb->len;
skb               425 drivers/net/usb/net1080.c 	if (!skb_cloned(skb)) {
skb               426 drivers/net/usb/net1080.c 		int	headroom = skb_headroom(skb);
skb               427 drivers/net/usb/net1080.c 		int	tailroom = skb_tailroom(skb);
skb               437 drivers/net/usb/net1080.c 			skb->data = memmove(skb->head
skb               439 drivers/net/usb/net1080.c 					    skb->data, skb->len);
skb               440 drivers/net/usb/net1080.c 			skb_set_tail_pointer(skb, len);
skb               446 drivers/net/usb/net1080.c 	skb2 = skb_copy_expand(skb,
skb               450 drivers/net/usb/net1080.c 	dev_kfree_skb_any(skb);
skb               453 drivers/net/usb/net1080.c 	skb = skb2;
skb               457 drivers/net/usb/net1080.c 	header = skb_push(skb, sizeof *header);
skb               463 drivers/net/usb/net1080.c 	if (!((skb->len + sizeof *trailer) & 0x01))
skb               464 drivers/net/usb/net1080.c 		skb_put_u8(skb, PAD_BYTE);
skb               465 drivers/net/usb/net1080.c 	trailer = skb_put(skb, sizeof *trailer);
skb               472 drivers/net/usb/net1080.c 	return skb;
skb               704 drivers/net/usb/pegasus.c static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
skb               708 drivers/net/usb/pegasus.c 	int count = ((skb->len + 2) & 0x3f) ? skb->len + 2 : skb->len + 3;
skb               710 drivers/net/usb/pegasus.c 	__u16 l16 = skb->len;
skb               715 drivers/net/usb/pegasus.c 	skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len);
skb               736 drivers/net/usb/pegasus.c 		net->stats.tx_bytes += skb->len;
skb               738 drivers/net/usb/pegasus.c 	dev_kfree_skb(skb);
skb                97 drivers/net/usb/qmi_wwan.c static netdev_tx_t qmimux_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               100 drivers/net/usb/qmi_wwan.c 	unsigned int len = skb->len;
skb               104 drivers/net/usb/qmi_wwan.c 	hdr = skb_push(skb, sizeof(struct qmimux_hdr));
skb               108 drivers/net/usb/qmi_wwan.c 	skb->dev = priv->real_dev;
skb               109 drivers/net/usb/qmi_wwan.c 	ret = dev_queue_xmit(skb);
skb               198 drivers/net/usb/qmi_wwan.c static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               206 drivers/net/usb/qmi_wwan.c 	while (offset + qmimux_hdr_sz < skb->len) {
skb               207 drivers/net/usb/qmi_wwan.c 		hdr = (struct qmimux_hdr *)(skb->data + offset);
skb               211 drivers/net/usb/qmi_wwan.c 		if (offset + len + qmimux_hdr_sz > skb->len)
skb               232 drivers/net/usb/qmi_wwan.c 		switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
skb               244 drivers/net/usb/qmi_wwan.c 		skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len);
skb               544 drivers/net/usb/qmi_wwan.c static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               551 drivers/net/usb/qmi_wwan.c 	if (skb->len < dev->net->hard_header_len)
skb               555 drivers/net/usb/qmi_wwan.c 		return qmimux_rx_fixup(dev, skb);
skb               557 drivers/net/usb/qmi_wwan.c 	switch (skb->data[0] & 0xf0) {
skb               567 drivers/net/usb/qmi_wwan.c 		if (is_multicast_ether_addr(skb->data))
skb               570 drivers/net/usb/qmi_wwan.c 		skb_reset_mac_header(skb);
skb               579 drivers/net/usb/qmi_wwan.c 		skb_reset_mac_header(skb);
skb               580 drivers/net/usb/qmi_wwan.c 		skb->dev = dev->net; /* normally set by eth_type_trans */
skb               581 drivers/net/usb/qmi_wwan.c 		skb->protocol = proto;
skb               585 drivers/net/usb/qmi_wwan.c 	if (skb_headroom(skb) < ETH_HLEN)
skb               587 drivers/net/usb/qmi_wwan.c 	skb_push(skb, ETH_HLEN);
skb               588 drivers/net/usb/qmi_wwan.c 	skb_reset_mac_header(skb);
skb               589 drivers/net/usb/qmi_wwan.c 	eth_hdr(skb)->h_proto = proto;
skb               590 drivers/net/usb/qmi_wwan.c 	eth_zero_addr(eth_hdr(skb)->h_source);
skb               592 drivers/net/usb/qmi_wwan.c 	memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
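
qmi_wwan carries raw IP with no link-layer header, so the fixup above sniffs the IP version nibble to choose skb->protocol and then fabricates an Ethernet header for the stack's benefit. The detection step by itself:

	#include <linux/if_ether.h>
	#include <linux/skbuff.h>

	static __be16 demo_raw_ip_proto(const struct sk_buff *skb)
	{
		switch (skb->data[0] & 0xf0) {
		case 0x40:
			return htons(ETH_P_IP);		/* version nibble 4 */
		case 0x60:
			return htons(ETH_P_IPV6);	/* version nibble 6 */
		default:
			return 0;			/* not IP; caller drops it */
		}
	}
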
skb              1708 drivers/net/usb/r8152.c static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
skb              1711 drivers/net/usb/r8152.c 	if (skb_shinfo(skb)->gso_size) {
skb              1717 drivers/net/usb/r8152.c 		segs = skb_gso_segment(skb, features);
skb              1731 drivers/net/usb/r8152.c 		dev_kfree_skb(skb);
skb              1732 drivers/net/usb/r8152.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1733 drivers/net/usb/r8152.c 		if (skb_checksum_help(skb) < 0)
skb              1736 drivers/net/usb/r8152.c 		__skb_queue_head(list, skb);
skb              1743 drivers/net/usb/r8152.c 		dev_kfree_skb(skb);
skb              1751 drivers/net/usb/r8152.c static int msdn_giant_send_check(struct sk_buff *skb)
skb              1757 drivers/net/usb/r8152.c 	ret = skb_cow_head(skb, 0);
skb              1761 drivers/net/usb/r8152.c 	ipv6h = ipv6_hdr(skb);
skb              1762 drivers/net/usb/r8152.c 	th = tcp_hdr(skb);
skb              1770 drivers/net/usb/r8152.c static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb)
skb              1772 drivers/net/usb/r8152.c 	if (skb_vlan_tag_present(skb)) {
skb              1775 drivers/net/usb/r8152.c 		opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb));
skb              1780 drivers/net/usb/r8152.c static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb)
skb              1785 drivers/net/usb/r8152.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              1790 drivers/net/usb/r8152.c 			 struct sk_buff *skb, u32 len, u32 transport_offset)
skb              1792 drivers/net/usb/r8152.c 	u32 mss = skb_shinfo(skb)->gso_size;
skb              1809 drivers/net/usb/r8152.c 		switch (vlan_get_protocol(skb)) {
skb              1815 drivers/net/usb/r8152.c 			if (msdn_giant_send_check(skb)) {
skb              1829 drivers/net/usb/r8152.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1840 drivers/net/usb/r8152.c 		switch (vlan_get_protocol(skb)) {
skb              1843 drivers/net/usb/r8152.c 			ip_protocol = ip_hdr(skb)->protocol;
skb              1848 drivers/net/usb/r8152.c 			ip_protocol = ipv6_hdr(skb)->nexthdr;
skb              1891 drivers/net/usb/r8152.c 		struct sk_buff *skb;
skb              1895 drivers/net/usb/r8152.c 		skb = __skb_dequeue(&skb_head);
skb              1896 drivers/net/usb/r8152.c 		if (!skb)
skb              1899 drivers/net/usb/r8152.c 		len = skb->len + sizeof(*tx_desc);
skb              1902 drivers/net/usb/r8152.c 			__skb_queue_head(&skb_head, skb);
skb              1909 drivers/net/usb/r8152.c 		offset = (u32)skb_transport_offset(skb);
skb              1911 drivers/net/usb/r8152.c 		if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
skb              1912 drivers/net/usb/r8152.c 			r8152_csum_workaround(tp, skb, &skb_head);
skb              1916 drivers/net/usb/r8152.c 		rtl_tx_vlan_tag(tx_desc, skb);
skb              1920 drivers/net/usb/r8152.c 		len = skb->len;
skb              1921 drivers/net/usb/r8152.c 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
skb              1925 drivers/net/usb/r8152.c 			dev_kfree_skb_any(skb);
skb              1932 drivers/net/usb/r8152.c 		agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
skb              1934 drivers/net/usb/r8152.c 		dev_kfree_skb_any(skb);
skb              2050 drivers/net/usb/r8152.c 			struct sk_buff *skb = __skb_dequeue(&tp->rx_queue);
skb              2055 drivers/net/usb/r8152.c 			if (!skb)
skb              2058 drivers/net/usb/r8152.c 			pkt_len = skb->len;
skb              2059 drivers/net/usb/r8152.c 			napi_gro_receive(napi, skb);
skb              2098 drivers/net/usb/r8152.c 			struct sk_buff *skb;
skb              2120 drivers/net/usb/r8152.c 			skb = napi_alloc_skb(napi, rx_frag_head_sz);
skb              2121 drivers/net/usb/r8152.c 			if (!skb) {
skb              2126 drivers/net/usb/r8152.c 			skb->ip_summed = r8152_rx_csum(tp, rx_desc);
skb              2127 drivers/net/usb/r8152.c 			memcpy(skb->data, rx_data, rx_frag_head_sz);
skb              2128 drivers/net/usb/r8152.c 			skb_put(skb, rx_frag_head_sz);
skb              2132 drivers/net/usb/r8152.c 				skb_add_rx_frag(skb, 0, agg->page,
skb              2139 drivers/net/usb/r8152.c 			skb->protocol = eth_type_trans(skb, netdev);
skb              2140 drivers/net/usb/r8152.c 			rtl_rx_vlan_tag(rx_desc, skb);
skb              2144 drivers/net/usb/r8152.c 				stats->rx_bytes += skb->len;
skb              2145 drivers/net/usb/r8152.c 				napi_gro_receive(napi, skb);
skb              2147 drivers/net/usb/r8152.c 				__skb_queue_tail(&tp->rx_queue, skb);
skb              2307 drivers/net/usb/r8152.c 	struct sk_buff *skb;
skb              2317 drivers/net/usb/r8152.c 	while ((skb = __skb_dequeue(&skb_head))) {
skb              2318 drivers/net/usb/r8152.c 		dev_kfree_skb(skb);
skb              2388 drivers/net/usb/r8152.c rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
skb              2391 drivers/net/usb/r8152.c 	u32 mss = skb_shinfo(skb)->gso_size;
skb              2393 drivers/net/usb/r8152.c 	int offset = skb_transport_offset(skb);
skb              2395 drivers/net/usb/r8152.c 	if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
skb              2397 drivers/net/usb/r8152.c 	else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
skb              2403 drivers/net/usb/r8152.c static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
skb              2408 drivers/net/usb/r8152.c 	skb_tx_timestamp(skb);
skb              2410 drivers/net/usb/r8152.c 	skb_queue_tail(&tp->tx_queue, skb);
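
r8152_csum_workaround above is the usual escape hatch when a frame exceeds what the NIC can offload: GSO packets are segmented in software with skb_gso_segment() and requeued one by one, and CHECKSUM_PARTIAL packets go through skb_checksum_help(). A sketch of that shape, with the feature mask and list handling simplified:

	#include <linux/err.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void demo_offload_fallback(struct sk_buff *skb,
					  struct sk_buff_head *list)
	{
		if (skb_is_gso(skb)) {
			netdev_features_t features = skb->dev->features &
						     ~(NETIF_F_SG | NETIF_F_ALL_TSO);
			struct sk_buff *segs, *seg, *next;

			segs = skb_gso_segment(skb, features);	/* software TSO */
			if (IS_ERR_OR_NULL(segs))
				goto drop;
			for (seg = segs; seg; seg = next) {
				next = seg->next;
				seg->next = NULL;
				__skb_queue_head(list, seg);	/* retry each part */
			}
			dev_kfree_skb(skb);
		} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb_checksum_help(skb) < 0)		/* software csum */
				goto drop;
			__skb_queue_head(list, skb);
		} else {
			goto drop;	/* nothing here we can fix up */
		}
		return;
	drop:
		dev_kfree_skb(skb);
	}
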
skb               486 drivers/net/usb/rndis_host.c int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               489 drivers/net/usb/rndis_host.c 	if (skb->len < dev->net->hard_header_len)
skb               493 drivers/net/usb/rndis_host.c 	while (likely(skb->len)) {
skb               494 drivers/net/usb/rndis_host.c 		struct rndis_data_hdr	*hdr = (void *)skb->data;
skb               504 drivers/net/usb/rndis_host.c 		if (unlikely(msg_type != RNDIS_MSG_PACKET || skb->len < msg_len
skb               509 drivers/net/usb/rndis_host.c 				   msg_len, data_offset, data_len, skb->len);
skb               512 drivers/net/usb/rndis_host.c 		skb_pull(skb, 8 + data_offset);
skb               515 drivers/net/usb/rndis_host.c 		if (likely((data_len - skb->len) <= sizeof *hdr)) {
skb               516 drivers/net/usb/rndis_host.c 			skb_trim(skb, data_len);
skb               521 drivers/net/usb/rndis_host.c 		skb2 = skb_clone(skb, GFP_ATOMIC);
skb               524 drivers/net/usb/rndis_host.c 		skb_pull(skb, msg_len - sizeof *hdr);
skb               535 drivers/net/usb/rndis_host.c rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
skb               539 drivers/net/usb/rndis_host.c 	unsigned		len = skb->len;
skb               541 drivers/net/usb/rndis_host.c 	if (likely(!skb_cloned(skb))) {
skb               542 drivers/net/usb/rndis_host.c 		int	room = skb_headroom(skb);
skb               549 drivers/net/usb/rndis_host.c 		room += skb_tailroom(skb);
skb               551 drivers/net/usb/rndis_host.c 			skb->data = memmove(skb->head + sizeof *hdr,
skb               552 drivers/net/usb/rndis_host.c 					    skb->data, len);
skb               553 drivers/net/usb/rndis_host.c 			skb_set_tail_pointer(skb, len);
skb               559 drivers/net/usb/rndis_host.c 	skb2 = skb_copy_expand(skb, sizeof *hdr, 1, flags);
skb               560 drivers/net/usb/rndis_host.c 	dev_kfree_skb_any(skb);
skb               563 drivers/net/usb/rndis_host.c 	skb = skb2;
skb               569 drivers/net/usb/rndis_host.c 	hdr = __skb_push(skb, sizeof *hdr);
skb               572 drivers/net/usb/rndis_host.c 	hdr->msg_len = cpu_to_le32(skb->len);
skb               577 drivers/net/usb/rndis_host.c 	return skb;
skb               369 drivers/net/usb/rtl8150.c 	struct sk_buff *skb;
skb               374 drivers/net/usb/rtl8150.c 			skb = dev->rx_skb_pool[i];
skb               376 drivers/net/usb/rtl8150.c 			return skb;
skb               386 drivers/net/usb/rtl8150.c 	struct sk_buff *skb;
skb               432 drivers/net/usb/rtl8150.c 	skb = pull_skb(dev);
skb               434 drivers/net/usb/rtl8150.c 	if (!skb)
skb               437 drivers/net/usb/rtl8150.c 	dev->rx_skb = skb;
skb               569 drivers/net/usb/rtl8150.c 	struct sk_buff *skb;
skb               575 drivers/net/usb/rtl8150.c 		skb = dev_alloc_skb(RTL8150_MTU + 2);
skb               576 drivers/net/usb/rtl8150.c 		if (!skb) {
skb               579 drivers/net/usb/rtl8150.c 		skb_reserve(skb, 2);
skb               580 drivers/net/usb/rtl8150.c 		dev->rx_skb_pool[i] = skb;
skb               595 drivers/net/usb/rtl8150.c 	struct sk_buff *skb;
skb               605 drivers/net/usb/rtl8150.c 	skb = pull_skb(dev);
skb               607 drivers/net/usb/rtl8150.c 	if (skb == NULL)
skb               609 drivers/net/usb/rtl8150.c 	dev->rx_skb = skb;
skb               688 drivers/net/usb/rtl8150.c static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
skb               695 drivers/net/usb/rtl8150.c 	count = (skb->len < 60) ? 60 : skb->len;
skb               697 drivers/net/usb/rtl8150.c 	dev->tx_skb = skb;
skb               699 drivers/net/usb/rtl8150.c 		      skb->data, count, write_bulk_callback, dev);
skb               711 drivers/net/usb/rtl8150.c 		netdev->stats.tx_bytes += skb->len;
skb               206 drivers/net/usb/sierra_net.c static inline int is_ip(struct sk_buff *skb)
skb               208 drivers/net/usb/sierra_net.c 	return skb->protocol == cpu_to_be16(ETH_P_IP) ||
skb               209 drivers/net/usb/sierra_net.c 	       skb->protocol == cpu_to_be16(ETH_P_IPV6);
skb               217 drivers/net/usb/sierra_net.c static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev)
skb               219 drivers/net/usb/sierra_net.c 	skb_reset_mac_header(skb); /* ethernet header */
skb               221 drivers/net/usb/sierra_net.c 	if (skb_is_nonlinear(skb)) {
skb               226 drivers/net/usb/sierra_net.c 	if (!pskb_may_pull(skb, ETH_HLEN))
skb               228 drivers/net/usb/sierra_net.c 	skb->protocol = eth_hdr(skb)->h_proto;
skb               782 drivers/net/usb/sierra_net.c 		struct sk_buff *skb, int len)
skb               787 drivers/net/usb/sierra_net.c 	new_skb = skb_clone(skb, GFP_ATOMIC);
skb               790 drivers/net/usb/sierra_net.c 	skb_pull(skb, len);
skb               805 drivers/net/usb/sierra_net.c static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               814 drivers/net/usb/sierra_net.c 	while (likely(skb->len)) {
skb               815 drivers/net/usb/sierra_net.c 		err = parse_hip(skb->data, skb->len, &hh);
skb               836 drivers/net/usb/sierra_net.c 		skb_pull(skb, hh.hdrlen);
skb               841 drivers/net/usb/sierra_net.c 		skb_reset_mac_header(skb);
skb               842 drivers/net/usb/sierra_net.c 		if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
skb               843 drivers/net/usb/sierra_net.c 			eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
skb               844 drivers/net/usb/sierra_net.c 		eth_zero_addr(eth_hdr(skb)->h_source);
skb               845 drivers/net/usb/sierra_net.c 		memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
skb               848 drivers/net/usb/sierra_net.c 		if (hh.payload_len.word == skb->len)
skb               851 drivers/net/usb/sierra_net.c 		new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word);
skb               862 drivers/net/usb/sierra_net.c 					   struct sk_buff *skb, gfp_t flags)
skb               872 drivers/net/usb/sierra_net.c 	if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
skb               874 drivers/net/usb/sierra_net.c 		if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) {
skb               876 drivers/net/usb/sierra_net.c 			len = skb->len;
skb               877 drivers/net/usb/sierra_net.c 			skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN);
skb               882 drivers/net/usb/sierra_net.c 				if (unlikely(skb_tailroom(skb) == 0)) {
skb               885 drivers/net/usb/sierra_net.c 					dev_kfree_skb_any(skb);
skb               888 drivers/net/usb/sierra_net.c 					skb->data[skb->len] = 0;
skb               889 drivers/net/usb/sierra_net.c 					__skb_put(skb, 1);
skb               893 drivers/net/usb/sierra_net.c 			build_hip(skb->data, len, priv);
skb               894 drivers/net/usb/sierra_net.c 			return skb;
skb               909 drivers/net/usb/sierra_net.c 	dev_kfree_skb_any(skb);
skb              2160 drivers/net/usb/smsc75xx.c static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
skb              2165 drivers/net/usb/smsc75xx.c 		skb->ip_summed = CHECKSUM_NONE;
skb              2167 drivers/net/usb/smsc75xx.c 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT));
skb              2168 drivers/net/usb/smsc75xx.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb              2172 drivers/net/usb/smsc75xx.c static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb              2175 drivers/net/usb/smsc75xx.c 	if (skb->len < dev->net->hard_header_len)
skb              2178 drivers/net/usb/smsc75xx.c 	while (skb->len > 0) {
skb              2183 drivers/net/usb/smsc75xx.c 		rx_cmd_a = get_unaligned_le32(skb->data);
skb              2184 drivers/net/usb/smsc75xx.c 		skb_pull(skb, 4);
skb              2186 drivers/net/usb/smsc75xx.c 		rx_cmd_b = get_unaligned_le32(skb->data);
skb              2187 drivers/net/usb/smsc75xx.c 		skb_pull(skb, 4 + RXW_PADDING);
skb              2189 drivers/net/usb/smsc75xx.c 		packet = skb->data;
skb              2215 drivers/net/usb/smsc75xx.c 			if (skb->len == size) {
skb              2216 drivers/net/usb/smsc75xx.c 				smsc75xx_rx_csum_offload(dev, skb, rx_cmd_a,
skb              2219 drivers/net/usb/smsc75xx.c 				skb_trim(skb, skb->len - 4); /* remove fcs */
skb              2220 drivers/net/usb/smsc75xx.c 				skb->truesize = size + sizeof(struct sk_buff);
skb              2225 drivers/net/usb/smsc75xx.c 			ax_skb = skb_clone(skb, GFP_ATOMIC);
skb              2244 drivers/net/usb/smsc75xx.c 		skb_pull(skb, size);
skb              2247 drivers/net/usb/smsc75xx.c 		if (skb->len)
skb              2248 drivers/net/usb/smsc75xx.c 			skb_pull(skb, align_count);
skb              2255 drivers/net/usb/smsc75xx.c 					 struct sk_buff *skb, gfp_t flags)
skb              2260 drivers/net/usb/smsc75xx.c 	if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
skb              2261 drivers/net/usb/smsc75xx.c 		dev_kfree_skb_any(skb);
skb              2265 drivers/net/usb/smsc75xx.c 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
skb              2267 drivers/net/usb/smsc75xx.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              2270 drivers/net/usb/smsc75xx.c 	if (skb_is_gso(skb)) {
skb              2271 drivers/net/usb/smsc75xx.c 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_MSS_MIN);
skb              2279 drivers/net/usb/smsc75xx.c 	ptr = skb_push(skb, 8);
skb              2283 drivers/net/usb/smsc75xx.c 	return skb;
skb              1904 drivers/net/usb/smsc95xx.c static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
skb              1906 drivers/net/usb/smsc95xx.c 	skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
skb              1907 drivers/net/usb/smsc95xx.c 	skb->ip_summed = CHECKSUM_COMPLETE;
skb              1908 drivers/net/usb/smsc95xx.c 	skb_trim(skb, skb->len - 2);
skb              1911 drivers/net/usb/smsc95xx.c static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb              1914 drivers/net/usb/smsc95xx.c 	if (skb->len < dev->net->hard_header_len)
skb              1917 drivers/net/usb/smsc95xx.c 	while (skb->len > 0) {
skb              1923 drivers/net/usb/smsc95xx.c 		header = get_unaligned_le32(skb->data);
skb              1924 drivers/net/usb/smsc95xx.c 		skb_pull(skb, 4 + NET_IP_ALIGN);
skb              1925 drivers/net/usb/smsc95xx.c 		packet = skb->data;
skb              1956 drivers/net/usb/smsc95xx.c 			if (skb->len == size) {
skb              1958 drivers/net/usb/smsc95xx.c 					smsc95xx_rx_csum_offload(skb);
skb              1959 drivers/net/usb/smsc95xx.c 				skb_trim(skb, skb->len - 4); /* remove fcs */
skb              1960 drivers/net/usb/smsc95xx.c 				skb->truesize = size + sizeof(struct sk_buff);
skb              1965 drivers/net/usb/smsc95xx.c 			ax_skb = skb_clone(skb, GFP_ATOMIC);
skb              1983 drivers/net/usb/smsc95xx.c 		skb_pull(skb, size);
skb              1986 drivers/net/usb/smsc95xx.c 		if (skb->len)
skb              1987 drivers/net/usb/smsc95xx.c 			skb_pull(skb, align_count);
skb              1993 drivers/net/usb/smsc95xx.c static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
skb              1995 drivers/net/usb/smsc95xx.c 	u16 low_16 = (u16)skb_checksum_start_offset(skb);
skb              1996 drivers/net/usb/smsc95xx.c 	u16 high_16 = low_16 + skb->csum_offset;
skb              2008 drivers/net/usb/smsc95xx.c static bool smsc95xx_can_tx_checksum(struct sk_buff *skb)
skb              2010 drivers/net/usb/smsc95xx.c        unsigned int len = skb->len - skb_checksum_start_offset(skb);
skb              2012 drivers/net/usb/smsc95xx.c        if (skb->len <= 45)
skb              2014 drivers/net/usb/smsc95xx.c        return skb->csum_offset < (len - (4 + 1));
skb              2018 drivers/net/usb/smsc95xx.c 					 struct sk_buff *skb, gfp_t flags)
skb              2020 drivers/net/usb/smsc95xx.c 	bool csum = skb->ip_summed == CHECKSUM_PARTIAL;
skb              2026 drivers/net/usb/smsc95xx.c 	BUG_ON(skb_shinfo(skb)->nr_frags);
skb              2029 drivers/net/usb/smsc95xx.c 	if (skb_cow_head(skb, overhead)) {
skb              2033 drivers/net/usb/smsc95xx.c 		dev_kfree_skb_any(skb);
skb              2037 drivers/net/usb/smsc95xx.c 	tx_cmd_b = (u32)skb->len;
skb              2041 drivers/net/usb/smsc95xx.c 		if (!smsc95xx_can_tx_checksum(skb)) {
skb              2044 drivers/net/usb/smsc95xx.c 			long csstart = skb_checksum_start_offset(skb);
skb              2045 drivers/net/usb/smsc95xx.c 			__wsum calc = csum_partial(skb->data + csstart,
skb              2046 drivers/net/usb/smsc95xx.c 				skb->len - csstart, 0);
skb              2047 drivers/net/usb/smsc95xx.c 			*((__sum16 *)(skb->data + csstart
skb              2048 drivers/net/usb/smsc95xx.c 				+ skb->csum_offset)) = csum_fold(calc);
skb              2052 drivers/net/usb/smsc95xx.c 			u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
skb              2053 drivers/net/usb/smsc95xx.c 			ptr = skb_push(skb, 4);
skb              2062 drivers/net/usb/smsc95xx.c 	ptr = skb_push(skb, 8);
skb              2066 drivers/net/usb/smsc95xx.c 	return skb;
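
When the hardware limits bite (above: frames of 45 bytes or less, or a csum_offset the preamble cannot express), smsc95xx checksums in software instead of dropping the packet. The core of that fallback works for any CHECKSUM_PARTIAL skb with linear data:

	#include <linux/skbuff.h>
	#include <net/checksum.h>

	static void demo_sw_checksum(struct sk_buff *skb)
	{
		unsigned int start = skb_checksum_start_offset(skb);
		__wsum sum = csum_partial(skb->data + start, skb->len - start, 0);

		/* Fold and store the sum where the stack said it belongs. */
		*(__sum16 *)(skb->data + start + skb->csum_offset) = csum_fold(sum);
		skb->ip_summed = CHECKSUM_NONE;	/* offload no longer wanted */
	}
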
skb               378 drivers/net/usb/sr9700.c static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb               400 drivers/net/usb/sr9700.c 	if (unlikely(skb->len < SR_RX_OVERHEAD)) {
skb               406 drivers/net/usb/sr9700.c 	while (skb->len > SR_RX_OVERHEAD) {
skb               407 drivers/net/usb/sr9700.c 		if (skb->data[0] != 0x40)
skb               411 drivers/net/usb/sr9700.c 		len = (skb->data[1] | (skb->data[2] << 8)) - 4;
skb               417 drivers/net/usb/sr9700.c 		if (skb->len == (len + SR_RX_OVERHEAD))	{
skb               418 drivers/net/usb/sr9700.c 			skb_pull(skb, 3);
skb               419 drivers/net/usb/sr9700.c 			skb->len = len;
skb               420 drivers/net/usb/sr9700.c 			skb_set_tail_pointer(skb, len);
skb               421 drivers/net/usb/sr9700.c 			skb->truesize = len + sizeof(struct sk_buff);
skb               426 drivers/net/usb/sr9700.c 		sr_skb = skb_clone(skb, GFP_ATOMIC);
skb               431 drivers/net/usb/sr9700.c 		sr_skb->data = skb->data + 3;
skb               436 drivers/net/usb/sr9700.c 		skb_pull(skb, len + SR_RX_OVERHEAD);
skb               442 drivers/net/usb/sr9700.c static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb               458 drivers/net/usb/sr9700.c 	len = skb->len;
skb               460 drivers/net/usb/sr9700.c 	if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
skb               461 drivers/net/usb/sr9700.c 		dev_kfree_skb_any(skb);
skb               465 drivers/net/usb/sr9700.c 	__skb_push(skb, SR_TX_OVERHEAD);
skb               470 drivers/net/usb/sr9700.c 	if ((skb->len % dev->maxpacket) == 0)
skb               473 drivers/net/usb/sr9700.c 	skb->data[0] = len;
skb               474 drivers/net/usb/sr9700.c 	skb->data[1] = len >> 8;
skb               476 drivers/net/usb/sr9700.c 	return skb;
skb                62 drivers/net/usb/sr9800.c static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb                67 drivers/net/usb/sr9800.c 	if (skb->len < dev->net->hard_header_len)
skb                70 drivers/net/usb/sr9800.c 	while (offset + sizeof(u32) < skb->len) {
skb                73 drivers/net/usb/sr9800.c 		u32 header = get_unaligned_le32(skb->data + offset);
skb                85 drivers/net/usb/sr9800.c 		    (size + offset > skb->len)) {
skb                95 drivers/net/usb/sr9800.c 		memcpy(sr_skb->data, skb->data + offset, size);
skb               101 drivers/net/usb/sr9800.c 	if (skb->len != offset) {
skb               103 drivers/net/usb/sr9800.c 			   skb->len);
skb               110 drivers/net/usb/sr9800.c static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb               113 drivers/net/usb/sr9800.c 	int headroom = skb_headroom(skb);
skb               114 drivers/net/usb/sr9800.c 	int tailroom = skb_tailroom(skb);
skb               120 drivers/net/usb/sr9800.c 	padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
skb               122 drivers/net/usb/sr9800.c 	if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) {
skb               124 drivers/net/usb/sr9800.c 			skb->data = memmove(skb->head + 4, skb->data,
skb               125 drivers/net/usb/sr9800.c 					    skb->len);
skb               126 drivers/net/usb/sr9800.c 			skb_set_tail_pointer(skb, skb->len);
skb               130 drivers/net/usb/sr9800.c 		skb2 = skb_copy_expand(skb, 4, padlen, flags);
skb               131 drivers/net/usb/sr9800.c 		dev_kfree_skb_any(skb);
skb               132 drivers/net/usb/sr9800.c 		skb = skb2;
skb               133 drivers/net/usb/sr9800.c 		if (!skb)
skb               137 drivers/net/usb/sr9800.c 	ptr = skb_push(skb, 4);
skb               138 drivers/net/usb/sr9800.c 	packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
skb               142 drivers/net/usb/sr9800.c 		put_unaligned_le32(padbytes, skb_tail_pointer(skb));
skb               143 drivers/net/usb/sr9800.c 		skb_put(skb, sizeof(padbytes));
skb               146 drivers/net/usb/sr9800.c 	usbnet_set_skb_tx_stats(skb, 1, 0);
skb               147 drivers/net/usb/sr9800.c 	return skb;
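
The 32-bit word sr_tx_fixup pushes encodes the payload length twice, plain in the low halfword and ones'-complemented in the high halfword, so the receiving end can sanity-check its framing; ASIX-derived parts use the same scheme. The encoding and its check in isolation:

	#include <linux/types.h>

	static u32 demo_encode_frame_len(u16 len)
	{
		/* complement in the high halfword, length in the low one */
		return ((u32)(len ^ 0xffff) << 16) | len;
	}

	static bool demo_frame_len_ok(u32 header)
	{
		u16 len = header & 0xffff;

		return (u16)(header >> 16) == (u16)~len;
	}
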
skb               308 drivers/net/usb/usbnet.c void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
skb               315 drivers/net/usb/usbnet.c 		skb_queue_tail(&dev->rxq_pause, skb);
skb               320 drivers/net/usb/usbnet.c 	if (skb->protocol == 0)
skb               321 drivers/net/usb/usbnet.c 		skb->protocol = eth_type_trans (skb, dev->net);
skb               325 drivers/net/usb/usbnet.c 	stats64->rx_bytes += skb->len;
skb               329 drivers/net/usb/usbnet.c 		  skb->len + sizeof (struct ethhdr), skb->protocol);
skb               330 drivers/net/usb/usbnet.c 	memset (skb->cb, 0, sizeof (struct skb_data));
skb               332 drivers/net/usb/usbnet.c 	if (skb_defer_rx_timestamp(skb))
skb               335 drivers/net/usb/usbnet.c 	status = netif_rx (skb);
skb               423 drivers/net/usb/usbnet.c static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
skb               428 drivers/net/usb/usbnet.c 	struct skb_data *entry = (struct skb_data *) skb->cb;
skb               433 drivers/net/usb/usbnet.c 	__skb_unlink(skb, list);
skb               441 drivers/net/usb/usbnet.c 	__skb_queue_tail(&dev->done, skb);
skb               470 drivers/net/usb/usbnet.c 	struct sk_buff		*skb;
skb               483 drivers/net/usb/usbnet.c 		skb = __netdev_alloc_skb(dev->net, size, flags);
skb               485 drivers/net/usb/usbnet.c 		skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
skb               486 drivers/net/usb/usbnet.c 	if (!skb) {
skb               493 drivers/net/usb/usbnet.c 	entry = (struct skb_data *) skb->cb;
skb               499 drivers/net/usb/usbnet.c 		skb->data, size, rx_complete, skb);
skb               528 drivers/net/usb/usbnet.c 			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
skb               536 drivers/net/usb/usbnet.c 		dev_kfree_skb_any (skb);
skb               545 drivers/net/usb/usbnet.c static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
skb               548 drivers/net/usb/usbnet.c 	    !dev->driver_info->rx_fixup (dev, skb)) {
skb               560 drivers/net/usb/usbnet.c 	if (skb->len < ETH_HLEN) {
skb               563 drivers/net/usb/usbnet.c 		netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
skb               565 drivers/net/usb/usbnet.c 		usbnet_skb_return(dev, skb);
skb               570 drivers/net/usb/usbnet.c 	skb_queue_tail(&dev->done, skb);
skb               577 drivers/net/usb/usbnet.c 	struct sk_buff		*skb = (struct sk_buff *) urb->context;
skb               578 drivers/net/usb/usbnet.c 	struct skb_data		*entry = (struct skb_data *) skb->cb;
skb               583 drivers/net/usb/usbnet.c 	skb_put (skb, urb->actual_length);
skb               651 drivers/net/usb/usbnet.c 	state = defer_bh(dev, skb, &dev->rxq, state);
skb               677 drivers/net/usb/usbnet.c 	struct sk_buff *skb;
skb               682 drivers/net/usb/usbnet.c 	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
skb               683 drivers/net/usb/usbnet.c 		usbnet_skb_return(dev, skb);
skb               707 drivers/net/usb/usbnet.c 	struct sk_buff		*skb;
skb               716 drivers/net/usb/usbnet.c 		skb_queue_walk(q, skb) {
skb               717 drivers/net/usb/usbnet.c 			entry = (struct skb_data *) skb->cb;
skb              1243 drivers/net/usb/usbnet.c 	struct sk_buff		*skb = (struct sk_buff *) urb->context;
skb              1244 drivers/net/usb/usbnet.c 	struct skb_data		*entry = (struct skb_data *) skb->cb;
skb              1291 drivers/net/usb/usbnet.c 	(void) defer_bh(dev, skb, &dev->txq, tx_done);
skb              1313 drivers/net/usb/usbnet.c static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
skb              1318 drivers/net/usb/usbnet.c 	num_sgs = skb_shinfo(skb)->nr_frags + 1;
skb              1331 drivers/net/usb/usbnet.c 	sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
skb              1332 drivers/net/usb/usbnet.c 	total_len += skb_headlen(skb);
skb              1334 drivers/net/usb/usbnet.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1335 drivers/net/usb/usbnet.c 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
skb              1346 drivers/net/usb/usbnet.c netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
skb              1357 drivers/net/usb/usbnet.c 	if (skb)
skb              1358 drivers/net/usb/usbnet.c 		skb_tx_timestamp(skb);
skb              1363 drivers/net/usb/usbnet.c 		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
skb              1364 drivers/net/usb/usbnet.c 		if (!skb) {
skb              1378 drivers/net/usb/usbnet.c 	entry = (struct skb_data *) skb->cb;
skb              1383 drivers/net/usb/usbnet.c 			skb->data, skb->len, tx_complete, skb);
skb              1385 drivers/net/usb/usbnet.c 		if (build_dma_sg(skb, urb) < 0)
skb              1401 drivers/net/usb/usbnet.c 				if (skb_tailroom(skb) && !urb->num_sgs) {
skb              1402 drivers/net/usb/usbnet.c 					skb->data[skb->len] = 0;
skb              1403 drivers/net/usb/usbnet.c 					__skb_put(skb, 1);
skb              1422 drivers/net/usb/usbnet.c 		usbnet_set_skb_tx_stats(skb, 1, length);
skb              1464 drivers/net/usb/usbnet.c 		__usbnet_queue_skb(&dev->txq, skb, tx_start);
skb              1475 drivers/net/usb/usbnet.c 		if (skb)
skb              1476 drivers/net/usb/usbnet.c 			dev_kfree_skb_any (skb);
skb              1483 drivers/net/usb/usbnet.c 			  "> tx, len %u, type 0x%x\n", length, skb->protocol);
skb              1520 drivers/net/usb/usbnet.c 	struct sk_buff		*skb;
skb              1523 drivers/net/usb/usbnet.c 	while ((skb = skb_dequeue (&dev->done))) {
skb              1524 drivers/net/usb/usbnet.c 		entry = (struct skb_data *) skb->cb;
skb              1528 drivers/net/usb/usbnet.c 			rx_process (dev, skb);
skb              1535 drivers/net/usb/usbnet.c 			dev_kfree_skb (skb);
skb              1884 drivers/net/usb/usbnet.c 	struct sk_buff          *skb;
skb              1895 drivers/net/usb/usbnet.c 			skb = (struct sk_buff *)res->context;
skb              1898 drivers/net/usb/usbnet.c 				dev_kfree_skb_any(skb);
skb              1904 drivers/net/usb/usbnet.c 				__skb_queue_tail(&dev->txq, skb);
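
Two usbnet details above are worth isolating. First, a bulk transfer whose length is an exact multiple of the endpoint's maxpacket would need a zero-length packet terminator that some devices mishandle, so usbnet_start_xmit appends one pad byte when tailroom allows. Second, per-skb URB state lives in the 48-byte skb->cb scratch area instead of a separate allocation. Sketches of both, with hypothetical struct and function names:

	#include <linux/build_bug.h>
	#include <linux/skbuff.h>
	#include <linux/usb.h>

	static void demo_pad_zlp(struct sk_buff *skb, unsigned int maxpacket)
	{
		if (skb->len % maxpacket == 0 && skb_tailroom(skb)) {
			skb->data[skb->len] = 0;	/* deterministic pad byte */
			__skb_put(skb, 1);
		}
	}

	struct demo_skb_data {			/* overlaid on skb->cb */
		struct urb *urb;
		unsigned long length;
	};

	static void demo_tag_skb(struct sk_buff *skb, struct urb *urb)
	{
		struct demo_skb_data *entry = (struct demo_skb_data *)skb->cb;

		BUILD_BUG_ON(sizeof(*entry) > sizeof(skb->cb));
		entry->urb = urb;
		entry->length = skb->len;
	}
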
skb                45 drivers/net/usb/zaurus.c zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
skb                51 drivers/net/usb/zaurus.c 	if (!skb_cloned(skb)) {
skb                52 drivers/net/usb/zaurus.c 		int	tailroom = skb_tailroom(skb);
skb                56 drivers/net/usb/zaurus.c 	skb2 = skb_copy_expand(skb, 0, 4 + padlen, flags);
skb                57 drivers/net/usb/zaurus.c 	dev_kfree_skb_any(skb);
skb                58 drivers/net/usb/zaurus.c 	skb = skb2;
skb                59 drivers/net/usb/zaurus.c 	if (skb) {
skb                62 drivers/net/usb/zaurus.c 		fcs = crc32_le(~0, skb->data, skb->len);
skb                65 drivers/net/usb/zaurus.c 		skb_put_u8(skb, fcs & 0xff);
skb                66 drivers/net/usb/zaurus.c 		skb_put_u8(skb, (fcs >> 8) & 0xff);
skb                67 drivers/net/usb/zaurus.c 		skb_put_u8(skb, (fcs >> 16) & 0xff);
skb                68 drivers/net/usb/zaurus.c 		skb_put_u8(skb, (fcs >> 24) & 0xff);
skb                70 drivers/net/usb/zaurus.c 	return skb;
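
zaurus_tx_fixup appends an Ethernet-style FCS with crc32_le(); the bytes go out least-significant first, and the standard CRC-32 final complement is assumed here since that step falls between the lines quoted above. In isolation, for a caller that guarantees four bytes of tailroom:

	#include <linux/crc32.h>
	#include <linux/skbuff.h>

	static void demo_append_fcs(struct sk_buff *skb)
	{
		u32 fcs = ~crc32_le(~0, skb->data, skb->len);	/* final XOR assumed */

		skb_put_u8(skb, fcs & 0xff);		/* LSB first, like Ethernet */
		skb_put_u8(skb, (fcs >> 8) & 0xff);
		skb_put_u8(skb, (fcs >> 16) & 0xff);
		skb_put_u8(skb, (fcs >> 24) & 0xff);
	}
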
skb               218 drivers/net/veth.c static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
skb               220 drivers/net/veth.c 	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
skb               221 drivers/net/veth.c 		dev_kfree_skb_any(skb);
skb               228 drivers/net/veth.c static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
skb               231 drivers/net/veth.c 	return __dev_forward_skb(dev, skb) ?: xdp ?
skb               232 drivers/net/veth.c 		veth_xdp_rx(rq, skb) :
skb               233 drivers/net/veth.c 		netif_rx(skb);
skb               236 drivers/net/veth.c static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb               241 drivers/net/veth.c 	int length = skb->len;
skb               248 drivers/net/veth.c 		kfree_skb(skb);
skb               253 drivers/net/veth.c 	rxq = skb_get_queue_mapping(skb);
skb               258 drivers/net/veth.c 			skb_record_rx_queue(skb, rxq);
skb               261 drivers/net/veth.c 	skb_tx_timestamp(skb);
skb               262 drivers/net/veth.c 	if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
skb               371 drivers/net/veth.c 	struct sk_buff *skb;
skb               377 drivers/net/veth.c 	skb = build_skb(head, buflen);
skb               378 drivers/net/veth.c 	if (!skb)
skb               381 drivers/net/veth.c 	skb_reserve(skb, headroom);
skb               382 drivers/net/veth.c 	skb_put(skb, len);
skb               384 drivers/net/veth.c 	return skb;
skb               518 drivers/net/veth.c 	struct sk_buff *skb;
skb               575 drivers/net/veth.c 	skb = veth_build_skb(head, headroom, len, 0);
skb               576 drivers/net/veth.c 	if (!skb) {
skb               583 drivers/net/veth.c 	skb->protocol = eth_type_trans(skb, rq->dev);
skb               585 drivers/net/veth.c 	return skb;
skb               593 drivers/net/veth.c static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
skb               603 drivers/net/veth.c 	skb_orphan(skb);
skb               612 drivers/net/veth.c 	mac_len = skb->data - skb_mac_header(skb);
skb               613 drivers/net/veth.c 	pktlen = skb->len + mac_len;
skb               614 drivers/net/veth.c 	headroom = skb_headroom(skb) - mac_len;
skb               616 drivers/net/veth.c 	if (skb_shared(skb) || skb_head_is_locked(skb) ||
skb               617 drivers/net/veth.c 	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
skb               634 drivers/net/veth.c 		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
skb               640 drivers/net/veth.c 				      VETH_XDP_HEADROOM + mac_len, skb->len,
skb               647 drivers/net/veth.c 		skb_copy_header(nskb, skb);
skb               648 drivers/net/veth.c 		head_off = skb_headroom(nskb) - skb_headroom(skb);
skb               650 drivers/net/veth.c 		consume_skb(skb);
skb               651 drivers/net/veth.c 		skb = nskb;
skb               654 drivers/net/veth.c 	xdp.data_hard_start = skb->head;
skb               655 drivers/net/veth.c 	xdp.data = skb_mac_header(skb);
skb               669 drivers/net/veth.c 		consume_skb(skb);
skb               680 drivers/net/veth.c 		consume_skb(skb);
skb               701 drivers/net/veth.c 		__skb_push(skb, off);
skb               703 drivers/net/veth.c 		__skb_pull(skb, -off);
skb               704 drivers/net/veth.c 	skb->mac_header -= delta;
skb               707 drivers/net/veth.c 		__skb_put(skb, off);
skb               708 drivers/net/veth.c 	skb->protocol = eth_type_trans(skb, rq->dev);
skb               712 drivers/net/veth.c 		skb_metadata_set(skb, metalen);
skb               714 drivers/net/veth.c 	return skb;
skb               717 drivers/net/veth.c 	kfree_skb(skb);
skb               734 drivers/net/veth.c 		struct sk_buff *skb;
skb               743 drivers/net/veth.c 			skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one, bq);
skb               745 drivers/net/veth.c 			skb = ptr;
skb               746 drivers/net/veth.c 			bytes += skb->len;
skb               747 drivers/net/veth.c 			skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one, bq);
skb               751 drivers/net/veth.c 		if (skb)
skb               752 drivers/net/veth.c 			napi_gro_receive(&rq->xdp_napi, skb);
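
veth_build_skb above uses build_skb() to wrap an already-filled buffer in an sk_buff without copying; the caller must have budgeted both the headroom and the skb_shared_info tail when sizing the allocation. A sketch of that sizing, assuming head points at a buffer of exactly the computed buflen:

	#include <linux/skbuff.h>

	static struct sk_buff *demo_wrap_buffer(void *head, int headroom, int len)
	{
		int buflen = SKB_DATA_ALIGN(headroom + len) +
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		struct sk_buff *skb = build_skb(head, buflen);	/* zero-copy wrap */

		if (!skb)
			return NULL;
		skb_reserve(skb, headroom);	/* protocol headers land here */
		skb_put(skb, len);		/* payload is already in place */
		return skb;
	}
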
skb               282 drivers/net/virtio_net.c static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
skb               284 drivers/net/virtio_net.c 	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
skb               376 drivers/net/virtio_net.c 	struct sk_buff *skb;
skb               384 drivers/net/virtio_net.c 	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
skb               385 drivers/net/virtio_net.c 	if (unlikely(!skb))
skb               388 drivers/net/virtio_net.c 	hdr = skb_vnet_hdr(skb);
skb               404 drivers/net/virtio_net.c 	if (copy > skb_tailroom(skb))
skb               405 drivers/net/virtio_net.c 		copy = skb_tailroom(skb);
skb               406 drivers/net/virtio_net.c 	skb_put_data(skb, p, copy);
skb               413 drivers/net/virtio_net.c 			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
skb               416 drivers/net/virtio_net.c 		return skb;
skb               426 drivers/net/virtio_net.c 		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
skb               427 drivers/net/virtio_net.c 		dev_kfree_skb(skb);
skb               433 drivers/net/virtio_net.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
skb               443 drivers/net/virtio_net.c 	return skb;
skb               524 drivers/net/virtio_net.c 			struct sk_buff *skb = ptr;
skb               526 drivers/net/virtio_net.c 			bytes += skb->len;
skb               527 drivers/net/virtio_net.c 			napi_consume_skb(skb, false);
skb               636 drivers/net/virtio_net.c 	struct sk_buff *skb;
skb               731 drivers/net/virtio_net.c 	skb = build_skb(buf, buflen);
skb               732 drivers/net/virtio_net.c 	if (!skb) {
skb               736 drivers/net/virtio_net.c 	skb_reserve(skb, headroom - delta);
skb               737 drivers/net/virtio_net.c 	skb_put(skb, len);
skb               740 drivers/net/virtio_net.c 		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
skb               744 drivers/net/virtio_net.c 	return skb;
skb               763 drivers/net/virtio_net.c 	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
skb               767 drivers/net/virtio_net.c 	if (unlikely(!skb))
skb               770 drivers/net/virtio_net.c 	return skb;
skb              1016 drivers/net/virtio_net.c 	struct sk_buff *skb;
skb              1033 drivers/net/virtio_net.c 		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
skb              1036 drivers/net/virtio_net.c 		skb = receive_big(dev, vi, rq, buf, len, stats);
skb              1038 drivers/net/virtio_net.c 		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
skb              1040 drivers/net/virtio_net.c 	if (unlikely(!skb))
skb              1043 drivers/net/virtio_net.c 	hdr = skb_vnet_hdr(skb);
skb              1046 drivers/net/virtio_net.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1048 drivers/net/virtio_net.c 	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
skb              1056 drivers/net/virtio_net.c 	skb_record_rx_queue(skb, vq2rxq(rq->vq));
skb              1057 drivers/net/virtio_net.c 	skb->protocol = eth_type_trans(skb, dev);
skb              1059 drivers/net/virtio_net.c 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
skb              1061 drivers/net/virtio_net.c 	napi_gro_receive(&rq->napi, skb);
skb              1066 drivers/net/virtio_net.c 	dev_kfree_skb(skb);
skb              1363 drivers/net/virtio_net.c 			struct sk_buff *skb = ptr;
skb              1365 drivers/net/virtio_net.c 			pr_debug("Sent skb %p\n", skb);
skb              1367 drivers/net/virtio_net.c 			bytes += skb->len;
skb              1368 drivers/net/virtio_net.c 			napi_consume_skb(skb, in_napi);
skb              1506 drivers/net/virtio_net.c static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
skb              1509 drivers/net/virtio_net.c 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
skb              1515 drivers/net/virtio_net.c 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
skb              1518 drivers/net/virtio_net.c 		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
skb              1519 drivers/net/virtio_net.c 		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
skb              1523 drivers/net/virtio_net.c 		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
skb              1525 drivers/net/virtio_net.c 		hdr = skb_vnet_hdr(skb);
skb              1527 drivers/net/virtio_net.c 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
skb              1535 drivers/net/virtio_net.c 	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
skb              1537 drivers/net/virtio_net.c 		__skb_push(skb, hdr_len);
skb              1538 drivers/net/virtio_net.c 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
skb              1542 drivers/net/virtio_net.c 		__skb_pull(skb, hdr_len);
skb              1545 drivers/net/virtio_net.c 		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
skb              1550 drivers/net/virtio_net.c 	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
skb              1553 drivers/net/virtio_net.c static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1556 drivers/net/virtio_net.c 	int qnum = skb_get_queue_mapping(skb);
skb              1570 drivers/net/virtio_net.c 	skb_tx_timestamp(skb);
skb              1573 drivers/net/virtio_net.c 	err = xmit_skb(sq, skb);
skb              1583 drivers/net/virtio_net.c 		dev_kfree_skb_any(skb);
skb              1589 drivers/net/virtio_net.c 		skb_orphan(skb);
skb              1590 drivers/net/virtio_net.c 		nf_reset_ct(skb);
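
xmit_skb above saves a scatterlist entry when it can lay the virtio header directly in front of the packet data; that is only legal when the head is private, roomy enough, and keeps the header aligned. The test in isolation:

	#include <linux/skbuff.h>
	#include <linux/virtio_net.h>

	static bool demo_can_push_hdr(const struct sk_buff *skb, unsigned int hdr_len)
	{
		return !((unsigned long)skb->data &
			 (__alignof__(struct virtio_net_hdr_mrg_rxbuf) - 1)) &&
		       !skb_header_cloned(skb) &&	/* head not shared */
		       skb_headroom(skb) >= hdr_len;	/* room to prepend */
	}
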
skb               332 drivers/net/vmxnet3/vmxnet3_drv.c 	struct sk_buff *skb;
skb               339 drivers/net/vmxnet3/vmxnet3_drv.c 	skb = tq->buf_info[eop_idx].skb;
skb               340 drivers/net/vmxnet3/vmxnet3_drv.c 	BUG_ON(skb == NULL);
skb               341 drivers/net/vmxnet3/vmxnet3_drv.c 	tq->buf_info[eop_idx].skb = NULL;
skb               358 drivers/net/vmxnet3/vmxnet3_drv.c 	dev_kfree_skb_any(skb);
skb               411 drivers/net/vmxnet3/vmxnet3_drv.c 		if (tbi->skb) {
skb               412 drivers/net/vmxnet3/vmxnet3_drv.c 			dev_kfree_skb_any(tbi->skb);
skb               413 drivers/net/vmxnet3/vmxnet3_drv.c 			tbi->skb = NULL;
skb               420 drivers/net/vmxnet3/vmxnet3_drv.c 		BUG_ON(tq->buf_info[i].skb != NULL ||
skb               582 drivers/net/vmxnet3/vmxnet3_drv.c 			if (rbi->skb == NULL) {
skb               583 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
skb               586 drivers/net/vmxnet3/vmxnet3_drv.c 				if (unlikely(rbi->skb == NULL)) {
skb               593 drivers/net/vmxnet3/vmxnet3_drv.c 						rbi->skb->data, rbi->len,
skb               597 drivers/net/vmxnet3/vmxnet3_drv.c 					dev_kfree_skb_any(rbi->skb);
skb               657 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
skb               660 drivers/net/vmxnet3/vmxnet3_drv.c 	skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
skb               662 drivers/net/vmxnet3/vmxnet3_drv.c 	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
skb               667 drivers/net/vmxnet3/vmxnet3_drv.c 	skb->data_len += rcd->len;
skb               668 drivers/net/vmxnet3/vmxnet3_drv.c 	skb->truesize += PAGE_SIZE;
skb               669 drivers/net/vmxnet3/vmxnet3_drv.c 	skb_shinfo(skb)->nr_frags++;
skb               674 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
skb               684 drivers/net/vmxnet3/vmxnet3_drv.c 	BUG_ON(ctx->copy_size > skb_headlen(skb));
skb               715 drivers/net/vmxnet3/vmxnet3_drv.c 	len = skb_headlen(skb) - ctx->copy_size;
skb               731 drivers/net/vmxnet3/vmxnet3_drv.c 				skb->data + buf_offset, buf_size,
skb               756 drivers/net/vmxnet3/vmxnet3_drv.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               757 drivers/net/vmxnet3/vmxnet3_drv.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               802 drivers/net/vmxnet3/vmxnet3_drv.c 	tbi->skb = skb;
skb               838 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
skb               845 drivers/net/vmxnet3/vmxnet3_drv.c 		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
skb               846 drivers/net/vmxnet3/vmxnet3_drv.c 		ctx->l4_hdr_size = tcp_hdrlen(skb);
skb               849 drivers/net/vmxnet3/vmxnet3_drv.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               850 drivers/net/vmxnet3/vmxnet3_drv.c 			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
skb               853 drivers/net/vmxnet3/vmxnet3_drv.c 				const struct iphdr *iph = ip_hdr(skb);
skb               857 drivers/net/vmxnet3/vmxnet3_drv.c 				const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               864 drivers/net/vmxnet3/vmxnet3_drv.c 				ctx->l4_hdr_size = tcp_hdrlen(skb);
skb               875 drivers/net/vmxnet3/vmxnet3_drv.c 					 ctx->l4_hdr_size, skb->len);
skb               882 drivers/net/vmxnet3/vmxnet3_drv.c 					       skb_headlen(skb));
skb               885 drivers/net/vmxnet3/vmxnet3_drv.c 		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
skb               886 drivers/net/vmxnet3/vmxnet3_drv.c 			ctx->copy_size = skb->len;
skb               889 drivers/net/vmxnet3/vmxnet3_drv.c 		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
skb               915 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
skb               925 drivers/net/vmxnet3/vmxnet3_drv.c 	memcpy(tdd->data, skb->data, ctx->copy_size);
skb               933 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_prepare_tso(struct sk_buff *skb,
skb               936 drivers/net/vmxnet3/vmxnet3_drv.c 	struct tcphdr *tcph = tcp_hdr(skb);
skb               939 drivers/net/vmxnet3/vmxnet3_drv.c 		struct iphdr *iph = ip_hdr(skb);
skb               945 drivers/net/vmxnet3/vmxnet3_drv.c 		struct ipv6hdr *iph = ipv6_hdr(skb);
skb               952 drivers/net/vmxnet3/vmxnet3_drv.c static int txd_estimate(const struct sk_buff *skb)
skb               954 drivers/net/vmxnet3/vmxnet3_drv.c 	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
skb               957 drivers/net/vmxnet3/vmxnet3_drv.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               958 drivers/net/vmxnet3/vmxnet3_drv.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               979 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
skb               994 drivers/net/vmxnet3/vmxnet3_drv.c 	count = txd_estimate(skb);
skb               996 drivers/net/vmxnet3/vmxnet3_drv.c 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
skb               997 drivers/net/vmxnet3/vmxnet3_drv.c 	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
skb               999 drivers/net/vmxnet3/vmxnet3_drv.c 	ctx.mss = skb_shinfo(skb)->gso_size;
skb              1001 drivers/net/vmxnet3/vmxnet3_drv.c 		if (skb_header_cloned(skb)) {
skb              1002 drivers/net/vmxnet3/vmxnet3_drv.c 			if (unlikely(pskb_expand_head(skb, 0, 0,
skb              1009 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_prepare_tso(skb, &ctx);
skb              1016 drivers/net/vmxnet3/vmxnet3_drv.c 			if (skb_linearize(skb) != 0) {
skb              1023 drivers/net/vmxnet3/vmxnet3_drv.c 			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
skb              1027 drivers/net/vmxnet3/vmxnet3_drv.c 	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
skb              1038 drivers/net/vmxnet3/vmxnet3_drv.c 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1040 drivers/net/vmxnet3/vmxnet3_drv.c 					     skb->csum_offset >
skb              1067 drivers/net/vmxnet3/vmxnet3_drv.c 	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
skb              1070 drivers/net/vmxnet3/vmxnet3_drv.c 	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
skb              1089 drivers/net/vmxnet3/vmxnet3_drv.c 		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
skb              1091 drivers/net/vmxnet3/vmxnet3_drv.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1095 drivers/net/vmxnet3/vmxnet3_drv.c 					    skb->csum_offset;
skb              1105 drivers/net/vmxnet3/vmxnet3_drv.c 	if (skb_vlan_tag_present(skb)) {
skb              1107 drivers/net/vmxnet3/vmxnet3_drv.c 		gdesc->txd.tci = skb_vlan_tag_get(skb);
skb              1147 drivers/net/vmxnet3/vmxnet3_drv.c 	dev_kfree_skb_any(skb);
skb              1153 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
skb              1157 drivers/net/vmxnet3/vmxnet3_drv.c 	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
skb              1158 drivers/net/vmxnet3/vmxnet3_drv.c 	return vmxnet3_tq_xmit(skb,
skb              1159 drivers/net/vmxnet3/vmxnet3_drv.c 			       &adapter->tx_queue[skb->queue_mapping],
skb              1166 drivers/net/vmxnet3/vmxnet3_drv.c 		struct sk_buff *skb,
skb              1173 drivers/net/vmxnet3/vmxnet3_drv.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1178 drivers/net/vmxnet3/vmxnet3_drv.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1183 drivers/net/vmxnet3/vmxnet3_drv.c 				skb->csum = htons(gdesc->rcd.csum);
skb              1184 drivers/net/vmxnet3/vmxnet3_drv.c 				skb->ip_summed = CHECKSUM_PARTIAL;
skb              1186 drivers/net/vmxnet3/vmxnet3_drv.c 				skb_checksum_none_assert(skb);
skb              1190 drivers/net/vmxnet3/vmxnet3_drv.c 		skb_checksum_none_assert(skb);
skb              1215 drivers/net/vmxnet3/vmxnet3_drv.c 	if (ctx->skb)
skb              1216 drivers/net/vmxnet3/vmxnet3_drv.c 		dev_kfree_skb_irq(ctx->skb);
skb              1218 drivers/net/vmxnet3/vmxnet3_drv.c 	ctx->skb = NULL;
skb              1223 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
skb              1237 drivers/net/vmxnet3/vmxnet3_drv.c 	maplen = skb_headlen(skb);
skb              1241 drivers/net/vmxnet3/vmxnet3_drv.c 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
skb              1242 drivers/net/vmxnet3/vmxnet3_drv.c 	    skb->protocol == cpu_to_be16(ETH_P_8021AD))
skb              1247 drivers/net/vmxnet3/vmxnet3_drv.c 	hdr.eth = eth_hdr(skb);
skb              1297 drivers/net/vmxnet3/vmxnet3_drv.c 		struct sk_buff *skb, *new_skb = NULL;
skb              1342 drivers/net/vmxnet3/vmxnet3_drv.c 			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
skb              1354 drivers/net/vmxnet3/vmxnet3_drv.c 			ctx->skb = rbi->skb;
skb              1366 drivers/net/vmxnet3/vmxnet3_drv.c 				ctx->skb = NULL;
skb              1377 drivers/net/vmxnet3/vmxnet3_drv.c 				ctx->skb = new_skb;
skb              1382 drivers/net/vmxnet3/vmxnet3_drv.c 				ctx->skb = rbi->skb;
skb              1396 drivers/net/vmxnet3/vmxnet3_drv.c 					ctx->skb = NULL;
skb              1408 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->skb = new_skb;
skb              1417 drivers/net/vmxnet3/vmxnet3_drv.c 				skb_set_hash(ctx->skb,
skb              1421 drivers/net/vmxnet3/vmxnet3_drv.c 			skb_put(ctx->skb, rcd->len);
skb              1437 drivers/net/vmxnet3/vmxnet3_drv.c 			BUG_ON(ctx->skb == NULL && !skip_page_frags);
skb              1458 drivers/net/vmxnet3/vmxnet3_drv.c 					dev_kfree_skb(ctx->skb);
skb              1459 drivers/net/vmxnet3/vmxnet3_drv.c 					ctx->skb = NULL;
skb              1471 drivers/net/vmxnet3/vmxnet3_drv.c 					dev_kfree_skb(ctx->skb);
skb              1472 drivers/net/vmxnet3/vmxnet3_drv.c 					ctx->skb = NULL;
skb              1481 drivers/net/vmxnet3/vmxnet3_drv.c 				vmxnet3_append_frag(ctx->skb, rcd, rbi);
skb              1492 drivers/net/vmxnet3/vmxnet3_drv.c 		skb = ctx->skb;
skb              1495 drivers/net/vmxnet3/vmxnet3_drv.c 			skb->len += skb->data_len;
skb              1497 drivers/net/vmxnet3/vmxnet3_drv.c 			vmxnet3_rx_csum(adapter, skb,
skb              1499 drivers/net/vmxnet3/vmxnet3_drv.c 			skb->protocol = eth_type_trans(skb, adapter->netdev);
skb              1505 drivers/net/vmxnet3/vmxnet3_drv.c 				skb_shinfo(skb)->gso_type = rcd->v4 ?
skb              1507 drivers/net/vmxnet3/vmxnet3_drv.c 				skb_shinfo(skb)->gso_size = mss;
skb              1508 drivers/net/vmxnet3/vmxnet3_drv.c 				skb_shinfo(skb)->gso_segs = segCnt;
skb              1509 drivers/net/vmxnet3/vmxnet3_drv.c 			} else if (segCnt != 0 || skb->len > mtu) {
skb              1512 drivers/net/vmxnet3/vmxnet3_drv.c 				hlen = vmxnet3_get_hdr_len(adapter, skb,
skb              1517 drivers/net/vmxnet3/vmxnet3_drv.c 				skb_shinfo(skb)->gso_type =
skb              1520 drivers/net/vmxnet3/vmxnet3_drv.c 					skb_shinfo(skb)->gso_segs = segCnt;
skb              1521 drivers/net/vmxnet3/vmxnet3_drv.c 					skb_shinfo(skb)->gso_size =
skb              1522 drivers/net/vmxnet3/vmxnet3_drv.c 						DIV_ROUND_UP(skb->len -
skb              1525 drivers/net/vmxnet3/vmxnet3_drv.c 					skb_shinfo(skb)->gso_size = mtu - hlen;
skb              1530 drivers/net/vmxnet3/vmxnet3_drv.c 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
skb              1533 drivers/net/vmxnet3/vmxnet3_drv.c 				netif_receive_skb(skb);
skb              1535 drivers/net/vmxnet3/vmxnet3_drv.c 				napi_gro_receive(&rq->napi, skb);
skb              1537 drivers/net/vmxnet3/vmxnet3_drv.c 			ctx->skb = NULL;
skb              1595 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->buf_info[ring_idx][i].skb) {
skb              1598 drivers/net/vmxnet3/vmxnet3_drv.c 				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
skb              1599 drivers/net/vmxnet3/vmxnet3_drv.c 				rq->buf_info[ring_idx][i].skb = NULL;
skb              1743 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->rx_ctx.skb = NULL;
skb               198 drivers/net/vmxnet3/vmxnet3_int.h 	struct sk_buff *skb;
skb               258 drivers/net/vmxnet3/vmxnet3_int.h 		struct sk_buff *skb;
skb               265 drivers/net/vmxnet3/vmxnet3_int.h 	struct sk_buff *skb;
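The vmxnet3 transmit entries above hinge on one recurring guard: before vmxnet3_prepare_tso() edits TCP/IP headers in place, a cloned header block must be made private. A minimal sketch of that guard, with illustrative example_* naming (not vmxnet3's code):

#include <linux/skbuff.h>

/* Sketch: un-share a GSO skb's header block before any in-place edits,
 * mirroring the skb_header_cloned()/pskb_expand_head() pair seen in
 * vmxnet3_tq_xmit() above. */
static int example_make_headers_writable(struct sk_buff *skb)
{
	if (!skb_is_gso(skb))
		return 0;

	/* cloned headers are shared with another skb; copy before writing */
	if (skb_header_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* now tcp_hdr(skb)/ip_hdr(skb) may be modified safely */
	return 0;
}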
skb                73 drivers/net/vrf.c static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
skb                76 drivers/net/vrf.c 	kfree_skb(skb);
skb               126 drivers/net/vrf.c static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb               129 drivers/net/vrf.c 	int len = skb->len;
skb               131 drivers/net/vrf.c 	skb_orphan(skb);
skb               133 drivers/net/vrf.c 	skb_dst_set(skb, dst);
skb               138 drivers/net/vrf.c 	skb->pkt_type = PACKET_LOOPBACK;
skb               140 drivers/net/vrf.c 	skb->protocol = eth_type_trans(skb, dev);
skb               142 drivers/net/vrf.c 	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
skb               152 drivers/net/vrf.c 			     struct sk_buff *skb)
skb               157 drivers/net/vrf.c 		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);
skb               160 drivers/net/vrf.c 		err = dst_output(net, sk, skb);
skb               165 drivers/net/vrf.c static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
skb               169 drivers/net/vrf.c 	struct net *net = dev_net(skb->dev);
skb               175 drivers/net/vrf.c 	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
skb               178 drivers/net/vrf.c 	iph = ipv6_hdr(skb);
skb               187 drivers/net/vrf.c 	fl6.flowi6_mark = skb->mark;
skb               195 drivers/net/vrf.c 	skb_dst_drop(skb);
skb               202 drivers/net/vrf.c 		return vrf_local_xmit(skb, dev, dst);
skb               204 drivers/net/vrf.c 	skb_dst_set(skb, dst);
skb               207 drivers/net/vrf.c 	__skb_pull(skb, skb_network_offset(skb));
skb               209 drivers/net/vrf.c 	ret = vrf_ip6_local_out(net, skb->sk, skb);
skb               217 drivers/net/vrf.c 	vrf_tx_error(dev, skb);
skb               221 drivers/net/vrf.c static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
skb               224 drivers/net/vrf.c 	vrf_tx_error(dev, skb);
skb               231 drivers/net/vrf.c 			    struct sk_buff *skb)
skb               236 drivers/net/vrf.c 		      skb, NULL, skb_dst(skb)->dev, dst_output);
skb               238 drivers/net/vrf.c 		err = dst_output(net, sk, skb);
skb               243 drivers/net/vrf.c static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
skb               252 drivers/net/vrf.c 	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
skb               255 drivers/net/vrf.c 	ip4h = ip_hdr(skb);
skb               271 drivers/net/vrf.c 	skb_dst_drop(skb);
skb               278 drivers/net/vrf.c 		return vrf_local_xmit(skb, vrf_dev, &rt->dst);
skb               280 drivers/net/vrf.c 	skb_dst_set(skb, &rt->dst);
skb               283 drivers/net/vrf.c 	__skb_pull(skb, skb_network_offset(skb));
skb               286 drivers/net/vrf.c 		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
skb               290 drivers/net/vrf.c 	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
skb               299 drivers/net/vrf.c 	vrf_tx_error(vrf_dev, skb);
skb               303 drivers/net/vrf.c static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
skb               305 drivers/net/vrf.c 	switch (skb->protocol) {
skb               307 drivers/net/vrf.c 		return vrf_process_v4_outbound(skb, dev);
skb               309 drivers/net/vrf.c 		return vrf_process_v6_outbound(skb, dev);
skb               311 drivers/net/vrf.c 		vrf_tx_error(dev, skb);
skb               316 drivers/net/vrf.c static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
skb               318 drivers/net/vrf.c 	int len = skb->len;
skb               319 drivers/net/vrf.c 	netdev_tx_t ret = is_ip_tx_frame(skb, dev);
skb               336 drivers/net/vrf.c 			     struct sk_buff *skb)
skb               338 drivers/net/vrf.c 	struct net_device *vrf_dev = skb->dev;
skb               341 drivers/net/vrf.c 	    likely(skb_headroom(skb) >= ETH_HLEN)) {
skb               342 drivers/net/vrf.c 		struct ethhdr *eth = skb_push(skb, ETH_HLEN);
skb               346 drivers/net/vrf.c 		eth->h_proto = skb->protocol;
skb               349 drivers/net/vrf.c 		dev_queue_xmit_nit(skb, vrf_dev);
skb               352 drivers/net/vrf.c 		skb_pull(skb, ETH_HLEN);
skb               361 drivers/net/vrf.c 			      struct sk_buff *skb)
skb               363 drivers/net/vrf.c 	struct dst_entry *dst = skb_dst(skb);
skb               369 drivers/net/vrf.c 	nf_reset_ct(skb);
skb               371 drivers/net/vrf.c 	skb->protocol = htons(ETH_P_IPV6);
skb               372 drivers/net/vrf.c 	skb->dev = dev;
skb               375 drivers/net/vrf.c 	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
skb               380 drivers/net/vrf.c 		sock_confirm_neigh(skb, neigh);
skb               381 drivers/net/vrf.c 		ret = neigh_output(neigh, skb, false);
skb               389 drivers/net/vrf.c 	kfree_skb(skb);
skb               394 drivers/net/vrf.c static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               397 drivers/net/vrf.c 			    net, sk, skb, NULL, skb_dst(skb)->dev,
skb               399 drivers/net/vrf.c 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
skb               407 drivers/net/vrf.c 					    struct sk_buff *skb)
skb               424 drivers/net/vrf.c 		vrf_tx_error(vrf_dev, skb);
skb               428 drivers/net/vrf.c 	skb_dst_drop(skb);
skb               429 drivers/net/vrf.c 	skb_dst_set(skb, dst);
skb               431 drivers/net/vrf.c 	return skb;
skb               435 drivers/net/vrf.c 			      struct sk_buff *skb)
skb               437 drivers/net/vrf.c 	skb->protocol = htons(ETH_P_IPV6);
skb               440 drivers/net/vrf.c 			    net, sk, skb, NULL, skb->dev,
skb               442 drivers/net/vrf.c 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
skb               447 drivers/net/vrf.c 					  struct sk_buff *skb)
skb               452 drivers/net/vrf.c 	skb->dev = vrf_dev;
skb               455 drivers/net/vrf.c 		      skb, NULL, vrf_dev, vrf_output6_direct);
skb               458 drivers/net/vrf.c 		err = vrf_output6_direct(net, sk, skb);
skb               462 drivers/net/vrf.c 		nf_reset_ct(skb);
skb               464 drivers/net/vrf.c 		skb = NULL;
skb               466 drivers/net/vrf.c 	return skb;
skb               471 drivers/net/vrf.c 				   struct sk_buff *skb)
skb               474 drivers/net/vrf.c 	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
skb               475 drivers/net/vrf.c 		return skb;
skb               478 drivers/net/vrf.c 	    IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
skb               479 drivers/net/vrf.c 		return vrf_ip6_out_direct(vrf_dev, sk, skb);
skb               481 drivers/net/vrf.c 	return vrf_ip6_out_redirect(vrf_dev, skb);
skb               538 drivers/net/vrf.c 				   struct sk_buff *skb)
skb               540 drivers/net/vrf.c 	return skb;
skb               554 drivers/net/vrf.c static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               556 drivers/net/vrf.c 	struct dst_entry *dst = skb_dst(skb);
skb               564 drivers/net/vrf.c 	nf_reset_ct(skb);
skb               567 drivers/net/vrf.c 	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
skb               570 drivers/net/vrf.c 		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
skb               575 drivers/net/vrf.c 		if (skb->sk)
skb               576 drivers/net/vrf.c 			skb_set_owner_w(skb2, skb->sk);
skb               578 drivers/net/vrf.c 		consume_skb(skb);
skb               579 drivers/net/vrf.c 		skb = skb2;
skb               584 drivers/net/vrf.c 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
skb               586 drivers/net/vrf.c 		sock_confirm_neigh(skb, neigh);
skb               588 drivers/net/vrf.c 		ret = neigh_output(neigh, skb, is_v6gw);
skb               595 drivers/net/vrf.c 	vrf_tx_error(skb->dev, skb);
skb               599 drivers/net/vrf.c static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               601 drivers/net/vrf.c 	struct net_device *dev = skb_dst(skb)->dev;
skb               603 drivers/net/vrf.c 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
skb               605 drivers/net/vrf.c 	skb->dev = dev;
skb               606 drivers/net/vrf.c 	skb->protocol = htons(ETH_P_IP);
skb               609 drivers/net/vrf.c 			    net, sk, skb, NULL, dev,
skb               611 drivers/net/vrf.c 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
skb               619 drivers/net/vrf.c 					   struct sk_buff *skb)
skb               636 drivers/net/vrf.c 		vrf_tx_error(vrf_dev, skb);
skb               640 drivers/net/vrf.c 	skb_dst_drop(skb);
skb               641 drivers/net/vrf.c 	skb_dst_set(skb, dst);
skb               643 drivers/net/vrf.c 	return skb;
skb               647 drivers/net/vrf.c 			     struct sk_buff *skb)
skb               649 drivers/net/vrf.c 	skb->protocol = htons(ETH_P_IP);
skb               652 drivers/net/vrf.c 			    net, sk, skb, NULL, skb->dev,
skb               654 drivers/net/vrf.c 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
skb               659 drivers/net/vrf.c 					 struct sk_buff *skb)
skb               664 drivers/net/vrf.c 	skb->dev = vrf_dev;
skb               667 drivers/net/vrf.c 		      skb, NULL, vrf_dev, vrf_output_direct);
skb               670 drivers/net/vrf.c 		err = vrf_output_direct(net, sk, skb);
skb               674 drivers/net/vrf.c 		nf_reset_ct(skb);
skb               676 drivers/net/vrf.c 		skb = NULL;
skb               678 drivers/net/vrf.c 	return skb;
skb               683 drivers/net/vrf.c 				  struct sk_buff *skb)
skb               686 drivers/net/vrf.c 	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
skb               687 drivers/net/vrf.c 	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
skb               688 drivers/net/vrf.c 		return skb;
skb               691 drivers/net/vrf.c 	    IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
skb               692 drivers/net/vrf.c 		return vrf_ip_out_direct(vrf_dev, sk, skb);
skb               694 drivers/net/vrf.c 	return vrf_ip_out_redirect(vrf_dev, skb);
skb               700 drivers/net/vrf.c 				  struct sk_buff *skb,
skb               705 drivers/net/vrf.c 		return vrf_ip_out(vrf_dev, sk, skb);
skb               707 drivers/net/vrf.c 		return vrf_ip6_out(vrf_dev, sk, skb);
skb               710 drivers/net/vrf.c 	return skb;
skb               898 drivers/net/vrf.c static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               900 drivers/net/vrf.c 	kfree_skb(skb);
skb               905 drivers/net/vrf.c 				      struct sk_buff *skb,
skb               910 drivers/net/vrf.c 	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
skb               911 drivers/net/vrf.c 		skb = NULL;    /* kfree_skb(skb) handled by nf code */
skb               913 drivers/net/vrf.c 	return skb;
skb               922 drivers/net/vrf.c static bool ipv6_ndisc_frame(const struct sk_buff *skb)
skb               924 drivers/net/vrf.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               931 drivers/net/vrf.c 		icmph = skb_header_pointer(skb, sizeof(*iph),
skb               955 drivers/net/vrf.c 					     const struct sk_buff *skb,
skb               960 drivers/net/vrf.c 	return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags);
skb               963 drivers/net/vrf.c static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
skb               966 drivers/net/vrf.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               969 drivers/net/vrf.c 		.flowi6_mark    = skb->mark,
skb               978 drivers/net/vrf.c 	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
skb               986 drivers/net/vrf.c 	skb_dst_set(skb, &rt6->dst);
skb               990 drivers/net/vrf.c 				   struct sk_buff *skb)
skb               992 drivers/net/vrf.c 	int orig_iif = skb->skb_iif;
skb               993 drivers/net/vrf.c 	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
skb               994 drivers/net/vrf.c 	bool is_ndisc = ipv6_ndisc_frame(skb);
skb               999 drivers/net/vrf.c 	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
skb              1000 drivers/net/vrf.c 		skb->dev = vrf_dev;
skb              1001 drivers/net/vrf.c 		skb->skb_iif = vrf_dev->ifindex;
skb              1002 drivers/net/vrf.c 		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
skb              1003 drivers/net/vrf.c 		if (skb->pkt_type == PACKET_LOOPBACK)
skb              1004 drivers/net/vrf.c 			skb->pkt_type = PACKET_HOST;
skb              1010 drivers/net/vrf.c 		vrf_rx_stats(vrf_dev, skb->len);
skb              1011 drivers/net/vrf.c 		skb->dev = vrf_dev;
skb              1012 drivers/net/vrf.c 		skb->skb_iif = vrf_dev->ifindex;
skb              1015 drivers/net/vrf.c 			skb_push(skb, skb->mac_len);
skb              1016 drivers/net/vrf.c 			dev_queue_xmit_nit(skb, vrf_dev);
skb              1017 drivers/net/vrf.c 			skb_pull(skb, skb->mac_len);
skb              1020 drivers/net/vrf.c 		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
skb              1024 drivers/net/vrf.c 		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
skb              1026 drivers/net/vrf.c 	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
skb              1028 drivers/net/vrf.c 	return skb;
skb              1033 drivers/net/vrf.c 				   struct sk_buff *skb)
skb              1035 drivers/net/vrf.c 	return skb;
skb              1040 drivers/net/vrf.c 				  struct sk_buff *skb)
skb              1042 drivers/net/vrf.c 	skb->dev = vrf_dev;
skb              1043 drivers/net/vrf.c 	skb->skb_iif = vrf_dev->ifindex;
skb              1044 drivers/net/vrf.c 	IPCB(skb)->flags |= IPSKB_L3SLAVE;
skb              1046 drivers/net/vrf.c 	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
skb              1052 drivers/net/vrf.c 	if (skb->pkt_type == PACKET_LOOPBACK) {
skb              1053 drivers/net/vrf.c 		skb->pkt_type = PACKET_HOST;
skb              1057 drivers/net/vrf.c 	vrf_rx_stats(vrf_dev, skb->len);
skb              1060 drivers/net/vrf.c 		skb_push(skb, skb->mac_len);
skb              1061 drivers/net/vrf.c 		dev_queue_xmit_nit(skb, vrf_dev);
skb              1062 drivers/net/vrf.c 		skb_pull(skb, skb->mac_len);
skb              1065 drivers/net/vrf.c 	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
skb              1067 drivers/net/vrf.c 	return skb;
skb              1072 drivers/net/vrf.c 				  struct sk_buff *skb,
skb              1077 drivers/net/vrf.c 		return vrf_ip_rcv(vrf_dev, skb);
skb              1079 drivers/net/vrf.c 		return vrf_ip6_rcv(vrf_dev, skb);
skb              1082 drivers/net/vrf.c 	return skb;
skb              1155 drivers/net/vrf.c 	struct sk_buff *skb;
skb              1162 drivers/net/vrf.c 	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
skb              1163 drivers/net/vrf.c 	if (!skb)
skb              1166 drivers/net/vrf.c 	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
skb              1178 drivers/net/vrf.c 	if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL))
skb              1181 drivers/net/vrf.c 	if (nla_put_u8(skb, FRA_L3MDEV, 1))
skb              1184 drivers/net/vrf.c 	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
skb              1187 drivers/net/vrf.c 	nlmsg_end(skb, nlh);
skb              1190 drivers/net/vrf.c 	skb->sk = dev_net(dev)->rtnl;
skb              1192 drivers/net/vrf.c 		err = fib_nl_newrule(skb, nlh, NULL);
skb              1196 drivers/net/vrf.c 		err = fib_nl_delrule(skb, nlh, NULL);
skb              1200 drivers/net/vrf.c 	nlmsg_free(skb);
skb              1205 drivers/net/vrf.c 	nlmsg_free(skb);
skb              1371 drivers/net/vrf.c static int vrf_fillinfo(struct sk_buff *skb,
skb              1376 drivers/net/vrf.c 	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
skb              1385 drivers/net/vrf.c static int vrf_fill_slave_info(struct sk_buff *skb,
skb              1391 drivers/net/vrf.c 	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
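The vrf transmit entries reduce to a dispatch on skb->protocol, exactly the shape of is_ip_tx_frame() above. A compressed sketch under that assumption (routing elided, stats handling illustrative):

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;	/* read before the skb may be freed */

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		/* look up a route in the device's table, then dst_output();
		 * elided here -- see vrf_process_v{4,6}_outbound() above */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		consume_skb(skb);
		return NETDEV_TX_OK;
	default:
		dev->stats.tx_errors++;
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}
}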
skb                48 drivers/net/vsockmon.c static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
skb                50 drivers/net/vsockmon.c 	int len = skb->len;
skb                58 drivers/net/vsockmon.c 	dev_kfree_skb(skb);
skb               121 drivers/net/vxlan.c static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
skb               125 drivers/net/vxlan.c 		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
skb               127 drivers/net/vxlan.c 		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
skb               151 drivers/net/vxlan.c static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
skb               154 drivers/net/vxlan.c 	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
skb               247 drivers/net/vxlan.c static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
skb               258 drivers/net/vxlan.c 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
skb               281 drivers/net/vxlan.c 	    nla_put_s32(skb, NDA_LINK_NETNSID,
skb               285 drivers/net/vxlan.c 	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
skb               288 drivers/net/vxlan.c 	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
skb               292 drivers/net/vxlan.c 	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
skb               295 drivers/net/vxlan.c 	    nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
skb               298 drivers/net/vxlan.c 	    nla_put_u32(skb, NDA_SRC_VNI,
skb               302 drivers/net/vxlan.c 	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
skb               310 drivers/net/vxlan.c 	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
skb               313 drivers/net/vxlan.c 	nlmsg_end(skb, nlh);
skb               317 drivers/net/vxlan.c 	nlmsg_cancel(skb, nlh);
skb               337 drivers/net/vxlan.c 	struct sk_buff *skb;
skb               340 drivers/net/vxlan.c 	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
skb               341 drivers/net/vxlan.c 	if (skb == NULL)
skb               344 drivers/net/vxlan.c 	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
skb               348 drivers/net/vxlan.c 		kfree_skb(skb);
skb               352 drivers/net/vxlan.c 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
skb               701 drivers/net/vxlan.c static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
skb               710 drivers/net/vxlan.c 	if (skb->remcsum_offload)
skb               713 drivers/net/vxlan.c 	if (!NAPI_GRO_CB(skb)->csum_valid)
skb               719 drivers/net/vxlan.c 	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
skb               722 drivers/net/vxlan.c 	skb->remcsum_offload = 1;
skb               729 drivers/net/vxlan.c 					 struct sk_buff *skb)
skb               742 drivers/net/vxlan.c 	off_vx = skb_gro_offset(skb);
skb               744 drivers/net/vxlan.c 	vh   = skb_gro_header_fast(skb, off_vx);
skb               745 drivers/net/vxlan.c 	if (skb_gro_header_hard(skb, hlen)) {
skb               746 drivers/net/vxlan.c 		vh = skb_gro_header_slow(skb, hlen, off_vx);
skb               751 drivers/net/vxlan.c 	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
skb               756 drivers/net/vxlan.c 		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
skb               765 drivers/net/vxlan.c 	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
skb               779 drivers/net/vxlan.c 	pp = call_gro_receive(eth_gro_receive, head, skb);
skb               783 drivers/net/vxlan.c 	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
skb               788 drivers/net/vxlan.c static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
skb               793 drivers/net/vxlan.c 	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
skb              1217 drivers/net/vxlan.c static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb              1235 drivers/net/vxlan.c 				err = vxlan_fdb_info(skb, vxlan, f,
skb              1236 drivers/net/vxlan.c 						     NETLINK_CB(cb->skb).portid,
skb              1251 drivers/net/vxlan.c static int vxlan_fdb_get(struct sk_buff *skb,
skb              1277 drivers/net/vxlan.c 	err = vxlan_fdb_info(skb, vxlan, f, portid, seq,
skb              1512 drivers/net/vxlan.c 			  struct sk_buff *skb, u32 vxflags)
skb              1516 drivers/net/vxlan.c 	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
skb              1522 drivers/net/vxlan.c 	if (!pskb_may_pull(skb, offset + sizeof(u16)))
skb              1525 drivers/net/vxlan.c 	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
skb              1534 drivers/net/vxlan.c 				struct sk_buff *skb, u32 vxflags,
skb              1545 drivers/net/vxlan.c 	tun_dst = (struct metadata_dst *)skb_dst(skb);
skb              1558 drivers/net/vxlan.c 		skb->mark = md->gbp;
skb              1565 drivers/net/vxlan.c 				struct sk_buff *skb, u32 vxflags)
skb              1594 drivers/net/vxlan.c 			  struct sk_buff *skb, __be32 vni)
skb              1597 drivers/net/vxlan.c 	u32 ifindex = skb->dev->ifindex;
skb              1599 drivers/net/vxlan.c 	skb_reset_mac_header(skb);
skb              1600 drivers/net/vxlan.c 	skb->protocol = eth_type_trans(skb, vxlan->dev);
skb              1601 drivers/net/vxlan.c 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
skb              1604 drivers/net/vxlan.c 	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
skb              1609 drivers/net/vxlan.c 		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
skb              1613 drivers/net/vxlan.c 		saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
skb              1619 drivers/net/vxlan.c 	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
skb              1626 drivers/net/vxlan.c 				  struct sk_buff *skb)
skb              1631 drivers/net/vxlan.c 		err = IP_ECN_decapsulate(oiph, skb);
skb              1634 drivers/net/vxlan.c 		err = IP6_ECN_decapsulate(oiph, skb);
skb              1650 drivers/net/vxlan.c static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
skb              1664 drivers/net/vxlan.c 	if (!pskb_may_pull(skb, VXLAN_HLEN))
skb              1667 drivers/net/vxlan.c 	unparsed = *vxlan_hdr(skb);
skb              1670 drivers/net/vxlan.c 		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
skb              1671 drivers/net/vxlan.c 			   ntohl(vxlan_hdr(skb)->vx_flags),
skb              1672 drivers/net/vxlan.c 			   ntohl(vxlan_hdr(skb)->vx_vni));
skb              1683 drivers/net/vxlan.c 	vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
skb              1685 drivers/net/vxlan.c 	vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
skb              1693 drivers/net/vxlan.c 		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
skb              1698 drivers/net/vxlan.c 	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
skb              1705 drivers/net/vxlan.c 		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
skb              1713 drivers/net/vxlan.c 		skb_dst_set(skb, (struct dst_entry *)tun_dst);
skb              1719 drivers/net/vxlan.c 		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
skb              1722 drivers/net/vxlan.c 		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
skb              1740 drivers/net/vxlan.c 		if (!vxlan_set_mac(vxlan, vs, skb, vni))
skb              1743 drivers/net/vxlan.c 		skb_reset_mac_header(skb);
skb              1744 drivers/net/vxlan.c 		skb->dev = vxlan->dev;
skb              1745 drivers/net/vxlan.c 		skb->pkt_type = PACKET_HOST;
skb              1748 drivers/net/vxlan.c 	oiph = skb_network_header(skb);
skb              1749 drivers/net/vxlan.c 	skb_reset_network_header(skb);
skb              1751 drivers/net/vxlan.c 	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
skb              1768 drivers/net/vxlan.c 	stats->rx_bytes += skb->len;
skb              1771 drivers/net/vxlan.c 	gro_cells_receive(&vxlan->gro_cells, skb);
skb              1779 drivers/net/vxlan.c 	kfree_skb(skb);
skb              1784 drivers/net/vxlan.c static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
skb              1791 drivers/net/vxlan.c 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + VXLAN_HLEN))
skb              1794 drivers/net/vxlan.c 	hdr = vxlan_hdr(skb);
skb              1804 drivers/net/vxlan.c 	vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
skb              1811 drivers/net/vxlan.c static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
skb              1822 drivers/net/vxlan.c 	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
skb              1826 drivers/net/vxlan.c 	parp = arp_hdr(skb);
skb              1889 drivers/net/vxlan.c 	consume_skb(skb);
skb              1985 drivers/net/vxlan.c static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
skb              1998 drivers/net/vxlan.c 	iphdr = ipv6_hdr(skb);
skb              2024 drivers/net/vxlan.c 		reply = vxlan_na_create(skb, n,
skb              2045 drivers/net/vxlan.c 	consume_skb(skb);
skb              2050 drivers/net/vxlan.c static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
skb              2055 drivers/net/vxlan.c 	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
skb              2059 drivers/net/vxlan.c 	switch (ntohs(eth_hdr(skb)->h_proto)) {
skb              2064 drivers/net/vxlan.c 		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
skb              2066 drivers/net/vxlan.c 		pip = ip_hdr(skb);
skb              2085 drivers/net/vxlan.c 		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
skb              2087 drivers/net/vxlan.c 		pip6 = ipv6_hdr(skb);
skb              2109 drivers/net/vxlan.c 		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
skb              2111 drivers/net/vxlan.c 			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
skb              2113 drivers/net/vxlan.c 			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
skb              2154 drivers/net/vxlan.c static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
skb              2166 drivers/net/vxlan.c 	    skb->ip_summed == CHECKSUM_PARTIAL) {
skb              2167 drivers/net/vxlan.c 		int csum_start = skb_checksum_start_offset(skb);
skb              2171 drivers/net/vxlan.c 		    (skb->csum_offset == offsetof(struct udphdr, check) ||
skb              2172 drivers/net/vxlan.c 		     skb->csum_offset == offsetof(struct tcphdr, check)))
skb              2180 drivers/net/vxlan.c 	err = skb_cow_head(skb, min_headroom);
skb              2184 drivers/net/vxlan.c 	err = iptunnel_handle_offloads(skb, type);
skb              2188 drivers/net/vxlan.c 	vxh = __skb_push(skb, sizeof(*vxh));
skb              2195 drivers/net/vxlan.c 		start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
skb              2196 drivers/net/vxlan.c 		vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
skb              2199 drivers/net/vxlan.c 		if (!skb_is_gso(skb)) {
skb              2200 drivers/net/vxlan.c 			skb->ip_summed = CHECKSUM_NONE;
skb              2201 drivers/net/vxlan.c 			skb->encapsulation = 0;
skb              2208 drivers/net/vxlan.c 		err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
skb              2211 drivers/net/vxlan.c 		inner_protocol = skb->protocol;
skb              2214 drivers/net/vxlan.c 	skb_set_inner_protocol(skb, inner_protocol);
skb              2220 drivers/net/vxlan.c 				      struct sk_buff *skb, int oif, u8 tos,
skb              2225 drivers/net/vxlan.c 	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
skb              2243 drivers/net/vxlan.c 	fl4.flowi4_mark = skb->mark;
skb              2272 drivers/net/vxlan.c 					  struct sk_buff *skb, int oif, u8 tos,
skb              2280 drivers/net/vxlan.c 	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
skb              2300 drivers/net/vxlan.c 	fl6.flowi6_mark = skb->mark;
skb              2326 drivers/net/vxlan.c static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
skb              2333 drivers/net/vxlan.c 	int len = skb->len;
skb              2337 drivers/net/vxlan.c 	skb->pkt_type = PACKET_HOST;
skb              2338 drivers/net/vxlan.c 	skb->encapsulation = 0;
skb              2339 drivers/net/vxlan.c 	skb->dev = dst_vxlan->dev;
skb              2340 drivers/net/vxlan.c 	__skb_pull(skb, skb_network_offset(skb));
skb              2353 drivers/net/vxlan.c 	dev = skb->dev;
skb              2355 drivers/net/vxlan.c 		kfree_skb(skb);
skb              2360 drivers/net/vxlan.c 		vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
skb              2367 drivers/net/vxlan.c 	if (netif_rx(skb) == NET_RX_SUCCESS) {
skb              2379 drivers/net/vxlan.c static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
skb              2404 drivers/net/vxlan.c 			kfree_skb(skb);
skb              2408 drivers/net/vxlan.c 		vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni);
skb              2415 drivers/net/vxlan.c static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb              2422 drivers/net/vxlan.c 	const struct iphdr *old_iph = ip_hdr(skb);
skb              2437 drivers/net/vxlan.c 	info = skb_tunnel_info(skb);
skb              2444 drivers/net/vxlan.c 				vxlan_encap_bypass(skb, vxlan, vxlan, default_vni);
skb              2455 drivers/net/vxlan.c 		md->gbp = skb->mark;
skb              2457 drivers/net/vxlan.c 			ttl = ip_tunnel_get_ttl(old_iph, skb);
skb              2466 drivers/net/vxlan.c 			tos = ip_tunnel_get_dsfield(old_iph, skb);
skb              2502 drivers/net/vxlan.c 	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
skb              2514 drivers/net/vxlan.c 		rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
skb              2526 drivers/net/vxlan.c 			err = encap_bypass_if_local(skb, dev, vxlan, dst,
skb              2535 drivers/net/vxlan.c 				struct ethhdr *eth = eth_hdr(skb);
skb              2547 drivers/net/vxlan.c 		skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
skb              2549 drivers/net/vxlan.c 		tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
skb              2551 drivers/net/vxlan.c 		err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
skb              2556 drivers/net/vxlan.c 		udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
skb              2566 drivers/net/vxlan.c 		ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
skb              2580 drivers/net/vxlan.c 			err = encap_bypass_if_local(skb, dev, vxlan, dst,
skb              2587 drivers/net/vxlan.c 		skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
skb              2589 drivers/net/vxlan.c 		tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
skb              2591 drivers/net/vxlan.c 		skb_scrub_packet(skb, xnet);
skb              2592 drivers/net/vxlan.c 		err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
skb              2597 drivers/net/vxlan.c 		udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
skb              2609 drivers/net/vxlan.c 	dev_kfree_skb(skb);
skb              2620 drivers/net/vxlan.c 	kfree_skb(skb);
skb              2629 drivers/net/vxlan.c static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
skb              2639 drivers/net/vxlan.c 	info = skb_tunnel_info(skb);
skb              2641 drivers/net/vxlan.c 	skb_reset_mac_header(skb);
skb              2649 drivers/net/vxlan.c 				vxlan_xmit_one(skb, dev, vni, NULL, false);
skb              2651 drivers/net/vxlan.c 				kfree_skb(skb);
skb              2657 drivers/net/vxlan.c 		eth = eth_hdr(skb);
skb              2659 drivers/net/vxlan.c 			return arp_reduce(dev, skb, vni);
skb              2662 drivers/net/vxlan.c 			 pskb_may_pull(skb, sizeof(struct ipv6hdr) +
skb              2664 drivers/net/vxlan.c 			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
skb              2665 drivers/net/vxlan.c 			struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
skb              2669 drivers/net/vxlan.c 				return neigh_reduce(dev, skb, vni);
skb              2674 drivers/net/vxlan.c 	eth = eth_hdr(skb);
skb              2681 drivers/net/vxlan.c 		did_rsc = route_shortcircuit(dev, skb);
skb              2694 drivers/net/vxlan.c 			kfree_skb(skb);
skb              2706 drivers/net/vxlan.c 		skb1 = skb_clone(skb, GFP_ATOMIC);
skb              2712 drivers/net/vxlan.c 		vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
skb              2714 drivers/net/vxlan.c 		kfree_skb(skb);
skb              2919 drivers/net/vxlan.c static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
skb              2922 drivers/net/vxlan.c 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
skb              2925 drivers/net/vxlan.c 	sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
skb              2933 drivers/net/vxlan.c 		rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
skb              2945 drivers/net/vxlan.c 		ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
skb              4073 drivers/net/vxlan.c static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              4082 drivers/net/vxlan.c 	if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
skb              4087 drivers/net/vxlan.c 			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
skb              4092 drivers/net/vxlan.c 			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
skb              4099 drivers/net/vxlan.c 	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
skb              4104 drivers/net/vxlan.c 			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
skb              4109 drivers/net/vxlan.c 			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
skb              4116 drivers/net/vxlan.c 	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
skb              4117 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
skb              4119 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
skb              4120 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
skb              4121 drivers/net/vxlan.c 	    nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
skb              4122 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
skb              4124 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
skb              4126 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_RSC,
skb              4128 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
skb              4130 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
skb              4132 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
skb              4134 drivers/net/vxlan.c 	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
skb              4135 drivers/net/vxlan.c 	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
skb              4136 drivers/net/vxlan.c 	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
skb              4137 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
skb              4139 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
skb              4141 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
skb              4143 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
skb              4145 drivers/net/vxlan.c 	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
skb              4149 drivers/net/vxlan.c 	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
skb              4153 drivers/net/vxlan.c 	    nla_put_flag(skb, IFLA_VXLAN_GBP))
skb              4157 drivers/net/vxlan.c 	    nla_put_flag(skb, IFLA_VXLAN_GPE))
skb              4161 drivers/net/vxlan.c 	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
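The core of the encapsulation step in vxlan_build_skb() above is: guarantee headroom for the 8-byte VXLAN header, then prepend and fill it. A reduced sketch of just that step (RCO/GBP/GPE flag handling elided):

#include <linux/skbuff.h>
#include <net/vxlan.h>

static int example_push_vxlan_header(struct sk_buff *skb, __be32 vni)
{
	struct vxlanhdr *vxh;
	int err;

	err = skb_cow_head(skb, sizeof(*vxh));	/* may reallocate the head */
	if (err)
		return err;

	vxh = __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = VXLAN_HF_VNI;		/* base header: VNI valid */
	vxh->vx_vni = vxlan_vni_field(vni);
	return 0;
}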
skb               272 drivers/net/wan/cosa.c static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
skb               661 drivers/net/wan/cosa.c static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
skb               668 drivers/net/wan/cosa.c 	chan->tx_skb = skb;
skb               669 drivers/net/wan/cosa.c 	cosa_start_tx(chan, skb->data, skb->len);
skb                65 drivers/net/wan/dlci.c static int dlci_header(struct sk_buff *skb, struct net_device *dev, 
skb                92 drivers/net/wan/dlci.c 	dest = skb_push(skb, hlen);
skb               101 drivers/net/wan/dlci.c static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
skb               106 drivers/net/wan/dlci.c 	if (!pskb_may_pull(skb, sizeof(*hdr))) {
skb               109 drivers/net/wan/dlci.c 		kfree_skb(skb);
skb               113 drivers/net/wan/dlci.c 	hdr = (struct frhdr *) skb->data;
skb               116 drivers/net/wan/dlci.c 	skb->dev = dev;
skb               149 drivers/net/wan/dlci.c 				skb->protocol = hdr->PID;
skb               155 drivers/net/wan/dlci.c 				skb->protocol = htons(ETH_P_IP);
skb               177 drivers/net/wan/dlci.c 		skb_reset_mac_header(skb);
skb               178 drivers/net/wan/dlci.c 		skb_pull(skb, header);
skb               179 drivers/net/wan/dlci.c 		dev->stats.rx_bytes += skb->len;
skb               180 drivers/net/wan/dlci.c 		netif_rx(skb);
skb               184 drivers/net/wan/dlci.c 		dev_kfree_skb(skb);
skb               187 drivers/net/wan/dlci.c static netdev_tx_t dlci_transmit(struct sk_buff *skb, struct net_device *dev)
skb               191 drivers/net/wan/dlci.c 	if (skb) {
skb               192 drivers/net/wan/dlci.c 		struct netdev_queue *txq = skb_get_tx_queue(dev, skb);
skb               193 drivers/net/wan/dlci.c 		netdev_start_xmit(skb, dlp->slave, txq, false);
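dlci_receive() above leads with the one guard every header parser needs: pskb_may_pull() before casting skb->data. A stripped-down sketch of that receive shape (protocol assignment elided; example_* names are illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_rx(struct sk_buff *skb, struct net_device *dev,
		      unsigned int hdr_len)
{
	if (!pskb_may_pull(skb, hdr_len)) {
		dev->stats.rx_errors++;
		kfree_skb(skb);
		return -EINVAL;
	}
	/* the first hdr_len bytes are linear now; parsing is safe */
	skb_reset_mac_header(skb);
	skb_pull(skb, hdr_len);
	dev->stats.rx_bytes += skb->len;
	netif_rx(skb);
	return 0;
}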
skb               836 drivers/net/wan/farsync.c static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev)
skb               838 drivers/net/wan/farsync.c 	skb->dev = dev;
skb               839 drivers/net/wan/farsync.c 	skb_reset_mac_header(skb);
skb               840 drivers/net/wan/farsync.c 	skb->pkt_type = PACKET_HOST;
skb               848 drivers/net/wan/farsync.c 		    int len, struct sk_buff *skb, int rxp)
skb               856 drivers/net/wan/farsync.c 	skb_put_data(skb, card->rx_dma_handle_host, len);
skb               868 drivers/net/wan/farsync.c 		skb->protocol = farsync_type_trans(skb, dev);
skb               870 drivers/net/wan/farsync.c 		skb->protocol = hdlc_type_trans(skb, dev);
skb               871 drivers/net/wan/farsync.c 	rx_status = netif_rx(skb);
skb              1210 drivers/net/wan/farsync.c 	struct sk_buff *skb;
skb              1257 drivers/net/wan/farsync.c 	if ((skb = dev_alloc_skb(len)) == NULL) {
skb              1277 drivers/net/wan/farsync.c 		memcpy_fromio(skb_put(skb, len),
skb              1291 drivers/net/wan/farsync.c 			skb->protocol = farsync_type_trans(skb, dev);
skb              1293 drivers/net/wan/farsync.c 			skb->protocol = hdlc_type_trans(skb, dev);
skb              1294 drivers/net/wan/farsync.c 		rx_status = netif_rx(skb);
skb              1299 drivers/net/wan/farsync.c 		card->dma_skb_rx = skb;
skb              1325 drivers/net/wan/farsync.c 	struct sk_buff *skb;
skb              1362 drivers/net/wan/farsync.c 				skb = port->txq[port->txqs];
skb              1373 drivers/net/wan/farsync.c 					cnv_bcnt(skb->len));
skb              1374 drivers/net/wan/farsync.c 				if ((skb->len < FST_MIN_DMA_LEN) ||
skb              1381 drivers/net/wan/farsync.c 						    skb->data, skb->len);
skb              1387 drivers/net/wan/farsync.c 					dev->stats.tx_bytes += skb->len;
skb              1392 drivers/net/wan/farsync.c 					       skb->data, skb->len);
skb              1394 drivers/net/wan/farsync.c 					card->dma_len_tx = skb->len;
skb              1400 drivers/net/wan/farsync.c 						   skb->len);
skb              1414 drivers/net/wan/farsync.c 				dev_kfree_skb(skb);
skb              2261 drivers/net/wan/farsync.c fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              2270 drivers/net/wan/farsync.c 	dbg(DBG_TX, "fst_start_xmit: length = %d\n", skb->len);
skb              2274 drivers/net/wan/farsync.c 		dev_kfree_skb(skb);
skb              2284 drivers/net/wan/farsync.c 	if (skb->len > LEN_TX_BUFFER) {
skb              2285 drivers/net/wan/farsync.c 		dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len,
skb              2287 drivers/net/wan/farsync.c 		dev_kfree_skb(skb);
skb              2319 drivers/net/wan/farsync.c 		dev_kfree_skb(skb);
skb              2330 drivers/net/wan/farsync.c 	port->txq[port->txqe] = skb;
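farsync_type_trans() above shows the minimal contract of a *_type_trans() helper: stamp the receiving device, mark the MAC header, classify the packet, and return the ethertype. A sketch assuming headerless raw-IP framing (the return value is illustrative, not farsync's):

#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static __be16 example_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb_reset_mac_header(skb);	/* headerless link: MAC == data */
	skb->pkt_type = PACKET_HOST;	/* unicast to this host */
	return htons(ETH_P_IP);		/* assumed IPv4 payload framing */
}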
skb               347 drivers/net/wan/fsl_ucc_hdlc.c static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
skb               358 drivers/net/wan/fsl_ucc_hdlc.c 		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
skb               360 drivers/net/wan/fsl_ucc_hdlc.c 			dev_kfree_skb(skb);
skb               365 drivers/net/wan/fsl_ucc_hdlc.c 		skb_push(skb, HDLC_HEAD_LEN);
skb               367 drivers/net/wan/fsl_ucc_hdlc.c 		proto_head = (u16 *)skb->data;
skb               370 drivers/net/wan/fsl_ucc_hdlc.c 		dev->stats.tx_bytes += skb->len;
skb               374 drivers/net/wan/fsl_ucc_hdlc.c 		proto_head = (u16 *)skb->data;
skb               377 drivers/net/wan/fsl_ucc_hdlc.c 			dev_kfree_skb(skb);
skb               382 drivers/net/wan/fsl_ucc_hdlc.c 		dev->stats.tx_bytes += skb->len;
skb               386 drivers/net/wan/fsl_ucc_hdlc.c 		dev->stats.tx_bytes += skb->len;
skb               391 drivers/net/wan/fsl_ucc_hdlc.c 		dev_kfree_skb(skb);
skb               394 drivers/net/wan/fsl_ucc_hdlc.c 	netdev_sent_queue(dev, skb->len);
skb               401 drivers/net/wan/fsl_ucc_hdlc.c 	priv->tx_skbuff[priv->skb_curtx] = skb;
skb               409 drivers/net/wan/fsl_ucc_hdlc.c 	       skb->data, skb->len);
skb               414 drivers/net/wan/fsl_ucc_hdlc.c 	iowrite16be(skb->len, &bd->length);
skb               462 drivers/net/wan/fsl_ucc_hdlc.c 		struct sk_buff *skb;
skb               477 drivers/net/wan/fsl_ucc_hdlc.c 		skb = priv->tx_skbuff[priv->skb_dirtytx];
skb               478 drivers/net/wan/fsl_ucc_hdlc.c 		if (!skb)
skb               481 drivers/net/wan/fsl_ucc_hdlc.c 		bytes_sent += skb->len;
skb               485 drivers/net/wan/fsl_ucc_hdlc.c 		       0, skb->len);
skb               486 drivers/net/wan/fsl_ucc_hdlc.c 		dev_consume_skb_irq(skb);
skb               516 drivers/net/wan/fsl_ucc_hdlc.c 	struct sk_buff *skb = NULL;
skb               555 drivers/net/wan/fsl_ucc_hdlc.c 			skb = dev_alloc_skb(length);
skb               556 drivers/net/wan/fsl_ucc_hdlc.c 			if (!skb) {
skb               561 drivers/net/wan/fsl_ucc_hdlc.c 			skb_put(skb, length);
skb               562 drivers/net/wan/fsl_ucc_hdlc.c 			skb->len = length;
skb               563 drivers/net/wan/fsl_ucc_hdlc.c 			skb->dev = dev;
skb               564 drivers/net/wan/fsl_ucc_hdlc.c 			memcpy(skb->data, bdbuffer, length);
skb               571 drivers/net/wan/fsl_ucc_hdlc.c 			skb = dev_alloc_skb(length);
skb               572 drivers/net/wan/fsl_ucc_hdlc.c 			if (!skb) {
skb               577 drivers/net/wan/fsl_ucc_hdlc.c 			skb_put(skb, length);
skb               578 drivers/net/wan/fsl_ucc_hdlc.c 			skb->len = length;
skb               579 drivers/net/wan/fsl_ucc_hdlc.c 			skb->dev = dev;
skb               580 drivers/net/wan/fsl_ucc_hdlc.c 			memcpy(skb->data, bdbuffer, length);
skb               585 drivers/net/wan/fsl_ucc_hdlc.c 		dev->stats.rx_bytes += skb->len;
skb               588 drivers/net/wan/fsl_ucc_hdlc.c 			skb->protocol = hdlc_type_trans(skb, dev);
skb               589 drivers/net/wan/fsl_ucc_hdlc.c 		netif_receive_skb(skb);
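ucc_hdlc_tx() above calls netdev_sent_queue() for byte-queue-limit accounting, so its completion interrupt must report the same byte count back. A sketch of the completion half (the array walk is illustrative; the real driver walks its BD ring):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_tx_complete(struct net_device *dev,
				struct sk_buff **done, int n)
{
	unsigned int bytes = 0;
	int i;

	for (i = 0; i < n; i++) {
		bytes += done[i]->len;
		/* "consume": a successful send, not an error drop */
		dev_consume_skb_irq(done[i]);
	}
	netdev_completed_queue(dev, n, bytes);	/* pairs with netdev_sent_queue() */
}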
skb               244 drivers/net/wan/hd64570.c 	struct sk_buff *skb;
skb               251 drivers/net/wan/hd64570.c 	skb = dev_alloc_skb(len);
skb               252 drivers/net/wan/hd64570.c 	if (!skb) {
skb               265 drivers/net/wan/hd64570.c 		memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
skb               267 drivers/net/wan/hd64570.c 		memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
skb               269 drivers/net/wan/hd64570.c 		memcpy_fromio(skb->data, winbase(card) + buff, len);
skb               274 drivers/net/wan/hd64570.c 	skb_put(skb, len);
skb               276 drivers/net/wan/hd64570.c 	printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
skb               277 drivers/net/wan/hd64570.c 	debug_frame(skb);
skb               280 drivers/net/wan/hd64570.c 	dev->stats.rx_bytes += skb->len;
skb               281 drivers/net/wan/hd64570.c 	skb->protocol = hdlc_type_trans(skb, dev);
skb               282 drivers/net/wan/hd64570.c 	netif_rx(skb);
skb               617 drivers/net/wan/hd64570.c static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
skb               632 drivers/net/wan/hd64570.c 	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
skb               633 drivers/net/wan/hd64570.c 	debug_frame(skb);
skb               638 drivers/net/wan/hd64570.c 	len = skb->len;
skb               645 drivers/net/wan/hd64570.c 		memcpy_toio(winbase(card) + buff, skb->data, maxlen);
skb               647 drivers/net/wan/hd64570.c 		memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
skb               649 drivers/net/wan/hd64570.c 		memcpy_toio(winbase(card) + buff, skb->data, len);
skb               669 drivers/net/wan/hd64570.c 	dev_kfree_skb(skb);
skb               203 drivers/net/wan/hd64572.c 	struct sk_buff *skb;
skb               208 drivers/net/wan/hd64572.c 	skb = dev_alloc_skb(len);
skb               209 drivers/net/wan/hd64572.c 	if (!skb) {
skb               215 drivers/net/wan/hd64572.c 	memcpy_fromio(skb->data, card->rambase + buff, len);
skb               217 drivers/net/wan/hd64572.c 	skb_put(skb, len);
skb               219 drivers/net/wan/hd64572.c 	printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
skb               220 drivers/net/wan/hd64572.c 	debug_frame(skb);
skb               223 drivers/net/wan/hd64572.c 	dev->stats.rx_bytes += skb->len;
skb               224 drivers/net/wan/hd64572.c 	skb->protocol = hdlc_type_trans(skb, dev);
skb               225 drivers/net/wan/hd64572.c 	netif_receive_skb(skb);
skb               562 drivers/net/wan/hd64572.c static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
skb               575 drivers/net/wan/hd64572.c 	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
skb               576 drivers/net/wan/hd64572.c 	debug_frame(skb);
skb               581 drivers/net/wan/hd64572.c 	len = skb->len;
skb               582 drivers/net/wan/hd64572.c 	memcpy_toio(card->rambase + buff, skb->data, len);
skb               599 drivers/net/wan/hd64572.c 	dev_kfree_skb(skb);
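hd64570 and hd64572 above share one RX shape: allocate, copy the frame out of card memory, commit the length, classify, deliver. A sketch of that shape (rambase/buff are illustrative stand-ins for the card's onboard buffer window):

#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/skbuff.h>

static void example_sca_rx(struct net_device *dev, u8 __iomem *rambase,
			   unsigned int buff, unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	memcpy_fromio(skb->data, rambase + buff, len);
	skb_put(skb, len);			/* data was copied in place */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	skb->protocol = hdlc_type_trans(skb, dev);
	netif_receive_skb(skb);
}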
skb                46 drivers/net/wan/hdlc.c static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
skb                52 drivers/net/wan/hdlc.c 		kfree_skb(skb);
skb                57 drivers/net/wan/hdlc.c 	return hdlc->proto->netif_rx(skb);
skb                60 drivers/net/wan/hdlc.c netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb                65 drivers/net/wan/hdlc.c 		return hdlc->proto->xmit(skb, dev);
skb                67 drivers/net/wan/hdlc.c 	return hdlc->xmit(skb, dev); /* call hardware driver directly */
skb                72 drivers/net/wan/hdlc_cisco.c static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
skb                81 drivers/net/wan/hdlc_cisco.c 	skb_push(skb, sizeof(struct hdlc_header));
skb                82 drivers/net/wan/hdlc_cisco.c 	data = (struct hdlc_header*)skb->data;
skb                98 drivers/net/wan/hdlc_cisco.c 	struct sk_buff *skb;
skb               101 drivers/net/wan/hdlc_cisco.c 	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
skb               103 drivers/net/wan/hdlc_cisco.c 	if (!skb) {
skb               107 drivers/net/wan/hdlc_cisco.c 	skb_reserve(skb, 4);
skb               108 drivers/net/wan/hdlc_cisco.c 	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
skb               109 drivers/net/wan/hdlc_cisco.c 	data = (struct cisco_packet*)(skb->data + 4);
skb               118 drivers/net/wan/hdlc_cisco.c 	skb_put(skb, sizeof(struct cisco_packet));
skb               119 drivers/net/wan/hdlc_cisco.c 	skb->priority = TC_PRIO_CONTROL;
skb               120 drivers/net/wan/hdlc_cisco.c 	skb->dev = dev;
skb               121 drivers/net/wan/hdlc_cisco.c 	skb_reset_network_header(skb);
skb               123 drivers/net/wan/hdlc_cisco.c 	dev_queue_xmit(skb);
skb               128 drivers/net/wan/hdlc_cisco.c static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
skb               130 drivers/net/wan/hdlc_cisco.c 	struct hdlc_header *data = (struct hdlc_header*)skb->data;
skb               132 drivers/net/wan/hdlc_cisco.c 	if (skb->len < sizeof(struct hdlc_header))
skb               143 drivers/net/wan/hdlc_cisco.c 		skb_pull(skb, sizeof(struct hdlc_header));
skb               151 drivers/net/wan/hdlc_cisco.c static int cisco_rx(struct sk_buff *skb)
skb               153 drivers/net/wan/hdlc_cisco.c 	struct net_device *dev = skb->dev;
skb               156 drivers/net/wan/hdlc_cisco.c 	struct hdlc_header *data = (struct hdlc_header*)skb->data;
skb               162 drivers/net/wan/hdlc_cisco.c 	if (skb->len < sizeof(struct hdlc_header))
skb               172 drivers/net/wan/hdlc_cisco.c 		dev_kfree_skb_any(skb);
skb               176 drivers/net/wan/hdlc_cisco.c 		if ((skb->len != sizeof(struct hdlc_header) +
skb               178 drivers/net/wan/hdlc_cisco.c 		    (skb->len != sizeof(struct hdlc_header) +
skb               181 drivers/net/wan/hdlc_cisco.c 				    skb->len);
skb               185 drivers/net/wan/hdlc_cisco.c 		cisco_data = (struct cisco_packet*)(skb->data + sizeof
skb               211 drivers/net/wan/hdlc_cisco.c 			dev_kfree_skb_any(skb);
skb               240 drivers/net/wan/hdlc_cisco.c 			dev_kfree_skb_any(skb);
skb               246 drivers/net/wan/hdlc_cisco.c 	dev_kfree_skb_any(skb);
skb               251 drivers/net/wan/hdlc_cisco.c 	dev_kfree_skb_any(skb);
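cisco_keepalive_send() above is the standard recipe for emitting a control frame from scratch: allocate, reserve headroom, fill, mark as control priority, queue on the device. A condensed sketch (the reserve size and payload copy are illustrative):

#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/skbuff.h>

static void example_send_ctrl(struct net_device *dev, const void *payload,
			      unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 16);

	if (!skb)
		return;

	skb_reserve(skb, 16);			/* headroom for link headers */
	skb_put_data(skb, payload, len);	/* copy payload, advance tail */
	skb->priority = TC_PRIO_CONTROL;	/* ahead of data traffic */
	skb->dev = dev;
	skb_reset_network_header(skb);
	dev_queue_xmit(skb);
}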
skb               277 drivers/net/wan/hdlc_fr.c 	struct sk_buff *skb = *skb_p;
skb               279 drivers/net/wan/hdlc_fr.c 	switch (skb->protocol) {
skb               282 drivers/net/wan/hdlc_fr.c 		skb_push(skb, head_len);
skb               283 drivers/net/wan/hdlc_fr.c 		skb->data[3] = NLPID_CCITT_ANSI_LMI;
skb               288 drivers/net/wan/hdlc_fr.c 		skb_push(skb, head_len);
skb               289 drivers/net/wan/hdlc_fr.c 		skb->data[3] = NLPID_CISCO_LMI;
skb               294 drivers/net/wan/hdlc_fr.c 		skb_push(skb, head_len);
skb               295 drivers/net/wan/hdlc_fr.c 		skb->data[3] = NLPID_IP;
skb               300 drivers/net/wan/hdlc_fr.c 		skb_push(skb, head_len);
skb               301 drivers/net/wan/hdlc_fr.c 		skb->data[3] = NLPID_IPV6;
skb               306 drivers/net/wan/hdlc_fr.c 		if (skb_headroom(skb) < head_len) {
skb               307 drivers/net/wan/hdlc_fr.c 			struct sk_buff *skb2 = skb_realloc_headroom(skb,
skb               311 drivers/net/wan/hdlc_fr.c 			dev_kfree_skb(skb);
skb               312 drivers/net/wan/hdlc_fr.c 			skb = *skb_p = skb2;
skb               314 drivers/net/wan/hdlc_fr.c 		skb_push(skb, head_len);
skb               315 drivers/net/wan/hdlc_fr.c 		skb->data[3] = FR_PAD;
skb               316 drivers/net/wan/hdlc_fr.c 		skb->data[4] = NLPID_SNAP;
skb               317 drivers/net/wan/hdlc_fr.c 		skb->data[5] = FR_PAD;
skb               318 drivers/net/wan/hdlc_fr.c 		skb->data[6] = 0x80;
skb               319 drivers/net/wan/hdlc_fr.c 		skb->data[7] = 0xC2;
skb               320 drivers/net/wan/hdlc_fr.c 		skb->data[8] = 0x00;
skb               321 drivers/net/wan/hdlc_fr.c 		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
skb               326 drivers/net/wan/hdlc_fr.c 		skb_push(skb, head_len);
skb               327 drivers/net/wan/hdlc_fr.c 		skb->data[3] = FR_PAD;
skb               328 drivers/net/wan/hdlc_fr.c 		skb->data[4] = NLPID_SNAP;
skb               329 drivers/net/wan/hdlc_fr.c 		skb->data[5] = FR_PAD;
skb               330 drivers/net/wan/hdlc_fr.c 		skb->data[6] = FR_PAD;
skb               331 drivers/net/wan/hdlc_fr.c 		skb->data[7] = FR_PAD;
skb               332 drivers/net/wan/hdlc_fr.c 		*(__be16*)(skb->data + 8) = skb->protocol;
skb               335 drivers/net/wan/hdlc_fr.c 	dlci_to_q922(skb->data, dlci);
skb               336 drivers/net/wan/hdlc_fr.c 	skb->data[2] = FR_UI;
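
The fr_hard_header() listing ends by writing the Q.922 address (dlci_to_q922()) and the FR_UI control byte. The two-byte Q.922 address carries a 10-bit DLCI split 6+4 across the bytes, with the EA (address-extension) bit terminating the field; a sketch of the encode/decode pair, consistent with what fr_rx() below parses (C/R, FECN/BECN and DE bits left zero):

	/* Sketch: Q.922 2-byte address <-> 10-bit DLCI. */
	#include <linux/types.h>

	static void my_dlci_to_q922(u8 *hdr, u16 dlci)
	{
		hdr[0] = (dlci >> 2) & 0xFC;		/* upper 6 bits, EA=0 */
		hdr[1] = ((dlci << 4) & 0xF0) | 0x01;	/* lower 4 bits, EA=1 */
	}

	static u16 my_q922_to_dlci(const u8 *hdr)
	{
		return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
	}
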
skb               409 drivers/net/wan/hdlc_fr.c static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
skb               415 drivers/net/wan/hdlc_fr.c 			int pad = ETH_ZLEN - skb->len;
skb               417 drivers/net/wan/hdlc_fr.c 				int len = skb->len;
skb               418 drivers/net/wan/hdlc_fr.c 				if (skb_tailroom(skb) < pad)
skb               419 drivers/net/wan/hdlc_fr.c 					if (pskb_expand_head(skb, 0, pad,
skb               422 drivers/net/wan/hdlc_fr.c 						dev_kfree_skb(skb);
skb               425 drivers/net/wan/hdlc_fr.c 				skb_put(skb, pad);
skb               426 drivers/net/wan/hdlc_fr.c 				memset(skb->data + len, 0, pad);
skb               428 drivers/net/wan/hdlc_fr.c 			skb->protocol = cpu_to_be16(ETH_P_802_3);
skb               430 drivers/net/wan/hdlc_fr.c 		if (!fr_hard_header(&skb, pvc->dlci)) {
skb               431 drivers/net/wan/hdlc_fr.c 			dev->stats.tx_bytes += skb->len;
skb               435 drivers/net/wan/hdlc_fr.c 			skb->dev = pvc->frad;
skb               436 drivers/net/wan/hdlc_fr.c 			dev_queue_xmit(skb);
skb               442 drivers/net/wan/hdlc_fr.c 	dev_kfree_skb(skb);
skb               470 drivers/net/wan/hdlc_fr.c 	struct sk_buff *skb;
skb               487 drivers/net/wan/hdlc_fr.c 	skb = dev_alloc_skb(len);
skb               488 drivers/net/wan/hdlc_fr.c 	if (!skb) {
skb               492 drivers/net/wan/hdlc_fr.c 	memset(skb->data, 0, len);
skb               493 drivers/net/wan/hdlc_fr.c 	skb_reserve(skb, 4);
skb               495 drivers/net/wan/hdlc_fr.c 		skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
skb               496 drivers/net/wan/hdlc_fr.c 		fr_hard_header(&skb, LMI_CISCO_DLCI);
skb               498 drivers/net/wan/hdlc_fr.c 		skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
skb               499 drivers/net/wan/hdlc_fr.c 		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
skb               501 drivers/net/wan/hdlc_fr.c 	data = skb_tail_pointer(skb);
skb               555 drivers/net/wan/hdlc_fr.c 	skb_put(skb, i);
skb               556 drivers/net/wan/hdlc_fr.c 	skb->priority = TC_PRIO_CONTROL;
skb               557 drivers/net/wan/hdlc_fr.c 	skb->dev = dev;
skb               558 drivers/net/wan/hdlc_fr.c 	skb_reset_network_header(skb);
skb               560 drivers/net/wan/hdlc_fr.c 	dev_queue_xmit(skb);
skb               650 drivers/net/wan/hdlc_fr.c static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
skb               659 drivers/net/wan/hdlc_fr.c 	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
skb               665 drivers/net/wan/hdlc_fr.c 	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
skb               671 drivers/net/wan/hdlc_fr.c 	if (skb->data[4] != LMI_CALLREF) {
skb               673 drivers/net/wan/hdlc_fr.c 			    skb->data[4]);
skb               677 drivers/net/wan/hdlc_fr.c 	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
skb               679 drivers/net/wan/hdlc_fr.c 			    skb->data[5]);
skb               684 drivers/net/wan/hdlc_fr.c 		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
skb               686 drivers/net/wan/hdlc_fr.c 				    skb->data[6]);
skb               693 drivers/net/wan/hdlc_fr.c 	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
skb               696 drivers/net/wan/hdlc_fr.c 			    skb->data[i]);
skb               700 drivers/net/wan/hdlc_fr.c 	if (skb->data[++i] != LMI_REPT_LEN) {
skb               702 drivers/net/wan/hdlc_fr.c 			    skb->data[i]);
skb               706 drivers/net/wan/hdlc_fr.c 	reptype = skb->data[++i];
skb               713 drivers/net/wan/hdlc_fr.c 	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
skb               716 drivers/net/wan/hdlc_fr.c 			    skb->data[i]);
skb               720 drivers/net/wan/hdlc_fr.c 	if (skb->data[++i] != LMI_INTEG_LEN) {
skb               722 drivers/net/wan/hdlc_fr.c 			    skb->data[i]);
skb               727 drivers/net/wan/hdlc_fr.c 	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
skb               728 drivers/net/wan/hdlc_fr.c 	rxseq = skb->data[i++];	/* Should confirm our sequence */
skb               789 drivers/net/wan/hdlc_fr.c 	while (skb->len >= i + 2 + stat_len) {
skb               794 drivers/net/wan/hdlc_fr.c 		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
skb               797 drivers/net/wan/hdlc_fr.c 				    skb->data[i]);
skb               801 drivers/net/wan/hdlc_fr.c 		if (skb->data[++i] != stat_len) {
skb               803 drivers/net/wan/hdlc_fr.c 				    skb->data[i]);
skb               808 drivers/net/wan/hdlc_fr.c 		new = !! (skb->data[i + 2] & 0x08);
skb               809 drivers/net/wan/hdlc_fr.c 		active = !! (skb->data[i + 2] & 0x02);
skb               811 drivers/net/wan/hdlc_fr.c 			dlci = (skb->data[i] << 8) | skb->data[i + 1];
skb               812 drivers/net/wan/hdlc_fr.c 			bw = (skb->data[i + 3] << 16) |
skb               813 drivers/net/wan/hdlc_fr.c 				(skb->data[i + 4] << 8) |
skb               814 drivers/net/wan/hdlc_fr.c 				(skb->data[i + 5]);
skb               816 drivers/net/wan/hdlc_fr.c 			dlci = ((skb->data[i] & 0x3F) << 4) |
skb               817 drivers/net/wan/hdlc_fr.c 				((skb->data[i + 1] & 0x78) >> 3);
skb               866 drivers/net/wan/hdlc_fr.c static int fr_rx(struct sk_buff *skb)
skb               868 drivers/net/wan/hdlc_fr.c 	struct net_device *frad = skb->dev;
skb               870 drivers/net/wan/hdlc_fr.c 	struct fr_hdr *fh = (struct fr_hdr *)skb->data;
skb               871 drivers/net/wan/hdlc_fr.c 	u8 *data = skb->data;
skb               876 drivers/net/wan/hdlc_fr.c 	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
skb               879 drivers/net/wan/hdlc_fr.c 	dlci = q922_to_dlci(skb->data);
skb               886 drivers/net/wan/hdlc_fr.c 		if (fr_lmi_recv(frad, skb))
skb               888 drivers/net/wan/hdlc_fr.c 		dev_kfree_skb_any(skb);
skb               898 drivers/net/wan/hdlc_fr.c 		dev_kfree_skb_any(skb);
skb               919 drivers/net/wan/hdlc_fr.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
skb               925 drivers/net/wan/hdlc_fr.c 		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
skb               927 drivers/net/wan/hdlc_fr.c 		skb->protocol = htons(ETH_P_IP);
skb               930 drivers/net/wan/hdlc_fr.c 		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
skb               932 drivers/net/wan/hdlc_fr.c 		skb->protocol = htons(ETH_P_IPV6);
skb               934 drivers/net/wan/hdlc_fr.c 	} else if (skb->len > 10 && data[3] == FR_PAD &&
skb               938 drivers/net/wan/hdlc_fr.c 		skb_pull(skb, 10);
skb               946 drivers/net/wan/hdlc_fr.c 			skb->protocol = htons(pid);
skb               951 drivers/net/wan/hdlc_fr.c 				skb->protocol = eth_type_trans(skb, dev);
skb               957 drivers/net/wan/hdlc_fr.c 			dev_kfree_skb_any(skb);
skb               962 drivers/net/wan/hdlc_fr.c 			    data[3], skb->len);
skb               963 drivers/net/wan/hdlc_fr.c 		dev_kfree_skb_any(skb);
skb               969 drivers/net/wan/hdlc_fr.c 		dev->stats.rx_bytes += skb->len;
skb               972 drivers/net/wan/hdlc_fr.c 		skb->dev = dev;
skb               973 drivers/net/wan/hdlc_fr.c 		netif_rx(skb);
skb               976 drivers/net/wan/hdlc_fr.c 		dev_kfree_skb_any(skb);
skb               982 drivers/net/wan/hdlc_fr.c 	dev_kfree_skb_any(skb);
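
fr_rx() above checks the 4-byte header (Q.922 address, FR_UI, NLPID), routes LMI DLCIs to fr_lmi_recv(), maps other DLCIs to a PVC device, strips the encapsulation, sets skb->protocol and counts stats before netif_rx(). A condensed sketch of just the NLPID_IP branch, reusing my_q922_to_dlci() from the sketch above; my_find_pvc() is a hypothetical stand-in for the driver's DLCI-to-device lookup:

	/* Sketch: minimal frame-relay RX for the IP case; error paths trimmed. */
	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/if_ether.h>

	static int my_fr_rx(struct sk_buff *skb)
	{
		struct net_device *dev;

		if (skb->len <= 4 || skb->data[2] != 0x03 /* FR_UI */)
			goto drop;

		dev = my_find_pvc(skb->dev, my_q922_to_dlci(skb->data));
		if (!dev)
			goto drop;

		skb = skb_share_check(skb, GFP_ATOMIC);	/* we modify it below */
		if (!skb)
			return NET_RX_DROP;

		skb_pull(skb, 4);		/* Q.922 hdr + UI + NLPID */
		skb->protocol = htons(ETH_P_IP);
		skb->dev = dev;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		return netif_rx(skb);
	drop:
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}
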
skb               139 drivers/net/wan/hdlc_ppp.c static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
skb               141 drivers/net/wan/hdlc_ppp.c 	struct hdlc_header *data = (struct hdlc_header*)skb->data;
skb               143 drivers/net/wan/hdlc_ppp.c 	if (skb->len < sizeof(struct hdlc_header))
skb               151 drivers/net/wan/hdlc_ppp.c 		skb_pull(skb, sizeof(struct hdlc_header));
skb               155 drivers/net/wan/hdlc_ppp.c 		skb_pull(skb, sizeof(struct hdlc_header));
skb               164 drivers/net/wan/hdlc_ppp.c static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
skb               173 drivers/net/wan/hdlc_ppp.c 	skb_push(skb, sizeof(struct hdlc_header));
skb               174 drivers/net/wan/hdlc_ppp.c 	data = (struct hdlc_header*)skb->data;
skb               199 drivers/net/wan/hdlc_ppp.c 	struct sk_buff *skb;
skb               200 drivers/net/wan/hdlc_ppp.c 	while ((skb = skb_dequeue(&tx_queue)) != NULL)
skb               201 drivers/net/wan/hdlc_ppp.c 		dev_queue_xmit(skb);
skb               207 drivers/net/wan/hdlc_ppp.c 	struct sk_buff *skb;
skb               220 drivers/net/wan/hdlc_ppp.c 	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
skb               222 drivers/net/wan/hdlc_ppp.c 	if (!skb) {
skb               226 drivers/net/wan/hdlc_ppp.c 	skb_reserve(skb, sizeof(struct hdlc_header));
skb               228 drivers/net/wan/hdlc_ppp.c 	cp = skb_put(skb, sizeof(struct cp_header));
skb               234 drivers/net/wan/hdlc_ppp.c 		skb_put_data(skb, &magic, magic_len);
skb               236 drivers/net/wan/hdlc_ppp.c 		skb_put_data(skb, data, len);
skb               243 drivers/net/wan/hdlc_ppp.c 		sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
skb               250 drivers/net/wan/hdlc_ppp.c 	ppp_hard_header(skb, dev, pid, NULL, NULL, 0);
skb               252 drivers/net/wan/hdlc_ppp.c 	skb->priority = TC_PRIO_CONTROL;
skb               253 drivers/net/wan/hdlc_ppp.c 	skb->dev = dev;
skb               254 drivers/net/wan/hdlc_ppp.c 	skb_reset_network_header(skb);
skb               255 drivers/net/wan/hdlc_ppp.c 	skb_queue_tail(&tx_queue, skb);
skb               429 drivers/net/wan/hdlc_ppp.c static int ppp_rx(struct sk_buff *skb)
skb               431 drivers/net/wan/hdlc_ppp.c 	struct hdlc_header *hdr = (struct hdlc_header*)skb->data;
skb               432 drivers/net/wan/hdlc_ppp.c 	struct net_device *dev = skb->dev;
skb               446 drivers/net/wan/hdlc_ppp.c 	if (skb->len < sizeof(struct hdlc_header))
skb               448 drivers/net/wan/hdlc_ppp.c 	cp = skb_pull(skb, sizeof(struct hdlc_header));
skb               458 drivers/net/wan/hdlc_ppp.c 				  ++ppp->seq, skb->len + 2, &hdr->protocol);
skb               464 drivers/net/wan/hdlc_ppp.c 	    skb->len < len /* truncated packet? */)
skb               466 drivers/net/wan/hdlc_ppp.c 	skb_pull(skb, sizeof(struct cp_header));
skb               478 drivers/net/wan/hdlc_ppp.c 		sprintf(ptr, " %02X", skb->data[i]);
skb               489 drivers/net/wan/hdlc_ppp.c 			pid = ntohs(*(__be16*)skb->data);
skb               499 drivers/net/wan/hdlc_ppp.c 					  cp->id, len - 4, skb->data + 4);
skb               514 drivers/net/wan/hdlc_ppp.c 		ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
skb               553 drivers/net/wan/hdlc_ppp.c 	dev_kfree_skb_any(skb);
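
hdlc_ppp.c builds LCP/IPCP control frames while holding a per-device lock, so instead of calling dev_queue_xmit() directly it parks them on a file-scope tx_queue and flushes after the lock is dropped (lines 199-201 and 255 above). A minimal sketch of that defer-and-flush idiom, assuming skb_queue_head_init() ran at init time:

	/* Sketch: queue control frames under a lock, transmit after it. */
	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/pkt_sched.h>

	static struct sk_buff_head my_tx_queue;

	static void my_queue_ctrl(struct sk_buff *skb)
	{
		skb->priority = TC_PRIO_CONTROL;
		skb_reset_network_header(skb);
		skb_queue_tail(&my_tx_queue, skb);	/* safe in any context */
	}

	static void my_tx_flush(void)			/* call with no locks held */
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&my_tx_queue)) != NULL)
			dev_queue_xmit(skb);
	}
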
skb                24 drivers/net/wan/hdlc_raw.c static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
skb                25 drivers/net/wan/hdlc_raw_eth.c static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev)
skb                27 drivers/net/wan/hdlc_raw_eth.c 	int pad = ETH_ZLEN - skb->len;
skb                29 drivers/net/wan/hdlc_raw_eth.c 		int len = skb->len;
skb                30 drivers/net/wan/hdlc_raw_eth.c 		if (skb_tailroom(skb) < pad)
skb                31 drivers/net/wan/hdlc_raw_eth.c 			if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) {
skb                33 drivers/net/wan/hdlc_raw_eth.c 				dev_kfree_skb(skb);
skb                36 drivers/net/wan/hdlc_raw_eth.c 		skb_put(skb, pad);
skb                37 drivers/net/wan/hdlc_raw_eth.c 		memset(skb->data + len, 0, pad);
skb                39 drivers/net/wan/hdlc_raw_eth.c 	return dev_to_hdlc(dev)->xmit(skb, dev);
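
Both pvc_xmit() (hdlc_fr.c, above) and eth_tx() (hdlc_raw_eth.c) pad short bridged-Ethernet frames to ETH_ZLEN by hand: grow the tail if there is not enough room, then skb_put() the pad bytes and zero them so stale memory never leaks onto the wire. A sketch of the idiom; current kernels also offer skb_put_padto(), which wraps essentially the same steps:

	/* Sketch: zero-pad a frame to the Ethernet minimum before TX. */
	#include <linux/skbuff.h>
	#include <linux/if_ether.h>

	static int my_pad_to_min(struct sk_buff *skb)
	{
		int len = skb->len;
		int pad = ETH_ZLEN - len;

		if (pad <= 0)
			return 0;			/* long enough already */

		if (skb_tailroom(skb) < pad &&
		    pskb_expand_head(skb, 0, pad, GFP_ATOMIC))
			return -ENOMEM;			/* caller frees skb */

		memset(skb_put(skb, pad), 0, pad);	/* no stale bytes */
		return 0;
	}
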
skb                30 drivers/net/wan/hdlc_x25.c 	struct sk_buff *skb;
skb                33 drivers/net/wan/hdlc_x25.c 	if ((skb = dev_alloc_skb(1)) == NULL) {
skb                38 drivers/net/wan/hdlc_x25.c 	ptr = skb_put(skb, 1);
skb                41 drivers/net/wan/hdlc_x25.c 	skb->protocol = x25_type_trans(skb, dev);
skb                42 drivers/net/wan/hdlc_x25.c 	netif_rx(skb);
skb                61 drivers/net/wan/hdlc_x25.c static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
skb                65 drivers/net/wan/hdlc_x25.c 	if (skb_cow(skb, 1))
skb                68 drivers/net/wan/hdlc_x25.c 	skb_push(skb, 1);
skb                69 drivers/net/wan/hdlc_x25.c 	skb_reset_network_header(skb);
skb                71 drivers/net/wan/hdlc_x25.c 	ptr  = skb->data;
skb                74 drivers/net/wan/hdlc_x25.c 	skb->protocol = x25_type_trans(skb, dev);
skb                75 drivers/net/wan/hdlc_x25.c 	return netif_rx(skb);
skb                80 drivers/net/wan/hdlc_x25.c static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
skb                84 drivers/net/wan/hdlc_x25.c 	skb_reset_network_header(skb);
skb                85 drivers/net/wan/hdlc_x25.c 	skb->protocol = hdlc_type_trans(skb, dev);
skb                88 drivers/net/wan/hdlc_x25.c 		dev_queue_xmit_nit(skb, dev);
skb                90 drivers/net/wan/hdlc_x25.c 	hdlc->xmit(skb, dev); /* Ignore return value :-( */
skb                95 drivers/net/wan/hdlc_x25.c static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
skb               101 drivers/net/wan/hdlc_x25.c 	switch (skb->data[0]) {
skb               103 drivers/net/wan/hdlc_x25.c 		skb_pull(skb, 1);
skb               104 drivers/net/wan/hdlc_x25.c 		skb_reset_network_header(skb);
skb               105 drivers/net/wan/hdlc_x25.c 		if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
skb               106 drivers/net/wan/hdlc_x25.c 			dev_kfree_skb(skb);
skb               135 drivers/net/wan/hdlc_x25.c 	dev_kfree_skb(skb);
skb               168 drivers/net/wan/hdlc_x25.c static int x25_rx(struct sk_buff *skb)
skb               170 drivers/net/wan/hdlc_x25.c 	struct net_device *dev = skb->dev;
skb               172 drivers/net/wan/hdlc_x25.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
skb               177 drivers/net/wan/hdlc_x25.c 	if (lapb_data_received(dev, skb) == LAPB_OK)
skb               181 drivers/net/wan/hdlc_x25.c 	dev_kfree_skb_any(skb);
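
hdlc_x25.c talks to the LAPB/X.25 core through a one-byte pseudo-header: x25_data_indication() pushes a leading 0x00 ("data") byte before netif_rx(), and x25_xmit() switches on skb->data[0] to tell data from connect/disconnect requests, per the kernel's X.25 device interface convention. A sketch of the RX side:

	/* Sketch: prefix the 1-byte "data" indicator for the X.25 layer. */
	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <net/x25device.h>

	static int my_x25_data_indication(struct net_device *dev,
					  struct sk_buff *skb)
	{
		if (skb_cow(skb, 1))		/* ensure 1 byte of headroom */
			return NET_RX_DROP;

		skb_push(skb, 1);
		skb->data[0] = 0x00;		/* 0x00 = data indication */
		skb_reset_network_header(skb);
		skb->protocol = x25_type_trans(skb, dev);
		return netif_rx(skb);
	}
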
skb                61 drivers/net/wan/hostess_sv11.c static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
skb                64 drivers/net/wan/hostess_sv11.c 	skb_trim(skb, skb->len - 2);
skb                65 drivers/net/wan/hostess_sv11.c 	skb->protocol = hdlc_type_trans(skb, c->netdevice);
skb                66 drivers/net/wan/hostess_sv11.c 	skb_reset_mac_header(skb);
skb                67 drivers/net/wan/hostess_sv11.c 	skb->dev = c->netdevice;
skb                72 drivers/net/wan/hostess_sv11.c 	netif_rx(skb);
skb               163 drivers/net/wan/hostess_sv11.c static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb,
skb               166 drivers/net/wan/hostess_sv11.c 	return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
skb               664 drivers/net/wan/ixp4xx_hss.c 		struct sk_buff *skb;
skb               703 drivers/net/wan/ixp4xx_hss.c 		skb = NULL;
skb               707 drivers/net/wan/ixp4xx_hss.c 			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
skb               708 drivers/net/wan/ixp4xx_hss.c 				phys = dma_map_single(&dev->dev, skb->data,
skb               712 drivers/net/wan/ixp4xx_hss.c 					dev_kfree_skb(skb);
skb               713 drivers/net/wan/ixp4xx_hss.c 					skb = NULL;
skb               717 drivers/net/wan/ixp4xx_hss.c 			skb = netdev_alloc_skb(dev, desc->pkt_len);
skb               719 drivers/net/wan/ixp4xx_hss.c 			if (!skb)
skb               741 drivers/net/wan/ixp4xx_hss.c 		if (!skb) {
skb               751 drivers/net/wan/ixp4xx_hss.c 		temp = skb;
skb               752 drivers/net/wan/ixp4xx_hss.c 		skb = port->rx_buff_tab[n];
skb               758 drivers/net/wan/ixp4xx_hss.c 		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
skb               761 drivers/net/wan/ixp4xx_hss.c 		skb_put(skb, desc->pkt_len);
skb               763 drivers/net/wan/ixp4xx_hss.c 		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);
skb               765 drivers/net/wan/ixp4xx_hss.c 		skb->protocol = hdlc_type_trans(skb, dev);
skb               767 drivers/net/wan/ixp4xx_hss.c 		dev->stats.rx_bytes += skb->len;
skb               768 drivers/net/wan/ixp4xx_hss.c 		netif_receive_skb(skb);
skb               827 drivers/net/wan/ixp4xx_hss.c static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
skb               840 drivers/net/wan/ixp4xx_hss.c 	if (unlikely(skb->len > HDLC_MAX_MRU)) {
skb               841 drivers/net/wan/ixp4xx_hss.c 		dev_kfree_skb(skb);
skb               846 drivers/net/wan/ixp4xx_hss.c 	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);
skb               848 drivers/net/wan/ixp4xx_hss.c 	len = skb->len;
skb               852 drivers/net/wan/ixp4xx_hss.c 	mem = skb->data;
skb               854 drivers/net/wan/ixp4xx_hss.c 	offset = (int)skb->data & 3; /* keep 32-bit alignment */
skb               857 drivers/net/wan/ixp4xx_hss.c 		dev_kfree_skb(skb);
skb               861 drivers/net/wan/ixp4xx_hss.c 	memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
skb               862 drivers/net/wan/ixp4xx_hss.c 	dev_kfree_skb(skb);
skb               868 drivers/net/wan/ixp4xx_hss.c 		dev_kfree_skb(skb);
skb               881 drivers/net/wan/ixp4xx_hss.c 	port->tx_buff_tab[n] = skb;
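
The hss_hdlc_poll() lines show the DMA-safe RX refill pattern: netdev_alloc_skb(), dma_map_single(), then a dma_mapping_error() check that frees the skb and treats the slot as empty when the mapping fails. A sketch of one refill step, with rx_size standing in for the driver's RX_SIZE:

	/* Sketch: allocate and DMA-map one RX buffer; NULL = retry later. */
	#include <linux/dma-mapping.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static struct sk_buff *my_rx_refill(struct net_device *dev,
					    dma_addr_t *phys, int rx_size)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, rx_size);

		if (!skb)
			return NULL;

		*phys = dma_map_single(&dev->dev, skb->data, rx_size,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&dev->dev, *phys)) {
			dev_kfree_skb(skb);	/* mapping failed */
			return NULL;
		}
		return skb;
	}
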
skb                84 drivers/net/wan/lapbether.c static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
skb                92 drivers/net/wan/lapbether.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
skb                95 drivers/net/wan/lapbether.c 	if (!pskb_may_pull(skb, 2))
skb               105 drivers/net/wan/lapbether.c 	len = skb->data[0] + skb->data[1] * 256;
skb               109 drivers/net/wan/lapbether.c 	skb_pull(skb, 2);	/* Remove the length bytes */
skb               110 drivers/net/wan/lapbether.c 	skb_trim(skb, len);	/* Set the length of the data */
skb               112 drivers/net/wan/lapbether.c 	if ((err = lapb_data_received(lapbeth->axdev, skb)) != LAPB_OK) {
skb               120 drivers/net/wan/lapbether.c 	kfree_skb(skb);
skb               123 drivers/net/wan/lapbether.c 	kfree_skb(skb);
skb               127 drivers/net/wan/lapbether.c static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
skb               131 drivers/net/wan/lapbether.c 	skb_push(skb, 1);
skb               133 drivers/net/wan/lapbether.c 	if (skb_cow(skb, 1))
skb               136 drivers/net/wan/lapbether.c 	ptr  = skb->data;
skb               139 drivers/net/wan/lapbether.c 	skb->protocol = x25_type_trans(skb, dev);
skb               140 drivers/net/wan/lapbether.c 	return netif_rx(skb);
skb               146 drivers/net/wan/lapbether.c static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
skb               158 drivers/net/wan/lapbether.c 	switch (skb->data[0]) {
skb               173 drivers/net/wan/lapbether.c 	skb_pull(skb, 1);
skb               175 drivers/net/wan/lapbether.c 	if ((err = lapb_data_request(dev, skb)) != LAPB_OK) {
skb               182 drivers/net/wan/lapbether.c 	kfree_skb(skb);
skb               186 drivers/net/wan/lapbether.c static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
skb               191 drivers/net/wan/lapbether.c 	int size = skb->len;
skb               193 drivers/net/wan/lapbether.c 	skb->protocol = htons(ETH_P_X25);
skb               195 drivers/net/wan/lapbether.c 	ptr = skb_push(skb, 2);
skb               203 drivers/net/wan/lapbether.c 	skb->dev = dev = lapbeth->ethdev;
skb               205 drivers/net/wan/lapbether.c 	dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
skb               207 drivers/net/wan/lapbether.c 	dev_queue_xmit(skb);
skb               213 drivers/net/wan/lapbether.c 	struct sk_buff *skb = dev_alloc_skb(1);
skb               215 drivers/net/wan/lapbether.c 	if (!skb) {
skb               220 drivers/net/wan/lapbether.c 	ptr  = skb_put(skb, 1);
skb               223 drivers/net/wan/lapbether.c 	skb->protocol = x25_type_trans(skb, dev);
skb               224 drivers/net/wan/lapbether.c 	netif_rx(skb);
skb               230 drivers/net/wan/lapbether.c 	struct sk_buff *skb = dev_alloc_skb(1);
skb               232 drivers/net/wan/lapbether.c 	if (!skb) {
skb               237 drivers/net/wan/lapbether.c 	ptr  = skb_put(skb, 1);
skb               240 drivers/net/wan/lapbether.c 	skb->protocol = x25_type_trans(skb, dev);
skb               241 drivers/net/wan/lapbether.c 	netif_rx(skb);
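
lapbether.c frames LAPB over raw Ethernet with a two-byte little-endian length prefix: lapbeth_rcv() derives len = data[0] + data[1]*256 (after pskb_may_pull() guarantees both bytes are linear), pulls the prefix, and trims away Ethernet padding; the transmit path pushes the two bytes back on. A sketch of both halves, with the prefix holding the payload length as the RX math above implies:

	/* Sketch: 2-byte little-endian length prefix, lapbether style. */
	#include <linux/skbuff.h>

	static int my_unframe(struct sk_buff *skb)
	{
		unsigned int len;

		if (!pskb_may_pull(skb, 2))	/* both prefix bytes linear */
			return -EINVAL;

		len = skb->data[0] + skb->data[1] * 256;
		skb_pull(skb, 2);		/* strip the prefix */
		skb_trim(skb, len);		/* drop Ethernet pad bytes */
		return 0;
	}

	static void my_frame(struct sk_buff *skb)	/* headroom guaranteed */
	{
		unsigned int size = skb->len;
		u8 *ptr = skb_push(skb, 2);

		ptr[0] = size % 256;		/* payload length, LE */
		ptr[1] = size / 256;
	}
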
skb                88 drivers/net/wan/lmc/lmc_main.c static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
skb              1181 drivers/net/wan/lmc/lmc_main.c         struct sk_buff *skb = sc->lmc_rxq[i];
skb              1186 drivers/net/wan/lmc/lmc_main.c         if (skb != NULL)
skb              1187 drivers/net/wan/lmc/lmc_main.c             dev_kfree_skb(skb);
skb              1396 drivers/net/wan/lmc/lmc_main.c static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
skb              1412 drivers/net/wan/lmc/lmc_main.c     sc->lmc_txq[entry] = skb;
skb              1413 drivers/net/wan/lmc/lmc_main.c     sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
skb              1415 drivers/net/wan/lmc/lmc_main.c     LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
skb              1461 drivers/net/wan/lmc/lmc_main.c     flag = sc->lmc_txring[entry].length = (skb->len) | flag |
skb              1493 drivers/net/wan/lmc/lmc_main.c     struct sk_buff *skb, *nsb;
skb              1548 drivers/net/wan/lmc/lmc_main.c         skb = sc->lmc_rxq[i];
skb              1555 drivers/net/wan/lmc/lmc_main.c         if (!skb) {
skb              1569 drivers/net/wan/lmc/lmc_main.c         LMC_CONSOLE_LOG("recv", skb->data, len);
skb              1587 drivers/net/wan/lmc/lmc_main.c             skb_put (skb, len);
skb              1588 drivers/net/wan/lmc/lmc_main.c             skb->protocol = lmc_proto_type(sc, skb);
skb              1589 drivers/net/wan/lmc/lmc_main.c             skb_reset_mac_header(skb);
skb              1591 drivers/net/wan/lmc/lmc_main.c             skb->dev = dev;
skb              1592 drivers/net/wan/lmc/lmc_main.c             lmc_proto_netif(sc, skb);
skb              1624 drivers/net/wan/lmc/lmc_main.c             skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
skb              1815 drivers/net/wan/lmc/lmc_main.c         struct sk_buff *skb;
skb              1819 drivers/net/wan/lmc/lmc_main.c             skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
skb              1820 drivers/net/wan/lmc/lmc_main.c             if(skb == NULL){
skb              1826 drivers/net/wan/lmc/lmc_main.c                 sc->lmc_rxq[i] = skb;
skb              1831 drivers/net/wan/lmc/lmc_main.c             skb = sc->lmc_rxq[i];
skb              1834 drivers/net/wan/lmc/lmc_main.c         skb->dev = sc->lmc_device;
skb              1840 drivers/net/wan/lmc/lmc_main.c         sc->lmc_rxring[i].length = skb_tailroom(skb);
skb              1845 drivers/net/wan/lmc/lmc_main.c         sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
skb                98 drivers/net/wan/lmc/lmc_proto.c __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
skb               103 drivers/net/wan/lmc/lmc_proto.c 	    return hdlc_type_trans(skb, sc->lmc_device);
skb               120 drivers/net/wan/lmc/lmc_proto.c void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
skb               127 drivers/net/wan/lmc/lmc_proto.c         netif_rx(skb);
skb                11 drivers/net/wan/lmc/lmc_proto.h __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
skb                12 drivers/net/wan/lmc/lmc_proto.h void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
skb               442 drivers/net/wan/sbni.c sbni_start_xmit( struct sk_buff  *skb,  struct net_device  *dev )
skb               457 drivers/net/wan/sbni.c 			prepare_to_send( skb, p );
skb               470 drivers/net/wan/sbni.c sbni_start_xmit( struct sk_buff  *skb,  struct net_device  *dev )
skb               477 drivers/net/wan/sbni.c 	prepare_to_send( skb, dev );
skb               684 drivers/net/wan/sbni.c 	struct sk_buff    *skb	 = nl->tx_buf_p;
skb               686 drivers/net/wan/sbni.c 	unsigned  len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
skb               688 drivers/net/wan/sbni.c 	outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
skb               689 drivers/net/wan/sbni.c 	*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
skb               840 drivers/net/wan/sbni.c prepare_to_send( struct sk_buff  *skb,  struct net_device  *dev )
skb               853 drivers/net/wan/sbni.c 	len = skb->len;
skb               857 drivers/net/wan/sbni.c 	nl->tx_buf_p	= skb;
skb               993 drivers/net/wan/sbni.c 	struct sk_buff  *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
skb               994 drivers/net/wan/sbni.c 	if( !skb )
skb               997 drivers/net/wan/sbni.c 	skb_reserve( skb, 2 );		/* Align IP on longword boundaries */
skb               998 drivers/net/wan/sbni.c 	return  skb;
skb              1006 drivers/net/wan/sbni.c 	struct sk_buff    *skb = nl->rx_buf_p;
skb              1008 drivers/net/wan/sbni.c 	skb_put( skb, nl->inppos );
skb              1011 drivers/net/wan/sbni.c 	skb->protocol = eth_type_trans( skb, nl->master );
skb              1012 drivers/net/wan/sbni.c 	netif_rx( skb );
skb              1016 drivers/net/wan/sbni.c 	skb->protocol = eth_type_trans( skb, dev );
skb              1017 drivers/net/wan/sbni.c 	netif_rx( skb );
skb               656 drivers/net/wan/sdla.c static netdev_tx_t sdla_transmit(struct sk_buff *skb,
skb               681 drivers/net/wan/sdla.c 			if (skb->dev->type != ARPHRD_DLCI)
skb               684 drivers/net/wan/sdla.c 					    skb->dev->type);
skb               701 drivers/net/wan/sdla.c 				ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, skb->data, skb->len, NULL, NULL);
skb               705 drivers/net/wan/sdla.c 				ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, NULL, skb->len, &addr, &size);
skb               712 drivers/net/wan/sdla.c 					__sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
skb               744 drivers/net/wan/sdla.c 	dev_kfree_skb(skb);
skb               753 drivers/net/wan/sdla.c 	struct sk_buff	 *skb;
skb               767 drivers/net/wan/sdla.c 	skb = NULL;
skb               824 drivers/net/wan/sdla.c 		skb = dev_alloc_skb(len + sizeof(struct frhdr));
skb               825 drivers/net/wan/sdla.c 		if (skb == NULL) 
skb               832 drivers/net/wan/sdla.c 			skb_reserve(skb, sizeof(struct frhdr));
skb               841 drivers/net/wan/sdla.c 				__sdla_read(dev, SDLA_502_RCV_BUF + SDLA_502_DATA_OFS, skb_put(skb,len), len);
skb               854 drivers/net/wan/sdla.c 				__sdla_read(dev, addr, skb_put(skb, len2), len2);
skb               856 drivers/net/wan/sdla.c 					__sdla_read(dev, buf_base, skb_put(skb, split), split);
skb               870 drivers/net/wan/sdla.c 		(*dlp->receive)(skb, master);
skb                61 drivers/net/wan/sealevel.c static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
skb                64 drivers/net/wan/sealevel.c 	skb_trim(skb, skb->len - 2);
skb                65 drivers/net/wan/sealevel.c 	skb->protocol = hdlc_type_trans(skb, c->netdevice);
skb                66 drivers/net/wan/sealevel.c 	skb_reset_mac_header(skb);
skb                67 drivers/net/wan/sealevel.c 	skb->dev = c->netdevice;
skb                68 drivers/net/wan/sealevel.c 	netif_rx(skb);
skb               155 drivers/net/wan/sealevel.c static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
skb               158 drivers/net/wan/sealevel.c 	return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
skb               165 drivers/net/wan/wanxl.c 		struct sk_buff *skb = port->tx_skbs[port->tx_in];
skb               180 drivers/net/wan/wanxl.c 			dev->stats.tx_bytes += skb->len;
skb               183 drivers/net/wan/wanxl.c 		pci_unmap_single(port->card->pdev, desc->address, skb->len,
skb               185 drivers/net/wan/wanxl.c 		dev_consume_skb_irq(skb);
skb               202 drivers/net/wan/wanxl.c 			struct sk_buff *skb = card->rx_skbs[card->rx_in];
skb               207 drivers/net/wan/wanxl.c 			if (!skb)
skb               213 drivers/net/wan/wanxl.c 				skb_put(skb, desc->length);
skb               217 drivers/net/wan/wanxl.c 				       skb->len);
skb               218 drivers/net/wan/wanxl.c 				debug_frame(skb);
skb               221 drivers/net/wan/wanxl.c 				dev->stats.rx_bytes += skb->len;
skb               222 drivers/net/wan/wanxl.c 				skb->protocol = hdlc_type_trans(skb, dev);
skb               223 drivers/net/wan/wanxl.c 				netif_rx(skb);
skb               224 drivers/net/wan/wanxl.c 				skb = NULL;
skb               227 drivers/net/wan/wanxl.c 			if (!skb) {
skb               228 drivers/net/wan/wanxl.c 				skb = dev_alloc_skb(BUFFER_LENGTH);
skb               229 drivers/net/wan/wanxl.c 				desc->address = skb ?
skb               230 drivers/net/wan/wanxl.c 					pci_map_single(card->pdev, skb->data,
skb               233 drivers/net/wan/wanxl.c 				card->rx_skbs[card->rx_in] = skb;
skb               270 drivers/net/wan/wanxl.c static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
skb               289 drivers/net/wan/wanxl.c 	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
skb               290 drivers/net/wan/wanxl.c 	debug_frame(skb);
skb               293 drivers/net/wan/wanxl.c 	port->tx_skbs[port->tx_out] = skb;
skb               294 drivers/net/wan/wanxl.c 	desc->address = pci_map_single(port->card->pdev, skb->data, skb->len,
skb               296 drivers/net/wan/wanxl.c 	desc->length = skb->len;
skb               698 drivers/net/wan/wanxl.c 		struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
skb               699 drivers/net/wan/wanxl.c 		card->rx_skbs[i] = skb;
skb               700 drivers/net/wan/wanxl.c 		if (skb)
skb               702 drivers/net/wan/wanxl.c 				pci_map_single(card->pdev, skb->data,
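
The wanxl.c lines around tx_in show the reclaim half of a PCI descriptor ring: account the frame, unmap the streaming DMA buffer, and release the skb with dev_consume_skb_irq() — a successful send from IRQ context, so "consume" rather than "kfree". Sketch:

	/* Sketch: reclaim one completed TX descriptor. */
	#include <linux/pci.h>
	#include <linux/netdevice.h>

	static void my_tx_complete(struct net_device *dev, struct pci_dev *pdev,
				   dma_addr_t addr, struct sk_buff *skb)
	{
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		pci_unmap_single(pdev, addr, skb->len, PCI_DMA_TODEVICE);
		dev_consume_skb_irq(skb);	/* good TX, IRQ context */
	}
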
skb               191 drivers/net/wan/x25_asy.c 	struct sk_buff *skb;
skb               198 drivers/net/wan/x25_asy.c 	skb = dev_alloc_skb(count+1);
skb               199 drivers/net/wan/x25_asy.c 	if (skb == NULL) {
skb               204 drivers/net/wan/x25_asy.c 	skb_push(skb, 1);	/* LAPB internal control */
skb               205 drivers/net/wan/x25_asy.c 	skb_put_data(skb, sl->rbuff, count);
skb               206 drivers/net/wan/x25_asy.c 	skb->protocol = x25_type_trans(skb, sl->dev);
skb               207 drivers/net/wan/x25_asy.c 	err = lapb_data_received(skb->dev, skb);
skb               209 drivers/net/wan/x25_asy.c 		kfree_skb(skb);
skb               212 drivers/net/wan/x25_asy.c 		netif_rx(skb);
skb               300 drivers/net/wan/x25_asy.c static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
skb               308 drivers/net/wan/x25_asy.c 		kfree_skb(skb);
skb               312 drivers/net/wan/x25_asy.c 	switch (skb->data[0]) {
skb               320 drivers/net/wan/x25_asy.c 		kfree_skb(skb);
skb               329 drivers/net/wan/x25_asy.c 		kfree_skb(skb);
skb               332 drivers/net/wan/x25_asy.c 	skb_pull(skb, 1);	/* Remove control byte */
skb               344 drivers/net/wan/x25_asy.c 	err = lapb_data_request(dev, skb);
skb               347 drivers/net/wan/x25_asy.c 		kfree_skb(skb);
skb               363 drivers/net/wan/x25_asy.c static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
skb               365 drivers/net/wan/x25_asy.c 	return netif_rx(skb);
skb               374 drivers/net/wan/x25_asy.c static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
skb               382 drivers/net/wan/x25_asy.c 		kfree_skb(skb);
skb               386 drivers/net/wan/x25_asy.c 	if (skb != NULL) {
skb               388 drivers/net/wan/x25_asy.c 		dev->stats.tx_bytes += skb->len;
skb               389 drivers/net/wan/x25_asy.c 		x25_asy_encaps(sl, skb->data, skb->len);
skb               390 drivers/net/wan/x25_asy.c 		dev_kfree_skb(skb);
skb               402 drivers/net/wan/x25_asy.c 	struct sk_buff *skb;
skb               405 drivers/net/wan/x25_asy.c 	skb = dev_alloc_skb(1);
skb               406 drivers/net/wan/x25_asy.c 	if (skb == NULL) {
skb               411 drivers/net/wan/x25_asy.c 	ptr  = skb_put(skb, 1);
skb               414 drivers/net/wan/x25_asy.c 	skb->protocol = x25_type_trans(skb, sl->dev);
skb               415 drivers/net/wan/x25_asy.c 	netif_rx(skb);
skb               421 drivers/net/wan/x25_asy.c 	struct sk_buff *skb;
skb               424 drivers/net/wan/x25_asy.c 	skb = dev_alloc_skb(1);
skb               425 drivers/net/wan/x25_asy.c 	if (skb == NULL) {
skb               430 drivers/net/wan/x25_asy.c 	ptr  = skb_put(skb, 1);
skb               433 drivers/net/wan/x25_asy.c 	skb->protocol = x25_type_trans(skb, sl->dev);
skb               434 drivers/net/wan/x25_asy.c 	netif_rx(skb);
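
x25_asy.c (like hdlc_x25.c and lapbether.c above) reports LAPB connect/disconnect events upstream by injecting a one-byte skb through the same pseudo-header convention, assumed here to be 0x01 for "connected" and 0x02 for "disconnected". Sketch:

	/* Sketch: inject a 1-byte LAPB status indication to the X.25 layer. */
	#include <linux/skbuff.h>
	#include <net/x25device.h>

	static void my_lapb_notify(struct net_device *dev, u8 code)
	{
		struct sk_buff *skb = dev_alloc_skb(1);
		u8 *ptr;

		if (!skb)
			return;			/* event lost; worth logging */

		ptr = skb_put(skb, 1);
		*ptr = code;			/* 0x01 connect, 0x02 disconnect */
		skb->protocol = x25_type_trans(skb, dev);
		netif_rx(skb);
	}
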
skb               362 drivers/net/wan/z85230.c 				if(c->skb)
skb               363 drivers/net/wan/z85230.c 					c->dptr=c->skb->data;
skb               794 drivers/net/wan/z85230.c 	c->skb = NULL;
skb               860 drivers/net/wan/z85230.c 	c->skb = NULL;
skb              1055 drivers/net/wan/z85230.c 	c->skb = NULL;
skb              1395 drivers/net/wan/z85230.c 	c->skb=NULL;
skb              1525 drivers/net/wan/z85230.c 	struct sk_buff *skb;
skb              1531 drivers/net/wan/z85230.c 	skb = c->tx_skb;
skb              1535 drivers/net/wan/z85230.c 	c->netdevice->stats.tx_bytes += skb->len;
skb              1536 drivers/net/wan/z85230.c 	dev_consume_skb_irq(skb);
skb              1548 drivers/net/wan/z85230.c void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
skb              1550 drivers/net/wan/z85230.c 	dev_kfree_skb_any(skb);
skb              1570 drivers/net/wan/z85230.c 	struct sk_buff *skb;
skb              1634 drivers/net/wan/z85230.c 		skb = dev_alloc_skb(ct);
skb              1635 drivers/net/wan/z85230.c 		if (skb == NULL) {
skb              1639 drivers/net/wan/z85230.c 			skb_put(skb, ct);
skb              1640 drivers/net/wan/z85230.c 			skb_copy_to_linear_data(skb, rxb, ct);
skb              1647 drivers/net/wan/z85230.c 		skb = c->skb;
skb              1663 drivers/net/wan/z85230.c 		c->skb = c->skb2;
skb              1666 drivers/net/wan/z85230.c 		if (c->skb) {
skb              1667 drivers/net/wan/z85230.c 			c->dptr = c->skb->data;
skb              1686 drivers/net/wan/z85230.c 	if (skb) {
skb              1687 drivers/net/wan/z85230.c 		skb_trim(skb, ct);
skb              1688 drivers/net/wan/z85230.c 		c->rx_function(c, skb);
skb              1703 drivers/net/wan/z85230.c static inline int spans_boundary(struct sk_buff *skb)
skb              1705 drivers/net/wan/z85230.c 	unsigned long a=(unsigned long)skb->data;
skb              1706 drivers/net/wan/z85230.c 	a^=(a+skb->len);
skb              1726 drivers/net/wan/z85230.c netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
skb              1742 drivers/net/wan/z85230.c 	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
skb              1753 drivers/net/wan/z85230.c 		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
skb              1756 drivers/net/wan/z85230.c 		c->tx_next_ptr=skb->data;	
skb              1758 drivers/net/wan/z85230.c 	c->tx_next_skb=skb;
skb               274 drivers/net/wan/z85230.h 	struct sk_buff	*skb;		/* Buffer dptr points into */
skb               410 drivers/net/wan/z85230.h netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
skb               411 drivers/net/wan/z85230.h void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
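
spans_boundary() in z85230.c decides whether a TX buffer must be bounce-copied before ISA DMA: XOR-ing the buffer's start address with its end address exposes every address bit that changes across the frame, and for frames shorter than 64 KiB a crossing of a 64 KiB boundary (which the ISA DMA controller cannot cross) always flips bit 16. z8530_queue_xmit() additionally bounces anything above the 16 MB ISA limit. A sketch of the test:

	/* Sketch: true if [data, data + len) crosses a 64 KiB boundary,
	 * valid for len < 64 KiB (always true for network frames here). */
	static inline int my_spans_64k(const void *data, unsigned int len)
	{
		unsigned long a = (unsigned long)data;

		a ^= a + len;			/* bits differing start vs end */
		return (a & 0x10000) != 0;	/* bit 16 flips on a crossing */
	}
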
skb               207 drivers/net/wimax/i2400m/driver.c 	struct sk_buff *skb;
skb               212 drivers/net/wimax/i2400m/driver.c 	skb = i2400m_get_device_info(i2400m);
skb               213 drivers/net/wimax/i2400m/driver.c 	if (IS_ERR(skb)) {
skb               214 drivers/net/wimax/i2400m/driver.c 		result = PTR_ERR(skb);
skb               220 drivers/net/wimax/i2400m/driver.c 	ddi = (void *) skb->data;
skb               242 drivers/net/wimax/i2400m/driver.c 	kfree_skb(skb);
skb               144 drivers/net/wimax/i2400m/netdev.c 	struct sk_buff *skb;
skb               148 drivers/net/wimax/i2400m/netdev.c 	skb = i2400m->wake_tx_skb;
skb               152 drivers/net/wimax/i2400m/netdev.c 	d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
skb               154 drivers/net/wimax/i2400m/netdev.c 	if (skb == NULL) {
skb               184 drivers/net/wimax/i2400m/netdev.c 	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
skb               188 drivers/net/wimax/i2400m/netdev.c 	kfree_skb(skb);	/* refcount transferred by _hard_start_xmit() */
skb               192 drivers/net/wimax/i2400m/netdev.c 		ws, i2400m, skb, result);
skb               205 drivers/net/wimax/i2400m/netdev.c void i2400m_tx_prep_header(struct sk_buff *skb)
skb               208 drivers/net/wimax/i2400m/netdev.c 	skb_pull(skb, ETH_HLEN);
skb               209 drivers/net/wimax/i2400m/netdev.c 	pl_hdr = skb_push(skb, sizeof(*pl_hdr));
skb               261 drivers/net/wimax/i2400m/netdev.c 		       struct sk_buff *skb)
skb               267 drivers/net/wimax/i2400m/netdev.c 	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
skb               271 drivers/net/wimax/i2400m/netdev.c 			 skb, skb->len);
skb               272 drivers/net/wimax/i2400m/netdev.c 		d_dump(4, dev, skb->data, skb->len);
skb               282 drivers/net/wimax/i2400m/netdev.c 		i2400m->wake_tx_skb = skb_get(skb);	/* transfer ref count */
skb               283 drivers/net/wimax/i2400m/netdev.c 		i2400m_tx_prep_header(skb);
skb               296 drivers/net/wimax/i2400m/netdev.c 				 skb, netif_queue_stopped(net_dev));
skb               299 drivers/net/wimax/i2400m/netdev.c 	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
skb               314 drivers/net/wimax/i2400m/netdev.c 		  struct sk_buff *skb)
skb               320 drivers/net/wimax/i2400m/netdev.c 		  i2400m, net_dev, skb);
skb               323 drivers/net/wimax/i2400m/netdev.c 	i2400m_tx_prep_header(skb);
skb               325 drivers/net/wimax/i2400m/netdev.c 		 skb, skb->len);
skb               326 drivers/net/wimax/i2400m/netdev.c 	d_dump(4, dev, skb->data, skb->len);
skb               327 drivers/net/wimax/i2400m/netdev.c 	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
skb               329 drivers/net/wimax/i2400m/netdev.c 		i2400m, net_dev, skb, result);
skb               353 drivers/net/wimax/i2400m/netdev.c netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
skb               360 drivers/net/wimax/i2400m/netdev.c 	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
skb               362 drivers/net/wimax/i2400m/netdev.c 	if (skb_cow_head(skb, 0))
skb               366 drivers/net/wimax/i2400m/netdev.c 		result = i2400m_net_wake_tx(i2400m, net_dev, skb);
skb               368 drivers/net/wimax/i2400m/netdev.c 		result = i2400m_net_tx(i2400m, net_dev, skb);
skb               374 drivers/net/wimax/i2400m/netdev.c 		net_dev->stats.tx_bytes += skb->len;
skb               376 drivers/net/wimax/i2400m/netdev.c 	dev_kfree_skb(skb);
skb               377 drivers/net/wimax/i2400m/netdev.c 	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
skb               458 drivers/net/wimax/i2400m/netdev.c 	struct sk_buff *skb;
skb               463 drivers/net/wimax/i2400m/netdev.c 		skb = skb_get(skb_rx);
skb               464 drivers/net/wimax/i2400m/netdev.c 		d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
skb               465 drivers/net/wimax/i2400m/netdev.c 		skb_pull(skb, buf - (void *) skb->data);
skb               466 drivers/net/wimax/i2400m/netdev.c 		skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
skb               470 drivers/net/wimax/i2400m/netdev.c 		skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
skb               471 drivers/net/wimax/i2400m/netdev.c 		if (skb == NULL) {
skb               476 drivers/net/wimax/i2400m/netdev.c 		skb_put_data(skb, buf, buf_len);
skb               479 drivers/net/wimax/i2400m/netdev.c 				  skb->data - ETH_HLEN,
skb               481 drivers/net/wimax/i2400m/netdev.c 	skb_set_mac_header(skb, -ETH_HLEN);
skb               482 drivers/net/wimax/i2400m/netdev.c 	skb->dev = i2400m->wimax_dev.net_dev;
skb               483 drivers/net/wimax/i2400m/netdev.c 	skb->protocol = htons(ETH_P_IP);
skb               489 drivers/net/wimax/i2400m/netdev.c 	netif_rx_ni(skb);	/* see notes in function header */
skb               518 drivers/net/wimax/i2400m/netdev.c void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
skb               525 drivers/net/wimax/i2400m/netdev.c 		  i2400m, skb, skb->len, cs);
skb               530 drivers/net/wimax/i2400m/netdev.c 					  skb->data - ETH_HLEN,
skb               532 drivers/net/wimax/i2400m/netdev.c 		skb_set_mac_header(skb, -ETH_HLEN);
skb               533 drivers/net/wimax/i2400m/netdev.c 		skb->dev = i2400m->wimax_dev.net_dev;
skb               534 drivers/net/wimax/i2400m/netdev.c 		skb->protocol = htons(ETH_P_IP);
skb               536 drivers/net/wimax/i2400m/netdev.c 		net_dev->stats.rx_bytes += skb->len;
skb               544 drivers/net/wimax/i2400m/netdev.c 		 skb->len);
skb               545 drivers/net/wimax/i2400m/netdev.c 	d_dump(4, dev, skb->data, skb->len);
skb               546 drivers/net/wimax/i2400m/netdev.c 	netif_rx_ni(skb);	/* see notes in function header */
skb               549 drivers/net/wimax/i2400m/netdev.c 		i2400m, skb, skb->len, cs);
skb               658 drivers/net/wimax/i2400m/rx.c 			struct sk_buff *skb, unsigned sn, unsigned nsn)
skb               666 drivers/net/wimax/i2400m/rx.c 		  i2400m, roq, skb, sn, nsn);
skb               668 drivers/net/wimax/i2400m/rx.c 	roq_data = (struct i2400m_roq_data *) &skb->cb;
skb               669 drivers/net/wimax/i2400m/rx.c 	BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb));
skb               678 drivers/net/wimax/i2400m/rx.c 		__skb_queue_head(&roq->queue, skb);
skb               689 drivers/net/wimax/i2400m/rx.c 		__skb_queue_tail(&roq->queue, skb);
skb               706 drivers/net/wimax/i2400m/rx.c 			__skb_queue_before(&roq->queue, skb_itr, skb);
skb               714 drivers/net/wimax/i2400m/rx.c 		roq, roq->ws, skb, nsn, roq_data->sn);
skb               725 drivers/net/wimax/i2400m/rx.c 		i2400m, roq, skb, sn, nsn);
skb               822 drivers/net/wimax/i2400m/rx.c 		      struct sk_buff * skb, unsigned lbn)
skb               828 drivers/net/wimax/i2400m/rx.c 		  i2400m, roq, skb, lbn);
skb               837 drivers/net/wimax/i2400m/rx.c 		__i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
skb               842 drivers/net/wimax/i2400m/rx.c 		i2400m, roq, skb, lbn);
skb               885 drivers/net/wimax/i2400m/rx.c 				struct sk_buff * skb, unsigned sn)
skb               891 drivers/net/wimax/i2400m/rx.c 		  i2400m, roq, skb, sn);
skb               906 drivers/net/wimax/i2400m/rx.c 		roq_data = (struct i2400m_roq_data *) &skb->cb;
skb               907 drivers/net/wimax/i2400m/rx.c 		i2400m_net_erx(i2400m, skb, roq_data->cs);
skb               909 drivers/net/wimax/i2400m/rx.c 		__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
skb               916 drivers/net/wimax/i2400m/rx.c 		i2400m, roq, skb, sn);
skb               986 drivers/net/wimax/i2400m/rx.c 	struct sk_buff *skb;
skb              1005 drivers/net/wimax/i2400m/rx.c 		skb = skb_get(skb_rx);
skb              1006 drivers/net/wimax/i2400m/rx.c 		d_printf(3, dev, "ERX: skb %p reusing\n", skb);
skb              1008 drivers/net/wimax/i2400m/rx.c 		skb = skb_clone(skb_rx, GFP_KERNEL);
skb              1009 drivers/net/wimax/i2400m/rx.c 		if (skb == NULL) {
skb              1014 drivers/net/wimax/i2400m/rx.c 		d_printf(3, dev, "ERX: skb %p cloned from %p\n", skb, skb_rx);
skb              1020 drivers/net/wimax/i2400m/rx.c 	skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data);
skb              1021 drivers/net/wimax/i2400m/rx.c 	skb_trim(skb, (void *) skb_end_pointer(skb) - payload - sizeof(*hdr));
skb              1033 drivers/net/wimax/i2400m/rx.c 			kfree_skb(skb);	/* rx_roq is already destroyed */
skb              1041 drivers/net/wimax/i2400m/rx.c 		roq_data = (struct i2400m_roq_data *) &skb->cb;
skb              1052 drivers/net/wimax/i2400m/rx.c 			kfree_skb(skb);	/* no data here */
skb              1055 drivers/net/wimax/i2400m/rx.c 			i2400m_roq_queue(i2400m, roq, skb, ro_sn);
skb              1059 drivers/net/wimax/i2400m/rx.c 			kfree_skb(skb);	/* no data here */
skb              1062 drivers/net/wimax/i2400m/rx.c 			i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn);
skb              1073 drivers/net/wimax/i2400m/rx.c 		i2400m_net_erx(i2400m, skb, cs);
skb              1244 drivers/net/wimax/i2400m/rx.c int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
skb              1253 drivers/net/wimax/i2400m/rx.c 	skb_len = skb->len;
skb              1255 drivers/net/wimax/i2400m/rx.c 		  i2400m, skb, skb_len);
skb              1256 drivers/net/wimax/i2400m/rx.c 	msg_hdr = (void *) skb->data;
skb              1280 drivers/net/wimax/i2400m/rx.c 		i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
skb              1281 drivers/net/wimax/i2400m/rx.c 				  skb->data + pl_itr);
skb              1285 drivers/net/wimax/i2400m/rx.c 	kfree_skb(skb);
skb              1304 drivers/net/wimax/i2400m/rx.c 		i2400m, skb, skb_len, result);
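
The i2400m rx.c lines keep per-packet reordering state (sequence number, classification) in skb->cb, the 48-byte scratch area every sk_buff carries, and guard the cast with BUILD_BUG_ON() so the private struct can never silently outgrow it; ath10k does the same further down (ATH10K_SKB_CB/ATH10K_SKB_RXCB). A sketch of the idiom with a made-up state struct:

	/* Sketch: driver-private per-skb state parked in skb->cb[]. */
	#include <linux/skbuff.h>
	#include <linux/build_bug.h>

	struct my_roq_data {
		u16 sn;		/* reorder sequence number */
		u8 cs;		/* classification */
	};

	static void my_tag_skb(struct sk_buff *skb, u16 sn, u8 cs)
	{
		struct my_roq_data *d = (struct my_roq_data *)&skb->cb;

		BUILD_BUG_ON(sizeof(*d) > sizeof(skb->cb));	/* must fit */
		d->sn = sn;
		d->cs = cs;
	}
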
skb               315 drivers/net/wireless/admtek/adm8211.c 		struct sk_buff *skb;
skb               322 drivers/net/wireless/admtek/adm8211.c 		skb = info->skb;
skb               323 drivers/net/wireless/admtek/adm8211.c 		txi = IEEE80211_SKB_CB(skb);
skb               328 drivers/net/wireless/admtek/adm8211.c 				 info->skb->len, PCI_DMA_TODEVICE);
skb               332 drivers/net/wireless/admtek/adm8211.c 		skb_pull(skb, sizeof(struct adm8211_tx_hdr));
skb               333 drivers/net/wireless/admtek/adm8211.c 		memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen);
skb               338 drivers/net/wireless/admtek/adm8211.c 		ieee80211_tx_status_irqsafe(dev, skb);
skb               340 drivers/net/wireless/admtek/adm8211.c 		info->skb = NULL;
skb               357 drivers/net/wireless/admtek/adm8211.c 	struct sk_buff *skb, *newskb;
skb               379 drivers/net/wireless/admtek/adm8211.c 			skb = NULL; /* old buffer will be reused */
skb               383 drivers/net/wireless/admtek/adm8211.c 			skb = dev_alloc_skb(pktlen);
skb               384 drivers/net/wireless/admtek/adm8211.c 			if (skb) {
skb               389 drivers/net/wireless/admtek/adm8211.c 				skb_put_data(skb,
skb               390 drivers/net/wireless/admtek/adm8211.c 					     skb_tail_pointer(priv->rx_buffers[entry].skb),
skb               400 drivers/net/wireless/admtek/adm8211.c 				skb = priv->rx_buffers[entry].skb;
skb               401 drivers/net/wireless/admtek/adm8211.c 				skb_put(skb, pktlen);
skb               406 drivers/net/wireless/admtek/adm8211.c 				priv->rx_buffers[entry].skb = newskb;
skb               414 drivers/net/wireless/admtek/adm8211.c 					priv->rx_buffers[entry].skb = NULL;
skb               416 drivers/net/wireless/admtek/adm8211.c 					skb = NULL;
skb               420 drivers/net/wireless/admtek/adm8211.c 				skb = NULL;
skb               435 drivers/net/wireless/admtek/adm8211.c 		if (skb) {
skb               448 drivers/net/wireless/admtek/adm8211.c 			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
skb               449 drivers/net/wireless/admtek/adm8211.c 			ieee80211_rx_irqsafe(dev, skb);
skb              1439 drivers/net/wireless/admtek/adm8211.c 		priv->rx_buffers[i].skb = NULL;
skb              1449 drivers/net/wireless/admtek/adm8211.c 		rx_info->skb = dev_alloc_skb(RX_PKT_SIZE);
skb              1450 drivers/net/wireless/admtek/adm8211.c 		if (rx_info->skb == NULL)
skb              1453 drivers/net/wireless/admtek/adm8211.c 						  skb_tail_pointer(rx_info->skb),
skb              1457 drivers/net/wireless/admtek/adm8211.c 			dev_kfree_skb(rx_info->skb);
skb              1458 drivers/net/wireless/admtek/adm8211.c 			rx_info->skb = NULL;
skb              1471 drivers/net/wireless/admtek/adm8211.c 		tx_info->skb = NULL;
skb              1490 drivers/net/wireless/admtek/adm8211.c 		if (!priv->rx_buffers[i].skb)
skb              1498 drivers/net/wireless/admtek/adm8211.c 		dev_kfree_skb(priv->rx_buffers[i].skb);
skb              1502 drivers/net/wireless/admtek/adm8211.c 		if (!priv->tx_buffers[i].skb)
skb              1507 drivers/net/wireless/admtek/adm8211.c 				 priv->tx_buffers[i].skb->len,
skb              1510 drivers/net/wireless/admtek/adm8211.c 		dev_kfree_skb(priv->tx_buffers[i].skb);
skb              1625 drivers/net/wireless/admtek/adm8211.c static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
skb              1635 drivers/net/wireless/admtek/adm8211.c 	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
skb              1652 drivers/net/wireless/admtek/adm8211.c 	priv->tx_buffers[entry].skb = skb;
skb              1659 drivers/net/wireless/admtek/adm8211.c 	priv->tx_ring[entry].length = cpu_to_le32(flag | skb->len);
skb              1678 drivers/net/wireless/admtek/adm8211.c 		       struct sk_buff *skb)
skb              1684 drivers/net/wireless/admtek/adm8211.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1692 drivers/net/wireless/admtek/adm8211.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              1694 drivers/net/wireless/admtek/adm8211.c 	memcpy(skb->cb, skb->data, hdrlen);
skb              1695 drivers/net/wireless/admtek/adm8211.c 	hdr = (struct ieee80211_hdr *)skb->cb;
skb              1696 drivers/net/wireless/admtek/adm8211.c 	skb_pull(skb, hdrlen);
skb              1697 drivers/net/wireless/admtek/adm8211.c 	payload_len = skb->len;
skb              1699 drivers/net/wireless/admtek/adm8211.c 	txhdr = skb_push(skb, sizeof(*txhdr));
skb              1726 drivers/net/wireless/admtek/adm8211.c 	if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) {
skb              1728 drivers/net/wireless/admtek/adm8211.c 		ieee80211_free_txskb(dev, skb);
skb               440 drivers/net/wireless/admtek/adm8211.h 	struct sk_buff *skb;
skb               445 drivers/net/wireless/admtek/adm8211.h 	struct sk_buff *skb;
skb               550 drivers/net/wireless/ath/ar5523/ar5523.c 	chunk = (struct ar5523_chunk *) data->skb->data;
skb               562 drivers/net/wireless/ath/ar5523/ar5523.c 		(data->skb->data + usblen - sizeof(struct ar5523_rx_desc));
skb               582 drivers/net/wireless/ath/ar5523/ar5523.c 	skb_reserve(data->skb, sizeof(*chunk));
skb               583 drivers/net/wireless/ath/ar5523/ar5523.c 	skb_put(data->skb, rxlen - sizeof(struct ar5523_rx_desc));
skb               585 drivers/net/wireless/ath/ar5523/ar5523.c 	hdrlen = ieee80211_get_hdrlen_from_skb(data->skb);
skb               589 drivers/net/wireless/ath/ar5523/ar5523.c 		memmove(data->skb->data + pad, data->skb->data, hdrlen);
skb               590 drivers/net/wireless/ath/ar5523/ar5523.c 		skb_pull(data->skb, pad);
skb               591 drivers/net/wireless/ath/ar5523/ar5523.c 		skb_put(data->skb, pad);
skb               594 drivers/net/wireless/ath/ar5523/ar5523.c 	rx_status = IEEE80211_SKB_RXCB(data->skb);
skb               600 drivers/net/wireless/ath/ar5523/ar5523.c 	ieee80211_rx_irqsafe(hw, data->skb);
skb               601 drivers/net/wireless/ath/ar5523/ar5523.c 	data->skb = NULL;
skb               604 drivers/net/wireless/ath/ar5523/ar5523.c 	if (data->skb) {
skb               605 drivers/net/wireless/ath/ar5523/ar5523.c 		dev_kfree_skb_irq(data->skb);
skb               606 drivers/net/wireless/ath/ar5523/ar5523.c 		data->skb = NULL;
skb               636 drivers/net/wireless/ath/ar5523/ar5523.c 		data->skb = alloc_skb(ar->rxbufsz, GFP_KERNEL);
skb               637 drivers/net/wireless/ath/ar5523/ar5523.c 		if (!data->skb) {
skb               643 drivers/net/wireless/ath/ar5523/ar5523.c 				  ar5523_data_rx_pipe(ar->dev), data->skb->data,
skb               653 drivers/net/wireless/ath/ar5523/ar5523.c 			kfree_skb(data->skb);
skb               738 drivers/net/wireless/ath/ar5523/ar5523.c 	struct sk_buff *skb = urb->context;
skb               739 drivers/net/wireless/ath/ar5523/ar5523.c 	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
skb               754 drivers/net/wireless/ath/ar5523/ar5523.c 		ieee80211_free_txskb(ar->hw, skb);
skb               756 drivers/net/wireless/ath/ar5523/ar5523.c 		skb_pull(skb, sizeof(struct ar5523_tx_desc) + sizeof(__be32));
skb               757 drivers/net/wireless/ath/ar5523/ar5523.c 		ieee80211_tx_status_irqsafe(ar->hw, skb);
skb               764 drivers/net/wireless/ath/ar5523/ar5523.c 		       struct sk_buff *skb)
skb               766 drivers/net/wireless/ath/ar5523/ar5523.c 	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
skb               795 drivers/net/wireless/ath/ar5523/ar5523.c 	struct sk_buff *skb;
skb               821 drivers/net/wireless/ath/ar5523/ar5523.c 		skb = container_of((void *)txi, struct sk_buff, cb);
skb               822 drivers/net/wireless/ath/ar5523/ar5523.c 		paylen = skb->len;
skb               826 drivers/net/wireless/ath/ar5523/ar5523.c 			ieee80211_free_txskb(ar->hw, skb);
skb               833 drivers/net/wireless/ath/ar5523/ar5523.c 		desc = skb_push(skb, sizeof(*desc));
skb               834 drivers/net/wireless/ath/ar5523/ar5523.c 		chunk = skb_push(skb, sizeof(*chunk));
skb               838 drivers/net/wireless/ath/ar5523/ar5523.c 		chunk->length = cpu_to_be16(skb->len);
skb               840 drivers/net/wireless/ath/ar5523/ar5523.c 		desc->msglen = cpu_to_be32(skb->len);
skb               858 drivers/net/wireless/ath/ar5523/ar5523.c 				  skb->data, skb->len, ar5523_data_tx_cb, skb);
skb               878 drivers/net/wireless/ath/ar5523/ar5523.c 			ieee80211_free_txskb(ar->hw, skb);
skb                84 drivers/net/wireless/ath/ar5523/ar5523.h 	struct sk_buff		*skb;
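
ar5523.c pushes its device descriptors (ar5523_tx_desc plus the chunk header) onto the front of each mac80211 frame before the USB bulk transfer, so the TX-complete callback must pull exactly those bytes back off before returning the frame via ieee80211_tx_status_irqsafe(). A simplified sketch of the completion half, with dev_hdr_len standing in for the pushed header size:

	/* Sketch: undo the device-header push before reporting TX status. */
	#include <net/mac80211.h>

	static void my_usb_tx_done(struct ieee80211_hw *hw, struct sk_buff *skb,
				   unsigned int dev_hdr_len, bool ok)
	{
		if (!ok) {
			ieee80211_free_txskb(hw, skb);	/* failed: free it */
			return;
		}
		skb_pull(skb, dev_hdr_len);	/* strip pushed device header */
		ieee80211_tx_status_irqsafe(hw, skb);
	}
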
skb               129 drivers/net/wireless/ath/ath10k/core.h static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
skb               133 drivers/net/wireless/ath/ath10k/core.h 	return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
skb               136 drivers/net/wireless/ath/ath10k/core.h static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
skb               138 drivers/net/wireless/ath/ath10k/core.h 	BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
skb               139 drivers/net/wireless/ath/ath10k/core.h 	return (struct ath10k_skb_rxcb *)skb->cb;
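
The core.h accessors above illustrate the standard driver-private control-block idiom: per-skb state is kept in the 48-byte skb->cb scratch area, with a BUILD_BUG_ON guarding against overflow at compile time. A minimal sketch, with a hypothetical demo_skb_cb struct standing in for ath10k_skb_rxcb:

	#include <linux/skbuff.h>

	struct demo_skb_cb {
		dma_addr_t paddr;	/* DMA address of the mapped buffer */
		u8 eid;			/* endpoint the skb belongs to */
	};

	static inline struct demo_skb_cb *DEMO_SKB_CB(struct sk_buff *skb)
	{
		/* fail the build if the cb struct outgrows skb->cb */
		BUILD_BUG_ON(sizeof(struct demo_skb_cb) > sizeof(skb->cb));
		return (struct demo_skb_cb *)skb->cb;
	}
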
skb               254 drivers/net/wireless/ath/ath10k/debug.c void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
skb               268 drivers/net/wireless/ath/ath10k/debug.c 	ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
skb               504 drivers/net/wireless/ath/ath10k/debug.c 	struct sk_buff *skb;
skb               506 drivers/net/wireless/ath/ath10k/debug.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16);
skb               507 drivers/net/wireless/ath/ath10k/debug.c 	if (!skb)
skb               510 drivers/net/wireless/ath/ath10k/debug.c 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
skb               516 drivers/net/wireless/ath/ath10k/debug.c 	return ath10k_wmi_cmd_send(ar, skb,
skb                95 drivers/net/wireless/ath/ath10k/debug.h void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
skb               158 drivers/net/wireless/ath/ath10k/debug.h 						 struct sk_buff *skb)
skb                16 drivers/net/wireless/ath/ath10k/htc.c 					   struct sk_buff *skb)
skb                18 drivers/net/wireless/ath/ath10k/htc.c 	kfree_skb(skb);
skb                23 drivers/net/wireless/ath/ath10k/htc.c 	struct sk_buff *skb;
skb                26 drivers/net/wireless/ath/ath10k/htc.c 	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
skb                27 drivers/net/wireless/ath/ath10k/htc.c 	if (!skb)
skb                30 drivers/net/wireless/ath/ath10k/htc.c 	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
skb                31 drivers/net/wireless/ath/ath10k/htc.c 	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
skb                33 drivers/net/wireless/ath/ath10k/htc.c 	skb_cb = ATH10K_SKB_CB(skb);
skb                36 drivers/net/wireless/ath/ath10k/htc.c 	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
skb                37 drivers/net/wireless/ath/ath10k/htc.c 	return skb;
skb                41 drivers/net/wireless/ath/ath10k/htc.c 					     struct sk_buff *skb)
skb                43 drivers/net/wireless/ath/ath10k/htc.c 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
skb                46 drivers/net/wireless/ath/ath10k/htc.c 		dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb                47 drivers/net/wireless/ath/ath10k/htc.c 	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
skb                51 drivers/net/wireless/ath/ath10k/htc.c 				     struct sk_buff *skb)
skb                56 drivers/net/wireless/ath/ath10k/htc.c 		   ep->eid, skb);
skb                58 drivers/net/wireless/ath/ath10k/htc.c 	ath10k_htc_restore_tx_skb(ep->htc, skb);
skb                62 drivers/net/wireless/ath/ath10k/htc.c 		dev_kfree_skb_any(skb);
skb                66 drivers/net/wireless/ath/ath10k/htc.c 	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
skb                71 drivers/net/wireless/ath/ath10k/htc.c 				      struct sk_buff *skb)
skb                75 drivers/net/wireless/ath/ath10k/htc.c 	hdr = (struct ath10k_htc_hdr *)skb->data;
skb                79 drivers/net/wireless/ath/ath10k/htc.c 	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
skb                91 drivers/net/wireless/ath/ath10k/htc.c 		    struct sk_buff *skb)
skb                95 drivers/net/wireless/ath/ath10k/htc.c 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
skb               109 drivers/net/wireless/ath/ath10k/htc.c 	skb_push(skb, sizeof(struct ath10k_htc_hdr));
skb               112 drivers/net/wireless/ath/ath10k/htc.c 		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
skb               129 drivers/net/wireless/ath/ath10k/htc.c 	ath10k_htc_prepare_tx_skb(ep, skb);
skb               133 drivers/net/wireless/ath/ath10k/htc.c 		skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
skb               143 drivers/net/wireless/ath/ath10k/htc.c 	sg_item.transfer_context = skb;
skb               144 drivers/net/wireless/ath/ath10k/htc.c 	sg_item.vaddr = skb->data;
skb               146 drivers/net/wireless/ath/ath10k/htc.c 	sg_item.len = skb->len;
skb               156 drivers/net/wireless/ath/ath10k/htc.c 		dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb               170 drivers/net/wireless/ath/ath10k/htc.c 	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
skb               174 drivers/net/wireless/ath/ath10k/htc.c void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
skb               180 drivers/net/wireless/ath/ath10k/htc.c 	if (WARN_ON_ONCE(!skb))
skb               183 drivers/net/wireless/ath/ath10k/htc.c 	skb_cb = ATH10K_SKB_CB(skb);
skb               186 drivers/net/wireless/ath/ath10k/htc.c 	ath10k_htc_notify_tx_completion(ep, skb);
skb               386 drivers/net/wireless/ath/ath10k/htc.c void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
skb               398 drivers/net/wireless/ath/ath10k/htc.c 	hdr = (struct ath10k_htc_hdr *)skb->data;
skb               399 drivers/net/wireless/ath/ath10k/htc.c 	skb_pull(skb, sizeof(*hdr));
skb               422 drivers/net/wireless/ath/ath10k/htc.c 	if (skb->len < payload_len) {
skb               425 drivers/net/wireless/ath/ath10k/htc.c 			   skb->len, payload_len);
skb               456 drivers/net/wireless/ath/ath10k/htc.c 		skb_trim(skb, skb->len - trailer_len);
skb               464 drivers/net/wireless/ath/ath10k/htc.c 		   eid, skb);
skb               465 drivers/net/wireless/ath/ath10k/htc.c 	ep->ep_ops.ep_rx_complete(ar, skb);
skb               468 drivers/net/wireless/ath/ath10k/htc.c 	skb = NULL;
skb               470 drivers/net/wireless/ath/ath10k/htc.c 	kfree_skb(skb);
skb               475 drivers/net/wireless/ath/ath10k/htc.c 					   struct sk_buff *skb)
skb               478 drivers/net/wireless/ath/ath10k/htc.c 	struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
skb               494 drivers/net/wireless/ath/ath10k/htc.c 			min_t(int, skb->len,
skb               497 drivers/net/wireless/ath/ath10k/htc.c 		memcpy(htc->control_resp_buffer, skb->data,
skb               511 drivers/net/wireless/ath/ath10k/htc.c 	kfree_skb(skb);
skb               674 drivers/net/wireless/ath/ath10k/htc.c 	struct sk_buff *skb;
skb               698 drivers/net/wireless/ath/ath10k/htc.c 	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
skb               699 drivers/net/wireless/ath/ath10k/htc.c 	if (!skb) {
skb               705 drivers/net/wireless/ath/ath10k/htc.c 	skb_put(skb, length);
skb               706 drivers/net/wireless/ath/ath10k/htc.c 	memset(skb->data, 0, length);
skb               708 drivers/net/wireless/ath/ath10k/htc.c 	msg = (struct ath10k_htc_msg *)skb->data;
skb               726 drivers/net/wireless/ath/ath10k/htc.c 	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
skb               728 drivers/net/wireless/ath/ath10k/htc.c 		kfree_skb(skb);
skb               825 drivers/net/wireless/ath/ath10k/htc.c 	struct sk_buff *skb;
skb               827 drivers/net/wireless/ath/ath10k/htc.c 	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
skb               828 drivers/net/wireless/ath/ath10k/htc.c 	if (!skb)
skb               831 drivers/net/wireless/ath/ath10k/htc.c 	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
skb               834 drivers/net/wireless/ath/ath10k/htc.c 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
skb               837 drivers/net/wireless/ath/ath10k/htc.c 	return skb;
skb               840 drivers/net/wireless/ath/ath10k/htc.c static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
skb               842 drivers/net/wireless/ath/ath10k/htc.c 	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
skb               843 drivers/net/wireless/ath/ath10k/htc.c 	dev_kfree_skb_any(skb);
skb               893 drivers/net/wireless/ath/ath10k/htc.c 	struct sk_buff *skb;
skb               897 drivers/net/wireless/ath/ath10k/htc.c 	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
skb               898 drivers/net/wireless/ath/ath10k/htc.c 	if (!skb)
skb               901 drivers/net/wireless/ath/ath10k/htc.c 	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
skb               902 drivers/net/wireless/ath/ath10k/htc.c 	memset(skb->data, 0, skb->len);
skb               904 drivers/net/wireless/ath/ath10k/htc.c 	msg = (struct ath10k_htc_msg *)skb->data;
skb               917 drivers/net/wireless/ath/ath10k/htc.c 	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
skb               919 drivers/net/wireless/ath/ath10k/htc.c 		kfree_skb(skb);
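
The htc.c entries above (notably lines 71-170) center on one framing pattern: skb_push() the HTC transport header, store the payload length little-endian, and unwind with skb_pull() on any error so the caller gets the skb back unmodified. A minimal sketch under an assumed demo_htc_hdr layout (the real ath10k_htc_hdr has more fields):

	#include <linux/skbuff.h>

	struct demo_htc_hdr {
		u8 eid;
		u8 flags;
		__le16 len;	/* payload length, excludes this header */
	} __packed;

	static void demo_htc_frame(struct sk_buff *skb, u8 eid)
	{
		struct demo_htc_hdr *hdr;

		hdr = skb_push(skb, sizeof(*hdr));
		hdr->eid = eid;
		hdr->flags = 0;
		hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
	}

	/* on a send failure the driver restores the skb for the caller: */
	/*	skb_pull(skb, sizeof(struct demo_htc_hdr));              */
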
skb               373 drivers/net/wireless/ath/ath10k/htc.h void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
skb               374 drivers/net/wireless/ath/ath10k/htc.h void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
skb               376 drivers/net/wireless/ath/ath10k/htc.h 				     struct sk_buff *skb);
skb              2111 drivers/net/wireless/ath/ath10k/htt.h 					struct sk_buff *skb);
skb              2153 drivers/net/wireless/ath/ath10k/htt.h 						  struct sk_buff *skb)
skb              2158 drivers/net/wireless/ath/ath10k/htt.h 	return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
skb              2256 drivers/net/wireless/ath/ath10k/htt.h void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
skb              2257 drivers/net/wireless/ath/ath10k/htt.h void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
skb              2258 drivers/net/wireless/ath/ath10k/htt.h bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
skb              2262 drivers/net/wireless/ath/ath10k/htt.h void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
skb              2280 drivers/net/wireless/ath/ath10k/htt.h int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
skb              2284 drivers/net/wireless/ath/ath10k/htt.h 					     struct sk_buff *skb);
skb                24 drivers/net/wireless/ath/ath10k/htt_rx.c static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
skb                41 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct sk_buff *skb;
skb                48 drivers/net/wireless/ath/ath10k/htt_rx.c 			skb = ATH10K_RXCB_SKB(rxcb);
skb                50 drivers/net/wireless/ath/ath10k/htt_rx.c 					 skb->len + skb_tailroom(skb),
skb                53 drivers/net/wireless/ath/ath10k/htt_rx.c 			dev_kfree_skb_any(skb);
skb                57 drivers/net/wireless/ath/ath10k/htt_rx.c 			skb = htt->rx_ring.netbufs_ring[i];
skb                58 drivers/net/wireless/ath/ath10k/htt_rx.c 			if (!skb)
skb                61 drivers/net/wireless/ath/ath10k/htt_rx.c 			rxcb = ATH10K_SKB_RXCB(skb);
skb                63 drivers/net/wireless/ath/ath10k/htt_rx.c 					 skb->len + skb_tailroom(skb),
skb                65 drivers/net/wireless/ath/ath10k/htt_rx.c 			dev_kfree_skb_any(skb);
skb               133 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct sk_buff *skb;
skb               146 drivers/net/wireless/ath/ath10k/htt_rx.c 		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
skb               147 drivers/net/wireless/ath/ath10k/htt_rx.c 		if (!skb) {
skb               152 drivers/net/wireless/ath/ath10k/htt_rx.c 		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
skb               153 drivers/net/wireless/ath/ath10k/htt_rx.c 			skb_pull(skb,
skb               154 drivers/net/wireless/ath/ath10k/htt_rx.c 				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
skb               155 drivers/net/wireless/ath/ath10k/htt_rx.c 				 skb->data);
skb               158 drivers/net/wireless/ath/ath10k/htt_rx.c 		rx_desc = (struct htt_rx_desc *)skb->data;
skb               161 drivers/net/wireless/ath/ath10k/htt_rx.c 		paddr = dma_map_single(htt->ar->dev, skb->data,
skb               162 drivers/net/wireless/ath/ath10k/htt_rx.c 				       skb->len + skb_tailroom(skb),
skb               166 drivers/net/wireless/ath/ath10k/htt_rx.c 			dev_kfree_skb_any(skb);
skb               171 drivers/net/wireless/ath/ath10k/htt_rx.c 		rxcb = ATH10K_SKB_RXCB(skb);
skb               173 drivers/net/wireless/ath/ath10k/htt_rx.c 		htt->rx_ring.netbufs_ring[idx] = skb;
skb               179 drivers/net/wireless/ath/ath10k/htt_rx.c 				 &ATH10K_SKB_RXCB(skb)->hlist,
skb              1270 drivers/net/wireless/ath/ath10k/htt_rx.c 				       struct sk_buff *skb)
skb              1274 drivers/net/wireless/ath/ath10k/htt_rx.c 	status = IEEE80211_SKB_RXCB(skb);
skb              1277 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
skb              1280 drivers/net/wireless/ath/ath10k/htt_rx.c static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
skb              1283 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1286 drivers/net/wireless/ath/ath10k/htt_rx.c 	status = IEEE80211_SKB_RXCB(skb);
skb              1290 drivers/net/wireless/ath/ath10k/htt_rx.c 		   skb,
skb              1291 drivers/net/wireless/ath/ath10k/htt_rx.c 		   skb->len,
skb              1312 drivers/net/wireless/ath/ath10k/htt_rx.c 			skb->data, skb->len);
skb              1313 drivers/net/wireless/ath/ath10k/htt_rx.c 	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
skb              1314 drivers/net/wireless/ath/ath10k/htt_rx.c 	trace_ath10k_rx_payload(ar, skb->data, skb->len);
skb              1316 drivers/net/wireless/ath/ath10k/htt_rx.c 	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
skb              1692 drivers/net/wireless/ath/ath10k/htt_rx.c static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
skb              1700 drivers/net/wireless/ath/ath10k/htt_rx.c 	rxd = (void *)skb->data - sizeof(*rxd);
skb              1899 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct sk_buff *skb, *first;
skb              1914 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_queue_walk(amsdu, skb)
skb              1915 drivers/net/wireless/ath/ath10k/htt_rx.c 		total_len += skb->len;
skb              1931 drivers/net/wireless/ath/ath10k/htt_rx.c 	while ((skb = __skb_dequeue(amsdu))) {
skb              1932 drivers/net/wireless/ath/ath10k/htt_rx.c 		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
skb              1933 drivers/net/wireless/ath/ath10k/htt_rx.c 					  skb->len);
skb              1934 drivers/net/wireless/ath/ath10k/htt_rx.c 		dev_kfree_skb_any(skb);
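
The run at htt_rx.c:1899-1934 above is the A-MSDU coalescing step: size the target buffer by walking the fragment queue, then drain the queue and copy each fragment's linear data into the first buffer. A sketch of the same two-pass shape, assuming all fragments are linear as the driver does:

	#include <linux/skbuff.h>

	static struct sk_buff *demo_coalesce(struct sk_buff_head *frags)
	{
		struct sk_buff *skb, *first;
		unsigned int total_len = 0;

		/* pass 1: compute the combined length */
		skb_queue_walk(frags, skb)
			total_len += skb->len;

		first = dev_alloc_skb(total_len);
		if (!first)
			return NULL;

		/* pass 2: drain the queue, copying and freeing fragments */
		while ((skb = __skb_dequeue(frags))) {
			skb_copy_from_linear_data(skb, skb_put(first, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
		}
		return first;
	}
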
skb              2146 drivers/net/wireless/ath/ath10k/htt_rx.c 					 struct sk_buff *skb,
skb              2234 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_pull(skb, tot_hdr_len);
skb              2236 drivers/net/wireless/ath/ath10k/htt_rx.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              2238 drivers/net/wireless/ath/ath10k/htt_rx.c 	rx_status = IEEE80211_SKB_RXCB(skb);
skb              2292 drivers/net/wireless/ath/ath10k/htt_rx.c 			hdr = (struct ieee80211_hdr *)skb->data;
skb              2297 drivers/net/wireless/ath/ath10k/htt_rx.c 			memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
skb              2298 drivers/net/wireless/ath/ath10k/htt_rx.c 				skb->data, offset);
skb              2299 drivers/net/wireless/ath/ath10k/htt_rx.c 			skb_push(skb, IEEE80211_CCMP_HDR_LEN);
skb              2300 drivers/net/wireless/ath/ath10k/htt_rx.c 			ivp = skb->data + offset;
skb              2301 drivers/net/wireless/ath/ath10k/htt_rx.c 			memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
skb              2317 drivers/net/wireless/ath/ath10k/htt_rx.c 				memcpy(skb->data + offset, &pn48, 2);
skb              2319 drivers/net/wireless/ath/ath10k/htt_rx.c 				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
skb              2323 drivers/net/wireless/ath/ath10k/htt_rx.c 				memcpy(skb->data + offset + 2, &pn48, 1);
skb              2325 drivers/net/wireless/ath/ath10k/htt_rx.c 				memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
skb              2327 drivers/net/wireless/ath/ath10k/htt_rx.c 				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
skb              2343 drivers/net/wireless/ath/ath10k/htt_rx.c 		hdr = (struct ieee80211_hdr *)skb->data;
skb              2347 drivers/net/wireless/ath/ath10k/htt_rx.c 		memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
skb              2348 drivers/net/wireless/ath/ath10k/htt_rx.c 		skb_push(skb, IEEE80211_QOS_CTL_LEN);
skb              2350 drivers/net/wireless/ath/ath10k/htt_rx.c 		memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
skb              2353 drivers/net/wireless/ath/ath10k/htt_rx.c 	ieee80211_rx_ni(ar->hw, skb);
skb              2366 drivers/net/wireless/ath/ath10k/htt_rx.c static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
skb              2372 drivers/net/wireless/ath/ath10k/htt_rx.c 	orig_hdr = skb->data;
skb              2380 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
skb              2381 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
skb              2385 drivers/net/wireless/ath/ath10k/htt_rx.c static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
skb              2391 drivers/net/wireless/ath/ath10k/htt_rx.c 	orig_hdr = skb->data;
skb              2399 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
skb              2400 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
skb              2404 drivers/net/wireless/ath/ath10k/htt_rx.c static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
skb              2410 drivers/net/wireless/ath/ath10k/htt_rx.c 	orig_hdr = skb->data;
skb              2417 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
skb              2419 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
skb              2423 drivers/net/wireless/ath/ath10k/htt_rx.c static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
skb              2429 drivers/net/wireless/ath/ath10k/htt_rx.c 	orig_hdr = skb->data;
skb              2433 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_pull(skb, IEEE80211_WEP_IV_LEN);
skb              2434 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
skb              2440 drivers/net/wireless/ath/ath10k/htt_rx.c 					      struct sk_buff *skb)
skb              2458 drivers/net/wireless/ath/ath10k/htt_rx.c 	resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
skb              2459 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
skb              2460 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_trim(skb, skb->len - FCS_LEN);
skb              2483 drivers/net/wireless/ath/ath10k/htt_rx.c 	rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
skb              2488 drivers/net/wireless/ath/ath10k/htt_rx.c 		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
skb              2511 drivers/net/wireless/ath/ath10k/htt_rx.c 		ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
skb              2519 drivers/net/wireless/ath/ath10k/htt_rx.c 		ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
skb              2527 drivers/net/wireless/ath/ath10k/htt_rx.c 		ret = ath10k_htt_rx_frag_ccmp_decap(skb,
skb              2536 drivers/net/wireless/ath/ath10k/htt_rx.c 		ret = ath10k_htt_rx_frag_wep_decap(skb,
skb              2546 drivers/net/wireless/ath/ath10k/htt_rx.c 	resp = (struct htt_resp *)(skb->data);
skb              2552 drivers/net/wireless/ath/ath10k/htt_rx.c 		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
skb              2579 drivers/net/wireless/ath/ath10k/htt_rx.c 	return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
skb              2621 drivers/net/wireless/ath/ath10k/htt_rx.c 				       struct sk_buff *skb)
skb              2624 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct htt_resp *resp = (struct htt_resp *)skb->data;
skb              2858 drivers/net/wireless/ath/ath10k/htt_rx.c 					    struct sk_buff *skb)
skb              2860 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2926 drivers/net/wireless/ath/ath10k/htt_rx.c static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
skb              2929 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct htt_resp *resp = (void *)skb->data;
skb              2946 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_pull(skb, sizeof(resp->hdr));
skb              2947 drivers/net/wireless/ath/ath10k/htt_rx.c 	skb_pull(skb, sizeof(resp->rx_in_ord_ind));
skb              2961 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
skb              3039 drivers/net/wireless/ath/ath10k/htt_rx.c static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
skb              3043 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct htt_resp *resp = (struct htt_resp *)skb->data;
skb              3062 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (unlikely(skb->len < len)) {
skb              3073 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (unlikely(skb->len < len)) {
skb              3173 drivers/net/wireless/ath/ath10k/htt_rx.c 					   struct sk_buff *skb)
skb              3175 drivers/net/wireless/ath/ath10k/htt_rx.c 	const struct htt_resp *resp = (void *)skb->data;
skb              3182 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (unlikely(skb->len < len)) {
skb              3190 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (unlikely(skb->len < len)) {
skb              3201 drivers/net/wireless/ath/ath10k/htt_rx.c 					     struct sk_buff *skb)
skb              3203 drivers/net/wireless/ath/ath10k/htt_rx.c 	const struct htt_resp *resp = (void *)skb->data;
skb              3221 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (unlikely(skb->len < len)) {
skb              3240 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (unlikely(skb->len < len)) {
skb              3302 drivers/net/wireless/ath/ath10k/htt_rx.c void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
skb              3306 drivers/net/wireless/ath/ath10k/htt_rx.c 	release = ath10k_htt_t2h_msg_handler(ar, skb);
skb              3310 drivers/net/wireless/ath/ath10k/htt_rx.c 		dev_kfree_skb_any(skb);
skb              3570 drivers/net/wireless/ath/ath10k/htt_rx.c 					struct sk_buff *skb)
skb              3572 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct htt_resp *resp = (struct htt_resp *)skb->data;
skb              3583 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
skb              3584 drivers/net/wireless/ath/ath10k/htt_rx.c 		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
skb              3726 drivers/net/wireless/ath/ath10k/htt_rx.c bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
skb              3729 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct htt_resp *resp = (struct htt_resp *)skb->data;
skb              3733 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
skb              3757 drivers/net/wireless/ath/ath10k/htt_rx.c 							    skb,
skb              3815 drivers/net/wireless/ath/ath10k/htt_rx.c 		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
skb              3832 drivers/net/wireless/ath/ath10k/htt_rx.c 				skb->data, skb->len);
skb              3837 drivers/net/wireless/ath/ath10k/htt_rx.c 						      skb);
skb              3843 drivers/net/wireless/ath/ath10k/htt_rx.c 		trace_ath10k_htt_stats(ar, skb->data, skb->len);
skb              3861 drivers/net/wireless/ath/ath10k/htt_rx.c 					skb->len -
skb              3877 drivers/net/wireless/ath/ath10k/htt_rx.c 		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
skb              3895 drivers/net/wireless/ath/ath10k/htt_rx.c 		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
skb              3905 drivers/net/wireless/ath/ath10k/htt_rx.c 		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
skb              3908 drivers/net/wireless/ath/ath10k/htt_rx.c 		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
skb              3911 drivers/net/wireless/ath/ath10k/htt_rx.c 		ath10k_htt_fetch_peer_stats(ar, skb);
skb              3918 drivers/net/wireless/ath/ath10k/htt_rx.c 				skb->data, skb->len);
skb              3926 drivers/net/wireless/ath/ath10k/htt_rx.c 					     struct sk_buff *skb)
skb              3928 drivers/net/wireless/ath/ath10k/htt_rx.c 	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
skb              3929 drivers/net/wireless/ath/ath10k/htt_rx.c 	dev_kfree_skb_any(skb);
skb              3935 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct sk_buff *skb;
skb              3941 drivers/net/wireless/ath/ath10k/htt_rx.c 		skb = skb_dequeue(&ar->htt.rx_msdus_q);
skb              3942 drivers/net/wireless/ath/ath10k/htt_rx.c 		if (!skb)
skb              3944 drivers/net/wireless/ath/ath10k/htt_rx.c 		ath10k_process_rx(ar, skb);
skb              3956 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct sk_buff *skb;
skb              3972 drivers/net/wireless/ath/ath10k/htt_rx.c 	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
skb              3974 drivers/net/wireless/ath/ath10k/htt_rx.c 		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
skb              3977 drivers/net/wireless/ath/ath10k/htt_rx.c 		dev_kfree_skb_any(skb);
skb              4018 drivers/net/wireless/ath/ath10k/htt_rx.c 	while ((skb = __skb_dequeue(&tx_ind_q))) {
skb              4019 drivers/net/wireless/ath/ath10k/htt_rx.c 		ath10k_htt_rx_tx_fetch_ind(ar, skb);
skb              4020 drivers/net/wireless/ath/ath10k/htt_rx.c 		dev_kfree_skb_any(skb);
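
Two entries above (htt_rx.c:1270-1277 and 3935-3944) together show the deferred-delivery pattern: the message handler only queues the skb with skb_queue_tail(), and a NAPI poll loop later dequeues and delivers it. A minimal sketch with illustrative names:

	#include <linux/skbuff.h>

	/* handler side: cheap, callable from completion context */
	static void demo_enqueue_msdu(struct sk_buff_head *q, struct sk_buff *skb)
	{
		skb_queue_tail(q, skb);	/* locked variant, any-context safe */
	}

	/* poll side: drain everything that accumulated since last poll */
	static int demo_deliver_pending(struct sk_buff_head *q,
					void (*deliver)(struct sk_buff *skb))
	{
		struct sk_buff *skb;
		int n = 0;

		while ((skb = skb_dequeue(q))) {
			deliver(skb);
			n++;
		}
		return n;
	}
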
skb               195 drivers/net/wireless/ath/ath10k/htt_tx.c int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
skb               201 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = idr_alloc(&htt->pending_tx, skb, 0,
skb               504 drivers/net/wireless/ath/ath10k/htt_tx.c static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
skb               544 drivers/net/wireless/ath/ath10k/htt_tx.c void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
skb               546 drivers/net/wireless/ath/ath10k/htt_tx.c 	dev_kfree_skb_any(skb);
skb               549 drivers/net/wireless/ath/ath10k/htt_tx.c void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
skb               551 drivers/net/wireless/ath/ath10k/htt_tx.c 	dev_kfree_skb_any(skb);
skb               558 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct sk_buff *skb;
skb               566 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb = ath10k_htc_alloc_skb(ar, len);
skb               567 drivers/net/wireless/ath/ath10k/htt_tx.c 	if (!skb)
skb               570 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb_put(skb, len);
skb               571 drivers/net/wireless/ath/ath10k/htt_tx.c 	cmd = (struct htt_cmd *)skb->data;
skb               574 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
skb               576 drivers/net/wireless/ath/ath10k/htt_tx.c 		dev_kfree_skb_any(skb);
skb               588 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct sk_buff *skb;
skb               595 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb = ath10k_htc_alloc_skb(ar, len);
skb               596 drivers/net/wireless/ath/ath10k/htt_tx.c 	if (!skb)
skb               599 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb_put(skb, len);
skb               600 drivers/net/wireless/ath/ath10k/htt_tx.c 	cmd = (struct htt_cmd *)skb->data;
skb               616 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
skb               620 drivers/net/wireless/ath/ath10k/htt_tx.c 		dev_kfree_skb_any(skb);
skb               630 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct sk_buff *skb;
skb               645 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb = ath10k_htc_alloc_skb(ar, size);
skb               646 drivers/net/wireless/ath/ath10k/htt_tx.c 	if (!skb)
skb               649 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb_put(skb, size);
skb               650 drivers/net/wireless/ath/ath10k/htt_tx.c 	cmd = (struct htt_cmd *)skb->data;
skb               678 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
skb               682 drivers/net/wireless/ath/ath10k/htt_tx.c 		dev_kfree_skb_any(skb);
skb               692 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct sk_buff *skb;
skb               707 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb = ath10k_htc_alloc_skb(ar, size);
skb               708 drivers/net/wireless/ath/ath10k/htt_tx.c 	if (!skb)
skb               711 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb_put(skb, size);
skb               712 drivers/net/wireless/ath/ath10k/htt_tx.c 	cmd = (struct htt_cmd *)skb->data;
skb               740 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
skb               744 drivers/net/wireless/ath/ath10k/htt_tx.c 		dev_kfree_skb_any(skb);
skb               792 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct sk_buff *skb;
skb               810 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb = ath10k_htc_alloc_skb(ar, len);
skb               811 drivers/net/wireless/ath/ath10k/htt_tx.c 	if (!skb)
skb               814 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb_put(skb, len);
skb               816 drivers/net/wireless/ath/ath10k/htt_tx.c 	cmd = (struct htt_cmd *)skb->data;
skb               852 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
skb               854 drivers/net/wireless/ath/ath10k/htt_tx.c 		dev_kfree_skb_any(skb);
skb               864 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct sk_buff *skb;
skb               881 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb = ath10k_htc_alloc_skb(ar, len);
skb               882 drivers/net/wireless/ath/ath10k/htt_tx.c 	if (!skb)
skb               885 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb_put(skb, len);
skb               887 drivers/net/wireless/ath/ath10k/htt_tx.c 	cmd = (struct htt_cmd *)skb->data;
skb               921 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
skb               923 drivers/net/wireless/ath/ath10k/htt_tx.c 		dev_kfree_skb_any(skb);
skb               933 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct sk_buff *skb;
skb               950 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb = ath10k_htc_alloc_skb(ar, len);
skb               951 drivers/net/wireless/ath/ath10k/htt_tx.c 	if (!skb)
skb               954 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb_put(skb, len);
skb               956 drivers/net/wireless/ath/ath10k/htt_tx.c 	cmd = (struct htt_cmd *)skb->data;
skb               972 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
skb               974 drivers/net/wireless/ath/ath10k/htt_tx.c 		dev_kfree_skb_any(skb);
skb               987 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct sk_buff *skb;
skb              1003 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb = ath10k_htc_alloc_skb(ar, len);
skb              1004 drivers/net/wireless/ath/ath10k/htt_tx.c 	if (!skb)
skb              1007 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb_put(skb, len);
skb              1008 drivers/net/wireless/ath/ath10k/htt_tx.c 	cmd = (struct htt_cmd *)skb->data;
skb              1019 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
skb              1021 drivers/net/wireless/ath/ath10k/htt_tx.c 		dev_kfree_skb_any(skb);
skb              1034 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct sk_buff *skb;
skb              1050 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb = ath10k_htc_alloc_skb(ar, len);
skb              1051 drivers/net/wireless/ath/ath10k/htt_tx.c 	if (!skb)
skb              1054 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb_put(skb, len);
skb              1055 drivers/net/wireless/ath/ath10k/htt_tx.c 	cmd = (struct htt_cmd *)skb->data;
skb              1066 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
skb              1068 drivers/net/wireless/ath/ath10k/htt_tx.c 		dev_kfree_skb_any(skb);
skb              1081 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct sk_buff *skb;
skb              1095 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb = ath10k_htc_alloc_skb(ar, len);
skb              1096 drivers/net/wireless/ath/ath10k/htt_tx.c 	if (!skb)
skb              1099 drivers/net/wireless/ath/ath10k/htt_tx.c 	skb_put(skb, len);
skb              1100 drivers/net/wireless/ath/ath10k/htt_tx.c 	cmd = (struct htt_cmd *)skb->data;
skb              1110 drivers/net/wireless/ath/ath10k/htt_tx.c 	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
skb              1119 drivers/net/wireless/ath/ath10k/htt_tx.c 	dev_kfree_skb_any(skb);
skb              1124 drivers/net/wireless/ath/ath10k/htt_tx.c static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
skb              1126 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1127 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
skb              1142 drivers/net/wireless/ath/ath10k/htt_tx.c static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
skb              1144 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb              1145 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
skb              1150 drivers/net/wireless/ath/ath10k/htt_tx.c 		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
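
Nearly every htt_tx.c entry above repeats one command-send shape (first at lines 558-576): allocate a command-sized skb, skb_put() the full length, cast skb->data to the command struct, fill it, and free the skb only if the send fails — on success the completion path owns it. A sketch with stand-in demo_cmd and demo_send():

	#include <linux/skbuff.h>

	struct demo_cmd {
		u8 msg_type;
		u8 pad[3];
	} __packed;

	static int demo_send_cmd(int (*demo_send)(struct sk_buff *skb), u8 type)
	{
		struct demo_cmd *cmd;
		struct sk_buff *skb;
		int ret;

		skb = dev_alloc_skb(sizeof(*cmd));
		if (!skb)
			return -ENOMEM;

		skb_put(skb, sizeof(*cmd));
		cmd = (struct demo_cmd *)skb->data;
		cmd->msg_type = type;

		ret = demo_send(skb);
		if (ret) {
			/* send rejected the skb; we still own it */
			dev_kfree_skb_any(skb);
			return ret;
		}
		return 0;	/* completion handler frees the skb */
	}
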
skb              1575 drivers/net/wireless/ath/ath10k/mac.c static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
skb              1583 drivers/net/wireless/ath/ath10k/mac.c 	if (WARN_ON(skb->len < ie_offset))
skb              1587 drivers/net/wireless/ath/ath10k/mac.c 					   skb->data + ie_offset,
skb              1588 drivers/net/wireless/ath/ath10k/mac.c 					   skb->len - ie_offset);
skb              1593 drivers/net/wireless/ath/ath10k/mac.c 	end = skb->data + skb->len;
skb              1600 drivers/net/wireless/ath/ath10k/mac.c 	skb_trim(skb, skb->len - len);
skb              2056 drivers/net/wireless/ath/ath10k/mac.c 	struct sk_buff *skb = data;
skb              2057 drivers/net/wireless/ath/ath10k/mac.c 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
skb              2069 drivers/net/wireless/ath/ath10k/mac.c void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
skb              2074 drivers/net/wireless/ath/ath10k/mac.c 						   skb);
skb              3426 drivers/net/wireless/ath/ath10k/mac.c 			   struct sk_buff *skb)
skb              3428 drivers/net/wireless/ath/ath10k/mac.c 	const struct ieee80211_hdr *hdr = (void *)skb->data;
skb              3429 drivers/net/wireless/ath/ath10k/mac.c 	const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
skb              3479 drivers/net/wireless/ath/ath10k/mac.c 				     struct sk_buff *skb)
skb              3481 drivers/net/wireless/ath/ath10k/mac.c 	const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              3482 drivers/net/wireless/ath/ath10k/mac.c 	const struct ieee80211_hdr *hdr = (void *)skb->data;
skb              3501 drivers/net/wireless/ath/ath10k/mac.c static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              3503 drivers/net/wireless/ath/ath10k/mac.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb              3504 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
skb              3511 drivers/net/wireless/ath/ath10k/mac.c 	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
skb              3512 drivers/net/wireless/ath/ath10k/mac.c 		skb->data, (void *)qos_ctl - (void *)skb->data);
skb              3513 drivers/net/wireless/ath/ath10k/mac.c 	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
skb              3519 drivers/net/wireless/ath/ath10k/mac.c 	hdr = (void *)skb->data;
skb              3526 drivers/net/wireless/ath/ath10k/mac.c static void ath10k_tx_h_8023(struct sk_buff *skb)
skb              3536 drivers/net/wireless/ath/ath10k/mac.c 	hdr = (void *)skb->data;
skb              3538 drivers/net/wireless/ath/ath10k/mac.c 	rfc1042 = (void *)skb->data + hdrlen;
skb              3544 drivers/net/wireless/ath/ath10k/mac.c 	skb_pull(skb, hdrlen + sizeof(*rfc1042));
skb              3545 drivers/net/wireless/ath/ath10k/mac.c 	skb_push(skb, sizeof(*eth));
skb              3547 drivers/net/wireless/ath/ath10k/mac.c 	eth = (void *)skb->data;
skb              3555 drivers/net/wireless/ath/ath10k/mac.c 				       struct sk_buff *skb)
skb              3557 drivers/net/wireless/ath/ath10k/mac.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              3567 drivers/net/wireless/ath/ath10k/mac.c 			if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
skb              3569 drivers/net/wireless/ath/ath10k/mac.c 				skb_put_data(skb, arvif->u.ap.noa_data,
skb              3578 drivers/net/wireless/ath/ath10k/mac.c 				    struct sk_buff *skb, u16 airtime)
skb              3580 drivers/net/wireless/ath/ath10k/mac.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb              3581 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
skb              3582 drivers/net/wireless/ath/ath10k/mac.c 	const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              3587 drivers/net/wireless/ath/ath10k/mac.c 	if (!ath10k_tx_h_use_hwcrypto(vif, skb))
skb              3624 drivers/net/wireless/ath/ath10k/mac.c static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
skb              3637 drivers/net/wireless/ath/ath10k/mac.c 	__skb_queue_tail(q, skb);
skb              3648 drivers/net/wireless/ath/ath10k/mac.c 			   struct sk_buff *skb,
skb              3674 drivers/net/wireless/ath/ath10k/mac.c 				struct sk_buff *skb)
skb              3681 drivers/net/wireless/ath/ath10k/mac.c 		ret = ath10k_htt_tx(htt, txmode, skb);
skb              3684 drivers/net/wireless/ath/ath10k/mac.c 		ret = ath10k_htt_mgmt_tx(htt, skb);
skb              3687 drivers/net/wireless/ath/ath10k/mac.c 		ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
skb              3698 drivers/net/wireless/ath/ath10k/mac.c 		ieee80211_free_txskb(ar->hw, skb);
skb              3711 drivers/net/wireless/ath/ath10k/mac.c 			 struct sk_buff *skb, bool noque_offchan)
skb              3714 drivers/net/wireless/ath/ath10k/mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              3715 drivers/net/wireless/ath/ath10k/mac.c 	const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
skb              3725 drivers/net/wireless/ath/ath10k/mac.c 		ath10k_tx_h_nwifi(hw, skb);
skb              3726 drivers/net/wireless/ath/ath10k/mac.c 		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
skb              3727 drivers/net/wireless/ath/ath10k/mac.c 		ath10k_tx_h_seq_no(vif, skb);
skb              3730 drivers/net/wireless/ath/ath10k/mac.c 		ath10k_tx_h_8023(skb);
skb              3736 drivers/net/wireless/ath/ath10k/mac.c 			ieee80211_free_txskb(hw, skb);
skb              3744 drivers/net/wireless/ath/ath10k/mac.c 				   skb, skb->len);
skb              3746 drivers/net/wireless/ath/ath10k/mac.c 			skb_queue_tail(&ar->offchan_tx_queue, skb);
skb              3752 drivers/net/wireless/ath/ath10k/mac.c 	ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
skb              3763 drivers/net/wireless/ath/ath10k/mac.c 	struct sk_buff *skb;
skb              3766 drivers/net/wireless/ath/ath10k/mac.c 		skb = skb_dequeue(&ar->offchan_tx_queue);
skb              3767 drivers/net/wireless/ath/ath10k/mac.c 		if (!skb)
skb              3770 drivers/net/wireless/ath/ath10k/mac.c 		ieee80211_free_txskb(ar->hw, skb);
skb              3784 drivers/net/wireless/ath/ath10k/mac.c 	struct sk_buff *skb;
skb              3800 drivers/net/wireless/ath/ath10k/mac.c 		skb = skb_dequeue(&ar->offchan_tx_queue);
skb              3801 drivers/net/wireless/ath/ath10k/mac.c 		if (!skb)
skb              3807 drivers/net/wireless/ath/ath10k/mac.c 			   skb, skb->len);
skb              3809 drivers/net/wireless/ath/ath10k/mac.c 		hdr = (struct ieee80211_hdr *)skb->data;
skb              3834 drivers/net/wireless/ath/ath10k/mac.c 		ar->offchan_tx_skb = skb;
skb              3850 drivers/net/wireless/ath/ath10k/mac.c 		txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
skb              3851 drivers/net/wireless/ath/ath10k/mac.c 		txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
skb              3853 drivers/net/wireless/ath/ath10k/mac.c 		ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
skb              3864 drivers/net/wireless/ath/ath10k/mac.c 				    skb, skb->len);
skb              3879 drivers/net/wireless/ath/ath10k/mac.c 	struct sk_buff *skb;
skb              3882 drivers/net/wireless/ath/ath10k/mac.c 		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
skb              3883 drivers/net/wireless/ath/ath10k/mac.c 		if (!skb)
skb              3886 drivers/net/wireless/ath/ath10k/mac.c 		ieee80211_free_txskb(ar->hw, skb);
skb              3893 drivers/net/wireless/ath/ath10k/mac.c 	struct sk_buff *skb;
skb              3898 drivers/net/wireless/ath/ath10k/mac.c 		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
skb              3899 drivers/net/wireless/ath/ath10k/mac.c 		if (!skb)
skb              3904 drivers/net/wireless/ath/ath10k/mac.c 			paddr = dma_map_single(ar->dev, skb->data,
skb              3905 drivers/net/wireless/ath/ath10k/mac.c 					       skb->len, DMA_TO_DEVICE);
skb              3907 drivers/net/wireless/ath/ath10k/mac.c 				ieee80211_free_txskb(ar->hw, skb);
skb              3910 drivers/net/wireless/ath/ath10k/mac.c 			ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
skb              3914 drivers/net/wireless/ath/ath10k/mac.c 				dma_unmap_single(ar->dev, paddr, skb->len,
skb              3916 drivers/net/wireless/ath/ath10k/mac.c 				ieee80211_free_txskb(ar->hw, skb);
skb              3919 drivers/net/wireless/ath/ath10k/mac.c 			ret = ath10k_wmi_mgmt_tx(ar, skb);
skb              3923 drivers/net/wireless/ath/ath10k/mac.c 				ieee80211_free_txskb(ar->hw, skb);
skb              4010 drivers/net/wireless/ath/ath10k/mac.c 				     struct sk_buff *skb)
skb              4025 drivers/net/wireless/ath/ath10k/mac.c 	pktlen = skb->len + 38; /* Assume MAC header 30, SNAP 8 for most case */
skb              4056 drivers/net/wireless/ath/ath10k/mac.c 	struct sk_buff *skb;
skb              4070 drivers/net/wireless/ath/ath10k/mac.c 	skb = ieee80211_tx_dequeue(hw, txq);
skb              4071 drivers/net/wireless/ath/ath10k/mac.c 	if (!skb) {
skb              4079 drivers/net/wireless/ath/ath10k/mac.c 	airtime = ath10k_mac_update_airtime(ar, txq, skb);
skb              4080 drivers/net/wireless/ath/ath10k/mac.c 	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime);
skb              4082 drivers/net/wireless/ath/ath10k/mac.c 	skb_len = skb->len;
skb              4083 drivers/net/wireless/ath/ath10k/mac.c 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
skb              4084 drivers/net/wireless/ath/ath10k/mac.c 	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
skb              4088 drivers/net/wireless/ath/ath10k/mac.c 		hdr = (struct ieee80211_hdr *)skb->data;
skb              4102 drivers/net/wireless/ath/ath10k/mac.c 	ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
skb              4332 drivers/net/wireless/ath/ath10k/mac.c 			     struct sk_buff *skb)
skb              4336 drivers/net/wireless/ath/ath10k/mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              4340 drivers/net/wireless/ath/ath10k/mac.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb              4349 drivers/net/wireless/ath/ath10k/mac.c 	airtime = ath10k_mac_update_airtime(ar, txq, skb);
skb              4350 drivers/net/wireless/ath/ath10k/mac.c 	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime);
skb              4352 drivers/net/wireless/ath/ath10k/mac.c 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
skb              4353 drivers/net/wireless/ath/ath10k/mac.c 	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
skb              4367 drivers/net/wireless/ath/ath10k/mac.c 			ieee80211_free_txskb(ar->hw, skb);
skb              4377 drivers/net/wireless/ath/ath10k/mac.c 			ieee80211_free_txskb(ar->hw, skb);
skb              4383 drivers/net/wireless/ath/ath10k/mac.c 	ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
skb                51 drivers/net/wireless/ath/ath10k/mac.h void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
skb                77 drivers/net/wireless/ath/ath10k/mac.h 				      struct sk_buff *skb)
skb                79 drivers/net/wireless/ath/ath10k/mac.h 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb                80 drivers/net/wireless/ath/ath10k/mac.h 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
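
The mac.c entries at lines 3501-3523 above perform the 802.11-to-"native wifi" header conversion: slide the MAC header forward over the QoS control field, skb_pull() the two freed bytes, then clear the QoS subtype bit. A sketch of the same steps:

	#include <linux/ieee80211.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>

	static void demo_strip_qos_ctl(struct sk_buff *skb)
	{
		struct ieee80211_hdr *hdr = (void *)skb->data;
		u8 *qos_ctl;

		if (!ieee80211_is_data_qos(hdr->frame_control))
			return;

		qos_ctl = ieee80211_get_qos_ctl(hdr);
		/* shift everything before qos_ctl forward by 2 bytes */
		memmove(skb->data + IEEE80211_QOS_CTL_LEN,
			skb->data, (void *)qos_ctl - (void *)skb->data);
		skb_pull(skb, IEEE80211_QOS_CTL_LEN);

		/* header moved; re-fetch and drop the QoS subtype bit */
		hdr = (void *)skb->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
	}
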
skb               769 drivers/net/wireless/ath/ath10k/pci.c 	struct sk_buff *skb;
skb               773 drivers/net/wireless/ath/ath10k/pci.c 	skb = dev_alloc_skb(pipe->buf_sz);
skb               774 drivers/net/wireless/ath/ath10k/pci.c 	if (!skb)
skb               777 drivers/net/wireless/ath/ath10k/pci.c 	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
skb               779 drivers/net/wireless/ath/ath10k/pci.c 	paddr = dma_map_single(ar->dev, skb->data,
skb               780 drivers/net/wireless/ath/ath10k/pci.c 			       skb->len + skb_tailroom(skb),
skb               784 drivers/net/wireless/ath/ath10k/pci.c 		dev_kfree_skb_any(skb);
skb               788 drivers/net/wireless/ath/ath10k/pci.c 	ATH10K_SKB_RXCB(skb)->paddr = paddr;
skb               791 drivers/net/wireless/ath/ath10k/pci.c 	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
skb               794 drivers/net/wireless/ath/ath10k/pci.c 		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
skb               796 drivers/net/wireless/ath/ath10k/pci.c 		dev_kfree_skb_any(skb);
skb              1175 drivers/net/wireless/ath/ath10k/pci.c 	struct sk_buff *skb;
skb              1178 drivers/net/wireless/ath/ath10k/pci.c 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
skb              1180 drivers/net/wireless/ath/ath10k/pci.c 		if (skb == NULL)
skb              1183 drivers/net/wireless/ath/ath10k/pci.c 		__skb_queue_tail(&list, skb);
skb              1186 drivers/net/wireless/ath/ath10k/pci.c 	while ((skb = __skb_dequeue(&list)))
skb              1187 drivers/net/wireless/ath/ath10k/pci.c 		ath10k_htc_tx_completion_handler(ar, skb);
skb              1192 drivers/net/wireless/ath/ath10k/pci.c 						      struct sk_buff *skb))
skb              1197 drivers/net/wireless/ath/ath10k/pci.c 	struct sk_buff *skb;
skb              1205 drivers/net/wireless/ath/ath10k/pci.c 		skb = transfer_context;
skb              1206 drivers/net/wireless/ath/ath10k/pci.c 		max_nbytes = skb->len + skb_tailroom(skb);
skb              1207 drivers/net/wireless/ath/ath10k/pci.c 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
skb              1213 drivers/net/wireless/ath/ath10k/pci.c 			dev_kfree_skb_any(skb);
skb              1217 drivers/net/wireless/ath/ath10k/pci.c 		skb_put(skb, nbytes);
skb              1218 drivers/net/wireless/ath/ath10k/pci.c 		__skb_queue_tail(&list, skb);
skb              1221 drivers/net/wireless/ath/ath10k/pci.c 	while ((skb = __skb_dequeue(&list))) {
skb              1223 drivers/net/wireless/ath/ath10k/pci.c 			   ce_state->id, skb->len);
skb              1225 drivers/net/wireless/ath/ath10k/pci.c 				skb->data, skb->len);
skb              1227 drivers/net/wireless/ath/ath10k/pci.c 		callback(ar, skb);
skb              1235 drivers/net/wireless/ath/ath10k/pci.c 							  struct sk_buff *skb))
skb              1241 drivers/net/wireless/ath/ath10k/pci.c 	struct sk_buff *skb;
skb              1254 drivers/net/wireless/ath/ath10k/pci.c 		skb = transfer_context;
skb              1255 drivers/net/wireless/ath/ath10k/pci.c 		max_nbytes = skb->len + skb_tailroom(skb);
skb              1263 drivers/net/wireless/ath/ath10k/pci.c 		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
skb              1265 drivers/net/wireless/ath/ath10k/pci.c 		skb_put(skb, nbytes);
skb              1266 drivers/net/wireless/ath/ath10k/pci.c 		__skb_queue_tail(&list, skb);
skb              1270 drivers/net/wireless/ath/ath10k/pci.c 	while ((skb = __skb_dequeue(&list))) {
skb              1272 drivers/net/wireless/ath/ath10k/pci.c 			   ce_state->id, skb->len);
skb              1274 drivers/net/wireless/ath/ath10k/pci.c 				skb->data, skb->len);
skb              1276 drivers/net/wireless/ath/ath10k/pci.c 		orig_len = skb->len;
skb              1277 drivers/net/wireless/ath/ath10k/pci.c 		callback(ar, skb);
skb              1278 drivers/net/wireless/ath/ath10k/pci.c 		skb_push(skb, orig_len - skb->len);
skb              1279 drivers/net/wireless/ath/ath10k/pci.c 		skb_reset_tail_pointer(skb);
skb              1280 drivers/net/wireless/ath/ath10k/pci.c 		skb_trim(skb, 0);
skb              1283 drivers/net/wireless/ath/ath10k/pci.c 		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
skb              1284 drivers/net/wireless/ath/ath10k/pci.c 					   skb->len + skb_tailroom(skb),
skb              1319 drivers/net/wireless/ath/ath10k/pci.c 	struct sk_buff *skb;
skb              1321 drivers/net/wireless/ath/ath10k/pci.c 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
skb              1323 drivers/net/wireless/ath/ath10k/pci.c 		if (!skb)
skb              1326 drivers/net/wireless/ath/ath10k/pci.c 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
skb              1327 drivers/net/wireless/ath/ath10k/pci.c 				 skb->len, DMA_TO_DEVICE);
skb              1328 drivers/net/wireless/ath/ath10k/pci.c 		ath10k_htt_hif_tx_complete(ar, skb);
skb              1332 drivers/net/wireless/ath/ath10k/pci.c static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
skb              1334 drivers/net/wireless/ath/ath10k/pci.c 	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
skb              1335 drivers/net/wireless/ath/ath10k/pci.c 	ath10k_htt_t2h_msg_handler(ar, skb);
skb              1974 drivers/net/wireless/ath/ath10k/pci.c 	struct sk_buff *skb;
skb              1988 drivers/net/wireless/ath/ath10k/pci.c 		skb = ce_ring->per_transfer_context[i];
skb              1989 drivers/net/wireless/ath/ath10k/pci.c 		if (!skb)
skb              1994 drivers/net/wireless/ath/ath10k/pci.c 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
skb              1995 drivers/net/wireless/ath/ath10k/pci.c 				 skb->len + skb_tailroom(skb),
skb              1997 drivers/net/wireless/ath/ath10k/pci.c 		dev_kfree_skb_any(skb);
skb              2006 drivers/net/wireless/ath/ath10k/pci.c 	struct sk_buff *skb;
skb              2020 drivers/net/wireless/ath/ath10k/pci.c 		skb = ce_ring->per_transfer_context[i];
skb              2021 drivers/net/wireless/ath/ath10k/pci.c 		if (!skb)
skb              2026 drivers/net/wireless/ath/ath10k/pci.c 		ath10k_htc_tx_completion_handler(ar, skb);
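
The pci.c entries at lines 769-796 above show the rx-buffer post discipline: map the whole buffer (length plus tailroom) for DMA, validate the mapping with dma_mapping_error(), and fully unwind (unmap + free) if handing the buffer to the copy engine fails. A sketch with demo_post() standing in for the CE post call:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	static int demo_rx_post(struct device *dev, unsigned int buf_sz,
				int (*demo_post)(struct sk_buff *skb, dma_addr_t p))
	{
		struct sk_buff *skb;
		dma_addr_t paddr;
		int ret;

		skb = dev_alloc_skb(buf_sz);
		if (!skb)
			return -ENOMEM;

		/* map data plus tailroom: the device may fill the whole buffer */
		paddr = dma_map_single(dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, paddr))) {
			dev_kfree_skb_any(skb);
			return -EIO;
		}

		ret = demo_post(skb, paddr);
		if (ret) {
			/* post failed: undo the mapping before freeing */
			dma_unmap_single(dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
		return ret;
	}
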
skb                42 drivers/net/wireless/ath/ath10k/sdio.c 	dev_kfree_skb(pkt->skb);
skb                43 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->skb = NULL;
skb                54 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->skb = dev_alloc_skb(full_len);
skb                55 drivers/net/wireless/ath/ath10k/sdio.c 	if (!pkt->skb)
skb                71 drivers/net/wireless/ath/ath10k/sdio.c 		(struct ath10k_htc_hdr *)pkt->skb->data;
skb               380 drivers/net/wireless/ath/ath10k/sdio.c 	struct sk_buff *skb = pkt->skb;
skb               381 drivers/net/wireless/ath/ath10k/sdio.c 	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
skb               388 drivers/net/wireless/ath/ath10k/sdio.c 		trailer = skb->data + skb->len - htc_hdr->trailer_len;
skb               404 drivers/net/wireless/ath/ath10k/sdio.c 		skb_trim(skb, skb->len - htc_hdr->trailer_len);
skb               407 drivers/net/wireless/ath/ath10k/sdio.c 	skb_pull(skb, sizeof(*htc_hdr));
skb               466 drivers/net/wireless/ath/ath10k/sdio.c 			ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
skb               468 drivers/net/wireless/ath/ath10k/sdio.c 			kfree_skb(pkt->skb);
skb               471 drivers/net/wireless/ath/ath10k/sdio.c 		pkt->skb = NULL;
skb               629 drivers/net/wireless/ath/ath10k/sdio.c 	struct sk_buff *skb = pkt->skb;
skb               634 drivers/net/wireless/ath/ath10k/sdio.c 				 skb->data, pkt->alloc_len);
skb               642 drivers/net/wireless/ath/ath10k/sdio.c 	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
skb               651 drivers/net/wireless/ath/ath10k/sdio.c 	skb_put(skb, pkt->act_len);
skb              1277 drivers/net/wireless/ath/ath10k/sdio.c 	struct sk_buff *skb;
skb              1280 drivers/net/wireless/ath/ath10k/sdio.c 	skb = req->skb;
skb              1281 drivers/net/wireless/ath/ath10k/sdio.c 	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
skb              1288 drivers/net/wireless/ath/ath10k/sdio.c 		ath10k_htc_notify_tx_completion(ep, skb);
skb              1316 drivers/net/wireless/ath/ath10k/sdio.c 				      struct sk_buff *skb,
skb              1333 drivers/net/wireless/ath/ath10k/sdio.c 	bus_req->skb = skb;
skb              1476 drivers/net/wireless/ath/ath10k/sdio.c 	struct sk_buff *skb;
skb              1485 drivers/net/wireless/ath/ath10k/sdio.c 		skb = items[i].transfer_context;
skb              1487 drivers/net/wireless/ath/ath10k/sdio.c 							      skb->len);
skb              1488 drivers/net/wireless/ath/ath10k/sdio.c 		skb_trim(skb, padded_len);
skb              1492 drivers/net/wireless/ath/ath10k/sdio.c 			  skb->len;
skb              1493 drivers/net/wireless/ath/ath10k/sdio.c 		ret = ath10k_sdio_prep_async_req(ar, address, skb,
skb              1737 drivers/net/wireless/ath/ath10k/sdio.c 	struct sk_buff *skb;
skb              1741 drivers/net/wireless/ath/ath10k/sdio.c 	skb = dev_alloc_skb(sizeof(*regs));
skb              1742 drivers/net/wireless/ath/ath10k/sdio.c 	if (!skb)
skb              1748 drivers/net/wireless/ath/ath10k/sdio.c 	memcpy(skb->data, regs, sizeof(*regs));
skb              1749 drivers/net/wireless/ath/ath10k/sdio.c 	skb_put(skb, sizeof(*regs));
skb              1755 drivers/net/wireless/ath/ath10k/sdio.c 					 skb, &irqs_disabled_comp, false, 0);
skb              1778 drivers/net/wireless/ath/ath10k/sdio.c 	kfree_skb(skb);
skb              1800 drivers/net/wireless/ath/ath10k/sdio.c 			ath10k_htc_notify_tx_completion(ep, req->skb);
skb              1801 drivers/net/wireless/ath/ath10k/sdio.c 		} else if (req->skb) {
skb              1802 drivers/net/wireless/ath/ath10k/sdio.c 			kfree_skb(req->skb);
skb               107 drivers/net/wireless/ath/ath10k/sdio.h 	struct sk_buff *skb;
skb               122 drivers/net/wireless/ath/ath10k/sdio.h 	struct sk_buff *skb;
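
The sdio.c entries at lines 380-407 above handle the HTC trailer, which sits at the end of the received buffer: it is located relative to skb->len, parsed, dropped with skb_trim(), and only then is the header pulled. A sketch under an assumed demo_hdr layout:

	#include <linux/skbuff.h>

	struct demo_hdr {
		__le16 len;
		u8 flags;
		u8 trailer_len;	/* bytes of trailer at the buffer's end */
	} __packed;

	static void demo_strip_framing(struct sk_buff *skb)
	{
		struct demo_hdr *hdr = (struct demo_hdr *)skb->data;
		u8 *trailer;

		if (hdr->trailer_len) {
			trailer = skb->data + skb->len - hdr->trailer_len;
			/* ... parse lookahead/credit records at trailer ... */
			skb_trim(skb, skb->len - hdr->trailer_len);
		}
		skb_pull(skb, sizeof(*hdr));
	}
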
skb               489 drivers/net/wireless/ath/ath10k/snoc.c 	struct sk_buff *skb;
skb               493 drivers/net/wireless/ath/ath10k/snoc.c 	skb = dev_alloc_skb(pipe->buf_sz);
skb               494 drivers/net/wireless/ath/ath10k/snoc.c 	if (!skb)
skb               497 drivers/net/wireless/ath/ath10k/snoc.c 	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
skb               499 drivers/net/wireless/ath/ath10k/snoc.c 	paddr = dma_map_single(ar->dev, skb->data,
skb               500 drivers/net/wireless/ath/ath10k/snoc.c 			       skb->len + skb_tailroom(skb),
skb               504 drivers/net/wireless/ath/ath10k/snoc.c 		dev_kfree_skb_any(skb);
skb               508 drivers/net/wireless/ath/ath10k/snoc.c 	ATH10K_SKB_RXCB(skb)->paddr = paddr;
skb               511 drivers/net/wireless/ath/ath10k/snoc.c 	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
skb               514 drivers/net/wireless/ath/ath10k/snoc.c 		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
skb               516 drivers/net/wireless/ath/ath10k/snoc.c 		dev_kfree_skb_any(skb);
skb               564 drivers/net/wireless/ath/ath10k/snoc.c 						       struct sk_buff *skb))
skb               569 drivers/net/wireless/ath/ath10k/snoc.c 	struct sk_buff *skb;
skb               577 drivers/net/wireless/ath/ath10k/snoc.c 		skb = transfer_context;
skb               578 drivers/net/wireless/ath/ath10k/snoc.c 		max_nbytes = skb->len + skb_tailroom(skb);
skb               579 drivers/net/wireless/ath/ath10k/snoc.c 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
skb               585 drivers/net/wireless/ath/ath10k/snoc.c 			dev_kfree_skb_any(skb);
skb               589 drivers/net/wireless/ath/ath10k/snoc.c 		skb_put(skb, nbytes);
skb               590 drivers/net/wireless/ath/ath10k/snoc.c 		__skb_queue_tail(&list, skb);
skb               593 drivers/net/wireless/ath/ath10k/snoc.c 	while ((skb = __skb_dequeue(&list))) {
skb               595 drivers/net/wireless/ath/ath10k/snoc.c 			   ce_state->id, skb->len);
skb               597 drivers/net/wireless/ath/ath10k/snoc.c 		callback(ar, skb);
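The completion handler at snoc.c 564-597 uses a two-phase shape: drain every finished buffer into an on-stack sk_buff_head (the driver also dma-unmaps and fixes up lengths there), then deliver afterwards, so the delivery callback never runs inside the harvest loop. The skeleton, with the harvesting iterator abstracted into a stand-in:

#include <linux/skbuff.h>

/* stand-in: pop one completed rx buffer, reporting bytes written */
extern struct sk_buff *next_completed(void *ce, unsigned int *nbytes);

static void rx_completions(void *ce, void (*deliver)(struct sk_buff *skb))
{
	struct sk_buff_head list;
	struct sk_buff *skb;
	unsigned int nbytes;

	__skb_queue_head_init(&list);

	/* phase 1: harvest completions, account received bytes */
	while ((skb = next_completed(ce, &nbytes))) {
		skb_put(skb, nbytes);		/* device wrote nbytes */
		__skb_queue_tail(&list, skb);
	}

	/* phase 2: deliver outside the harvest loop */
	while ((skb = __skb_dequeue(&list)))
		deliver(skb);
}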
skb               626 drivers/net/wireless/ath/ath10k/snoc.c static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
skb               628 drivers/net/wireless/ath/ath10k/snoc.c 	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
skb               629 drivers/net/wireless/ath/ath10k/snoc.c 	ath10k_htt_t2h_msg_handler(ar, skb);
skb               650 drivers/net/wireless/ath/ath10k/snoc.c 	struct sk_buff *skb;
skb               653 drivers/net/wireless/ath/ath10k/snoc.c 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
skb               654 drivers/net/wireless/ath/ath10k/snoc.c 		if (!skb)
skb               657 drivers/net/wireless/ath/ath10k/snoc.c 		__skb_queue_tail(&list, skb);
skb               660 drivers/net/wireless/ath/ath10k/snoc.c 	while ((skb = __skb_dequeue(&list)))
skb               661 drivers/net/wireless/ath/ath10k/snoc.c 		ath10k_htc_tx_completion_handler(ar, skb);
skb               667 drivers/net/wireless/ath/ath10k/snoc.c 	struct sk_buff *skb;
skb               669 drivers/net/wireless/ath/ath10k/snoc.c 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
skb               670 drivers/net/wireless/ath/ath10k/snoc.c 		if (!skb)
skb               673 drivers/net/wireless/ath/ath10k/snoc.c 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
skb               674 drivers/net/wireless/ath/ath10k/snoc.c 				 skb->len, DMA_TO_DEVICE);
skb               675 drivers/net/wireless/ath/ath10k/snoc.c 		ath10k_htt_hif_tx_complete(ar, skb);
skb               836 drivers/net/wireless/ath/ath10k/snoc.c 	struct sk_buff *skb;
skb               851 drivers/net/wireless/ath/ath10k/snoc.c 		skb = ce_ring->per_transfer_context[i];
skb               852 drivers/net/wireless/ath/ath10k/snoc.c 		if (!skb)
skb               857 drivers/net/wireless/ath/ath10k/snoc.c 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
skb               858 drivers/net/wireless/ath/ath10k/snoc.c 				 skb->len + skb_tailroom(skb),
skb               860 drivers/net/wireless/ath/ath10k/snoc.c 		dev_kfree_skb_any(skb);
skb               868 drivers/net/wireless/ath/ath10k/snoc.c 	struct sk_buff *skb;
skb               883 drivers/net/wireless/ath/ath10k/snoc.c 		skb = ce_ring->per_transfer_context[i];
skb               884 drivers/net/wireless/ath/ath10k/snoc.c 		if (!skb)
skb               889 drivers/net/wireless/ath/ath10k/snoc.c 		ath10k_htc_tx_completion_handler(ar, skb);
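The pipe-teardown helpers at snoc.c 836-889 share one loop: walk the ring's per_transfer_context slots, skip the empty ones, clear each slot, then dispose of the skb (rx: unmap and free; tx: complete back to HTC). Reduced to the rx flavor, with rxcb_paddr() standing in for the control-block accessor:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

extern dma_addr_t rxcb_paddr(const struct sk_buff *skb);	/* stand-in */

struct ring {
	unsigned int nentries;
	void **per_transfer_context;
};

static void rx_ring_cleanup(struct device *dev, struct ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->nentries; i++) {
		struct sk_buff *skb = ring->per_transfer_context[i];

		if (!skb)
			continue;	/* slot was never filled */

		ring->per_transfer_context[i] = NULL;	/* drop ring's ref */
		dma_unmap_single(dev, rxcb_paddr(skb),
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}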
skb                31 drivers/net/wireless/ath/ath10k/testmode.c bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
skb                39 drivers/net/wireless/ath/ath10k/testmode.c 		   cmd_id, skb, skb->len);
skb                41 drivers/net/wireless/ath/ath10k/testmode.c 	ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
skb                57 drivers/net/wireless/ath/ath10k/testmode.c 						   2 * sizeof(u32) + skb->len,
skb                83 drivers/net/wireless/ath/ath10k/testmode.c 	ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
skb               102 drivers/net/wireless/ath/ath10k/testmode.c 	struct sk_buff *skb;
skb               110 drivers/net/wireless/ath/ath10k/testmode.c 	skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
skb               112 drivers/net/wireless/ath/ath10k/testmode.c 	if (!skb)
skb               115 drivers/net/wireless/ath/ath10k/testmode.c 	ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR,
skb               118 drivers/net/wireless/ath/ath10k/testmode.c 		kfree_skb(skb);
skb               122 drivers/net/wireless/ath/ath10k/testmode.c 	ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR,
skb               125 drivers/net/wireless/ath/ath10k/testmode.c 		kfree_skb(skb);
skb               129 drivers/net/wireless/ath/ath10k/testmode.c 	ret = nla_put_u32(skb, ATH10K_TM_ATTR_WMI_OP_VERSION,
skb               132 drivers/net/wireless/ath/ath10k/testmode.c 		kfree_skb(skb);
skb               136 drivers/net/wireless/ath/ath10k/testmode.c 	return cfg80211_testmode_reply(skb);
skb               373 drivers/net/wireless/ath/ath10k/testmode.c 	struct sk_buff *skb;
skb               405 drivers/net/wireless/ath/ath10k/testmode.c 	skb = ath10k_wmi_alloc_skb(ar, buf_len);
skb               406 drivers/net/wireless/ath/ath10k/testmode.c 	if (!skb) {
skb               411 drivers/net/wireless/ath/ath10k/testmode.c 	memcpy(skb->data, buf, buf_len);
skb               413 drivers/net/wireless/ath/ath10k/testmode.c 	ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
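testmode.c 102-136 shows the cfg80211 testmode reply idiom: allocate the reply skb with room for the attributes, nla_put_u32() each field, free the skb yourself on any put failure, and finish with cfg80211_testmode_reply(), which consumes it. The same shape in miniature; the attribute ids here are placeholders for ATH10K_TM_ATTR_*:

#include <net/cfg80211.h>
#include <net/netlink.h>

/* placeholder attribute ids */
enum { TM_ATTR_VER_MAJOR = 1, TM_ATTR_VER_MINOR = 2 };

static int tm_cmd_get_version(struct wiphy *wiphy, u32 major, u32 minor)
{
	struct sk_buff *skb;
	int ret;

	/* approximate room for two u32 attributes */
	skb = cfg80211_testmode_alloc_reply_skb(wiphy, 2 * sizeof(u32));
	if (!skb)
		return -ENOMEM;

	ret = nla_put_u32(skb, TM_ATTR_VER_MAJOR, major);
	if (!ret)
		ret = nla_put_u32(skb, TM_ATTR_VER_MINOR, minor);
	if (ret) {
		kfree_skb(skb);	/* an unsent reply must be freed by us */
		return ret;
	}

	return cfg80211_testmode_reply(skb);	/* consumes skb */
}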
skb                12 drivers/net/wireless/ath/ath10k/testmode.h bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb);
skb                23 drivers/net/wireless/ath/ath10k/testmode.h 				       struct sk_buff *skb)
skb                14 drivers/net/wireless/ath/ath10k/txrx.c static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
skb                16 drivers/net/wireless/ath/ath10k/txrx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb                30 drivers/net/wireless/ath/ath10k/txrx.c 	if (ar->offchan_tx_skb != skb) {
skb                38 drivers/net/wireless/ath/ath10k/txrx.c 	ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
skb                76 drivers/net/wireless/ath/ath10k/usb.c 	dev_kfree_skb(urb_context->skb);
skb                77 drivers/net/wireless/ath/ath10k/usb.c 	urb_context->skb = NULL;
skb               130 drivers/net/wireless/ath/ath10k/usb.c 	struct sk_buff *skb;
skb               162 drivers/net/wireless/ath/ath10k/usb.c 	skb = urb_context->skb;
skb               165 drivers/net/wireless/ath/ath10k/usb.c 	urb_context->skb = NULL;
skb               166 drivers/net/wireless/ath/ath10k/usb.c 	skb_put(skb, urb->actual_length);
skb               169 drivers/net/wireless/ath/ath10k/usb.c 	skb_queue_tail(&pipe->io_comp_queue, skb);
skb               187 drivers/net/wireless/ath/ath10k/usb.c 	struct sk_buff *skb;
skb               195 drivers/net/wireless/ath/ath10k/usb.c 	skb = urb_context->skb;
skb               196 drivers/net/wireless/ath/ath10k/usb.c 	urb_context->skb = NULL;
skb               200 drivers/net/wireless/ath/ath10k/usb.c 	skb_queue_tail(&pipe->io_comp_queue, skb);
skb               217 drivers/net/wireless/ath/ath10k/usb.c 		urb_context->skb = dev_alloc_skb(ATH10K_USB_RX_BUFFER_SIZE);
skb               218 drivers/net/wireless/ath/ath10k/usb.c 		if (!urb_context->skb)
skb               228 drivers/net/wireless/ath/ath10k/usb.c 				  urb_context->skb->data,
skb               236 drivers/net/wireless/ath/ath10k/usb.c 			   ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb);
skb               281 drivers/net/wireless/ath/ath10k/usb.c static void ath10k_usb_tx_complete(struct ath10k *ar, struct sk_buff *skb)
skb               286 drivers/net/wireless/ath/ath10k/usb.c 	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
skb               288 drivers/net/wireless/ath/ath10k/usb.c 	ath10k_htc_notify_tx_completion(ep, skb);
skb               292 drivers/net/wireless/ath/ath10k/usb.c static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb)
skb               302 drivers/net/wireless/ath/ath10k/usb.c 	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
skb               323 drivers/net/wireless/ath/ath10k/usb.c 		trailer = skb->data + sizeof(*htc_hdr) + payload_len -
skb               341 drivers/net/wireless/ath/ath10k/usb.c 		skb_trim(skb, skb->len - htc_hdr->trailer_len);
skb               344 drivers/net/wireless/ath/ath10k/usb.c 	skb_pull(skb, sizeof(*htc_hdr));
skb               345 drivers/net/wireless/ath/ath10k/usb.c 	ep->ep_ops.ep_rx_complete(ar, skb);
skb               351 drivers/net/wireless/ath/ath10k/usb.c 	dev_kfree_skb(skb);
skb               360 drivers/net/wireless/ath/ath10k/usb.c 	struct sk_buff *skb;
skb               362 drivers/net/wireless/ath/ath10k/usb.c 	while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
skb               364 drivers/net/wireless/ath/ath10k/usb.c 			ath10k_usb_tx_complete(ar, skb);
skb               366 drivers/net/wireless/ath/ath10k/usb.c 			ath10k_usb_rx_complete(ar, skb);
skb               408 drivers/net/wireless/ath/ath10k/usb.c 	struct sk_buff *skb;
skb               419 drivers/net/wireless/ath/ath10k/usb.c 		skb = items[i].transfer_context;
skb               420 drivers/net/wireless/ath/ath10k/usb.c 		urb_context->skb = skb;
skb               431 drivers/net/wireless/ath/ath10k/usb.c 				  skb->data,
skb               432 drivers/net/wireless/ath/ath10k/usb.c 				  skb->len,
skb               435 drivers/net/wireless/ath/ath10k/usb.c 		if (!(skb->len % pipe->max_packet_size)) {
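The usb.c hits show the URB completion handlers doing the minimum in atomic context: detach the skb from its urb_context, skb_put() the actual length on rx, queue it on pipe->io_comp_queue, and let a worker dequeue and dispatch to the tx/rx complete paths. A sketch of that split; io_pipe and the handle_* dispatchers are illustrative stand-ins:

#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct io_pipe {
	struct sk_buff_head io_comp_queue;
	struct work_struct io_complete_work;
	bool is_tx;			/* hypothetical direction flag */
};

extern void handle_tx_complete(struct sk_buff *skb);	/* stand-in */
extern void handle_rx_complete(struct sk_buff *skb);	/* stand-in */

/* runs in URB completion (atomic) context: queue and defer */
static void urb_done(struct io_pipe *pipe, struct sk_buff *skb, u32 actual_len)
{
	skb_put(skb, actual_len);	/* rx: account received bytes */
	skb_queue_tail(&pipe->io_comp_queue, skb);	/* locked variant */
	schedule_work(&pipe->io_complete_work);
}

/* runs in process context: drain and dispatch */
static void io_complete_work_fn(struct work_struct *work)
{
	struct io_pipe *pipe =
		container_of(work, struct io_pipe, io_complete_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
		if (pipe->is_tx)
			handle_tx_complete(skb);
		else
			handle_rx_complete(skb);
	}
}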
skb               108 drivers/net/wireless/ath/ath10k/usb.h 	struct sk_buff *skb;
skb                15 drivers/net/wireless/ath/ath10k/wmi-ops.h 	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
skb                19 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
skb                21 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
skb                23 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
skb                26 drivers/net/wireless/ath/ath10k/wmi-ops.h 				struct ath10k *ar, struct sk_buff *skb,
skb                28 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
skb                30 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
skb                32 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
skb                34 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
skb                36 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
skb                40 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
skb                42 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
skb                44 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
skb                46 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
skb                48 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
skb                50 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
skb                52 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
skb                54 drivers/net/wireless/ath/ath10k/wmi-ops.h 	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
skb               132 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
skb               134 drivers/net/wireless/ath/ath10k/wmi-ops.h 					    struct sk_buff *skb,
skb               222 drivers/net/wireless/ath/ath10k/wmi-ops.h int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
skb               225 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
skb               230 drivers/net/wireless/ath/ath10k/wmi-ops.h 	ar->wmi.ops->rx(ar, skb);
skb               257 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
skb               263 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_scan(ar, skb, arg);
skb               267 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
skb               273 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
skb               277 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
skb               283 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
skb               287 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
skb               293 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
skb               297 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
skb               303 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
skb               307 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
skb               313 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
skb               317 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
skb               323 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
skb               327 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
skb               333 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_swba(ar, skb, arg);
skb               337 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
skb               343 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
skb               357 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
skb               363 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
skb               367 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
skb               373 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_rdy(ar, skb, arg);
skb               377 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
skb               382 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
skb               386 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
skb               392 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
skb               396 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
skb               402 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
skb               406 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
skb               412 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
skb               416 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
skb               422 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
skb               426 drivers/net/wireless/ath/ath10k/wmi-ops.h ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
skb               432 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
skb               448 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               454 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
skb               455 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               456 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               458 drivers/net/wireless/ath/ath10k/wmi-ops.h 	ret = ath10k_wmi_cmd_send(ar, skb,
skb               470 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               476 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
skb               477 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               478 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               480 drivers/net/wireless/ath/ath10k/wmi-ops.h 	ret = ath10k_wmi_cmd_send(ar, skb,
skb               499 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               504 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
skb               506 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               507 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               509 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb               516 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               521 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
skb               522 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               523 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               525 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb               532 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               537 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
skb               538 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               539 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               541 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
skb               547 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               552 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_resume(ar);
skb               553 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               554 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               556 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
skb               562 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               567 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
skb               568 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               569 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               571 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
skb               577 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               582 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_init(ar);
skb               583 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               584 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               586 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
skb               593 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               598 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_start_scan(ar, arg);
skb               599 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               600 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               602 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
skb               608 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               613 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
skb               614 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               615 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               617 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
skb               626 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               631 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
skb               632 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               633 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               635 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
skb               641 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               646 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
skb               647 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               648 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               650 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
skb               657 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               662 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
skb               663 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               664 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               666 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb               674 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               679 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
skb               680 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               681 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               683 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb               690 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               695 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
skb               696 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               697 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               699 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
skb               705 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               710 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
skb               711 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               712 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               714 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
skb               720 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               725 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
skb               726 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               727 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               729 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
skb               736 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               741 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
skb               743 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               744 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               746 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
skb               753 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               758 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
skb               759 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               760 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               762 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb               770 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               776 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
skb               777 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               778 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               781 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb               788 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               794 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
skb               796 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               797 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               800 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb               809 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               815 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
skb               817 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               818 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               821 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb               828 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               831 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
skb               832 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               833 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               836 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb               844 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               849 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
skb               850 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               851 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               853 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
skb               860 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               865 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
skb               866 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               867 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               869 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
skb               876 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               881 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
skb               882 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               883 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               885 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
skb               892 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               897 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
skb               899 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               900 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               902 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
skb               909 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               914 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
skb               915 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               916 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               918 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb               926 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               931 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
skb               932 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               933 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               935 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb               943 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               948 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
skb               949 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               950 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               952 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb               960 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               965 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
skb               966 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               967 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               969 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
skb               975 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb               984 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
skb               985 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb               986 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb               988 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb               996 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1001 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
skb              1002 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1003 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1005 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
skb              1014 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1020 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
skb              1022 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1023 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1025 drivers/net/wireless/ath/ath10k/wmi-ops.h 	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
skb              1028 drivers/net/wireless/ath/ath10k/wmi-ops.h 		dev_kfree_skb(skb);
skb              1039 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1044 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
skb              1045 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1046 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1048 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1055 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1060 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
skb              1061 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1062 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1064 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
skb              1071 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1076 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
skb              1077 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1078 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1080 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
skb              1086 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1091 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
skb              1092 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1093 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1095 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
skb              1101 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1106 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
skb              1107 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1108 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1110 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
skb              1116 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1121 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pktlog_disable(ar);
skb              1122 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1123 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1125 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1133 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1138 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
skb              1140 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1141 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1143 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1150 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1155 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
skb              1156 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1157 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1159 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1166 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1171 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
skb              1172 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1173 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1175 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1183 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1188 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
skb              1189 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1190 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1192 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1200 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1205 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
skb              1206 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1207 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1209 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1217 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1222 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
skb              1224 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1225 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1227 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1236 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1241 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
skb              1244 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1245 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1247 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
skb              1253 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1258 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
skb              1259 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1260 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1262 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
skb              1268 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1273 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
skb              1274 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1275 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1277 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
skb              1284 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1290 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
skb              1291 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1292 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1295 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb              1301 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1307 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_wow_enable(ar);
skb              1308 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1309 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1312 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb              1320 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1326 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
skb              1327 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1328 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1331 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb              1337 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1343 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
skb              1344 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1345 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1348 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb              1356 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1362 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
skb              1365 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1366 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1369 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb              1375 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1381 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
skb              1382 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1383 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1386 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb              1393 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1399 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
skb              1400 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1401 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1404 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
skb              1411 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1416 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
skb              1417 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1418 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1420 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
skb              1429 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1434 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
skb              1435 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1436 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1438 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1445 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1450 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
skb              1451 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1452 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1454 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
skb              1460 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1465 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
skb              1467 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1468 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1470 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1489 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1494 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
skb              1498 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1499 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1501 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1510 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1515 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->ext_resource_config(ar, type,
skb              1518 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1519 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1521 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1539 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1544 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
skb              1545 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1546 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1548 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1556 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1561 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = wmi->ops->gen_echo(ar, value);
skb              1562 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1563 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1565 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
skb              1571 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1576 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
skb              1578 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1579 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1581 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1589 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1594 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_radar_found(ar, arg);
skb              1595 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1596 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1598 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
skb              1606 drivers/net/wireless/ath/ath10k/wmi-ops.h 	struct sk_buff *skb;
skb              1611 drivers/net/wireless/ath/ath10k/wmi-ops.h 	skb = ar->wmi.ops->gen_bb_timing(ar, arg);
skb              1613 drivers/net/wireless/ath/ath10k/wmi-ops.h 	if (IS_ERR(skb))
skb              1614 drivers/net/wireless/ath/ath10k/wmi-ops.h 		return PTR_ERR(skb);
skb              1616 drivers/net/wireless/ath/ath10k/wmi-ops.h 	return ath10k_wmi_cmd_send(ar, skb,
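Nearly every inline in wmi-ops.h above follows one template: return -EOPNOTSUPP if the gen_* op is not implemented by this firmware ABI, ask it to build a command skb, propagate PTR_ERR on failure, and hand the skb to ath10k_wmi_cmd_send() with the matching command id. The generic shape, with the surrounding ath10k types trimmed to a hypothetical context struct:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

struct wmi_ctx {
	/* op table entry: builds a command buffer, or ERR_PTR on failure */
	struct sk_buff *(*gen_foo)(struct wmi_ctx *ctx, u32 param);	/* hypothetical op */
	u32 foo_cmdid;
};

extern int wmi_cmd_send(struct wmi_ctx *ctx, struct sk_buff *skb, u32 cmd_id);	/* assumed */

static inline int wmi_foo(struct wmi_ctx *ctx, u32 param)
{
	struct sk_buff *skb;

	if (!ctx->gen_foo)
		return -EOPNOTSUPP;	/* this ABI lacks the command */

	skb = ctx->gen_foo(ctx, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* builder reports errors via ERR_PTR */

	/* on success the send path consumes the skb */
	return wmi_cmd_send(ctx, skb, ctx->foo_cmdid);
}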
skb               169 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					      struct sk_buff *skb)
skb               177 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               216 drivers/net/wireless/ath/ath10k/wmi-tlv.c 						  struct sk_buff *skb)
skb               223 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					  struct sk_buff *skb)
skb               231 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               286 drivers/net/wireless/ath/ath10k/wmi-tlv.c 				     struct sk_buff *skb)
skb               292 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               314 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					struct sk_buff *skb)
skb               321 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               348 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					 struct sk_buff *skb)
skb               355 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               413 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					    struct sk_buff *skb)
skb               417 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = (struct wmi_tlv_pdev_temperature_event *)skb->data;
skb               418 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (WARN_ON(skb->len < sizeof(*ev)))
skb               425 drivers/net/wireless/ath/ath10k/wmi-tlv.c static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
skb               432 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               469 drivers/net/wireless/ath/ath10k/wmi-tlv.c 						 struct sk_buff *skb)
skb               474 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv_hdr = (struct wmi_tlv *)skb->data;
skb               490 drivers/net/wireless/ath/ath10k/wmi-tlv.c static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
skb               496 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
skb               499 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
skb               502 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
skb               504 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	consumed = ath10k_tm_event_wmi(ar, id, skb);
skb               518 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_mgmt_rx(ar, skb);
skb               522 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_scan(ar, skb);
skb               525 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_chan_info(ar, skb);
skb               528 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_echo(ar, skb);
skb               531 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_debug_mesg(ar, skb);
skb               534 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_update_stats(ar, skb);
skb               537 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_vdev_start_resp(ar, skb);
skb               540 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_vdev_stopped(ar, skb);
skb               543 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb);
skb               546 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
skb               549 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_host_swba(ar, skb);
skb               552 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_tbttoffset_update(ar, skb);
skb               555 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_phyerr(ar, skb);
skb               558 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_roam(ar, skb);
skb               561 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_profile_match(ar, skb);
skb               564 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_debug_print(ar, skb);
skb               567 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_pdev_qvit(ar, skb);
skb               570 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_wlan_profile_data(ar, skb);
skb               573 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
skb               576 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
skb               579 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_rtt_error_report(ar, skb);
skb               582 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
skb               585 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_dcs_interference(ar, skb);
skb               588 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
skb               591 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
skb               594 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_gtk_offload_status(ar, skb);
skb               597 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
skb               600 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_delba_complete(ar, skb);
skb               603 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_addba_complete(ar, skb);
skb               606 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
skb               609 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_service_ready(ar, skb);
skb               612 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_ready(ar, skb);
skb               615 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_service_available(ar, skb);
skb               618 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
skb               621 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_tlv_event_diag_data(ar, skb);
skb               624 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_tlv_event_diag(ar, skb);
skb               627 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
skb               630 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_tlv_event_tx_pause(ar, skb);
skb               633 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_tlv_event_temperature(ar, skb);
skb               636 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_tdls_peer(ar, skb);
skb               639 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
skb               642 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_mgmt_tx_compl(ar, skb);
skb               645 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb);
skb               653 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	dev_kfree_skb(skb);
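ath10k_wmi_tlv_op_rx (wmi-tlv.c 490-653) strips the wmi_cmd_hdr, gives testmode first refusal on the event, switches on the command id to a per-event handler, and frees the skb exactly once at the end. A reduced sketch; the header layout and handler are illustrative:

#include <linux/skbuff.h>
#include <linux/types.h>

struct cmd_hdr {
	__le32 cmd_id;		/* stand-in for wmi_cmd_hdr */
};

extern void handle_scan_event(struct sk_buff *skb);	/* hypothetical handler */

static void rx_event(struct sk_buff *skb)
{
	struct cmd_hdr *hdr = (struct cmd_hdr *)skb->data;
	u32 id = le32_to_cpu(hdr->cmd_id);

	/* the header must be stripped before handlers see the payload;
	 * skb_pull() returns NULL on a runt message */
	if (!skb_pull(skb, sizeof(*hdr)))
		goto out;

	switch (id) {
	case 0x1:
		handle_scan_event(skb);
		break;
	default:
		break;		/* unknown events are simply dropped */
	}

out:
	dev_kfree_skb(skb);	/* single exit: every path frees the skb */
}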
skb               657 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					  struct sk_buff *skb,
skb               664 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               689 drivers/net/wireless/ath/ath10k/wmi-tlv.c ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
skb               696 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               761 drivers/net/wireless/ath/ath10k/wmi-tlv.c 				struct ath10k *ar, struct sk_buff *skb,
skb               767 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
skb               791 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					     struct sk_buff *skb,
skb               800 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               824 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (skb->len < (frame - skb->data) + msdu_len) {
skb               830 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb_trim(skb, 0);
skb               831 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb_put(skb, frame - skb->data);
skb               832 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb_pull(skb, frame - skb->data);
skb               833 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb_put(skb, msdu_len);
skb               840 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					     struct sk_buff *skb,
skb               847 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               875 drivers/net/wireless/ath/ath10k/wmi-tlv.c ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
skb               882 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb               895 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb_pull(skb, sizeof(*ev));
skb               906 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					       struct sk_buff *skb,
skb               913 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb              1021 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					  struct sk_buff *skb,
skb              1029 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
skb              1053 drivers/net/wireless/ath/ath10k/wmi-tlv.c 						struct sk_buff *skb,
skb              1061 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb              1156 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					     struct sk_buff *skb,
skb              1166 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
skb              1227 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					 struct sk_buff *skb,
skb              1234 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb              1273 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					    struct sk_buff *skb,
skb              1278 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
skb              1326 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					   struct sk_buff *skb,
skb              1343 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb              1465 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					  struct sk_buff *skb,
skb              1472 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb              1494 drivers/net/wireless/ath/ath10k/wmi-tlv.c ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
skb              1501 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
skb              1524 drivers/net/wireless/ath/ath10k/wmi-tlv.c 					  struct sk_buff *skb,
skb              1531 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
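[annotation] The op_pull_*_ev entries above (wmi-tlv.c 657-1531) all reduce to one pattern: parse the TLV stream in skb->data with GFP_ATOMIC (event delivery runs in atomic context), look up the expected tag, copy fields into the caller's arg struct, then free the parse table. A hedged sketch; ath10k_wmi_tlv_parse_alloc() is the driver's real helper, and every fake_* name is invented for illustration:

	static int fake_pull_ev(struct ath10k *ar, struct sk_buff *skb,
				struct fake_ev_arg *arg)
	{
		const void **tb;
		const struct fake_ev *ev;

		tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len,
						GFP_ATOMIC);
		if (IS_ERR(tb))
			return PTR_ERR(tb);	/* TLV parse failed */

		ev = tb[FAKE_TLV_TAG];		/* hypothetical tag index */
		if (!ev) {
			kfree(tb);
			return -EPROTO;		/* mandatory TLV missing */
		}

		arg->value = ev->value;		/* copy out before freeing tb */
		kfree(tb);
		return 0;
	}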
skb              1555 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              1557 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              1558 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              1561 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              1568 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              1576 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              1578 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              1579 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              1582 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              1589 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              1600 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              1602 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              1603 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              1606 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              1617 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              1631 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              1633 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              1634 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              1637 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              1646 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
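[annotation] Lines 1555-1646 establish the fixed-size command-builder skeleton repeated dozens of times below: allocate sizeof(*tlv) + sizeof(*cmd), cast skb->data to the TLV header, fill tag/len and the command body, and hand the skb back to the caller, who sends and owns it. A sketch assuming the driver's wmi_tlv layout (16-bit tag and len followed by the value bytes); struct fake_cmd and the tag constant are placeholders:

	static struct sk_buff *fake_gen_cmd(struct ath10k *ar, u32 vdev_id)
	{
		struct wmi_tlv *tlv;
		struct fake_cmd *cmd;		/* hypothetical command body */
		struct sk_buff *skb;

		skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
		if (!skb)
			return ERR_PTR(-ENOMEM);

		tlv = (void *)skb->data;
		tlv->tag = __cpu_to_le16(FAKE_TLV_TAG_CMD);	/* placeholder */
		tlv->len = __cpu_to_le16(sizeof(*cmd));
		cmd = (void *)tlv->value;
		cmd->vdev_id = __cpu_to_le32(vdev_id);

		return skb;	/* caller sends it via ath10k_wmi_cmd_send() */
	}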
skb              1683 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              1697 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              1698 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              1701 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = skb->data;
skb              1798 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              1807 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              1829 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              1830 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              1833 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              1898 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              1907 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              1916 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              1917 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              1926 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              1936 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              1970 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              1972 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              1973 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              1976 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              1986 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              1994 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              1996 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              1997 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2000 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2007 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2018 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2031 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              2032 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2040 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              2083 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2091 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2093 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2094 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2097 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2104 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2114 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2116 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2117 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2120 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2129 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2137 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2139 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2140 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2143 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2150 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2159 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2161 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2162 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2165 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2175 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2184 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2197 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              2198 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2201 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              2230 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2267 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2276 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              2277 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2280 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              2302 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2326 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2331 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              2332 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2335 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              2348 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2357 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2364 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              2365 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2368 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              2392 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2402 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2404 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2405 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2408 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2417 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2426 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2428 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2429 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2432 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2440 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2449 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2451 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2452 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2455 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2464 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2475 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2477 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2478 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2481 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2493 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2503 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2521 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              2522 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2525 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              2583 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2592 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2594 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2595 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2598 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2606 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2616 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2618 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2619 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2622 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2631 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2640 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2645 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2646 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2649 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2659 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2670 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2679 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              2680 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2683 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              2716 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2724 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2726 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2727 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2730 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2737 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2749 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2753 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2754 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2760 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2778 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2788 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2794 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              2795 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2798 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              2816 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2824 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2826 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2827 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2830 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2837 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2841 drivers/net/wireless/ath/ath10k/wmi-tlv.c ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
skb              2852 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	pkt_addr->vaddr = skb;
skb              2874 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2906 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              2907 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2914 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              2936 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2939 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	dev_kfree_skb(skb);
skb              2950 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2952 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              2953 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2956 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              2964 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              2973 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              2990 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              2991 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              2994 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3016 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3024 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3029 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3030 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3033 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3045 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3053 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3055 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              3056 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3059 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              3064 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3072 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3077 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3078 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3081 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3091 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3103 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3113 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3114 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3117 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3155 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3165 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3172 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3173 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3176 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3204 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3213 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3219 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3220 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3223 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3244 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3253 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3271 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3272 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3275 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3300 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3331 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3341 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3342 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3345 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3402 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3412 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3414 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
skb              3415 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3418 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (void *)skb->data;
skb              3433 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3441 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3445 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3446 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3449 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (struct wmi_tlv *)skb->data;
skb              3459 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3470 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3474 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3475 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3478 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (struct wmi_tlv *)skb->data;
skb              3489 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3497 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3501 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3502 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3505 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (struct wmi_tlv *)skb->data;
skb              3511 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3523 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3536 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3537 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3541 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3611 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3620 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3624 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3625 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3628 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tlv = (struct wmi_tlv *)skb->data;
skb              3639 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3651 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3671 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3672 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3675 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3766 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3775 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3786 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3787 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3790 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3815 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3833 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3838 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3839 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3842 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3853 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3861 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3866 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3867 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3870 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3881 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3889 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3895 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3896 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3899 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3924 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
skb              3932 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	struct sk_buff *skb;
skb              3938 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              3939 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!skb)
skb              3942 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ptr = (void *)skb->data;
skb              3951 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return skb;
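[annotation] The longer wmi-tlv.c builders above (the ath10k_wmi_alloc_skb(ar, len) entries) handle variable-length commands: len is summed over every TLV first, then a ptr cursor walks skb->data appending one header-plus-payload block at a time. A cursor sketch under the same wmi_tlv layout assumption; the tags, struct fake_cmd, and the blob argument are placeholders:

	static struct sk_buff *fake_gen_var_cmd(struct ath10k *ar,
						const void *blob, size_t blen)
	{
		struct wmi_tlv *tlv;
		struct sk_buff *skb;
		void *ptr;
		size_t len;

		len = (sizeof(*tlv) + sizeof(struct fake_cmd)) +
		      (sizeof(*tlv) + roundup(blen, 4));

		skb = ath10k_wmi_alloc_skb(ar, len);
		if (!skb)
			return ERR_PTR(-ENOMEM);

		ptr = (void *)skb->data;

		tlv = ptr;			/* fixed command TLV */
		tlv->tag = __cpu_to_le16(FAKE_TLV_TAG_CMD);
		tlv->len = __cpu_to_le16(sizeof(struct fake_cmd));
		ptr += sizeof(*tlv) + sizeof(struct fake_cmd);

		tlv = ptr;			/* trailing byte-array TLV */
		tlv->tag = __cpu_to_le16(FAKE_TLV_TAG_BYTES);
		tlv->len = __cpu_to_le16(roundup(blen, 4));
		memcpy(tlv->value, blob, blen);

		return skb;
	}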
skb              1745 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              1748 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
skb              1749 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              1752 drivers/net/wireless/ath/ath10k/wmi.c 	skb_reserve(skb, WMI_SKB_HEADROOM);
skb              1753 drivers/net/wireless/ath/ath10k/wmi.c 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
skb              1756 drivers/net/wireless/ath/ath10k/wmi.c 	skb_put(skb, round_len);
skb              1757 drivers/net/wireless/ath/ath10k/wmi.c 	memset(skb->data, 0, round_len);
skb              1759 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
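[annotation] ath10k_wmi_alloc_skb() (wmi.c 1745-1759 above) is the allocator behind every builder: it reserves WMI_SKB_HEADROOM so the command header can later be pushed in place, checks that the payload start is 4-byte aligned, and returns a zeroed, rounded-up buffer. Reassembled from the indexed lines; the warn string and the exact rounding are assumptions, since the index elides those lines:

	static struct sk_buff *fake_wmi_alloc_skb(struct ath10k *ar, u32 len)
	{
		struct sk_buff *skb;
		u32 round_len = roundup(len, 4);	/* assumed 4-byte units */

		skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
		if (!skb)
			return NULL;

		skb_reserve(skb, WMI_SKB_HEADROOM);	/* room for wmi_cmd_hdr */
		if (!IS_ALIGNED((unsigned long)skb->data, 4))
			ath10k_warn(ar, "unaligned WMI skb\n");	/* assumed body */

		skb_put(skb, round_len);
		memset(skb->data, 0, round_len);	/* callers rely on zeroed TLVs */
		return skb;
	}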
skb              1762 drivers/net/wireless/ath/ath10k/wmi.c static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
skb              1764 drivers/net/wireless/ath/ath10k/wmi.c 	dev_kfree_skb(skb);
skb              1767 drivers/net/wireless/ath/ath10k/wmi.c int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
skb              1770 drivers/net/wireless/ath/ath10k/wmi.c 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
skb              1775 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
skb              1780 drivers/net/wireless/ath/ath10k/wmi.c 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
skb              1784 drivers/net/wireless/ath/ath10k/wmi.c 	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
skb              1785 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
skb              1793 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
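[annotation] ath10k_wmi_cmd_send_nowait() (wmi.c 1767-1793 above) shows the rollback idiom that the reserved headroom enables: skb_push() prepends the command header in place, and if ath10k_htc_send() fails the header is skb_pull()ed back off, leaving the caller a pristine buffer to retry or free. Condensed from the indexed lines; the cmd_id field packing is simplified:

	static int fake_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
					u32 cmd_id)
	{
		struct wmi_cmd_hdr *cmd_hdr;
		int ret;

		if (skb_push(skb, sizeof(*cmd_hdr)) == NULL)
			return -ENOMEM;		/* no headroom left */

		cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
		cmd_hdr->cmd_id = __cpu_to_le32(cmd_id);	/* packing simplified */

		ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
		if (ret)
			skb_pull(skb, sizeof(*cmd_hdr));	/* undo push on failure */
		return ret;
	}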
skb              1868 drivers/net/wireless/ath/ath10k/wmi.c int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
skb              1884 drivers/net/wireless/ath/ath10k/wmi.c 		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
skb              1893 drivers/net/wireless/ath/ath10k/wmi.c 		dev_kfree_skb_any(skb);
skb              1911 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              1942 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              1943 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              1946 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
skb              1957 drivers/net/wireless/ath/ath10k/wmi.c 		   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
skb              1959 drivers/net/wireless/ath/ath10k/wmi.c 	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
skb              1960 drivers/net/wireless/ath/ath10k/wmi.c 	trace_ath10k_tx_payload(ar, skb->data, skb->len);
skb              1962 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              2114 drivers/net/wireless/ath/ath10k/wmi.c static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
skb              2117 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_scan_event *ev = (void *)skb->data;
skb              2119 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              2122 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              2133 drivers/net/wireless/ath/ath10k/wmi.c int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
skb              2144 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_scan(ar, skb, &arg);
skb              2198 drivers/net/wireless/ath/ath10k/wmi.c 					 struct sk_buff *skb,
skb              2201 drivers/net/wireless/ath/ath10k/wmi.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2211 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
skb              2214 drivers/net/wireless/ath/ath10k/wmi.c 	keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
skb              2228 drivers/net/wireless/ath/ath10k/wmi.c static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
skb              2241 drivers/net/wireless/ath/ath10k/wmi.c 		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
skb              2245 drivers/net/wireless/ath/ath10k/wmi.c 		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
skb              2250 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < pull_len)
skb              2253 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, pull_len);
skb              2262 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < msdu_len)
skb              2267 drivers/net/wireless/ath/ath10k/wmi.c 		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
skb              2274 drivers/net/wireless/ath/ath10k/wmi.c 	skb_trim(skb, msdu_len);
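[annotation] ath10k_wmi_op_pull_mgmt_rx_ev() (wmi.c 2228-2274 above) peels the event header with skb_pull() and then skb_trim()s to the reported MSDU length, so the skb ends up holding exactly the 802.11 frame later handed to mac80211. The bounds checks around both steps reduce to the sketch below; hdr_len and msdu_len stand in for the pull_len and firmware-reported buffer length of the real event:

	static int fake_isolate_frame(struct sk_buff *skb, unsigned int hdr_len,
				      unsigned int msdu_len)
	{
		if (skb->len < hdr_len)
			return -EPROTO;		/* truncated event */
		skb_pull(skb, hdr_len);		/* drop the WMI header */

		if (skb->len < msdu_len)
			return -EPROTO;		/* frame shorter than claimed */
		skb_trim(skb, msdu_len);	/* drop trailing padding */
		return 0;
	}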
skb              2280 drivers/net/wireless/ath/ath10k/wmi.c 					      struct sk_buff *skb,
skb              2290 drivers/net/wireless/ath/ath10k/wmi.c 	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
skb              2294 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < pull_len)
skb              2297 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, pull_len);
skb              2306 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < msdu_len)
skb              2311 drivers/net/wireless/ath/ath10k/wmi.c 		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
skb              2317 drivers/net/wireless/ath/ath10k/wmi.c 	skb_trim(skb, msdu_len);
skb              2388 drivers/net/wireless/ath/ath10k/wmi.c int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
skb              2394 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
skb              2414 drivers/net/wireless/ath/ath10k/wmi.c int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
skb              2421 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg);
skb              2444 drivers/net/wireless/ath/ath10k/wmi.c int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
skb              2447 drivers/net/wireless/ath/ath10k/wmi.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              2458 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
skb              2461 drivers/net/wireless/ath/ath10k/wmi.c 		dev_kfree_skb(skb);
skb              2479 drivers/net/wireless/ath/ath10k/wmi.c 		dev_kfree_skb(skb);
skb              2504 drivers/net/wireless/ath/ath10k/wmi.c 		dev_kfree_skb(skb);
skb              2517 drivers/net/wireless/ath/ath10k/wmi.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              2526 drivers/net/wireless/ath/ath10k/wmi.c 	ath10k_wmi_handle_wep_reauth(ar, skb, status);
skb              2542 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_mac_handle_beacon(ar, skb);
skb              2546 drivers/net/wireless/ath/ath10k/wmi.c 		   skb, skb->len,
skb              2554 drivers/net/wireless/ath/ath10k/wmi.c 	ieee80211_rx_ni(ar->hw, skb);
skb              2578 drivers/net/wireless/ath/ath10k/wmi.c static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
skb              2581 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_chan_info_event *ev = (void *)skb->data;
skb              2583 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              2586 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              2598 drivers/net/wireless/ath/ath10k/wmi.c 					      struct sk_buff *skb,
skb              2601 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
skb              2603 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              2606 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              2698 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
skb              2704 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
skb              2746 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
skb              2751 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
skb              2765 drivers/net/wireless/ath/ath10k/wmi.c int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
skb              2768 drivers/net/wireless/ath/ath10k/wmi.c 		   skb->len);
skb              2770 drivers/net/wireless/ath/ath10k/wmi.c 	trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
skb              2926 drivers/net/wireless/ath/ath10k/wmi.c 					    struct sk_buff *skb,
skb              2929 drivers/net/wireless/ath/ath10k/wmi.c 	const struct wmi_stats_event *ev = (void *)skb->data;
skb              2933 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb_pull(skb, sizeof(*ev)))
skb              2943 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              2944 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              2964 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              2965 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              2980 drivers/net/wireless/ath/ath10k/wmi.c 					   struct sk_buff *skb,
skb              2983 drivers/net/wireless/ath/ath10k/wmi.c 	const struct wmi_stats_event *ev = (void *)skb->data;
skb              2987 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb_pull(skb, sizeof(*ev)))
skb              2997 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              2998 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3019 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3020 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3038 drivers/net/wireless/ath/ath10k/wmi.c 					    struct sk_buff *skb,
skb              3041 drivers/net/wireless/ath/ath10k/wmi.c 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
skb              3047 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb_pull(skb, sizeof(*ev)))
skb              3058 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3059 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3078 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3079 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3095 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3096 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3115 drivers/net/wireless/ath/ath10k/wmi.c 					      struct sk_buff *skb,
skb              3118 drivers/net/wireless/ath/ath10k/wmi.c 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
skb              3124 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb_pull(skb, sizeof(*ev)))
skb              3135 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3136 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3155 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3156 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3178 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3179 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, stats_len))
skb              3201 drivers/net/wireless/ath/ath10k/wmi.c 					    struct sk_buff *skb,
skb              3204 drivers/net/wireless/ath/ath10k/wmi.c 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
skb              3213 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb_pull(skb, sizeof(*ev)))
skb              3227 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3228 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3247 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3248 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3264 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3265 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3273 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3274 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3288 drivers/net/wireless/ath/ath10k/wmi.c 		src = (void *)skb->data;
skb              3289 drivers/net/wireless/ath/ath10k/wmi.c 		if (!skb_pull(skb, sizeof(*src)))
skb              3306 drivers/net/wireless/ath/ath10k/wmi.c 			src = (void *)skb->data;
skb              3307 drivers/net/wireless/ath/ath10k/wmi.c 			if (!skb_pull(skb, sizeof(*src)))
skb              3326 drivers/net/wireless/ath/ath10k/wmi.c 			src = (void *)skb->data;
skb              3327 drivers/net/wireless/ath/ath10k/wmi.c 			if (!skb_pull(skb, sizeof(*src)))
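[annotation] The stats pullers (wmi.c 2926-3327 above) consume the event iteratively: each "src = (void *)skb->data" / "if (!skb_pull(skb, sizeof(*src)))" pair reads one record, with the pull doubling as a bounds check, since skb_pull() returns NULL once fewer bytes remain than requested (the record bytes themselves stay valid after the head advances). The loop shape, with a placeholder record layout:

	struct fake_stat { __le32 value; };	/* placeholder record layout */

	static u32 fake_pull_records(struct sk_buff *skb, u32 num)
	{
		const struct fake_stat *src;
		u32 i, total = 0;

		for (i = 0; i < num; i++) {
			src = (const void *)skb->data;	/* record at current head */
			if (!skb_pull(skb, sizeof(*src)))
				break;			/* event ran short */
			total += __le32_to_cpu(src->value);
		}
		return total;
	}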
skb              3341 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
skb              3344 drivers/net/wireless/ath/ath10k/wmi.c 	ath10k_debug_fw_stats_process(ar, skb);
skb              3348 drivers/net/wireless/ath/ath10k/wmi.c ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
skb              3351 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_vdev_start_response_event *ev = (void *)skb->data;
skb              3353 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              3356 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              3365 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
skb              3375 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
skb              3397 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
skb              3404 drivers/net/wireless/ath/ath10k/wmi.c ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
skb              3407 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
skb              3409 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              3412 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              3418 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
skb              3424 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
skb              3589 drivers/net/wireless/ath/ath10k/wmi.c static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
skb              3592 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_host_swba_event *ev = (void *)skb->data;
skb              3596 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              3599 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              3635 drivers/net/wireless/ath/ath10k/wmi.c 					     struct sk_buff *skb,
skb              3638 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
skb              3642 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              3645 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              3679 drivers/net/wireless/ath/ath10k/wmi.c 					   struct sk_buff *skb,
skb              3682 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
skb              3686 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              3689 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              3740 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
skb              3752 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_swba(ar, skb, &arg);
skb              3883 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
skb              4226 drivers/net/wireless/ath/ath10k/wmi.c 					    struct sk_buff *skb,
skb              4229 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_phyerr_event *ev = (void *)skb->data;
skb              4231 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              4237 drivers/net/wireless/ath/ath10k/wmi.c 	arg->buf_len = skb->len - sizeof(*ev);
skb              4244 drivers/net/wireless/ath/ath10k/wmi.c 						 struct sk_buff *skb,
skb              4247 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
skb              4249 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              4257 drivers/net/wireless/ath/ath10k/wmi.c 	arg->buf_len = skb->len;
skb              4258 drivers/net/wireless/ath/ath10k/wmi.c 	arg->phyerrs = skb->data;
skb              4346 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
skb              4357 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
skb              4416 drivers/net/wireless/ath/ath10k/wmi.c ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb,
skb              4419 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_dfs_status_ev_arg *ev = (void *)skb->data;
skb              4421 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              4430 drivers/net/wireless/ath/ath10k/wmi.c ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb)
skb              4435 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg);
skb              4456 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
skb              4464 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
skb              4497 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
skb              4502 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
skb              4508 drivers/net/wireless/ath/ath10k/wmi.c 		if (i >= skb->len)
skb              4511 drivers/net/wireless/ath/ath10k/wmi.c 		c = skb->data[i];
skb              4523 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
skb              4526 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->data[i - 1] == '\n')
skb              4535 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
skb              4540 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
skb              4546 drivers/net/wireless/ath/ath10k/wmi.c 					     struct sk_buff *skb)
skb              4552 drivers/net/wireless/ath/ath10k/wmi.c 					     struct sk_buff *skb)
skb              4557 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
skb              4562 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
skb              4569 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
skb              4579 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
skb              4791 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
skb              4799 drivers/net/wireless/ath/ath10k/wmi.c 	ev = (struct wmi_pdev_tpc_config_event *)skb->data;
skb              5044 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
skb              5052 drivers/net/wireless/ath/ath10k/wmi.c 	ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
skb              5102 drivers/net/wireless/ath/ath10k/wmi.c ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
skb              5112 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev)) {
skb              5114 drivers/net/wireless/ath/ath10k/wmi.c 			   skb->len);
skb              5118 drivers/net/wireless/ath/ath10k/wmi.c 	ev = (struct wmi_tdls_peer_event *)skb->data;
skb              5170 drivers/net/wireless/ath/ath10k/wmi.c ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
skb              5179 drivers/net/wireless/ath/ath10k/wmi.c 	ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
skb              5199 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
skb              5204 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
skb              5209 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
skb              5214 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
skb              5219 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
skb              5225 drivers/net/wireless/ath/ath10k/wmi.c 						struct sk_buff *skb)
skb              5230 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
skb              5235 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
skb              5240 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
skb              5332 drivers/net/wireless/ath/ath10k/wmi.c ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
skb              5338 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              5341 drivers/net/wireless/ath/ath10k/wmi.c 	ev = (void *)skb->data;
skb              5342 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              5363 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len <
skb              5371 drivers/net/wireless/ath/ath10k/wmi.c ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
skb              5377 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              5380 drivers/net/wireless/ath/ath10k/wmi.c 	ev = (void *)skb->data;
skb              5381 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              5401 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len <
skb              5411 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb = ar->svc_rdy_skb;
skb              5417 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb) {
skb              5422 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
skb              5560 drivers/net/wireless/ath/ath10k/wmi.c 	dev_kfree_skb(skb);
skb              5565 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
skb              5567 drivers/net/wireless/ath/ath10k/wmi.c 	ar->svc_rdy_skb = skb;
skb              5571 drivers/net/wireless/ath/ath10k/wmi.c static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
skb              5574 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_ready_event *ev = (void *)skb->data;
skb              5576 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              5579 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              5588 drivers/net/wireless/ath/ath10k/wmi.c static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
skb              5591 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_roam_ev *ev = (void *)skb->data;
skb              5593 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb->len < sizeof(*ev))
skb              5596 drivers/net/wireless/ath/ath10k/wmi.c 	skb_pull(skb, sizeof(*ev));
skb              5604 drivers/net/wireless/ath/ath10k/wmi.c 				      struct sk_buff *skb,
skb              5607 drivers/net/wireless/ath/ath10k/wmi.c 	struct wmi_echo_event *ev = (void *)skb->data;
skb              5614 drivers/net/wireless/ath/ath10k/wmi.c int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
skb              5619 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
skb              5638 drivers/net/wireless/ath/ath10k/wmi.c void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
skb              5643 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
skb              5653 drivers/net/wireless/ath/ath10k/wmi.c static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
skb              5657 drivers/net/wireless/ath/ath10k/wmi.c 	ev = (struct wmi_pdev_temperature_event *)skb->data;
skb              5658 drivers/net/wireless/ath/ath10k/wmi.c 	if (WARN_ON(skb->len < sizeof(*ev)))
skb              5666 drivers/net/wireless/ath/ath10k/wmi.c 					       struct sk_buff *skb)
skb              5675 drivers/net/wireless/ath/ath10k/wmi.c 	ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
skb              5676 drivers/net/wireless/ath/ath10k/wmi.c 	if (WARN_ON(skb->len < sizeof(*ev)))
skb              5734 drivers/net/wireless/ath/ath10k/wmi.c static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
skb              5739 drivers/net/wireless/ath/ath10k/wmi.c 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
skb              5742 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
skb              5745 drivers/net/wireless/ath/ath10k/wmi.c 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
skb              5749 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_mgmt_rx(ar, skb);
skb              5753 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_scan(ar, skb);
skb              5757 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_chan_info(ar, skb);
skb              5760 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_echo(ar, skb);
skb              5763 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_debug_mesg(ar, skb);
skb              5767 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_update_stats(ar, skb);
skb              5770 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_start_resp(ar, skb);
skb              5774 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_stopped(ar, skb);
skb              5778 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
skb              5781 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_host_swba(ar, skb);
skb              5784 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_tbttoffset_update(ar, skb);
skb              5787 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_phyerr(ar, skb);
skb              5790 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_roam(ar, skb);
skb              5794 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_profile_match(ar, skb);
skb              5797 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_debug_print(ar, skb);
skb              5801 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_pdev_qvit(ar, skb);
skb              5804 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_wlan_profile_data(ar, skb);
skb              5807 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
skb              5810 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
skb              5813 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_rtt_error_report(ar, skb);
skb              5816 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
skb              5819 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_dcs_interference(ar, skb);
skb              5822 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
skb              5825 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
skb              5828 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_gtk_offload_status(ar, skb);
skb              5831 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
skb              5834 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_delba_complete(ar, skb);
skb              5837 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_addba_complete(ar, skb);
skb              5840 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
skb              5843 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_service_ready(ar, skb);
skb              5846 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_ready(ar, skb);
skb              5850 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_service_available(ar, skb);
skb              5858 drivers/net/wireless/ath/ath10k/wmi.c 	dev_kfree_skb(skb);
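ath10k_wmi_op_rx() above is the main-branch instance of a dispatcher shape repeated below for the 10.1, 10.2, and 10.4 firmware families: read the wmi_cmd_hdr, strip it with skb_pull(), switch on the event id, and free the skb on the way out. A condensed sketch of that shape; the id extraction is simplified (the real code masks the id out of cmd_hdr->cmd_id) and the handler is a hypothetical stand-in:

    static void example_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
    {
        struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
        u32 id = __le32_to_cpu(cmd_hdr->cmd_id);    /* simplified id extraction */

        if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
            goto out;                   /* runt frame, shorter than the header */

        switch (id) {
        case 0x1:                       /* stand-in for a WMI_*_EVENTID constant */
            example_handle_event(ar, skb);  /* hypothetical handler */
            break;
        default:
            break;
        }
    out:
        dev_kfree_skb(skb);             /* handlers only borrow the skb */
    }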
skb              5861 drivers/net/wireless/ath/ath10k/wmi.c static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
skb              5867 drivers/net/wireless/ath/ath10k/wmi.c 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
skb              5870 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
skb              5873 drivers/net/wireless/ath/ath10k/wmi.c 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
skb              5875 drivers/net/wireless/ath/ath10k/wmi.c 	consumed = ath10k_tm_event_wmi(ar, id, skb);
skb              5889 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_mgmt_rx(ar, skb);
skb              5893 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_scan(ar, skb);
skb              5897 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_chan_info(ar, skb);
skb              5900 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_echo(ar, skb);
skb              5903 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_debug_mesg(ar, skb);
skb              5907 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_update_stats(ar, skb);
skb              5910 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_start_resp(ar, skb);
skb              5914 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_stopped(ar, skb);
skb              5918 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
skb              5921 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_host_swba(ar, skb);
skb              5924 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_tbttoffset_update(ar, skb);
skb              5927 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_phyerr(ar, skb);
skb              5930 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_roam(ar, skb);
skb              5934 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_profile_match(ar, skb);
skb              5937 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_debug_print(ar, skb);
skb              5941 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_pdev_qvit(ar, skb);
skb              5944 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_wlan_profile_data(ar, skb);
skb              5947 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
skb              5950 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
skb              5953 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_rtt_error_report(ar, skb);
skb              5956 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
skb              5959 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_dcs_interference(ar, skb);
skb              5962 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
skb              5965 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_inst_rssi_stats(ar, skb);
skb              5968 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_standby_req(ar, skb);
skb              5971 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_resume_req(ar, skb);
skb              5974 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_service_ready(ar, skb);
skb              5977 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_ready(ar, skb);
skb              5989 drivers/net/wireless/ath/ath10k/wmi.c 	dev_kfree_skb(skb);
skb              5992 drivers/net/wireless/ath/ath10k/wmi.c static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
skb              5998 drivers/net/wireless/ath/ath10k/wmi.c 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
skb              6001 drivers/net/wireless/ath/ath10k/wmi.c 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
skb              6004 drivers/net/wireless/ath/ath10k/wmi.c 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
skb              6006 drivers/net/wireless/ath/ath10k/wmi.c 	consumed = ath10k_tm_event_wmi(ar, id, skb);
skb              6020 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_mgmt_rx(ar, skb);
skb              6024 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_scan(ar, skb);
skb              6028 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_chan_info(ar, skb);
skb              6031 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_echo(ar, skb);
skb              6034 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_debug_mesg(ar, skb);
skb              6038 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_update_stats(ar, skb);
skb              6041 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_start_resp(ar, skb);
skb              6045 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_stopped(ar, skb);
skb              6049 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
skb              6052 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_host_swba(ar, skb);
skb              6055 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_tbttoffset_update(ar, skb);
skb              6058 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_phyerr(ar, skb);
skb              6061 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_roam(ar, skb);
skb              6065 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_profile_match(ar, skb);
skb              6068 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_debug_print(ar, skb);
skb              6072 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_pdev_qvit(ar, skb);
skb              6075 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_wlan_profile_data(ar, skb);
skb              6078 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
skb              6081 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
skb              6084 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_rtt_error_report(ar, skb);
skb              6087 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
skb              6090 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_dcs_interference(ar, skb);
skb              6093 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
skb              6096 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_inst_rssi_stats(ar, skb);
skb              6099 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_standby_req(ar, skb);
skb              6103 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_resume_req(ar, skb);
skb              6107 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_service_ready(ar, skb);
skb              6110 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_ready(ar, skb);
skb              6114 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_temperature(ar, skb);
skb              6117 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
skb              6130 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
skb              6138 drivers/net/wireless/ath/ath10k/wmi.c 	dev_kfree_skb(skb);
skb              6141 drivers/net/wireless/ath/ath10k/wmi.c static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
skb              6147 drivers/net/wireless/ath/ath10k/wmi.c 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
skb              6150 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
skb              6153 drivers/net/wireless/ath/ath10k/wmi.c 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
skb              6155 drivers/net/wireless/ath/ath10k/wmi.c 	consumed = ath10k_tm_event_wmi(ar, id, skb);
skb              6169 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_mgmt_rx(ar, skb);
skb              6173 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_echo(ar, skb);
skb              6176 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_debug_mesg(ar, skb);
skb              6180 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_service_ready(ar, skb);
skb              6183 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_scan(ar, skb);
skb              6187 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_chan_info(ar, skb);
skb              6190 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_phyerr(ar, skb);
skb              6193 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_ready(ar, skb);
skb              6197 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
skb              6200 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_roam(ar, skb);
skb              6204 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_host_swba(ar, skb);
skb              6207 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_tbttoffset_update(ar, skb);
skb              6210 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_debug_print(ar, skb);
skb              6214 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_start_resp(ar, skb);
skb              6218 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_vdev_stopped(ar, skb);
skb              6229 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_update_stats(ar, skb);
skb              6232 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_temperature(ar, skb);
skb              6235 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
skb              6238 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
skb              6241 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_handle_tdls_peer_event(ar, skb);
skb              6244 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_tpc_final_table(ar, skb);
skb              6247 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_dfs_status_check(ar, skb);
skb              6250 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
skb              6258 drivers/net/wireless/ath/ath10k/wmi.c 	dev_kfree_skb(skb);
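The 10.x dispatchers differ from the main branch in one extra step visible above: each event is first offered to the testmode/UTF layer via ath10k_tm_event_wmi(), and normal dispatch is skipped when it is consumed (the real code additionally exempts the ready event so boot handling still runs under UTF firmware). Sketch of that gate, assuming the usual "out" label that frees the skb:

    bool consumed = ath10k_tm_event_wmi(ar, id, skb);

    if (consumed)       /* testmode handled it; the real code exempts the ready event */
        goto out;       /* "out" frees the skb, as in the dispatchers above */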
skb              6261 drivers/net/wireless/ath/ath10k/wmi.c static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
skb              6265 drivers/net/wireless/ath/ath10k/wmi.c 	ret = ath10k_wmi_rx(ar, skb);
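From ath10k_wmi_process_rx() down, wmi.c switches from parsing events to building commands, and every gen function below follows one convention: allocate a command skb with ath10k_wmi_alloc_skb(), cast skb->data to the command struct, fill it in little-endian, and return the skb for the caller to send. A minimal sketch with a hypothetical command struct (the ERR_PTR(-ENOMEM) failure convention matches the driver's gen functions):

    struct example_cmd {
        __le32 param;                   /* hypothetical field */
    };

    static struct sk_buff *example_gen_cmd(struct ath10k *ar, u32 param)
    {
        struct example_cmd *cmd;
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
            return ERR_PTR(-ENOMEM);

        cmd = (struct example_cmd *)skb->data;
        cmd->param = __cpu_to_le32(param);  /* firmware expects little-endian */
        return skb;
    }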
skb              6305 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              6307 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              6308 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              6311 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data;
skb              6316 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              6325 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              6327 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              6328 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              6331 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
skb              6341 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              6350 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              6352 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              6353 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              6356 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
skb              6367 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              6374 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              6376 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              6377 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              6380 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
skb              6383 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              6389 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              6391 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, 0);
skb              6392 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              6395 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              6402 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              6410 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              6411 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              6414 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
skb              6420 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              6893 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              6902 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              6903 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              6906 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_start_scan_cmd *)skb->data;
skb              6914 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              6922 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              6931 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              6932 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              6935 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
skb              6941 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              6975 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              6984 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              6985 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              6994 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
skb              7003 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7013 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7015 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7016 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7019 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
skb              7028 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7035 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7037 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7038 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7041 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
skb              7046 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7055 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7069 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7070 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7078 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
skb              7100 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7107 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7109 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7110 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7113 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
skb              7117 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7125 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7127 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7128 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7131 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
skb              7139 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7146 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7148 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7149 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7152 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
skb              7157 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7165 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7174 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7175 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7178 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
skb              7186 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7194 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7201 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
skb              7202 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7205 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
skb              7222 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7230 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7232 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7233 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7236 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
skb              7257 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7265 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7267 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7268 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7271 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
skb              7276 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7285 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7287 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7288 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7291 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_peer_create_cmd *)skb->data;
skb              7299 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7307 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7309 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7310 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7313 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
skb              7320 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7328 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7330 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7331 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7334 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
skb              7342 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7352 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7354 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7355 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7358 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
skb              7367 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7375 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7377 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7378 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7381 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
skb              7388 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7397 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7399 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7400 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7403 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
skb              7411 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7419 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7424 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7425 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7428 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
skb              7437 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7445 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7453 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              7454 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7457 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
skb              7467 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7581 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7588 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              7589 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7592 drivers/net/wireless/ath/ath10k/wmi.c 	ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
skb              7598 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7606 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7613 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              7614 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7617 drivers/net/wireless/ath/ath10k/wmi.c 	ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
skb              7623 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7631 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7638 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              7639 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7642 drivers/net/wireless/ath/ath10k/wmi.c 	ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
skb              7648 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7656 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7663 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              7664 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7667 drivers/net/wireless/ath/ath10k/wmi.c 	ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
skb              7673 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7679 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7681 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, 0);
skb              7682 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7686 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7694 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7696 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7697 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7700 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
skb              7706 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7716 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7720 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7721 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7727 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
skb              7742 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7761 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7763 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7764 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7767 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
skb              7774 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7781 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7783 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7784 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7787 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_request_stats_cmd *)skb->data;
skb              7792 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7800 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7802 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7803 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7806 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
skb              7812 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7820 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7823 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7824 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7827 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
skb              7850 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7858 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7861 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7862 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7865 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
skb              7888 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7895 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7897 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7898 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7903 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
skb              7908 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7914 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7916 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, 0);
skb              7917 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7921 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7930 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7932 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7933 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7936 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
skb              7945 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7953 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7958 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7959 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7962 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
skb              7969 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7977 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              7982 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              7983 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              7986 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_addba_send_cmd *)skb->data;
skb              7995 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8003 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8008 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              8009 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8012 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
skb              8021 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8029 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8034 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              8035 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8038 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_delba_send_cmd *)skb->data;
skb              8048 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8055 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8057 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              8058 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8061 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
skb              8066 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8468 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8470 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              8471 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8474 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
skb              8482 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8714 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8717 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              8718 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8724 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
skb              8741 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8749 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8752 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              8753 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8763 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
skb              8782 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8807 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8809 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              8810 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8813 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data;
skb              8818 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8830 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8840 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, len);
skb              8841 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8844 drivers/net/wireless/ath/ath10k/wmi.c 	memset(skb->data, 0, sizeof(*cmd));
skb              8846 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
skb              8878 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8886 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8888 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              8889 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8892 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_radar_found_info *)skb->data;
skb              8904 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8911 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8913 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              8914 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8917 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_echo_cmd *)skb->data;
skb              8922 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              8954 drivers/net/wireless/ath/ath10k/wmi.c 	struct sk_buff *skb;
skb              8956 drivers/net/wireless/ath/ath10k/wmi.c 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb              8957 drivers/net/wireless/ath/ath10k/wmi.c 	if (!skb)
skb              8960 drivers/net/wireless/ath/ath10k/wmi.c 	cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data;
skb              8967 drivers/net/wireless/ath/ath10k/wmi.c 	return skb;
skb              7294 drivers/net/wireless/ath/ath10k/wmi.h int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
skb              7295 drivers/net/wireless/ath/ath10k/wmi.h int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
skb              7319 drivers/net/wireless/ath/ath10k/wmi.h int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
skb              7320 drivers/net/wireless/ath/ath10k/wmi.h int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
skb              7321 drivers/net/wireless/ath/ath10k/wmi.h int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb);
skb              7322 drivers/net/wireless/ath/ath10k/wmi.h int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb);
skb              7323 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
skb              7324 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
skb              7325 drivers/net/wireless/ath/ath10k/wmi.h int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
skb              7326 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
skb              7327 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
skb              7328 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
skb              7329 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
skb              7330 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
skb              7331 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
skb              7337 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
skb              7338 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
skb              7339 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb);
skb              7340 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb);
skb              7341 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb);
skb              7342 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb);
skb              7344 drivers/net/wireless/ath/ath10k/wmi.h 					     struct sk_buff *skb);
skb              7346 drivers/net/wireless/ath/ath10k/wmi.h 					     struct sk_buff *skb);
skb              7347 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb);
skb              7348 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb);
skb              7349 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb);
skb              7350 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb);
skb              7351 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb);
skb              7353 drivers/net/wireless/ath/ath10k/wmi.h 					 struct sk_buff *skb);
skb              7354 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb);
skb              7355 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb);
skb              7356 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb);
skb              7358 drivers/net/wireless/ath/ath10k/wmi.h 						struct sk_buff *skb);
skb              7359 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb);
skb              7360 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
skb              7361 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
skb              7362 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
skb              7363 drivers/net/wireless/ath/ath10k/wmi.h int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
skb              7364 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb);
skb              7383 drivers/net/wireless/ath/ath10k/wmi.h void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb);
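These wmi.h declarations are the other half of the contract: a caller obtains a command skb from a gen function and passes it to ath10k_wmi_cmd_send() together with the firmware-specific command id. A hedged usage sketch; example_gen_cmd is the hypothetical builder sketched earlier and cmd_id stands in for the per-version command id tables:

    struct sk_buff *skb;

    skb = example_gen_cmd(ar, 1);       /* hypothetical builder, see sketch above */
    if (IS_ERR(skb))
        return PTR_ERR(skb);

    return ath10k_wmi_cmd_send(ar, skb, cmd_id);    /* cmd_id from the id table */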
skb               600 drivers/net/wireless/ath/ath5k/base.c 	struct sk_buff *skb;
skb               606 drivers/net/wireless/ath/ath5k/base.c 	skb = ath_rxbuf_alloc(common,
skb               610 drivers/net/wireless/ath/ath5k/base.c 	if (!skb) {
skb               617 drivers/net/wireless/ath/ath5k/base.c 				   skb->data, common->rx_bufsize,
skb               622 drivers/net/wireless/ath/ath5k/base.c 		dev_kfree_skb(skb);
skb               625 drivers/net/wireless/ath/ath5k/base.c 	return skb;
skb               631 drivers/net/wireless/ath/ath5k/base.c 	struct sk_buff *skb = bf->skb;
skb               635 drivers/net/wireless/ath/ath5k/base.c 	if (!skb) {
skb               636 drivers/net/wireless/ath/ath5k/base.c 		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
skb               637 drivers/net/wireless/ath/ath5k/base.c 		if (!skb)
skb               639 drivers/net/wireless/ath/ath5k/base.c 		bf->skb = skb;
skb               672 drivers/net/wireless/ath/ath5k/base.c static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
skb               678 drivers/net/wireless/ath/ath5k/base.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               737 drivers/net/wireless/ath/ath5k/base.c 	struct sk_buff *skb = bf->skb;
skb               738 drivers/net/wireless/ath/ath5k/base.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               751 drivers/net/wireless/ath/ath5k/base.c 	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
skb               757 drivers/net/wireless/ath/ath5k/base.c 	ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
skb               774 drivers/net/wireless/ath/ath5k/base.c 	pktlen = skb->len;
skb               797 drivers/net/wireless/ath/ath5k/base.c 		ieee80211_get_hdrlen_from_skb(skb), padsize,
skb               798 drivers/net/wireless/ath/ath5k/base.c 		get_hw_packet_type(skb),
skb               844 drivers/net/wireless/ath/ath5k/base.c 	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
skb               921 drivers/net/wireless/ath/ath5k/base.c 	if (!bf->skb)
skb               923 drivers/net/wireless/ath/ath5k/base.c 	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
skb               925 drivers/net/wireless/ath/ath5k/base.c 	ieee80211_free_txskb(ah->hw, bf->skb);
skb               926 drivers/net/wireless/ath/ath5k/base.c 	bf->skb = NULL;
skb               937 drivers/net/wireless/ath/ath5k/base.c 	if (!bf->skb)
skb               941 drivers/net/wireless/ath/ath5k/base.c 	dev_kfree_skb_any(bf->skb);
skb               942 drivers/net/wireless/ath/ath5k/base.c 	bf->skb = NULL;
skb              1211 drivers/net/wireless/ath/ath5k/base.c ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
skb              1215 drivers/net/wireless/ath/ath5k/base.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb              1228 drivers/net/wireless/ath/ath5k/base.c 	    skb->len >= hlen + 4) {
skb              1229 drivers/net/wireless/ath/ath5k/base.c 		keyix = skb->data[hlen + 3] >> 6;
skb              1240 drivers/net/wireless/ath/ath5k/base.c ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
skb              1245 drivers/net/wireless/ath/ath5k/base.c 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
skb              1306 drivers/net/wireless/ath/ath5k/base.c static int ath5k_common_padpos(struct sk_buff *skb)
skb              1308 drivers/net/wireless/ath/ath5k/base.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1325 drivers/net/wireless/ath/ath5k/base.c static int ath5k_add_padding(struct sk_buff *skb)
skb              1327 drivers/net/wireless/ath/ath5k/base.c 	int padpos = ath5k_common_padpos(skb);
skb              1330 drivers/net/wireless/ath/ath5k/base.c 	if (padsize && skb->len > padpos) {
skb              1332 drivers/net/wireless/ath/ath5k/base.c 		if (skb_headroom(skb) < padsize)
skb              1335 drivers/net/wireless/ath/ath5k/base.c 		skb_push(skb, padsize);
skb              1336 drivers/net/wireless/ath/ath5k/base.c 		memmove(skb->data, skb->data + padsize, padpos);
skb              1356 drivers/net/wireless/ath/ath5k/base.c static int ath5k_remove_padding(struct sk_buff *skb)
skb              1358 drivers/net/wireless/ath/ath5k/base.c 	int padpos = ath5k_common_padpos(skb);
skb              1361 drivers/net/wireless/ath/ath5k/base.c 	if (padsize && skb->len >= padpos + padsize) {
skb              1362 drivers/net/wireless/ath/ath5k/base.c 		memmove(skb->data + padsize, skb->data, padpos);
skb              1363 drivers/net/wireless/ath/ath5k/base.c 		skb_pull(skb, padsize);
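ath5k_add_padding()/ath5k_remove_padding() above pad the 802.11 header out to a 4-byte boundary for the hardware and undo it before handing frames up to mac80211. The TX half reassembled as a standalone sketch, with padpos being the header length as computed by ath5k_common_padpos():

    static int example_add_padding(struct sk_buff *skb, int padpos)
    {
        int padsize = padpos & 3;       /* bytes needed to 4-byte-align the payload */

        if (padsize && skb->len > padpos) {
            if (skb_headroom(skb) < padsize)
                return -ENOMEM;         /* no headroom to grow into */
            skb_push(skb, padsize);     /* grow the frame at the front... */
            memmove(skb->data, skb->data + padsize, padpos);    /* ...and slide the header back down */
        }
        return padsize;
    }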
skb              1371 drivers/net/wireless/ath/ath5k/base.c ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
skb              1377 drivers/net/wireless/ath/ath5k/base.c 	ath5k_remove_padding(skb);
skb              1379 drivers/net/wireless/ath/ath5k/base.c 	rxs = IEEE80211_SKB_RXCB(skb);
skb              1413 drivers/net/wireless/ath/ath5k/base.c 	rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
skb              1429 drivers/net/wireless/ath/ath5k/base.c 	trace_ath5k_rx(ah, skb);
skb              1431 drivers/net/wireless/ath/ath5k/base.c 	if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
skb              1436 drivers/net/wireless/ath/ath5k/base.c 			ath5k_check_ibss_tsf(ah, skb, rxs);
skb              1439 drivers/net/wireless/ath/ath5k/base.c 	ieee80211_rx(ah->hw, skb);
skb              1542 drivers/net/wireless/ath/ath5k/base.c 	struct sk_buff *skb, *next_skb;
skb              1557 drivers/net/wireless/ath/ath5k/base.c 		BUG_ON(bf->skb == NULL);
skb              1558 drivers/net/wireless/ath/ath5k/base.c 		skb = bf->skb;
skb              1588 drivers/net/wireless/ath/ath5k/base.c 			skb_put(skb, rs.rs_datalen);
skb              1590 drivers/net/wireless/ath/ath5k/base.c 			ath5k_receive_frame(ah, skb, &rs);
skb              1592 drivers/net/wireless/ath/ath5k/base.c 			bf->skb = next_skb;
skb              1610 drivers/net/wireless/ath/ath5k/base.c ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
skb              1618 drivers/net/wireless/ath/ath5k/base.c 	trace_ath5k_tx(ah, skb, txq);
skb              1624 drivers/net/wireless/ath/ath5k/base.c 	padsize = ath5k_add_padding(skb);
skb              1649 drivers/net/wireless/ath/ath5k/base.c 	bf->skb = skb;
skb              1652 drivers/net/wireless/ath/ath5k/base.c 		bf->skb = NULL;
skb              1662 drivers/net/wireless/ath/ath5k/base.c 	ieee80211_free_txskb(hw, skb);
skb              1666 drivers/net/wireless/ath/ath5k/base.c ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
skb              1676 drivers/net/wireless/ath/ath5k/base.c 	ah->stats.tx_bytes_count += skb->len;
skb              1677 drivers/net/wireless/ath/ath5k/base.c 	info = IEEE80211_SKB_CB(skb);
skb              1720 drivers/net/wireless/ath/ath5k/base.c 	ath5k_remove_padding(skb);
skb              1727 drivers/net/wireless/ath/ath5k/base.c 	trace_ath5k_tx_complete(ah, skb, txq, ts);
skb              1728 drivers/net/wireless/ath/ath5k/base.c 	ieee80211_tx_status(ah->hw, skb);
skb              1737 drivers/net/wireless/ath/ath5k/base.c 	struct sk_buff *skb;
skb              1746 drivers/net/wireless/ath/ath5k/base.c 		if (bf->skb != NULL) {
skb              1759 drivers/net/wireless/ath/ath5k/base.c 			skb = bf->skb;
skb              1760 drivers/net/wireless/ath/ath5k/base.c 			bf->skb = NULL;
skb              1762 drivers/net/wireless/ath/ath5k/base.c 			dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
skb              1764 drivers/net/wireless/ath/ath5k/base.c 			ath5k_tx_frame_completed(ah, skb, txq, &ts, bf);
skb              1811 drivers/net/wireless/ath/ath5k/base.c 	struct sk_buff *skb = bf->skb;
skb              1812 drivers/net/wireless/ath/ath5k/base.c 	struct	ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1819 drivers/net/wireless/ath/ath5k/base.c 	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
skb              1822 drivers/net/wireless/ath/ath5k/base.c 			"skbaddr %llx\n", skb, skb->data, skb->len,
skb              1827 drivers/net/wireless/ath/ath5k/base.c 		dev_kfree_skb_any(skb);
skb              1828 drivers/net/wireless/ath/ath5k/base.c 		bf->skb = NULL;
skb              1868 drivers/net/wireless/ath/ath5k/base.c 	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
skb              1869 drivers/net/wireless/ath/ath5k/base.c 			ieee80211_get_hdrlen_from_skb(skb), padsize,
skb              1880 drivers/net/wireless/ath/ath5k/base.c 	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
skb              1897 drivers/net/wireless/ath/ath5k/base.c 	struct sk_buff *skb;
skb              1904 drivers/net/wireless/ath/ath5k/base.c 	skb = ieee80211_beacon_get(hw, vif);
skb              1906 drivers/net/wireless/ath/ath5k/base.c 	if (!skb) {
skb              1913 drivers/net/wireless/ath/ath5k/base.c 	avf->bbuf->skb = skb;
skb              1933 drivers/net/wireless/ath/ath5k/base.c 	struct sk_buff *skb;
skb              2003 drivers/net/wireless/ath/ath5k/base.c 	if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
skb              2005 drivers/net/wireless/ath/ath5k/base.c 		ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
skb              2009 drivers/net/wireless/ath/ath5k/base.c 	trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);
skb              2016 drivers/net/wireless/ath/ath5k/base.c 	skb = ieee80211_get_buffered_bc(ah->hw, vif);
skb              2017 drivers/net/wireless/ath/ath5k/base.c 	while (skb) {
skb              2018 drivers/net/wireless/ath/ath5k/base.c 		ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL);
skb              2023 drivers/net/wireless/ath/ath5k/base.c 		skb = ieee80211_get_buffered_bc(ah->hw, vif);
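The loop closing this beacon-path excerpt drains powersave-buffered multicast/broadcast frames into the content-after-beacon (CAB) queue right after the beacon goes out, pulling from mac80211 until it runs dry:

    skb = ieee80211_get_buffered_bc(ah->hw, vif);
    while (skb) {
        ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL);    /* CAB queue */
        skb = ieee80211_get_buffered_bc(ah->hw, vif);   /* next buffered frame, or NULL */
    }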
skb                68 drivers/net/wireless/ath/ath5k/base.h 	struct sk_buff			*skb;		/* skbuff for buf */
skb               107 drivers/net/wireless/ath/ath5k/base.h void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
skb                59 drivers/net/wireless/ath/ath5k/mac80211-ops.c 	 struct sk_buff *skb)
skb                62 drivers/net/wireless/ath/ath5k/mac80211-ops.c 	u16 qnum = skb_get_queue_mapping(skb);
skb                65 drivers/net/wireless/ath/ath5k/mac80211-ops.c 		ieee80211_free_txskb(hw, skb);
skb                69 drivers/net/wireless/ath/ath5k/mac80211-ops.c 	ath5k_tx_queue(hw, skb, &ah->txqs[qnum], control);
skb                22 drivers/net/wireless/ath/ath5k/trace.h 	TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb),
skb                23 drivers/net/wireless/ath/ath5k/trace.h 	TP_ARGS(priv, skb),
skb                27 drivers/net/wireless/ath/ath5k/trace.h 		__dynamic_array(u8, frame, skb->len)
skb                31 drivers/net/wireless/ath/ath5k/trace.h 		__entry->skbaddr = (unsigned long) skb;
skb                32 drivers/net/wireless/ath/ath5k/trace.h 		memcpy(__get_dynamic_array(frame), skb->data, skb->len);
skb                40 drivers/net/wireless/ath/ath5k/trace.h 	TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
skb                43 drivers/net/wireless/ath/ath5k/trace.h 	TP_ARGS(priv, skb, q),
skb                49 drivers/net/wireless/ath/ath5k/trace.h 		__dynamic_array(u8, frame, skb->len)
skb                54 drivers/net/wireless/ath/ath5k/trace.h 		__entry->skbaddr = (unsigned long) skb;
skb                56 drivers/net/wireless/ath/ath5k/trace.h 		memcpy(__get_dynamic_array(frame), skb->data, skb->len);
skb                66 drivers/net/wireless/ath/ath5k/trace.h 	TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
skb                69 drivers/net/wireless/ath/ath5k/trace.h 	TP_ARGS(priv, skb, q, ts),
skb                82 drivers/net/wireless/ath/ath5k/trace.h 		__entry->skbaddr = (unsigned long) skb;
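The trace.h fragments above come from TRACE_EVENT() definitions that snapshot the entire frame into a per-event dynamic array. Reassembled into one self-contained sketch (the TP_printk format string is assumed, the rest follows the fragments):

    #include <linux/tracepoint.h>

    TRACE_EVENT(example_rx,
        TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb),
        TP_ARGS(priv, skb),
        TP_STRUCT__entry(
            __field(unsigned long, skbaddr)
            __dynamic_array(u8, frame, skb->len)    /* sized per skb at trace time */
        ),
        TP_fast_assign(
            __entry->skbaddr = (unsigned long)skb;
            memcpy(__get_dynamic_array(frame), skb->data, skb->len);
        ),
        TP_printk("skb %lx", __entry->skbaddr)
    );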
skb                54 drivers/net/wireless/ath/ath6kl/core.c void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
skb                56 drivers/net/wireless/ath/ath6kl/core.c 	ath6kl_htc_tx_complete(ar, skb);
skb                60 drivers/net/wireless/ath/ath6kl/core.c void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe)
skb                62 drivers/net/wireless/ath/ath6kl/core.c 	ath6kl_htc_rx_complete(ar, skb, pipe);
skb               318 drivers/net/wireless/ath/ath6kl/core.h 	struct sk_buff *skb;
skb               396 drivers/net/wireless/ath/ath6kl/core.h 	struct sk_buff *skb;
skb               917 drivers/net/wireless/ath/ath6kl/core.h netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);
skb               936 drivers/net/wireless/ath/ath6kl/core.h int ath6kl_control_tx(void *devt, struct sk_buff *skb,
skb               976 drivers/net/wireless/ath/ath6kl/core.h void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb);
skb               977 drivers/net/wireless/ath/ath6kl/core.h void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
skb               339 drivers/net/wireless/ath/ath6kl/debug.c 	struct sk_buff *skb;
skb               347 drivers/net/wireless/ath/ath6kl/debug.c 	skb = alloc_skb(slot_len, GFP_KERNEL);
skb               348 drivers/net/wireless/ath/ath6kl/debug.c 	if (!skb)
skb               351 drivers/net/wireless/ath/ath6kl/debug.c 	slot = skb_put(skb, slot_len);
skb               361 drivers/net/wireless/ath/ath6kl/debug.c 	__skb_queue_tail(&ar->debug.fwlog_queue, skb);
skb               367 drivers/net/wireless/ath/ath6kl/debug.c 		skb = __skb_dequeue(&ar->debug.fwlog_queue);
skb               368 drivers/net/wireless/ath/ath6kl/debug.c 		kfree_skb(skb);
skb               402 drivers/net/wireless/ath/ath6kl/debug.c 	struct sk_buff *skb;
skb               416 drivers/net/wireless/ath/ath6kl/debug.c 	while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) {
skb               417 drivers/net/wireless/ath/ath6kl/debug.c 		if (skb->len > count - len) {
skb               419 drivers/net/wireless/ath/ath6kl/debug.c 			__skb_queue_head(&ar->debug.fwlog_queue, skb);
skb               424 drivers/net/wireless/ath/ath6kl/debug.c 		memcpy(buf + len, skb->data, skb->len);
skb               425 drivers/net/wireless/ath/ath6kl/debug.c 		len += skb->len;
skb               427 drivers/net/wireless/ath/ath6kl/debug.c 		kfree_skb(skb);
skb               455 drivers/net/wireless/ath/ath6kl/debug.c 	struct sk_buff *skb;
skb               483 drivers/net/wireless/ath/ath6kl/debug.c 	while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) {
skb               484 drivers/net/wireless/ath/ath6kl/debug.c 		if (skb->len > count - len) {
skb               486 drivers/net/wireless/ath/ath6kl/debug.c 			__skb_queue_head(&ar->debug.fwlog_queue, skb);
skb               491 drivers/net/wireless/ath/ath6kl/debug.c 		memcpy(buf + len, skb->data, skb->len);
skb               492 drivers/net/wireless/ath/ath6kl/debug.c 		len += skb->len;
skb               494 drivers/net/wireless/ath/ath6kl/debug.c 		kfree_skb(skb);
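Both fwlog readers above follow the same bounded-FIFO discipline as the writer: the producer queues one fixed-size slot per record and evicts the oldest entries past a cap, while readers dequeue, copy, and push back whatever does not fit in the user buffer. The producer side as a standalone sketch (the cap is illustrative, not the ath6kl value):

    #define EXAMPLE_FWLOG_MAX   128     /* illustrative queue cap */

    static int example_fwlog_add(struct sk_buff_head *q, const void *buf, size_t len)
    {
        struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

        if (!skb)
            return -ENOMEM;

        skb_put_data(skb, buf, len);    /* copy one log slot into the skb */
        __skb_queue_tail(q, skb);       /* lockless variant: caller serializes access */

        while (skb_queue_len(q) > EXAMPLE_FWLOG_MAX)
            kfree_skb(__skb_dequeue(q));    /* drop the oldest slots */
        return 0;
    }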
skb               100 drivers/net/wireless/ath/ath6kl/htc-ops.h 					  struct sk_buff *skb)
skb               102 drivers/net/wireless/ath/ath6kl/htc-ops.h 	ar->htc_ops->tx_complete(ar, skb);
skb               107 drivers/net/wireless/ath/ath6kl/htc-ops.h 					  struct sk_buff *skb, u8 pipe)
skb               109 drivers/net/wireless/ath/ath6kl/htc-ops.h 	ar->htc_ops->rx_complete(ar, skb, pipe);
skb               330 drivers/net/wireless/ath/ath6kl/htc.h 	struct sk_buff *skb;
skb               570 drivers/net/wireless/ath/ath6kl/htc.h 	int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb);
skb               571 drivers/net/wireless/ath/ath6kl/htc.h 	int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
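core.c and the htc-ops.h/htc.h fragments above form an indirection layer: the core completion entry points call thin inline wrappers that dispatch through ar->htc_ops, so the same core code can drive either HTC implementation. Condensed to its shape, using the pieces shown in the excerpts:

    /* The ops table, trimmed to the two hooks shown above: */
    struct ath6kl_htc_ops {
        int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb);
        int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
    };

    /* Thin wrapper, as in htc-ops.h: */
    static inline void ath6kl_htc_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
    {
        ar->htc_ops->tx_complete(ar, skb);  /* implementation selected at init */
    }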
skb                32 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
skb               209 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	struct sk_buff *skb;
skb               221 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		skb = packet->skb;
skb               222 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		if (!skb) {
skb               231 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		htc_hdr = skb_push(skb, sizeof(*htc_hdr));
skb               255 drivers/net/wireless/ath/ath6kl/htc_pipe.c 					      ep->pipe.pipeid_ul, NULL, skb);
skb               503 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	struct sk_buff *skb;
skb               504 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	skb = packet->skb;
skb               505 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	dev_kfree_skb(skb);
skb               512 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	struct sk_buff *skb;
skb               518 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
skb               520 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	if (skb == NULL) {
skb               524 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	packet->skb = skb;
skb               715 drivers/net/wireless/ath/ath6kl/htc_pipe.c 					       struct sk_buff *skb)
skb               729 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		if (skb == packet->skb) {
skb               742 drivers/net/wireless/ath/ath6kl/htc_pipe.c static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
skb               750 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	netdata = skb->data;
skb               757 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	packet = htc_lookup_tx_packet(target, ep, skb);
skb               766 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	skb = NULL;
skb               942 drivers/net/wireless/ath/ath6kl/htc_pipe.c static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
skb               970 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	netdata = skb->data;
skb               971 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	netlen = skb->len;
skb              1037 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		skb_pull(skb, HTC_HDR_LENGTH);
skb              1039 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		netdata = skb->data;
skb              1040 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		netlen = skb->len;
skb              1052 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		dev_kfree_skb(skb);
skb              1053 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		skb = NULL;
skb              1071 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	packet->pkt_cntxt = skb;
skb              1074 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
skb              1081 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	skb_trim(skb, 0);
skb              1087 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	skb = NULL;
skb              1090 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	dev_kfree_skb(skb);
skb              1163 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	struct sk_buff *skb = packet->skb;
skb              1167 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	    skb != NULL)
skb              1168 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		dev_kfree_skb(skb);
skb              1231 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	struct sk_buff *skb;
skb              1263 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		skb = packet->skb;
skb              1267 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		conn_msg = skb_put(skb, length);
skb              1480 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	struct sk_buff *skb;
skb              1493 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	skb = packet->skb;
skb              1496 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	setup = skb_put(skb, sizeof(*setup));
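[note] The htc_pipe.c lines above show the encap/decap symmetry around the HTC frame header: skb_push() prepends it before a send, and skb_pull() strips it again on completion or receive. A sketch of that symmetry; the header layout below is schematic, not the driver's exact struct htc_frame_hdr.

#include <linux/skbuff.h>
#include <asm/byteorder.h>

struct htc_hdr_sketch {
	u8 eid;
	u8 flags;
	__le16 payld_len;
} __packed;

static void htc_encap(struct sk_buff *skb, u8 eid)
{
	struct htc_hdr_sketch *hdr;

	/* caller must have reserved headroom for the header */
	hdr = (struct htc_hdr_sketch *)skb_push(skb, sizeof(*hdr));
	hdr->eid = eid;
	hdr->flags = 0;
	hdr->payld_len = cpu_to_le16(skb->len - sizeof(*hdr));
}

static void htc_decap(struct sk_buff *skb)
{
	/* inverse operation on the receive/complete side */
	skb_pull(skb, sizeof(struct htc_hdr_sketch));
}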
skb               221 drivers/net/wireless/ath/ath6kl/init.c 	struct sk_buff *skb;
skb               227 drivers/net/wireless/ath/ath6kl/init.c 	skb = dev_alloc_skb(size + reserved);
skb               229 drivers/net/wireless/ath/ath6kl/init.c 	if (skb)
skb               230 drivers/net/wireless/ath/ath6kl/init.c 		skb_reserve(skb, reserved - L1_CACHE_BYTES);
skb               231 drivers/net/wireless/ath/ath6kl/init.c 	return skb;
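[note] The init.c lines above are ath6kl_buf_alloc: over-allocate, then skb_reserve() so skb->data lands with spare headroom for headers and alignment fix-ups later in the path. A sketch of the idiom; the exact 'reserved' computation in init.c is not reproduced here.

#include <linux/cache.h>
#include <linux/skbuff.h>

static struct sk_buff *buf_alloc_aligned(unsigned int size,
					 unsigned int reserved)
{
	struct sk_buff *skb = dev_alloc_skb(size + reserved);

	if (skb)
		/* headroom grows by this much; payload pointer moves forward */
		skb_reserve(skb, reserved - L1_CACHE_BYTES);

	return skb;
}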
skb               868 drivers/net/wireless/ath/ath6kl/main.c 	struct sk_buff *skb;
skb               905 drivers/net/wireless/ath/ath6kl/main.c 		skb = skb_dequeue(&conn->psq);
skb               909 drivers/net/wireless/ath/ath6kl/main.c 		ath6kl_data_tx(skb, vif->ndev);
skb               924 drivers/net/wireless/ath/ath6kl/main.c 	struct sk_buff *skb;
skb               950 drivers/net/wireless/ath/ath6kl/main.c 	while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
skb               953 drivers/net/wireless/ath/ath6kl/main.c 		ath6kl_data_tx(skb, vif->ndev);
skb                48 drivers/net/wireless/ath/ath6kl/testmode.c 	struct sk_buff *skb;
skb                53 drivers/net/wireless/ath/ath6kl/testmode.c 	skb = cfg80211_testmode_alloc_event_skb(ar->wiphy, buf_len, GFP_KERNEL);
skb                54 drivers/net/wireless/ath/ath6kl/testmode.c 	if (!skb) {
skb                58 drivers/net/wireless/ath/ath6kl/testmode.c 	if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) ||
skb                59 drivers/net/wireless/ath/ath6kl/testmode.c 	    nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf))
skb                61 drivers/net/wireless/ath/ath6kl/testmode.c 	cfg80211_testmode_event(skb, GFP_KERNEL);
skb                65 drivers/net/wireless/ath/ath6kl/testmode.c 	kfree_skb(skb);
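[note] The testmode.c lines above follow the standard cfg80211 testmode-event sequence: allocate an event skb sized for the payload, add netlink attributes, and either emit the event or free the skb if an nla_put fails. A sketch of that sequence; the attribute IDs are passed in here because the driver's own enum values are assumptions from this sketch's point of view.

#include <net/cfg80211.h>

static void tm_report(struct wiphy *wiphy, const void *buf, size_t buf_len,
		      int attr_cmd, int attr_data, u32 cmd_val)
{
	struct sk_buff *skb;

	skb = cfg80211_testmode_alloc_event_skb(wiphy, buf_len, GFP_KERNEL);
	if (!skb)
		return;

	if (nla_put_u32(skb, attr_cmd, cmd_val) ||
	    nla_put(skb, attr_data, buf_len, buf)) {
		kfree_skb(skb);	/* nla_put failed: nothing was emitted */
		return;
	}

	cfg80211_testmode_event(skb, GFP_KERNEL);	/* consumes the skb */
}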
skb                42 drivers/net/wireless/ath/ath6kl/txrx.c static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
skb                51 drivers/net/wireless/ath/ath6kl/txrx.c 	datap = skb->data;
skb               104 drivers/net/wireless/ath/ath6kl/txrx.c 				struct sk_buff *skb,
skb               109 drivers/net/wireless/ath/ath6kl/txrx.c 	struct ethhdr *datap = (struct ethhdr *) skb->data;
skb               158 drivers/net/wireless/ath/ath6kl/txrx.c 	skb_queue_tail(&conn->apsdq, skb);
skb               177 drivers/net/wireless/ath/ath6kl/txrx.c 				struct sk_buff *skb,
skb               194 drivers/net/wireless/ath/ath6kl/txrx.c 	skb_queue_tail(&conn->psq, skb);
skb               209 drivers/net/wireless/ath/ath6kl/txrx.c static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
skb               212 drivers/net/wireless/ath/ath6kl/txrx.c 	struct ethhdr *datap = (struct ethhdr *) skb->data;
skb               239 drivers/net/wireless/ath/ath6kl/txrx.c 				skb_queue_tail(&ar->mcastpsq, skb);
skb               267 drivers/net/wireless/ath/ath6kl/txrx.c 			dev_kfree_skb(skb);
skb               275 drivers/net/wireless/ath/ath6kl/txrx.c 						vif, skb, flags);
skb               278 drivers/net/wireless/ath/ath6kl/txrx.c 						vif, skb, flags);
skb               286 drivers/net/wireless/ath/ath6kl/txrx.c int ath6kl_control_tx(void *devt, struct sk_buff *skb,
skb               293 drivers/net/wireless/ath/ath6kl/txrx.c 	trace_ath6kl_wmi_cmd(skb->data, skb->len);
skb               296 drivers/net/wireless/ath/ath6kl/txrx.c 		dev_kfree_skb(skb);
skb               310 drivers/net/wireless/ath/ath6kl/txrx.c 		   skb, skb->len, eid);
skb               319 drivers/net/wireless/ath/ath6kl/txrx.c 			   skb, skb->len);
skb               337 drivers/net/wireless/ath/ath6kl/txrx.c 	cookie->skb = skb;
skb               339 drivers/net/wireless/ath/ath6kl/txrx.c 	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
skb               341 drivers/net/wireless/ath/ath6kl/txrx.c 	cookie->htc_pkt.skb = skb;
skb               352 drivers/net/wireless/ath/ath6kl/txrx.c 	dev_kfree_skb(skb);
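[note] ath6kl_control_tx above shows the ownership discipline used throughout the driver: each skb handed to HTC travels with a "cookie" descriptor holding a back-pointer, so the tx-complete path can recover and free the right buffer, while every early-error path frees the skb itself. A schematic of that pairing:

struct cookie_sketch {
	struct sk_buff *skb;
	/* the real cookie embeds a struct htc_packet here */
};

static int queue_for_htc(struct cookie_sketch *cookie, struct sk_buff *skb)
{
	if (!cookie) {
		dev_kfree_skb(skb);	/* no descriptor: drop, don't leak */
		return -ENOMEM;
	}

	cookie->skb = skb;	/* completion frees via this back-pointer */
	return 0;
}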
skb               356 drivers/net/wireless/ath/ath6kl/txrx.c netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
skb               369 drivers/net/wireless/ath/ath6kl/txrx.c 	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
skb               375 drivers/net/wireless/ath/ath6kl/txrx.c 		   skb, skb->data, skb->len);
skb               389 drivers/net/wireless/ath/ath6kl/txrx.c 		if (ath6kl_powersave_ap(vif, skb, &flags))
skb               396 drivers/net/wireless/ath/ath6kl/txrx.c 			csum_start = skb->csum_start -
skb               397 drivers/net/wireless/ath/ath6kl/txrx.c 					(skb_network_header(skb) - skb->head) +
skb               399 drivers/net/wireless/ath/ath6kl/txrx.c 			csum_dest = skb->csum_offset + csum_start;
skb               402 drivers/net/wireless/ath/ath6kl/txrx.c 		if (skb_cow_head(skb, dev->needed_headroom)) {
skb               404 drivers/net/wireless/ath/ath6kl/txrx.c 			kfree_skb(skb);
skb               408 drivers/net/wireless/ath/ath6kl/txrx.c 		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
skb               427 drivers/net/wireless/ath/ath6kl/txrx.c 		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
skb               444 drivers/net/wireless/ath/ath6kl/txrx.c 				    vif->fw_vif_idx, skb,
skb               456 drivers/net/wireless/ath/ath6kl/txrx.c 		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
skb               480 drivers/net/wireless/ath/ath6kl/txrx.c 	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
skb               481 drivers/net/wireless/ath/ath6kl/txrx.c 	    skb_cloned(skb)) {
skb               491 drivers/net/wireless/ath/ath6kl/txrx.c 		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
skb               494 drivers/net/wireless/ath/ath6kl/txrx.c 		kfree_skb(skb);
skb               495 drivers/net/wireless/ath/ath6kl/txrx.c 		skb = nskb;
skb               498 drivers/net/wireless/ath/ath6kl/txrx.c 	cookie->skb = skb;
skb               500 drivers/net/wireless/ath/ath6kl/txrx.c 	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
skb               502 drivers/net/wireless/ath/ath6kl/txrx.c 	cookie->htc_pkt.skb = skb;
skb               505 drivers/net/wireless/ath/ath6kl/txrx.c 			skb->data, skb->len);
skb               516 drivers/net/wireless/ath/ath6kl/txrx.c 	dev_kfree_skb(skb);
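[note] Near the end of ath6kl_data_tx above, the driver checks whether pushing the HTC header would leave skb->data misaligned while the skb is cloned (and thus not writable in place); if so it copies into a fresh buffer with extra headroom. A sketch of that realignment step, with hdr_len standing in for the driver's HTC_HDR_LENGTH:

#include <linux/kernel.h>
#include <linux/skbuff.h>

static struct sk_buff *align_for_htc(struct sk_buff *skb, int hdr_len)
{
	struct sk_buff *nskb;

	if (!IS_ALIGNED((unsigned long)skb->data - hdr_len, 4) &&
	    skb_cloned(skb)) {
		/* copy into a fresh buffer with hdr_len of headroom */
		nskb = skb_copy_expand(skb, hdr_len, 0, GFP_ATOMIC);
		kfree_skb(skb);
		return nskb;	/* may be NULL on allocation failure */
	}

	return skb;	/* usable in place */
}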
skb               686 drivers/net/wireless/ath/ath6kl/txrx.c 	struct sk_buff *skb;
skb               716 drivers/net/wireless/ath/ath6kl/txrx.c 		skb = ath6kl_cookie->skb;
skb               720 drivers/net/wireless/ath/ath6kl/txrx.c 		if (WARN_ON_ONCE(!skb || !skb->data)) {
skb               721 drivers/net/wireless/ath/ath6kl/txrx.c 			dev_kfree_skb(skb);
skb               726 drivers/net/wireless/ath/ath6kl/txrx.c 		__skb_queue_tail(&skb_queue, skb);
skb               728 drivers/net/wireless/ath/ath6kl/txrx.c 		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
skb               772 drivers/net/wireless/ath/ath6kl/txrx.c 				   __func__, skb, packet->buf, packet->act_len,
skb               777 drivers/net/wireless/ath/ath6kl/txrx.c 				   __func__, skb, packet->buf, packet->act_len,
skb               782 drivers/net/wireless/ath/ath6kl/txrx.c 			vif->ndev->stats.tx_bytes += skb->len;
skb               828 drivers/net/wireless/ath/ath6kl/txrx.c 					      struct sk_buff *skb)
skb               830 drivers/net/wireless/ath/ath6kl/txrx.c 	if (!skb)
skb               833 drivers/net/wireless/ath/ath6kl/txrx.c 	skb->dev = dev;
skb               835 drivers/net/wireless/ath/ath6kl/txrx.c 	if (!(skb->dev->flags & IFF_UP)) {
skb               836 drivers/net/wireless/ath/ath6kl/txrx.c 		dev_kfree_skb(skb);
skb               840 drivers/net/wireless/ath/ath6kl/txrx.c 	skb->protocol = eth_type_trans(skb, skb->dev);
skb               842 drivers/net/wireless/ath/ath6kl/txrx.c 	netif_rx_ni(skb);
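[note] ath6kl_deliver_frames_to_nw_stack above is the canonical hand-off to the network stack: attach the netdev, drop if the interface is down, classify with eth_type_trans(), and inject with netif_rx_ni() (the process-context variant used here, since this driver receives outside softirq context). A direct sketch:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void deliver_to_stack(struct net_device *dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;
	if (!(dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);	/* interface down: drop */
		return;
	}

	/* sets skb->protocol and skb->pkt_type, pulls the MAC header */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx_ni(skb);
}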
skb               847 drivers/net/wireless/ath/ath6kl/txrx.c 	struct sk_buff *skb;
skb               850 drivers/net/wireless/ath/ath6kl/txrx.c 		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
skb               851 drivers/net/wireless/ath/ath6kl/txrx.c 		if (!skb) {
skb               855 drivers/net/wireless/ath/ath6kl/txrx.c 		skb_queue_tail(q, skb);
skb               862 drivers/net/wireless/ath/ath6kl/txrx.c 	struct sk_buff *skb = NULL;
skb               869 drivers/net/wireless/ath/ath6kl/txrx.c 	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);
skb               871 drivers/net/wireless/ath/ath6kl/txrx.c 	return skb;
skb               877 drivers/net/wireless/ath/ath6kl/txrx.c 	struct sk_buff *skb;
skb               896 drivers/net/wireless/ath/ath6kl/txrx.c 		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
skb               897 drivers/net/wireless/ath/ath6kl/txrx.c 		if (!skb)
skb               900 drivers/net/wireless/ath/ath6kl/txrx.c 		packet = (struct htc_packet *) skb->head;
skb               901 drivers/net/wireless/ath/ath6kl/txrx.c 		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
skb               902 drivers/net/wireless/ath/ath6kl/txrx.c 			size_t len = skb_headlen(skb);
skb               903 drivers/net/wireless/ath/ath6kl/txrx.c 			skb->data = PTR_ALIGN(skb->data - 4, 4);
skb               904 drivers/net/wireless/ath/ath6kl/txrx.c 			skb_set_tail_pointer(skb, len);
skb               906 drivers/net/wireless/ath/ath6kl/txrx.c 		set_htc_rxpkt_info(packet, skb, skb->data,
skb               908 drivers/net/wireless/ath/ath6kl/txrx.c 		packet->skb = skb;
skb               919 drivers/net/wireless/ath/ath6kl/txrx.c 	struct sk_buff *skb;
skb               922 drivers/net/wireless/ath/ath6kl/txrx.c 		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
skb               923 drivers/net/wireless/ath/ath6kl/txrx.c 		if (!skb)
skb               926 drivers/net/wireless/ath/ath6kl/txrx.c 		packet = (struct htc_packet *) skb->head;
skb               927 drivers/net/wireless/ath/ath6kl/txrx.c 		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
skb               928 drivers/net/wireless/ath/ath6kl/txrx.c 			size_t len = skb_headlen(skb);
skb               929 drivers/net/wireless/ath/ath6kl/txrx.c 			skb->data = PTR_ALIGN(skb->data - 4, 4);
skb               930 drivers/net/wireless/ath/ath6kl/txrx.c 			skb_set_tail_pointer(skb, len);
skb               932 drivers/net/wireless/ath/ath6kl/txrx.c 		set_htc_rxpkt_info(packet, skb, skb->data,
skb               934 drivers/net/wireless/ath/ath6kl/txrx.c 		packet->skb = skb;
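[note] Both rx-refill loops above apply the same fix-up: if the freshly allocated buffer's skb->data is not 4-byte aligned, nudge it backwards into the headroom with PTR_ALIGN and restore the tail pointer. Writing skb->data directly is unusual but is exactly what the driver does; it is safe only because ath6kl_buf_alloc guaranteed spare headroom. A sketch:

#include <linux/kernel.h>
#include <linux/skbuff.h>

static void align_rx_data(struct sk_buff *skb)
{
	if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
		size_t len = skb_headlen(skb);

		/* move data back into headroom to the previous 4B boundary */
		skb->data = PTR_ALIGN(skb->data - 4, 4);
		skb_set_tail_pointer(skb, len);
	}
}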
skb               991 drivers/net/wireless/ath/ath6kl/txrx.c 			     struct rxtid *rxtid, struct sk_buff *skb)
skb               999 drivers/net/wireless/ath/ath6kl/txrx.c 	framep = skb->data + mac_hdr_len;
skb              1000 drivers/net/wireless/ath/ath6kl/txrx.c 	amsdu_len = skb->len - mac_hdr_len;
skb              1043 drivers/net/wireless/ath/ath6kl/txrx.c 	dev_kfree_skb(skb);
skb              1049 drivers/net/wireless/ath/ath6kl/txrx.c 	struct sk_buff *skb;
skb              1079 drivers/net/wireless/ath/ath6kl/txrx.c 		if ((order == 1) && (!node->skb))
skb              1082 drivers/net/wireless/ath/ath6kl/txrx.c 		if (node->skb) {
skb              1085 drivers/net/wireless/ath/ath6kl/txrx.c 						 node->skb);
skb              1087 drivers/net/wireless/ath/ath6kl/txrx.c 				skb_queue_tail(&rxtid->q, node->skb);
skb              1088 drivers/net/wireless/ath/ath6kl/txrx.c 			node->skb = NULL;
skb              1101 drivers/net/wireless/ath/ath6kl/txrx.c 	while ((skb = skb_dequeue(&rxtid->q)))
skb              1102 drivers/net/wireless/ath/ath6kl/txrx.c 		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
skb              1111 drivers/net/wireless/ath/ath6kl/txrx.c 	struct sk_buff *skb;
skb              1127 drivers/net/wireless/ath/ath6kl/txrx.c 			while ((skb = skb_dequeue(&rxtid->q)))
skb              1129 drivers/net/wireless/ath/ath6kl/txrx.c 								  skb);
skb              1191 drivers/net/wireless/ath/ath6kl/txrx.c 	dev_kfree_skb(node->skb);
skb              1194 drivers/net/wireless/ath/ath6kl/txrx.c 	node->skb = frame;
skb              1213 drivers/net/wireless/ath/ath6kl/txrx.c 		if (rxtid->hold_q[idx].skb) {
skb              1238 drivers/net/wireless/ath/ath6kl/txrx.c 	struct sk_buff *skb = NULL;
skb              1267 drivers/net/wireless/ath/ath6kl/txrx.c 		skb = skb_dequeue(&conn->apsdq);
skb              1282 drivers/net/wireless/ath/ath6kl/txrx.c 		ath6kl_data_tx(skb, vif->ndev);
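[note] The aggregation excerpts above buffer out-of-order frames in a per-TID hold queue (node->skb), then release them in sequence order into a delivery queue once the window allows. A much-simplified sketch of that in-order release; hold_q indexing, sequence arithmetic, and A-MSDU slicing are all elided.

#include <linux/skbuff.h>

struct hold_slot_sketch {
	struct sk_buff *skb;	/* NULL means "sequence number missing" */
};

static void release_in_order(struct hold_slot_sketch *hold_q, int num,
			     struct sk_buff_head *deliverq)
{
	int i;

	for (i = 0; i < num; i++) {
		if (!hold_q[i].skb)
			break;	/* gap: stop at the first missing frame */

		__skb_queue_tail(deliverq, hold_q[i].skb);
		hold_q[i].skb = NULL;	/* slot is free for reuse */
	}

	/* the caller then dequeues deliverq and hands each skb to the
	 * stack, as in the deliver_to_stack sketch earlier */
}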
skb              1304 drivers/net/wireless/ath/ath6kl/txrx.c 	struct sk_buff *skb = packet->pkt_cntxt;
skb              1324 drivers/net/wireless/ath/ath6kl/txrx.c 		   __func__, ar, ept, skb, packet->buf,
skb              1328 drivers/net/wireless/ath/ath6kl/txrx.c 		dev_kfree_skb(skb);
skb              1332 drivers/net/wireless/ath/ath6kl/txrx.c 	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
skb              1333 drivers/net/wireless/ath/ath6kl/txrx.c 	skb_pull(skb, HTC_HDR_LENGTH);
skb              1336 drivers/net/wireless/ath/ath6kl/txrx.c 			skb->data, skb->len);
skb              1341 drivers/net/wireless/ath/ath6kl/txrx.c 			ath6kl_wmi_control_rx(ar->wmi, skb);
skb              1345 drivers/net/wireless/ath/ath6kl/txrx.c 		wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
skb              1348 drivers/net/wireless/ath/ath6kl/txrx.c 		wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
skb              1353 drivers/net/wireless/ath/ath6kl/txrx.c 		dev_kfree_skb(skb);
skb              1368 drivers/net/wireless/ath/ath6kl/txrx.c 	skb->dev = vif->ndev;
skb              1372 drivers/net/wireless/ath/ath6kl/txrx.c 			skb_pull(skb, EPPING_ALIGNMENT_PAD);
skb              1373 drivers/net/wireless/ath/ath6kl/txrx.c 		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
skb              1382 drivers/net/wireless/ath/ath6kl/txrx.c 	dhdr = (struct wmi_data_hdr *) skb->data;
skb              1395 drivers/net/wireless/ath/ath6kl/txrx.c 		dev_kfree_skb(skb);
skb              1426 drivers/net/wireless/ath/ath6kl/txrx.c 		datap = (struct ethhdr *) (skb->data + offset);
skb              1430 drivers/net/wireless/ath/ath6kl/txrx.c 			dev_kfree_skb(skb);
skb              1515 drivers/net/wireless/ath/ath6kl/txrx.c 			dev_kfree_skb(skb);
skb              1526 drivers/net/wireless/ath/ath6kl/txrx.c 	skb_pull(skb, sizeof(struct wmi_data_hdr));
skb              1530 drivers/net/wireless/ath/ath6kl/txrx.c 		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
skb              1533 drivers/net/wireless/ath/ath6kl/txrx.c 		meta = (struct wmi_rx_meta_v2 *) skb->data;
skb              1535 drivers/net/wireless/ath/ath6kl/txrx.c 			skb->ip_summed = CHECKSUM_COMPLETE;
skb              1536 drivers/net/wireless/ath/ath6kl/txrx.c 			skb->csum = (__force __wsum) meta->csum;
skb              1538 drivers/net/wireless/ath/ath6kl/txrx.c 		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
skb              1544 drivers/net/wireless/ath/ath6kl/txrx.c 	skb_pull(skb, pad_before_data_start);
skb              1547 drivers/net/wireless/ath/ath6kl/txrx.c 		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
skb              1549 drivers/net/wireless/ath/ath6kl/txrx.c 		status = ath6kl_wmi_dot3_2_dix(skb);
skb              1556 drivers/net/wireless/ath/ath6kl/txrx.c 		dev_kfree_skb(skb);
skb              1561 drivers/net/wireless/ath/ath6kl/txrx.c 		dev_kfree_skb(skb);
skb              1566 drivers/net/wireless/ath/ath6kl/txrx.c 		datap = (struct ethhdr *) skb->data;
skb              1572 drivers/net/wireless/ath/ath6kl/txrx.c 			skb1 = skb_copy(skb, GFP_ATOMIC);
skb              1583 drivers/net/wireless/ath/ath6kl/txrx.c 				skb1 = skb;
skb              1584 drivers/net/wireless/ath/ath6kl/txrx.c 				skb = NULL;
skb              1586 drivers/net/wireless/ath/ath6kl/txrx.c 				dev_kfree_skb(skb);
skb              1587 drivers/net/wireless/ath/ath6kl/txrx.c 				skb = NULL;
skb              1593 drivers/net/wireless/ath/ath6kl/txrx.c 		if (skb == NULL) {
skb              1599 drivers/net/wireless/ath/ath6kl/txrx.c 	datap = (struct ethhdr *) skb->data;
skb              1612 drivers/net/wireless/ath/ath6kl/txrx.c 					  is_amsdu, skb)) {
skb              1620 drivers/net/wireless/ath/ath6kl/txrx.c 	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
skb              1654 drivers/net/wireless/ath/ath6kl/txrx.c 				if (rxtid->hold_q[j].skb) {
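[note] In the rx-complete path above, once the WMI data header is stripped, a v2 meta header can carry a firmware-computed checksum that the driver exposes to the stack as CHECKSUM_COMPLETE. A sketch of that hand-off; rx_meta_sketch stands in for wmi_rx_meta_v2, whose real layout and validity flag are not reproduced here.

#include <linux/skbuff.h>
#include <asm/byteorder.h>

struct rx_meta_sketch {
	__le16 csum;
	/* ... remaining meta fields elided ... */
} __packed;

static void apply_rx_csum(struct sk_buff *skb, bool csum_valid)
{
	struct rx_meta_sketch *meta = (struct rx_meta_sketch *)skb->data;

	if (csum_valid) {
		/* hand the firmware's ones-complement sum to the stack */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)meta->csum;
	}

	skb_pull(skb, sizeof(*meta));	/* strip the meta header */
}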
skb                80 drivers/net/wireless/ath/ath6kl/usb.c 	struct sk_buff *skb;
skb               170 drivers/net/wireless/ath/ath6kl/usb.c 	dev_kfree_skb(urb_context->skb);
skb               171 drivers/net/wireless/ath/ath6kl/usb.c 	urb_context->skb = NULL;
skb               422 drivers/net/wireless/ath/ath6kl/usb.c 		urb_context->skb = dev_alloc_skb(buffer_length);
skb               423 drivers/net/wireless/ath/ath6kl/usb.c 		if (urb_context->skb == NULL)
skb               433 drivers/net/wireless/ath/ath6kl/usb.c 				  urb_context->skb->data,
skb               441 drivers/net/wireless/ath/ath6kl/usb.c 			   buffer_length, urb_context->skb);
skb               501 drivers/net/wireless/ath/ath6kl/usb.c 	struct sk_buff *skb = NULL;
skb               534 drivers/net/wireless/ath/ath6kl/usb.c 	skb = urb_context->skb;
skb               537 drivers/net/wireless/ath/ath6kl/usb.c 	urb_context->skb = NULL;
skb               538 drivers/net/wireless/ath/ath6kl/usb.c 	skb_put(skb, urb->actual_length);
skb               541 drivers/net/wireless/ath/ath6kl/usb.c 	skb_queue_tail(&pipe->io_comp_queue, skb);
skb               558 drivers/net/wireless/ath/ath6kl/usb.c 	struct sk_buff *skb;
skb               571 drivers/net/wireless/ath/ath6kl/usb.c 	skb = urb_context->skb;
skb               572 drivers/net/wireless/ath/ath6kl/usb.c 	urb_context->skb = NULL;
skb               576 drivers/net/wireless/ath/ath6kl/usb.c 	skb_queue_tail(&pipe->io_comp_queue, skb);
skb               586 drivers/net/wireless/ath/ath6kl/usb.c 	struct sk_buff *skb;
skb               590 drivers/net/wireless/ath/ath6kl/usb.c 	while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
skb               593 drivers/net/wireless/ath/ath6kl/usb.c 				   "ath6kl usb xmit callback buf:0x%p\n", skb);
skb               594 drivers/net/wireless/ath/ath6kl/usb.c 			ath6kl_core_tx_complete(ar_usb->ar, skb);
skb               597 drivers/net/wireless/ath/ath6kl/usb.c 				   "ath6kl usb recv callback buf:0x%p\n", skb);
skb               598 drivers/net/wireless/ath/ath6kl/usb.c 			ath6kl_core_rx_complete(ar_usb->ar, skb,
skb               701 drivers/net/wireless/ath/ath6kl/usb.c 			   struct sk_buff *hdr_skb, struct sk_buff *skb)
skb               712 drivers/net/wireless/ath/ath6kl/usb.c 		   __func__, PipeID, skb);
skb               728 drivers/net/wireless/ath/ath6kl/usb.c 	urb_context->skb = skb;
skb               730 drivers/net/wireless/ath/ath6kl/usb.c 	data = skb->data;
skb               731 drivers/net/wireless/ath/ath6kl/usb.c 	len = skb->len;
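[note] The usb.c lines above park each in-flight skb in a per-URB context so the completion handler can queue it for processing; on send, the URB's transfer buffer simply aliases skb->data. A sketch of the tx side of that pairing; pipe_handle and the completion callback are placeholders, and the skb stays owned by the caller if submission fails.

#include <linux/skbuff.h>
#include <linux/usb.h>

struct urb_ctx_sketch {
	struct sk_buff *skb;
};

static int usb_send_skb(struct usb_device *udev, unsigned int pipe_handle,
			struct urb_ctx_sketch *ctx, struct sk_buff *skb,
			usb_complete_t done)
{
	struct urb *urb;
	int ret;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	ctx->skb = skb;	/* ownership moves to the completion path */
	usb_fill_bulk_urb(urb, udev, pipe_handle, skb->data, skb->len,
			  done, ctx);

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		ctx->skb = NULL;	/* caller still owns the skb */

	/* drop our reference; the HCD holds its own while the URB flies */
	usb_free_urb(urb);
	return ret;
}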
skb               160 drivers/net/wireless/ath/ath6kl/wmi.c int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
skb               169 drivers/net/wireless/ath/ath6kl/wmi.c 	if (WARN_ON(skb == NULL))
skb               173 drivers/net/wireless/ath/ath6kl/wmi.c 	if (skb_headroom(skb) < size)
skb               176 drivers/net/wireless/ath/ath6kl/wmi.c 	eth_hdr = (struct ethhdr *) skb->data;
skb               185 drivers/net/wireless/ath/ath6kl/wmi.c 	new_len = skb->len - sizeof(*eth_hdr) + sizeof(*llc_hdr);
skb               187 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_push(skb, sizeof(struct ath6kl_llc_snap_hdr));
skb               188 drivers/net/wireless/ath/ath6kl/wmi.c 	datap = skb->data;
skb               206 drivers/net/wireless/ath/ath6kl/wmi.c static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
skb               212 drivers/net/wireless/ath/ath6kl/wmi.c 	if (WARN_ON(skb == NULL || version == NULL))
skb               217 drivers/net/wireless/ath/ath6kl/wmi.c 		skb_push(skb, WMI_MAX_TX_META_SZ);
skb               218 drivers/net/wireless/ath/ath6kl/wmi.c 		v1 = (struct wmi_tx_meta_v1 *) skb->data;
skb               224 drivers/net/wireless/ath/ath6kl/wmi.c 		skb_push(skb, WMI_MAX_TX_META_SZ);
skb               225 drivers/net/wireless/ath/ath6kl/wmi.c 		v2 = (struct wmi_tx_meta_v2 *) skb->data;
skb               234 drivers/net/wireless/ath/ath6kl/wmi.c int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
skb               242 drivers/net/wireless/ath/ath6kl/wmi.c 	if (WARN_ON(skb == NULL || (if_idx > wmi->parent_dev->vif_max - 1)))
skb               246 drivers/net/wireless/ath/ath6kl/wmi.c 		ret = ath6kl_wmi_meta_add(wmi, skb, &meta_ver, tx_meta_info);
skb               251 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_push(skb, sizeof(struct wmi_data_hdr));
skb               253 drivers/net/wireless/ath/ath6kl/wmi.c 	data_hdr = (struct wmi_data_hdr *)skb->data;
skb               299 drivers/net/wireless/ath/ath6kl/wmi.c 				       struct sk_buff *skb,
skb               312 drivers/net/wireless/ath/ath6kl/wmi.c 	if (WARN_ON(skb == NULL))
skb               315 drivers/net/wireless/ath/ath6kl/wmi.c 	datap = skb->data;
skb               349 drivers/net/wireless/ath/ath6kl/wmi.c 		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
skb               388 drivers/net/wireless/ath/ath6kl/wmi.c int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
skb               397 drivers/net/wireless/ath/ath6kl/wmi.c 	if (WARN_ON(skb == NULL))
skb               400 drivers/net/wireless/ath/ath6kl/wmi.c 	datap = skb->data;
skb               411 drivers/net/wireless/ath/ath6kl/wmi.c 		skb_pull(skb, hdr_size);
skb               413 drivers/net/wireless/ath/ath6kl/wmi.c 		skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
skb               416 drivers/net/wireless/ath/ath6kl/wmi.c 	datap = skb->data;
skb               440 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
skb               441 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_push(skb, sizeof(eth_hdr));
skb               443 drivers/net/wireless/ath/ath6kl/wmi.c 	datap = skb->data;
skb               454 drivers/net/wireless/ath/ath6kl/wmi.c int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb)
skb               460 drivers/net/wireless/ath/ath6kl/wmi.c 	if (WARN_ON(skb == NULL))
skb               463 drivers/net/wireless/ath/ath6kl/wmi.c 	datap = skb->data;
skb               470 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
skb               471 drivers/net/wireless/ath/ath6kl/wmi.c 	datap = skb->data;
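[note] The wmi.c lines above convert between DIX (Ethernet II) and 802.3/LLC-SNAP framing: the DIX ethertype moves into an 8-byte SNAP header (AA AA 03 | OUI | ethertype) and the 802.3 header gains a length field; the reverse path pulls the SNAP header back out. A sketch of the forward direction, mirroring the logic visible above:

#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/string.h>

struct llc_snap_sketch {
	u8 dsap, ssap, cntl;
	u8 org_code[3];
	__be16 eth_type;
} __packed;

static int dix_to_dot3(struct sk_buff *skb)
{
	struct ethhdr *eth, saved;
	struct llc_snap_sketch *llc;
	u16 payload_len;

	if (skb_headroom(skb) < sizeof(*llc))
		return -ENOMEM;	/* need room for the SNAP header */

	memcpy(&saved, skb->data, sizeof(saved));	/* save DIX header */
	/* 802.3 length field covers LLC/SNAP plus payload */
	payload_len = skb->len - sizeof(saved) + sizeof(*llc);

	skb_push(skb, sizeof(*llc));
	eth = (struct ethhdr *)skb->data;
	memcpy(eth->h_dest, saved.h_dest, ETH_ALEN);
	memcpy(eth->h_source, saved.h_source, ETH_ALEN);
	eth->h_proto = htons(payload_len);	/* length, not ethertype */

	llc = (struct llc_snap_sketch *)(skb->data + sizeof(*eth));
	llc->dsap = 0xaa;
	llc->ssap = 0xaa;
	llc->cntl = 0x03;
	memset(llc->org_code, 0, sizeof(llc->org_code));
	llc->eth_type = saved.h_proto;	/* original ethertype moves here */

	return 0;
}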
skb               712 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb               714 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_buf_alloc(size);
skb               715 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb               718 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_put(skb, size);
skb               720 drivers/net/wireless/ath/ath6kl/wmi.c 		memset(skb->data, 0, size);
skb               722 drivers/net/wireless/ath/ath6kl/wmi.c 	return skb;
skb               729 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb               732 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(0);
skb               733 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb               736 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG);
skb               763 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb               766 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb               767 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb               770 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct roam_ctrl_cmd *) skb->data;
skb               779 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
skb               785 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb               788 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb               789 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb               792 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct roam_ctrl_cmd *) skb->data;
skb               798 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
skb               805 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb               808 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb               809 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb               812 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct set_beacon_int_cmd *) skb->data;
skb               815 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
skb               821 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb               824 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb               825 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb               828 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct set_dtim_cmd *) skb->data;
skb               831 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
skb               837 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb               840 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb               841 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb               844 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct roam_ctrl_cmd *) skb->data;
skb               850 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
skb              1400 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              1403 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              1404 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              1407 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
skb              1410 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
skb              1616 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              1619 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              1620 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              1623 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_txe_notify_cmd *) skb->data;
skb              1628 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, idx, skb, WMI_SET_TXE_NOTIFY_CMDID,
skb              1634 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              1638 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              1639 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              1642 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_rssi_filter_cmd *) skb->data;
skb              1645 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_RSSI_FILTER_CMDID,
skb              1653 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              1656 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              1657 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              1660 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
skb              1663 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
skb              1788 drivers/net/wireless/ath/ath6kl/wmi.c int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
skb              1796 drivers/net/wireless/ath/ath6kl/wmi.c 	if (WARN_ON(skb == NULL ||
skb              1798 drivers/net/wireless/ath/ath6kl/wmi.c 		dev_kfree_skb(skb);
skb              1803 drivers/net/wireless/ath/ath6kl/wmi.c 		   cmd_id, skb->len, sync_flag);
skb              1805 drivers/net/wireless/ath/ath6kl/wmi.c 			skb->data, skb->len);
skb              1808 drivers/net/wireless/ath/ath6kl/wmi.c 		dev_kfree_skb(skb);
skb              1821 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_push(skb, sizeof(struct wmi_cmd_hdr));
skb              1823 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
skb              1830 drivers/net/wireless/ath/ath6kl/wmi.c 		ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
skb              1833 drivers/net/wireless/ath/ath6kl/wmi.c 			dev_kfree_skb(skb);
skb              1839 drivers/net/wireless/ath/ath6kl/wmi.c 	ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
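[note] ath6kl_wmi_cmd_send above completes the builder idiom repeated through the rest of wmi.c: allocate a zeroed payload buffer, overlay the command struct on skb->data, then skb_push() the WMI command header in front before handing off to control tx. A self-contained sketch of that two-layer build; the structs and IDs below are illustrative stand-ins, not the driver's definitions.

#include <linux/skbuff.h>
#include <linux/string.h>
#include <asm/byteorder.h>

struct wmi_hdr_sketch {
	__le16 cmd_id;
	__le16 info1;
	__le16 reserved;
} __packed;

struct wmi_cmd_sketch {
	__le32 param;
} __packed;

static struct sk_buff *build_cmd(u32 param, u16 cmd_id)
{
	struct sk_buff *skb;
	struct wmi_cmd_sketch *cmd;
	struct wmi_hdr_sketch *hdr;

	/* headroom covers the header the send path will push */
	skb = alloc_skb(sizeof(*hdr) + sizeof(*cmd), GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_reserve(skb, sizeof(*hdr));

	/* payload first: zeroed, then filled in place on skb->data */
	cmd = (struct wmi_cmd_sketch *)skb_put(skb, sizeof(*cmd));
	memset(cmd, 0, sizeof(*cmd));
	cmd->param = cpu_to_le32(param);

	/* header last, prepended exactly as the send path does */
	hdr = (struct wmi_hdr_sketch *)skb_push(skb, sizeof(*hdr));
	hdr->cmd_id = cpu_to_le16(cmd_id);
	hdr->info1 = 0;
	hdr->reserved = 0;

	return skb;	/* ready for the transport layer */
}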
skb              1864 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              1883 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_connect_cmd));
skb              1884 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              1887 drivers/net/wireless/ath/ath6kl/wmi.c 	cc = (struct wmi_connect_cmd *) skb->data;
skb              1907 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID,
skb              1916 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              1925 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_reconnect_cmd));
skb              1926 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              1929 drivers/net/wireless/ath/ath6kl/wmi.c 	cc = (struct wmi_reconnect_cmd *) skb->data;
skb              1935 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID,
skb              1966 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              1982 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(size);
skb              1983 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              1986 drivers/net/wireless/ath/ath6kl/wmi.c 	sc = (struct wmi_start_scan_cmd *) skb->data;
skb              1997 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
skb              2015 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2043 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(size);
skb              2044 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2047 drivers/net/wireless/ath/ath6kl/wmi.c 	sc = (struct wmi_begin_scan_cmd *) skb->data;
skb              2081 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_BEGIN_SCAN_CMDID,
skb              2089 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2093 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
skb              2094 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2099 drivers/net/wireless/ath/ath6kl/wmi.c 	sc = (struct wmi_enable_sched_scan_cmd *) skb->data;
skb              2102 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
skb              2116 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2120 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
skb              2121 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2124 drivers/net/wireless/ath/ath6kl/wmi.c 	sc = (struct wmi_scan_params_cmd *) skb->data;
skb              2136 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID,
skb              2143 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2150 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2151 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2154 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_bss_filter_cmd *) skb->data;
skb              2158 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID,
skb              2166 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2185 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2186 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2189 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_probed_ssid_cmd *) skb->data;
skb              2195 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID,
skb              2204 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2208 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2209 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2212 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_listen_int_cmd *) skb->data;
skb              2216 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID,
skb              2224 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2228 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2229 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2232 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_bmiss_time_cmd *) skb->data;
skb              2236 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BMISS_TIME_CMDID,
skb              2243 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2247 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2248 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2251 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_power_mode_cmd *) skb->data;
skb              2255 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID,
skb              2265 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2269 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*pm));
skb              2270 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2273 drivers/net/wireless/ath/ath6kl/wmi.c 	pm = (struct wmi_power_params_cmd *)skb->data;
skb              2281 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID,
skb              2288 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2292 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2293 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2296 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_disc_timeout_cmd *) skb->data;
skb              2299 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID,
skb              2316 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2331 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2332 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2335 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_add_cipher_key_cmd *) skb->data;
skb              2350 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID,
skb              2358 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2362 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2363 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2366 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_add_krk_cmd *) skb->data;
skb              2369 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID,
skb              2377 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2384 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2385 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2388 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
skb              2391 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID,
skb              2400 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2410 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2411 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2414 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_setpmkid_cmd *) skb->data;
skb              2424 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID,
skb              2430 drivers/net/wireless/ath/ath6kl/wmi.c static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
skb              2436 drivers/net/wireless/ath/ath6kl/wmi.c 	if (WARN_ON(skb == NULL || ep_id == wmi->ep_id)) {
skb              2437 drivers/net/wireless/ath/ath6kl/wmi.c 		dev_kfree_skb(skb);
skb              2441 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_push(skb, sizeof(struct wmi_data_hdr));
skb              2443 drivers/net/wireless/ath/ath6kl/wmi.c 	data_hdr = (struct wmi_data_hdr *) skb->data;
skb              2447 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
skb              2454 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2475 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2476 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2479 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_sync_cmd *) skb->data;
skb              2488 drivers/net/wireless/ath/ath6kl/wmi.c 		data_sync_bufs[index].skb = ath6kl_buf_alloc(0);
skb              2489 drivers/net/wireless/ath/ath6kl/wmi.c 		if (data_sync_bufs[index].skb == NULL) {
skb              2506 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID,
skb              2513 drivers/net/wireless/ath/ath6kl/wmi.c 		if (WARN_ON(!data_sync_bufs[index].skb))
skb              2520 drivers/net/wireless/ath/ath6kl/wmi.c 		    ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
skb              2523 drivers/net/wireless/ath/ath6kl/wmi.c 		data_sync_bufs[index].skb = NULL;
skb              2533 drivers/net/wireless/ath/ath6kl/wmi.c 	dev_kfree_skb(skb);
skb              2537 drivers/net/wireless/ath/ath6kl/wmi.c 		dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].skb);
skb              2545 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2588 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2589 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2596 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_create_pstream_cmd *) skb->data;
skb              2630 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID,
skb              2638 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2648 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2649 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2652 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_delete_pstream_cmd *) skb->data;
skb              2661 drivers/net/wireless/ath/ath6kl/wmi.c 		dev_kfree_skb(skb);
skb              2672 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID,
skb              2696 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2705 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_ip_cmd));
skb              2706 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2709 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_ip_cmd *) skb->data;
skb              2713 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IP_CMDID,
skb              2769 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2796 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
skb              2797 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2800 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_tx_select_rates64_cmd *) skb->data;
skb              2812 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
skb              2821 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2846 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
skb              2847 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2850 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_tx_select_rates32_cmd *) skb->data;
skb              2862 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
skb              2883 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2893 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2894 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2897 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data;
skb              2906 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
skb              2928 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2938 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2939 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2942 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_wow_mode_cmd *) skb->data;
skb              2947 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID,
skb              2957 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2969 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(size);
skb              2970 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              2973 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_add_wow_pattern_cmd *) skb->data;
skb              2983 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID,
skb              2992 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              2996 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              2997 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3000 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_del_wow_pattern_cmd *) skb->data;
skb              3004 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID,
skb              3009 drivers/net/wireless/ath/ath6kl/wmi.c static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
skb              3016 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_push(skb, sizeof(struct wmix_cmd_hdr));
skb              3018 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
skb              3021 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag);
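[note] ath6kl_wmi_cmd_send_xtnd above shows how extension commands nest: a second (WMIX) header is pushed inside the payload, and the result goes out as an ordinary WMI_EXTENSION_CMDID through the normal send path. A sketch of just the wrapping step, with a schematic header layout:

#include <linux/skbuff.h>
#include <asm/byteorder.h>

struct wmix_hdr_sketch {
	__le32 cmd_id;
} __packed;

static void wmix_wrap(struct sk_buff *skb, u32 xtnd_cmd_id)
{
	struct wmix_hdr_sketch *hdr;

	/* inner header: identifies the extension sub-command */
	hdr = (struct wmix_hdr_sketch *)skb_push(skb, sizeof(*hdr));
	hdr->cmd_id = cpu_to_le32(xtnd_cmd_id);

	/* the caller then sends this skb as a regular WMI command with
	 * the extension command ID, which pushes the outer header */
}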
skb              3028 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3032 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3033 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3036 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmix_hb_challenge_resp_cmd *) skb->data;
skb              3040 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_HB_CHALLENGE_RESP_CMDID,
skb              3048 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3051 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3052 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3055 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct ath6kl_wmix_dbglog_cfg_module_cmd *) skb->data;
skb              3059 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_DBGLOG_CFG_MODULE_CMDID,
skb              3071 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3075 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_tx_pwr_cmd));
skb              3076 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3079 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
skb              3082 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID,
skb              3101 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3105 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_lpreamble_cmd));
skb              3106 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3109 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_lpreamble_cmd *) skb->data;
skb              3113 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID,
skb              3120 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3124 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_rts_cmd));
skb              3125 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3128 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_rts_cmd *) skb->data;
skb              3131 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID,
skb              3138 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3145 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_wmm_txop_cmd));
skb              3146 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3149 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
skb              3152 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID,
skb              3160 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3164 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3165 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3168 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_keepalive_cmd *) skb->data;
skb              3171 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID,
skb              3184 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3187 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3188 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3191 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_htcap_cmd *) skb->data;
skb              3213 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_HT_CAP_CMDID,
skb              3219 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3222 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(len);
skb              3223 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3226 drivers/net/wireless/ath/ath6kl/wmi.c 	memcpy(skb->data, buf, len);
skb              3228 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
skb              3235 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3239 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3240 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3243 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_mcast_filter_cmd *) skb->data;
skb              3246 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_MCAST_FILTER_CMDID,
skb              3254 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3265 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3266 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3269 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_mcast_filter_add_del_cmd *) skb->data;
skb              3271 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
skb              3281 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3285 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3286 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3289 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_sta_bmiss_enhance_cmd *) skb->data;
skb              3292 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
skb              3300 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3303 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3304 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3307 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_regdomain_cmd *) skb->data;
skb              3310 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, 0, skb,
skb              3391 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3395 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cm));
skb              3396 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3399 drivers/net/wireless/ath/ath6kl/wmi.c 	cm = (struct wmi_connect_cmd *) skb->data;
skb              3402 drivers/net/wireless/ath/ath6kl/wmi.c 	res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID,
skb              3414 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3417 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cm));
skb              3418 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3421 drivers/net/wireless/ath/ath6kl/wmi.c 	cm = (struct wmi_ap_set_mlme_cmd *) skb->data;
skb              3429 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
skb              3435 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3438 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3439 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3442 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_ap_hidden_ssid_cmd *) skb->data;
skb              3445 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_HIDDEN_SSID_CMDID,
skb              3453 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3455 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3456 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3459 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_ap_set_apsd_cmd *)skb->data;
skb              3462 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_APSD_CMDID,
skb              3470 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3472 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3473 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3476 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_ap_apsd_buffered_traffic_cmd *)skb->data;
skb              3481 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
skb              3512 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3516 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_ap_set_pvb_cmd));
skb              3517 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3520 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_ap_set_pvb_cmd *) skb->data;
skb              3525 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
skb              3535 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3539 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3540 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3543 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_rx_frame_format_cmd *) skb->data;
skb              3549 drivers/net/wireless/ath/ath6kl/wmi.c 	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID,
skb              3558 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3561 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
skb              3562 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3568 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_set_appie_cmd *) skb->data;
skb              3575 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID,
skb              3582 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3585 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
skb              3586 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3591 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_set_ie_cmd *) skb->data;
skb              3598 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IE_CMDID,
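
The two appie/ie senders above extend the same pattern to variable-length payloads: the buffer is sized as the fixed header plus the IE blob, and the blob is copied in right behind the header. A sketch under the same caveat (wmi_example_ie_cmd and WMI_EXAMPLE_IE_CMDID are hypothetical):

#include <linux/skbuff.h>

/* Hypothetical variable-length layout with a trailing IE blob. */
struct wmi_example_ie_cmd {
	u8 ie_len;
	u8 ie_info[];
} __packed;

static int example_set_ie(struct wmi *wmi, u8 if_idx, const u8 *ie, u8 ie_len)
{
	struct wmi_example_ie_cmd *p;
	struct sk_buff *skb;

	/* Fixed header plus however many IE bytes follow it. */
	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
	if (!skb)
		return -ENOMEM;

	p = (struct wmi_example_ie_cmd *) skb->data;
	p->ie_len = ie_len;
	memcpy(p->ie_info, ie, ie_len);	/* blob lands behind the header */

	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_EXAMPLE_IE_CMDID,
				   NO_SYNC_WMIFLAG);
}
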
skb              3604 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3607 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3608 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3613 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_disable_11b_rates_cmd *) skb->data;
skb              3616 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID,
skb              3622 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3625 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p));
skb              3626 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3631 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_remain_on_chnl_cmd *) skb->data;
skb              3634 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID,
skb              3646 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3657 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
skb              3658 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb) {
skb              3671 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_send_action_cmd *) skb->data;
skb              3677 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID,
skb              3685 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3696 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
skb              3697 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb) {
skb              3710 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_send_mgmt_cmd *) skb->data;
skb              3717 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_MGMT_CMDID,
skb              3751 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3758 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(cmd_len);
skb              3759 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3765 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_p2p_probe_response_cmd *) skb->data;
skb              3770 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
skb              3777 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3780 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p));
skb              3781 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3786 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_probe_req_report_cmd *) skb->data;
skb              3788 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID,
skb              3794 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3797 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p));
skb              3798 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3803 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_get_p2p_info *) skb->data;
skb              3805 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID,
skb              3818 drivers/net/wireless/ath/ath6kl/wmi.c 	struct sk_buff *skb;
skb              3821 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
skb              3822 drivers/net/wireless/ath/ath6kl/wmi.c 	if (!skb)
skb              3825 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_set_inact_period_cmd *) skb->data;
skb              3829 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_CONN_INACT_CMDID,
skb              3846 drivers/net/wireless/ath/ath6kl/wmi.c static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
skb              3854 drivers/net/wireless/ath/ath6kl/wmi.c 	if (skb->len < sizeof(struct wmix_cmd_hdr)) {
skb              3859 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmix_cmd_hdr *) skb->data;
skb              3862 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_pull(skb, sizeof(struct wmix_cmd_hdr));
skb              3864 drivers/net/wireless/ath/ath6kl/wmi.c 	datap = skb->data;
skb              3865 drivers/net/wireless/ath/ath6kl/wmi.c 	len = skb->len;
skb              3974 drivers/net/wireless/ath/ath6kl/wmi.c static int ath6kl_wmi_proc_events(struct wmi *wmi, struct sk_buff *skb)
skb              3983 drivers/net/wireless/ath/ath6kl/wmi.c 	cmd = (struct wmi_cmd_hdr *) skb->data;
skb              3987 drivers/net/wireless/ath/ath6kl/wmi.c 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
skb              3988 drivers/net/wireless/ath/ath6kl/wmi.c 	datap = skb->data;
skb              3989 drivers/net/wireless/ath/ath6kl/wmi.c 	len = skb->len;
skb              4045 drivers/net/wireless/ath/ath6kl/wmi.c 		ret = ath6kl_wmi_control_rx_xtnd(wmi, skb);
skb              4118 drivers/net/wireless/ath/ath6kl/wmi.c 	dev_kfree_skb(skb);
skb              4123 drivers/net/wireless/ath/ath6kl/wmi.c int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
skb              4125 drivers/net/wireless/ath/ath6kl/wmi.c 	if (WARN_ON(skb == NULL))
skb              4128 drivers/net/wireless/ath/ath6kl/wmi.c 	if (skb->len < sizeof(struct wmi_cmd_hdr)) {
skb              4130 drivers/net/wireless/ath/ath6kl/wmi.c 		dev_kfree_skb(skb);
skb              4134 drivers/net/wireless/ath/ath6kl/wmi.c 	trace_ath6kl_wmi_event(skb->data, skb->len);
skb              4136 drivers/net/wireless/ath/ath6kl/wmi.c 	return ath6kl_wmi_proc_events(wmi, skb);
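
On the receive side, ath6kl_wmi_control_rx() and the extended handler above follow a guard-peek-strip sequence: reject buffers shorter than the command header, read the event ID, skb_pull() the header off, then dispatch on what remains. A condensed sketch of that sequence (example_wmi_rx is hypothetical; wmi_cmd_hdr and its cmd_id field are the real ath6kl layout):

#include <linux/skbuff.h>

static int example_wmi_rx(struct wmi *wmi, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *hdr;
	u16 id;

	/* Guard: the buffer must at least cover the command header. */
	if (skb->len < sizeof(struct wmi_cmd_hdr)) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	hdr = (struct wmi_cmd_hdr *) skb->data;
	id = le16_to_cpu(hdr->cmd_id);

	/* Strip the header; handlers see only the event payload. */
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));

	switch (id) {
	/* ... per-event handlers operate on skb->data / skb->len ... */
	default:
		break;
	}

	dev_kfree_skb(skb);	/* rx path owns and frees the buffer */
	return 0;
}
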
skb                99 drivers/net/wireless/ath/ath6kl/wmi.h 	struct sk_buff *skb;
skb              2538 drivers/net/wireless/ath/ath6kl/wmi.h int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
skb              2539 drivers/net/wireless/ath/ath6kl/wmi.h int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
skb              2544 drivers/net/wireless/ath/ath6kl/wmi.h int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
skb              2545 drivers/net/wireless/ath/ath6kl/wmi.h int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
skb              2547 drivers/net/wireless/ath/ath6kl/wmi.h 				       struct sk_buff *skb, u32 layer2_priority,
skb              2550 drivers/net/wireless/ath/ath6kl/wmi.h int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
skb              2552 drivers/net/wireless/ath/ath6kl/wmi.h int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
skb               475 drivers/net/wireless/ath/ath9k/ath9k.h 			  struct sk_buff *skb);
skb               544 drivers/net/wireless/ath/ath9k/ath9k.h 					struct sk_buff *skb)
skb               595 drivers/net/wireless/ath/ath9k/ath9k.h void ath_assign_seq(struct ath_common *common, struct sk_buff *skb);
skb               596 drivers/net/wireless/ath/ath9k/ath9k.h int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
skb               599 drivers/net/wireless/ath/ath9k/ath9k.h 		 struct sk_buff *skb);
skb              1085 drivers/net/wireless/ath/ath9k/ath9k.h int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
skb              1092 drivers/net/wireless/ath/ath9k/ath9k.h 				  struct sk_buff *skb,
skb                75 drivers/net/wireless/ath/ath9k/beacon.c 	struct sk_buff *skb = bf->bf_mpdu;
skb                89 drivers/net/wireless/ath/ath9k/beacon.c 	info.pkt_len = skb->len + FCS_LEN;
skb                98 drivers/net/wireless/ath/ath9k/beacon.c 	info.buf_len[0] = roundup(skb->len, 4);
skb               119 drivers/net/wireless/ath/ath9k/beacon.c 	struct sk_buff *skb;
skb               129 drivers/net/wireless/ath/ath9k/beacon.c 	skb = bf->bf_mpdu;
skb               130 drivers/net/wireless/ath/ath9k/beacon.c 	if (skb) {
skb               132 drivers/net/wireless/ath/ath9k/beacon.c 				 skb->len, DMA_TO_DEVICE);
skb               133 drivers/net/wireless/ath/ath9k/beacon.c 		dev_kfree_skb_any(skb);
skb               138 drivers/net/wireless/ath/ath9k/beacon.c 	skb = ieee80211_beacon_get(hw, vif);
skb               139 drivers/net/wireless/ath/ath9k/beacon.c 	if (skb == NULL)
skb               142 drivers/net/wireless/ath/ath9k/beacon.c 	bf->bf_mpdu = skb;
skb               144 drivers/net/wireless/ath/ath9k/beacon.c 	mgmt_hdr = (struct ieee80211_mgmt *)skb->data;
skb               147 drivers/net/wireless/ath/ath9k/beacon.c 	info = IEEE80211_SKB_CB(skb);
skb               149 drivers/net/wireless/ath/ath9k/beacon.c 	ath_assign_seq(common, skb);
skb               153 drivers/net/wireless/ath/ath9k/beacon.c 		ath9k_beacon_add_noa(sc, avp, skb);
skb               155 drivers/net/wireless/ath/ath9k/beacon.c 	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
skb               156 drivers/net/wireless/ath/ath9k/beacon.c 					 skb->len, DMA_TO_DEVICE);
skb               158 drivers/net/wireless/ath/ath9k/beacon.c 		dev_kfree_skb_any(skb);
skb               165 drivers/net/wireless/ath/ath9k/beacon.c 	skb = ieee80211_get_buffered_bc(hw, vif);
skb               179 drivers/net/wireless/ath/ath9k/beacon.c 	if (skb && cabq_depth) {
skb               189 drivers/net/wireless/ath/ath9k/beacon.c 	if (skb)
skb               190 drivers/net/wireless/ath/ath9k/beacon.c 		ath_tx_cabq(hw, vif, skb);
skb               229 drivers/net/wireless/ath/ath9k/beacon.c 		struct sk_buff *skb = bf->bf_mpdu;
skb               231 drivers/net/wireless/ath/ath9k/beacon.c 				 skb->len, DMA_TO_DEVICE);
skb               232 drivers/net/wireless/ath/ath9k/beacon.c 		dev_kfree_skb_any(skb);
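
The beacon path above recycles one DMA buffer per interface: unmap and free the previous beacon, fetch a fresh template from mac80211, then map the new skb for the hardware and verify the mapping before use. A condensed sketch, assuming ath9k's ath_buf bookkeeping (bf_mpdu, bf_buf_addr) as seen above; example_beacon_refresh is hypothetical:

#include <linux/dma-mapping.h>
#include <net/mac80211.h>

static int example_beacon_refresh(struct ath_softc *sc, struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif, struct ath_buf *bf)
{
	struct sk_buff *skb = bf->bf_mpdu;

	/* Release the previous beacon's DMA mapping and buffer. */
	if (skb) {
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}

	skb = ieee80211_beacon_get(hw, vif);	/* fresh template from mac80211 */
	if (!skb)
		return -ENOMEM;

	bf->bf_mpdu = skb;
	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(sc->dev, bf->bf_buf_addr)) {
		dev_kfree_skb_any(skb);
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		return -EIO;
	}

	return 0;
}
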
skb               995 drivers/net/wireless/ath/ath9k/channel.c 	struct sk_buff *skb;
skb               999 drivers/net/wireless/ath/ath9k/channel.c 	skb = ieee80211_probereq_get(sc->hw, vif->addr,
skb              1001 drivers/net/wireless/ath/ath9k/channel.c 	if (!skb)
skb              1004 drivers/net/wireless/ath/ath9k/channel.c 	info = IEEE80211_SKB_CB(skb);
skb              1009 drivers/net/wireless/ath/ath9k/channel.c 		skb_put_data(skb, req->ie, req->ie_len);
skb              1011 drivers/net/wireless/ath/ath9k/channel.c 	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
skb              1013 drivers/net/wireless/ath/ath9k/channel.c 	if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL))
skb              1017 drivers/net/wireless/ath/ath9k/channel.c 	if (ath_tx_start(sc->hw, skb, &txctl))
skb              1023 drivers/net/wireless/ath/ath9k/channel.c 	ieee80211_free_txskb(sc->hw, skb);
skb              1108 drivers/net/wireless/ath/ath9k/channel.c 	struct sk_buff *skb;
skb              1116 drivers/net/wireless/ath/ath9k/channel.c 		skb = ieee80211_nullfunc_get(sc->hw, vif, false);
skb              1117 drivers/net/wireless/ath/ath9k/channel.c 		if (!skb)
skb              1120 drivers/net/wireless/ath/ath9k/channel.c 		nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
skb              1125 drivers/net/wireless/ath/ath9k/channel.c 		skb->priority = 7;
skb              1126 drivers/net/wireless/ath/ath9k/channel.c 		skb_set_queue_mapping(skb, IEEE80211_AC_VO);
skb              1127 drivers/net/wireless/ath/ath9k/channel.c 		if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) {
skb              1128 drivers/net/wireless/ath/ath9k/channel.c 			dev_kfree_skb_any(skb);
skb              1139 drivers/net/wireless/ath/ath9k/channel.c 	if (ath_tx_start(sc->hw, skb, &txctl)) {
skb              1140 drivers/net/wireless/ath/ath9k/channel.c 		ieee80211_free_txskb(sc->hw, skb);
skb              1501 drivers/net/wireless/ath/ath9k/channel.c 			  struct sk_buff *skb)
skb              1523 drivers/net/wireless/ath/ath9k/channel.c 	hdr = skb_put_data(skb, noa_ie_hdr, sizeof(noa_ie_hdr));
skb              1527 drivers/net/wireless/ath/ath9k/channel.c 	noa = skb_put_zero(skb, noa_len);
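
channel.c builds its own management frames: mac80211 generates the template with tailroom, the driver appends IEs with skb_put_data(), pins the frame to the voice queue, and runs ieee80211_tx_prepare_skb() before ath_tx_start(). A sketch of the probe-request variant; example_send_probereq is hypothetical and the band is hard-coded for brevity:

#include <net/mac80211.h>

static void example_send_probereq(struct ath_softc *sc, struct ieee80211_vif *vif,
				  const u8 *ie, size_t ie_len)
{
	struct ath_tx_control txctl = {};
	struct sk_buff *skb;

	/* Template with enough tailroom for the IEs we append below. */
	skb = ieee80211_probereq_get(sc->hw, vif->addr, NULL, 0, ie_len);
	if (!skb)
		return;

	if (ie_len)
		skb_put_data(skb, ie, ie_len);	/* IEs follow the header */

	skb_set_queue_mapping(skb, IEEE80211_AC_VO);

	if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb,
				      NL80211_BAND_2GHZ, NULL))
		goto err;

	txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
	if (ath_tx_start(sc->hw, skb, &txctl))
		goto err;

	return;
err:
	ieee80211_free_txskb(sc->hw, skb);	/* error paths free the frame */
}
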
skb               119 drivers/net/wireless/ath/ath9k/common.c 				  struct sk_buff *skb,
skb               131 drivers/net/wireless/ath/ath9k/common.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               132 drivers/net/wireless/ath/ath9k/common.c 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
skb               145 drivers/net/wireless/ath/ath9k/common.c 	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
skb               146 drivers/net/wireless/ath/ath9k/common.c 		memmove(skb->data + padsize, skb->data, padpos);
skb               147 drivers/net/wireless/ath/ath9k/common.c 		skb_pull(skb, padsize);
skb               156 drivers/net/wireless/ath/ath9k/common.c 		   && !decrypt_error && skb->len >= hdrlen + 4) {
skb               157 drivers/net/wireless/ath/ath9k/common.c 		keyix = skb->data[hdrlen + 3] >> 6;
skb               275 drivers/net/wireless/ath/ath9k/common.c int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
skb               277 drivers/net/wireless/ath/ath9k/common.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb                71 drivers/net/wireless/ath/ath9k/common.h 				  struct sk_buff *skb,
skb                83 drivers/net/wireless/ath/ath9k/common.h int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
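
The rx post-processing above removes the 4-byte alignment padding some chips insert after the 802.11 header by sliding the header forward over the pad and shrinking the skb from the front. Condensed into a standalone helper (example_strip_rx_pad is hypothetical; the pad position is taken to be the header length, as in the code above):

#include <linux/ieee80211.h>
#include <net/mac80211.h>

static void example_strip_rx_pad(struct sk_buff *skb)
{
	unsigned int padpos = ieee80211_get_hdrlen_from_skb(skb);
	unsigned int padsize = padpos & 3;	/* pad to a 4-byte boundary */

	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		/* Slide the header forward over the pad bytes ... */
		memmove(skb->data + padsize, skb->data, padpos);
		/* ... then drop the now-dead bytes from the front. */
		skb_pull(skb, padsize);
	}
}
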
skb               321 drivers/net/wireless/ath/ath9k/debug.h 			  struct sk_buff *skb);
skb               325 drivers/net/wireless/ath/ath9k/debug.h 					struct sk_buff *skb)
skb                93 drivers/net/wireless/ath/ath9k/debug_sta.c 			  struct sk_buff *skb)
skb                95 drivers/net/wireless/ath/ath9k/debug_sta.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               113 drivers/net/wireless/ath/ath9k/debug_sta.c 	rxs = IEEE80211_SKB_RXCB(skb);
skb               207 drivers/net/wireless/ath/ath9k/dynack.c void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
skb               214 drivers/net/wireless/ath/ath9k/dynack.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               223 drivers/net/wireless/ath/ath9k/dynack.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               290 drivers/net/wireless/ath/ath9k/dynack.c void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
skb               295 drivers/net/wireless/ath/ath9k/dynack.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb                87 drivers/net/wireless/ath/ath9k/dynack.h void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts);
skb                88 drivers/net/wireless/ath/ath9k/dynack.h void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
skb                98 drivers/net/wireless/ath/ath9k/dynack.h 					    struct sk_buff *skb, u32 ts) {}
skb               100 drivers/net/wireless/ath/ath9k/dynack.h 					   struct sk_buff *skb,
skb                91 drivers/net/wireless/ath/ath9k/hif_usb.c 					  cmd->skb, true);
skb                97 drivers/net/wireless/ath/ath9k/hif_usb.c 	kfree_skb(cmd->skb);
skb               102 drivers/net/wireless/ath/ath9k/hif_usb.c 			       struct sk_buff *skb)
skb               118 drivers/net/wireless/ath/ath9k/hif_usb.c 	cmd->skb = skb;
skb               123 drivers/net/wireless/ath/ath9k/hif_usb.c 			 skb->data, skb->len,
skb               144 drivers/net/wireless/ath/ath9k/hif_usb.c 	if (!cmd || !cmd->skb || !cmd->hif_dev)
skb               165 drivers/net/wireless/ath/ath9k/hif_usb.c 			dev_kfree_skb_any(cmd->skb);
skb               177 drivers/net/wireless/ath/ath9k/hif_usb.c 	skb_pull(cmd->skb, 4);
skb               179 drivers/net/wireless/ath/ath9k/hif_usb.c 				  cmd->skb, txok);
skb               184 drivers/net/wireless/ath/ath9k/hif_usb.c 			     struct sk_buff *skb)
skb               201 drivers/net/wireless/ath/ath9k/hif_usb.c 	cmd->skb = skb;
skb               204 drivers/net/wireless/ath/ath9k/hif_usb.c 	hdr = skb_push(skb, 4);
skb               205 drivers/net/wireless/ath/ath9k/hif_usb.c 	*hdr++ = cpu_to_le16(skb->len - 4);
skb               210 drivers/net/wireless/ath/ath9k/hif_usb.c 			 skb->data, skb->len,
skb               227 drivers/net/wireless/ath/ath9k/hif_usb.c 	struct sk_buff *skb;
skb               229 drivers/net/wireless/ath/ath9k/hif_usb.c 	while ((skb = __skb_dequeue(list)) != NULL) {
skb               230 drivers/net/wireless/ath/ath9k/hif_usb.c 		dev_kfree_skb_any(skb);
skb               238 drivers/net/wireless/ath/ath9k/hif_usb.c 	struct sk_buff *skb;
skb               240 drivers/net/wireless/ath/ath9k/hif_usb.c 	while ((skb = __skb_dequeue(queue)) != NULL) {
skb               242 drivers/net/wireless/ath/ath9k/hif_usb.c 		int ln = skb->len;
skb               245 drivers/net/wireless/ath/ath9k/hif_usb.c 					  skb, txok);
skb               379 drivers/net/wireless/ath/ath9k/hif_usb.c static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb)
skb               400 drivers/net/wireless/ath/ath9k/hif_usb.c 	tx_ctl = HTC_SKB_CB(skb);
skb               405 drivers/net/wireless/ath/ath9k/hif_usb.c 		ret = hif_usb_send_mgmt(hif_dev, skb);
skb               412 drivers/net/wireless/ath/ath9k/hif_usb.c 		__skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
skb               460 drivers/net/wireless/ath/ath9k/hif_usb.c static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb)
skb               467 drivers/net/wireless/ath/ath9k/hif_usb.c 		ret = hif_usb_send_tx(hif_dev, skb);
skb               470 drivers/net/wireless/ath/ath9k/hif_usb.c 		ret = hif_usb_send_regout(hif_dev, skb);
skb               482 drivers/net/wireless/ath/ath9k/hif_usb.c static inline bool check_index(struct sk_buff *skb, u8 idx)
skb               486 drivers/net/wireless/ath/ath9k/hif_usb.c 	tx_ctl = HTC_SKB_CB(skb);
skb               498 drivers/net/wireless/ath/ath9k/hif_usb.c 	struct sk_buff *skb, *tmp;
skb               503 drivers/net/wireless/ath/ath9k/hif_usb.c 	skb_queue_walk_safe(&hif_dev->tx.tx_skb_queue, skb, tmp) {
skb               504 drivers/net/wireless/ath/ath9k/hif_usb.c 		if (check_index(skb, idx)) {
skb               505 drivers/net/wireless/ath/ath9k/hif_usb.c 			__skb_unlink(skb, &hif_dev->tx.tx_skb_queue);
skb               507 drivers/net/wireless/ath/ath9k/hif_usb.c 						  skb, false);
skb               530 drivers/net/wireless/ath/ath9k/hif_usb.c 				    struct sk_buff *skb)
skb               533 drivers/net/wireless/ath/ath9k/hif_usb.c 	int index = 0, i, len = skb->len;
skb               553 drivers/net/wireless/ath/ath9k/hif_usb.c 			memcpy(ptr, skb->data, rx_remain_len);
skb               574 drivers/net/wireless/ath/ath9k/hif_usb.c 		ptr = (u8 *) skb->data;
skb               608 drivers/net/wireless/ath/ath9k/hif_usb.c 			memcpy(nskb->data, &(skb->data[chk_idx+4]),
skb               629 drivers/net/wireless/ath/ath9k/hif_usb.c 			memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
skb               648 drivers/net/wireless/ath/ath9k/hif_usb.c 	struct sk_buff *skb = rx_buf->skb;
skb               651 drivers/net/wireless/ath/ath9k/hif_usb.c 	if (!skb)
skb               670 drivers/net/wireless/ath/ath9k/hif_usb.c 		skb_put(skb, urb->actual_length);
skb               671 drivers/net/wireless/ath/ath9k/hif_usb.c 		ath9k_hif_usb_rx_stream(hif_dev, skb);
skb               675 drivers/net/wireless/ath/ath9k/hif_usb.c 	skb_reset_tail_pointer(skb);
skb               676 drivers/net/wireless/ath/ath9k/hif_usb.c 	skb_trim(skb, 0);
skb               687 drivers/net/wireless/ath/ath9k/hif_usb.c 	kfree_skb(skb);
skb               695 drivers/net/wireless/ath/ath9k/hif_usb.c 	struct sk_buff *skb = rx_buf->skb;
skb               699 drivers/net/wireless/ath/ath9k/hif_usb.c 	if (!skb)
skb               714 drivers/net/wireless/ath/ath9k/hif_usb.c 		skb_reset_tail_pointer(skb);
skb               715 drivers/net/wireless/ath/ath9k/hif_usb.c 		skb_trim(skb, 0);
skb               721 drivers/net/wireless/ath/ath9k/hif_usb.c 		skb_put(skb, urb->actual_length);
skb               724 drivers/net/wireless/ath/ath9k/hif_usb.c 		ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
skb               725 drivers/net/wireless/ath/ath9k/hif_usb.c 				 skb->len, USB_REG_IN_PIPE);
skb               753 drivers/net/wireless/ath/ath9k/hif_usb.c 	kfree_skb(skb);
skb               838 drivers/net/wireless/ath/ath9k/hif_usb.c 	struct sk_buff *skb = NULL;
skb               861 drivers/net/wireless/ath/ath9k/hif_usb.c 		skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
skb               862 drivers/net/wireless/ath/ath9k/hif_usb.c 		if (!skb) {
skb               868 drivers/net/wireless/ath/ath9k/hif_usb.c 		rx_buf->skb = skb;
skb               873 drivers/net/wireless/ath/ath9k/hif_usb.c 				  skb->data, MAX_RX_BUF_SIZE,
skb               896 drivers/net/wireless/ath/ath9k/hif_usb.c 	kfree_skb(skb);
skb               914 drivers/net/wireless/ath/ath9k/hif_usb.c 	struct sk_buff *skb = NULL;
skb               936 drivers/net/wireless/ath/ath9k/hif_usb.c 		skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
skb               937 drivers/net/wireless/ath/ath9k/hif_usb.c 		if (!skb) {
skb               943 drivers/net/wireless/ath/ath9k/hif_usb.c 		rx_buf->skb = skb;
skb               948 drivers/net/wireless/ath/ath9k/hif_usb.c 				  skb->data, MAX_REG_IN_BUF_SIZE,
skb               971 drivers/net/wireless/ath/ath9k/hif_usb.c 	kfree_skb(skb);
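
hif_usb.c points each rx URB directly at a fresh skb's data area, stashing the skb in the per-URB rx_buf so the completion handler can recycle or free it. A sketch of the submit side, assuming the USB_WLAN_RX_PIPE endpoint and MAX_RX_BUF_SIZE constant from hif_usb.h; example_submit_rx_urb and its completion callback are hypothetical:

#include <linux/skbuff.h>
#include <linux/usb.h>

static void example_rx_complete(struct urb *urb);	/* completion handler, elided */

static int example_submit_rx_urb(struct hif_device_usb *hif_dev,
				 struct urb *urb, struct rx_buf *rx_buf)
{
	struct sk_buff *skb;
	int ret;

	skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	rx_buf->skb = skb;	/* so the completion handler can find it */

	/* The URB transfers straight into the skb's data area. */
	usb_fill_bulk_urb(urb, hif_dev->udev,
			  usb_rcvbulkpipe(hif_dev->udev, USB_WLAN_RX_PIPE),
			  skb->data, MAX_RX_BUF_SIZE,
			  example_rx_complete, rx_buf);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		rx_buf->skb = NULL;
		kfree_skb(skb);
	}
	return ret;
}
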
skb                90 drivers/net/wireless/ath/ath9k/hif_usb.h 	struct sk_buff *skb;
skb               108 drivers/net/wireless/ath/ath9k/hif_usb.h 	struct sk_buff *skb;
skb               276 drivers/net/wireless/ath/ath9k/htc.h 	struct sk_buff *skb;
skb               318 drivers/net/wireless/ath/ath9k/htc.h static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
skb               320 drivers/net/wireless/ath/ath9k/htc.h 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               557 drivers/net/wireless/ath/ath9k/htc.h void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
skb               559 drivers/net/wireless/ath/ath9k/htc.h void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
skb               561 drivers/net/wireless/ath/ath9k/htc.h void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
skb               573 drivers/net/wireless/ath/ath9k/htc.h 		       struct sk_buff *skb, u8 slot, bool is_cab);
skb               131 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
skb               134 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 	dev_kfree_skb_any(skb);
skb               142 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 	struct sk_buff *skb;
skb               150 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 	skb = ieee80211_get_buffered_bc(priv->hw, vif);
skb               152 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 	while (skb) {
skb               153 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 		hdr = (struct ieee80211_hdr *) skb->data;
skb               157 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 		if (padsize && skb->len > padpos) {
skb               158 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 			if (skb_headroom(skb) < padsize) {
skb               159 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 				dev_kfree_skb_any(skb);
skb               162 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 			skb_push(skb, padsize);
skb               163 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 			memmove(skb->data, skb->data + padsize, padpos);
skb               169 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 			dev_kfree_skb_any(skb);
skb               173 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 		ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true);
skb               176 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 			dev_kfree_skb_any(skb);
skb               185 drivers/net/wireless/ath/ath9k/htc_drv_beacon.c 		skb = ieee80211_get_buffered_bc(priv->hw, vif);
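
The CAB (content-after-beacon) loop above drains every broadcast frame mac80211 buffered over the DTIM period: keep calling ieee80211_get_buffered_bc() until it returns NULL, transmitting each frame and freeing any that cannot be sent. A sketch with the padding and slot management elided (slot 0 stands in for the driver's tx slot allocator):

#include <net/mac80211.h>

static void example_send_buffered_bc(struct ath9k_htc_priv *priv,
				     struct ieee80211_vif *vif)
{
	struct sk_buff *skb;

	/* NULL from mac80211 means the DTIM backlog is drained. */
	while ((skb = ieee80211_get_buffered_bc(priv->hw, vif)) != NULL) {
		if (ath9k_htc_tx_start(priv, NULL, skb, 0, true) != 0)
			dev_kfree_skb_any(skb);	/* frame could not be queued */
	}
}
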
skb               864 drivers/net/wireless/ath/ath9k/htc_drv_main.c 			 struct sk_buff *skb)
skb               871 drivers/net/wireless/ath/ath9k/htc_drv_main.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               876 drivers/net/wireless/ath/ath9k/htc_drv_main.c 	if (padsize && skb->len > padpos) {
skb               877 drivers/net/wireless/ath/ath9k/htc_drv_main.c 		if (skb_headroom(skb) < padsize) {
skb               881 drivers/net/wireless/ath/ath9k/htc_drv_main.c 		skb_push(skb, padsize);
skb               882 drivers/net/wireless/ath/ath9k/htc_drv_main.c 		memmove(skb->data, skb->data + padsize, padpos);
skb               891 drivers/net/wireless/ath/ath9k/htc_drv_main.c 	ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false);
skb               904 drivers/net/wireless/ath/ath9k/htc_drv_main.c 	dev_kfree_skb_any(skb);
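
Both tx paths above 4-byte-align the frame body with the same headroom trick: grow the skb at the front with skb_push(), then slide the 802.11 header back over the new bytes so the pad lands between header and payload. As a standalone helper (example_pad_tx_frame is hypothetical):

#include <linux/ieee80211.h>

static int example_pad_tx_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u32 padpos = ieee80211_hdrlen(hdr->frame_control);
	u32 padsize = padpos & 3;

	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOSPC;	/* no room to grow at the front */
		skb_push(skb, padsize);
		/* Header moves back; the pad ends up after it. */
		memmove(skb->data, skb->data + padsize, padpos);
	}
	return 0;
}
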
skb               158 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 				   struct sk_buff *skb)
skb               164 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	tx_ctl = HTC_SKB_CB(skb);
skb               168 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			(struct tx_mgmt_hdr *)skb->data;
skb               170 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		skb_pull(skb, sizeof(struct tx_mgmt_hdr));
skb               177 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			(struct tx_frame_hdr *)skb->data;
skb               179 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		skb_pull(skb, sizeof(struct tx_frame_hdr));
skb               216 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			      struct sk_buff *skb,
skb               219 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               226 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	tx_ctl = HTC_SKB_CB(skb);
skb               227 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               237 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		mgmt = (struct ieee80211_mgmt *)skb->data;
skb               249 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
skb               255 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
skb               262 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			      struct sk_buff *skb,
skb               266 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               274 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	tx_ctl = HTC_SKB_CB(skb);
skb               275 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               307 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		if (skb->len > priv->hw->wiphy->rts_threshold)
skb               316 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
skb               322 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	tx_fhdr = skb_push(skb, sizeof(tx_hdr));
skb               331 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	qnum = skb_get_queue_mapping(skb);
skb               337 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		       struct sk_buff *skb,
skb               341 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               347 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               377 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		ath9k_htc_tx_data(priv, vif, skb,
skb               380 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		ath9k_htc_tx_mgmt(priv, avp, skb,
skb               384 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	return htc_send(priv->htc, skb);
skb               402 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 				    struct sk_buff *skb)
skb               408 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               420 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	    !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
skb               441 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 				 struct sk_buff *skb,
skb               453 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	slot = strip_drv_header(priv, skb);
skb               455 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		dev_kfree_skb_any(skb);
skb               459 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	tx_ctl = HTC_SKB_CB(skb);
skb               461 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	tx_info = IEEE80211_SKB_CB(skb);
skb               501 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	ath9k_htc_check_tx_aggr(priv, vif, skb);
skb               512 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
skb               515 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	if (padsize && skb->len > hdrlen + padsize) {
skb               516 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		memmove(skb->data + padsize, skb->data, hdrlen);
skb               517 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		skb_pull(skb, padsize);
skb               521 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	ieee80211_tx_status(priv->hw, skb);
skb               527 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct sk_buff *skb;
skb               529 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	while ((skb = skb_dequeue(queue)) != NULL) {
skb               530 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		ath9k_htc_tx_process(priv, skb, NULL);
skb               588 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 				struct sk_buff *skb,
skb               595 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		hdr = (struct tx_mgmt_hdr *) skb->data;
skb               603 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		hdr = (struct tx_frame_hdr *) skb->data;
skb               618 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct sk_buff *skb, *tmp;
skb               627 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	skb_queue_walk_safe(epid_queue, skb, tmp) {
skb               628 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		if (check_cookie(priv, skb, txs->cookie, epid)) {
skb               629 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			__skb_unlink(skb, epid_queue);
skb               631 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			return skb;
skb               646 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct sk_buff *skb;
skb               655 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		skb = ath9k_htc_tx_get_packet(priv, __txs);
skb               656 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		if (!skb) {
skb               677 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		ath9k_htc_tx_process(priv, skb, __txs);
skb               684 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
skb               691 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	tx_ctl = HTC_SKB_CB(skb);
skb               696 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		skb_queue_tail(&priv->tx.tx_failed, skb);
skb               703 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		dev_kfree_skb_any(skb);
skb               707 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	skb_queue_tail(epid_queue, skb);
skb               710 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c static inline bool check_packet(struct ath9k_htc_priv *priv, struct sk_buff *skb)
skb               715 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	tx_ctl = HTC_SKB_CB(skb);
skb               732 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct sk_buff *skb, *tmp;
skb               738 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	skb_queue_walk_safe(epid_queue, skb, tmp) {
skb               739 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		if (check_packet(priv, skb)) {
skb               740 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			__skb_unlink(skb, epid_queue);
skb               741 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			__skb_queue_tail(&queue, skb);
skb               748 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		skb_queue_walk_safe(&queue, skb, tmp) {
skb               749 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			__skb_unlink(skb, &queue);
skb               750 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			ath9k_htc_tx_process(priv, skb, NULL);
skb               760 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct sk_buff *skb;
skb               765 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		skb = ath9k_htc_tx_get_packet(priv, &event->txs);
skb               766 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		if (skb) {
skb               772 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			ath9k_htc_tx_process(priv, skb, &event->txs);
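
The completion handlers above match each firmware status report to its pending frame by cookie, using the unlink-while-walking idiom: skb_queue_walk_safe() keeps a lookahead pointer, so __skb_unlink() on the current skb is safe. The idiom in generic form, assuming the caller holds whatever lock guards the queue:

#include <linux/skbuff.h>

static struct sk_buff *example_dequeue_match(struct sk_buff_head *queue,
					     bool (*match)(struct sk_buff *skb))
{
	struct sk_buff *skb, *tmp;

	/* tmp holds the next node, so unlinking skb is safe mid-walk. */
	skb_queue_walk_safe(queue, skb, tmp) {
		if (match(skb)) {
			__skb_unlink(skb, queue);
			return skb;
		}
	}
	return NULL;
}
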
skb               970 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct sk_buff *skb = rxbuf->skb;
skb               979 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
skb               981 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			skb->len);
skb               985 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	rxstatus = (struct ath_htc_rx_status *)skb->data;
skb               989 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	    (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) {
skb               992 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			rs_datalen, skb->len);
skb              1017 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
skb              1023 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              1042 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	ath9k_cmn_rx_skb_postprocess(common, skb, &rx_stats,
skb              1069 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct sk_buff *skb;
skb              1087 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		if (!rxbuf->skb)
skb              1091 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			dev_kfree_skb_any(rxbuf->skb);
skb              1095 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
skb              1097 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		skb = rxbuf->skb;
skb              1098 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		hdr = (struct ieee80211_hdr *) skb->data;
skb              1105 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		ieee80211_rx(priv->hw, skb);
skb              1110 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		rxbuf->skb = NULL;
skb              1118 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
skb              1142 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	rxbuf->skb = skb;
skb              1149 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	dev_kfree_skb_any(skb);
skb              1160 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		if (rxbuf->skb)
skb              1161 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			dev_kfree_skb_any(rxbuf->skb);
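
Once a frame passes validation, the rx path strips the firmware status header, copies the decoded rx status into the skb's receive control block, and delivers to mac80211. Condensed, with the status decoding elided (example_rx_deliver is hypothetical):

#include <net/mac80211.h>

static void example_rx_deliver(struct ieee80211_hw *hw, struct sk_buff *skb,
			       const struct ieee80211_rx_status *rx_status,
			       unsigned int hdr_size)
{
	if (skb->len < hdr_size) {
		dev_kfree_skb_any(skb);	/* runt frame: no status header */
		return;
	}

	skb_pull(skb, hdr_size);	/* drop the firmware status header */
	memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
	ieee80211_rx(hw, skb);		/* mac80211 now owns the skb */
}
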
skb                21 drivers/net/wireless/ath/ath9k/htc_hst.c static int htc_issue_send(struct htc_target *target, struct sk_buff *skb,
skb                29 drivers/net/wireless/ath/ath9k/htc_hst.c 	hdr = skb_push(skb, sizeof(struct htc_frame_hdr));
skb                34 drivers/net/wireless/ath/ath9k/htc_hst.c 	status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
skb               149 drivers/net/wireless/ath/ath9k/htc_hst.c 	struct sk_buff *skb;
skb               154 drivers/net/wireless/ath/ath9k/htc_hst.c 	skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
skb               155 drivers/net/wireless/ath/ath9k/htc_hst.c 	if (!skb) {
skb               159 drivers/net/wireless/ath/ath9k/htc_hst.c 	skb_reserve(skb, sizeof(struct htc_frame_hdr));
skb               161 drivers/net/wireless/ath/ath9k/htc_hst.c 	cp_msg = skb_put(skb, sizeof(struct htc_config_pipe_msg));
skb               169 drivers/net/wireless/ath/ath9k/htc_hst.c 	ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
skb               181 drivers/net/wireless/ath/ath9k/htc_hst.c 	kfree_skb(skb);
skb               187 drivers/net/wireless/ath/ath9k/htc_hst.c 	struct sk_buff *skb;
skb               192 drivers/net/wireless/ath/ath9k/htc_hst.c 	skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
skb               193 drivers/net/wireless/ath/ath9k/htc_hst.c 	if (!skb) {
skb               197 drivers/net/wireless/ath/ath9k/htc_hst.c 	skb_reserve(skb, sizeof(struct htc_frame_hdr));
skb               199 drivers/net/wireless/ath/ath9k/htc_hst.c 	comp_msg = skb_put(skb, sizeof(struct htc_comp_msg));
skb               204 drivers/net/wireless/ath/ath9k/htc_hst.c 	ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
skb               217 drivers/net/wireless/ath/ath9k/htc_hst.c 	kfree_skb(skb);
skb               238 drivers/net/wireless/ath/ath9k/htc_hst.c 	struct sk_buff *skb;
skb               258 drivers/net/wireless/ath/ath9k/htc_hst.c 	skb = alloc_skb(sizeof(struct htc_conn_svc_msg) +
skb               260 drivers/net/wireless/ath/ath9k/htc_hst.c 	if (!skb) {
skb               266 drivers/net/wireless/ath/ath9k/htc_hst.c 	skb_reserve(skb, sizeof(struct htc_frame_hdr));
skb               268 drivers/net/wireless/ath/ath9k/htc_hst.c 	conn_msg = skb_put(skb, sizeof(struct htc_conn_svc_msg));
skb               275 drivers/net/wireless/ath/ath9k/htc_hst.c 	ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
skb               289 drivers/net/wireless/ath/ath9k/htc_hst.c 	kfree_skb(skb);
skb               293 drivers/net/wireless/ath/ath9k/htc_hst.c int htc_send(struct htc_target *target, struct sk_buff *skb)
skb               297 drivers/net/wireless/ath/ath9k/htc_hst.c 	tx_ctl = HTC_SKB_CB(skb);
skb               298 drivers/net/wireless/ath/ath9k/htc_hst.c 	return htc_issue_send(target, skb, skb->len, 0, tx_ctl->epid);
skb               301 drivers/net/wireless/ath/ath9k/htc_hst.c int htc_send_epid(struct htc_target *target, struct sk_buff *skb,
skb               304 drivers/net/wireless/ath/ath9k/htc_hst.c 	return htc_issue_send(target, skb, skb->len, 0, epid);
skb               323 drivers/net/wireless/ath/ath9k/htc_hst.c 			       struct sk_buff *skb, bool txok)
skb               340 drivers/net/wireless/ath/ath9k/htc_hst.c 	if (skb) {
skb               341 drivers/net/wireless/ath/ath9k/htc_hst.c 		htc_hdr = (struct htc_frame_hdr *) skb->data;
skb               343 drivers/net/wireless/ath/ath9k/htc_hst.c 		skb_pull(skb, sizeof(struct htc_frame_hdr));
skb               347 drivers/net/wireless/ath/ath9k/htc_hst.c 						  skb, htc_hdr->endpoint_id,
skb               350 drivers/net/wireless/ath/ath9k/htc_hst.c 			kfree_skb(skb);
skb               356 drivers/net/wireless/ath/ath9k/htc_hst.c 	kfree_skb(skb);
skb               360 drivers/net/wireless/ath/ath9k/htc_hst.c 				      struct sk_buff *skb)
skb               362 drivers/net/wireless/ath/ath9k/htc_hst.c 	uint32_t *pattern = (uint32_t *)skb->data;
skb               368 drivers/net/wireless/ath/ath9k/htc_hst.c 		htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
skb               378 drivers/net/wireless/ath/ath9k/htc_hst.c 		htc_panic = (struct htc_panic_bad_epid *) skb->data;
skb               397 drivers/net/wireless/ath/ath9k/htc_hst.c 		      struct sk_buff *skb, u32 len, u8 pipe_id)
skb               404 drivers/net/wireless/ath/ath9k/htc_hst.c 	if (!htc_handle || !skb)
skb               407 drivers/net/wireless/ath/ath9k/htc_hst.c 	htc_hdr = (struct htc_frame_hdr *) skb->data;
skb               411 drivers/net/wireless/ath/ath9k/htc_hst.c 		ath9k_htc_fw_panic_report(htc_handle, skb);
skb               412 drivers/net/wireless/ath/ath9k/htc_hst.c 		kfree_skb(skb);
skb               418 drivers/net/wireless/ath/ath9k/htc_hst.c 			dev_kfree_skb_any(skb);
skb               420 drivers/net/wireless/ath/ath9k/htc_hst.c 			kfree_skb(skb);
skb               428 drivers/net/wireless/ath/ath9k/htc_hst.c 			if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
skb               430 drivers/net/wireless/ath/ath9k/htc_hst.c 				htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
skb               449 drivers/net/wireless/ath/ath9k/htc_hst.c 		kfree_skb(skb);
skb               453 drivers/net/wireless/ath/ath9k/htc_hst.c 			skb_trim(skb, len - htc_hdr->control[0]);
skb               455 drivers/net/wireless/ath/ath9k/htc_hst.c 		skb_pull(skb, sizeof(struct htc_frame_hdr));
skb               460 drivers/net/wireless/ath/ath9k/htc_hst.c 						  skb, epid);
skb               209 drivers/net/wireless/ath/ath9k/htc_hst.h int htc_send(struct htc_target *target, struct sk_buff *skb);
skb               210 drivers/net/wireless/ath/ath9k/htc_hst.h int htc_send_epid(struct htc_target *target, struct sk_buff *skb,
skb               217 drivers/net/wireless/ath/ath9k/htc_hst.h 		      struct sk_buff *skb, u32 len, u8 pipe_id);
skb               219 drivers/net/wireless/ath/ath9k/htc_hst.h 			       struct sk_buff *skb, bool txok);
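
htc_issue_send() prefixes every outgoing buffer with an HTC frame header pushed into headroom, which is why the alloc_skb()/skb_reserve() pairs above all reserve sizeof(struct htc_frame_hdr) up front. A condensed sketch of the send side, using the real htc_frame_hdr layout (big-endian payload length); example_htc_send is hypothetical:

#include <linux/skbuff.h>

static int example_htc_send(struct htc_target *target, struct sk_buff *skb,
			    enum htc_endpoint_id epid)
{
	struct htc_frame_hdr *hdr;

	/* Grows the skb at the front; relies on the reserved headroom. */
	hdr = skb_push(skb, sizeof(struct htc_frame_hdr));
	hdr->endpoint_id = epid;
	hdr->flags = 0;
	hdr->payload_len = cpu_to_be16(skb->len -
				       sizeof(struct htc_frame_hdr));

	return target->hif->send(target->hif_dev,
				 target->endpoint[epid].ul_pipeid, skb);
}
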
skb               172 drivers/net/wireless/ath/ath9k/link.c static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
skb               175 drivers/net/wireless/ath/ath9k/link.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               195 drivers/net/wireless/ath/ath9k/link.c 	if (ath_tx_start(hw, skb, &txctl) != 0) {
skb               197 drivers/net/wireless/ath/ath9k/link.c 		dev_kfree_skb_any(skb);
skb               218 drivers/net/wireless/ath/ath9k/link.c 	struct sk_buff *skb = NULL;
skb               239 drivers/net/wireless/ath/ath9k/link.c 	skb = alloc_skb(len, GFP_KERNEL);
skb               240 drivers/net/wireless/ath/ath9k/link.c 	if (!skb)
skb               243 drivers/net/wireless/ath/ath9k/link.c 	skb_put(skb, len);
skb               244 drivers/net/wireless/ath/ath9k/link.c 	memset(skb->data, 0, len);
skb               245 drivers/net/wireless/ath/ath9k/link.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               262 drivers/net/wireless/ath/ath9k/link.c 		if (!ath_paprd_send_frame(sc, skb, chain))
skb               286 drivers/net/wireless/ath/ath9k/link.c 	kfree_skb(skb);
skb               746 drivers/net/wireless/ath/ath9k/main.c 		     struct sk_buff *skb)
skb               751 drivers/net/wireless/ath/ath9k/main.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               805 drivers/net/wireless/ath/ath9k/main.c 	txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
skb               808 drivers/net/wireless/ath/ath9k/main.c 	ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
skb               810 drivers/net/wireless/ath/ath9k/main.c 	if (ath_tx_start(hw, skb, &txctl) != 0) {
skb               818 drivers/net/wireless/ath/ath9k/main.c 	ieee80211_free_txskb(hw, skb);
skb                43 drivers/net/wireless/ath/ath9k/recv.c 	struct sk_buff *skb;
skb                50 drivers/net/wireless/ath/ath9k/recv.c 	skb = bf->bf_mpdu;
skb                51 drivers/net/wireless/ath/ath9k/recv.c 	BUG_ON(skb == NULL);
skb                52 drivers/net/wireless/ath/ath9k/recv.c 	ds->ds_vdata = skb->data;
skb               115 drivers/net/wireless/ath/ath9k/recv.c 	struct sk_buff *skb;
skb               125 drivers/net/wireless/ath/ath9k/recv.c 	skb = bf->bf_mpdu;
skb               127 drivers/net/wireless/ath/ath9k/recv.c 	memset(skb->data, 0, ah->caps.rx_status_len);
skb               131 drivers/net/wireless/ath/ath9k/recv.c 	SKB_CB_ATHBUF(skb) = bf;
skb               133 drivers/net/wireless/ath/ath9k/recv.c 	__skb_queue_tail(&rx_edma->rx_fifo, skb);
skb               160 drivers/net/wireless/ath/ath9k/recv.c 	struct sk_buff *skb;
skb               164 drivers/net/wireless/ath/ath9k/recv.c 	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
skb               165 drivers/net/wireless/ath/ath9k/recv.c 		bf = SKB_CB_ATHBUF(skb);
skb               202 drivers/net/wireless/ath/ath9k/recv.c 	struct sk_buff *skb;
skb               223 drivers/net/wireless/ath/ath9k/recv.c 		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
skb               224 drivers/net/wireless/ath/ath9k/recv.c 		if (!skb) {
skb               229 drivers/net/wireless/ath/ath9k/recv.c 		memset(skb->data, 0, common->rx_bufsize);
skb               230 drivers/net/wireless/ath/ath9k/recv.c 		bf->bf_mpdu = skb;
skb               232 drivers/net/wireless/ath/ath9k/recv.c 		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
skb               237 drivers/net/wireless/ath/ath9k/recv.c 				dev_kfree_skb_any(skb);
skb               274 drivers/net/wireless/ath/ath9k/recv.c 	struct sk_buff *skb;
skb               301 drivers/net/wireless/ath/ath9k/recv.c 		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
skb               303 drivers/net/wireless/ath/ath9k/recv.c 		if (skb == NULL) {
skb               308 drivers/net/wireless/ath/ath9k/recv.c 		bf->bf_mpdu = skb;
skb               309 drivers/net/wireless/ath/ath9k/recv.c 		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
skb               314 drivers/net/wireless/ath/ath9k/recv.c 			dev_kfree_skb_any(skb);
skb               335 drivers/net/wireless/ath/ath9k/recv.c 	struct sk_buff *skb;
skb               344 drivers/net/wireless/ath/ath9k/recv.c 		skb = bf->bf_mpdu;
skb               345 drivers/net/wireless/ath/ath9k/recv.c 		if (skb) {
skb               349 drivers/net/wireless/ath/ath9k/recv.c 			dev_kfree_skb(skb);
skb               504 drivers/net/wireless/ath/ath9k/recv.c static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
skb               511 drivers/net/wireless/ath/ath9k/recv.c 	mgmt = (struct ieee80211_mgmt *)skb->data;
skb               513 drivers/net/wireless/ath/ath9k/recv.c 	end = skb->data + skb->len;
skb               536 drivers/net/wireless/ath/ath9k/recv.c static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
skb               541 drivers/net/wireless/ath/ath9k/recv.c 	if (skb->len < 24 + 8 + 2 + 2)
skb               565 drivers/net/wireless/ath/ath9k/recv.c 	if (ath_beacon_dtim_pending_cab(skb)) {
skb               590 drivers/net/wireless/ath/ath9k/recv.c static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
skb               595 drivers/net/wireless/ath/ath9k/recv.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               600 drivers/net/wireless/ath/ath9k/recv.c 		ath_rx_ps_beacon(sc, skb);
skb               634 drivers/net/wireless/ath/ath9k/recv.c 	struct sk_buff *skb;
skb               638 drivers/net/wireless/ath/ath9k/recv.c 	skb = skb_peek(&rx_edma->rx_fifo);
skb               639 drivers/net/wireless/ath/ath9k/recv.c 	if (!skb)
skb               642 drivers/net/wireless/ath/ath9k/recv.c 	bf = SKB_CB_ATHBUF(skb);
skb               648 drivers/net/wireless/ath/ath9k/recv.c 	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
skb               656 drivers/net/wireless/ath/ath9k/recv.c 	__skb_unlink(skb, &rx_edma->rx_fifo);
skb               662 drivers/net/wireless/ath/ath9k/recv.c 		skb = skb_peek(&rx_edma->rx_fifo);
skb               663 drivers/net/wireless/ath/ath9k/recv.c 		if (skb) {
skb               664 drivers/net/wireless/ath/ath9k/recv.c 			bf = SKB_CB_ATHBUF(skb);
skb               667 drivers/net/wireless/ath/ath9k/recv.c 			__skb_unlink(skb, &rx_edma->rx_fifo);
skb               808 drivers/net/wireless/ath/ath9k/recv.c 				   struct sk_buff *skb,
skb               864 drivers/net/wireless/ath/ath9k/recv.c 	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
skb              1011 drivers/net/wireless/ath/ath9k/recv.c 				 struct sk_buff *skb)
skb              1013 drivers/net/wireless/ath/ath9k/recv.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1033 drivers/net/wireless/ath/ath9k/recv.c 	tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
skb              1035 drivers/net/wireless/ath/ath9k/recv.c 	rxs = IEEE80211_SKB_RXCB(skb);
skb              1062 drivers/net/wireless/ath/ath9k/recv.c 	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
skb              1099 drivers/net/wireless/ath/ath9k/recv.c 		skb = bf->bf_mpdu;
skb              1100 drivers/net/wireless/ath/ath9k/recv.c 		if (!skb)
skb              1110 drivers/net/wireless/ath/ath9k/recv.c 			hdr_skb = skb;
skb              1148 drivers/net/wireless/ath/ath9k/recv.c 		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
skb              1150 drivers/net/wireless/ath/ath9k/recv.c 			skb_pull(skb, ah->caps.rx_status_len);
skb              1166 drivers/net/wireless/ath/ath9k/recv.c 				dev_kfree_skb_any(skb);
skb              1168 drivers/net/wireless/ath/ath9k/recv.c 				skb = NULL;
skb              1170 drivers/net/wireless/ath/ath9k/recv.c 			sc->rx.frag = skb;
skb              1175 drivers/net/wireless/ath/ath9k/recv.c 			int space = skb->len - skb_tailroom(hdr_skb);
skb              1178 drivers/net/wireless/ath/ath9k/recv.c 				dev_kfree_skb(skb);
skb              1185 drivers/net/wireless/ath/ath9k/recv.c 			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
skb              1186 drivers/net/wireless/ath/ath9k/recv.c 						  skb->len);
skb              1187 drivers/net/wireless/ath/ath9k/recv.c 			dev_kfree_skb_any(skb);
skb              1188 drivers/net/wireless/ath/ath9k/recv.c 			skb = hdr_skb;
skb              1192 drivers/net/wireless/ath/ath9k/recv.c 			skb_trim(skb, skb->len - 8);
skb              1199 drivers/net/wireless/ath/ath9k/recv.c 			ath_rx_ps(sc, skb, rs.is_mybeacon);
skb              1204 drivers/net/wireless/ath/ath9k/recv.c 		ath_debug_rate_stats(sc, &rs, skb);
skb              1205 drivers/net/wireless/ath/ath9k/recv.c 		ath_rx_count_airtime(sc, &rs, skb);
skb              1207 drivers/net/wireless/ath/ath9k/recv.c 		hdr = (struct ieee80211_hdr *)skb->data;
skb              1209 drivers/net/wireless/ath/ath9k/recv.c 			ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp);
skb              1211 drivers/net/wireless/ath/ath9k/recv.c 		ieee80211_rx(hw, skb);
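
The fragment path in recv.c accumulates follow-on fragments into the first fragment's tailroom: check the space with skb_tailroom(), copy the new fragment's linear data behind the existing bytes, then free the source skb. As a standalone helper (example_append_frag is hypothetical):

#include <linux/skbuff.h>

static int example_append_frag(struct sk_buff *hdr_skb, struct sk_buff *frag)
{
	if (frag->len > skb_tailroom(hdr_skb))
		return -ENOSPC;	/* would overflow the first fragment */

	/* skb_put() claims the space; the copy fills it. */
	skb_copy_from_linear_data(frag, skb_put(hdr_skb, frag->len),
				  frag->len);
	dev_kfree_skb_any(frag);	/* source fragment is now absorbed */
	return 0;
}
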
skb                56 drivers/net/wireless/ath/ath9k/tx99.c 	struct sk_buff *skb;
skb                59 drivers/net/wireless/ath/ath9k/tx99.c 	skb = alloc_skb(len, GFP_KERNEL);
skb                60 drivers/net/wireless/ath/ath9k/tx99.c 	if (!skb)
skb                63 drivers/net/wireless/ath/ath9k/tx99.c 	skb_put(skb, len);
skb                65 drivers/net/wireless/ath/ath9k/tx99.c 	memset(skb->data, 0, len);
skb                67 drivers/net/wireless/ath/ath9k/tx99.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb                80 drivers/net/wireless/ath/ath9k/tx99.c 	tx_info = IEEE80211_SKB_CB(skb);
skb                93 drivers/net/wireless/ath/ath9k/tx99.c 	memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
skb                95 drivers/net/wireless/ath/ath9k/tx99.c 	return skb;
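
link.c and tx99.c hand-build their calibration and test frames the same way: alloc_skb() for the full length, skb_put() to claim it, memset() to zero it, then write the header and payload in place. Condensed sketch (example_build_frame and the frame-control choice are illustrative):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>

static struct sk_buff *example_build_frame(int len)
{
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, len);		/* claim the whole buffer ... */
	memset(skb->data, 0, len);	/* ... and start from zeroes */

	hdr = (struct ieee80211_hdr *) skb->data;
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
					 IEEE80211_STYPE_QOS_DATA);
	/* addresses, sequence control and payload fill the rest of skb->data */

	return skb;
}
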
skb               146 drivers/net/wireless/ath/ath9k/wmi.c 	struct sk_buff *skb = NULL;
skb               152 drivers/net/wireless/ath/ath9k/wmi.c 		skb = __skb_dequeue(&wmi->wmi_event_queue);
skb               153 drivers/net/wireless/ath/ath9k/wmi.c 		if (!skb) {
skb               159 drivers/net/wireless/ath/ath9k/wmi.c 		hdr = (struct wmi_cmd_hdr *) skb->data;
skb               161 drivers/net/wireless/ath/ath9k/wmi.c 		wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
skb               186 drivers/net/wireless/ath/ath9k/wmi.c 		kfree_skb(skb);
skb               200 drivers/net/wireless/ath/ath9k/wmi.c static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
skb               202 drivers/net/wireless/ath/ath9k/wmi.c 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
skb               205 drivers/net/wireless/ath/ath9k/wmi.c 		memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);
skb               210 drivers/net/wireless/ath/ath9k/wmi.c static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
skb               221 drivers/net/wireless/ath/ath9k/wmi.c 	hdr = (struct wmi_cmd_hdr *) skb->data;
skb               226 drivers/net/wireless/ath/ath9k/wmi.c 		__skb_queue_tail(&wmi->wmi_event_queue, skb);
skb               241 drivers/net/wireless/ath/ath9k/wmi.c 	ath9k_wmi_rsp_callback(wmi, skb);
skb               244 drivers/net/wireless/ath/ath9k/wmi.c 	kfree_skb(skb);
skb               247 drivers/net/wireless/ath/ath9k/wmi.c static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
skb               250 drivers/net/wireless/ath/ath9k/wmi.c 	kfree_skb(skb);
skb               278 drivers/net/wireless/ath/ath9k/wmi.c 			       struct sk_buff *skb,
skb               284 drivers/net/wireless/ath/ath9k/wmi.c 	hdr = skb_push(skb, sizeof(struct wmi_cmd_hdr));
skb               292 drivers/net/wireless/ath/ath9k/wmi.c 	return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
skb               304 drivers/net/wireless/ath/ath9k/wmi.c 	struct sk_buff *skb;
skb               311 drivers/net/wireless/ath/ath9k/wmi.c 	skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
skb               312 drivers/net/wireless/ath/ath9k/wmi.c 	if (!skb)
skb               315 drivers/net/wireless/ath/ath9k/wmi.c 	skb_reserve(skb, headroom);
skb               318 drivers/net/wireless/ath/ath9k/wmi.c 		skb_put_data(skb, cmd_buf, cmd_len);
skb               333 drivers/net/wireless/ath/ath9k/wmi.c 	ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
skb               352 drivers/net/wireless/ath/ath9k/wmi.c 	kfree_skb(skb);
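
ath9k's WMI command issue wraps an already-filled payload: the command header is pushed onto the front, stamped with a big-endian command ID and an incremented sequence number, and sent on the control endpoint. This mirrors ath9k_wmi_cmd_issue() above, with the locking around the sequence counter elided:

#include <linux/skbuff.h>

static int example_wmi_issue(struct wmi *wmi, struct sk_buff *skb,
			     enum wmi_cmd_id cmd_id)
{
	struct wmi_cmd_hdr *hdr;

	/* Header goes in front of the payload the caller already wrote. */
	hdr = skb_push(skb, sizeof(struct wmi_cmd_hdr));
	hdr->command_id = cpu_to_be16(cmd_id);
	hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);

	return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
}
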
skb                51 drivers/net/wireless/ath/ath9k/xmit.c 			       struct ath_atx_tid *tid, struct sk_buff *skb);
skb                52 drivers/net/wireless/ath/ath9k/xmit.c static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb                69 drivers/net/wireless/ath/ath9k/xmit.c 					   struct sk_buff *skb);
skb                70 drivers/net/wireless/ath/ath9k/xmit.c static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
skb                84 drivers/net/wireless/ath/ath9k/xmit.c static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
skb                86 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb                91 drivers/net/wireless/ath/ath9k/xmit.c 		ieee80211_tx_status(hw, skb);
skb                98 drivers/net/wireless/ath/ath9k/xmit.c 	dev_kfree_skb(skb);
skb               106 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb;
skb               112 drivers/net/wireless/ath/ath9k/xmit.c 	while ((skb = __skb_dequeue(&q)))
skb               113 drivers/net/wireless/ath/ath9k/xmit.c 		ath_tx_status(hw, skb);
skb               140 drivers/net/wireless/ath/ath9k/xmit.c static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
skb               142 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               165 drivers/net/wireless/ath/ath9k/xmit.c 			     struct sk_buff *skb)
skb               167 drivers/net/wireless/ath/ath9k/xmit.c 	struct ath_frame_info *fi = get_frame_info(skb);
skb               180 drivers/net/wireless/ath/ath9k/xmit.c ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
skb               182 drivers/net/wireless/ath/ath9k/xmit.c 	u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
skb               196 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb;
skb               200 drivers/net/wireless/ath/ath9k/xmit.c 	skb = ieee80211_tx_dequeue(hw, txq);
skb               201 drivers/net/wireless/ath/ath9k/xmit.c 	if (!skb)
skb               204 drivers/net/wireless/ath/ath9k/xmit.c 	ret = ath_tx_prepare(hw, skb, &txctl);
skb               206 drivers/net/wireless/ath/ath9k/xmit.c 		ieee80211_free_txskb(hw, skb);
skb               210 drivers/net/wireless/ath/ath9k/xmit.c 	q = skb_get_queue_mapping(skb);
skb               212 drivers/net/wireless/ath/ath9k/xmit.c 		fi = get_frame_info(skb);
skb               217 drivers/net/wireless/ath/ath9k/xmit.c 	*skbuf = skb;
skb               222 drivers/net/wireless/ath/ath9k/xmit.c 			   struct sk_buff **skb)
skb               225 drivers/net/wireless/ath/ath9k/xmit.c 	*skb = __skb_dequeue(&tid->retry_q);
skb               226 drivers/net/wireless/ath/ath9k/xmit.c 	if (!*skb)
skb               227 drivers/net/wireless/ath/ath9k/xmit.c 		ret = ath_tid_pull(tid, skb);
skb               235 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb;
skb               246 drivers/net/wireless/ath/ath9k/xmit.c 	while ((skb = __skb_dequeue(&tid->retry_q))) {
skb               247 drivers/net/wireless/ath/ath9k/xmit.c 		fi = get_frame_info(skb);
skb               250 drivers/net/wireless/ath/ath9k/xmit.c 			ath_txq_skb_done(sc, txq, skb);
skb               251 drivers/net/wireless/ath/ath9k/xmit.c 			ieee80211_free_txskb(sc->hw, skb);
skb               320 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb;
skb               330 drivers/net/wireless/ath/ath9k/xmit.c 	while ((ret = ath_tid_dequeue(tid, &skb)) == 0) {
skb               331 drivers/net/wireless/ath/ath9k/xmit.c 		fi = get_frame_info(skb);
skb               335 drivers/net/wireless/ath/ath9k/xmit.c 			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq, NULL);
skb               345 drivers/net/wireless/ath/ath9k/xmit.c 			     struct sk_buff *skb, int count)
skb               347 drivers/net/wireless/ath/ath9k/xmit.c 	struct ath_frame_info *fi = get_frame_info(skb);
skb               358 drivers/net/wireless/ath/ath9k/xmit.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               446 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb;
skb               462 drivers/net/wireless/ath/ath9k/xmit.c 	skb = bf->bf_mpdu;
skb               463 drivers/net/wireless/ath/ath9k/xmit.c 	tx_info = IEEE80211_SKB_CB(skb);
skb               530 drivers/net/wireless/ath/ath9k/xmit.c 		skb = bf->bf_mpdu;
skb               531 drivers/net/wireless/ath/ath9k/xmit.c 		tx_info = IEEE80211_SKB_CB(skb);
skb               532 drivers/net/wireless/ath/ath9k/xmit.c 		fi = get_frame_info(skb);
skb               623 drivers/net/wireless/ath/ath9k/xmit.c 			__skb_queue_tail(&bf_pending, skb);
skb               732 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb;
skb               737 drivers/net/wireless/ath/ath9k/xmit.c 	skb = bf->bf_mpdu;
skb               738 drivers/net/wireless/ath/ath9k/xmit.c 	tx_info = IEEE80211_SKB_CB(skb);
skb               755 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb;
skb               763 drivers/net/wireless/ath/ath9k/xmit.c 	skb = bf->bf_mpdu;
skb               764 drivers/net/wireless/ath/ath9k/xmit.c 	tx_info = IEEE80211_SKB_CB(skb);
skb               900 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb, *first_skb = NULL;
skb               905 drivers/net/wireless/ath/ath9k/xmit.c 		ret = ath_tid_dequeue(tid, &skb);
skb               909 drivers/net/wireless/ath/ath9k/xmit.c 		fi = get_frame_info(skb);
skb               912 drivers/net/wireless/ath/ath9k/xmit.c 			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
skb               917 drivers/net/wireless/ath/ath9k/xmit.c 			ath_txq_skb_done(sc, txq, skb);
skb               918 drivers/net/wireless/ath/ath9k/xmit.c 			ieee80211_free_txskb(sc->hw, skb);
skb               925 drivers/net/wireless/ath/ath9k/xmit.c 		tx_info = IEEE80211_SKB_CB(skb);
skb               947 drivers/net/wireless/ath/ath9k/xmit.c 			__skb_queue_tail(&tid->retry_q, skb);
skb               952 drivers/net/wireless/ath/ath9k/xmit.c 			if (!skb_queue_is_first(&tid->retry_q, skb) &&
skb               953 drivers/net/wireless/ath/ath9k/xmit.c 			    !WARN_ON(skb == first_skb)) {
skb               955 drivers/net/wireless/ath/ath9k/xmit.c 					first_skb = skb;
skb               994 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb;
skb              1002 drivers/net/wireless/ath/ath9k/xmit.c 		skb = bf->bf_mpdu;
skb              1003 drivers/net/wireless/ath/ath9k/xmit.c 		fi = get_frame_info(skb);
skb              1134 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb;
skb              1142 drivers/net/wireless/ath/ath9k/xmit.c 	skb = bf->bf_mpdu;
skb              1143 drivers/net/wireless/ath/ath9k/xmit.c 	fi = get_frame_info(skb);
skb              1144 drivers/net/wireless/ath/ath9k/xmit.c 	info = IEEE80211_SKB_CB(skb);
skb              1209 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb;
skb              1219 drivers/net/wireless/ath/ath9k/xmit.c 	skb = bf->bf_mpdu;
skb              1220 drivers/net/wireless/ath/ath9k/xmit.c 	tx_info = IEEE80211_SKB_CB(skb);
skb              1222 drivers/net/wireless/ath/ath9k/xmit.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              1319 drivers/net/wireless/ath/ath9k/xmit.c static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
skb              1325 drivers/net/wireless/ath/ath9k/xmit.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              1357 drivers/net/wireless/ath/ath9k/xmit.c 		struct sk_buff *skb = bf->bf_mpdu;
skb              1358 drivers/net/wireless/ath/ath9k/xmit.c 		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb              1359 drivers/net/wireless/ath/ath9k/xmit.c 		struct ath_frame_info *fi = get_frame_info(skb);
skb              1362 drivers/net/wireless/ath/ath9k/xmit.c 		info.type = get_hw_packet_type(skb);
skb              1409 drivers/net/wireless/ath/ath9k/xmit.c 		info.buf_len[0] = skb->len;
skb              2044 drivers/net/wireless/ath/ath9k/xmit.c 			       struct ath_atx_tid *tid, struct sk_buff *skb)
skb              2046 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb              2047 drivers/net/wireless/ath/ath9k/xmit.c 	struct ath_frame_info *fi = get_frame_info(skb);
skb              2068 drivers/net/wireless/ath/ath9k/xmit.c 			     struct sk_buff *skb,
skb              2071 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb              2073 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2075 drivers/net/wireless/ath/ath9k/xmit.c 	struct ath_frame_info *fi = get_frame_info(skb);
skb              2091 drivers/net/wireless/ath/ath9k/xmit.c 	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
skb              2147 drivers/net/wireless/ath/ath9k/xmit.c 					   struct sk_buff *skb)
skb              2150 drivers/net/wireless/ath/ath9k/xmit.c 	struct ath_frame_info *fi = get_frame_info(skb);
skb              2151 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2178 drivers/net/wireless/ath/ath9k/xmit.c 	bf->bf_mpdu = skb;
skb              2180 drivers/net/wireless/ath/ath9k/xmit.c 	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
skb              2181 drivers/net/wireless/ath/ath9k/xmit.c 					 skb->len, DMA_TO_DEVICE);
skb              2196 drivers/net/wireless/ath/ath9k/xmit.c void ath_assign_seq(struct ath_common *common, struct sk_buff *skb)
skb              2198 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              2199 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              2218 drivers/net/wireless/ath/ath9k/xmit.c static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
skb              2221 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              2222 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              2227 drivers/net/wireless/ath/ath9k/xmit.c 	int frmlen = skb->len + FCS_LEN;
skb              2241 drivers/net/wireless/ath/ath9k/xmit.c 	ath_assign_seq(ath9k_hw_common(sc->sc_ah), skb);
skb              2251 drivers/net/wireless/ath/ath9k/xmit.c 	if (padsize && skb->len > padpos) {
skb              2252 drivers/net/wireless/ath/ath9k/xmit.c 		if (skb_headroom(skb) < padsize)
skb              2255 drivers/net/wireless/ath/ath9k/xmit.c 		skb_push(skb, padsize);
skb              2256 drivers/net/wireless/ath/ath9k/xmit.c 		memmove(skb->data, skb->data + padsize, padpos);
skb              2259 drivers/net/wireless/ath/ath9k/xmit.c 	setup_frame_info(hw, sta, skb, frmlen);
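
Both ath_tx_prepare() here and ath9k_tx99_send() further down pad the 802.11 header out to a 4-byte boundary before DMA: grow the skb at the front, then slide only the header forward so the pad bytes end up between header and body. A sketch of that step, assuming ieee80211_hdrlen() from linux/ieee80211.h supplies the header length as it does in mac80211:

#include <linux/errno.h>
#include <linux/ieee80211.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int align_80211_body(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int padpos = ieee80211_hdrlen(hdr->frame_control);
	int padsize = padpos & 3;

	if (!padsize || skb->len <= padpos)
		return 0;			/* nothing to pad */
	if (skb_headroom(skb) < padsize)
		return -ENOMEM;			/* caller drops the frame */

	skb_push(skb, padsize);
	/* Move only the header; the pad bytes stay between header and body. */
	memmove(skb->data, skb->data + padsize, padpos);
	return 0;
}

The completion path (ath_tx_complete below) undoes this with the mirror-image memmove() plus skb_pull() before the frame is reported back to mac80211.
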
skb              2265 drivers/net/wireless/ath/ath9k/xmit.c int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
skb              2268 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              2271 drivers/net/wireless/ath/ath9k/xmit.c 	struct ath_frame_info *fi = get_frame_info(skb);
skb              2282 drivers/net/wireless/ath/ath9k/xmit.c 	ret = ath_tx_prepare(hw, skb, txctl);
skb              2291 drivers/net/wireless/ath/ath9k/xmit.c 	q = skb_get_queue_mapping(skb);
skb              2298 drivers/net/wireless/ath/ath9k/xmit.c 		tid = ath_get_skb_tid(sc, an, skb);
skb              2307 drivers/net/wireless/ath/ath9k/xmit.c 	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
skb              2309 drivers/net/wireless/ath/ath9k/xmit.c 		ath_txq_skb_done(sc, txq, skb);
skb              2311 drivers/net/wireless/ath/ath9k/xmit.c 			dev_kfree_skb_any(skb);
skb              2313 drivers/net/wireless/ath/ath9k/xmit.c 			ieee80211_free_txskb(sc->hw, skb);
skb              2323 drivers/net/wireless/ath/ath9k/xmit.c 	ath_tx_send_normal(sc, txq, tid, skb);
skb              2332 drivers/net/wireless/ath/ath9k/xmit.c 		 struct sk_buff *skb)
skb              2350 drivers/net/wireless/ath/ath9k/xmit.c 		struct ath_frame_info *fi = get_frame_info(skb);
skb              2352 drivers/net/wireless/ath/ath9k/xmit.c 		if (ath_tx_prepare(hw, skb, &txctl))
skb              2355 drivers/net/wireless/ath/ath9k/xmit.c 		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
skb              2368 drivers/net/wireless/ath/ath9k/xmit.c 		skb = NULL;
skb              2373 drivers/net/wireless/ath/ath9k/xmit.c 		skb = ieee80211_get_buffered_bc(hw, vif);
skb              2374 drivers/net/wireless/ath/ath9k/xmit.c 	} while(skb);
skb              2376 drivers/net/wireless/ath/ath9k/xmit.c 	if (skb)
skb              2377 drivers/net/wireless/ath/ath9k/xmit.c 		ieee80211_free_txskb(hw, skb);
skb              2397 drivers/net/wireless/ath/ath9k/xmit.c static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb              2401 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb              2403 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
skb              2407 drivers/net/wireless/ath/ath9k/xmit.c 	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
skb              2422 drivers/net/wireless/ath/ath9k/xmit.c 		if (padsize && skb->len>padpos+padsize) {
skb              2427 drivers/net/wireless/ath/ath9k/xmit.c 			memmove(skb->data + padsize, skb->data, padpos);
skb              2428 drivers/net/wireless/ath/ath9k/xmit.c 			skb_pull(skb, padsize);
skb              2444 drivers/net/wireless/ath/ath9k/xmit.c 	ath_txq_skb_done(sc, txq, skb);
skb              2446 drivers/net/wireless/ath/ath9k/xmit.c 	__skb_queue_tail(&txq->complete_q, skb);
skb              2454 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb = bf->bf_mpdu;
skb              2455 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb              2465 drivers/net/wireless/ath/ath9k/xmit.c 	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
skb              2474 drivers/net/wireless/ath/ath9k/xmit.c 			dev_kfree_skb_any(skb);
skb              2479 drivers/net/wireless/ath/ath9k/xmit.c 		ath_tx_complete(sc, skb, tx_flags, txq, sta);
skb              2499 drivers/net/wireless/ath/ath9k/xmit.c 	struct sk_buff *skb = bf->bf_mpdu;
skb              2500 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2501 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb              2842 drivers/net/wireless/ath/ath9k/xmit.c int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
skb              2845 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              2846 drivers/net/wireless/ath/ath9k/xmit.c 	struct ath_frame_info *fi = get_frame_info(skb);
skb              2854 drivers/net/wireless/ath/ath9k/xmit.c 	if (padsize && skb->len > padpos) {
skb              2855 drivers/net/wireless/ath/ath9k/xmit.c 		if (skb_headroom(skb) < padsize) {
skb              2861 drivers/net/wireless/ath/ath9k/xmit.c 		skb_push(skb, padsize);
skb              2862 drivers/net/wireless/ath/ath9k/xmit.c 		memmove(skb->data, skb->data + padsize, padpos);
skb              2866 drivers/net/wireless/ath/ath9k/xmit.c 	fi->framelen = skb->len + FCS_LEN;
skb              2869 drivers/net/wireless/ath/ath9k/xmit.c 	bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
skb              2880 drivers/net/wireless/ath/ath9k/xmit.c 	ath_tx_send_normal(sc, txctl->txq, NULL, skb);
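
ath_tx_setup_buffer() maps skb->data for device access and the completion path unmaps it before the skb is handed back to mac80211; a frame whose mapping fails must be freed by the caller, never given to the hardware. A sketch of that map/unmap contract; struct my_buf is an illustrative stand-in for ath_buf:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct my_buf {				/* illustrative stand-in for ath_buf */
	struct sk_buff *mpdu;
	dma_addr_t addr;
};

static int my_map_tx(struct device *dev, struct my_buf *bf,
		     struct sk_buff *skb)
{
	bf->mpdu = skb;
	bf->addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, bf->addr)) {
		bf->mpdu = NULL;
		return -EIO;		/* skb still owned by the caller */
	}
	return 0;
}

static void my_unmap_tx(struct device *dev, struct my_buf *bf)
{
	/* Must precede any CPU access to skb->data on completion. */
	dma_unmap_single(dev, bf->addr, bf->mpdu->len, DMA_TO_DEVICE);
}
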
skb               474 drivers/net/wireless/ath/carl9170/carl9170.h 	struct sk_buff *skb;
skb               542 drivers/net/wireless/ath/carl9170/carl9170.h void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb);
skb               577 drivers/net/wireless/ath/carl9170/carl9170.h 		    struct sk_buff *skb);
skb               581 drivers/net/wireless/ath/carl9170/carl9170.h void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
skb               583 drivers/net/wireless/ath/carl9170/carl9170.h void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb);
skb               584 drivers/net/wireless/ath/carl9170/carl9170.h void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb);
skb               586 drivers/net/wireless/ath/carl9170/carl9170.h void carl9170_tx_get_skb(struct sk_buff *skb);
skb               587 drivers/net/wireless/ath/carl9170/carl9170.h int carl9170_tx_put_skb(struct sk_buff *skb);
skb               615 drivers/net/wireless/ath/carl9170/carl9170.h static inline struct ieee80211_hdr *carl9170_get_hdr(struct sk_buff *skb)
skb               618 drivers/net/wireless/ath/carl9170/carl9170.h 		skb->data)->frame_data;
skb               626 drivers/net/wireless/ath/carl9170/carl9170.h static inline u16 carl9170_get_seq(struct sk_buff *skb)
skb               628 drivers/net/wireless/ath/carl9170/carl9170.h 	return get_seq_h(carl9170_get_hdr(skb));
skb               636 drivers/net/wireless/ath/carl9170/carl9170.h static inline u16 carl9170_get_tid(struct sk_buff *skb)
skb               638 drivers/net/wireless/ath/carl9170/carl9170.h 	return get_tid_h(carl9170_get_hdr(skb));
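
carl9170_get_seq() and carl9170_get_tid() above locate the 802.11 header inside the firmware superframe and read the sequence number and QoS TID. A sketch of what such helpers typically extract, assuming the standard field layout from linux/ieee80211.h (the get_seq_h()/get_tid_h() bodies themselves are not part of this listing):

#include <linux/ieee80211.h>

/* Sequence number: upper 12 bits of the sequence-control field. */
static inline u16 hdr_get_seq(struct ieee80211_hdr *hdr)
{
	return le16_to_cpu(hdr->seq_ctrl) >> 4;
}

/* TID: low four bits of the QoS control field; QoS data frames only. */
static inline u8 hdr_get_tid(struct ieee80211_hdr *hdr)
{
	return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
}
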
skb               284 drivers/net/wireless/ath/carl9170/debug.c 	struct sk_buff *skb, const char *prefix, char *buf,
skb               287 drivers/net/wireless/ath/carl9170/debug.c 	struct _carl9170_tx_superframe *txc = (void *) skb->data;
skb               288 drivers/net/wireless/ath/carl9170/debug.c 	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
skb               293 drivers/net/wireless/ath/carl9170/debug.c 	    "pc:%.8x, to:%d ms\n", prefix, skb, txc->s.cookie,
skb               304 drivers/net/wireless/ath/carl9170/debug.c 	struct sk_buff *skb;
skb               338 drivers/net/wireless/ath/carl9170/debug.c 		skb_queue_walk(&iter->queue, skb) {
skb               342 drivers/net/wireless/ath/carl9170/debug.c 			carl9170_debugfs_format_frame(ar, skb, prefix, buf,
skb               359 drivers/net/wireless/ath/carl9170/debug.c 	struct sk_buff *skb;
skb               364 drivers/net/wireless/ath/carl9170/debug.c 	skb_queue_walk(queue, skb) {
skb               366 drivers/net/wireless/ath/carl9170/debug.c 		carl9170_debugfs_format_frame(ar, skb, prefix, buf,
skb               207 drivers/net/wireless/ath/carl9170/main.c 		struct sk_buff *skb;
skb               211 drivers/net/wireless/ath/carl9170/main.c 		while ((skb = __skb_dequeue(&tid_info->queue)))
skb               212 drivers/net/wireless/ath/carl9170/main.c 			carl9170_tx_status(ar, skb, false);
skb               230 drivers/net/wireless/ath/carl9170/main.c 			struct sk_buff *skb;
skb               232 drivers/net/wireless/ath/carl9170/main.c 			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
skb               235 drivers/net/wireless/ath/carl9170/main.c 				info = IEEE80211_SKB_CB(skb);
skb               239 drivers/net/wireless/ath/carl9170/main.c 				carl9170_tx_status(ar, skb, false);
skb               253 drivers/net/wireless/ath/carl9170/main.c 	struct sk_buff *skb;
skb               264 drivers/net/wireless/ath/carl9170/main.c 			while ((skb = __skb_dequeue(&tid_info->queue)))
skb               265 drivers/net/wireless/ath/carl9170/main.c 				__skb_queue_tail(&free, skb);
skb               272 drivers/net/wireless/ath/carl9170/main.c 	while ((skb = __skb_dequeue(&free)))
skb               273 drivers/net/wireless/ath/carl9170/main.c 		carl9170_tx_status(ar, skb, false);
skb               289 drivers/net/wireless/ath/carl9170/main.c 			struct sk_buff *skb;
skb               291 drivers/net/wireless/ath/carl9170/main.c 			skb = skb_peek(&ar->tx_status[i]);
skb               292 drivers/net/wireless/ath/carl9170/main.c 			carl9170_tx_get_skb(skb);
skb               294 drivers/net/wireless/ath/carl9170/main.c 			carl9170_tx_drop(ar, skb);
skb               296 drivers/net/wireless/ath/carl9170/main.c 			carl9170_tx_put_skb(skb);
skb              1777 drivers/net/wireless/ath/carl9170/main.c 	struct sk_buff *skb;
skb              1786 drivers/net/wireless/ath/carl9170/main.c 	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
skb              1787 drivers/net/wireless/ath/carl9170/main.c 	if (!skb)
skb              1796 drivers/net/wireless/ath/carl9170/main.c 	ar->rx_failover = skb;
skb              1877 drivers/net/wireless/ath/carl9170/main.c 	kfree_skb(skb);
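
The main.c flush paths above drain per-TID queues into a local list while holding the lock and call carl9170_tx_status() only after dropping it, keeping the completion work out of the critical section. A sketch of that splice-then-complete idiom; flush_queue() and the callback are illustrative names, and BH locking is assumed:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void flush_queue(spinlock_t *lock, struct sk_buff_head *pending,
			void (*complete)(struct sk_buff *skb))
{
	struct sk_buff_head free;
	struct sk_buff *skb;

	__skb_queue_head_init(&free);

	spin_lock_bh(lock);
	while ((skb = __skb_dequeue(pending)))	/* unlink under the lock */
		__skb_queue_tail(&free, skb);
	spin_unlock_bh(lock);

	while ((skb = __skb_dequeue(&free)))	/* complete outside it */
		complete(skb);
}
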
skb               464 drivers/net/wireless/ath/carl9170/rx.c 	struct sk_buff *skb;
skb               481 drivers/net/wireless/ath/carl9170/rx.c 	skb = dev_alloc_skb(len + reserved);
skb               482 drivers/net/wireless/ath/carl9170/rx.c 	if (likely(skb)) {
skb               483 drivers/net/wireless/ath/carl9170/rx.c 		skb_reserve(skb, reserved);
skb               484 drivers/net/wireless/ath/carl9170/rx.c 		skb_put_data(skb, buf, len);
skb               487 drivers/net/wireless/ath/carl9170/rx.c 	return skb;
skb               591 drivers/net/wireless/ath/carl9170/rx.c 		struct sk_buff *entry_skb = entry->skb;
skb               662 drivers/net/wireless/ath/carl9170/rx.c 	struct sk_buff *skb;
skb               679 drivers/net/wireless/ath/carl9170/rx.c 	skb = carl9170_rx_copy_data(buf, len);
skb               680 drivers/net/wireless/ath/carl9170/rx.c 	if (!skb)
skb               683 drivers/net/wireless/ath/carl9170/rx.c 	memcpy(IEEE80211_SKB_RXCB(skb), status, sizeof(*status));
skb               684 drivers/net/wireless/ath/carl9170/rx.c 	ieee80211_rx(ar->hw, skb);
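
carl9170_rx_copy_data() copies each frame out of the shared USB transfer buffer into a private skb, and the caller stamps the RX status into the control block via IEEE80211_SKB_RXCB() before ieee80211_rx(). A sketch of that copy-and-deliver flow; NET_IP_ALIGN here stands in for the driver's own "reserved" computation:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/mac80211.h>

static void deliver_copied(struct ieee80211_hw *hw, const void *buf,
			   unsigned int len,
			   const struct ieee80211_rx_status *status)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(len + NET_IP_ALIGN);
	if (unlikely(!skb))
		return;				/* frame is dropped */

	skb_reserve(skb, NET_IP_ALIGN);		/* align the payload */
	skb_put_data(skb, buf, len);		/* copy out of the URB buffer */

	/* The RX status travels in skb->cb. */
	memcpy(IEEE80211_SKB_RXCB(skb), status, sizeof(*status));
	ieee80211_rx(hw, skb);
}
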
skb                65 drivers/net/wireless/ath/carl9170/tx.c 					      struct sk_buff *skb)
skb                67 drivers/net/wireless/ath/carl9170/tx.c 	return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
skb                76 drivers/net/wireless/ath/carl9170/tx.c static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
skb                83 drivers/net/wireless/ath/carl9170/tx.c 	queue = skb_get_queue_mapping(skb);
skb               108 drivers/net/wireless/ath/carl9170/tx.c 						   struct sk_buff *skb)
skb               110 drivers/net/wireless/ath/carl9170/tx.c 	struct _carl9170_tx_superframe *super = (void *) skb->data;
skb               138 drivers/net/wireless/ath/carl9170/tx.c static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb)
skb               144 drivers/net/wireless/ath/carl9170/tx.c 	sta = __carl9170_get_tx_sta(ar, skb);
skb               156 drivers/net/wireless/ath/carl9170/tx.c static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
skb               160 drivers/net/wireless/ath/carl9170/tx.c 	queue = skb_get_queue_mapping(skb);
skb               190 drivers/net/wireless/ath/carl9170/tx.c static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
skb               192 drivers/net/wireless/ath/carl9170/tx.c 	struct _carl9170_tx_superframe *super = (void *) skb->data;
skb               198 drivers/net/wireless/ath/carl9170/tx.c 	chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
skb               213 drivers/net/wireless/ath/carl9170/tx.c 	super = (void *) skb->data;
skb               226 drivers/net/wireless/ath/carl9170/tx.c static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
skb               228 drivers/net/wireless/ath/carl9170/tx.c 	struct _carl9170_tx_superframe *super = (void *) skb->data;
skb               253 drivers/net/wireless/ath/carl9170/tx.c 	atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
skb               267 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *skb;
skb               272 drivers/net/wireless/ath/carl9170/tx.c 	skb = container_of((void *) txinfo, struct sk_buff, cb);
skb               295 drivers/net/wireless/ath/carl9170/tx.c 			super = (void *)skb->data;
skb               316 drivers/net/wireless/ath/carl9170/tx.c 			ieee80211_free_txskb(ar->hw, skb);
skb               326 drivers/net/wireless/ath/carl9170/tx.c 	skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
skb               327 drivers/net/wireless/ath/carl9170/tx.c 	ieee80211_tx_status_irqsafe(ar->hw, skb);
skb               330 drivers/net/wireless/ath/carl9170/tx.c void carl9170_tx_get_skb(struct sk_buff *skb)
skb               333 drivers/net/wireless/ath/carl9170/tx.c 		(IEEE80211_SKB_CB(skb))->rate_driver_data;
skb               337 drivers/net/wireless/ath/carl9170/tx.c int carl9170_tx_put_skb(struct sk_buff *skb)
skb               340 drivers/net/wireless/ath/carl9170/tx.c 		(IEEE80211_SKB_CB(skb))->rate_driver_data;
skb               379 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
skb               381 drivers/net/wireless/ath/carl9170/tx.c 	struct _carl9170_tx_superframe *super = (void *) skb->data;
skb               393 drivers/net/wireless/ath/carl9170/tx.c 	sta = __carl9170_get_tx_sta(ar, skb);
skb               437 drivers/net/wireless/ath/carl9170/tx.c static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
skb               440 drivers/net/wireless/ath/carl9170/tx.c 	struct _carl9170_tx_superframe *super = (void *) skb->data;
skb               455 drivers/net/wireless/ath/carl9170/tx.c 		int queue = skb_get_queue_mapping(skb);
skb               459 drivers/net/wireless/ath/carl9170/tx.c 			if (entry->skb == skb) {
skb               476 drivers/net/wireless/ath/carl9170/tx.c void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
skb               481 drivers/net/wireless/ath/carl9170/tx.c 	carl9170_tx_accounting_free(ar, skb);
skb               483 drivers/net/wireless/ath/carl9170/tx.c 	txinfo = IEEE80211_SKB_CB(skb);
skb               485 drivers/net/wireless/ath/carl9170/tx.c 	carl9170_tx_bar_status(ar, skb, txinfo);
skb               493 drivers/net/wireless/ath/carl9170/tx.c 		carl9170_tx_status_process_ampdu(ar, skb, txinfo);
skb               495 drivers/net/wireless/ath/carl9170/tx.c 	carl9170_tx_ps_unblock(ar, skb);
skb               496 drivers/net/wireless/ath/carl9170/tx.c 	carl9170_tx_put_skb(skb);
skb               500 drivers/net/wireless/ath/carl9170/tx.c void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
skb               502 drivers/net/wireless/ath/carl9170/tx.c 	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
skb               509 drivers/net/wireless/ath/carl9170/tx.c 	if (carl9170_tx_put_skb(skb))
skb               516 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *skb;
skb               519 drivers/net/wireless/ath/carl9170/tx.c 	skb_queue_walk(queue, skb) {
skb               520 drivers/net/wireless/ath/carl9170/tx.c 		struct _carl9170_tx_superframe *txc = (void *) skb->data;
skb               525 drivers/net/wireless/ath/carl9170/tx.c 		__skb_unlink(skb, queue);
skb               528 drivers/net/wireless/ath/carl9170/tx.c 		carl9170_release_dev_space(ar, skb);
skb               529 drivers/net/wireless/ath/carl9170/tx.c 		return skb;
skb               561 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *skb;
skb               569 drivers/net/wireless/ath/carl9170/tx.c 		skb = skb_peek(&ar->tx_status[i]);
skb               571 drivers/net/wireless/ath/carl9170/tx.c 		if (!skb)
skb               574 drivers/net/wireless/ath/carl9170/tx.c 		txinfo = IEEE80211_SKB_CB(skb);
skb               606 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *skb;
skb               617 drivers/net/wireless/ath/carl9170/tx.c 		skb = skb_peek(&iter->queue);
skb               618 drivers/net/wireless/ath/carl9170/tx.c 		if (!skb)
skb               621 drivers/net/wireless/ath/carl9170/tx.c 		txinfo = IEEE80211_SKB_CB(skb);
skb               661 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *skb;
skb               668 drivers/net/wireless/ath/carl9170/tx.c 	skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
skb               669 drivers/net/wireless/ath/carl9170/tx.c 	if (!skb) {
skb               677 drivers/net/wireless/ath/carl9170/tx.c 	txinfo = IEEE80211_SKB_CB(skb);
skb               686 drivers/net/wireless/ath/carl9170/tx.c 	carl9170_tx_status(ar, skb, success);
skb               874 drivers/net/wireless/ath/carl9170/tx.c 				  struct sk_buff *skb)
skb               881 drivers/net/wireless/ath/carl9170/tx.c 	info = IEEE80211_SKB_CB(skb);
skb               883 drivers/net/wireless/ath/carl9170/tx.c 	ieee80211_get_tx_rates(vif, sta, skb,
skb               890 drivers/net/wireless/ath/carl9170/tx.c 				      struct sk_buff *skb)
skb               894 drivers/net/wireless/ath/carl9170/tx.c 	struct _carl9170_tx_superframe *txc = (void *) skb->data;
skb               899 drivers/net/wireless/ath/carl9170/tx.c 	info = IEEE80211_SKB_CB(skb);
skb               960 drivers/net/wireless/ath/carl9170/tx.c 			       struct sk_buff *skb)
skb               982 drivers/net/wireless/ath/carl9170/tx.c 	hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];
skb               984 drivers/net/wireless/ath/carl9170/tx.c 	hdr = (void *)skb->data;
skb               985 drivers/net/wireless/ath/carl9170/tx.c 	info = IEEE80211_SKB_CB(skb);
skb               986 drivers/net/wireless/ath/carl9170/tx.c 	len = skb->len;
skb               997 drivers/net/wireless/ath/carl9170/tx.c 	txc = skb_push(skb, sizeof(*txc));
skb              1067 drivers/net/wireless/ath/carl9170/tx.c 	txc->s.len = cpu_to_le16(skb->len);
skb              1078 drivers/net/wireless/ath/carl9170/tx.c 	skb_pull(skb, sizeof(*txc));
skb              1082 drivers/net/wireless/ath/carl9170/tx.c static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
skb              1086 drivers/net/wireless/ath/carl9170/tx.c 	super = (void *) skb->data;
skb              1090 drivers/net/wireless/ath/carl9170/tx.c static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
skb              1095 drivers/net/wireless/ath/carl9170/tx.c 	super = (void *) skb->data;
skb              1131 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *skb, *first;
skb              1179 drivers/net/wireless/ath/carl9170/tx.c 		while ((skb = skb_peek(&tid_info->queue))) {
skb              1181 drivers/net/wireless/ath/carl9170/tx.c 			if (unlikely(carl9170_get_seq(skb) != seq))
skb              1195 drivers/net/wireless/ath/carl9170/tx.c 			carl9170_tx_apply_rateset(ar, tx_info_first, skb);
skb              1199 drivers/net/wireless/ath/carl9170/tx.c 			__skb_unlink(skb, &tid_info->queue);
skb              1201 drivers/net/wireless/ath/carl9170/tx.c 			__skb_queue_tail(&agg, skb);
skb              1245 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *skb;
skb              1252 drivers/net/wireless/ath/carl9170/tx.c 	skb = skb_peek(queue);
skb              1253 drivers/net/wireless/ath/carl9170/tx.c 	if (unlikely(!skb))
skb              1256 drivers/net/wireless/ath/carl9170/tx.c 	if (carl9170_alloc_dev_space(ar, skb))
skb              1259 drivers/net/wireless/ath/carl9170/tx.c 	__skb_unlink(skb, queue);
skb              1262 drivers/net/wireless/ath/carl9170/tx.c 	info = IEEE80211_SKB_CB(skb);
skb              1266 drivers/net/wireless/ath/carl9170/tx.c 	return skb;
skb              1273 drivers/net/wireless/ath/carl9170/tx.c void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
skb              1280 drivers/net/wireless/ath/carl9170/tx.c 	super = (void *)skb->data;
skb              1282 drivers/net/wireless/ath/carl9170/tx.c 		ar9170_qmap[carl9170_get_queue(ar, skb)]);
skb              1286 drivers/net/wireless/ath/carl9170/tx.c static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
skb              1293 drivers/net/wireless/ath/carl9170/tx.c 	sta = __carl9170_get_tx_sta(ar, skb);
skb              1298 drivers/net/wireless/ath/carl9170/tx.c 	tx_info = IEEE80211_SKB_CB(skb);
skb              1309 drivers/net/wireless/ath/carl9170/tx.c 		carl9170_release_dev_space(ar, skb);
skb              1310 drivers/net/wireless/ath/carl9170/tx.c 		carl9170_tx_status(ar, skb, false);
skb              1319 drivers/net/wireless/ath/carl9170/tx.c static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
skb              1321 drivers/net/wireless/ath/carl9170/tx.c 	struct _carl9170_tx_superframe *super = (void *) skb->data;
skb              1325 drivers/net/wireless/ath/carl9170/tx.c 	    skb->len >= sizeof(struct ieee80211_bar)) {
skb              1327 drivers/net/wireless/ath/carl9170/tx.c 		unsigned int queue = skb_get_queue_mapping(skb);
skb              1331 drivers/net/wireless/ath/carl9170/tx.c 			entry->skb = skb;
skb              1341 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *skb;
skb              1354 drivers/net/wireless/ath/carl9170/tx.c 			skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
skb              1355 drivers/net/wireless/ath/carl9170/tx.c 			if (unlikely(!skb))
skb              1358 drivers/net/wireless/ath/carl9170/tx.c 			if (unlikely(carl9170_tx_ps_drop(ar, skb)))
skb              1361 drivers/net/wireless/ath/carl9170/tx.c 			carl9170_bar_check(ar, skb);
skb              1370 drivers/net/wireless/ath/carl9170/tx.c 			skb_queue_tail(&ar->tx_status[q], skb);
skb              1380 drivers/net/wireless/ath/carl9170/tx.c 			carl9170_tx_get_skb(skb);
skb              1382 drivers/net/wireless/ath/carl9170/tx.c 			carl9170_usb_tx(ar, skb);
skb              1395 drivers/net/wireless/ath/carl9170/tx.c 	struct ieee80211_sta *sta, struct sk_buff *skb,
skb              1404 drivers/net/wireless/ath/carl9170/tx.c 	tid = carl9170_get_tid(skb);
skb              1405 drivers/net/wireless/ath/carl9170/tx.c 	seq = carl9170_get_seq(skb);
skb              1430 drivers/net/wireless/ath/carl9170/tx.c 		__skb_queue_tail(&agg->queue, skb);
skb              1439 drivers/net/wireless/ath/carl9170/tx.c 			__skb_queue_after(&agg->queue, iter, skb);
skb              1444 drivers/net/wireless/ath/carl9170/tx.c 	__skb_queue_head(&agg->queue, skb);
skb              1465 drivers/net/wireless/ath/carl9170/tx.c 	carl9170_tx_status(ar, skb, false);
skb              1472 drivers/net/wireless/ath/carl9170/tx.c 		    struct sk_buff *skb)
skb              1483 drivers/net/wireless/ath/carl9170/tx.c 	info = IEEE80211_SKB_CB(skb);
skb              1486 drivers/net/wireless/ath/carl9170/tx.c 	if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
skb              1489 drivers/net/wireless/ath/carl9170/tx.c 	carl9170_tx_accounting(ar, skb);
skb              1508 drivers/net/wireless/ath/carl9170/tx.c 		run = carl9170_tx_ampdu_queue(ar, sta, skb, info);
skb              1513 drivers/net/wireless/ath/carl9170/tx.c 		unsigned int queue = skb_get_queue_mapping(skb);
skb              1515 drivers/net/wireless/ath/carl9170/tx.c 		carl9170_tx_get_rates(ar, vif, sta, skb);
skb              1516 drivers/net/wireless/ath/carl9170/tx.c 		carl9170_tx_apply_rateset(ar, info, skb);
skb              1517 drivers/net/wireless/ath/carl9170/tx.c 		skb_queue_tail(&ar->tx_pending[queue], skb);
skb              1525 drivers/net/wireless/ath/carl9170/tx.c 	ieee80211_free_txskb(ar->hw, skb);
skb              1567 drivers/net/wireless/ath/carl9170/tx.c static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb,
skb              1575 drivers/net/wireless/ath/carl9170/tx.c 	txinfo = IEEE80211_SKB_CB(skb);
skb              1600 drivers/net/wireless/ath/carl9170/tx.c 		SET_VAL(AR9170_MAC_BCN_HT2_LEN, *plcp, skb->len + FCS_LEN);
skb              1603 drivers/net/wireless/ath/carl9170/tx.c 			*plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
skb              1605 drivers/net/wireless/ath/carl9170/tx.c 			*plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
skb              1613 drivers/net/wireless/ath/carl9170/tx.c 	struct sk_buff *skb = NULL;
skb              1625 drivers/net/wireless/ath/carl9170/tx.c 	skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
skb              1628 drivers/net/wireless/ath/carl9170/tx.c 	if (!skb) {
skb              1634 drivers/net/wireless/ath/carl9170/tx.c 	data = (__le32 *)skb->data;
skb              1640 drivers/net/wireless/ath/carl9170/tx.c 	len = roundup(skb->len + FCS_LEN, 4);
skb              1662 drivers/net/wireless/ath/carl9170/tx.c 	ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp);
skb              1671 drivers/net/wireless/ath/carl9170/tx.c 	for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
skb              1690 drivers/net/wireless/ath/carl9170/tx.c 		cvif->beacon = skb;
skb              1698 drivers/net/wireless/ath/carl9170/tx.c 					addr, skb->len + FCS_LEN);
skb              1712 drivers/net/wireless/ath/carl9170/tx.c 	dev_kfree_skb_any(skb);
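
The tx.c completion path matches a firmware cookie against frames parked on a status queue by walking the queue and unlinking the hit (see the carl9170_get_queued_skb excerpts above). A sketch of that walk-and-unlink lookup; struct my_hdr and the cookie placement are illustrative assumptions, not the carl9170 superframe layout:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct my_hdr {				/* illustrative stand-in */
	u8 cookie;
};

static struct sk_buff *find_by_cookie(struct sk_buff_head *queue, u8 cookie)
{
	struct sk_buff *skb;

	spin_lock_bh(&queue->lock);
	skb_queue_walk(queue, skb) {
		struct my_hdr *hdr = (void *)skb->data;

		if (hdr->cookie != cookie)
			continue;

		/* Unlinking mid-walk is safe only because we hold
		 * queue->lock and stop iterating immediately. */
		__skb_unlink(skb, queue);
		spin_unlock_bh(&queue->lock);
		return skb;
	}
	spin_unlock_bh(&queue->lock);
	return NULL;
}
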
skb               372 drivers/net/wireless/ath/carl9170/usb.c 		struct sk_buff *skb = (void *)urb->context;
skb               374 drivers/net/wireless/ath/carl9170/usb.c 		carl9170_tx_drop(ar, skb);
skb               375 drivers/net/wireless/ath/carl9170/usb.c 		carl9170_tx_callback(ar, skb);
skb               562 drivers/net/wireless/ath/carl9170/usb.c 		struct sk_buff *skb = (void *)urb->context;
skb               563 drivers/net/wireless/ath/carl9170/usb.c 		carl9170_tx_drop(ar, skb);
skb               564 drivers/net/wireless/ath/carl9170/usb.c 		carl9170_tx_callback(ar, skb);
skb               715 drivers/net/wireless/ath/carl9170/usb.c void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb)
skb               730 drivers/net/wireless/ath/carl9170/usb.c 		tx_stream = (void *) (skb->data - sizeof(*tx_stream));
skb               732 drivers/net/wireless/ath/carl9170/usb.c 		len = skb->len + sizeof(*tx_stream);
skb               737 drivers/net/wireless/ath/carl9170/usb.c 		data = skb->data;
skb               738 drivers/net/wireless/ath/carl9170/usb.c 		len = skb->len;
skb               743 drivers/net/wireless/ath/carl9170/usb.c 		carl9170_usb_tx_data_complete, skb);
skb               755 drivers/net/wireless/ath/carl9170/usb.c 	carl9170_tx_drop(ar, skb);
skb               756 drivers/net/wireless/ath/carl9170/usb.c 	carl9170_tx_callback(ar, skb);
skb                33 drivers/net/wireless/ath/main.c 	struct sk_buff *skb;
skb                49 drivers/net/wireless/ath/main.c 	skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask);
skb                50 drivers/net/wireless/ath/main.c 	if (skb != NULL) {
skb                51 drivers/net/wireless/ath/main.c 		off = ((unsigned long) skb->data) % common->cachelsz;
skb                53 drivers/net/wireless/ath/main.c 			skb_reserve(skb, common->cachelsz - off);
skb                59 drivers/net/wireless/ath/main.c 	return skb;
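
ath/main.c over-allocates by one cache line and reserves the remainder so skb->data starts cache-line aligned; for example, with a 32-byte line and data % 32 == 10, skb_reserve(skb, 22) moves data to the next boundary, and the extra cachelsz - 1 bytes guarantee the reserve always fits. Restated as a self-contained sketch:

#include <linux/skbuff.h>

static struct sk_buff *alloc_aligned(u32 len, u32 cachelsz, gfp_t gfp)
{
	struct sk_buff *skb = __dev_alloc_skb(len + cachelsz - 1, gfp);
	unsigned long off;

	if (skb) {
		off = (unsigned long)skb->data % cachelsz;
		if (off)
			skb_reserve(skb, cachelsz - off);
	}
	return skb;
}
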
skb               280 drivers/net/wireless/ath/wcn36xx/dxe.c 	struct sk_buff *skb;
skb               282 drivers/net/wireless/ath/wcn36xx/dxe.c 	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
skb               283 drivers/net/wireless/ath/wcn36xx/dxe.c 	if (skb == NULL)
skb               287 drivers/net/wireless/ath/wcn36xx/dxe.c 					 skb_tail_pointer(skb),
skb               292 drivers/net/wireless/ath/wcn36xx/dxe.c 		kfree_skb(skb);
skb               295 drivers/net/wireless/ath/wcn36xx/dxe.c 	ctl->skb = skb;
skb               323 drivers/net/wireless/ath/wcn36xx/dxe.c 		kfree_skb(cur->skb);
skb               331 drivers/net/wireless/ath/wcn36xx/dxe.c 	struct sk_buff *skb;
skb               335 drivers/net/wireless/ath/wcn36xx/dxe.c 	skb = wcn->tx_ack_skb;
skb               339 drivers/net/wireless/ath/wcn36xx/dxe.c 	if (!skb) {
skb               344 drivers/net/wireless/ath/wcn36xx/dxe.c 	info = IEEE80211_SKB_CB(skb);
skb               351 drivers/net/wireless/ath/wcn36xx/dxe.c 	ieee80211_tx_status_irqsafe(wcn->hw, skb);
skb               372 drivers/net/wireless/ath/wcn36xx/dxe.c 		if (ctl->skb &&
skb               375 drivers/net/wireless/ath/wcn36xx/dxe.c 					 ctl->skb->len, DMA_TO_DEVICE);
skb               376 drivers/net/wireless/ath/wcn36xx/dxe.c 			info = IEEE80211_SKB_CB(ctl->skb);
skb               379 drivers/net/wireless/ath/wcn36xx/dxe.c 				ieee80211_free_txskb(wcn->hw, ctl->skb);
skb               387 drivers/net/wireless/ath/wcn36xx/dxe.c 			ctl->skb = NULL;
skb               531 drivers/net/wireless/ath/wcn36xx/dxe.c 	struct sk_buff *skb;
skb               566 drivers/net/wireless/ath/wcn36xx/dxe.c 		skb = ctl->skb;
skb               575 drivers/net/wireless/ath/wcn36xx/dxe.c 			wcn36xx_rx_skb(wcn, skb);
skb               679 drivers/net/wireless/ath/wcn36xx/dxe.c 			 struct sk_buff *skb,
skb               699 drivers/net/wireless/ath/wcn36xx/dxe.c 	if (NULL != ctl_skb->skb) {
skb               715 drivers/net/wireless/ath/wcn36xx/dxe.c 	ctl_bd->skb = NULL;
skb               734 drivers/net/wireless/ath/wcn36xx/dxe.c 					      skb->data,
skb               735 drivers/net/wireless/ath/wcn36xx/dxe.c 					      skb->len,
skb               743 drivers/net/wireless/ath/wcn36xx/dxe.c 	ctl_skb->skb = skb;
skb               745 drivers/net/wireless/ath/wcn36xx/dxe.c 	desc_skb->fr_len = ctl_skb->skb->len;
skb               750 drivers/net/wireless/ath/wcn36xx/dxe.c 			 (char *)ctl_skb->skb->data, ctl_skb->skb->len);
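
The DXE RX refill path maps the empty data area at skb_tail_pointer() rather than at skb->data after a put: the device writes into the buffer, and the RX handler accounts for the received length afterwards. A sketch of that refill step; MY_PKT_SIZE and the slot/pa out-parameters are illustrative:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

#define MY_PKT_SIZE	0x600		/* illustrative buffer size */

static int refill_rx(struct device *dev, struct sk_buff **slot,
		     dma_addr_t *pa, gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(MY_PKT_SIZE, gfp);

	if (!skb)
		return -ENOMEM;

	/* Map the empty data area; the device fills it, and the RX path
	 * later does skb_put() for however many bytes actually arrived. */
	*pa = dma_map_single(dev, skb_tail_pointer(skb), MY_PKT_SIZE,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *pa)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	*slot = skb;
	return 0;
}
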
skb               424 drivers/net/wireless/ath/wcn36xx/dxe.h 	struct sk_buff		*skb;
skb               467 drivers/net/wireless/ath/wcn36xx/dxe.h 			 struct sk_buff *skb,
skb               477 drivers/net/wireless/ath/wcn36xx/main.c 		       struct sk_buff *skb)
skb               485 drivers/net/wireless/ath/wcn36xx/main.c 	if (wcn36xx_start_tx(wcn, sta_priv, skb))
skb               486 drivers/net/wireless/ath/wcn36xx/main.c 		ieee80211_free_txskb(wcn->hw, skb);
skb               781 drivers/net/wireless/ath/wcn36xx/main.c 	struct sk_buff *skb = NULL;
skb               891 drivers/net/wireless/ath/wcn36xx/main.c 		skb = ieee80211_proberesp_get(hw, vif);
skb               892 drivers/net/wireless/ath/wcn36xx/main.c 		if (!skb) {
skb               897 drivers/net/wireless/ath/wcn36xx/main.c 		wcn36xx_smd_update_proberesp_tmpl(wcn, vif, skb);
skb               898 drivers/net/wireless/ath/wcn36xx/main.c 		dev_kfree_skb(skb);
skb               912 drivers/net/wireless/ath/wcn36xx/main.c 			skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
skb               914 drivers/net/wireless/ath/wcn36xx/main.c 			if (!skb) {
skb               918 drivers/net/wireless/ath/wcn36xx/main.c 			wcn36xx_smd_send_beacon(wcn, vif, skb, tim_off, 0);
skb               919 drivers/net/wireless/ath/wcn36xx/main.c 			dev_kfree_skb(skb);
skb              1677 drivers/net/wireless/ath/wcn36xx/smd.c 				      struct sk_buff *skb)
skb              1685 drivers/net/wireless/ath/wcn36xx/smd.c 	if (skb->len > BEACON_TEMPLATE_SIZE) {
skb              1687 drivers/net/wireless/ath/wcn36xx/smd.c 			     skb->len);
skb              1692 drivers/net/wireless/ath/wcn36xx/smd.c 	msg.probe_resp_template_len = skb->len;
skb              1693 drivers/net/wireless/ath/wcn36xx/smd.c 	memcpy(&msg.probe_resp_template, skb->data, skb->len);
skb                95 drivers/net/wireless/ath/wcn36xx/smd.h 				      struct sk_buff *skb);
skb                49 drivers/net/wireless/ath/wcn36xx/testmode.c 	struct sk_buff *skb;
skb               104 drivers/net/wireless/ath/wcn36xx/testmode.c 	skb = cfg80211_testmode_alloc_reply_skb(wcn->hw->wiphy,
skb               106 drivers/net/wireless/ath/wcn36xx/testmode.c 	if (!skb) {
skb               111 drivers/net/wireless/ath/wcn36xx/testmode.c 	ret = nla_put(skb, WCN36XX_TM_ATTR_DATA, rsp->msg_body_length, rsp);
skb               113 drivers/net/wireless/ath/wcn36xx/testmode.c 		kfree_skb(skb);
skb               117 drivers/net/wireless/ath/wcn36xx/testmode.c 	ret = cfg80211_testmode_reply(skb);
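
The testmode excerpts follow the standard cfg80211 reply flow: size the reply skb for the attribute, nla_put() the payload, then either send it with cfg80211_testmode_reply() or free it on error. A compact sketch; MY_TM_ATTR_DATA is an illustrative attribute id:

#include <net/cfg80211.h>

#define MY_TM_ATTR_DATA	1		/* illustrative attribute id */

static int tm_reply(struct wiphy *wiphy, const void *data, int len)
{
	struct sk_buff *skb;
	int ret;

	skb = cfg80211_testmode_alloc_reply_skb(wiphy, nla_total_size(len));
	if (!skb)
		return -ENOMEM;

	ret = nla_put(skb, MY_TM_ATTR_DATA, len, data);
	if (ret) {
		kfree_skb(skb);		/* the reply skb is ours until sent */
		return ret;
	}

	/* Consumes the skb whether it succeeds or fails. */
	return cfg80211_testmode_reply(skb);
}
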
skb                26 drivers/net/wireless/ath/wcn36xx/txrx.c int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
skb                39 drivers/net/wireless/ath/wcn36xx/txrx.c 	bd = (struct wcn36xx_rx_bd *)skb->data;
skb                45 drivers/net/wireless/ath/wcn36xx/txrx.c 	skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
skb                46 drivers/net/wireless/ath/wcn36xx/txrx.c 	skb_pull(skb, bd->pdu.mpdu_header_off);
skb                48 drivers/net/wireless/ath/wcn36xx/txrx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb                73 drivers/net/wireless/ath/wcn36xx/txrx.c 	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
skb                77 drivers/net/wireless/ath/wcn36xx/txrx.c 			    skb, skb->len, fc, sn);
skb                79 drivers/net/wireless/ath/wcn36xx/txrx.c 				 (char *)skb->data, skb->len);
skb                82 drivers/net/wireless/ath/wcn36xx/txrx.c 			    skb, skb->len, fc, sn);
skb                84 drivers/net/wireless/ath/wcn36xx/txrx.c 				 (char *)skb->data, skb->len);
skb                87 drivers/net/wireless/ath/wcn36xx/txrx.c 	ieee80211_rx_irqsafe(wcn->hw, skb);
skb               122 drivers/net/wireless/ath/wcn36xx/txrx.c 				   struct sk_buff *skb)
skb               124 drivers/net/wireless/ath/wcn36xx/txrx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               136 drivers/net/wireless/ath/wcn36xx/txrx.c 	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
skb               159 drivers/net/wireless/ath/wcn36xx/txrx.c 				struct sk_buff *skb,
skb               162 drivers/net/wireless/ath/wcn36xx/txrx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               210 drivers/net/wireless/ath/wcn36xx/txrx.c 			   skb->len, sta_priv ? sta_priv->tid : 0);
skb               213 drivers/net/wireless/ath/wcn36xx/txrx.c 		wcn36xx_tx_start_ampdu(wcn, sta_priv, skb);
skb               219 drivers/net/wireless/ath/wcn36xx/txrx.c 				struct sk_buff *skb,
skb               222 drivers/net/wireless/ath/wcn36xx/txrx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               261 drivers/net/wireless/ath/wcn36xx/txrx.c 			   skb->len, WCN36XX_TID);
skb               266 drivers/net/wireless/ath/wcn36xx/txrx.c 		     struct sk_buff *skb)
skb               268 drivers/net/wireless/ath/wcn36xx/txrx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               270 drivers/net/wireless/ath/wcn36xx/txrx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               282 drivers/net/wireless/ath/wcn36xx/txrx.c 		    skb, skb->len, __le16_to_cpu(hdr->frame_control),
skb               286 drivers/net/wireless/ath/wcn36xx/txrx.c 	wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len);
skb               300 drivers/net/wireless/ath/wcn36xx/txrx.c 		wcn->tx_ack_skb = skb;
skb               313 drivers/net/wireless/ath/wcn36xx/txrx.c 		wcn36xx_set_tx_data(&bd, wcn, &vif_priv, sta_priv, skb, bcast);
skb               316 drivers/net/wireless/ath/wcn36xx/txrx.c 		wcn36xx_set_tx_mgmt(&bd, wcn, &vif_priv, skb, bcast);
skb               321 drivers/net/wireless/ath/wcn36xx/txrx.c 	ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
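
wcn36xx_rx_skb() treats each DXE buffer as a hardware "BD" descriptor followed by the MPDU: skb_put() accounts for what the device wrote, then skb_pull() drops the descriptor so skb->data lands on the 802.11 header. A sketch of that put/pull trim with a hypothetical descriptor layout:

#include <linux/skbuff.h>

struct my_rx_bd {		/* illustrative stand-in for the hw descriptor */
	u16 mpdu_header_off;	/* offset of the 802.11 header in the buffer */
	u16 mpdu_len;		/* length of the MPDU that follows it */
};

static void *trim_to_mpdu(struct sk_buff *skb)
{
	struct my_rx_bd *bd = (struct my_rx_bd *)skb->data;

	/* The skb was mapped empty; account for what the device wrote... */
	skb_put(skb, bd->mpdu_header_off + bd->mpdu_len);
	/* ...then drop the descriptor so data points at the frame. */
	skb_pull(skb, bd->mpdu_header_off);

	return skb->data;	/* start of the struct ieee80211_hdr */
}
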
skb               162 drivers/net/wireless/ath/wcn36xx/txrx.h int  wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
skb               165 drivers/net/wireless/ath/wcn36xx/txrx.h 		     struct sk_buff *skb);
skb                68 drivers/net/wireless/ath/wil6210/debugfs.c 			has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
skb                76 drivers/net/wireless/ath/wil6210/debugfs.c 		has_skb = ring->ctx && ring->ctx[idx].skb;
skb               135 drivers/net/wireless/ath/wil6210/debugfs.c 					   _s : (ring->ctx[i].skb ? _h : 'h'));
skb              1055 drivers/net/wireless/ath/wil6210/debugfs.c static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
skb              1058 drivers/net/wireless/ath/wil6210/debugfs.c 	int len = skb_headlen(skb);
skb              1059 drivers/net/wireless/ath/wil6210/debugfs.c 	void *p = skb->data;
skb              1060 drivers/net/wireless/ath/wil6210/debugfs.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1068 drivers/net/wireless/ath/wil6210/debugfs.c 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1088 drivers/net/wireless/ath/wil6210/debugfs.c 	struct sk_buff *skb;
skb              1131 drivers/net/wireless/ath/wil6210/debugfs.c 	skb = NULL;
skb              1135 drivers/net/wireless/ath/wil6210/debugfs.c 			skb = ring->ctx ? ring->ctx[txdesc_idx].skb : NULL;
skb              1146 drivers/net/wireless/ath/wil6210/debugfs.c 				skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
skb              1149 drivers/net/wireless/ath/wil6210/debugfs.c 		skb = ring->ctx[txdesc_idx].skb;
skb              1160 drivers/net/wireless/ath/wil6210/debugfs.c 	seq_printf(s, "  SKB = 0x%p\n", skb);
skb              1162 drivers/net/wireless/ath/wil6210/debugfs.c 	if (skb) {
skb              1163 drivers/net/wireless/ath/wil6210/debugfs.c 		skb_get(skb);
skb              1164 drivers/net/wireless/ath/wil6210/debugfs.c 		wil_seq_print_skb(s, skb);
skb              1165 drivers/net/wireless/ath/wil6210/debugfs.c 		kfree_skb(skb);
skb                48 drivers/net/wireless/ath/wil6210/rx_reorder.c 	struct sk_buff *skb = r->reorder_buf[index];
skb                50 drivers/net/wireless/ath/wil6210/rx_reorder.c 	if (!skb)
skb                56 drivers/net/wireless/ath/wil6210/rx_reorder.c 	wil_netif_rx_any(skb, ndev);
skb                93 drivers/net/wireless/ath/wil6210/rx_reorder.c void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
skb               105 drivers/net/wireless/ath/wil6210/rx_reorder.c 	wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
skb               115 drivers/net/wireless/ath/wil6210/rx_reorder.c 		dev_kfree_skb(skb);
skb               124 drivers/net/wireless/ath/wil6210/rx_reorder.c 		wil_netif_rx_any(skb, ndev);
skb               133 drivers/net/wireless/ath/wil6210/rx_reorder.c 			dev_kfree_skb(skb);
skb               137 drivers/net/wireless/ath/wil6210/rx_reorder.c 		wil_netif_rx_any(skb, ndev);
skb               160 drivers/net/wireless/ath/wil6210/rx_reorder.c 				wil_netif_rx_any(skb, ndev);
skb               177 drivers/net/wireless/ath/wil6210/rx_reorder.c 		dev_kfree_skb(skb);
skb               199 drivers/net/wireless/ath/wil6210/rx_reorder.c 		dev_kfree_skb(skb);
skb               211 drivers/net/wireless/ath/wil6210/rx_reorder.c 		wil_netif_rx_any(skb, ndev);
skb               216 drivers/net/wireless/ath/wil6210/rx_reorder.c 	r->reorder_buf[index] = skb;
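
wil_rx_reorder() stores out-of-order MPDUs at seq % buf_size and wil_release_reorder_frame() (top of this block) releases slots in order from the head sequence number, stopping at the first hole. A sketch of that release sweep, assuming ring fields that mirror the ones the excerpts use, with deliver() standing in for wil_netif_rx_any():

#include <linux/skbuff.h>

struct my_reorder {			/* mirrors the fields used above */
	struct sk_buff **reorder_buf;
	u16 head_seq_num;
	u16 buf_size;
};

static void release_in_order(struct my_reorder *r,
			     void (*deliver)(struct sk_buff *skb))
{
	u16 index = r->head_seq_num % r->buf_size;

	while (r->reorder_buf[index]) {		/* stop at the first hole */
		deliver(r->reorder_buf[index]);
		r->reorder_buf[index] = NULL;
		/* 802.11 sequence numbers are 12 bits wide. */
		r->head_seq_num = (r->head_seq_num + 1) & 0xfff;
		index = r->head_seq_num % r->buf_size;
	}
}

Frames left behind a hole stay buffered until the hole is filled or a timeout flushes the window.
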
skb               238 drivers/net/wireless/ath/wil6210/txrx.c 			if (ctx->skb)
skb               239 drivers/net/wireless/ath/wil6210/txrx.c 				dev_kfree_skb_any(ctx->skb);
skb               251 drivers/net/wireless/ath/wil6210/txrx.c 			kfree_skb(ctx->skb);
skb               275 drivers/net/wireless/ath/wil6210/txrx.c 	struct sk_buff *skb = dev_alloc_skb(sz + headroom);
skb               277 drivers/net/wireless/ath/wil6210/txrx.c 	if (unlikely(!skb))
skb               280 drivers/net/wireless/ath/wil6210/txrx.c 	skb_reserve(skb, headroom);
skb               281 drivers/net/wireless/ath/wil6210/txrx.c 	skb_put(skb, sz);
skb               287 drivers/net/wireless/ath/wil6210/txrx.c 	skb->ip_summed = CHECKSUM_NONE;
skb               289 drivers/net/wireless/ath/wil6210/txrx.c 	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
skb               291 drivers/net/wireless/ath/wil6210/txrx.c 		kfree_skb(skb);
skb               303 drivers/net/wireless/ath/wil6210/txrx.c 	vring->ctx[i].skb = skb;
skb               318 drivers/net/wireless/ath/wil6210/txrx.c 				       struct sk_buff *skb)
skb               333 drivers/net/wireless/ath/wil6210/txrx.c 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
skb               338 drivers/net/wireless/ath/wil6210/txrx.c 	if (skb_headroom(skb) < rtap_len &&
skb               339 drivers/net/wireless/ath/wil6210/txrx.c 	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
skb               344 drivers/net/wireless/ath/wil6210/txrx.c 	rtap = skb_push(skb, rtap_len);
skb               375 drivers/net/wireless/ath/wil6210/txrx.c static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
skb               377 drivers/net/wireless/ath/wil6210/txrx.c 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
skb               397 drivers/net/wireless/ath/wil6210/txrx.c 		if (unlikely(skb->len < ETH_HLEN + snaplen)) {
skb               400 drivers/net/wireless/ath/wil6210/txrx.c 					    skb->len);
skb               403 drivers/net/wireless/ath/wil6210/txrx.c 		ta = wil_skb_get_sa(skb);
skb               405 drivers/net/wireless/ath/wil6210/txrx.c 		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
skb               407 drivers/net/wireless/ath/wil6210/txrx.c 					    skb->len);
skb               410 drivers/net/wireless/ath/wil6210/txrx.c 		hdr = (void *)skb->data;
skb               438 drivers/net/wireless/ath/wil6210/txrx.c 				    ta, vif->wdev.iftype, ftype, skb->len);
skb               460 drivers/net/wireless/ath/wil6210/txrx.c 	struct sk_buff *skb;
skb               470 drivers/net/wireless/ath/wil6210/txrx.c 	BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));
skb               483 drivers/net/wireless/ath/wil6210/txrx.c 	skb = vring->ctx[i].skb;
skb               484 drivers/net/wireless/ath/wil6210/txrx.c 	vring->ctx[i].skb = NULL;
skb               486 drivers/net/wireless/ath/wil6210/txrx.c 	if (!skb) {
skb               490 drivers/net/wireless/ath/wil6210/txrx.c 	d = wil_skb_rxdesc(skb);
skb               508 drivers/net/wireless/ath/wil6210/txrx.c 		kfree_skb(skb);
skb               515 drivers/net/wireless/ath/wil6210/txrx.c 		kfree_skb(skb);
skb               518 drivers/net/wireless/ath/wil6210/txrx.c 	skb_trim(skb, dmalen);
skb               520 drivers/net/wireless/ath/wil6210/txrx.c 	prefetch(skb->data);
skb               523 drivers/net/wireless/ath/wil6210/txrx.c 			  skb->data, skb_headlen(skb), false);
skb               525 drivers/net/wireless/ath/wil6210/txrx.c 	cid = wil_rx_get_cid_by_skb(wil, skb);
skb               527 drivers/net/wireless/ath/wil6210/txrx.c 		kfree_skb(skb);
skb               530 drivers/net/wireless/ath/wil6210/txrx.c 	wil_skb_set_cid(skb, (u8)cid);
skb               539 drivers/net/wireless/ath/wil6210/txrx.c 		wil_rx_add_radiotap_header(wil, skb);
skb               543 drivers/net/wireless/ath/wil6210/txrx.c 		return skb;
skb               573 drivers/net/wireless/ath/wil6210/txrx.c 					  skb->data, skb_headlen(skb), false);
skb               575 drivers/net/wireless/ath/wil6210/txrx.c 		kfree_skb(skb);
skb               586 drivers/net/wireless/ath/wil6210/txrx.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               603 drivers/net/wireless/ath/wil6210/txrx.c 		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
skb               604 drivers/net/wireless/ath/wil6210/txrx.c 		skb_pull(skb, snaplen);
skb               607 drivers/net/wireless/ath/wil6210/txrx.c 	return skb;
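
The reap path above converts an 802.3-with-LLC/SNAP frame into a plain Ethernet frame: slide the two MAC addresses forward over the SNAP header, then pull it, leaving the ethertype adjacent to the addresses for eth_type_trans(). As a short sketch:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void strip_snap(struct sk_buff *skb, unsigned int snaplen)
{
	/* DA and SA slide forward over the LLC/SNAP header... */
	memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
	/* ...which is then discarded, leaving a plain Ethernet header. */
	skb_pull(skb, snaplen);
}
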
skb               672 drivers/net/wireless/ath/wil6210/txrx.c static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
skb               674 drivers/net/wireless/ath/wil6210/txrx.c 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
skb               675 drivers/net/wireless/ath/wil6210/txrx.c 	int cid = wil_skb_get_cid(skb);
skb               703 drivers/net/wireless/ath/wil6210/txrx.c static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
skb               706 drivers/net/wireless/ath/wil6210/txrx.c 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
skb               718 drivers/net/wireless/ath/wil6210/txrx.c static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
skb               721 drivers/net/wireless/ath/wil6210/txrx.c 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
skb               723 drivers/net/wireless/ath/wil6210/txrx.c 	*cid = wil_skb_get_cid(skb);
skb               734 drivers/net/wireless/ath/wil6210/txrx.c 						  struct sk_buff *skb)
skb               740 drivers/net/wireless/ath/wil6210/txrx.c 	int len = skb->len;
skb               742 drivers/net/wireless/ath/wil6210/txrx.c 	if (!skb_mac_header_was_set(skb)) {
skb               747 drivers/net/wireless/ath/wil6210/txrx.c 	len -= skb_mac_offset(skb);
skb               753 drivers/net/wireless/ath/wil6210/txrx.c 	buf = skb_mac_header(skb) + sizeof(struct ethhdr);
skb               771 drivers/net/wireless/ath/wil6210/txrx.c static bool wil_skb_is_eap_3(struct wil6210_priv *wil, struct sk_buff *skb)
skb               776 drivers/net/wireless/ath/wil6210/txrx.c 	key = wil_is_ptk_eapol_key(wil, skb);
skb               793 drivers/net/wireless/ath/wil6210/txrx.c static bool wil_skb_is_eap_4(struct wil6210_priv *wil, struct sk_buff *skb)
skb               798 drivers/net/wireless/ath/wil6210/txrx.c 	key = wil_is_ptk_eapol_key(wil, skb);
skb               848 drivers/net/wireless/ath/wil6210/txrx.c void wil_tx_complete_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
skb               859 drivers/net/wireless/ath/wil6210/txrx.c 	if (!wil_skb_is_eap_4(wil, skb))
skb               886 drivers/net/wireless/ath/wil6210/txrx.c static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
skb               896 drivers/net/wireless/ath/wil6210/txrx.c 	if (!wil_skb_is_eap_3(wil, skb))
skb               907 drivers/net/wireless/ath/wil6210/txrx.c void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
skb               914 drivers/net/wireless/ath/wil6210/txrx.c 	unsigned int len = skb->len;
skb               915 drivers/net/wireless/ath/wil6210/txrx.c 	u8 *sa, *da = wil_skb_get_da(skb);
skb               931 drivers/net/wireless/ath/wil6210/txrx.c 		sa = wil_skb_get_sa(skb);
skb               935 drivers/net/wireless/ath/wil6210/txrx.c 			dev_kfree_skb(skb);
skb               943 drivers/net/wireless/ath/wil6210/txrx.c 			xmit_skb = skb_copy(skb, GFP_ATOMIC);
skb               953 drivers/net/wireless/ath/wil6210/txrx.c 				xmit_skb = skb;
skb               954 drivers/net/wireless/ath/wil6210/txrx.c 				skb = NULL;
skb               972 drivers/net/wireless/ath/wil6210/txrx.c 	if (skb) { /* deliver to local stack */
skb               973 drivers/net/wireless/ath/wil6210/txrx.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               974 drivers/net/wireless/ath/wil6210/txrx.c 		skb->dev = ndev;
skb               976 drivers/net/wireless/ath/wil6210/txrx.c 		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
skb               977 drivers/net/wireless/ath/wil6210/txrx.c 			wil_rx_handle_eapol(vif, skb);
skb               980 drivers/net/wireless/ath/wil6210/txrx.c 			rc = napi_gro_receive(&wil->napi_rx, skb);
skb               982 drivers/net/wireless/ath/wil6210/txrx.c 			netif_rx_ni(skb);
skb              1002 drivers/net/wireless/ath/wil6210/txrx.c void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
skb              1008 drivers/net/wireless/ath/wil6210/txrx.c 	wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
skb              1012 drivers/net/wireless/ath/wil6210/txrx.c 	skb_orphan(skb);
skb              1014 drivers/net/wireless/ath/wil6210/txrx.c 	if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
skb              1015 drivers/net/wireless/ath/wil6210/txrx.c 		wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len);
skb              1016 drivers/net/wireless/ath/wil6210/txrx.c 		dev_kfree_skb(skb);
skb              1024 drivers/net/wireless/ath/wil6210/txrx.c 	if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
skb              1025 drivers/net/wireless/ath/wil6210/txrx.c 		dev_kfree_skb(skb);
skb              1029 drivers/net/wireless/ath/wil6210/txrx.c 	wil_netif_rx(skb, ndev, cid, stats, true);
skb              1042 drivers/net/wireless/ath/wil6210/txrx.c 	struct sk_buff *skb;
skb              1049 drivers/net/wireless/ath/wil6210/txrx.c 	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
skb              1054 drivers/net/wireless/ath/wil6210/txrx.c 			skb->dev = ndev;
skb              1055 drivers/net/wireless/ath/wil6210/txrx.c 			skb_reset_mac_header(skb);
skb              1056 drivers/net/wireless/ath/wil6210/txrx.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1057 drivers/net/wireless/ath/wil6210/txrx.c 			skb->pkt_type = PACKET_OTHERHOST;
skb              1058 drivers/net/wireless/ath/wil6210/txrx.c 			skb->protocol = htons(ETH_P_802_2);
skb              1059 drivers/net/wireless/ath/wil6210/txrx.c 			wil_netif_rx_any(skb, ndev);
skb              1061 drivers/net/wireless/ath/wil6210/txrx.c 			wil_rx_reorder(wil, skb);
skb              1434 drivers/net/wireless/ath/wil6210/txrx.c 					  struct sk_buff *skb)
skb              1437 drivers/net/wireless/ath/wil6210/txrx.c 	const u8 *da = wil_skb_get_da(skb);
skb              1448 drivers/net/wireless/ath/wil6210/txrx.c 		    skb->protocol != cpu_to_be16(ETH_P_PAE))
skb              1471 drivers/net/wireless/ath/wil6210/txrx.c 		       struct wil_ring *ring, struct sk_buff *skb);
skb              1475 drivers/net/wireless/ath/wil6210/txrx.c 					     struct sk_buff *skb)
skb              1498 drivers/net/wireless/ath/wil6210/txrx.c 		    skb->protocol != cpu_to_be16(ETH_P_PAE))
skb              1524 drivers/net/wireless/ath/wil6210/txrx.c 					    struct sk_buff *skb)
skb              1537 drivers/net/wireless/ath/wil6210/txrx.c 	    skb->protocol != cpu_to_be16(ETH_P_PAE))
skb              1544 drivers/net/wireless/ath/wil6210/txrx.c 				 struct sk_buff *skb, int vring_index)
skb              1546 drivers/net/wireless/ath/wil6210/txrx.c 	u8 *da = wil_skb_get_da(skb);
skb              1554 drivers/net/wireless/ath/wil6210/txrx.c 					    struct sk_buff *skb)
skb              1560 drivers/net/wireless/ath/wil6210/txrx.c 	const u8 *src = wil_skb_get_sa(skb);
skb              1575 drivers/net/wireless/ath/wil6210/txrx.c 		    skb->protocol != cpu_to_be16(ETH_P_PAE))
skb              1591 drivers/net/wireless/ath/wil6210/txrx.c 	wil_set_da_for_vring(wil, skb, i);
skb              1603 drivers/net/wireless/ath/wil6210/txrx.c 		    skb->protocol != cpu_to_be16(ETH_P_PAE))
skb              1609 drivers/net/wireless/ath/wil6210/txrx.c 		skb2 = skb_copy(skb, GFP_ATOMIC);
skb              1638 drivers/net/wireless/ath/wil6210/txrx.c 					  struct sk_buff *skb,
skb              1672 drivers/net/wireless/ath/wil6210/txrx.c 				     struct sk_buff *skb){
skb              1675 drivers/net/wireless/ath/wil6210/txrx.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1680 drivers/net/wireless/ath/wil6210/txrx.c 	switch (skb->protocol) {
skb              1682 drivers/net/wireless/ath/wil6210/txrx.c 		protocol = ip_hdr(skb)->protocol;
skb              1686 drivers/net/wireless/ath/wil6210/txrx.c 		protocol = ipv6_hdr(skb)->nexthdr;
skb              1697 drivers/net/wireless/ath/wil6210/txrx.c 		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
skb              1708 drivers/net/wireless/ath/wil6210/txrx.c 	d->dma.ip_length = skb_network_header_len(skb);
skb              1731 drivers/net/wireless/ath/wil6210/txrx.c 			      struct wil_ring *vring, struct sk_buff *skb)
skb              1752 drivers/net/wireless/ath/wil6210/txrx.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1754 drivers/net/wireless/ath/wil6210/txrx.c 	int mss = skb_shinfo(skb)->gso_size;	/* payload size w/o headers */
skb              1771 drivers/net/wireless/ath/wil6210/txrx.c 	wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
skb              1791 drivers/net/wireless/ath/wil6210/txrx.c 		(int)skb_network_header_len(skb) +
skb              1792 drivers/net/wireless/ath/wil6210/txrx.c 		tcp_hdrlen(skb);
skb              1794 drivers/net/wireless/ath/wil6210/txrx.c 	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
skb              1800 drivers/net/wireless/ath/wil6210/txrx.c 		ip_hdr(skb)->tot_len = 0;
skb              1801 drivers/net/wireless/ath/wil6210/txrx.c 		ip_hdr(skb)->check = 0;
skb              1806 drivers/net/wireless/ath/wil6210/txrx.c 		ipv6_hdr(skb)->payload_len = 0;
skb              1816 drivers/net/wireless/ath/wil6210/txrx.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1822 drivers/net/wireless/ath/wil6210/txrx.c 	tcp_hdr_len = tcp_hdrlen(skb);
skb              1823 drivers/net/wireless/ath/wil6210/txrx.c 	skb_net_hdr_len = skb_network_header_len(skb);
skb              1827 drivers/net/wireless/ath/wil6210/txrx.c 	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
skb              1835 drivers/net/wireless/ath/wil6210/txrx.c 	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
skb              1843 drivers/net/wireless/ath/wil6210/txrx.c 	headlen = skb_headlen(skb) - hdrlen;
skb              1851 drivers/net/wireless/ath/wil6210/txrx.c 			frag = &skb_shinfo(skb)->frags[f];
skb              1878 drivers/net/wireless/ath/wil6210/txrx.c 						    skb->data +
skb              1879 drivers/net/wireless/ath/wil6210/txrx.c 						    skb_headlen(skb) - headlen,
skb              1903 drivers/net/wireless/ath/wil6210/txrx.c 			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
skb              1980 drivers/net/wireless/ath/wil6210/txrx.c 	vring->ctx[i].skb = skb_get(skb);
skb              2008 drivers/net/wireless/ath/wil6210/txrx.c 		*(ktime_t *)&skb->cb = ktime_get();
skb              2010 drivers/net/wireless/ath/wil6210/txrx.c 		memset(skb->cb, 0, sizeof(ktime_t));
skb              2034 drivers/net/wireless/ath/wil6210/txrx.c 			 struct wil_ring *ring, struct sk_buff *skb)
skb              2041 drivers/net/wireless/ath/wil6210/txrx.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb              2049 drivers/net/wireless/ath/wil6210/txrx.c 	uint len = skb_headlen(skb);
skb              2052 drivers/net/wireless/ath/wil6210/txrx.c 		     skb->len, ring_index, nr_frags);
skb              2065 drivers/net/wireless/ath/wil6210/txrx.c 	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
skb              2068 drivers/net/wireless/ath/wil6210/txrx.c 		     skb_headlen(skb), skb->data, &pa);
skb              2070 drivers/net/wireless/ath/wil6210/txrx.c 			  skb->data, skb_headlen(skb), false);
skb              2084 drivers/net/wireless/ath/wil6210/txrx.c 	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
skb              2095 drivers/net/wireless/ath/wil6210/txrx.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              2118 drivers/net/wireless/ath/wil6210/txrx.c 		wil_tx_desc_offload_setup(d, skb);
skb              2133 drivers/net/wireless/ath/wil6210/txrx.c 	ring->ctx[i].skb = skb_get(skb);
skb              2155 drivers/net/wireless/ath/wil6210/txrx.c 	trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
skb              2163 drivers/net/wireless/ath/wil6210/txrx.c 		*(ktime_t *)&skb->cb = ktime_get();
skb              2165 drivers/net/wireless/ath/wil6210/txrx.c 		memset(skb->cb, 0, sizeof(ktime_t));
skb              2192 drivers/net/wireless/ath/wil6210/txrx.c 		       struct wil_ring *ring, struct sk_buff *skb)
skb              2209 drivers/net/wireless/ath/wil6210/txrx.c 	rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
skb              2210 drivers/net/wireless/ath/wil6210/txrx.c 	     (wil, vif, ring, skb);
skb              2316 drivers/net/wireless/ath/wil6210/txrx.c netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb              2320 drivers/net/wireless/ath/wil6210/txrx.c 	const u8 *da = wil_skb_get_da(skb);
skb              2348 drivers/net/wireless/ath/wil6210/txrx.c 		ring = wil_find_tx_ring_sta(wil, vif, skb);
skb              2354 drivers/net/wireless/ath/wil6210/txrx.c 			ring = wil_find_tx_bcast_2(wil, vif, skb);
skb              2357 drivers/net/wireless/ath/wil6210/txrx.c 			ring = wil_find_tx_bcast_1(wil, vif, skb);
skb              2362 drivers/net/wireless/ath/wil6210/txrx.c 			ring = wil_find_tx_bcast_2(wil, vif, skb);
skb              2365 drivers/net/wireless/ath/wil6210/txrx.c 		ring = wil_find_tx_ucast(wil, vif, skb);
skb              2372 drivers/net/wireless/ath/wil6210/txrx.c 	rc = wil_tx_ring(wil, vif, ring, skb);
skb              2379 drivers/net/wireless/ath/wil6210/txrx.c 		dev_kfree_skb_any(skb);
skb              2390 drivers/net/wireless/ath/wil6210/txrx.c 	dev_kfree_skb_any(skb);
skb              2395 drivers/net/wireless/ath/wil6210/txrx.c void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
skb              2404 drivers/net/wireless/ath/wil6210/txrx.c 	if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
skb              2407 drivers/net/wireless/ath/wil6210/txrx.c 	skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
skb              2477 drivers/net/wireless/ath/wil6210/txrx.c 			struct sk_buff *skb;
skb              2480 drivers/net/wireless/ath/wil6210/txrx.c 			skb = ctx->skb;
skb              2499 drivers/net/wireless/ath/wil6210/txrx.c 			if (skb) {
skb              2502 drivers/net/wireless/ath/wil6210/txrx.c 					ndev->stats.tx_bytes += skb->len;
skb              2505 drivers/net/wireless/ath/wil6210/txrx.c 						stats->tx_bytes += skb->len;
skb              2507 drivers/net/wireless/ath/wil6210/txrx.c 						wil_tx_latency_calc(wil, skb,
skb              2516 drivers/net/wireless/ath/wil6210/txrx.c 				if (skb->protocol == cpu_to_be16(ETH_P_PAE))
skb              2517 drivers/net/wireless/ath/wil6210/txrx.c 					wil_tx_complete_handle_eapol(vif, skb);
skb              2519 drivers/net/wireless/ath/wil6210/txrx.c 				wil_consume_skb(skb, d->dma.error == 0);
skb              2562 drivers/net/wireless/ath/wil6210/txrx.c 				   struct sk_buff *skb, int *tid, int *cid,
skb              2565 drivers/net/wireless/ath/wil6210/txrx.c 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
skb              2568 drivers/net/wireless/ath/wil6210/txrx.c 	*cid = wil_skb_get_cid(skb);
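
Note on the wil6210/txrx.c entries above: lines 1672-1708 work out which L4 checksum the device must insert when the stack hands over a partially checksummed frame (skb->ip_summed == CHECKSUM_PARTIAL), and line 2209 dispatches to the TSO or plain ring writer based on skb_is_gso(). A minimal sketch of the checksum-parse step; struct my_tx_desc and its fields are illustrative assumptions, not the driver's vring_tx_desc:

	#include <linux/errno.h>
	#include <linux/ip.h>
	#include <linux/ipv6.h>
	#include <linux/skbuff.h>

	/* Hypothetical descriptor holding only what this sketch fills in. */
	struct my_tx_desc {
		u8 l4_proto;	/* IPPROTO_TCP or IPPROTO_UDP */
		u8 ip_hdr_len;	/* network header length in bytes */
	};

	static int my_tx_csum_setup(struct my_tx_desc *d, struct sk_buff *skb)
	{
		u8 protocol;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return 0;	/* device has nothing to insert */

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			protocol = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return -EPROTONOSUPPORT;
		}

		if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
			return -EPROTONOSUPPORT;

		d->l4_proto = protocol;
		d->ip_hdr_len = skb_network_header_len(skb);
		return 0;
	}
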
skb               585 drivers/net/wireless/ath/wil6210/txrx.h static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
skb               587 drivers/net/wireless/ath/wil6210/txrx.h 	return (void *)skb->cb;
skb               610 drivers/net/wireless/ath/wil6210/txrx.h static inline u8 *wil_skb_get_da(struct sk_buff *skb)
skb               612 drivers/net/wireless/ath/wil6210/txrx.h 	struct ethhdr *eth = (void *)skb->data;
skb               617 drivers/net/wireless/ath/wil6210/txrx.h static inline u8 *wil_skb_get_sa(struct sk_buff *skb)
skb               619 drivers/net/wireless/ath/wil6210/txrx.h 	struct ethhdr *eth = (void *)skb->data;
skb               624 drivers/net/wireless/ath/wil6210/txrx.h static inline bool wil_need_txstat(struct sk_buff *skb)
skb               626 drivers/net/wireless/ath/wil6210/txrx.h 	const u8 *da = wil_skb_get_da(skb);
skb               628 drivers/net/wireless/ath/wil6210/txrx.h 	return is_unicast_ether_addr(da) && skb->sk &&
skb               629 drivers/net/wireless/ath/wil6210/txrx.h 	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
skb               632 drivers/net/wireless/ath/wil6210/txrx.h static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
skb               634 drivers/net/wireless/ath/wil6210/txrx.h 	if (unlikely(wil_need_txstat(skb)))
skb               635 drivers/net/wireless/ath/wil6210/txrx.h 		skb_complete_wifi_ack(skb, acked);
skb               637 drivers/net/wireless/ath/wil6210/txrx.h 		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
skb               674 drivers/net/wireless/ath/wil6210/txrx.h static inline u8 wil_skb_get_cid(struct sk_buff *skb)
skb               676 drivers/net/wireless/ath/wil6210/txrx.h 	struct skb_rx_info *skb_rx_info = (void *)skb->cb;
skb               681 drivers/net/wireless/ath/wil6210/txrx.h static inline void wil_skb_set_cid(struct sk_buff *skb, u8 cid)
skb               683 drivers/net/wireless/ath/wil6210/txrx.h 	struct skb_rx_info *skb_rx_info = (void *)skb->cb;
skb               688 drivers/net/wireless/ath/wil6210/txrx.h void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
skb               689 drivers/net/wireless/ath/wil6210/txrx.h void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
skb               691 drivers/net/wireless/ath/wil6210/txrx.h void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
skb               700 drivers/net/wireless/ath/wil6210/txrx.h void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
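
The txrx.h accessors above (wil_skb_rxdesc(), wil_skb_get_cid()/wil_skb_set_cid()) all overlay a driver-private struct on skb->cb, the 48-byte scratch area every sk_buff carries; the BUILD_BUG_ON at txrx_edma.c line 885 guards the size. A minimal sketch of the same idiom, with my_rx_info as an assumed example struct:

	#include <linux/bug.h>
	#include <linux/skbuff.h>

	/* Illustrative per-packet metadata kept in skb->cb. */
	struct my_rx_info {
		u8 cid;
		u8 tid;
	};

	static inline struct my_rx_info *my_skb_rx_info(struct sk_buff *skb)
	{
		BUILD_BUG_ON(sizeof(struct my_rx_info) > sizeof(skb->cb));
		return (struct my_rx_info *)skb->cb;
	}
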
skb               175 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct sk_buff *skb;
skb               185 drivers/net/wireless/ath/wil6210/txrx_edma.c 	skb = dev_alloc_skb(sz);
skb               186 drivers/net/wireless/ath/wil6210/txrx_edma.c 	if (unlikely(!skb))
skb               189 drivers/net/wireless/ath/wil6210/txrx_edma.c 	skb_put(skb, sz);
skb               195 drivers/net/wireless/ath/wil6210/txrx_edma.c 	skb->ip_summed = CHECKSUM_NONE;
skb               197 drivers/net/wireless/ath/wil6210/txrx_edma.c 	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
skb               199 drivers/net/wireless/ath/wil6210/txrx_edma.c 		kfree_skb(skb);
skb               210 drivers/net/wireless/ath/wil6210/txrx_edma.c 	buff_arr[buff_id].skb = skb;
skb               218 drivers/net/wireless/ath/wil6210/txrx_edma.c 	memcpy(skb->cb, &pa, sizeof(pa));
skb               289 drivers/net/wireless/ath/wil6210/txrx_edma.c 		struct sk_buff *skb = rx_buff->skb;
skb               291 drivers/net/wireless/ath/wil6210/txrx_edma.c 		if (unlikely(!skb)) {
skb               294 drivers/net/wireless/ath/wil6210/txrx_edma.c 			rx_buff->skb = NULL;
skb               295 drivers/net/wireless/ath/wil6210/txrx_edma.c 			memcpy(&pa, skb->cb, sizeof(pa));
skb               298 drivers/net/wireless/ath/wil6210/txrx_edma.c 			kfree_skb(skb);
skb               480 drivers/net/wireless/ath/wil6210/txrx_edma.c 		if (ctx->skb)
skb               481 drivers/net/wireless/ath/wil6210/txrx_edma.c 			dev_kfree_skb_any(ctx->skb);
skb               518 drivers/net/wireless/ath/wil6210/txrx_edma.c 					struct sk_buff *skb, int *tid,
skb               522 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
skb               532 drivers/net/wireless/ath/wil6210/txrx_edma.c static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
skb               535 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
skb               542 drivers/net/wireless/ath/wil6210/txrx_edma.c 				    struct sk_buff *skb)
skb               555 drivers/net/wireless/ath/wil6210/txrx_edma.c 	st = wil_skb_rxstatus(skb);
skb               769 drivers/net/wireless/ath/wil6210/txrx_edma.c 			 struct sk_buff *skb, struct wil_net_stats *stats)
skb               817 drivers/net/wireless/ath/wil6210/txrx_edma.c 				  skb->data, skb_headlen(skb), false);
skb               824 drivers/net/wireless/ath/wil6210/txrx_edma.c 				   struct sk_buff *skb,
skb               828 drivers/net/wireless/ath/wil6210/txrx_edma.c 	void *msg = wil_skb_rxstatus(skb);
skb               858 drivers/net/wireless/ath/wil6210/txrx_edma.c 	skb->ip_summed = wil_rx_status_get_checksum(msg, stats);
skb               870 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct sk_buff *skb;
skb               885 drivers/net/wireless/ath/wil6210/txrx_edma.c 	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
skb               924 drivers/net/wireless/ath/wil6210/txrx_edma.c 	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
skb               925 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
skb               926 drivers/net/wireless/ath/wil6210/txrx_edma.c 	if (!skb) {
skb               940 drivers/net/wireless/ath/wil6210/txrx_edma.c 	memcpy(&pa, skb->cb, sizeof(pa));
skb               991 drivers/net/wireless/ath/wil6210/txrx_edma.c 		kfree_skb(skb);
skb               992 drivers/net/wireless/ath/wil6210/txrx_edma.c 		if (rxdata->skb) {
skb               993 drivers/net/wireless/ath/wil6210/txrx_edma.c 			kfree_skb(rxdata->skb);
skb               994 drivers/net/wireless/ath/wil6210/txrx_edma.c 			rxdata->skb = NULL;
skb              1000 drivers/net/wireless/ath/wil6210/txrx_edma.c 	skb_trim(skb, dmalen);
skb              1002 drivers/net/wireless/ath/wil6210/txrx_edma.c 	prefetch(skb->data);
skb              1004 drivers/net/wireless/ath/wil6210/txrx_edma.c 	if (!rxdata->skb) {
skb              1005 drivers/net/wireless/ath/wil6210/txrx_edma.c 		rxdata->skb = skb;
skb              1007 drivers/net/wireless/ath/wil6210/txrx_edma.c 		if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
skb              1009 drivers/net/wireless/ath/wil6210/txrx_edma.c 			kfree_skb_partial(skb, headstolen);
skb              1012 drivers/net/wireless/ath/wil6210/txrx_edma.c 			kfree_skb(skb);
skb              1013 drivers/net/wireless/ath/wil6210/txrx_edma.c 			kfree_skb(rxdata->skb);
skb              1014 drivers/net/wireless/ath/wil6210/txrx_edma.c 			rxdata->skb = NULL;
skb              1024 drivers/net/wireless/ath/wil6210/txrx_edma.c 	skb = rxdata->skb;
skb              1025 drivers/net/wireless/ath/wil6210/txrx_edma.c 	rxdata->skb = NULL;
skb              1037 drivers/net/wireless/ath/wil6210/txrx_edma.c 	    wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
skb              1038 drivers/net/wireless/ath/wil6210/txrx_edma.c 		kfree_skb(skb);
skb              1049 drivers/net/wireless/ath/wil6210/txrx_edma.c 		kfree_skb(skb);
skb              1053 drivers/net/wireless/ath/wil6210/txrx_edma.c 	skb_pull(skb, data_offset);
skb              1056 drivers/net/wireless/ath/wil6210/txrx_edma.c 			  skb->data, skb_headlen(skb), false);
skb              1061 drivers/net/wireless/ath/wil6210/txrx_edma.c 	s = wil_skb_rxstatus(skb);
skb              1064 drivers/net/wireless/ath/wil6210/txrx_edma.c 	return skb;
skb              1072 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct sk_buff *skb;
skb              1091 drivers/net/wireless/ath/wil6210/txrx_edma.c 		       (NULL != (skb =
skb              1095 drivers/net/wireless/ath/wil6210/txrx_edma.c 				void *msg = wil_skb_rxstatus(skb);
skb              1103 drivers/net/wireless/ath/wil6210/txrx_edma.c 					kfree_skb(skb);
skb              1107 drivers/net/wireless/ath/wil6210/txrx_edma.c 				wil_netif_rx_any(skb, ndev);
skb              1109 drivers/net/wireless/ath/wil6210/txrx_edma.c 				wil_rx_reorder(wil, skb);
skb              1232 drivers/net/wireless/ath/wil6210/txrx_edma.c 			struct sk_buff *skb = ctx->skb;
skb              1252 drivers/net/wireless/ath/wil6210/txrx_edma.c 			if (skb) {
skb              1255 drivers/net/wireless/ath/wil6210/txrx_edma.c 					ndev->stats.tx_bytes += skb->len;
skb              1258 drivers/net/wireless/ath/wil6210/txrx_edma.c 						stats->tx_bytes += skb->len;
skb              1260 drivers/net/wireless/ath/wil6210/txrx_edma.c 						wil_tx_latency_calc(wil, skb,
skb              1269 drivers/net/wireless/ath/wil6210/txrx_edma.c 				if (skb->protocol == cpu_to_be16(ETH_P_PAE))
skb              1270 drivers/net/wireless/ath/wil6210/txrx_edma.c 					wil_tx_complete_handle_eapol(vif, skb);
skb              1272 drivers/net/wireless/ath/wil6210/txrx_edma.c 				wil_consume_skb(skb, msg.status == 0);
skb              1357 drivers/net/wireless/ath/wil6210/txrx_edma.c 			       struct sk_buff *skb, bool is_ipv4,
skb              1394 drivers/net/wireless/ath/wil6210/txrx_edma.c 		ring->ctx[i].skb = skb_get(skb);
skb              1408 drivers/net/wireless/ath/wil6210/txrx_edma.c 				  struct sk_buff *skb)
skb              1412 drivers/net/wireless/ath/wil6210/txrx_edma.c 	int nr_frags = skb_shinfo(skb)->nr_frags;
skb              1423 drivers/net/wireless/ath/wil6210/txrx_edma.c 	int mss = skb_shinfo(skb)->gso_size;
skb              1425 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
skb              1438 drivers/net/wireless/ath/wil6210/txrx_edma.c 	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
skb              1450 drivers/net/wireless/ath/wil6210/txrx_edma.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              1456 drivers/net/wireless/ath/wil6210/txrx_edma.c 	tcp_hdr_len = tcp_hdrlen(skb);
skb              1457 drivers/net/wireless/ath/wil6210/txrx_edma.c 	skb_net_hdr_len = skb_network_header_len(skb);
skb              1465 drivers/net/wireless/ath/wil6210/txrx_edma.c 	rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
skb              1466 drivers/net/wireless/ath/wil6210/txrx_edma.c 				 wil_tso_type_hdr, NULL, ring, skb,
skb              1473 drivers/net/wireless/ath/wil6210/txrx_edma.c 	headlen = skb_headlen(skb) - hdrlen;
skb              1475 drivers/net/wireless/ath/wil6210/txrx_edma.c 	rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
skb              1478 drivers/net/wireless/ath/wil6210/txrx_edma.c 				 wil_tso_type_lst, NULL, ring, skb,
skb              1486 drivers/net/wireless/ath/wil6210/txrx_edma.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
skb              1496 drivers/net/wireless/ath/wil6210/txrx_edma.c 					 frag, ring, skb, is_ipv4,
skb              1522 drivers/net/wireless/ath/wil6210/txrx_edma.c 		*(ktime_t *)&skb->cb = ktime_get();
skb              1524 drivers/net/wireless/ath/wil6210/txrx_edma.c 		memset(skb->cb, 0, sizeof(ktime_t));
skb              1605 drivers/net/wireless/ath/wil6210/txrx_edma.c 	kfree_skb(sring->rx_data.skb);
skb              1606 drivers/net/wireless/ath/wil6210/txrx_edma.c 	sring->rx_data.skb = NULL;
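
In the txrx_edma.c entries above (lines 1004-1014), a frame spanning several rx descriptors is rebuilt by merging each new fragment into an accumulator with skb_try_coalesce(); when the source skb's head data was stolen into a frag, only its shell is freed via kfree_skb_partial(). A minimal sketch of that step (my_rx_coalesce is an assumed name):

	#include <linux/skbuff.h>

	/* 'acc' accumulates one frame; 'skb' is the fragment just reaped.
	 * Returns the accumulator, or NULL if the frame had to be dropped. */
	static struct sk_buff *my_rx_coalesce(struct sk_buff *acc,
					      struct sk_buff *skb)
	{
		bool headstolen;
		int delta;

		if (!acc)
			return skb;		/* first fragment starts the frame */

		if (skb_try_coalesce(acc, skb, &headstolen, &delta)) {
			kfree_skb_partial(skb, headstolen);
			return acc;
		}

		/* could not merge: drop the partial frame */
		kfree_skb(skb);
		kfree_skb(acc);
		return NULL;
	}
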
skb               353 drivers/net/wireless/ath/wil6210/txrx_edma.h static inline void *wil_skb_rxstatus(struct sk_buff *skb)
skb               355 drivers/net/wireless/ath/wil6210/txrx_edma.h 	return (void *)skb->cb;
skb               516 drivers/net/wireless/ath/wil6210/wil6210.h 	struct sk_buff *skb;
skb               549 drivers/net/wireless/ath/wil6210/wil6210.h 	struct sk_buff *skb;
skb               617 drivers/net/wireless/ath/wil6210/wil6210.h 			   struct wil_ring *ring, struct sk_buff *skb);
skb               628 drivers/net/wireless/ath/wil6210/wil6210.h 				   struct sk_buff *skb, int *tid, int *cid,
skb               630 drivers/net/wireless/ath/wil6210/wil6210.h 	void (*get_netif_rx_params)(struct sk_buff *skb,
skb               632 drivers/net/wireless/ath/wil6210/wil6210.h 	int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb);
skb               633 drivers/net/wireless/ath/wil6210/wil6210.h 	int (*rx_error_check)(struct wil6210_priv *wil, struct sk_buff *skb,
skb               898 drivers/net/wireless/ath/wil6210/wil6210.h 	struct sk_buff *skb;
skb              1381 drivers/net/wireless/ath/wil6210/wil6210.h netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
skb              1384 drivers/net/wireless/ath/wil6210/wil6210.h 				  struct sk_buff *skb);
skb              1183 drivers/net/wireless/ath/wil6210/wmi.c 	struct sk_buff *skb;
skb              1200 drivers/net/wireless/ath/wil6210/wmi.c 	skb = alloc_skb(sz, GFP_KERNEL);
skb              1201 drivers/net/wireless/ath/wil6210/wmi.c 	if (!skb) {
skb              1206 drivers/net/wireless/ath/wil6210/wmi.c 	eth = skb_put(skb, ETH_HLEN);
skb              1210 drivers/net/wireless/ath/wil6210/wmi.c 	skb_put_data(skb, evt->eapol, eapol_len);
skb              1211 drivers/net/wireless/ath/wil6210/wmi.c 	skb->protocol = eth_type_trans(skb, ndev);
skb              1212 drivers/net/wireless/ath/wil6210/wmi.c 	if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
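
The wmi.c entries above (lines 1183-1212) show an EAPOL payload arriving as a firmware event rather than through the rx ring: the driver fabricates an Ethernet frame around it and injects it with netif_rx_ni() (process context; later kernels fold this into netif_rx()). A minimal sketch under assumed parameters; evt_data/evt_len and the source address stand in for the driver's event fields:

	#include <linux/etherdevice.h>
	#include <linux/if_ether.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int my_deliver_eapol(struct net_device *ndev, const u8 *src,
				    const void *evt_data, u16 evt_len)
	{
		struct ethhdr *eth;
		struct sk_buff *skb = alloc_skb(ETH_HLEN + evt_len, GFP_KERNEL);

		if (!skb)
			return -ENOMEM;

		eth = skb_put(skb, ETH_HLEN);		/* synthetic MAC header */
		ether_addr_copy(eth->h_dest, ndev->dev_addr);
		ether_addr_copy(eth->h_source, src);
		eth->h_proto = cpu_to_be16(ETH_P_PAE);
		skb_put_data(skb, evt_data, evt_len);	/* EAPOL body */
		skb->protocol = eth_type_trans(skb, ndev);
		return netif_rx(skb) == NET_RX_SUCCESS ? 0 : -EINVAL;
	}
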
skb              1210 drivers/net/wireless/atmel/at76c50x-usb.c 	struct sk_buff *skb = priv->rx_skb;
skb              1218 drivers/net/wireless/atmel/at76c50x-usb.c 	if (!skb) {
skb              1219 drivers/net/wireless/atmel/at76c50x-usb.c 		skb = dev_alloc_skb(sizeof(struct at76_rx_buffer));
skb              1220 drivers/net/wireless/atmel/at76c50x-usb.c 		if (!skb) {
skb              1226 drivers/net/wireless/atmel/at76c50x-usb.c 		priv->rx_skb = skb;
skb              1228 drivers/net/wireless/atmel/at76c50x-usb.c 		skb_push(skb, skb_headroom(skb));
skb              1229 drivers/net/wireless/atmel/at76c50x-usb.c 		skb_trim(skb, 0);
skb              1232 drivers/net/wireless/atmel/at76c50x-usb.c 	size = skb_tailroom(skb);
skb              1234 drivers/net/wireless/atmel/at76c50x-usb.c 			  skb_put(skb, size), size, at76_rx_callback, priv);
skb              1763 drivers/net/wireless/atmel/at76c50x-usb.c 			     struct sk_buff *skb)
skb              1767 drivers/net/wireless/atmel/at76c50x-usb.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1768 drivers/net/wireless/atmel/at76c50x-usb.c 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
skb              1776 drivers/net/wireless/atmel/at76c50x-usb.c 		dev_kfree_skb_any(skb);
skb              1791 drivers/net/wireless/atmel/at76c50x-usb.c 			dev_kfree_skb_any(skb);
skb              1802 drivers/net/wireless/atmel/at76c50x-usb.c 	priv->tx_skb = skb;
skb              1803 drivers/net/wireless/atmel/at76c50x-usb.c 	padding = at76_calc_padding(skb->len);
skb              1804 drivers/net/wireless/atmel/at76c50x-usb.c 	submit_len = AT76_TX_HDRLEN + skb->len + padding;
skb              1809 drivers/net/wireless/atmel/at76c50x-usb.c 	tx_buffer->wlength = cpu_to_le16(skb->len);
skb              1812 drivers/net/wireless/atmel/at76c50x-usb.c 	memcpy(tx_buffer->packet, skb->data, skb->len);
skb               799 drivers/net/wireless/atmel/atmel.c static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
skb               805 drivers/net/wireless/atmel/atmel.c 	u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
skb               810 drivers/net/wireless/atmel/atmel.c 		dev_kfree_skb(skb);
skb               816 drivers/net/wireless/atmel/atmel.c 		dev_kfree_skb(skb);
skb               845 drivers/net/wireless/atmel/atmel.c 		skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
skb               852 drivers/net/wireless/atmel/atmel.c 		skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
skb               862 drivers/net/wireless/atmel/atmel.c 	atmel_copy_to_card(dev, buff + DATA_FRAME_WS_HEADER_SIZE, skb->data + 12, len - 12);
skb               866 drivers/net/wireless/atmel/atmel.c 	tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
skb               871 drivers/net/wireless/atmel/atmel.c 	dev_kfree_skb(skb);
skb               898 drivers/net/wireless/atmel/atmel.c 	struct sk_buff	*skb;
skb               910 drivers/net/wireless/atmel/atmel.c 	if (!(skb = dev_alloc_skb(msdu_size + 14))) {
skb               915 drivers/net/wireless/atmel/atmel.c 	skb_reserve(skb, 2);
skb               916 drivers/net/wireless/atmel/atmel.c 	skbp = skb_put(skb, msdu_size + 12);
skb               925 drivers/net/wireless/atmel/atmel.c 			dev_kfree_skb(skb);
skb               936 drivers/net/wireless/atmel/atmel.c 	skb->protocol = eth_type_trans(skb, priv->dev);
skb               937 drivers/net/wireless/atmel/atmel.c 	skb->ip_summed = CHECKSUM_NONE;
skb               938 drivers/net/wireless/atmel/atmel.c 	netif_rx(skb);
skb               972 drivers/net/wireless/atmel/atmel.c 	struct sk_buff *skb;
skb              1035 drivers/net/wireless/atmel/atmel.c 			if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
skb              1038 drivers/net/wireless/atmel/atmel.c 				skb_reserve(skb, 2);
skb              1039 drivers/net/wireless/atmel/atmel.c 				skb_put_data(skb, priv->rx_buf,
skb              1041 drivers/net/wireless/atmel/atmel.c 				skb->protocol = eth_type_trans(skb, priv->dev);
skb              1042 drivers/net/wireless/atmel/atmel.c 				skb->ip_summed = CHECKSUM_NONE;
skb              1043 drivers/net/wireless/atmel/atmel.c 				netif_rx(skb);
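
The atmel.c rx entries above allocate msdu_size + 14 and then skb_reserve(skb, 2): the 2-byte pad places the 14-byte Ethernet header so the IP header behind it lands on a 4-byte boundary. The same idiom, sketched with the kernel's NET_IP_ALIGN constant (my_rx_alloc is an assumed name):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static struct sk_buff *my_rx_alloc(struct net_device *dev,
					   unsigned int pkt_len)
	{
		struct sk_buff *skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);

		if (!skb)
			return NULL;

		skb_reserve(skb, NET_IP_ALIGN);	/* align IP header to 4 bytes */
		skb->dev = dev;
		return skb;
	}
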
skb               398 drivers/net/wireless/broadcom/b43/dma.c 	if (meta->skb) {
skb               400 drivers/net/wireless/broadcom/b43/dma.c 			ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
skb               402 drivers/net/wireless/broadcom/b43/dma.c 			dev_kfree_skb_any(meta->skb);
skb               403 drivers/net/wireless/broadcom/b43/dma.c 		meta->skb = NULL;
skb               570 drivers/net/wireless/broadcom/b43/dma.c static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
skb               572 drivers/net/wireless/broadcom/b43/dma.c 	unsigned char *f = skb->data + ring->frameoffset;
skb               577 drivers/net/wireless/broadcom/b43/dma.c static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
skb               584 drivers/net/wireless/broadcom/b43/dma.c 	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
skb               588 drivers/net/wireless/broadcom/b43/dma.c 	frame = skb->data + ring->frameoffset;
skb               597 drivers/net/wireless/broadcom/b43/dma.c 	struct sk_buff *skb;
skb               601 drivers/net/wireless/broadcom/b43/dma.c 	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
skb               602 drivers/net/wireless/broadcom/b43/dma.c 	if (unlikely(!skb))
skb               604 drivers/net/wireless/broadcom/b43/dma.c 	b43_poison_rx_buffer(ring, skb);
skb               605 drivers/net/wireless/broadcom/b43/dma.c 	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
skb               610 drivers/net/wireless/broadcom/b43/dma.c 		dev_kfree_skb_any(skb);
skb               612 drivers/net/wireless/broadcom/b43/dma.c 		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
skb               613 drivers/net/wireless/broadcom/b43/dma.c 		if (unlikely(!skb))
skb               615 drivers/net/wireless/broadcom/b43/dma.c 		b43_poison_rx_buffer(ring, skb);
skb               616 drivers/net/wireless/broadcom/b43/dma.c 		dmaaddr = map_descbuffer(ring, skb->data,
skb               620 drivers/net/wireless/broadcom/b43/dma.c 			dev_kfree_skb_any(skb);
skb               625 drivers/net/wireless/broadcom/b43/dma.c 	meta->skb = skb;
skb               663 drivers/net/wireless/broadcom/b43/dma.c 		dev_kfree_skb(meta->skb);
skb               785 drivers/net/wireless/broadcom/b43/dma.c 		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
skb               791 drivers/net/wireless/broadcom/b43/dma.c 					 meta->skb->len, 1);
skb               854 drivers/net/wireless/broadcom/b43/dma.c 		ring->meta->skb = B43_DMA_PTR_POISON;
skb              1190 drivers/net/wireless/broadcom/b43/dma.c 			   struct sk_buff *skb)
skb              1193 drivers/net/wireless/broadcom/b43/dma.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1220 drivers/net/wireless/broadcom/b43/dma.c 				 skb, info, cookie);
skb              1242 drivers/net/wireless/broadcom/b43/dma.c 	meta->skb = skb;
skb              1246 drivers/net/wireless/broadcom/b43/dma.c 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
skb              1248 drivers/net/wireless/broadcom/b43/dma.c 	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
skb              1249 drivers/net/wireless/broadcom/b43/dma.c 		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
skb              1258 drivers/net/wireless/broadcom/b43/dma.c 		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
skb              1259 drivers/net/wireless/broadcom/b43/dma.c 		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
skb              1269 drivers/net/wireless/broadcom/b43/dma.c 	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
skb              1340 drivers/net/wireless/broadcom/b43/dma.c int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
skb              1345 drivers/net/wireless/broadcom/b43/dma.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1347 drivers/net/wireless/broadcom/b43/dma.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              1357 drivers/net/wireless/broadcom/b43/dma.c 			dev, skb_get_queue_mapping(skb));
skb              1384 drivers/net/wireless/broadcom/b43/dma.c 	ring->queue_prio = skb_get_queue_mapping(skb);
skb              1386 drivers/net/wireless/broadcom/b43/dma.c 	err = dma_tx_fragment(ring, skb);
skb              1390 drivers/net/wireless/broadcom/b43/dma.c 		ieee80211_free_txskb(dev->wl->hw, skb);
skb              1401 drivers/net/wireless/broadcom/b43/dma.c 		unsigned int skb_mapping = skb_get_queue_mapping(skb);
skb              1478 drivers/net/wireless/broadcom/b43/dma.c 		if (b43_dma_ptr_is_poisoned(meta->skb)) {
skb              1485 drivers/net/wireless/broadcom/b43/dma.c 		if (meta->skb) {
skb              1487 drivers/net/wireless/broadcom/b43/dma.c 			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
skb              1490 drivers/net/wireless/broadcom/b43/dma.c 					 meta->skb->len, 1);
skb              1501 drivers/net/wireless/broadcom/b43/dma.c 			if (unlikely(!meta->skb)) {
skb              1511 drivers/net/wireless/broadcom/b43/dma.c 			info = IEEE80211_SKB_CB(meta->skb);
skb              1534 drivers/net/wireless/broadcom/b43/dma.c 			ieee80211_tx_status(dev->wl->hw, meta->skb);
skb              1538 drivers/net/wireless/broadcom/b43/dma.c 			meta->skb = B43_DMA_PTR_POISON;
skb              1543 drivers/net/wireless/broadcom/b43/dma.c 			if (unlikely(meta->skb)) {
skb              1588 drivers/net/wireless/broadcom/b43/dma.c 	struct sk_buff *skb;
skb              1596 drivers/net/wireless/broadcom/b43/dma.c 	skb = meta->skb;
skb              1598 drivers/net/wireless/broadcom/b43/dma.c 	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
skb              1613 drivers/net/wireless/broadcom/b43/dma.c 	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
skb              1632 drivers/net/wireless/broadcom/b43/dma.c 			b43_poison_rx_buffer(ring, meta->skb);
skb              1655 drivers/net/wireless/broadcom/b43/dma.c 	skb_put(skb, len + ring->frameoffset);
skb              1656 drivers/net/wireless/broadcom/b43/dma.c 	skb_pull(skb, ring->frameoffset);
skb              1658 drivers/net/wireless/broadcom/b43/dma.c 	b43_rx(ring->dev, skb, rxhdr);
skb              1664 drivers/net/wireless/broadcom/b43/dma.c 	b43_poison_rx_buffer(ring, skb);
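
The b43/dma.c tx entries above (lines 1246-1259) map skb->data for DMA and, when the resulting address fails the ring's constraints, retry through a kmemdup() bounce buffer. A minimal sketch; my_addr_ok() is a hypothetical stand-in for the ring's range check (b43 rings can be limited to 30-bit addressing):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/slab.h>

	static bool my_addr_ok(dma_addr_t pa)
	{
		return pa <= DMA_BIT_MASK(30);	/* assumed constraint */
	}

	static int my_map_tx(struct device *dev, struct sk_buff *skb,
			     dma_addr_t *pa, void **bounce)
	{
		*bounce = NULL;
		*pa = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
		if (!dma_mapping_error(dev, *pa) && my_addr_ok(*pa))
			return 0;
		if (!dma_mapping_error(dev, *pa))
			dma_unmap_single(dev, *pa, skb->len, DMA_TO_DEVICE);

		/* mapping unusable: copy into a low buffer and map that */
		*bounce = kmemdup(skb->data, skb->len, GFP_ATOMIC | GFP_DMA);
		if (!*bounce)
			return -ENOMEM;
		*pa = dma_map_single(dev, *bounce, skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *pa)) {
			kfree(*bounce);
			*bounce = NULL;
			return -EIO;
		}
		return 0;
	}
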
skb               188 drivers/net/wireless/broadcom/b43/dma.h 	struct sk_buff *skb;
skb               295 drivers/net/wireless/broadcom/b43/dma.h 	       struct sk_buff *skb);
skb              3584 drivers/net/wireless/broadcom/b43/main.c 	struct sk_buff *skb;
skb              3597 drivers/net/wireless/broadcom/b43/main.c 			skb = skb_dequeue(&wl->tx_queue[queue_num]);
skb              3599 drivers/net/wireless/broadcom/b43/main.c 				err = b43_pio_tx(dev, skb);
skb              3601 drivers/net/wireless/broadcom/b43/main.c 				err = b43_dma_tx(dev, skb);
skb              3605 drivers/net/wireless/broadcom/b43/main.c 				skb_queue_head(&wl->tx_queue[queue_num], skb);
skb              3609 drivers/net/wireless/broadcom/b43/main.c 				ieee80211_free_txskb(wl->hw, skb);
skb              3625 drivers/net/wireless/broadcom/b43/main.c 		      struct sk_buff *skb)
skb              3629 drivers/net/wireless/broadcom/b43/main.c 	if (unlikely(skb->len < 2 + 2 + 6)) {
skb              3631 drivers/net/wireless/broadcom/b43/main.c 		ieee80211_free_txskb(hw, skb);
skb              3634 drivers/net/wireless/broadcom/b43/main.c 	B43_WARN_ON(skb_shinfo(skb)->nr_frags);
skb              3636 drivers/net/wireless/broadcom/b43/main.c 	skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
skb              3637 drivers/net/wireless/broadcom/b43/main.c 	if (!wl->tx_queue_stopped[skb->queue_mapping]) {
skb              3640 drivers/net/wireless/broadcom/b43/main.c 		ieee80211_stop_queue(wl->hw, skb->queue_mapping);
skb              4379 drivers/net/wireless/broadcom/b43/main.c 			struct sk_buff *skb;
skb              4381 drivers/net/wireless/broadcom/b43/main.c 			skb = skb_dequeue(&wl->tx_queue[queue_num]);
skb              4382 drivers/net/wireless/broadcom/b43/main.c 			ieee80211_free_txskb(wl->hw, skb);
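
The b43/main.c entries above split transmit in two: the mac80211 .tx hook only queues (skb_queue_tail, lines 3636-3640) and stops the queue when it grows, while a worker dequeues and, on a busy device, pushes the skb back with skb_queue_head for a later retry (lines 3597-3609). A minimal sketch of the drain loop; my_wl and the hw_tx callback are assumptions:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	struct my_wl {
		struct sk_buff_head tx_queue;
		bool queue_stopped;
	};

	static void my_tx_drain(struct my_wl *wl,
				int (*hw_tx)(struct sk_buff *skb))
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&wl->tx_queue))) {
			int err = hw_tx(skb);

			if (err == -EBUSY) {
				/* no ring space: requeue at the head, keep order */
				skb_queue_head(&wl->tx_queue, skb);
				wl->queue_stopped = true;
				break;
			}
			if (err)
				dev_kfree_skb(skb);	/* fatal error: drop */
		}
	}
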
skb               185 drivers/net/wireless/broadcom/b43/pio.c 		if (pack->skb) {
skb               186 drivers/net/wireless/broadcom/b43/pio.c 			ieee80211_free_txskb(q->dev->wl->hw, pack->skb);
skb               187 drivers/net/wireless/broadcom/b43/pio.c 			pack->skb = NULL;
skb               353 drivers/net/wireless/broadcom/b43/pio.c 	const char *frame = pack->skb->data;
skb               354 drivers/net/wireless/broadcom/b43/pio.c 	unsigned int frame_len = pack->skb->len;
skb               423 drivers/net/wireless/broadcom/b43/pio.c 	const char *frame = pack->skb->data;
skb               424 drivers/net/wireless/broadcom/b43/pio.c 	unsigned int frame_len = pack->skb->len;
skb               441 drivers/net/wireless/broadcom/b43/pio.c 			struct sk_buff *skb)
skb               449 drivers/net/wireless/broadcom/b43/pio.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               460 drivers/net/wireless/broadcom/b43/pio.c 	err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
skb               472 drivers/net/wireless/broadcom/b43/pio.c 	pack->skb = skb;
skb               483 drivers/net/wireless/broadcom/b43/pio.c 	q->buffer_used += roundup(skb->len + hdrlen, 4);
skb               489 drivers/net/wireless/broadcom/b43/pio.c int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
skb               495 drivers/net/wireless/broadcom/b43/pio.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               497 drivers/net/wireless/broadcom/b43/pio.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               507 drivers/net/wireless/broadcom/b43/pio.c 		q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
skb               511 drivers/net/wireless/broadcom/b43/pio.c 	total_len = roundup(skb->len + hdrlen, 4);
skb               528 drivers/net/wireless/broadcom/b43/pio.c 		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
skb               536 drivers/net/wireless/broadcom/b43/pio.c 	q->queue_prio = skb_get_queue_mapping(skb);
skb               538 drivers/net/wireless/broadcom/b43/pio.c 	err = pio_tx_frame(q, skb);
skb               542 drivers/net/wireless/broadcom/b43/pio.c 		ieee80211_free_txskb(dev->wl->hw, skb);
skb               555 drivers/net/wireless/broadcom/b43/pio.c 		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
skb               576 drivers/net/wireless/broadcom/b43/pio.c 	info = IEEE80211_SKB_CB(pack->skb);
skb               580 drivers/net/wireless/broadcom/b43/pio.c 	total_len = pack->skb->len + b43_txhdr_size(dev);
skb               585 drivers/net/wireless/broadcom/b43/pio.c 	ieee80211_tx_status(dev->wl->hw, pack->skb);
skb               586 drivers/net/wireless/broadcom/b43/pio.c 	pack->skb = NULL;
skb               603 drivers/net/wireless/broadcom/b43/pio.c 	struct sk_buff *skb;
skb               697 drivers/net/wireless/broadcom/b43/pio.c 	skb = dev_alloc_skb(len + padding + 2);
skb               698 drivers/net/wireless/broadcom/b43/pio.c 	if (unlikely(!skb)) {
skb               702 drivers/net/wireless/broadcom/b43/pio.c 	skb_reserve(skb, 2);
skb               703 drivers/net/wireless/broadcom/b43/pio.c 	skb_put(skb, len + padding);
skb               705 drivers/net/wireless/broadcom/b43/pio.c 		b43_block_read(dev, skb->data + padding, (len & ~3),
skb               718 drivers/net/wireless/broadcom/b43/pio.c 				skb->data[len + padding - 3] = tail[0];
skb               719 drivers/net/wireless/broadcom/b43/pio.c 				skb->data[len + padding - 2] = tail[1];
skb               720 drivers/net/wireless/broadcom/b43/pio.c 				skb->data[len + padding - 1] = tail[2];
skb               723 drivers/net/wireless/broadcom/b43/pio.c 				skb->data[len + padding - 2] = tail[0];
skb               724 drivers/net/wireless/broadcom/b43/pio.c 				skb->data[len + padding - 1] = tail[1];
skb               727 drivers/net/wireless/broadcom/b43/pio.c 				skb->data[len + padding - 1] = tail[0];
skb               732 drivers/net/wireless/broadcom/b43/pio.c 		b43_block_read(dev, skb->data + padding, (len & ~1),
skb               743 drivers/net/wireless/broadcom/b43/pio.c 			skb->data[len + padding - 1] = tail[0];
skb               747 drivers/net/wireless/broadcom/b43/pio.c 	b43_rx(q->dev, skb, rxhdr);
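
The b43/pio.c rx entries above (lines 697-743) pull a frame through a fixed-width PIO window, so a frame length that is not a multiple of the access width leaves 1-3 tail bytes that must be placed by hand out of one last full word. A width-4 sketch assuming little-endian byte order within the FIFO word (read32 is a caller-supplied accessor, an assumption of this sketch):

	#include <linux/types.h>

	static void my_pio_read(u8 *buf, size_t len, u32 (*read32)(void))
	{
		size_t i, whole = len & ~(size_t)3;

		for (i = 0; i < whole; i += 4) {
			u32 w = read32();

			buf[i + 0] = w & 0xff;
			buf[i + 1] = (w >> 8) & 0xff;
			buf[i + 2] = (w >> 16) & 0xff;
			buf[i + 3] = (w >> 24) & 0xff;
		}
		if (len & 3) {
			u32 w = read32();	/* final partial word */

			for (i = 0; i < (len & 3); i++)
				buf[whole + i] = (w >> (8 * i)) & 0xff;
		}
	}
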
skb                63 drivers/net/wireless/broadcom/b43/pio.h 	struct sk_buff *skb;
skb               158 drivers/net/wireless/broadcom/b43/pio.h int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb);
skb               645 drivers/net/wireless/broadcom/b43/xmit.c void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
skb               694 drivers/net/wireless/broadcom/b43/xmit.c 	if (unlikely(skb->len < (sizeof(struct b43_plcp_hdr6) + padding))) {
skb               698 drivers/net/wireless/broadcom/b43/xmit.c 	plcp = (struct b43_plcp_hdr6 *)(skb->data + padding);
skb               699 drivers/net/wireless/broadcom/b43/xmit.c 	skb_pull(skb, sizeof(struct b43_plcp_hdr6) + padding);
skb               701 drivers/net/wireless/broadcom/b43/xmit.c 	if (unlikely(skb->len < (2 + 2 + 6 /*minimum hdr */  + FCS_LEN))) {
skb               705 drivers/net/wireless/broadcom/b43/xmit.c 	wlhdr = (struct ieee80211_hdr *)(skb->data);
skb               722 drivers/net/wireless/broadcom/b43/xmit.c 			if (unlikely(skb->len < (wlhdr_len + 3))) {
skb               819 drivers/net/wireless/broadcom/b43/xmit.c 	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
skb               820 drivers/net/wireless/broadcom/b43/xmit.c 	ieee80211_rx_ni(dev->wl->hw, skb);
skb               827 drivers/net/wireless/broadcom/b43/xmit.c 	dev_kfree_skb_any(skb);
skb               357 drivers/net/wireless/broadcom/b43/xmit.h void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr);
skb               309 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (meta->skb) {
skb               311 drivers/net/wireless/broadcom/b43legacy/dma.c 			dev_kfree_skb_irq(meta->skb);
skb               313 drivers/net/wireless/broadcom/b43legacy/dma.c 			dev_kfree_skb(meta->skb);
skb               314 drivers/net/wireless/broadcom/b43legacy/dma.c 		meta->skb = NULL;
skb               448 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct sk_buff *skb;
skb               452 drivers/net/wireless/broadcom/b43legacy/dma.c 	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
skb               453 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (unlikely(!skb))
skb               455 drivers/net/wireless/broadcom/b43legacy/dma.c 	dmaaddr = map_descbuffer(ring, skb->data,
skb               461 drivers/net/wireless/broadcom/b43legacy/dma.c 		dev_kfree_skb_any(skb);
skb               463 drivers/net/wireless/broadcom/b43legacy/dma.c 		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
skb               464 drivers/net/wireless/broadcom/b43legacy/dma.c 		if (unlikely(!skb))
skb               466 drivers/net/wireless/broadcom/b43legacy/dma.c 		dmaaddr = map_descbuffer(ring, skb->data,
skb               471 drivers/net/wireless/broadcom/b43legacy/dma.c 		dev_kfree_skb_any(skb);
skb               475 drivers/net/wireless/broadcom/b43legacy/dma.c 	meta->skb = skb;
skb               479 drivers/net/wireless/broadcom/b43legacy/dma.c 	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
skb               481 drivers/net/wireless/broadcom/b43legacy/dma.c 	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
skb               518 drivers/net/wireless/broadcom/b43legacy/dma.c 		dev_kfree_skb(meta->skb);
skb               592 drivers/net/wireless/broadcom/b43legacy/dma.c 		if (!meta->skb) {
skb               598 drivers/net/wireless/broadcom/b43legacy/dma.c 					 meta->skb->len, 1);
skb               949 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct sk_buff *skb = *in_skb;
skb               950 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               960 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
skb               973 drivers/net/wireless/broadcom/b43legacy/dma.c 				 skb->data, skb->len, info,
skb               997 drivers/net/wireless/broadcom/b43legacy/dma.c 	meta->skb = skb;
skb              1000 drivers/net/wireless/broadcom/b43legacy/dma.c 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
skb              1002 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
skb              1003 drivers/net/wireless/broadcom/b43legacy/dma.c 		bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);
skb              1011 drivers/net/wireless/broadcom/b43legacy/dma.c 		skb_put_data(bounce_skb, skb->data, skb->len);
skb              1012 drivers/net/wireless/broadcom/b43legacy/dma.c 		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
skb              1013 drivers/net/wireless/broadcom/b43legacy/dma.c 		bounce_skb->dev = skb->dev;
skb              1014 drivers/net/wireless/broadcom/b43legacy/dma.c 		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
skb              1017 drivers/net/wireless/broadcom/b43legacy/dma.c 		dev_kfree_skb_any(skb);
skb              1018 drivers/net/wireless/broadcom/b43legacy/dma.c 		skb = bounce_skb;
skb              1020 drivers/net/wireless/broadcom/b43legacy/dma.c 		meta->skb = skb;
skb              1021 drivers/net/wireless/broadcom/b43legacy/dma.c 		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
skb              1022 drivers/net/wireless/broadcom/b43legacy/dma.c 		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
skb              1031 drivers/net/wireless/broadcom/b43legacy/dma.c 			     skb->len, 0, 1, 1);
skb              1039 drivers/net/wireless/broadcom/b43legacy/dma.c 	dev_kfree_skb_any(skb);
skb              1070 drivers/net/wireless/broadcom/b43legacy/dma.c 		     struct sk_buff *skb)
skb              1075 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
skb              1097 drivers/net/wireless/broadcom/b43legacy/dma.c 	err = dma_tx_fragment(ring, &skb);
skb              1101 drivers/net/wireless/broadcom/b43legacy/dma.c 		dev_kfree_skb_any(skb);
skb              1111 drivers/net/wireless/broadcom/b43legacy/dma.c 		unsigned int skb_mapping = skb_get_queue_mapping(skb);
skb              1156 drivers/net/wireless/broadcom/b43legacy/dma.c 		if (meta->skb)
skb              1158 drivers/net/wireless/broadcom/b43legacy/dma.c 					 meta->skb->len, 1);
skb              1166 drivers/net/wireless/broadcom/b43legacy/dma.c 			BUG_ON(!meta->skb);
skb              1167 drivers/net/wireless/broadcom/b43legacy/dma.c 			info = IEEE80211_SKB_CB(meta->skb);
skb              1204 drivers/net/wireless/broadcom/b43legacy/dma.c 			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
skb              1206 drivers/net/wireless/broadcom/b43legacy/dma.c 			meta->skb = NULL;
skb              1211 drivers/net/wireless/broadcom/b43legacy/dma.c 			B43legacy_WARN_ON(meta->skb != NULL);
skb              1247 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct sk_buff *skb;
skb              1255 drivers/net/wireless/broadcom/b43legacy/dma.c 	skb = meta->skb;
skb              1260 drivers/net/wireless/broadcom/b43legacy/dma.c 				(struct b43legacy_hwtxstatus *)skb->data;
skb              1277 drivers/net/wireless/broadcom/b43legacy/dma.c 	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
skb              1331 drivers/net/wireless/broadcom/b43legacy/dma.c 	skb_put(skb, len + ring->frameoffset);
skb              1332 drivers/net/wireless/broadcom/b43legacy/dma.c 	skb_pull(skb, ring->frameoffset);
skb              1334 drivers/net/wireless/broadcom/b43legacy/dma.c 	b43legacy_rx(ring->dev, skb, rxhdr);
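
The b43legacy/dma.c entries above (lines 1003-1021) handle a mapping that lands outside the device's reachable range by cloning the frame into a fresh GFP_DMA skb; the swap must carry over skb->cb (mac80211 keeps its tx info there) and the queue mapping before the original is freed. A sketch of that swap (my_bounce_skb is an assumed name; GFP_ATOMIC is chosen here since tx paths usually cannot sleep, where the excerpt uses GFP_KERNEL):

	#include <linux/skbuff.h>
	#include <linux/string.h>

	static struct sk_buff *my_bounce_skb(struct sk_buff *skb)
	{
		struct sk_buff *bounce = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);

		if (!bounce)
			return NULL;

		skb_put_data(bounce, skb->data, skb->len);
		memcpy(bounce->cb, skb->cb, sizeof(skb->cb));	/* tx info */
		bounce->dev = skb->dev;
		skb_set_queue_mapping(bounce, skb_get_queue_mapping(skb));
		dev_kfree_skb_any(skb);
		return bounce;
	}
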
skb               110 drivers/net/wireless/broadcom/b43legacy/dma.h 	struct sk_buff *skb;
skb               189 drivers/net/wireless/broadcom/b43legacy/dma.h 		     struct sk_buff *skb);
skb               209 drivers/net/wireless/broadcom/b43legacy/dma.h 		     struct sk_buff *skb)
skb              2450 drivers/net/wireless/broadcom/b43legacy/main.c 	struct sk_buff *skb;
skb              2463 drivers/net/wireless/broadcom/b43legacy/main.c 			skb = skb_dequeue(&wl->tx_queue[queue_num]);
skb              2465 drivers/net/wireless/broadcom/b43legacy/main.c 				err = b43legacy_pio_tx(dev, skb);
skb              2467 drivers/net/wireless/broadcom/b43legacy/main.c 				err = b43legacy_dma_tx(dev, skb);
skb              2471 drivers/net/wireless/broadcom/b43legacy/main.c 				skb_queue_head(&wl->tx_queue[queue_num], skb);
skb              2475 drivers/net/wireless/broadcom/b43legacy/main.c 				dev_kfree_skb(skb); /* Drop it */
skb              2488 drivers/net/wireless/broadcom/b43legacy/main.c 			    struct sk_buff *skb)
skb              2492 drivers/net/wireless/broadcom/b43legacy/main.c 	if (unlikely(skb->len < 2 + 2 + 6)) {
skb              2494 drivers/net/wireless/broadcom/b43legacy/main.c 		dev_kfree_skb_any(skb);
skb              2497 drivers/net/wireless/broadcom/b43legacy/main.c 	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags);
skb              2499 drivers/net/wireless/broadcom/b43legacy/main.c 	skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
skb              2500 drivers/net/wireless/broadcom/b43legacy/main.c 	if (!wl->tx_queue_stopped[skb->queue_mapping])
skb              2503 drivers/net/wireless/broadcom/b43legacy/main.c 		ieee80211_stop_queue(wl->hw, skb->queue_mapping);
skb                90 drivers/net/wireless/broadcom/b43legacy/pio.c 			struct sk_buff *skb)
skb                94 drivers/net/wireless/broadcom/b43legacy/pio.c 				    skb->data[skb->len - 1]);
skb               173 drivers/net/wireless/broadcom/b43legacy/pio.c 				  struct sk_buff *skb,
skb               184 drivers/net/wireless/broadcom/b43legacy/pio.c 	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
skb               186 drivers/net/wireless/broadcom/b43legacy/pio.c 				 txhdr, skb->data, skb->len,
skb               187 drivers/net/wireless/broadcom/b43legacy/pio.c 				 IEEE80211_SKB_CB(skb),
skb               193 drivers/net/wireless/broadcom/b43legacy/pio.c 	octets = skb->len + txhdr_size;
skb               196 drivers/net/wireless/broadcom/b43legacy/pio.c 	tx_data(queue, txhdr, (u8 *)skb->data, octets);
skb               197 drivers/net/wireless/broadcom/b43legacy/pio.c 	tx_complete(queue, skb);
skb               207 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (packet->skb) {
skb               209 drivers/net/wireless/broadcom/b43legacy/pio.c 			dev_kfree_skb_irq(packet->skb);
skb               211 drivers/net/wireless/broadcom/b43legacy/pio.c 			dev_kfree_skb(packet->skb);
skb               220 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct sk_buff *skb = packet->skb;
skb               224 drivers/net/wireless/broadcom/b43legacy/pio.c 	octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3);
skb               244 drivers/net/wireless/broadcom/b43legacy/pio.c 	err = pio_tx_write_fragment(queue, skb, packet,
skb               454 drivers/net/wireless/broadcom/b43legacy/pio.c 		     struct sk_buff *skb)
skb               464 drivers/net/wireless/broadcom/b43legacy/pio.c 	packet->skb = skb;
skb               486 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (!packet->skb)
skb               490 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->tx_devq_used -= (packet->skb->len +
skb               493 drivers/net/wireless/broadcom/b43legacy/pio.c 	info = IEEE80211_SKB_CB(packet->skb);
skb               525 drivers/net/wireless/broadcom/b43legacy/pio.c 	ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb);
skb               526 drivers/net/wireless/broadcom/b43legacy/pio.c 	packet->skb = NULL;
skb               563 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct sk_buff *skb;
skb               618 drivers/net/wireless/broadcom/b43legacy/pio.c 	skb = dev_alloc_skb(len);
skb               619 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (unlikely(!skb)) {
skb               623 drivers/net/wireless/broadcom/b43legacy/pio.c 	skb_put(skb, len);
skb               626 drivers/net/wireless/broadcom/b43legacy/pio.c 		*((__le16 *)(skb->data + i)) = cpu_to_le16(tmp);
skb               630 drivers/net/wireless/broadcom/b43legacy/pio.c 		skb->data[len - 1] = (tmp & 0x00FF);
skb               632 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacy_rx(queue->dev, skb, rxhdr);
skb                44 drivers/net/wireless/broadcom/b43legacy/pio.h 	struct sk_buff *skb;
skb               102 drivers/net/wireless/broadcom/b43legacy/pio.h 		   struct sk_buff *skb);
skb               127 drivers/net/wireless/broadcom/b43legacy/pio.h 		   struct sk_buff *skb)
skb               419 drivers/net/wireless/broadcom/b43legacy/xmit.c 		  struct sk_buff *skb,
skb               451 drivers/net/wireless/broadcom/b43legacy/xmit.c 	if (unlikely(skb->len < (sizeof(struct b43legacy_plcp_hdr6) +
skb               456 drivers/net/wireless/broadcom/b43legacy/xmit.c 	plcp = (struct b43legacy_plcp_hdr6 *)(skb->data + padding);
skb               457 drivers/net/wireless/broadcom/b43legacy/xmit.c 	skb_pull(skb, sizeof(struct b43legacy_plcp_hdr6) + padding);
skb               459 drivers/net/wireless/broadcom/b43legacy/xmit.c 	if (unlikely(skb->len < (2+2+6/*minimum hdr*/ + FCS_LEN))) {
skb               463 drivers/net/wireless/broadcom/b43legacy/xmit.c 	wlhdr = (struct ieee80211_hdr *)(skb->data);
skb               488 drivers/net/wireless/broadcom/b43legacy/xmit.c 			if (unlikely(skb->len < (wlhdr_len + 3))) {
skb               493 drivers/net/wireless/broadcom/b43legacy/xmit.c 			if (skb->data[wlhdr_len + 3] & (1 << 5)) {
skb               503 drivers/net/wireless/broadcom/b43legacy/xmit.c 			if (unlikely(skb->len < (wlhdr_len + iv_len +
skb               510 drivers/net/wireless/broadcom/b43legacy/xmit.c 			memmove(skb->data + iv_len, skb->data, wlhdr_len);
skb               511 drivers/net/wireless/broadcom/b43legacy/xmit.c 			skb_pull(skb, iv_len);
skb               513 drivers/net/wireless/broadcom/b43legacy/xmit.c 			skb_trim(skb, skb->len - icv_len);
skb               563 drivers/net/wireless/broadcom/b43legacy/xmit.c 	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
skb               564 drivers/net/wireless/broadcom/b43legacy/xmit.c 	ieee80211_rx_irqsafe(dev->wl->hw, skb);
skb               569 drivers/net/wireless/broadcom/b43legacy/xmit.c 	dev_kfree_skb_any(skb);
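
The b43legacy/xmit.c rx entries above (lines 503-513) strip hardware-decrypted frames in place: the 802.11 header is slid forward over the IV with memmove(), the dead bytes are dropped with skb_pull(), and the trailing ICV is cut with skb_trim(). A minimal sketch (my_strip_iv_icv is an assumed name):

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>

	static int my_strip_iv_icv(struct sk_buff *skb, unsigned int hdr_len,
				   unsigned int iv_len, unsigned int icv_len)
	{
		if (skb->len < hdr_len + iv_len + icv_len)
			return -EINVAL;		/* malformed frame */

		memmove(skb->data + iv_len, skb->data, hdr_len);
		skb_pull(skb, iv_len);			/* drop the IV gap */
		skb_trim(skb, skb->len - icv_len);	/* drop trailing ICV */
		return 0;
	}
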
skb               199 drivers/net/wireless/broadcom/b43legacy/xmit.h 		struct sk_buff *skb,
skb               332 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c 					  struct sk_buff *skb)
skb               338 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c 		return brcmf_proto_txdata(drvr, ifidx, 0, skb);
skb               340 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c 	return brcmf_fws_process_skb(ifp, skb);
skb               400 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c 				       struct sk_buff *skb)
skb               402 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c 	brcmf_fws_rxreorder(ifp, skb);
skb               286 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 				   struct sk_buff *skb)
skb               292 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	req_sz = skb->len + 3;
skb               297 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 		err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
skb               301 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 		err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
skb               317 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 				    struct sk_buff *skb)
skb               323 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	req_sz = skb->len + 3;
skb               326 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);
skb               563 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	struct sk_buff *skb;
skb               589 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 		skb_queue_walk(pktq, skb) {
skb               590 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 			memcpy(skb->data, glom_skb->data, skb->len);
skb               591 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 			skb_pull(glom_skb, skb->len);
skb               635 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	struct sk_buff *skb;
skb               649 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 		skb_queue_walk(pktq, skb) {
skb               651 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 						       addr, skb);
skb                73 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h 	int (*txdata)(struct device *dev, struct sk_buff *skb);
skb               172 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h static inline int brcmf_bus_txdata(struct brcmf_bus *bus, struct sk_buff *skb)
skb               174 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h 	return bus->ops->txdata(bus->dev, skb);
skb               259 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c static bool brcmf_skb_is_iapp(struct sk_buff *skb)
skb               269 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if (skb->len - skb->mac_len != 6 ||
skb               270 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest))
skb               273 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	eth_data = skb_mac_header(skb) + ETH_HLEN;
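
The core.c lines above come from brcmf_skb_is_iapp(), which gates out IAPP (802.11F) announcement frames: a 6-byte multicast payload whose bytes match a known pattern. A sketch of that gate, assuming hypothetical signature bytes (the real driver compares its own LLC/XID pattern):

#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch of an IAPP filter in the style of brcmf_skb_is_iapp():
 * an IAPP announcement is a short (6-byte payload) multicast
 * frame. The signature bytes below are assumed for illustration. */
static bool skb_looks_like_iapp(struct sk_buff *skb)
{
	static const u8 iapp_sig[6] = {
		0x00, 0x01, 0xaf, 0x81, 0x01, 0x00 };	/* assumed values */
	const u8 *eth_data;

	if (skb->len - skb->mac_len != 6 ||
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	eth_data = skb_mac_header(skb) + ETH_HLEN;
	return !memcmp(eth_data, iapp_sig, sizeof(iapp_sig));
}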
skb               285 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
skb               300 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		dev_kfree_skb(skb);
skb               316 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if (!drvr->settings->iapp && brcmf_skb_is_iapp(skb)) {
skb               317 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		dev_kfree_skb(skb);
skb               323 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
skb               324 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
skb               329 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0,
skb               340 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if (skb->len < sizeof(*eh)) {
skb               342 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		dev_kfree_skb(skb);
skb               346 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	eh = (struct ethhdr *)(skb->data);
skb               352 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if ((skb->priority == 0) || (skb->priority > 7))
skb               353 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		skb->priority = cfg80211_classify8021d(skb, NULL);
skb               355 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	ret = brcmf_proto_tx_queue_data(drvr, ifp->ifidx, skb);
skb               357 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		brcmf_txfinalize(ifp, skb, false);
skb               364 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		ndev->stats.tx_bytes += skb->len;
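
The brcmf_netdev_start_xmit() lines above expand the skb head when the bus/protocol header will not fit, or when the header area is cloned. A sketch of that headroom check, with a hypothetical helper name:

#include <linux/skbuff.h>

/* Ensure 'hdrlen' bytes of headroom before the lower layer pushes
 * its header, following the core.c pattern above: grow only when
 * headroom is short or the header area is shared with a clone. */
static int ensure_tx_headroom(struct sk_buff *skb, unsigned int hdrlen)
{
	int delta = max_t(int, (int)hdrlen - skb_headroom(skb), 0);

	if (!delta && !skb_header_cloned(skb))
		return 0;	/* nothing to do */
	return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
				GFP_ATOMIC);
}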
skb               395 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
skb               401 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if (!ifp->drvr->settings->iapp && brcmf_skb_is_iapp(skb)) {
skb               402 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		brcmu_pkt_buf_free_skb(skb);
skb               406 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if (skb->pkt_type == PACKET_MULTICAST)
skb               410 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		brcmu_pkt_buf_free_skb(skb);
skb               414 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	ifp->ndev->stats.rx_bytes += skb->len;
skb               417 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
skb               419 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		netif_rx(skb);
skb               425 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		netif_rx_ni(skb);
skb               428 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
skb               433 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		struct wlc_d11rxhdr *wlc_rxhdr = (struct wlc_d11rxhdr *)skb->data;
skb               448 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		skb_pull(skb, offset);
skb               451 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		radiotap = skb_push(skb, sizeof(*radiotap));
skb               456 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		skb->len -= 4;
skb               461 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		radiotap = skb_push(skb, sizeof(*radiotap));
skb               466 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		skb->len -= 4;
skb               469 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	skb->dev = ifp->ndev;
skb               470 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	skb_reset_mac_header(skb);
skb               471 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	skb->pkt_type = PACKET_OTHERHOST;
skb               472 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	skb->protocol = htons(ETH_P_802_2);
skb               474 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	brcmf_netif_rx(ifp, skb);
skb               477 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
skb               483 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
skb               488 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		brcmu_pkt_buf_free_skb(skb);
skb               492 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	skb->protocol = eth_type_trans(skb, (*ifp)->ndev);
skb               496 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
skb               502 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
skb               504 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if (brcmf_rx_hdrpull(drvr, skb, &ifp))
skb               507 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if (brcmf_proto_is_reorder_skb(skb)) {
skb               508 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		brcmf_proto_rxreorder(ifp, skb);
skb               512 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 			brcmf_fweh_process_skb(ifp->drvr, skb,
skb               515 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		brcmf_netif_rx(ifp, skb);
skb               519 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
skb               525 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	brcmf_dbg(EVENT, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
skb               527 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if (brcmf_rx_hdrpull(drvr, skb, &ifp))
skb               530 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	brcmf_fweh_process_skb(ifp->drvr, skb, 0);
skb               531 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	brcmu_pkt_buf_free_skb(skb);
skb               720 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
skb               723 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 	if (skb)
skb               724 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c 		dev_kfree_skb_any(skb);
skb               211 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
skb               212 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
skb               234 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct sk_buff *skb;
skb               249 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	skb = skb_dequeue(&ring->skblist);
skb               250 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	while (skb) {
skb               251 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		brcmf_txfinalize(ifp, skb, false);
skb               252 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		skb = skb_dequeue(&ring->skblist);
skb               260 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 			   struct sk_buff *skb)
skb               266 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	skb_queue_tail(&ring->skblist, skb);
skb               288 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct sk_buff *skb;
skb               294 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	skb = skb_dequeue(&ring->skblist);
skb               302 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	return skb;
skb               307 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 			     struct sk_buff *skb)
skb               313 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	skb_queue_head(&ring->skblist, skb);
skb                58 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h 			   struct sk_buff *skb);
skb                61 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h 			     struct sk_buff *skb);
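
The flowring.c lines above keep each ring's backlog in an sk_buff_head: enqueue at the tail, dequeue from the head, and reinsert at the head when a commit fails so ordering is preserved. A minimal sketch of that drain-or-requeue loop, with a hypothetical commit callback:

#include <linux/skbuff.h>

/* Drain a per-ring backlog through a (hypothetical) commit
 * callback; on failure, requeue at the head so packet order is
 * kept, mirroring brcmf_flowring_reinsert(). */
static void ring_drain(struct sk_buff_head *list,
		       int (*commit)(struct sk_buff *skb))
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		if (commit(skb)) {
			skb_queue_head(list, skb);	/* retry later */
			break;
		}
	}
}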
skb               326 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h 					  struct sk_buff *skb, u16 stype)
skb               332 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h 	if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL))
skb               335 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h 	if ((skb->len + ETH_HLEN) < sizeof(*event_packet))
skb               338 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h 	event_packet = (struct brcmf_event *)skb_mac_header(skb);
skb               357 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h 	brcmf_fweh_process_event(drvr, event_packet, skb->len + ETH_HLEN);
skb               211 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c #define brcmf_skbcb(skb)	((struct brcmf_skbuff_cb *)((skb)->cb))
skb               236 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c #define brcmf_skb_if_flags_set_field(skb, field, value) \
skb               237 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmu_maskset16(&(brcmf_skbcb(skb)->if_flags), \
skb               240 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c #define brcmf_skb_if_flags_get_field(skb, field) \
skb               241 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmu_maskget16(brcmf_skbcb(skb)->if_flags, \
skb               271 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c #define brcmf_skb_htod_tag_set_field(skb, field, value) \
skb               272 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \
skb               275 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c #define brcmf_skb_htod_tag_get_field(skb, field) \
skb               276 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmu_maskget32(brcmf_skbcb(skb)->htod, \
skb               287 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c #define brcmf_skb_htod_seq_set_field(skb, field, value) \
skb               288 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmu_maskset16(&(brcmf_skbcb(skb)->htod_seq), \
skb               291 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c #define brcmf_skb_htod_seq_get_field(skb, field) \
skb               292 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmu_maskget16(brcmf_skbcb(skb)->htod_seq, \
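
The fwsignal.c macros above stash per-packet firmware-signalling state in the skb control block: brcmf_skbcb() casts skb->cb to a driver struct, and the *_set_field/*_get_field macros are mask/shift accessors over its words. A sketch of that general pattern, with a hypothetical field layout (not the brcmfmac one):

#include <linux/skbuff.h>

/* Driver state carried in the 48-byte skb->cb area, the same
 * trick brcmf_skbuff_cb uses; the struct must fit in cb. */
struct my_skb_cb {
	u16 if_flags;
	u32 htod;
};
#define my_skbcb(skb)	((struct my_skb_cb *)((skb)->cb))

/* Mask/shift accessors in the style of the listed macros.
 * IFIDX in bits 0..3 is an assumed layout for illustration. */
#define MY_IFIDX_MASK	0x000f
#define MY_IFIDX_SHIFT	0

static inline void my_skb_set_ifidx(struct sk_buff *skb, u16 ifidx)
{
	my_skbcb(skb)->if_flags &= ~MY_IFIDX_MASK;
	my_skbcb(skb)->if_flags |= (ifidx << MY_IFIDX_SHIFT) & MY_IFIDX_MASK;
}

static inline u16 my_skb_get_ifidx(struct sk_buff *skb)
{
	return (my_skbcb(skb)->if_flags & MY_IFIDX_MASK) >> MY_IFIDX_SHIFT;
}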
skb               566 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
skb               568 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
skb               647 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct sk_buff *skb;
skb               654 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
skb               655 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		while (skb) {
skb               656 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
skb               657 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
skb               659 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			brcmu_pkt_buf_free_skb(skb);
skb               660 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
skb               685 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct sk_buff *skb;
skb               693 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			skb = h->items[i].pkt;
skb               694 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			if (fn == NULL || fn(skb, &ifidx)) {
skb               697 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 					brcmu_pkt_buf_free_skb(skb);
skb               830 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct sk_buff *skb;
skb               841 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
skb               842 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		while (skb) {
skb               843 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
skb               845 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			WARN_ON(skb != hi->pkt);
skb               847 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			brcmu_pkt_buf_free_skb(skb);
skb               848 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
skb               875 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
skb               877 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
skb               881 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	__le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
skb               882 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	__le16 pktseq = cpu_to_le16(brcmf_skbcb(skb)->htod_seq);
skb               885 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		  entry->name, brcmf_skb_if_flags_get_field(skb, INDEX),
skb               887 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		  brcmf_skbcb(skb)->htod, brcmf_skbcb(skb)->htod_seq);
skb               897 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	skb_push(skb, data_offset);
skb               898 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	wlh = skb->data;
skb               931 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct sk_buff *skb;
skb               955 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		skb = brcmu_pkt_buf_get_skb(len);
skb               956 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		if (skb == NULL)
skb               958 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		skb_pull(skb, len);
skb               959 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		skcb = brcmf_skbcb(skb);
skb               964 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		data_offset = brcmf_fws_hdrpush(fws, skb);
skb               965 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
skb               967 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		err = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
skb               970 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			brcmu_pkt_buf_free_skb(skb);
skb              1171 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				 struct sk_buff *skb)
skb              1175 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
skb              1176 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 1);
skb              1181 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
skb              1182 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
skb              1186 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_skb_if_flags_set_field(skb, REQUESTED, 0);
skb              1187 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
skb              1191 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c static void brcmf_fws_macdesc_return_req_credit(struct sk_buff *skb)
skb              1193 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
skb              1195 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	if ((brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) &&
skb              1410 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 					 struct sk_buff *skb,
skb              1413 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
skb              1417 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
skb              1429 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
skb              1430 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_skbcb(skb)->htod_seq = seq;
skb              1431 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
skb              1432 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
skb              1433 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
skb              1435 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
skb              1437 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
skb              1441 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
skb              1459 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct sk_buff *skb;
skb              1482 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
skb              1490 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		skcb = brcmf_skbcb(skb);
skb              1493 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			brcmu_pkt_buf_free_skb(skb);
skb              1504 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
skb              1506 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		    (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) ||
skb              1511 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_fws_macdesc_return_req_credit(skb);
skb              1513 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		ret = brcmf_proto_hdrpull(fws->drvr, false, skb, &ifp);
skb              1515 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			brcmu_pkt_buf_free_skb(skb);
skb              1519 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb,
skb              1522 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			brcmf_txfinalize(ifp, skb, true);
skb              1851 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
skb              1864 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		  ifp->ifidx, skb->len, siglen);
skb              1866 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	WARN_ON(siglen > skb->len);
skb              1872 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		skb_pull(skb, siglen);
skb              1878 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	signal_data = skb->data;
skb              1910 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			rd = (struct brcmf_skb_reorder_data *)skb->cb;
skb              1964 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	skb_pull(skb, siglen);
skb              1968 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	if (skb->len == 0)
skb              1994 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				   struct sk_buff *skb, int fifo)
skb              2002 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	entry = brcmf_skbcb(skb)->mac;
skb              2005 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
skb              2008 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb);
skb              2020 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
skb              2026 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_fws_macdesc_return_req_credit(skb);
skb              2055 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				struct sk_buff *skb)
skb              2057 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
skb              2067 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	data_offset = brcmf_fws_precommit_skb(fws, fifo, skb);
skb              2071 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
skb              2073 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	rc = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
skb              2081 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		(void)brcmf_proto_hdrpull(fws->drvr, false, skb, NULL);
skb              2087 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
skb              2093 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_fws_rollback_toq(fws, skb, fifo);
skb              2117 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
skb              2121 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
skb              2122 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct ethhdr *eh = (struct ethhdr *)(skb->data);
skb              2132 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
skb              2134 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		fifo = brcmf_fws_prio2fifo[skb->priority];
skb              2144 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	if (!brcmf_fws_assign_htod(fws, skb, fifo)) {
skb              2145 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
skb              2149 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_txfinalize(ifp, skb, false);
skb              2207 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct sk_buff *skb;
skb              2220 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			while ((skb = brcmf_fws_deq(fws, fifo)) != NULL) {
skb              2221 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				hslot = brcmf_skb_htod_tag_get_field(skb,
skb              2224 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 							&skb, true);
skb              2225 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				ifidx = brcmf_skb_if_flags_get_field(skb,
skb              2229 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
skb              2234 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 							 skb, false);
skb              2243 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			skb = brcmf_fws_deq(fws, fifo);
skb              2244 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			if (!skb)
skb              2247 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			if (brcmf_fws_commit_skb(fws, fifo, skb))
skb              2256 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				skb = brcmf_fws_deq(fws, fifo);
skb              2257 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				if (!skb) {
skb              2261 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				if (brcmf_fws_commit_skb(fws, fifo, skb))
skb              2476 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
skb              2480 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
skb              2481 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmu_pkt_buf_free_skb(skb);
skb              2485 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
skb                14 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb);
skb                15 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb);
skb                20 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
skb                22 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
skb               279 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct sk_buff *skb;
skb               319 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			 struct sk_buff *skb, u16 data_offset,
skb               327 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	*physaddr = dma_map_single(dev, skb->data + data_offset,
skb               328 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 				   skb->len - data_offset, pktids->direction);
skb               353 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	array[*idx].skb = skb;
skb               366 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct sk_buff *skb;
skb               376 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 				 pktid->skb->len - pktid->data_offset,
skb               378 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		skb = pktid->skb;
skb               380 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		return skb;
skb               403 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 					 pktid->skb->len - pktid->data_offset,
skb               405 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			brcmu_pkt_buf_free_skb(pktid->skb);
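
The msgbuf.c lines above map an skb's payload for DMA and park the skb in a pktid slot until the completion resolves it by id, unmapping on the way out. A sketch of the mapping half under those assumptions, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* One pktid slot: the skb pointer is stashed so the completion
 * path can unmap skb->len - data_offset bytes and free or deliver
 * the packet. 'dir' is DMA_TO_DEVICE for tx ids, DMA_FROM_DEVICE
 * for rx ids, as in the listing. */
struct my_pktid {
	struct sk_buff *skb;
	dma_addr_t physaddr;
	u16 data_offset;
};

static int my_pktid_map(struct device *dev, struct my_pktid *pktid,
			struct sk_buff *skb, u16 data_offset,
			enum dma_data_direction dir)
{
	dma_addr_t pa = dma_map_single(dev, skb->data + data_offset,
				       skb->len - data_offset, dir);

	if (dma_mapping_error(dev, pa))
		return -ENOMEM;
	pktid->skb = skb;
	pktid->physaddr = pa;
	pktid->data_offset = data_offset;
	return 0;
}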
skb               491 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct sk_buff *skb = NULL;
skb               508 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
skb               512 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		if (!skb)
skb               515 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
skb               518 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	brcmu_pkt_buf_free_skb(skb);
skb               533 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 				struct sk_buff *skb, struct brcmf_if **ifp)
skb               538 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
skb               661 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 					struct sk_buff *skb)
skb               664 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct ethhdr *eh = (struct ethhdr *)(skb->data);
skb               673 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 				       skb->priority, ifidx);
skb               700 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct sk_buff *skb;
skb               714 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		skb = brcmf_flowring_dequeue(flow, flowid);
skb               715 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		if (skb == NULL) {
skb               720 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		skb_orphan(skb);
skb               722 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 					     msgbuf->tx_pktids, skb, ETH_HLEN,
skb               724 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			brcmf_flowring_reinsert(flow, flowid, skb);
skb               732 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			brcmf_flowring_reinsert(flow, flowid, skb);
skb               743 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		tx_msghdr->flags |= (skb->priority & 0x07) <<
skb               746 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
skb               747 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
skb               796 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 				      struct sk_buff *skb)
skb               800 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct ethhdr *eh = (struct ethhdr *)(skb->data);
skb               805 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
skb               807 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
skb               811 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
skb               873 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct sk_buff *skb;
skb               880 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
skb               882 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	if (!skb)
skb               890 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			 skb, true);
skb               899 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct sk_buff *skb;
skb               921 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
skb               923 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		if (skb == NULL) {
skb               929 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		pktlen = skb->len;
skb               931 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 					     msgbuf->rx_pktids, skb, 0,
skb               933 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			dev_kfree_skb_any(skb);
skb               948 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			skb_pull(skb, msgbuf->rx_metadata_offset);
skb               949 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			pktlen = skb->len;
skb              1007 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct sk_buff *skb;
skb              1031 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
skb              1033 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		if (skb == NULL) {
skb              1039 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		pktlen = skb->len;
skb              1041 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 					     msgbuf->rx_pktids, skb, 0,
skb              1043 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			dev_kfree_skb_any(skb);
skb              1100 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct sk_buff *skb;
skb              1111 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
skb              1113 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	if (!skb)
skb              1117 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		skb_pull(skb, msgbuf->rx_dataoffset);
skb              1119 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	skb_trim(skb, buflen);
skb              1128 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	skb->protocol = eth_type_trans(skb, ifp->ndev);
skb              1130 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	brcmf_fweh_process_skb(ifp->drvr, skb, 0);
skb              1133 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	brcmu_pkt_buf_free_skb(skb);
skb              1142 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct sk_buff *skb;
skb              1157 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
skb              1159 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	if (!skb)
skb              1163 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		skb_pull(skb, data_offset);
skb              1165 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		skb_pull(skb, msgbuf->rx_dataoffset);
skb              1167 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	skb_trim(skb, buflen);
skb              1175 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			brcmu_pkt_buf_free_skb(skb);
skb              1179 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		brcmf_netif_mon_rx(ifp, skb);
skb              1187 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		brcmu_pkt_buf_free_skb(skb);
skb              1191 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	skb->protocol = eth_type_trans(skb, ifp->ndev);
skb              1192 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	brcmf_netif_rx(ifp, skb);
skb              1350 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
skb                20 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 		       struct sk_buff *skb, struct brcmf_if **ifp);
skb                26 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 			     struct sk_buff *skb);
skb                28 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 		      struct sk_buff *skb);
skb                35 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 	void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
skb                49 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 				      struct sk_buff *skb,
skb                61 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 	return drvr->proto->hdrpull(drvr, do_fws, skb, ifp);
skb                77 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 					    struct sk_buff *skb)
skb                79 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 	return drvr->proto->tx_queue_data(drvr, ifidx, skb);
skb                83 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 				     u8 offset, struct sk_buff *skb)
skb                85 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 	return drvr->proto->txdata(drvr, ifidx, offset, skb);
skb               103 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
skb               107 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 	rd = (struct brcmf_skb_reorder_data *)skb->cb;
skb               112 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
skb               114 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 	ifp->drvr->proto->rxreorder(ifp, skb);
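
The proto.h lines above dispatch through a per-protocol ops table wrapped in static inlines (brcmf_proto_txdata(), brcmf_proto_rxreorder(), ...), so bcdc and msgbuf can plug in different transmit paths behind one interface. A sketch of that dispatch pattern with hypothetical types:

#include <linux/skbuff.h>

/* Hypothetical ops table in the style of struct brcmf_proto:
 * the protocol backend fills in the callbacks and the core calls
 * them through thin static-inline wrappers. */
struct my_proto_ops {
	int (*txdata)(void *ctx, int ifidx, u8 offset, struct sk_buff *skb);
	void (*rxreorder)(void *ctx, struct sk_buff *skb);
};

struct my_drvr {
	void *ctx;				/* backend private data */
	const struct my_proto_ops *proto;
};

static inline int my_proto_txdata(struct my_drvr *drvr, int ifidx,
				  u8 offset, struct sk_buff *skb)
{
	return drvr->proto->txdata(drvr->ctx, ifidx, offset, skb);
}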
skb               470 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		  req->skb);
skb               473 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	brcmf_proto_bcdc_txcomplete(devinfo->dev, req->skb, urb->status == 0);
skb               474 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->skb = NULL;
skb               489 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct sk_buff *skb;
skb               493 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	skb = req->skb;
skb               494 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->skb = NULL;
skb               498 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmu_pkt_buf_free_skb(skb);
skb               504 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		skb_put(skb, urb->actual_length);
skb               505 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmf_rx_frame(devinfo->dev, skb, true);
skb               508 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmu_pkt_buf_free_skb(skb);
skb               518 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	struct sk_buff *skb;
skb               524 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu);
skb               525 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	if (!skb) {
skb               529 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->skb = skb;
skb               532 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 			  skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
skb               540 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		brcmu_pkt_buf_free_skb(req->skb);
skb               541 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		req->skb = NULL;
skb               584 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
skb               591 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	brcmf_dbg(USB, "Enter, skb=%p\n", skb);
skb               605 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 	req->skb = skb;
skb               608 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 			  skb->data, skb->len, brcmf_usb_tx_complete, req);
skb               615 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 		req->skb = NULL;
skb                39 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h 	struct sk_buff  *skb;
skb              1555 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	struct sk_buff *skb;
skb              1559 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		skb = di->txp[i];
skb              1560 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		if (skb != NULL) {
skb              1561 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 			tx_info = (struct ieee80211_tx_info *)skb->cb;
skb               405 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 			 struct sk_buff *skb)
skb               408 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               413 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 		kfree_skb(skb);
skb               416 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	if (brcms_c_sendpkt_mac80211(wl->wlc, skb, hw))
skb              6850 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c static int brcms_c_tx(struct brcms_c_info *wlc, struct sk_buff *skb)
skb              6857 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	fifo = brcms_ac_to_fifo(skb_get_queue_mapping(skb));
skb              6859 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	txh = (struct d11txh *)(skb->data);
skb              6874 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 						 skb_get_queue_mapping(skb)));
skb              6893 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	ret = brcms_c_txfifo(wlc, fifo, skb);
skb                20 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	struct sk_buff *skb;
skb                22 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	skb = dev_alloc_skb(len);
skb                23 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	if (skb) {
skb                24 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 		skb_put(skb, len);
skb                25 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 		skb->priority = 0;
skb                28 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	return skb;
skb                33 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
skb                35 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	if (!skb)
skb                38 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	WARN_ON(skb->next);
skb                39 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	dev_kfree_skb_any(skb);
skb               107 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 				      bool (*match_fn)(struct sk_buff *skb,
skb               118 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
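
The brcmutil lines above show the allocate/free pair: brcmu_pkt_buf_get_skb() returns a buffer with len already reserved via skb_put(), and brcmu_pkt_buf_free_skb() tolerates NULL and warns if the skb is still chained. A short usage sketch, assuming the prototypes as listed from brcmu_utils.h:

#include <linux/skbuff.h>

struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
void brcmu_pkt_buf_free_skb(struct sk_buff *skb);

/* Typical pairing: allocate a fixed-size packet buffer, fill it,
 * and free it on every error path; since the free helper accepts
 * NULL, callers need no separate check before it. */
static int demo_roundtrip(unsigned int len)
{
	struct sk_buff *skb = brcmu_pkt_buf_get_skb(len);

	if (!skb)
		return -ENOMEM;
	/* ... fill skb->data and hand off, or bail out ... */
	brcmu_pkt_buf_free_skb(skb);
	return 0;
}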
skb              1213 drivers/net/wireless/cisco/airo.c 		struct sk_buff *skb;
skb              1917 drivers/net/wireless/cisco/airo.c static netdev_tx_t mpi_start_xmit(struct sk_buff *skb,
skb              1924 drivers/net/wireless/cisco/airo.c 	if (!skb) {
skb              1928 drivers/net/wireless/cisco/airo.c 	if (skb_padto(skb, ETH_ZLEN)) {
skb              1940 drivers/net/wireless/cisco/airo.c 		skb_queue_tail (&ai->txq, skb);
skb              1945 drivers/net/wireless/cisco/airo.c 	skb_queue_tail (&ai->txq, skb);
skb              1966 drivers/net/wireless/cisco/airo.c 	struct sk_buff *skb;
skb              1975 drivers/net/wireless/cisco/airo.c 	if ((skb = skb_dequeue(&ai->txq)) == NULL) {
skb              1983 drivers/net/wireless/cisco/airo.c 	len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
skb              1984 drivers/net/wireless/cisco/airo.c 	buffer = skb->data;
skb              2041 drivers/net/wireless/cisco/airo.c 	dev_kfree_skb_any(skb);
skb              2100 drivers/net/wireless/cisco/airo.c 	struct sk_buff *skb = priv->xmit.skb;
skb              2106 drivers/net/wireless/cisco/airo.c 	status = transmit_802_3_packet (priv, fids[fid], skb->data);
skb              2119 drivers/net/wireless/cisco/airo.c 	dev_kfree_skb(skb);
skb              2122 drivers/net/wireless/cisco/airo.c static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
skb              2130 drivers/net/wireless/cisco/airo.c 	if ( skb == NULL ) {
skb              2134 drivers/net/wireless/cisco/airo.c 	if (skb_padto(skb, ETH_ZLEN)) {
skb              2152 drivers/net/wireless/cisco/airo.c 	len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
skb              2155 drivers/net/wireless/cisco/airo.c 	priv->xmit.skb = skb;
skb              2171 drivers/net/wireless/cisco/airo.c 	struct sk_buff *skb = priv->xmit11.skb;
skb              2177 drivers/net/wireless/cisco/airo.c 	status = transmit_802_11_packet (priv, fids[fid], skb->data);
skb              2190 drivers/net/wireless/cisco/airo.c 	dev_kfree_skb(skb);
skb              2193 drivers/net/wireless/cisco/airo.c static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
skb              2204 drivers/net/wireless/cisco/airo.c 		dev_kfree_skb_any(skb);
skb              2208 drivers/net/wireless/cisco/airo.c 	if ( skb == NULL ) {
skb              2212 drivers/net/wireless/cisco/airo.c 	if (skb_padto(skb, ETH_ZLEN)) {
skb              2230 drivers/net/wireless/cisco/airo.c 	len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
skb              2233 drivers/net/wireless/cisco/airo.c 	priv->xmit11.skb = skb;
skb              2403 drivers/net/wireless/cisco/airo.c 		struct sk_buff *skb = NULL;
skb              2404 drivers/net/wireless/cisco/airo.c 		for (;(skb = skb_dequeue(&ai->txq));)
skb              2405 drivers/net/wireless/cisco/airo.c 			dev_kfree_skb(skb);
skb              2434 drivers/net/wireless/cisco/airo.c static int wll_header_parse(const struct sk_buff *skb, unsigned char *haddr)
skb              2436 drivers/net/wireless/cisco/airo.c 	memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN);
skb              3290 drivers/net/wireless/cisco/airo.c 	struct sk_buff *skb = NULL;
skb              3335 drivers/net/wireless/cisco/airo.c 	skb = dev_alloc_skb(len + hdrlen + 2 + 2);
skb              3336 drivers/net/wireless/cisco/airo.c 	if (!skb) {
skb              3341 drivers/net/wireless/cisco/airo.c 	skb_reserve(skb, 2); /* This way the IP header is aligned */
skb              3342 drivers/net/wireless/cisco/airo.c 	buffer = skb_put(skb, len + hdrlen);
skb              3370 drivers/net/wireless/cisco/airo.c 					dev_kfree_skb_irq(skb);
skb              3375 drivers/net/wireless/cisco/airo.c 				skb_trim(skb, len + hdrlen);
skb              3381 drivers/net/wireless/cisco/airo.c 			dev_kfree_skb_irq (skb);
skb              3417 drivers/net/wireless/cisco/airo.c 			skb_reset_mac_header(skb);
skb              3418 drivers/net/wireless/cisco/airo.c 			skb->pkt_type = PACKET_OTHERHOST;
skb              3419 drivers/net/wireless/cisco/airo.c 			skb->dev = ai->wifidev;
skb              3420 drivers/net/wireless/cisco/airo.c 			skb->protocol = htons(ETH_P_802_2);
skb              3422 drivers/net/wireless/cisco/airo.c 			skb->protocol = eth_type_trans(skb, ai->dev);
skb              3423 drivers/net/wireless/cisco/airo.c 		skb->ip_summed = CHECKSUM_NONE;
skb              3425 drivers/net/wireless/cisco/airo.c 		netif_rx(skb);
skb              3643 drivers/net/wireless/cisco/airo.c 	struct sk_buff *skb;
skb              3655 drivers/net/wireless/cisco/airo.c 		skb = dev_alloc_skb(len);
skb              3656 drivers/net/wireless/cisco/airo.c 		if (!skb) {
skb              3660 drivers/net/wireless/cisco/airo.c 		buffer = skb_put(skb,len);
skb              3671 drivers/net/wireless/cisco/airo.c 				skb_trim (skb, len - off);
skb              3679 drivers/net/wireless/cisco/airo.c 			dev_kfree_skb_irq (skb);
skb              3696 drivers/net/wireless/cisco/airo.c 		skb->ip_summed = CHECKSUM_NONE;
skb              3697 drivers/net/wireless/cisco/airo.c 		skb->protocol = eth_type_trans(skb, ai->dev);
skb              3698 drivers/net/wireless/cisco/airo.c 		netif_rx(skb);
skb              3712 drivers/net/wireless/cisco/airo.c 	struct sk_buff *skb = NULL;
skb              3739 drivers/net/wireless/cisco/airo.c 	skb = dev_alloc_skb( len + hdrlen + 2 );
skb              3740 drivers/net/wireless/cisco/airo.c 	if ( !skb ) {
skb              3744 drivers/net/wireless/cisco/airo.c 	buffer = skb_put(skb, len + hdrlen);
skb              3779 drivers/net/wireless/cisco/airo.c 	skb_reset_mac_header(skb);
skb              3780 drivers/net/wireless/cisco/airo.c 	skb->pkt_type = PACKET_OTHERHOST;
skb              3781 drivers/net/wireless/cisco/airo.c 	skb->dev = ai->wifidev;
skb              3782 drivers/net/wireless/cisco/airo.c 	skb->protocol = htons(ETH_P_802_2);
skb              3783 drivers/net/wireless/cisco/airo.c 	skb->ip_summed = CHECKSUM_NONE;
skb              3784 drivers/net/wireless/cisco/airo.c 	netif_rx( skb );
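
The airo.c RX lines above deliver raw 802.11 frames up through the wifidev monitor-style interface: mark the skb PACKET_OTHERHOST so the stack does not try to route it, tag it ETH_P_802_2, and hand it to netif_rx(). A sketch of that delivery step, with a hypothetical helper name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Deliver a raw 802.11 frame to a monitor-style netdev, following
 * the airo.c pattern above. No checksum was computed in hardware,
 * hence CHECKSUM_NONE. */
static void deliver_raw_80211(struct net_device *wifidev,
			      struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	skb->pkt_type = PACKET_OTHERHOST;
	skb->dev = wifidev;
	skb->protocol = htons(ETH_P_802_2);
	skb->ip_summed = CHECKSUM_NONE;
	netif_rx(skb);
}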
skb              2292 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx));
skb              2293 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	if (!packet->skb)
skb              2296 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	packet->rxp = (struct ipw2100_rx *)packet->skb->data;
skb              2297 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data,
skb              2301 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		dev_kfree_skb(packet->skb);
skb              2458 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	if (unlikely(status->frame_size > skb_tailroom(packet->skb))) {
skb              2462 drivers/net/wireless/intel/ipw2x00/ipw2100.c 			       status->frame_size, skb_tailroom(packet->skb));
skb              2485 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	skb_put(packet->skb, status->frame_size);
skb              2490 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	skb_copy_from_linear_data(packet->skb, packet_data,
skb              2495 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	if (!libipw_rx(priv->ieee, packet->skb, stats)) {
skb              2504 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		dev_kfree_skb_any(packet->skb);
skb              2505 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		packet->skb = NULL;
skb              2540 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	if (unlikely(status->frame_size > skb_tailroom(packet->skb) -
skb              2546 drivers/net/wireless/intel/ipw2x00/ipw2100.c 			       skb_tailroom(packet->skb));
skb              2567 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	memmove(packet->skb->data + sizeof(struct ipw_rt_hdr),
skb              2568 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		packet->skb->data, status->frame_size);
skb              2570 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	ipw_rt = (struct ipw_rt_hdr *) packet->skb->data;
skb              2580 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	skb_put(packet->skb, status->frame_size + sizeof(struct ipw_rt_hdr));
skb              2582 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	if (!libipw_rx(priv->ieee, packet->skb, stats)) {
skb              2586 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		dev_kfree_skb_any(packet->skb);
skb              2587 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		packet->skb = NULL;
skb              4613 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		dev_kfree_skb(priv->rx_buffers[j].skb);
skb              4668 drivers/net/wireless/intel/ipw2x00/ipw2100.c 			dev_kfree_skb(priv->rx_buffers[i].skb);
skb               335 drivers/net/wireless/intel/ipw2x00/ipw2100.h 	struct sk_buff *skb;
skb              3444 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (rxq->pool[i].skb != NULL) {
skb              3447 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			dev_kfree_skb(rxq->pool[i].skb);
skb              3448 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			rxq->pool[i].skb = NULL;
skb              5189 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
skb              5190 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (!rxb->skb) {
skb              5201 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		    pci_map_single(priv->pci_dev, rxb->skb->data,
skb              5234 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (rxq->pool[i].skb != NULL) {
skb              5237 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			dev_kfree_skb(rxq->pool[i].skb);
skb              7155 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			     struct sk_buff *skb)
skb              7160 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	u8 *daddr = skb->data + ETH_ALEN;
skb              7638 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				      struct sk_buff *skb)
skb              7643 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              7653 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		memmove(skb->data + LIBIPW_3ADDR_LEN,
skb              7654 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			skb->data + LIBIPW_3ADDR_LEN + 8,
skb              7655 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			skb->len - LIBIPW_3ADDR_LEN - 8);
skb              7656 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
skb              7662 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		memmove(skb->data + LIBIPW_3ADDR_LEN,
skb              7663 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			skb->data + LIBIPW_3ADDR_LEN + 4,
skb              7664 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			skb->len - LIBIPW_3ADDR_LEN - 4);
skb              7665 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		skb_trim(skb, skb->len - 8);	/* IV + ICV */
skb              7682 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
skb              7690 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		     skb_tailroom(rxb->skb))) {
skb              7703 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
skb              7706 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
skb              7708 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
skb              7711 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
skb              7715 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		ipw_rebuild_decrypted_skb(priv, rxb->skb);
skb              7717 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (!libipw_rx(priv->ieee, rxb->skb, stats))
skb              7720 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxb->skb = NULL;
skb              7731 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
skb              7753 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		     skb_tailroom(rxb->skb))) {
skb              7776 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
skb              7777 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
skb              7779 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
skb              7870 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
skb              7872 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
skb              7874 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (!libipw_rx(priv->ieee, rxb->skb, stats))
skb              7877 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxb->skb = NULL;
skb              7908 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
skb              7921 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct sk_buff *skb;
skb              7932 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
skb              7954 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
skb              7973 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	skb = skb_copy(rxb->skb, GFP_ATOMIC);
skb              7974 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (skb == NULL) {
skb              7980 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	ipw_rt = (void *)skb->data;
skb              7992 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	skb_put(skb, sizeof(*ipw_rt) + len);
skb              8078 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
skb              8080 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
skb              8082 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		dev_kfree_skb_any(skb);
skb              8202 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct sk_buff *skb = rxb->skb;
skb              8203 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
skb              8205 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	    (skb->data + IPW_RX_FRAME_SIZE);
skb              8223 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
skb              8227 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		skb_pull(skb, IPW_RX_FRAME_SIZE);
skb              8230 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
skb              8232 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		skb->dev = priv->ieee->dev;
skb              8235 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		skb_reset_mac_header(skb);
skb              8237 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		skb->pkt_type = PACKET_OTHERHOST;
skb              8238 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
skb              8239 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		memset(skb->cb, 0, sizeof(rxb->skb->cb));
skb              8240 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		netif_rx(skb);
skb              8241 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxb->skb = NULL;
skb              8278 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		pkt = (struct ipw_rx_packet *)rxb->skb->data;
skb              8337 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				    (struct libipw_hdr_4addr *)(rxb->skb->
skb              8423 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (rxb->skb != NULL) {
skb              8424 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			dev_kfree_skb_any(rxb->skb);
skb              8425 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			rxb->skb = NULL;
skb              10238 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		struct sk_buff *skb;
skb              10247 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
skb              10248 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (skb != NULL) {
skb              10255 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				skb_put_data(skb,
skb              10260 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			txb->fragments[i] = skb;
skb              10263 drivers/net/wireless/intel/ipw2x00/ipw2200.c 					(priv->pci_dev, skb->data,
skb              11525 drivers/net/wireless/intel/ipw2x00/ipw2200.c static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
skb              11529 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	dev_kfree_skb(skb);
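
The ipw2200.c excerpts above repeat one rx shape: skb_reserve() past the device status header at the front of the DMA buffer, skb_put() to the hardware-reported length, then hand the frame up (or free it on failure). A minimal sketch of that shape, assuming a generic netdev rx path; the example_ name and the plain netif_rx() hand-off are illustrative stand-ins, not ipw2200 code:

static void example_handle_rx(struct net_device *dev, struct sk_buff *skb,
			      unsigned int hdr_offset, u16 frame_len)
{
	/* Skip the device-specific status header preceding the frame. */
	skb_reserve(skb, hdr_offset);

	/* Expose exactly the bytes the hardware reported as received. */
	skb_put(skb, frame_len);

	skb->dev = dev;

	/* The real driver hands off to libipw_rx(); netif_rx() stands in
	 * here and consumes the skb in either outcome. */
	if (netif_rx(skb) == NET_RX_DROP)
		dev->stats.rx_dropped++;
}
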
skb               702 drivers/net/wireless/intel/ipw2x00/ipw2200.h 	struct sk_buff *skb;
skb               248 drivers/net/wireless/intel/ipw2x00/libipw.h 	struct sk_buff *skb;
skb               801 drivers/net/wireless/intel/ipw2x00/libipw.h 	int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb);
skb               953 drivers/net/wireless/intel/ipw2x00/libipw.h netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
skb               957 drivers/net/wireless/intel/ipw2x00/libipw.h void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
skb               959 drivers/net/wireless/intel/ipw2x00/libipw.h int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
skb                36 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 					struct sk_buff *skb,
skb                39 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb                42 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	skb->dev = ieee->dev;
skb                43 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	skb_reset_mac_header(skb);
skb                44 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	skb_pull(skb, libipw_get_hdrlen(fc));
skb                45 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	skb->pkt_type = PACKET_OTHERHOST;
skb                46 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	skb->protocol = htons(ETH_P_80211_RAW);
skb                47 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb                48 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	netif_rx(skb);
skb                65 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (entry->skb != NULL &&
skb                70 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			dev_kfree_skb_any(entry->skb);
skb                71 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			entry->skb = NULL;
skb                74 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (entry->skb != NULL && entry->seq == seq &&
skb                88 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	struct sk_buff *skb = NULL;
skb                99 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb = dev_alloc_skb(ieee->dev->mtu +
skb               104 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (skb == NULL)
skb               112 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (entry->skb != NULL)
skb               113 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			dev_kfree_skb_any(entry->skb);
skb               118 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		entry->skb = skb;
skb               128 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			skb = entry->skb;
skb               132 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	return skb;
skb               155 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	entry->skb = NULL;
skb               166 drivers/net/wireless/intel/ipw2x00/libipw_rx.c libipw_rx_frame_mgmt(struct libipw_device *ieee, struct sk_buff *skb,
skb               185 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
skb               193 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		ieee->apdevstats.rx_bytes += skb->len;
skb               194 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT);
skb               202 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			       skb->dev->name, type, stype);
skb               206 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		hostap_rx(skb->dev, skb, rx_stats);
skb               211 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	       "received in non-Host AP mode\n", skb->dev->name);
skb               228 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 				    struct sk_buff *skb)
skb               235 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	if (skb->len < 24)
skb               238 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	hdr = (struct libipw_hdr_3addr *)skb->data;
skb               254 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	if (skb->len < 24 + 8)
skb               258 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	pos = skb->data + 24;
skb               268 drivers/net/wireless/intel/ipw2x00/libipw_rx.c libipw_rx_frame_decrypt(struct libipw_device *ieee, struct sk_buff *skb,
skb               277 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	hdr = (struct libipw_hdr_3addr *)skb->data;
skb               281 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
skb               289 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 					     skb->data[hdrlen + 3] >> 6);
skb               300 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 				struct sk_buff *skb, int keyidx,
skb               309 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	hdr = (struct libipw_hdr_3addr *)skb->data;
skb               313 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
skb               328 drivers/net/wireless/intel/ipw2x00/libipw_rx.c int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
skb               352 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	hdr = (struct libipw_hdr_4addr *)skb->data;
skb               353 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	if (skb->len < 10) {
skb               365 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	if (skb->len < hdrlen) {
skb               367 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			dev->name, skb->len);
skb               410 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		dev->stats.rx_bytes += skb->len;
skb               411 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		libipw_monitor_rx(ieee, skb, rx_stats);
skb               420 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (skb->len >= hdrlen + 3) {
skb               422 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			keyidx = skb->data[hdrlen + 3] >> 6;
skb               467 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		    (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
skb               475 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (libipw_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
skb               488 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	if (skb->len < LIBIPW_3ADDR_LEN)
skb               501 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (skb->len < LIBIPW_4ADDR_LEN)
skb               516 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb->dev = dev = wds;
skb               525 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb->dev = dev = ieee->stadev;
skb               534 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
skb               563 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 					     type, stype, skb->len);
skb               570 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	    (keyidx = libipw_rx_frame_decrypt(ieee, skb, crypt)) < 0)
skb               573 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	hdr = (struct libipw_hdr_4addr *)skb->data;
skb               592 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		flen = skb->len;
skb               607 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), flen);
skb               611 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			skb_copy_from_linear_data_offset(skb, hdrlen,
skb               614 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		dev_kfree_skb_any(skb);
skb               615 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb = NULL;
skb               626 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb = frag_skb;
skb               627 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		hdr = (struct libipw_hdr_4addr *)skb->data;
skb               634 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	    libipw_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
skb               637 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	hdr = (struct libipw_hdr_4addr *)skb->data;
skb               640 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			   libipw_is_eapol_frame(ieee, skb)) {
skb               652 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	    !libipw_is_eapol_frame(ieee, skb)) {
skb               666 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (skb->len >= hdrlen + 3)
skb               667 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			keyidx = skb->data[hdrlen + 3] >> 6;
skb               695 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (skb->len < trimlen)
skb               698 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		__skb_trim(skb, skb->len - trimlen);
skb               700 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (skb->len < hdrlen)
skb               706 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	payload = skb->data + hdrlen;
skb               719 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 				prism2_rx_80211(ieee->apdev, skb, rx_stats,
skb               722 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 				ieee->apdevstats.rx_bytes += skb->len;
skb               735 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	if (skb->len - hdrlen >= 8 &&
skb               741 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb_pull(skb, hdrlen + SNAP_SIZE);
skb               742 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
skb               743 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
skb               747 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb_pull(skb, hdrlen);
skb               748 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		len = htons(skb->len);
skb               749 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		memcpy(skb_push(skb, 2), &len, 2);
skb               750 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
skb               751 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
skb               756 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		    IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) {
skb               759 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb_copy_to_linear_data_offset(skb, ETH_ALEN,
skb               760 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 					       skb->data + skb->len - ETH_ALEN,
skb               762 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb_trim(skb, skb->len - ETH_ALEN);
skb               767 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	dev->stats.rx_bytes += skb->len;
skb               775 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
skb               783 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			skb2 = skb;
skb               784 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			skb = NULL;
skb               799 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	if (skb) {
skb               800 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb->protocol = eth_type_trans(skb, dev);
skb               801 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		memset(skb->cb, 0, sizeof(skb->cb));
skb               802 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		skb->ip_summed = CHECKSUM_NONE;	/* 802.11 crc not sufficient */
skb               803 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (netif_rx(skb) == NET_RX_DROP) {
skb               833 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		     struct sk_buff *skb, struct libipw_rx_stats *stats)
skb               840 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (!libipw_rx(ieee, skb, stats))
skb               841 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			dev_kfree_skb_irq(skb);
skb               845 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	if (skb->len < sizeof(struct ieee80211_hdr))
skb               848 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	hdr = (struct libipw_hdr_4addr *)skb->data;
skb               856 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (skb->len < sizeof(struct libipw_hdr_3addr))
skb               859 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		dev_kfree_skb_irq(skb);
skb               910 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 		if (!libipw_rx(ieee, skb, stats))
skb               911 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			dev_kfree_skb_irq(skb);
skb               915 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	dev_kfree_skb_irq(skb);
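
The tail of libipw_rx() above rebuilds an ethernet frame from a decrypted 802.11 data frame: for SNAP/RFC1042 payloads it pulls the 802.11 and SNAP headers so the ethertype becomes the first two bytes, then pushes the saved addresses back on. A condensed sketch, assuming dst/src were captured from the 802.11 header earlier (the example_ name is hypothetical; SNAP_SIZE is the 6-byte SNAP prefix used in the excerpts):

static void example_decap_snap(struct sk_buff *skb, unsigned int hdrlen,
			       const u8 *dst, const u8 *src)
{
	/* Drop the 802.11 header plus the SNAP prefix; the ethertype that
	 * followed SNAP is now the first two bytes at skb->data. */
	skb_pull(skb, hdrlen + SNAP_SIZE);

	/* Rebuild the 802.3 header in place. */
	memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
	memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
}

This works without any reallocation because skb_push() simply reclaims the headroom that skb_pull() just freed.
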
skb               209 drivers/net/wireless/intel/ipw2x00/libipw_tx.c static int libipw_classify(struct sk_buff *skb)
skb               214 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	eth = (struct ethhdr *)skb->data;
skb               218 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	ip = ip_hdr(skb);
skb               241 drivers/net/wireless/intel/ipw2x00/libipw_tx.c netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
skb               260 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	int priority = skb->priority;
skb               275 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
skb               277 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 		       ieee->dev->name, skb->len);
skb               281 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	ether_type = ((struct ethhdr *)skb->data)->h_proto;
skb               298 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	skb_copy_from_linear_data(skb, dest, ETH_ALEN);
skb               299 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);
skb               321 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
skb               325 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 		skb->priority = libipw_classify(skb);
skb               326 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 		header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID);
skb               331 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	skb_pull(skb, sizeof(struct ethhdr));
skb               334 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	bytes = skb->len + SNAP_SIZE + sizeof(u16);
skb               352 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 		skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
skb               359 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 		dev_kfree_skb_any(skb);
skb               360 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 		skb = skb_new;
skb               363 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 		skb_pull(skb, hdr_len);
skb               478 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 		skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes);
skb               481 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 		skb_pull(skb, bytes);
skb               496 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 	dev_kfree_skb_any(skb);
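
libipw_xmit() above does the inverse on tx: save the ethernet addresses and ethertype, then skb_pull() the 802.3 header away before the payload is re-wrapped in an 802.11 + SNAP header. A sketch of that first step (example_ name hypothetical):

static __be16 example_strip_ethhdr(struct sk_buff *skb, u8 *dest, u8 *src)
{
	__be16 ether_type = ((struct ethhdr *)skb->data)->h_proto;

	/* Capture the addresses before the header is discarded. */
	skb_copy_from_linear_data(skb, dest, ETH_ALEN);
	skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);

	/* Leave only the payload; the 802.11 header is built separately. */
	skb_pull(skb, sizeof(struct ethhdr));

	return ether_type;
}
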
skb               449 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	      struct sk_buff *skb)
skb               451 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               452 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               460 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	int txq_id = skb_get_queue_mapping(skb);
skb               523 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	txq->skbs[q->write_ptr] = skb;
skb               547 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
skb               555 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	tx_cmd->len = cpu_to_le16((u16) skb->len);
skb               584 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	secondlen = skb->len - hdr_len;
skb               587 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
skb               609 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	il_update_stats(il, true, fc, skb->len);
skb              2855 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	       struct sk_buff *skb)
skb              2861 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
skb              2862 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
skb              2864 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	if (il3945_tx_skb(il, control->sta, skb))
skb              2865 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		dev_kfree_skb_any(skb);
skb               428 drivers/net/wireless/intel/iwlegacy/3945-rs.c 		    struct sk_buff *skb)
skb               435 drivers/net/wireless/intel/iwlegacy/3945-rs.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               608 drivers/net/wireless/intel/iwlegacy/3945-rs.c 	struct sk_buff *skb = txrc->skb;
skb               624 drivers/net/wireless/intel/iwlegacy/3945-rs.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               276 drivers/net/wireless/intel/iwlegacy/3945.c 	struct sk_buff *skb;
skb               283 drivers/net/wireless/intel/iwlegacy/3945.c 		skb = txq->skbs[txq->q.read_ptr];
skb               284 drivers/net/wireless/intel/iwlegacy/3945.c 		ieee80211_tx_status_irqsafe(il->hw, skb);
skb               473 drivers/net/wireless/intel/iwlegacy/3945.c 	struct sk_buff *skb;
skb               494 drivers/net/wireless/intel/iwlegacy/3945.c 	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
skb               495 drivers/net/wireless/intel/iwlegacy/3945.c 	if (!skb) {
skb               508 drivers/net/wireless/intel/iwlegacy/3945.c 		skb_put_data(skb, rx_hdr->payload, len);
skb               510 drivers/net/wireless/intel/iwlegacy/3945.c 		skb_add_rx_frag(skb, 0, rxb->page,
skb               517 drivers/net/wireless/intel/iwlegacy/3945.c 	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
skb               519 drivers/net/wireless/intel/iwlegacy/3945.c 	ieee80211_rx(il->hw, skb);
skb               668 drivers/net/wireless/intel/iwlegacy/3945.c 		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
skb               671 drivers/net/wireless/intel/iwlegacy/3945.c 		if (skb) {
skb               672 drivers/net/wireless/intel/iwlegacy/3945.c 			dev_kfree_skb_any(skb);
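
The 3945.c rx excerpts above pick between two delivery strategies: small frames are copied into a freshly allocated skb head, large frames keep their data in the rx page and attach it as a paged fragment. A sketch of that decision, with EXAMPLE_SMALL_COPY standing in for the driver's SMALL_PACKET_SIZE (both the example_ names and the threshold value are illustrative):

#define EXAMPLE_SMALL_COPY 256	/* stand-in for SMALL_PACKET_SIZE */

static void example_rx_deliver(struct ieee80211_hw *hw, struct page *page,
			       void *payload, unsigned int len,
			       unsigned int truesize)
{
	struct sk_buff *skb = dev_alloc_skb(EXAMPLE_SMALL_COPY);

	if (!skb)
		return;

	if (len <= EXAMPLE_SMALL_COPY) {
		/* Tiny frame: linearise it so the rx page can be reused. */
		skb_put_data(skb, payload, len);
	} else {
		/* Large frame: donate the page to the skb as a fragment;
		 * the payload itself is never copied. */
		skb_add_rx_frag(skb, 0, page,
				(u8 *)payload - (u8 *)page_address(page),
				len, truesize);
	}

	ieee80211_rx(hw, skb);
}
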
skb               567 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct sk_buff *skb;
skb               586 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
skb               587 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	if (!skb) {
skb               593 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		skb_put_data(skb, hdr, len);
skb               595 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
skb               602 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
skb               604 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	ieee80211_rx(il->hw, skb);
skb              1482 drivers/net/wireless/intel/iwlegacy/4965-mac.c il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
skb              1643 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	      struct sk_buff *skb)
skb              1645 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1646 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1723 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	txq_id = skb_get_queue_mapping(skb);
skb              1765 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	txq->skbs[q->write_ptr] = skb;
skb              1789 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	tx_cmd->len = cpu_to_le16((u16) skb->len);
skb              1792 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
skb              1795 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
skb              1825 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	secondlen = skb->len - hdr_len;
skb              1828 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
skb              1860 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	il_update_stats(il, true, fc, skb->len);
skb              2444 drivers/net/wireless/intel/iwlegacy/4965-mac.c il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
skb              2446 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2451 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	ieee80211_tx_status_irqsafe(il->hw, skb);
skb              2461 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct sk_buff *skb;
skb              2473 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		skb = txq->skbs[txq->q.read_ptr];
skb              2475 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (WARN_ON_ONCE(skb == NULL))
skb              2478 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		hdr = (struct ieee80211_hdr *) skb->data;
skb              2482 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);
skb              2690 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		struct sk_buff *skb;
skb              2708 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			skb = il->txq[txq_id].skbs[idx];
skb              2709 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			if (WARN_ON_ONCE(skb == NULL))
skb              2711 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			hdr = (struct ieee80211_hdr *) skb->data;
skb              2766 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct sk_buff *skb;
skb              2786 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	skb = txq->skbs[txq->q.read_ptr];
skb              2787 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	info = IEEE80211_SKB_CB(skb);
skb              2790 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              3949 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
skb              3952 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (skb) {
skb              3953 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			dev_kfree_skb_any(skb);
skb              5861 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	      struct sk_buff *skb)
skb              5867 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
skb              5868 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
skb              5870 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	if (il4965_tx_skb(il, control->sta, skb))
skb              5871 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		dev_kfree_skb_any(skb);
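
The 4965 tx path above records the skb in txq->skbs[] at the queue write pointer and maps only the payload for DMA; the 802.11 header travels inside the tx command buffer. A sketch of the payload-mapping step, using the same legacy pci_map_single() API as the excerpts (example_ name hypothetical):

static int example_map_tx_payload(struct pci_dev *pdev, struct sk_buff *skb,
				  unsigned int hdr_len, dma_addr_t *dma)
{
	unsigned int secondlen = skb->len - hdr_len;

	/* Frames that are all header (e.g. some mgmt frames) map nothing. */
	if (!secondlen)
		return 0;

	*dma = pci_map_single(pdev, skb->data + hdr_len, secondlen,
			      PCI_DMA_TODEVICE);
	return pci_dma_mapping_error(pdev, *dma) ? -ENOMEM : 0;
}
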
skb               127 drivers/net/wireless/intel/iwlegacy/4965-rs.c 					 struct sk_buff *skb,
skb               793 drivers/net/wireless/intel/iwlegacy/4965-rs.c 		    struct sk_buff *skb)
skb               800 drivers/net/wireless/intel/iwlegacy/4965-rs.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               802 drivers/net/wireless/intel/iwlegacy/4965-rs.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               974 drivers/net/wireless/intel/iwlegacy/4965-rs.c 		il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
skb              1725 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
skb              1731 drivers/net/wireless/intel/iwlegacy/4965-rs.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1732 drivers/net/wireless/intel/iwlegacy/4965-rs.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2186 drivers/net/wireless/intel/iwlegacy/4965-rs.c 	struct sk_buff *skb = txrc->skb;
skb              2189 drivers/net/wireless/intel/iwlegacy/4965-rs.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb                65 drivers/net/wireless/intel/iwlegacy/4965.h 		  struct sk_buff *skb);
skb               152 drivers/net/wireless/intel/iwlegacy/4965.h 		   struct sk_buff *skb);
skb              5289 drivers/net/wireless/intel/iwlegacy/common.c 	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
skb              5291 drivers/net/wireless/intel/iwlegacy/common.c 	if (!skb)
skb              5300 drivers/net/wireless/intel/iwlegacy/common.c 		dev_kfree_skb(skb);
skb              5309 drivers/net/wireless/intel/iwlegacy/common.c 	il->beacon_skb = skb;
skb              5311 drivers/net/wireless/intel/iwlegacy/common.c 	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
skb               204 drivers/net/wireless/intel/iwlwifi/dvm/agn.h 		  struct sk_buff *skb);
skb               581 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 			  struct sk_buff *skb)
skb               585 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 	if (iwlagn_tx_skb(priv, control->sta, skb))
skb               586 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 		ieee80211_free_txskb(hw, skb);
skb              2109 drivers/net/wireless/intel/iwlwifi/dvm/main.c static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
skb              2114 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	info = IEEE80211_SKB_CB(skb);
skb              2116 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	ieee80211_free_txskb(priv->hw, skb);
skb               136 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 				   struct sk_buff *skb,
skb               878 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 			 struct sk_buff *skb)
skb               885 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               888 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1057 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 		rs_rate_scale_perform(priv, skb, sta, lq_sta);
skb              2189 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 				  struct sk_buff *skb,
skb              2195 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              2196 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2695 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 	struct sk_buff *skb = txrc->skb;
skb              2700 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               616 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct sk_buff *skb;
skb               636 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	skb = alloc_skb(128, GFP_ATOMIC);
skb               637 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	if (!skb) {
skb               645 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);
skb               647 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	skb_put_data(skb, hdr, hdrlen);
skb               654 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
skb               674 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
skb               676 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	ieee80211_rx_napi(priv->hw, NULL, skb, priv->napi);
skb                48 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 				      struct sk_buff *skb,
skb               264 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 		  struct sk_buff *skb)
skb               266 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               267 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               303 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 		    pskb_expand_head(skb, 0, noa_data->length,
skb               305 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 			skb_put_data(skb, noa_data->data, noa_data->length);
skb               306 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 			hdr = (struct ieee80211_hdr *)skb->data;
skb               355 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	len = (u16)skb->len;
skb               359 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);
skb               362 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
skb               437 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
skb              1128 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	struct sk_buff *skb;
skb              1182 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 		skb_queue_walk(&skbs, skb) {
skb              1183 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 			hdr = (struct ieee80211_hdr *)skb->data;
skb              1188 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 			info = IEEE80211_SKB_CB(skb);
skb              1221 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
skb              1254 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 		skb = __skb_dequeue(&skbs);
skb              1255 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 		ieee80211_tx_status(priv->hw, skb);
skb              1272 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	struct sk_buff *skb;
skb              1357 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	skb_queue_walk(&reclaimed_skbs, skb) {
skb              1358 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 		struct ieee80211_hdr *hdr = (void *)skb->data;
skb              1359 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1378 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 			info = IEEE80211_SKB_CB(skb);
skb              1391 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 		skb = __skb_dequeue(&reclaimed_skbs);
skb              1392 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 		ieee80211_tx_status(priv->hw, skb);
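
The reclaim paths in dvm/tx.c above use a deliberate two-pass shape: skb_queue_walk() first annotates each returned frame's tx_info in place, and only afterwards does a __skb_dequeue() loop hand the frames to ieee80211_tx_status(), which consumes them. A sketch of that shape, assuming a single "acked" verdict for the whole batch (a simplification of the per-frame status handling in the real code):

static void example_reclaim(struct ieee80211_hw *hw,
			    struct sk_buff_head *skbs, bool acked)
{
	struct sk_buff *skb;

	/* Pass 1: fix up status while the queue is still intact. */
	skb_queue_walk(skbs, skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (acked)
			info->flags |= IEEE80211_TX_STAT_ACK;
	}

	/* Pass 2: report and release. ieee80211_tx_status() frees each
	 * skb, so the walk above must already be finished. */
	while ((skb = __skb_dequeue(skbs)))
		ieee80211_tx_status(hw, skb);
}
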
skb                23 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h 	TP_PROTO(const struct device *dev, struct sk_buff *skb,
skb                25 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h 	TP_ARGS(dev, skb, data_src, data_len),
skb                30 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h 				iwl_trace_data(skb) ? data_len : 0)
skb                34 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h 		if (iwl_trace_data(skb))
skb                82 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 	TP_PROTO(const struct device *dev, struct sk_buff *skb,
skb                86 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 	TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, hdr_len),
skb                99 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 		__dynamic_array(u8, buf1, hdr_len > 0 && iwl_trace_data(skb) ?
skb               100 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 						0 : skb->len - hdr_len)
skb               104 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 		__entry->skbaddr = skb;
skb               107 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 			__entry->framelen += skb->len - hdr_len;
skb               110 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 		if (hdr_len > 0 && !iwl_trace_data(skb))
skb               111 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 			skb_copy_bits(skb, hdr_len,
skb               113 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 				      skb->len - hdr_len);
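
The tx tracepoint above captures the frame body with skb_copy_bits() rather than a plain memcpy() because by this point the body may live in paged fragments, not in the linear head. A sketch of the same capture (example_ name hypothetical; the caller must size buf for skb->len - hdr_len bytes):

static void example_capture_body(const struct sk_buff *skb,
				 unsigned int hdr_len, u8 *buf)
{
	/* skb_copy_bits() walks linear data and page frags alike. */
	if (skb->len > hdr_len)
		skb_copy_bits(skb, hdr_len, buf, skb->len - hdr_len);
}
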
skb                20 drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h static inline bool iwl_trace_data(struct sk_buff *skb)
skb                22 drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb                44 drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h 	return skb->len <= offs + 2 ||
skb                45 drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h 		*(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE);
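
iwl_trace_data() above decides whether a frame's payload may be logged: EAPOL frames stay in the trace (they matter for debugging key handshakes), everything else counts as opaque data. The core of the test is a bounds-checked ethertype peek; a sketch, with the QoS/crypto offset adjustments of the real function omitted (example_ name hypothetical):

static bool example_is_plain_data(const struct sk_buff *skb,
				  unsigned int offs)
{
	/* Too short to carry an ethertype after header + SNAP: treat it
	 * as plain data so nothing is captured by mistake. */
	if (skb->len <= offs + 2)
		return true;

	return *(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE);
}
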
skb               159 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
skb               229 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 					struct sk_buff *skb)
skb               231 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 	op_mode->ops->free_skb(op_mode, skb);
skb               558 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
skb               950 drivers/net/wireless/intel/iwlwifi/iwl-trans.h static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
skb               961 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	return trans->ops->tx(trans, skb, dev_cmd, queue);
skb              1463 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct sk_buff *skb;
skb              1469 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	skb = alloc_skb(size, GFP_ATOMIC);
skb              1470 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	if (!skb) {
skb              1489 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	skb_put_data(skb, sb->data, size);
skb              1490 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
skb              1493 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
skb               744 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
skb               748 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 		if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
skb               751 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 		if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
skb               755 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	ieee80211_free_txskb(mvm->hw, skb);
skb               760 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 			   struct sk_buff *skb)
skb               764 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               765 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb               766 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
skb               801 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	iwl_mvm_tx_skb(mvm, skb, sta);
skb               804 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	ieee80211_free_txskb(hw, skb);
skb               811 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	struct sk_buff *skb = NULL;
skb               840 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 			skb = ieee80211_tx_dequeue(hw, txq);
skb               842 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 			if (!skb) {
skb               851 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 			iwl_mvm_tx_skb(mvm, skb, txq->sta);
skb              4982 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
skb              4984 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	u8 protocol = ip_hdr(skb)->protocol;
skb              4994 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 				      struct sk_buff *skb)
skb              4999 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	if (skb->protocol != htons(ETH_P_IP))
skb              5005 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head);
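
The aggregation gate at the end of the mvm/mac80211.c excerpts builds on checksum compatibility: two frames may share a software A-MSDU only if the hardware would make the same checksum-offload decision for both, since mixing offloaded and non-offloaded frames in one aggregate leaves part of it with a bogus checksum. A sketch under the excerpt's assumption that only TCP/UDP over IPv4 is hardware-checksummable (example_ names hypothetical):

static bool example_hw_csum_ok(const struct sk_buff *skb)
{
	u8 protocol = ip_hdr(skb)->protocol;

	return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
}

static bool example_can_aggregate(const struct sk_buff *head,
				  const struct sk_buff *skb)
{
	if (skb->protocol != htons(ETH_P_IP))
		return false;

	/* Both hw-checksummable, or neither. */
	return example_hw_csum_ok(skb) == example_hw_csum_ok(head);
}
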
skb              1105 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 			struct sk_buff *skb; /* ch sw template */
skb              1511 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
skb              1513 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
skb              1514 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
skb              1213 drivers/net/wireless/intel/iwlwifi/mvm/ops.c static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
skb              1218 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	info = IEEE80211_SKB_CB(skb);
skb              1220 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	ieee80211_free_txskb(mvm->hw, skb);
skb              1206 drivers/net/wireless/intel/iwlwifi/mvm/rs.c 				      struct sk_buff *skb)
skb              1208 drivers/net/wireless/intel/iwlwifi/mvm/rs.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1211 drivers/net/wireless/intel/iwlwifi/mvm/rs.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              2718 drivers/net/wireless/intel/iwlwifi/mvm/rs.c 	struct sk_buff *skb = txrc->skb;
skb              2719 drivers/net/wireless/intel/iwlwifi/mvm/rs.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               100 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 					    struct sk_buff *skb,
skb               118 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	skb_reserve(skb, hdrlen & 3);
skb               132 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
skb               134 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	skb_put_data(skb, hdr, hdrlen);
skb               141 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
skb               145 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	ieee80211_rx_napi(mvm->hw, sta, skb, napi);
skb               325 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 			    struct sk_buff *skb,
skb               334 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               351 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	struct sk_buff *skb;
skb               367 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	skb = alloc_skb(128, GFP_ATOMIC);
skb               368 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	if (!skb) {
skb               373 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	rx_status = IEEE80211_SKB_RXCB(skb);
skb               382 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 		kfree_skb(skb);
skb               480 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 			iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
skb               543 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 			kfree_skb(skb);
skb               563 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
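
iwl_mvm_pass_packet_to_mac80211() in mvm/rx.c above shows two tricks worth calling out: skb_reserve(skb, hdrlen & 3) so the payload after an unaligned 802.11 header lands 4-byte aligned, and the copy-header/attach-page split for large frames. A combined sketch, ignoring the crypto-length handling of the real code (example_ name hypothetical):

static void example_fill_rx_skb(struct sk_buff *skb, void *hdr,
				unsigned int hdrlen, unsigned int len,
				struct page *page, unsigned int truesize)
{
	/* Align the payload that follows a non-multiple-of-4 header. */
	skb_reserve(skb, hdrlen & 3);

	if (len <= skb_tailroom(skb)) {
		/* Small frame: linearise the whole thing. */
		skb_put_data(skb, hdr, len);
	} else {
		/* Copy only the header; the payload stays in the rx page
		 * and is attached as a fragment. */
		skb_put_data(skb, hdr, hdrlen);
		skb_add_rx_frag(skb, 0, page,
				(u8 *)hdr + hdrlen - (u8 *)page_address(page),
				len - hdrlen, truesize);
	}
}
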
skb                69 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c static void *iwl_mvm_skb_get_hdr(struct sk_buff *skb)
skb                71 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb                72 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	u8 *data = skb->data;
skb                95 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
skb                99 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
skb               100 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
skb               172 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
skb               198 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	headlen = (len <= skb_tailroom(skb)) ? len :
skb               221 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	skb_put_data(skb, hdr, hdrlen);
skb               222 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
skb               230 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
skb               238 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 					    struct sk_buff *skb)
skb               240 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb               250 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	radiotap = skb_put(skb, size + 2);
skb               273 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 					    struct sk_buff *skb, int queue,
skb               277 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (iwl_mvm_check_pn(mvm, skb, queue, sta))
skb               278 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		kfree_skb(skb);
skb               280 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		ieee80211_rx_napi(mvm->hw, sta, skb, napi);
skb               397 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			    struct sk_buff *skb,
skb               411 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               570 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		struct sk_buff *skb;
skb               582 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		while ((skb = __skb_dequeue(skb_list))) {
skb               583 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
skb               852 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			    struct sk_buff *skb,
skb               855 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb               856 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
skb              1028 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	__skb_queue_tail(&entries[index].e.frames, skb);
skb              1057 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	kfree_skb(skb);
skb              1379 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
skb              1383 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb              1405 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	he = skb_put_data(skb, &known, sizeof(known));
skb              1410 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
skb              1521 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c static void iwl_mvm_decode_lsig(struct sk_buff *skb,
skb              1524 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb              1536 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		lsig = skb_put(skb, sizeof(*lsig));
skb              1559 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct sk_buff *skb;
skb              1606 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	skb = alloc_skb(128, GFP_ATOMIC);
skb              1607 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (!skb) {
skb              1619 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		skb_reserve(skb, 2);
skb              1622 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	rx_status = IEEE80211_SKB_RXCB(skb);
skb              1640 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
skb              1643 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	iwl_mvm_decode_lsig(skb, &phy_data);
skb              1645 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	rx_status = IEEE80211_SKB_RXCB(skb);
skb              1650 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		kfree_skb(skb);
skb              1711 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		iwl_mvm_add_rtap_sniffer_config(mvm, skb);
skb              1780 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			iwl_mvm_rx_csum(sta, skb, desc);
skb              1783 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			kfree_skb(skb);
skb              1845 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			kfree_skb(skb);
skb              1864 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
skb              1865 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		kfree_skb(skb);
skb              1869 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
skb              1870 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
skb              1888 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct sk_buff *skb;
skb              1909 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	skb = alloc_skb(128, GFP_ATOMIC);
skb              1910 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (!skb) {
skb              1915 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	rx_status = IEEE80211_SKB_RXCB(skb);
skb              1952 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
skb              1955 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	iwl_mvm_decode_lsig(skb, &phy_data);
skb              2007 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			kfree_skb(skb);
skb              2013 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	ieee80211_rx_napi(mvm->hw, sta, skb, napi);
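
iwl_mvm_create_skb() above has to undo the device's alignment padding: the hardware inserts pad_len bytes between the 802.11 header and the payload so the payload sits DWORD-aligned in the rx buffer. The back-to-back skb_put_data() calls rebuild a contiguous frame without the pad; a condensed sketch (example_ name hypothetical):

static void example_copy_strip_pad(struct sk_buff *skb, const u8 *hdr,
				   unsigned int hdrlen,
				   unsigned int headlen,
				   unsigned int pad_len)
{
	/* The header first, exactly as transmitted... */
	skb_put_data(skb, hdr, hdrlen);
	/* ...then the start of the payload, skipping the device pad. */
	skb_put_data(skb, hdr + hdrlen + pad_len, headlen - hdrlen);
}
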
skb               384 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 				   u16 switch_timeout, struct sk_buff *skb,
skb               404 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
skb               455 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	info = IEEE80211_SKB_CB(skb);
skb               456 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	hdr = (void *)skb->data;
skb               466 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
skb               473 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	memcpy(tail->frame.data, skb->data, skb->len);
skb               537 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 						 mvm->tdls_cs.peer.skb,
skb               587 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
skb               588 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	if (!mvm->tdls_cs.peer.skb) {
skb               649 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	dev_kfree_skb(mvm->tdls_cs.peer.skb);
skb               650 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	mvm->tdls_cs.peer.skb = NULL;
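
The TDLS channel-switch code above caches its own copy of the switch template frame (mvm->tdls_cs.peer.skb) so the frame can be resent from driver context long after mac80211's original skb is gone, and frees the copy on teardown. A sketch of that ownership pattern (example_ name hypothetical):

static int example_cache_template(struct sk_buff **slot,
				  struct sk_buff *tmpl)
{
	/* Drop any previously cached template; dev_kfree_skb(NULL) is a
	 * no-op, so no NULL check is needed. */
	dev_kfree_skb(*slot);

	*slot = skb_copy(tmpl, GFP_KERNEL);
	return *slot ? 0 : -ENOMEM;
}
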
skb                96 drivers/net/wireless/intel/iwlwifi/mvm/tx.c #define OPT_HDR(type, skb, off) \
skb                97 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	(type *)(skb_network_header(skb) + (off))
skb                99 drivers/net/wireless/intel/iwlwifi/mvm/tx.c static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
skb               112 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
skb               117 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		      (skb->protocol != htons(ETH_P_IP) &&
skb               118 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		       skb->protocol != htons(ETH_P_IPV6)),
skb               120 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		skb_checksum_help(skb);
skb               124 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               125 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		protocol = ip_hdr(skb)->protocol;
skb               129 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 			(struct ipv6hdr *)skb_network_header(skb);
skb               140 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 				skb_checksum_help(skb);
skb               144 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
skb               154 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		skb_checksum_help(skb);
skb               169 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (skb->protocol == htons(ETH_P_IP) &&
skb               171 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		ip_hdr(skb)->check = 0;
skb               177 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		tcp_hdr(skb)->check = 0;
skb               179 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		udp_hdr(skb)->check = 0;
skb               201 drivers/net/wireless/intel/iwlwifi/mvm/tx.c void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
skb               205 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb               208 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	u32 len = skb->len + FCS_LEN;
skb               232 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		struct ieee80211_bar *bar = (void *)skb->data;
skb               288 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	    ieee80211_action_contains_tpc(skb))
skb               293 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	tx_cmd->len = cpu_to_le16((u16)skb->len);
skb               303 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
skb               491 drivers/net/wireless/intel/iwlwifi/mvm/tx.c iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
skb               495 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               520 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
skb               551 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 			cmd->len = cpu_to_le16((u16)skb->len);
skb               564 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 			cmd->len = cpu_to_le16((u16)skb->len);
skb               578 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
skb               580 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
skb               591 drivers/net/wireless/intel/iwlwifi/mvm/tx.c static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
skb               594 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
skb               648 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 				       struct sk_buff *skb)
skb               650 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               653 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
skb               675 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 					  skb->len - base_len,
skb               682 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (skb_tailroom(skb) < resp_data->noa_len) {
skb               683 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
skb               690 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	pos = skb_put(skb, resp_data->noa_len);
skb               707 drivers/net/wireless/intel/iwlwifi/mvm/tx.c int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
skb               709 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               715 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
skb               722 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	memcpy(&info, skb->cb, sizeof(info));
skb               724 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
skb               767 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		iwl_mvm_probe_resp_set_noa(mvm, skb);
skb               771 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
skb               776 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	iwl_mvm_skb_prepare_status(skb, dev_cmd);
skb               778 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
skb               818 drivers/net/wireless/intel/iwlwifi/mvm/tx.c iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
skb               823 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb               824 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	char cb[sizeof(skb->cb)];
skb               827 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	unsigned int mss = skb_shinfo(skb)->gso_size;
skb               828 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
skb               830 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
skb               832 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	skb_shinfo(skb)->gso_size = num_subframes * mss;
skb               833 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	memcpy(cb, skb->cb, sizeof(cb));
skb               835 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	next = skb_gso_segment(skb, netdev_flags);
skb               836 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	skb_shinfo(skb)->gso_size = mss;
skb               840 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		consume_skb(skb);
skb               885 drivers/net/wireless/intel/iwlwifi/mvm/tx.c static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
skb               891 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb               892 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	unsigned int mss = skb_shinfo(skb)->gso_size;
skb               898 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
skb               899 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		tcp_hdrlen(skb);
skb               904 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
skb               910 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (skb->protocol == htons(ETH_P_IPV6) &&
skb               911 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
skb               914 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
skb               927 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
skb               931 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
skb               963 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
skb               964 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		tcp_hdrlen(skb) + skb->data_len;
skb               972 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
skb               981 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		__skb_queue_tail(mpdus_skb, skb);
skb               989 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
skb               993 drivers/net/wireless/intel/iwlwifi/mvm/tx.c static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
skb              1067 drivers/net/wireless/intel/iwlwifi/mvm/tx.c static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
skb              1071 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1095 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		iwl_mvm_probe_resp_set_noa(mvm, skb);
skb              1097 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
skb              1174 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		     IEEE80211_SEQ_TO_SN(seq_number), skb->len);
skb              1177 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	iwl_mvm_skb_prepare_status(skb, dev_cmd);
skb              1179 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
skb              1201 drivers/net/wireless/intel/iwlwifi/mvm/tx.c int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
skb              1216 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	memcpy(&info, skb->cb, sizeof(info));
skb              1218 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (!skb_is_gso(skb))
skb              1219 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
skb              1221 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
skb              1222 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		tcp_hdrlen(skb) + skb->data_len;
skb              1224 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (payload_len <= skb_shinfo(skb)->gso_size)
skb              1225 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
skb              1229 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
skb              1237 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		skb = __skb_dequeue(&mpdus_skbs);
skb              1239 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
skb              1472 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		struct sk_buff *skb = __skb_dequeue(&skbs);
skb              1473 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1474 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		struct ieee80211_hdr *hdr = (void *)skb->data;
skb              1538 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 			struct ieee80211_hdr *hdr = (void *)skb->data;
skb              1562 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		ieee80211_tx_status(mvm->hw, skb);
skb              1779 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct sk_buff *skb;
skb              1831 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	skb_queue_walk(&reclaimed_skbs, skb) {
skb              1832 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		struct ieee80211_hdr *hdr = (void *)skb->data;
skb              1833 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1889 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		skb = __skb_dequeue(&reclaimed_skbs);
skb              1890 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		ieee80211_tx_status(mvm->hw, skb);
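
The iwl_mvm_tx_tso_segment() lines above show the driver's A-MSDU segmentation trick: gso_size is temporarily scaled so skb_gso_segment() cuts the frame into chunks of num_subframes MSSes, then restored, with skb->cb snapshotted around the call. A minimal sketch of that pattern follows; example_segment() is a hypothetical name, error handling is trimmed, and the per-segment cb restore is an assumption based on the cb snapshot visible in the listing.

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>

	static int example_segment(struct sk_buff *skb, unsigned int num_subframes,
				   netdev_features_t netdev_flags,
				   struct sk_buff_head *mpdus_skb)
	{
		unsigned int mss = skb_shinfo(skb)->gso_size;
		char cb[sizeof(skb->cb)];
		struct sk_buff *next, *tmp;

		memcpy(cb, skb->cb, sizeof(cb));		 /* save tx info */
		skb_shinfo(skb)->gso_size = num_subframes * mss; /* widen segments */
		next = skb_gso_segment(skb, netdev_flags);
		skb_shinfo(skb)->gso_size = mss;		 /* restore MSS */
		if (IS_ERR(next))
			return -EINVAL;
		if (next)
			consume_skb(skb);	/* original replaced by segment list */

		while (next) {
			tmp = next;
			next = tmp->next;
			tmp->next = NULL;
			memcpy(tmp->cb, cb, sizeof(tmp->cb));	/* restore tx info */
			__skb_queue_tail(mpdus_skb, tmp);
		}
		return 0;
	}
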
skb               309 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 	struct sk_buff *skb;
skb               692 drivers/net/wireless/intel/iwlwifi/pcie/internal.h int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
skb              1088 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 			    struct sk_buff *skb);
skb              1113 drivers/net/wireless/intel/iwlwifi/pcie/internal.h int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
skb               201 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		struct sk_buff *skb;
skb               203 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		skb = txq->entries[idx].skb;
skb               209 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		if (skb) {
skb               210 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 			iwl_op_mode_free_skb(trans->op_mode, skb);
skb               211 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 			txq->entries[idx].skb = NULL;
skb               244 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 				     struct sk_buff *skb,
skb               252 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb               254 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	unsigned int mss = skb_shinfo(skb)->gso_size;
skb               261 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
skb               264 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
skb               265 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
skb               266 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
skb               280 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
skb               287 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	skb_pull(skb, hdr_len);
skb               296 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	tso_start(skb, &tso);
skb               325 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
skb               337 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
skb               354 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
skb               358 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 			tso_build_data(skb, &tso, tb_len);
skb               363 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	skb_push(skb, hdr_len);
skb               376 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 					  struct sk_buff *skb,
skb               413 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
skb               428 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 				      struct sk_buff *skb,
skb               434 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               435 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               449 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb               465 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 				    struct sk_buff *skb,
skb               509 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
skb               513 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	tb2_len = skb_headlen(skb) - hdr_len;
skb               516 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
skb               521 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb               522 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 					skb->data + hdr_len,
skb               526 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
skb               529 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	skb_walk_frags(skb, frag) {
skb               535 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb               553 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 					    struct sk_buff *skb,
skb               556 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               583 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	if (amsdu && skb_shinfo(skb)->gso_size)
skb               584 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
skb               587 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
skb               591 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
skb               605 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	if (skb_is_nonlinear(skb) &&
skb               606 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
skb               607 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	    __skb_linearize(skb))
skb               619 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
skb               623 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 			__skb_queue_tail(&txq->overflow_q, skb);
skb               632 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	txq->entries[idx].skb = skb;
skb               643 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
skb              1060 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 			struct sk_buff *skb = txq->entries[idx].skb;
skb              1062 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 			if (WARN_ON_ONCE(!skb))
skb              1065 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 			iwl_pcie_free_tso_page(trans_pcie, skb);
skb              1072 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
skb              1074 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		iwl_op_mode_free_skb(trans->op_mode, skb);
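
The tx-gen2.c block above builds A-MSDU subframes with the generic <net/tso.h> helpers (tso_start(), tso_build_hdr(), tso_build_data()). The skeleton below sketches that iteration only; example_tso_walk() is a made-up name, hdr_area is assumed to point at driver-owned header memory, and the real code additionally caps each payload chunk to a DMA transfer-buffer size and maps tso.data before advancing.

	#include <linux/kernel.h>
	#include <net/tso.h>

	static void example_tso_walk(struct sk_buff *skb, u8 *hdr_area,
				     unsigned int mss, unsigned int total_len)
	{
		struct tso_t tso;

		tso_start(skb, &tso);
		while (total_len) {
			unsigned int data_left = min(mss, total_len);

			total_len -= data_left;
			/* subframe header; last-segment flag when nothing remains */
			tso_build_hdr(skb, hdr_area, &tso, data_left, !total_len);

			while (data_left) {
				unsigned int size = data_left;	/* real code caps per TB */

				/* tso.data points at the payload chunk the driver
				 * would DMA-map as a transfer buffer here */
				tso_build_data(skb, &tso, size);
				data_left -= size;
			}
		}
	}
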
skb               477 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		struct sk_buff *skb;
skb               479 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		skb = txq->entries[idx].skb;
skb               485 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		if (skb) {
skb               486 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			iwl_op_mode_free_skb(trans->op_mode, skb);
skb               487 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			txq->entries[idx].skb = NULL;
skb               624 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			    struct sk_buff *skb)
skb               628 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
skb               666 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
skb               668 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			if (WARN_ON_ONCE(!skb))
skb               671 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			iwl_pcie_free_tso_page(trans_pcie, skb);
skb               687 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
skb               689 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		iwl_op_mode_free_skb(trans->op_mode, skb);
skb              1153 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		struct sk_buff *skb = txq->entries[read_ptr].skb;
skb              1155 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		if (WARN_ON_ONCE(!skb))
skb              1158 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		iwl_pcie_free_tso_page(trans_pcie, skb);
skb              1160 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		__skb_queue_tail(skbs, skb);
skb              1162 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		txq->entries[read_ptr].skb = NULL;
skb              1198 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
skb              1201 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
skb              1209 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
skb              2003 drivers/net/wireless/intel/iwlwifi/pcie/tx.c static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
skb              2014 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	head_tb_len = skb_headlen(skb) - hdr_len;
skb              2018 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 						    skb->data + hdr_len,
skb              2022 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb              2023 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 					skb->data + hdr_len,
skb              2029 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2030 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2042 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb              2099 drivers/net/wireless/intel/iwlwifi/pcie/tx.c static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
skb              2107 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb              2109 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	unsigned int mss = skb_shinfo(skb)->gso_size;
skb              2121 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	trace_iwlwifi_dev_tx(trans->dev, skb,
skb              2126 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
skb              2127 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
skb              2128 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
skb              2142 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
skb              2144 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
skb              2151 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	skb_pull(skb, hdr_len + iv_len);
skb              2160 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	tso_start(skb, &tso);
skb              2191 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
skb              2197 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
skb              2203 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 						    skb->protocol ==
skb              2207 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			skb_put_data(csum_skb, tcph, tcp_hdrlen(skb));
skb              2225 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
skb              2251 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
skb              2255 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			tso_build_data(skb, &tso, size);
skb              2277 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	skb_push(skb, hdr_len + iv_len);
skb              2282 drivers/net/wireless/intel/iwlwifi/pcie/tx.c static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
skb              2295 drivers/net/wireless/intel/iwlwifi/pcie/tx.c int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
skb              2320 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		     skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              2321 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		int offs = skb_checksum_start_offset(skb);
skb              2322 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		int csum_offs = offs + skb->csum_offset;
skb              2325 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
skb              2328 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		csum = skb_checksum(skb, offs, skb->len - offs, 0);
skb              2329 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
skb              2331 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2334 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	if (skb_is_nonlinear(skb) &&
skb              2335 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
skb              2336 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	    __skb_linearize(skb))
skb              2342 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              2355 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
skb              2359 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 			__skb_queue_tail(&txq->overflow_q, skb);
skb              2378 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	txq->entries[txq->write_ptr].skb = skb;
skb              2434 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	trace_iwlwifi_dev_tx(trans->dev, skb,
skb              2447 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	if (amsdu && skb_shinfo(skb)->gso_size) {
skb              2448 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
skb              2455 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
skb              2459 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		skb_walk_frags(skb, frag) {
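
Before queueing, iwl_trans_pcie_tx() falls back to computing the checksum in software when a CHECKSUM_PARTIAL skb cannot be offloaded, as the csum_fold lines above show. Restated as a standalone sketch (example_sw_csum() is a hypothetical name; the body mirrors the quoted fragments):

	#include <linux/skbuff.h>
	#include <net/checksum.h>

	static int example_sw_csum(struct sk_buff *skb)
	{
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			int offs = skb_checksum_start_offset(skb);
			int csum_offs = offs + skb->csum_offset;
			__wsum csum;

			/* the checksum field must be writable linear data */
			if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
				return -ENOMEM;

			csum = skb_checksum(skb, offs, skb->len - offs, 0);
			*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		return 0;
	}
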
skb                85 drivers/net/wireless/intersil/hostap/hostap.h void hostap_info_process(local_info_t *local, struct sk_buff *skb);
skb                82 drivers/net/wireless/intersil/hostap/hostap_80211.h int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
skb                84 drivers/net/wireless/intersil/hostap/hostap_80211.h void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
skb                86 drivers/net/wireless/intersil/hostap/hostap_80211.h void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
skb                89 drivers/net/wireless/intersil/hostap/hostap_80211.h void hostap_dump_tx_80211(const char *name, struct sk_buff *skb);
skb                90 drivers/net/wireless/intersil/hostap/hostap_80211.h netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
skb                92 drivers/net/wireless/intersil/hostap/hostap_80211.h netdev_tx_t hostap_mgmt_start_xmit(struct sk_buff *skb,
skb                94 drivers/net/wireless/intersil/hostap/hostap_80211.h netdev_tx_t hostap_master_start_xmit(struct sk_buff *skb,
skb                21 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
skb                27 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb                32 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	       skb->len, jiffies);
skb                34 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	if (skb->len < 2)
skb                44 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
skb                55 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	if (skb->len >= 30)
skb                63 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
skb                92 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	fhdr = (struct ieee80211_hdr *) skb->data;
skb                98 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		dev_kfree_skb_any(skb);
skb               112 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	head_need -= skb_headroom(skb);
skb               113 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	tail_need -= skb_tailroom(skb);
skb               116 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		if (pskb_expand_head(skb, head_need > 0 ? head_need : 0,
skb               121 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			dev_kfree_skb_any(skb);
skb               130 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	memset(skb_put(skb, 4), 0xff, 4); /* Prism2 strips CRC */
skb               135 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		hdr = skb_push(skb, phdrlen);
skb               152 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		LWNG_SETVAL(frmlen, 10, 0, 4, skb->len - phdrlen);
skb               156 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		hdr = skb_push(skb, phdrlen);
skb               174 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		hdr = skb_push(skb, phdrlen);
skb               192 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	ret = skb->len - phdrlen;
skb               193 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	skb->dev = dev;
skb               194 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	skb_reset_mac_header(skb);
skb               195 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	skb_pull(skb, hdrlen);
skb               197 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb_pull(skb, phdrlen);
skb               198 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	skb->pkt_type = PACKET_OTHERHOST;
skb               199 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	skb->protocol = cpu_to_be16(ETH_P_802_2);
skb               200 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb               201 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	netif_rx(skb);
skb               208 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c static void monitor_rx(struct net_device *dev, struct sk_buff *skb,
skb               213 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	len = prism2_rx_80211(dev, skb, rx_stats, PRISM2_RX_MONITOR);
skb               229 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		if (entry->skb != NULL &&
skb               234 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			dev_kfree_skb(entry->skb);
skb               235 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			entry->skb = NULL;
skb               238 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		if (entry->skb != NULL && entry->seq == seq &&
skb               253 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	struct sk_buff *skb = NULL;
skb               264 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb = dev_alloc_skb(local->dev->mtu +
skb               269 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		if (skb == NULL)
skb               277 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		if (entry->skb != NULL)
skb               278 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			dev_kfree_skb(entry->skb);
skb               283 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		entry->skb = skb;
skb               293 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			skb = entry->skb;
skb               297 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	return skb;
skb               321 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	entry->skb = NULL;
skb               392 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c static void hostap_rx_sta_beacon(local_info_t *local, struct sk_buff *skb,
skb               402 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	if (skb->len < IEEE80211_MGMT_HDR_LEN + sizeof(mgmt->u.beacon))
skb               405 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	mgmt = (struct hostap_ieee80211_mgmt *) skb->data;
skb               407 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	left = skb->len - (pos - skb->data);
skb               471 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb,
skb               476 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		hostap_update_sta_ps(local, (struct ieee80211_hdr *) skb->data);
skb               484 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
skb               492 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		local->apdevstats.rx_bytes += skb->len;
skb               495 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		prism2_rx_80211(local->apdev, skb, rx_stats, PRISM2_RX_MGMT);
skb               504 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			       skb->dev->name, type >> 2, stype >> 4);
skb               508 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		hostap_rx(skb->dev, skb, rx_stats);
skb               513 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		hostap_rx_sta_beacon(local, skb, stype);
skb               525 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		       skb->dev->name, type >> 2, stype >> 4);
skb               608 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
skb               615 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	if (skb->len < 24)
skb               618 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               634 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	if (skb->len < 24 + 8)
skb               638 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	pos = skb->data + 24;
skb               649 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
skb               658 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               672 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
skb               687 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
skb               696 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               700 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
skb               716 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
skb               740 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	iface->stats.rx_bytes += skb->len;
skb               747 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               749 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	if (skb->len < 10)
skb               776 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		monitor_rx(dev, skb, rx_stats);
skb               782 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		if (skb->len >= hdrlen + 3)
skb               783 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			idx = skb->data[hdrlen + 3] >> 6;
skb               823 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		    (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
skb               832 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		if (hostap_rx_frame_mgmt(local, skb, rx_stats, type, stype))
skb               839 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	if (skb->len < IEEE80211_DATA_HDR3_LEN)
skb               852 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		if (skb->len < IEEE80211_DATA_HDR4_LEN)
skb               866 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb->dev = dev = wds;
skb               874 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb->dev = dev = local->stadev;
skb               881 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		switch (hostap_handle_sta_rx(local, dev, skb, rx_stats,
skb               912 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	    (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
skb               914 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               931 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		flen = skb->len;
skb               946 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			skb_copy_from_linear_data(skb, skb_put(frag_skb, flen),
skb               951 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			skb_copy_from_linear_data_offset(skb, hdrlen,
skb               955 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		dev_kfree_skb(skb);
skb               956 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb = NULL;
skb               967 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb = frag_skb;
skb               968 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		hdr = (struct ieee80211_hdr *) skb->data;
skb               976 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	    hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt))
skb               979 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               982 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		    hostap_is_eapol_frame(local, skb)) {
skb               996 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	    !hostap_is_eapol_frame(local, skb)) {
skb              1007 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	payload = skb->data + hdrlen;
skb              1019 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 				prism2_rx_80211(local->apdev, skb, rx_stats,
skb              1022 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 				local->apdevstats.rx_bytes += skb->len;
skb              1035 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	if (skb->len - hdrlen >= 8 &&
skb              1041 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb_pull(skb, hdrlen + 6);
skb              1042 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
skb              1043 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
skb              1047 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb_pull(skb, hdrlen);
skb              1048 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		len = htons(skb->len);
skb              1049 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		memcpy(skb_push(skb, 2), &len, 2);
skb              1050 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
skb              1051 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
skb              1056 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	    skb->len >= ETH_HLEN + ETH_ALEN) {
skb              1059 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb_copy_from_linear_data_offset(skb, skb->len - ETH_ALEN,
skb              1060 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 						 skb->data + ETH_ALEN,
skb              1062 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb_trim(skb, skb->len - ETH_ALEN);
skb              1066 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	dev->stats.rx_bytes += skb->len;
skb              1074 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
skb              1082 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			skb2 = skb;
skb              1083 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 			skb = NULL;
skb              1097 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	if (skb) {
skb              1098 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		skb->protocol = eth_type_trans(skb, dev);
skb              1099 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		memset(skb->cb, 0, sizeof(skb->cb));
skb              1100 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 		netif_rx(skb);
skb              1109 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c 	dev_kfree_skb(skb);
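
The tail of hostap_80211_rx() above converts a received 802.11 data frame to Ethernet II: the 802.11 header plus the 6-byte RFC 1042/SNAP prefix are pulled off (leaving the EtherType in place) and the saved addresses are pushed back. A sketch of that decapsulation, assuming hdrlen, dst and src were already parsed from the 802.11 header (example_decap() is a hypothetical name; the non-SNAP branch pushes a length field instead, as in the listing):

	#include <linux/skbuff.h>
	#include <linux/etherdevice.h>

	static void example_decap(struct sk_buff *skb, unsigned int hdrlen,
				  const u8 *dst, const u8 *src)
	{
		/* drop 802.11 header + LLC/SNAP, keeping the EtherType */
		skb_pull(skb, hdrlen + 6);
		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
		/* skb->data is now dst | src | type, i.e. an Ethernet header */
	}
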
skb                21 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
skb                26 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb                29 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	       name, skb->len, jiffies);
skb                31 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (skb->len < 2)
skb                41 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
skb                52 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (skb->len >= 30)
skb                61 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
skb                80 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (skb->len < ETH_HLEN) {
skb                82 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		       "(len=%d)\n", dev->name, skb->len);
skb                83 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		kfree_skb(skb);
skb                97 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 			kfree_skb(skb);
skb               104 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 			kfree_skb(skb);
skb               108 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 			   !ether_addr_equal(skb->data + ETH_ALEN, dev->dev_addr)) {
skb               122 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	ethertype = (skb->data[12] << 8) | skb->data[13];
skb               156 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
skb               170 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
skb               178 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		    is_multicast_ether_addr(skb->data))
skb               186 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
skb               190 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
skb               192 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
skb               199 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
skb               201 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
skb               204 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
skb               205 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
skb               212 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	skb_pull(skb, skip_header_bytes);
skb               214 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (skb_tailroom(skb) < need_tailroom) {
skb               215 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb = skb_unshare(skb, GFP_ATOMIC);
skb               216 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		if (skb == NULL) {
skb               220 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		if (pskb_expand_head(skb, need_headroom, need_tailroom,
skb               222 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 			kfree_skb(skb);
skb               226 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	} else if (skb_headroom(skb) < need_headroom) {
skb               227 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		struct sk_buff *tmp = skb;
skb               228 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb = skb_realloc_headroom(skb, need_headroom);
skb               230 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		if (skb == NULL) {
skb               235 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb = skb_unshare(skb, GFP_ATOMIC);
skb               236 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		if (skb == NULL) {
skb               243 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
skb               244 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
skb               246 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb_put_data(skb, &hdr.addr4, ETH_ALEN);
skb               250 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	iface->stats.tx_bytes += skb->len;
skb               252 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	skb_reset_mac_header(skb);
skb               253 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	meta = (struct hostap_skb_tx_data *) skb->cb;
skb               262 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	skb->dev = local->dev;
skb               263 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	dev_queue_xmit(skb);
skb               269 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c netdev_tx_t hostap_mgmt_start_xmit(struct sk_buff *skb,
skb               281 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (skb->len < 10) {
skb               283 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		       "(len=%d)\n", dev->name, skb->len);
skb               284 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		kfree_skb(skb);
skb               289 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	iface->stats.tx_bytes += skb->len;
skb               291 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	meta = (struct hostap_skb_tx_data *) skb->cb;
skb               296 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) {
skb               297 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		hdr = (struct ieee80211_hdr *) skb->data;
skb               301 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 			u8 *pos = &skb->data[IEEE80211_DATA_HDR3_LEN +
skb               308 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	skb->dev = local->dev;
skb               309 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	dev_queue_xmit(skb);
skb               315 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
skb               323 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	iface = netdev_priv(skb->dev);
skb               326 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
skb               327 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		kfree_skb(skb);
skb               333 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		hdr = (struct ieee80211_hdr *) skb->data;
skb               339 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		kfree_skb(skb);
skb               343 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	skb = skb_unshare(skb, GFP_ATOMIC);
skb               344 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (skb == NULL)
skb               351 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if ((skb_headroom(skb) < prefix_len ||
skb               352 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	     skb_tailroom(skb) < postfix_len) &&
skb               353 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	    pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) {
skb               354 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		kfree_skb(skb);
skb               358 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               366 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv);
skb               368 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv);
skb               371 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		kfree_skb(skb);
skb               375 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	return skb;
skb               382 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c netdev_tx_t hostap_master_start_xmit(struct sk_buff *skb,
skb               398 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	tx.skb = skb;
skb               401 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	meta = (struct hostap_skb_tx_data *) skb->cb;
skb               421 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (skb->len < 24) {
skb               423 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		       "(len=%d)\n", dev->name, skb->len);
skb               434 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	skb = tx.skb;
skb               435 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	meta = (struct hostap_skb_tx_data *) skb->cb;
skb               436 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               449 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 			hostap_dump_tx_80211(dev->name, skb);
skb               516 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		skb = hostap_tx_encrypt(skb, tx.crypt);
skb               517 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		if (skb == NULL) {
skb               523 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		meta = (struct hostap_skb_tx_data *) skb->cb;
skb               535 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (local->func->tx == NULL || local->func->tx(skb, dev)) {
skb               541 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		iface->stats.tx_bytes += skb->len;
skb               545 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 	if (ret == NETDEV_TX_OK && skb)
skb               546 drivers/net/wireless/intersil/hostap/hostap_80211_tx.c 		dev_kfree_skb(skb);
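
hostap_tx_encrypt() above makes room for the cipher before encrypting: unshare the skb, then grow head and tail in one pskb_expand_head() call if the crypto prefix or postfix would not fit. As a standalone sketch (example_make_room() is a hypothetical name; prefix_len/postfix_len correspond to the crypt ops' extra MPDU prefix/postfix lengths in the listing):

	#include <linux/skbuff.h>

	static struct sk_buff *example_make_room(struct sk_buff *skb,
						 int prefix_len, int postfix_len)
	{
		skb = skb_unshare(skb, GFP_ATOMIC);	/* private copy for in-place crypto */
		if (!skb)
			return NULL;

		if ((skb_headroom(skb) < prefix_len ||
		     skb_tailroom(skb) < postfix_len) &&
		    pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) {
			kfree_skb(skb);
			return NULL;
		}
		return skb;
	}
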
skb               604 drivers/net/wireless/intersil/hostap/hostap_ap.c static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data)
skb               610 drivers/net/wireless/intersil/hostap/hostap_ap.c 		dev_kfree_skb(skb);
skb               617 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               621 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb->dev = ap->local->apdev;
skb               622 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb_pull(skb, hostap_80211_get_hdrlen(hdr->frame_control));
skb               623 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb->pkt_type = PACKET_OTHERHOST;
skb               624 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb->protocol = cpu_to_be16(ETH_P_802_2);
skb               625 drivers/net/wireless/intersil/hostap/hostap_ap.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb               626 drivers/net/wireless/intersil/hostap/hostap_ap.c 	netif_rx(skb);
skb               632 drivers/net/wireless/intersil/hostap/hostap_ap.c static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
skb               643 drivers/net/wireless/intersil/hostap/hostap_ap.c 		dev_kfree_skb(skb);
skb               647 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               649 drivers/net/wireless/intersil/hostap/hostap_ap.c 	    skb->len < IEEE80211_MGMT_HDR_LEN + 6) {
skb               652 drivers/net/wireless/intersil/hostap/hostap_ap.c 		dev_kfree_skb(skb);
skb               656 drivers/net/wireless/intersil/hostap/hostap_ap.c 	pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
skb               695 drivers/net/wireless/intersil/hostap/hostap_ap.c 	dev_kfree_skb(skb);
skb               700 drivers/net/wireless/intersil/hostap/hostap_ap.c static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
skb               711 drivers/net/wireless/intersil/hostap/hostap_ap.c 		dev_kfree_skb(skb);
skb               715 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               718 drivers/net/wireless/intersil/hostap/hostap_ap.c 	    skb->len < IEEE80211_MGMT_HDR_LEN + 4) {
skb               721 drivers/net/wireless/intersil/hostap/hostap_ap.c 		dev_kfree_skb(skb);
skb               741 drivers/net/wireless/intersil/hostap/hostap_ap.c 	pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
skb               760 drivers/net/wireless/intersil/hostap/hostap_ap.c 	dev_kfree_skb(skb);
skb               765 drivers/net/wireless/intersil/hostap/hostap_ap.c static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data)
skb               771 drivers/net/wireless/intersil/hostap/hostap_ap.c 	if (skb->len < 24)
skb               773 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               787 drivers/net/wireless/intersil/hostap/hostap_ap.c 	dev_kfree_skb(skb);
skb               931 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct sk_buff *skb;
skb               946 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb = dev_alloc_skb(sizeof(*hdr) + body_len);
skb               947 drivers/net/wireless/intersil/hostap/hostap_ap.c 	if (skb == NULL) {
skb               955 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = skb_put_zero(skb, hdrlen);
skb               957 drivers/net/wireless/intersil/hostap/hostap_ap.c 		skb_put_data(skb, body, body_len);
skb               979 drivers/net/wireless/intersil/hostap/hostap_ap.c 	meta = (struct hostap_skb_tx_data *) skb->cb;
skb               985 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb->dev = dev;
skb               986 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb_reset_mac_header(skb);
skb               987 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb_reset_network_header(skb);
skb               988 drivers/net/wireless/intersil/hostap/hostap_ap.c 	dev_queue_xmit(skb);
skb              1244 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct sk_buff *skb;
skb              1258 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN +
skb              1261 drivers/net/wireless/intersil/hostap/hostap_ap.c 	if (skb == NULL) {
skb              1266 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb_reserve(skb, ap->crypt->extra_mpdu_prefix_len);
skb              1267 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb_put_zero(skb, WLAN_AUTH_CHALLENGE_LEN);
skb              1268 drivers/net/wireless/intersil/hostap/hostap_ap.c 	if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) {
skb              1269 drivers/net/wireless/intersil/hostap/hostap_ap.c 		dev_kfree_skb(skb);
skb              1274 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len,
skb              1276 drivers/net/wireless/intersil/hostap/hostap_ap.c 	dev_kfree_skb(skb);
skb              1283 drivers/net/wireless/intersil/hostap/hostap_ap.c static void handle_authen(local_info_t *local, struct sk_buff *skb,
skb              1287 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1299 drivers/net/wireless/intersil/hostap/hostap_ap.c 	len = skb->len - IEEE80211_MGMT_HDR_LEN;
skb              1319 drivers/net/wireless/intersil/hostap/hostap_ap.c 		if (skb->len >= hdrlen + 3)
skb              1320 drivers/net/wireless/intersil/hostap/hostap_ap.c 			idx = skb->data[hdrlen + 3] >> 6;
skb              1324 drivers/net/wireless/intersil/hostap/hostap_ap.c 	pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
skb              1496 drivers/net/wireless/intersil/hostap/hostap_ap.c static void handle_assoc(local_info_t *local, struct sk_buff *skb,
skb              1500 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1510 drivers/net/wireless/intersil/hostap/hostap_ap.c 	left = len = skb->len - IEEE80211_MGMT_HDR_LEN;
skb              1532 drivers/net/wireless/intersil/hostap/hostap_ap.c 	pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
skb              1705 drivers/net/wireless/intersil/hostap/hostap_ap.c static void handle_deauth(local_info_t *local, struct sk_buff *skb,
skb              1709 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1710 drivers/net/wireless/intersil/hostap/hostap_ap.c 	char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN);
skb              1716 drivers/net/wireless/intersil/hostap/hostap_ap.c 	len = skb->len - IEEE80211_MGMT_HDR_LEN;
skb              1747 drivers/net/wireless/intersil/hostap/hostap_ap.c static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
skb              1751 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1752 drivers/net/wireless/intersil/hostap/hostap_ap.c 	char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
skb              1758 drivers/net/wireless/intersil/hostap/hostap_ap.c 	len = skb->len - IEEE80211_MGMT_HDR_LEN;
skb              1840 drivers/net/wireless/intersil/hostap/hostap_ap.c 				 struct sk_buff *skb)
skb              1847 drivers/net/wireless/intersil/hostap/hostap_ap.c 		dev_queue_xmit(skb);
skb              1853 drivers/net/wireless/intersil/hostap/hostap_ap.c 	meta = (struct hostap_skb_tx_data *) skb->cb;
skb              1859 drivers/net/wireless/intersil/hostap/hostap_ap.c 	dev_queue_xmit(skb);
skb              1871 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct sk_buff *skb;
skb              1920 drivers/net/wireless/intersil/hostap/hostap_ap.c 	while ((skb = skb_dequeue(&sta->tx_buf)) != NULL) {
skb              1925 drivers/net/wireless/intersil/hostap/hostap_ap.c 		pspoll_send_buffered(local, sta, skb);
skb              1982 drivers/net/wireless/intersil/hostap/hostap_ap.c static void handle_beacon(local_info_t *local, struct sk_buff *skb,
skb              1985 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1986 drivers/net/wireless/intersil/hostap/hostap_ap.c 	char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
skb              1996 drivers/net/wireless/intersil/hostap/hostap_ap.c 	len = skb->len - IEEE80211_MGMT_HDR_LEN;
skb              2137 drivers/net/wireless/intersil/hostap/hostap_ap.c static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
skb              2148 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              2186 drivers/net/wireless/intersil/hostap/hostap_ap.c 		handle_beacon(local, skb, rx_stats);
skb              2222 drivers/net/wireless/intersil/hostap/hostap_ap.c 		handle_assoc(local, skb, rx_stats, 0);
skb              2228 drivers/net/wireless/intersil/hostap/hostap_ap.c 		handle_assoc(local, skb, rx_stats, 1);
skb              2237 drivers/net/wireless/intersil/hostap/hostap_ap.c 		handle_disassoc(local, skb, rx_stats);
skb              2240 drivers/net/wireless/intersil/hostap/hostap_ap.c 		handle_authen(local, skb, rx_stats);
skb              2243 drivers/net/wireless/intersil/hostap/hostap_ap.c 		handle_deauth(local, skb, rx_stats);
skb              2253 drivers/net/wireless/intersil/hostap/hostap_ap.c 	dev_kfree_skb(skb);
skb              2258 drivers/net/wireless/intersil/hostap/hostap_ap.c void hostap_rx(struct net_device *dev, struct sk_buff *skb,
skb              2268 drivers/net/wireless/intersil/hostap/hostap_ap.c 	if (skb->len < 16)
skb              2273 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              2279 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb->protocol = cpu_to_be16(ETH_P_HOSTAP);
skb              2280 drivers/net/wireless/intersil/hostap/hostap_ap.c 	handle_ap_item(local, skb, rx_stats);
skb              2284 drivers/net/wireless/intersil/hostap/hostap_ap.c 	dev_kfree_skb(skb);
skb              2291 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct sk_buff *skb;
skb              2298 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb = dev_alloc_skb(16);
skb              2299 drivers/net/wireless/intersil/hostap/hostap_ap.c 	if (skb == NULL) {
skb              2305 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = skb_put(skb, 16);
skb              2318 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb->dev = local->dev;
skb              2321 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hostap_rx(local->dev, skb, &rx_stats);
skb              2685 drivers/net/wireless/intersil/hostap/hostap_ap.c 	struct sk_buff *skb = tx->skb;
skb              2690 drivers/net/wireless/intersil/hostap/hostap_ap.c 	meta = (struct hostap_skb_tx_data *) skb->cb;
skb              2692 drivers/net/wireless/intersil/hostap/hostap_ap.c 	if (local->ap == NULL || skb->len < 10 ||
skb              2696 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              2777 drivers/net/wireless/intersil/hostap/hostap_ap.c 	skb_queue_tail(&sta->tx_buf, skb);
skb              2796 drivers/net/wireless/intersil/hostap/hostap_ap.c 			sta->tx_bytes += skb->len;
skb              2823 drivers/net/wireless/intersil/hostap/hostap_ap.c void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
skb              2829 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              2830 drivers/net/wireless/intersil/hostap/hostap_ap.c 	meta = (struct hostap_skb_tx_data *) skb->cb;
skb              2924 drivers/net/wireless/intersil/hostap/hostap_ap.c 			       struct sk_buff *skb,
skb              2936 drivers/net/wireless/intersil/hostap/hostap_ap.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              2957 drivers/net/wireless/intersil/hostap/hostap_ap.c 				prism2_rx_80211(local->apdev, skb, rx_stats,
skb              2966 drivers/net/wireless/intersil/hostap/hostap_ap.c 				hostap_rx(dev, skb, rx_stats);
skb              2979 drivers/net/wireless/intersil/hostap/hostap_ap.c 				hostap_dump_rx_80211(dev->name, skb, rx_stats);
skb              2988 drivers/net/wireless/intersil/hostap/hostap_ap.c 			prism2_rx_80211(local->apdev, skb, rx_stats,
skb              3000 drivers/net/wireless/intersil/hostap/hostap_ap.c 			hostap_rx(dev, skb, rx_stats);
skb              3019 drivers/net/wireless/intersil/hostap/hostap_ap.c 			hostap_dump_rx_80211(dev->name, skb, rx_stats);
skb              3030 drivers/net/wireless/intersil/hostap/hostap_ap.c 		sta->rx_bytes += skb->len;
skb              3037 drivers/net/wireless/intersil/hostap/hostap_ap.c 			prism2_rx_80211(local->apdev, skb, rx_stats,
skb              3046 drivers/net/wireless/intersil/hostap/hostap_ap.c 			hostap_rx(dev, skb, rx_stats);
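
The hostap_ap.c hits above trace the driver's management-frame receive path: hostap_rx() length-checks the frame and tags it ETH_P_HOSTAP, handle_ap_item() dispatches on the frame-control subtype, and the skb is freed at a single exit point. A minimal sketch of that dispatch shape, assuming simplified handler stubs (the real handlers also take local_info_t and rx_stats arguments):

#include <linux/skbuff.h>
#include <linux/ieee80211.h>

/* Sketch only: dispatch a received 802.11 management frame by subtype.
 * Handlers borrow the skb; ownership stays here, mirroring the single
 * dev_kfree_skb() at the end of handle_ap_item(). */
static void mgmt_dispatch_sketch(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	if (skb->len < 24)	/* shortest management header */
		goto out;

	hdr = (struct ieee80211_hdr *) skb->data;
	if (!ieee80211_is_mgmt(hdr->frame_control))
		goto out;

	switch (le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE) {
	case IEEE80211_STYPE_BEACON:
		/* handle_beacon(local, skb, rx_stats); */
		break;
	case IEEE80211_STYPE_AUTH:
		/* handle_authen(local, skb, rx_stats); */
		break;
	default:
		break;
	}
out:
	dev_kfree_skb(skb);
}
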
skb               219 drivers/net/wireless/intersil/hostap/hostap_ap.h void hostap_rx(struct net_device *dev, struct sk_buff *skb,
skb               231 drivers/net/wireless/intersil/hostap/hostap_ap.h 	struct sk_buff *skb;
skb               238 drivers/net/wireless/intersil/hostap/hostap_ap.h void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb);
skb               244 drivers/net/wireless/intersil/hostap/hostap_ap.h 			       struct sk_buff *skb,
skb              1799 drivers/net/wireless/intersil/hostap/hostap_hw.c static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
skb              1811 drivers/net/wireless/intersil/hostap/hostap_hw.c 	meta = (struct hostap_skb_tx_data *) skb->cb;
skb              1828 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len);
skb              1832 drivers/net/wireless/intersil/hostap/hostap_hw.c 	    skb->len >= 30) {
skb              1834 drivers/net/wireless/intersil/hostap/hostap_hw.c 		skb_copy_from_linear_data_offset(skb, hdr_len, txdesc.addr4,
skb              1847 drivers/net/wireless/intersil/hostap/hostap_hw.c 	data_len = skb->len - hdr_len;
skb              1864 drivers/net/wireless/intersil/hostap/hostap_hw.c 		res = hfa384x_to_bap(dev, BAP0, skb->data + hdr_len,
skb              1865 drivers/net/wireless/intersil/hostap/hostap_hw.c 				     skb->len - hdr_len);
skb              1931 drivers/net/wireless/intersil/hostap/hostap_hw.c 	struct sk_buff *skb = NULL;
skb              1989 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb = dev_alloc_skb(len + hdr_len);
skb              1990 drivers/net/wireless/intersil/hostap/hostap_hw.c 	if (!skb) {
skb              1996 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb->dev = dev;
skb              1997 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb_put_data(skb, &rxdesc, hdr_len);
skb              2000 drivers/net/wireless/intersil/hostap/hostap_hw.c 		res = hfa384x_from_bap(dev, BAP0, skb_put(skb, len), len);
skb              2008 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb_queue_tail(&local->rx_list, skb);
skb              2021 drivers/net/wireless/intersil/hostap/hostap_hw.c 	if (skb)
skb              2022 drivers/net/wireless/intersil/hostap/hostap_hw.c 		dev_kfree_skb(skb);
skb              2028 drivers/net/wireless/intersil/hostap/hostap_hw.c static void hostap_rx_skb(local_info_t *local, struct sk_buff *skb)
skb              2031 drivers/net/wireless/intersil/hostap/hostap_hw.c 	struct net_device *dev = skb->dev;
skb              2036 drivers/net/wireless/intersil/hostap/hostap_hw.c 	if (skb->len < sizeof(*rxdesc)) {
skb              2039 drivers/net/wireless/intersil/hostap/hostap_hw.c 		    skb->len >= sizeof(*rxdesc) - 30) {
skb              2040 drivers/net/wireless/intersil/hostap/hostap_hw.c 			rx_hdrlen = skb->len;
skb              2042 drivers/net/wireless/intersil/hostap/hostap_hw.c 			dev_kfree_skb(skb);
skb              2047 drivers/net/wireless/intersil/hostap/hostap_hw.c 	rxdesc = (struct hfa384x_rx_frame *) skb->data;
skb              2050 drivers/net/wireless/intersil/hostap/hostap_hw.c 	    skb->len >= sizeof(*rxdesc))
skb              2058 drivers/net/wireless/intersil/hostap/hostap_hw.c 	if (skb->len > PRISM2_DATA_MAXLEN) {
skb              2060 drivers/net/wireless/intersil/hostap/hostap_hw.c 		       dev->name, skb->len, PRISM2_DATA_MAXLEN);
skb              2074 drivers/net/wireless/intersil/hostap/hostap_hw.c 	memmove(skb_pull(skb, rx_hdrlen - hdrlen),
skb              2077 drivers/net/wireless/intersil/hostap/hostap_hw.c 	hostap_80211_rx(dev, skb, &stats);
skb              2081 drivers/net/wireless/intersil/hostap/hostap_hw.c 	dev_kfree_skb(skb);
skb              2089 drivers/net/wireless/intersil/hostap/hostap_hw.c 	struct sk_buff *skb;
skb              2091 drivers/net/wireless/intersil/hostap/hostap_hw.c 	while ((skb = skb_dequeue(&local->rx_list)) != NULL)
skb              2092 drivers/net/wireless/intersil/hostap/hostap_hw.c 		hostap_rx_skb(local, skb);
skb              2168 drivers/net/wireless/intersil/hostap/hostap_hw.c 	struct sk_buff *skb;
skb              2194 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb = dev_alloc_skb(hdrlen + len);
skb              2195 drivers/net/wireless/intersil/hostap/hostap_hw.c 	if (skb == NULL) {
skb              2201 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb_put_data(skb, (void *)&txdesc->frame_control, hdrlen);
skb              2203 drivers/net/wireless/intersil/hostap/hostap_hw.c 		skb_put_data(skb, payload, len);
skb              2205 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb->dev = local->dev;
skb              2206 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb_reset_mac_header(skb);
skb              2208 drivers/net/wireless/intersil/hostap/hostap_hw.c 	cb->func(skb, ok, cb->data);
skb              2294 drivers/net/wireless/intersil/hostap/hostap_hw.c 	struct sk_buff *skb;
skb              2296 drivers/net/wireless/intersil/hostap/hostap_hw.c 	while ((skb = skb_dequeue(&local->sta_tx_exc_list)) != NULL) {
skb              2298 drivers/net/wireless/intersil/hostap/hostap_hw.c 			(struct hfa384x_tx_frame *) skb->data;
skb              2300 drivers/net/wireless/intersil/hostap/hostap_hw.c 		if (skb->len >= sizeof(*txdesc)) {
skb              2304 drivers/net/wireless/intersil/hostap/hostap_hw.c 			memmove(skb_pull(skb, sizeof(*txdesc) - hdrlen),
skb              2307 drivers/net/wireless/intersil/hostap/hostap_hw.c 			hostap_handle_sta_tx_exc(local, skb);
skb              2309 drivers/net/wireless/intersil/hostap/hostap_hw.c 		dev_kfree_skb(skb);
skb              2351 drivers/net/wireless/intersil/hostap/hostap_hw.c 		struct sk_buff *skb;
skb              2352 drivers/net/wireless/intersil/hostap/hostap_hw.c 		skb = dev_alloc_skb(sizeof(txdesc));
skb              2353 drivers/net/wireless/intersil/hostap/hostap_hw.c 		if (skb) {
skb              2354 drivers/net/wireless/intersil/hostap/hostap_hw.c 			skb_put_data(skb, &txdesc, sizeof(txdesc));
skb              2355 drivers/net/wireless/intersil/hostap/hostap_hw.c 			skb_queue_tail(&local->sta_tx_exc_list, skb);
skb              2396 drivers/net/wireless/intersil/hostap/hostap_hw.c 	struct sk_buff *skb;
skb              2398 drivers/net/wireless/intersil/hostap/hostap_hw.c 	while ((skb = skb_dequeue(&local->info_list)) != NULL) {
skb              2399 drivers/net/wireless/intersil/hostap/hostap_hw.c 		hostap_info_process(local, skb);
skb              2400 drivers/net/wireless/intersil/hostap/hostap_hw.c 		dev_kfree_skb(skb);
skb              2412 drivers/net/wireless/intersil/hostap/hostap_hw.c 	struct sk_buff *skb;
skb              2443 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb = dev_alloc_skb(sizeof(info) + left);
skb              2444 drivers/net/wireless/intersil/hostap/hostap_hw.c 	if (skb == NULL) {
skb              2451 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb_put_data(skb, &info, sizeof(info));
skb              2452 drivers/net/wireless/intersil/hostap/hostap_hw.c 	if (left > 0 && hfa384x_from_bap(dev, BAP0, skb_put(skb, left), left))
skb              2458 drivers/net/wireless/intersil/hostap/hostap_hw.c 		dev_kfree_skb(skb);
skb              2463 drivers/net/wireless/intersil/hostap/hostap_hw.c 	skb_queue_tail(&local->info_list, skb);
skb              3319 drivers/net/wireless/intersil/hostap/hostap_hw.c 		if (local->frag_cache[i].skb != NULL)
skb              3320 drivers/net/wireless/intersil/hostap/hostap_hw.c 			dev_kfree_skb(local->frag_cache[i].skb);
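
hostap_hw.c keeps interrupt handling short by parking received frames, TX exceptions, and info frames on sk_buff_head queues (rx_list, sta_tx_exc_list, info_list) and draining them later with skb_dequeue() loops. A stripped-down sketch of that producer/consumer pattern, with placeholder names for the queue and the processing step:

#include <linux/skbuff.h>

static struct sk_buff_head rx_list;	/* skb_queue_head_init() at setup */

/* Producer (e.g. interrupt context): enqueue and defer. The queue
 * operations take the list lock, so no extra locking is needed. */
static void rx_enqueue_sketch(struct sk_buff *skb)
{
	skb_queue_tail(&rx_list, skb);
	/* schedule a tasklet or work item to drain */
}

/* Consumer (e.g. tasklet): drain until empty, freeing as we go. */
static void rx_drain_sketch(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&rx_list)) != NULL) {
		/* process_rx_sketch(skb); */
		dev_kfree_skb(skb);
	}
}
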
skb               368 drivers/net/wireless/intersil/hostap/hostap_info.c void hostap_info_process(local_info_t *local, struct sk_buff *skb)
skb               377 drivers/net/wireless/intersil/hostap/hostap_info.c 	info = (struct hfa384x_info_frame *) skb->data;
skb               378 drivers/net/wireless/intersil/hostap/hostap_info.c 	buf = skb->data + sizeof(*info);
skb               379 drivers/net/wireless/intersil/hostap/hostap_info.c 	left = skb->len - sizeof(*info);
skb               577 drivers/net/wireless/intersil/hostap/hostap_main.c static int hostap_80211_header_parse(const struct sk_buff *skb,
skb               580 drivers/net/wireless/intersil/hostap/hostap_main.c 	memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
skb              1029 drivers/net/wireless/intersil/hostap/hostap_main.c 	struct sk_buff *skb;
skb              1034 drivers/net/wireless/intersil/hostap/hostap_main.c 	skb = dev_alloc_skb(IEEE80211_MGMT_HDR_LEN + bodylen);
skb              1035 drivers/net/wireless/intersil/hostap/hostap_main.c 	if (skb == NULL)
skb              1038 drivers/net/wireless/intersil/hostap/hostap_main.c 	mgmt = skb_put_zero(skb, IEEE80211_MGMT_HDR_LEN);
skb              1044 drivers/net/wireless/intersil/hostap/hostap_main.c 		skb_put_data(skb, body, bodylen);
skb              1046 drivers/net/wireless/intersil/hostap/hostap_main.c 	meta = (struct hostap_skb_tx_data *) skb->cb;
skb              1051 drivers/net/wireless/intersil/hostap/hostap_main.c 	skb->dev = dev;
skb              1052 drivers/net/wireless/intersil/hostap/hostap_main.c 	skb_reset_mac_header(skb);
skb              1053 drivers/net/wireless/intersil/hostap/hostap_main.c 	skb_reset_network_header(skb);
skb              1054 drivers/net/wireless/intersil/hostap/hostap_main.c 	dev_queue_xmit(skb);
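
The hostap_main.c lines show the standard recipe for synthesizing a management frame: allocate, lay down a zeroed fixed header with skb_put_zero(), append the variable body with skb_put_data(), stash per-packet metadata in skb->cb, reset the header offsets, and hand off with dev_queue_xmit(). A hedged sketch of the same shape (the 24-byte header length and the omitted field fills are illustrative):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>

/* Sketch: build and queue a management frame. Returns 0 or -ENOMEM. */
static int send_mgmt_sketch(struct net_device *dev,
			    const void *body, size_t bodylen)
{
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;

	skb = dev_alloc_skb(24 + bodylen);
	if (!skb)
		return -ENOMEM;

	mgmt = skb_put_zero(skb, 24);	/* zeroed fixed header */
	/* fill mgmt->frame_control and da/sa/bssid here */

	if (bodylen)
		skb_put_data(skb, body, bodylen);

	skb->dev = dev;
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	dev_queue_xmit(skb);		/* consumes the skb */
	return 0;
}
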
skb               545 drivers/net/wireless/intersil/hostap/hostap_wlan.h 	struct sk_buff *skb;
skb               600 drivers/net/wireless/intersil/hostap/hostap_wlan.h 	int (*tx)(struct sk_buff *skb, struct net_device *dev);
skb               203 drivers/net/wireless/intersil/orinoco/main.c 	struct sk_buff *skb;
skb               356 drivers/net/wireless/intersil/orinoco/main.c int orinoco_process_xmit_skb(struct sk_buff *skb,
skb               375 drivers/net/wireless/intersil/orinoco/main.c 	eh = (struct ethhdr *)skb->data;
skb               383 drivers/net/wireless/intersil/orinoco/main.c 		int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);
skb               385 drivers/net/wireless/intersil/orinoco/main.c 		if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
skb               389 drivers/net/wireless/intersil/orinoco/main.c 				       dev->name, skb_headroom(skb));
skb               399 drivers/net/wireless/intersil/orinoco/main.c 		eh = skb_push(skb, ENCAPS_OVERHEAD);
skb               405 drivers/net/wireless/intersil/orinoco/main.c 		size_t len = skb->len - ETH_HLEN;
skb               410 drivers/net/wireless/intersil/orinoco/main.c 		if (skb->len % 2) {
skb               411 drivers/net/wireless/intersil/orinoco/main.c 			*mic = skb->data[skb->len - 1];
skb               417 drivers/net/wireless/intersil/orinoco/main.c 			    skb->data + ETH_HLEN,
skb               425 drivers/net/wireless/intersil/orinoco/main.c static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
skb               463 drivers/net/wireless/intersil/orinoco/main.c 	if (skb->len < ETH_HLEN)
skb               468 drivers/net/wireless/intersil/orinoco/main.c 	err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
skb               514 drivers/net/wireless/intersil/orinoco/main.c 	err = hw->ops->bap_pwrite(hw, USER_BAP, skb->data, skb->len,
skb               523 drivers/net/wireless/intersil/orinoco/main.c 		size_t offset = HERMES_802_3_OFFSET + skb->len;
skb               552 drivers/net/wireless/intersil/orinoco/main.c 	stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
skb               561 drivers/net/wireless/intersil/orinoco/main.c 	dev_kfree_skb(skb);
skb               699 drivers/net/wireless/intersil/orinoco/main.c 				struct sk_buff *skb,
skb               715 drivers/net/wireless/intersil/orinoco/main.c 		orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN,
skb               738 drivers/net/wireless/intersil/orinoco/main.c 	struct sk_buff *skb;
skb               787 drivers/net/wireless/intersil/orinoco/main.c 	skb = dev_alloc_skb(hdrlen + datalen);
skb               788 drivers/net/wireless/intersil/orinoco/main.c 	if (!skb) {
skb               795 drivers/net/wireless/intersil/orinoco/main.c 	skb_put_data(skb, &(desc->frame_ctl), hdrlen);
skb               796 drivers/net/wireless/intersil/orinoco/main.c 	skb_reset_mac_header(skb);
skb               800 drivers/net/wireless/intersil/orinoco/main.c 		err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
skb               810 drivers/net/wireless/intersil/orinoco/main.c 	skb->dev = dev;
skb               811 drivers/net/wireless/intersil/orinoco/main.c 	skb->ip_summed = CHECKSUM_NONE;
skb               812 drivers/net/wireless/intersil/orinoco/main.c 	skb->pkt_type = PACKET_OTHERHOST;
skb               813 drivers/net/wireless/intersil/orinoco/main.c 	skb->protocol = cpu_to_be16(ETH_P_802_2);
skb               816 drivers/net/wireless/intersil/orinoco/main.c 	stats->rx_bytes += skb->len;
skb               818 drivers/net/wireless/intersil/orinoco/main.c 	netif_rx(skb);
skb               822 drivers/net/wireless/intersil/orinoco/main.c 	dev_kfree_skb_irq(skb);
skb               833 drivers/net/wireless/intersil/orinoco/main.c 	struct sk_buff *skb = NULL;
skb               902 drivers/net/wireless/intersil/orinoco/main.c 	skb = dev_alloc_skb(length + ETH_HLEN + 2 + 1);
skb               903 drivers/net/wireless/intersil/orinoco/main.c 	if (!skb) {
skb               912 drivers/net/wireless/intersil/orinoco/main.c 	skb_reserve(skb, ETH_HLEN + 2);
skb               914 drivers/net/wireless/intersil/orinoco/main.c 	err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, length),
skb               929 drivers/net/wireless/intersil/orinoco/main.c 	rx_data->skb = skb;
skb               936 drivers/net/wireless/intersil/orinoco/main.c 	dev_kfree_skb_irq(skb);
skb               947 drivers/net/wireless/intersil/orinoco/main.c 		       struct sk_buff *skb)
skb               970 drivers/net/wireless/intersil/orinoco/main.c 		rxmic = skb->data + skb->len - MICHAEL_MIC_LEN;
skb               972 drivers/net/wireless/intersil/orinoco/main.c 		skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
skb               986 drivers/net/wireless/intersil/orinoco/main.c 			    skb->data, skb->len, &mic[0]);
skb              1028 drivers/net/wireless/intersil/orinoco/main.c 	     is_ethersnap(skb->data))) {
skb              1032 drivers/net/wireless/intersil/orinoco/main.c 		hdr = skb_push(skb, ETH_HLEN - ENCAPS_OVERHEAD);
skb              1035 drivers/net/wireless/intersil/orinoco/main.c 		hdr = skb_push(skb, ETH_HLEN);
skb              1044 drivers/net/wireless/intersil/orinoco/main.c 	skb->protocol = eth_type_trans(skb, dev);
skb              1045 drivers/net/wireless/intersil/orinoco/main.c 	skb->ip_summed = CHECKSUM_NONE;
skb              1047 drivers/net/wireless/intersil/orinoco/main.c 		skb->pkt_type = PACKET_OTHERHOST;
skb              1050 drivers/net/wireless/intersil/orinoco/main.c 	orinoco_stat_gather(dev, skb, desc);
skb              1053 drivers/net/wireless/intersil/orinoco/main.c 	netif_rx(skb);
skb              1060 drivers/net/wireless/intersil/orinoco/main.c 	dev_kfree_skb(skb);
skb              1071 drivers/net/wireless/intersil/orinoco/main.c 	struct sk_buff *skb;
skb              1086 drivers/net/wireless/intersil/orinoco/main.c 		skb = rx_data->skb;
skb              1090 drivers/net/wireless/intersil/orinoco/main.c 		orinoco_rx(dev, desc, skb);
skb              2319 drivers/net/wireless/intersil/orinoco/main.c 		dev_kfree_skb(rx_data->skb);
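
orinoco_process_xmit_skb() performs the classic 802.3-to-802.2+SNAP re-encapsulation on transmit: frames that need an RFC 1042 header are rejected when headroom is short of ENCAPS_OVERHEAD, otherwise the prefix is prepended in place with skb_push(). A simplified sketch of just the headroom check and push (the real code also rewrites the length field and MAC addresses):

#include <linux/skbuff.h>
#include <linux/string.h>

/* RFC 1042 LLC/SNAP prefix: AA AA 03 00 00 00 */
static const u8 snap_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

/* Sketch: prepend LLC/SNAP in place when headroom allows. */
static int encap_sketch(struct sk_buff *skb)
{
	u8 *p;

	if (skb_headroom(skb) < sizeof(snap_hdr))
		return -ENOMEM;		/* caller drops the frame */

	p = skb_push(skb, sizeof(snap_hdr));
	memcpy(p, snap_hdr, sizeof(snap_hdr));
	return 0;
}
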
skb               199 drivers/net/wireless/intersil/orinoco/orinoco.h int orinoco_process_xmit_skb(struct sk_buff *skb,
skb              1193 drivers/net/wireless/intersil/orinoco/orinoco_usb.c static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1234 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	if (skb->len < ETH_HLEN)
skb              1246 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
skb              1257 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	memcpy(buf, skb->data, skb->len);
skb              1258 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	buf += skb->len;
skb              1265 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 		if (skb->len % 2)
skb              1289 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	stats->tx_bytes += skb->len;
skb              1298 drivers/net/wireless/intersil/orinoco/orinoco_usb.c 	dev_kfree_skb(skb);
skb               192 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               201 drivers/net/wireless/intersil/p54/fwio.c 	skb = __dev_alloc_skb(priv->tx_hdr_len + frame_len, memflags);
skb               202 drivers/net/wireless/intersil/p54/fwio.c 	if (!skb)
skb               204 drivers/net/wireless/intersil/p54/fwio.c 	skb_reserve(skb, priv->tx_hdr_len);
skb               206 drivers/net/wireless/intersil/p54/fwio.c 	hdr = skb_put(skb, sizeof(*hdr));
skb               211 drivers/net/wireless/intersil/p54/fwio.c 	return skb;
skb               218 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               228 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL, eeprom_hdr_size +
skb               231 drivers/net/wireless/intersil/p54/fwio.c 	if (unlikely(!skb))
skb               236 drivers/net/wireless/intersil/p54/fwio.c 	eeprom_hdr = skb_put(skb, eeprom_hdr_size + len);
skb               248 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               264 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               267 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*tim),
skb               269 drivers/net/wireless/intersil/p54/fwio.c 	if (unlikely(!skb))
skb               272 drivers/net/wireless/intersil/p54/fwio.c 	tim = skb_put(skb, sizeof(*tim));
skb               275 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               281 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               284 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*sta),
skb               286 drivers/net/wireless/intersil/p54/fwio.c 	if (unlikely(!skb))
skb               289 drivers/net/wireless/intersil/p54/fwio.c 	sta = skb_put(skb, sizeof(*sta));
skb               291 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               297 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               304 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*cancel),
skb               306 drivers/net/wireless/intersil/p54/fwio.c 	if (unlikely(!skb))
skb               309 drivers/net/wireless/intersil/p54/fwio.c 	cancel = skb_put(skb, sizeof(*cancel));
skb               311 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               317 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               321 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*setup),
skb               323 drivers/net/wireless/intersil/p54/fwio.c 	if (!skb)
skb               326 drivers/net/wireless/intersil/p54/fwio.c 	setup = skb_put(skb, sizeof(*setup));
skb               386 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               393 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               405 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) +
skb               409 drivers/net/wireless/intersil/p54/fwio.c 	if (!skb)
skb               412 drivers/net/wireless/intersil/p54/fwio.c 	head = skb_put(skb, sizeof(*head));
skb               419 drivers/net/wireless/intersil/p54/fwio.c 		__le16 *pa_power_points = skb_put(skb, 2);
skb               423 drivers/net/wireless/intersil/p54/fwio.c 	iq_autocal = skb_put(skb, sizeof(*iq_autocal));
skb               436 drivers/net/wireless/intersil/p54/fwio.c 		body = skb_put(skb, sizeof(body->longbow));
skb               438 drivers/net/wireless/intersil/p54/fwio.c 		body = skb_put(skb, sizeof(body->normal));
skb               499 drivers/net/wireless/intersil/p54/fwio.c 		rate = skb_put(skb, sizeof(*rate));
skb               505 drivers/net/wireless/intersil/p54/fwio.c 	rssi = skb_put(skb, sizeof(*rssi));
skb               511 drivers/net/wireless/intersil/p54/fwio.c 		rssi = skb_put(skb, sizeof(*rssi));
skb               517 drivers/net/wireless/intersil/p54/fwio.c 		rate = skb_put(skb, sizeof(*rate));
skb               523 drivers/net/wireless/intersil/p54/fwio.c 	hdr = (struct p54_hdr *) skb->data;
skb               524 drivers/net/wireless/intersil/p54/fwio.c 	hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
skb               526 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               535 drivers/net/wireless/intersil/p54/fwio.c 	dev_kfree_skb_any(skb);
skb               541 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               544 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*led),
skb               546 drivers/net/wireless/intersil/p54/fwio.c 	if (unlikely(!skb))
skb               549 drivers/net/wireless/intersil/p54/fwio.c 	led = skb_put(skb, sizeof(*led));
skb               554 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               560 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               564 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf),
skb               566 drivers/net/wireless/intersil/p54/fwio.c 	if (unlikely(!skb))
skb               569 drivers/net/wireless/intersil/p54/fwio.c 	edcf = skb_put(skb, sizeof(*edcf));
skb               591 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               597 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               609 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*psm),
skb               611 drivers/net/wireless/intersil/p54/fwio.c 	if (!skb)
skb               614 drivers/net/wireless/intersil/p54/fwio.c 	psm = skb_put(skb, sizeof(*psm));
skb               628 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               635 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               638 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*xbow),
skb               640 drivers/net/wireless/intersil/p54/fwio.c 	if (unlikely(!skb))
skb               643 drivers/net/wireless/intersil/p54/fwio.c 	xbow = skb_put(skb, sizeof(*xbow));
skb               648 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               655 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               658 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*rxkey),
skb               660 drivers/net/wireless/intersil/p54/fwio.c 	if (unlikely(!skb))
skb               663 drivers/net/wireless/intersil/p54/fwio.c 	rxkey = skb_put(skb, sizeof(*rxkey));
skb               694 drivers/net/wireless/intersil/p54/fwio.c 		dev_kfree_skb(skb);
skb               698 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               706 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               708 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL,
skb               711 drivers/net/wireless/intersil/p54/fwio.c 	if (!skb)
skb               724 drivers/net/wireless/intersil/p54/fwio.c 	txinfo = IEEE80211_SKB_CB(skb);
skb               728 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
skb               735 drivers/net/wireless/intersil/p54/fwio.c 	struct sk_buff *skb;
skb               738 drivers/net/wireless/intersil/p54/fwio.c 	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*grp),
skb               740 drivers/net/wireless/intersil/p54/fwio.c 	if (!skb)
skb               743 drivers/net/wireless/intersil/p54/fwio.c 	grp = skb_put(skb, sizeof(*grp));
skb               759 drivers/net/wireless/intersil/p54/fwio.c 	p54_tx(priv, skb);
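
Every p54 firmware command above follows one template: p54_alloc_skb() reserves tx_hdr_len headroom and writes the p54_hdr, the caller appends its command struct with skb_put(), and p54_tx() queues the result. A generic sketch of the pattern, with function pointers standing in for p54_alloc_skb() and p54_tx() and an illustrative payload struct:

#include <linux/skbuff.h>
#include <linux/types.h>

struct cmd_payload {			/* illustrative command body */
	__le16 mode;
	__le16 flags;
};

/* Sketch: allocate, append a typed payload, hand off to the tx path. */
static int send_cmd_sketch(struct sk_buff *(*alloc_cmd)(size_t len),
			   void (*tx)(struct sk_buff *skb))
{
	struct sk_buff *skb;
	struct cmd_payload *cmd;

	skb = alloc_cmd(sizeof(*cmd));
	if (unlikely(!skb))
		return -ENOMEM;

	cmd = skb_put(skb, sizeof(*cmd));	/* returns the new room */
	cmd->mode = cpu_to_le16(1);
	cmd->flags = cpu_to_le16(0);

	tx(skb);				/* consumes the skb */
	return 0;
}
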
skb                87 drivers/net/wireless/intersil/p54/lmac.h #define GET_REQ_ID(skb)							\
skb                88 drivers/net/wireless/intersil/p54/lmac.h 	(((struct p54_hdr *) ((struct sk_buff *) skb)->data)->req_id)	\
skb                90 drivers/net/wireless/intersil/p54/lmac.h #define FREE_AFTER_TX(skb)						\
skb                91 drivers/net/wireless/intersil/p54/lmac.h 	((((struct p54_hdr *) ((struct sk_buff *) skb)->data)->		\
skb                94 drivers/net/wireless/intersil/p54/lmac.h #define IS_DATA_FRAME(skb)						\
skb                95 drivers/net/wireless/intersil/p54/lmac.h 	(!((((struct p54_hdr *) ((struct sk_buff *) skb)->data)->	\
skb                98 drivers/net/wireless/intersil/p54/lmac.h #define GET_HW_QUEUE(skb)						\
skb               100 drivers/net/wireless/intersil/p54/lmac.h 	skb->data)->data)->hw_queue)
skb               528 drivers/net/wireless/intersil/p54/lmac.h 		  struct sk_buff *skb);
skb               530 drivers/net/wireless/intersil/p54/lmac.h void p54_tx(struct p54_common *priv, struct sk_buff *skb);
skb               557 drivers/net/wireless/intersil/p54/lmac.h u8 *p54_find_ie(struct sk_buff *skb, u8 ie);
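
The lmac.h macros all decode the p54_hdr that the driver keeps at the front of every queued frame, so a bus backend can ask "may this skb be freed once the hardware is done?" without carrying extra state. A short usage sketch, assuming the driver's own lmac.h declarations:

#include "lmac.h"	/* FREE_AFTER_TX(), p54_free_skb() */

/* Sketch: a tx-completion handler deciding whether to free, exactly
 * the shape p54pci.c and p54spi.c use after DMA completes. */
static void tx_done_sketch(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	if (skb && FREE_AFTER_TX(skb))
		p54_free_skb(dev, skb);	/* unlink + return to mac80211 */
}
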
skb                74 drivers/net/wireless/intersil/p54/main.c u8 *p54_find_ie(struct sk_buff *skb, u8 ie)
skb                76 drivers/net/wireless/intersil/p54/main.c 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
skb                79 drivers/net/wireless/intersil/p54/main.c 	if (skb->len <= sizeof(mgmt))
skb                83 drivers/net/wireless/intersil/p54/main.c 	end = skb->data + skb->len;
skb                96 drivers/net/wireless/intersil/p54/main.c static int p54_beacon_format_ie_tim(struct sk_buff *skb)
skb               108 drivers/net/wireless/intersil/p54/main.c 	tim = p54_find_ie(skb, WLAN_EID_TIM);
skb               119 drivers/net/wireless/intersil/p54/main.c 	memmove(tim, next, skb_tail_pointer(skb) - next);
skb               120 drivers/net/wireless/intersil/p54/main.c 	tim = skb_tail_pointer(skb) - (dtim_len + 2);
skb               130 drivers/net/wireless/intersil/p54/main.c 		skb_trim(skb, skb->len - (dtim_len - 3));
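
p54_find_ie() walks the information elements that follow the fixed management header, and p54_beacon_format_ie_tim() uses it to relocate the TIM element. The walk itself is the standard [id][len][data] scan; a self-contained sketch with explicit bounds checks:

/* Sketch: linear scan over 802.11 information elements laid out as
 * [id][len][data...] tuples. Returns a pointer to the element with
 * the requested id, or NULL if absent or truncated. */
static unsigned char *find_ie_sketch(unsigned char *pos, unsigned char *end,
				     unsigned char ie)
{
	while (pos + 2 <= end) {
		unsigned char len = pos[1];

		if (pos + 2 + len > end)	/* truncated element */
			return NULL;
		if (pos[0] == ie)
			return pos;
		pos += 2 + len;
	}
	return NULL;
}
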
skb               167 drivers/net/wireless/intersil/p54/p54.h 	void (*tx)(struct ieee80211_hw *dev, struct sk_buff *skb);
skb               266 drivers/net/wireless/intersil/p54/p54.h int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb);
skb               267 drivers/net/wireless/intersil/p54/p54.h void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb);
skb               150 drivers/net/wireless/intersil/p54/p54pci.c 			struct sk_buff *skb;
skb               152 drivers/net/wireless/intersil/p54/p54pci.c 			skb = dev_alloc_skb(priv->common.rx_mtu + 32);
skb               153 drivers/net/wireless/intersil/p54/p54pci.c 			if (!skb)
skb               157 drivers/net/wireless/intersil/p54/p54pci.c 						 skb_tail_pointer(skb),
skb               162 drivers/net/wireless/intersil/p54/p54pci.c 				dev_kfree_skb_any(skb);
skb               172 drivers/net/wireless/intersil/p54/p54pci.c 			rx_buf[i] = skb;
skb               198 drivers/net/wireless/intersil/p54/p54pci.c 		struct sk_buff *skb;
skb               202 drivers/net/wireless/intersil/p54/p54pci.c 		skb = rx_buf[i];
skb               204 drivers/net/wireless/intersil/p54/p54pci.c 		if (!skb) {
skb               220 drivers/net/wireless/intersil/p54/p54pci.c 		skb_put(skb, len);
skb               222 drivers/net/wireless/intersil/p54/p54pci.c 		if (p54_rx(dev, skb)) {
skb               228 drivers/net/wireless/intersil/p54/p54pci.c 			skb_trim(skb, 0);
skb               248 drivers/net/wireless/intersil/p54/p54pci.c 	struct sk_buff *skb;
skb               258 drivers/net/wireless/intersil/p54/p54pci.c 		skb = tx_buf[i];
skb               269 drivers/net/wireless/intersil/p54/p54pci.c 		if (skb && FREE_AFTER_TX(skb))
skb               270 drivers/net/wireless/intersil/p54/p54pci.c 			p54_free_skb(dev, skb);
skb               324 drivers/net/wireless/intersil/p54/p54pci.c static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
skb               337 drivers/net/wireless/intersil/p54/p54pci.c 	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
skb               341 drivers/net/wireless/intersil/p54/p54pci.c 		p54_free_skb(dev, skb);
skb               345 drivers/net/wireless/intersil/p54/p54pci.c 	priv->tx_buf_data[i] = skb;
skb               349 drivers/net/wireless/intersil/p54/p54pci.c 	desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
skb               350 drivers/net/wireless/intersil/p54/p54pci.c 	desc->len = cpu_to_le16(skb->len);
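
p54p_tx() maps the linear skb for device DMA with pci_map_single(), checks the mapping before touching the ring, and frees the frame if mapping fails. A sketch of that step using the same legacy PCI DMA API this driver is written against:

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Sketch: map skb->data for device-bound DMA; on failure the caller
 * must free the skb itself, as p54p_tx() does via p54_free_skb(). */
static int map_tx_sketch(struct pci_dev *pdev, struct sk_buff *skb,
			 dma_addr_t *mapping)
{
	*mapping = pci_map_single(pdev, skb->data, skb->len,
				  PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *mapping))
		return -ENOMEM;
	return 0;
}
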
skb               328 drivers/net/wireless/intersil/p54/p54spi.c 	struct sk_buff *skb;
skb               354 drivers/net/wireless/intersil/p54/p54spi.c 	skb = dev_alloc_skb(len + 4);
skb               355 drivers/net/wireless/intersil/p54/p54spi.c 	if (!skb) {
skb               362 drivers/net/wireless/intersil/p54/p54spi.c 		skb_put_data(skb, rx_head + 1, len);
skb               364 drivers/net/wireless/intersil/p54/p54spi.c 		skb_put_data(skb, rx_head + 1, READAHEAD_SZ);
skb               366 drivers/net/wireless/intersil/p54/p54spi.c 				skb_put(skb, len - READAHEAD_SZ),
skb               373 drivers/net/wireless/intersil/p54/p54spi.c 	skb_put(skb, 4);
skb               375 drivers/net/wireless/intersil/p54/p54spi.c 	if (p54_rx(priv->hw, skb) == 0)
skb               376 drivers/net/wireless/intersil/p54/p54spi.c 		dev_kfree_skb(skb);
skb               392 drivers/net/wireless/intersil/p54/p54spi.c static int p54spi_tx_frame(struct p54s_priv *priv, struct sk_buff *skb)
skb               394 drivers/net/wireless/intersil/p54/p54spi.c 	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
skb               400 drivers/net/wireless/intersil/p54/p54spi.c 	ret = p54spi_spi_write_dma(priv, hdr->req_id, skb->data, skb->len);
skb               413 drivers/net/wireless/intersil/p54/p54spi.c 	if (FREE_AFTER_TX(skb))
skb               414 drivers/net/wireless/intersil/p54/p54spi.c 		p54_free_skb(priv->hw, skb);
skb               423 drivers/net/wireless/intersil/p54/p54spi.c 	struct sk_buff *skb;
skb               446 drivers/net/wireless/intersil/p54/p54spi.c 		skb = container_of((void *) info, struct sk_buff, cb);
skb               448 drivers/net/wireless/intersil/p54/p54spi.c 		ret = p54spi_tx_frame(priv, skb);
skb               451 drivers/net/wireless/intersil/p54/p54spi.c 			p54_free_skb(priv->hw, skb);
skb               461 drivers/net/wireless/intersil/p54/p54spi.c static void p54spi_op_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
skb               464 drivers/net/wireless/intersil/p54/p54spi.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               142 drivers/net/wireless/intersil/p54/p54usb.c 	struct sk_buff *skb = (struct sk_buff *) urb->context;
skb               143 drivers/net/wireless/intersil/p54/p54usb.c 	struct p54u_rx_info *info = (struct p54u_rx_info *)skb->cb;
skb               147 drivers/net/wireless/intersil/p54/p54usb.c 	skb_unlink(skb, &priv->rx_queue);
skb               150 drivers/net/wireless/intersil/p54/p54usb.c 		dev_kfree_skb_irq(skb);
skb               154 drivers/net/wireless/intersil/p54/p54usb.c 	skb_put(skb, urb->actual_length);
skb               157 drivers/net/wireless/intersil/p54/p54usb.c 		skb_pull(skb, priv->common.tx_hdr_len);
skb               159 drivers/net/wireless/intersil/p54/p54usb.c 		skb_pull(skb, 4);
skb               160 drivers/net/wireless/intersil/p54/p54usb.c 		skb_put(skb, 4);
skb               163 drivers/net/wireless/intersil/p54/p54usb.c 	if (p54_rx(dev, skb)) {
skb               164 drivers/net/wireless/intersil/p54/p54usb.c 		skb = dev_alloc_skb(priv->common.rx_mtu + 32);
skb               165 drivers/net/wireless/intersil/p54/p54usb.c 		if (unlikely(!skb)) {
skb               170 drivers/net/wireless/intersil/p54/p54usb.c 		info = (struct p54u_rx_info *) skb->cb;
skb               173 drivers/net/wireless/intersil/p54/p54usb.c 		urb->transfer_buffer = skb_tail_pointer(skb);
skb               174 drivers/net/wireless/intersil/p54/p54usb.c 		urb->context = skb;
skb               177 drivers/net/wireless/intersil/p54/p54usb.c 			skb_push(skb, priv->common.tx_hdr_len);
skb               179 drivers/net/wireless/intersil/p54/p54usb.c 			skb_push(skb, 4);
skb               180 drivers/net/wireless/intersil/p54/p54usb.c 			skb_put(skb, 4);
skb               182 drivers/net/wireless/intersil/p54/p54usb.c 		skb_reset_tail_pointer(skb);
skb               183 drivers/net/wireless/intersil/p54/p54usb.c 		skb_trim(skb, 0);
skb               184 drivers/net/wireless/intersil/p54/p54usb.c 		urb->transfer_buffer = skb_tail_pointer(skb);
skb               186 drivers/net/wireless/intersil/p54/p54usb.c 	skb_queue_tail(&priv->rx_queue, skb);
skb               189 drivers/net/wireless/intersil/p54/p54usb.c 		skb_unlink(skb, &priv->rx_queue);
skb               191 drivers/net/wireless/intersil/p54/p54usb.c 		dev_kfree_skb_irq(skb);
skb               197 drivers/net/wireless/intersil/p54/p54usb.c 	struct sk_buff *skb = urb->context;
skb               201 drivers/net/wireless/intersil/p54/p54usb.c 	p54_free_skb(dev, skb);
skb               226 drivers/net/wireless/intersil/p54/p54usb.c 	struct sk_buff *skb;
skb               231 drivers/net/wireless/intersil/p54/p54usb.c 		skb = __dev_alloc_skb(priv->common.rx_mtu + 32, GFP_KERNEL);
skb               232 drivers/net/wireless/intersil/p54/p54usb.c 		if (!skb) {
skb               244 drivers/net/wireless/intersil/p54/p54usb.c 				  skb_tail_pointer(skb),
skb               245 drivers/net/wireless/intersil/p54/p54usb.c 				  priv->common.rx_mtu + 32, p54u_rx_cb, skb);
skb               246 drivers/net/wireless/intersil/p54/p54usb.c 		info = (struct p54u_rx_info *) skb->cb;
skb               249 drivers/net/wireless/intersil/p54/p54usb.c 		skb_queue_tail(&priv->rx_queue, skb);
skb               254 drivers/net/wireless/intersil/p54/p54usb.c 			skb_unlink(skb, &priv->rx_queue);
skb               266 drivers/net/wireless/intersil/p54/p54usb.c 	kfree_skb(skb);
skb               294 drivers/net/wireless/intersil/p54/p54usb.c static void p54u_tx_lm87(struct ieee80211_hw *dev, struct sk_buff *skb)
skb               298 drivers/net/wireless/intersil/p54/p54usb.c 	struct lm87_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr);
skb               302 drivers/net/wireless/intersil/p54/p54usb.c 		p54_free_skb(dev, skb);
skb               306 drivers/net/wireless/intersil/p54/p54usb.c 	hdr->chksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len);
skb               307 drivers/net/wireless/intersil/p54/p54usb.c 	hdr->device_addr = ((struct p54_hdr *)skb->data)->req_id;
skb               311 drivers/net/wireless/intersil/p54/p54usb.c 			  hdr, skb->len + sizeof(*hdr),  FREE_AFTER_TX(skb) ?
skb               312 drivers/net/wireless/intersil/p54/p54usb.c 			  p54u_tx_cb : p54u_tx_dummy_cb, skb);
skb               318 drivers/net/wireless/intersil/p54/p54usb.c 		p54_free_skb(dev, skb);
skb               323 drivers/net/wireless/intersil/p54/p54usb.c static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
skb               327 drivers/net/wireless/intersil/p54/p54usb.c 	struct net2280_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr);
skb               348 drivers/net/wireless/intersil/p54/p54usb.c 	hdr->len = cpu_to_le16(skb->len);
skb               349 drivers/net/wireless/intersil/p54/p54usb.c 	hdr->device_addr = ((struct p54_hdr *) skb->data)->req_id;
skb               365 drivers/net/wireless/intersil/p54/p54usb.c 			  hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ?
skb               366 drivers/net/wireless/intersil/p54/p54usb.c 			  p54u_tx_cb : p54u_tx_dummy_cb, skb);
skb               388 drivers/net/wireless/intersil/p54/p54usb.c 		p54_free_skb(dev, skb);
skb                32 drivers/net/wireless/intersil/p54/txrx.c 	struct sk_buff *skb;
skb                43 drivers/net/wireless/intersil/p54/txrx.c 	skb_queue_walk(&priv->tx_queue, skb) {
skb                44 drivers/net/wireless/intersil/p54/txrx.c 		info = IEEE80211_SKB_CB(skb);
skb                46 drivers/net/wireless/intersil/p54/txrx.c 		hdr = (void *) skb->data;
skb                53 drivers/net/wireless/intersil/p54/txrx.c 			    i++, skb, skb->len,
skb                79 drivers/net/wireless/intersil/p54/txrx.c static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb)
skb                84 drivers/net/wireless/intersil/p54/txrx.c 	struct p54_hdr *data = (void *) skb->data;
skb                88 drivers/net/wireless/intersil/p54/txrx.c 	u16 len = priv->headroom + skb->len + priv->tailroom + 3;
skb                90 drivers/net/wireless/intersil/p54/txrx.c 	info = IEEE80211_SKB_CB(skb);
skb               133 drivers/net/wireless/intersil/p54/txrx.c 	info = IEEE80211_SKB_CB(skb);
skb               138 drivers/net/wireless/intersil/p54/txrx.c 	if (IS_DATA_FRAME(skb) &&
skb               139 drivers/net/wireless/intersil/p54/txrx.c 	    unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON))
skb               143 drivers/net/wireless/intersil/p54/txrx.c 		__skb_queue_after(&priv->tx_queue, target_skb, skb);
skb               145 drivers/net/wireless/intersil/p54/txrx.c 		__skb_queue_head(&priv->tx_queue, skb);
skb               152 drivers/net/wireless/intersil/p54/txrx.c 	struct sk_buff *skb;
skb               155 drivers/net/wireless/intersil/p54/txrx.c 	skb = skb_dequeue(&priv->tx_pending);
skb               156 drivers/net/wireless/intersil/p54/txrx.c 	if (unlikely(!skb))
skb               159 drivers/net/wireless/intersil/p54/txrx.c 	ret = p54_assign_address(priv, skb);
skb               161 drivers/net/wireless/intersil/p54/txrx.c 		skb_queue_head(&priv->tx_pending, skb);
skb               163 drivers/net/wireless/intersil/p54/txrx.c 		priv->tx(priv->hw, skb);
skb               186 drivers/net/wireless/intersil/p54/txrx.c 				       struct sk_buff *skb,
skb               216 drivers/net/wireless/intersil/p54/txrx.c 				       struct sk_buff *skb)
skb               218 drivers/net/wireless/intersil/p54/txrx.c 	if (IS_DATA_FRAME(skb)) {
skb               222 drivers/net/wireless/intersil/p54/txrx.c 		priv->tx_stats[GET_HW_QUEUE(skb)].len--;
skb               225 drivers/net/wireless/intersil/p54/txrx.c 		if (unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) {
skb               226 drivers/net/wireless/intersil/p54/txrx.c 			if (priv->beacon_req_id == GET_REQ_ID(skb)) {
skb               236 drivers/net/wireless/intersil/p54/txrx.c void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
skb               239 drivers/net/wireless/intersil/p54/txrx.c 	if (unlikely(!skb))
skb               242 drivers/net/wireless/intersil/p54/txrx.c 	skb_unlink(skb, &priv->tx_queue);
skb               243 drivers/net/wireless/intersil/p54/txrx.c 	p54_tx_qos_accounting_free(priv, skb);
skb               244 drivers/net/wireless/intersil/p54/txrx.c 	ieee80211_free_txskb(dev, skb);
skb               269 drivers/net/wireless/intersil/p54/txrx.c void p54_tx(struct p54_common *priv, struct sk_buff *skb)
skb               271 drivers/net/wireless/intersil/p54/txrx.c 	skb_queue_tail(&priv->tx_pending, skb);
skb               294 drivers/net/wireless/intersil/p54/txrx.c static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb)
skb               296 drivers/net/wireless/intersil/p54/txrx.c 	struct ieee80211_hdr *hdr = (void *) skb->data;
skb               313 drivers/net/wireless/intersil/p54/txrx.c 	tim = p54_find_ie(skb, WLAN_EID_TIM);
skb               327 drivers/net/wireless/intersil/p54/txrx.c static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
skb               329 drivers/net/wireless/intersil/p54/txrx.c 	struct p54_rx_data *hdr = (struct p54_rx_data *) skb->data;
skb               330 drivers/net/wireless/intersil/p54/txrx.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb               381 drivers/net/wireless/intersil/p54/txrx.c 	skb_pull(skb, header_len);
skb               382 drivers/net/wireless/intersil/p54/txrx.c 	skb_trim(skb, le16_to_cpu(hdr->len));
skb               384 drivers/net/wireless/intersil/p54/txrx.c 	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
skb               389 drivers/net/wireless/intersil/p54/txrx.c 		p54_pspoll_workaround(priv, skb);
skb               391 drivers/net/wireless/intersil/p54/txrx.c 	ieee80211_rx_irqsafe(priv->hw, skb);
skb               399 drivers/net/wireless/intersil/p54/txrx.c static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
skb               401 drivers/net/wireless/intersil/p54/txrx.c 	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
skb               493 drivers/net/wireless/intersil/p54/txrx.c 				   struct sk_buff *skb)
skb               495 drivers/net/wireless/intersil/p54/txrx.c 	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
skb               516 drivers/net/wireless/intersil/p54/txrx.c static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
skb               518 drivers/net/wireless/intersil/p54/txrx.c 	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
skb               610 drivers/net/wireless/intersil/p54/txrx.c static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
skb               612 drivers/net/wireless/intersil/p54/txrx.c 	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
skb               646 drivers/net/wireless/intersil/p54/txrx.c static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb)
skb               648 drivers/net/wireless/intersil/p54/txrx.c 	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
skb               652 drivers/net/wireless/intersil/p54/txrx.c 		p54_rx_frame_sent(priv, skb);
skb               655 drivers/net/wireless/intersil/p54/txrx.c 		p54_rx_trap(priv, skb);
skb               660 drivers/net/wireless/intersil/p54/txrx.c 		p54_rx_stats(priv, skb);
skb               663 drivers/net/wireless/intersil/p54/txrx.c 		p54_rx_eeprom_readback(priv, skb);
skb               675 drivers/net/wireless/intersil/p54/txrx.c int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
skb               678 drivers/net/wireless/intersil/p54/txrx.c 	u16 type = le16_to_cpu(*((__le16 *)skb->data));
skb               681 drivers/net/wireless/intersil/p54/txrx.c 		return p54_rx_control(priv, skb);
skb               683 drivers/net/wireless/intersil/p54/txrx.c 		return p54_rx_data(priv, skb);
skb               687 drivers/net/wireless/intersil/p54/txrx.c static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
skb               693 drivers/net/wireless/intersil/p54/txrx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               709 drivers/net/wireless/intersil/p54/txrx.c 	*queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA;
skb               782 drivers/net/wireless/intersil/p54/txrx.c 		  struct sk_buff *skb)
skb               785 drivers/net/wireless/intersil/p54/txrx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               799 drivers/net/wireless/intersil/p54/txrx.c 	p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
skb               802 drivers/net/wireless/intersil/p54/txrx.c 	if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
skb               803 drivers/net/wireless/intersil/p54/txrx.c 		ieee80211_free_txskb(dev, skb);
skb               807 drivers/net/wireless/intersil/p54/txrx.c 	padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
skb               808 drivers/net/wireless/intersil/p54/txrx.c 	len = skb->len;
skb               811 drivers/net/wireless/intersil/p54/txrx.c 		crypt_offset = ieee80211_get_hdrlen_from_skb(skb);
skb               813 drivers/net/wireless/intersil/p54/txrx.c 			u8 *iv = (u8 *)(skb->data + crypt_offset);
skb               824 drivers/net/wireless/intersil/p54/txrx.c 	txhdr = skb_push(skb, sizeof(*txhdr) + padding);
skb               825 drivers/net/wireless/intersil/p54/txrx.c 	hdr = skb_push(skb, sizeof(*hdr));
skb               914 drivers/net/wireless/intersil/p54/txrx.c 			skb_put_data(skb,
skb               920 drivers/net/wireless/intersil/p54/txrx.c 		skb_put_zero(skb, info->control.hw_key->icv_len);
skb               945 drivers/net/wireless/intersil/p54/txrx.c 	p54_tx(priv, skb);
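
p54_tx_pending() in txrx.c shows a requeue-on-failure idiom: frames wait on tx_pending, and if p54_assign_address() cannot yet place one in device memory, the skb goes back on the head of the queue so transmit order is preserved. A generic sketch with callbacks standing in for the driver's placement and transmit steps:

#include <linux/skbuff.h>

/* Sketch: dequeue one pending frame; requeue at the head on failure
 * so ordering is preserved for the next attempt. */
static int tx_pending_sketch(struct sk_buff_head *pending,
			     int (*place)(struct sk_buff *skb),
			     void (*xmit)(struct sk_buff *skb))
{
	struct sk_buff *skb;
	int ret;

	skb = skb_dequeue(pending);
	if (unlikely(!skb))
		return 0;		/* nothing queued */

	ret = place(skb);		/* e.g. p54_assign_address() */
	if (ret)
		skb_queue_head(pending, skb);
	else
		xmit(skb);		/* consumes the skb */
	return ret;
}
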
skb               678 drivers/net/wireless/intersil/prism54/islpci_dev.c 		struct sk_buff *skb;
skb               683 drivers/net/wireless/intersil/prism54/islpci_dev.c 		if (!(skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2))) {
skb               686 drivers/net/wireless/intersil/prism54/islpci_dev.c 			skb = NULL;
skb               689 drivers/net/wireless/intersil/prism54/islpci_dev.c 		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
skb               691 drivers/net/wireless/intersil/prism54/islpci_dev.c 		priv->data_low_rx[counter] = skb;
skb               695 drivers/net/wireless/intersil/prism54/islpci_dev.c 		    pci_map_single(priv->pdev, (void *) skb->data,
skb                30 drivers/net/wireless/intersil/prism54/islpci_eth.c 	struct sk_buff *skb;
skb                45 drivers/net/wireless/intersil/prism54/islpci_eth.c 			skb = priv->data_low_tx[index];
skb                50 drivers/net/wireless/intersil/prism54/islpci_eth.c 			      skb, skb->data, skb->len, skb->truesize);
skb                55 drivers/net/wireless/intersil/prism54/islpci_eth.c 					 skb->len, PCI_DMA_TODEVICE);
skb                56 drivers/net/wireless/intersil/prism54/islpci_eth.c 			dev_kfree_skb_irq(skb);
skb                57 drivers/net/wireless/intersil/prism54/islpci_eth.c 			skb = NULL;
skb                65 drivers/net/wireless/intersil/prism54/islpci_eth.c islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
skb               103 drivers/net/wireless/intersil/prism54/islpci_eth.c 	if (likely(((long) skb->data & 0x03) | init_wds)) {
skb               105 drivers/net/wireless/intersil/prism54/islpci_eth.c 		offset = (4 - (long) skb->data) & 0x03;
skb               109 drivers/net/wireless/intersil/prism54/islpci_eth.c 		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
skb               110 drivers/net/wireless/intersil/prism54/islpci_eth.c 			unsigned char *src = skb->data;
skb               118 drivers/net/wireless/intersil/prism54/islpci_eth.c 			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
skb               121 drivers/net/wireless/intersil/prism54/islpci_eth.c 				skb_put(skb, 6);
skb               125 drivers/net/wireless/intersil/prism54/islpci_eth.c 				memmove(skb->data + 6, src, skb->len);
skb               126 drivers/net/wireless/intersil/prism54/islpci_eth.c 				skb_copy_to_linear_data(skb, wds_mac, 6);
skb               128 drivers/net/wireless/intersil/prism54/islpci_eth.c 				memmove(skb->data, src, skb->len);
skb               132 drivers/net/wireless/intersil/prism54/islpci_eth.c 			DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
skb               133 drivers/net/wireless/intersil/prism54/islpci_eth.c 			      src, skb->len);
skb               137 drivers/net/wireless/intersil/prism54/islpci_eth.c 			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
skb               149 drivers/net/wireless/intersil/prism54/islpci_eth.c 			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
skb               151 drivers/net/wireless/intersil/prism54/islpci_eth.c 				skb_copy_from_linear_data(skb,
skb               153 drivers/net/wireless/intersil/prism54/islpci_eth.c 							  skb->len);
skb               159 drivers/net/wireless/intersil/prism54/islpci_eth.c 				skb_copy_from_linear_data(skb, newskb->data,
skb               160 drivers/net/wireless/intersil/prism54/islpci_eth.c 							  skb->len);
skb               164 drivers/net/wireless/intersil/prism54/islpci_eth.c 			      newskb->data, skb->data, skb->len, init_wds);
skb               167 drivers/net/wireless/intersil/prism54/islpci_eth.c 			newskb->dev = skb->dev;
skb               168 drivers/net/wireless/intersil/prism54/islpci_eth.c 			dev_kfree_skb_irq(skb);
skb               169 drivers/net/wireless/intersil/prism54/islpci_eth.c 			skb = newskb;
skb               174 drivers/net/wireless/intersil/prism54/islpci_eth.c 	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
skb               175 drivers/net/wireless/intersil/prism54/islpci_eth.c 	display_buffer((char *) skb->data, skb->len);
skb               180 drivers/net/wireless/intersil/prism54/islpci_eth.c 					 (void *) skb->data, skb->len,
skb               193 drivers/net/wireless/intersil/prism54/islpci_eth.c 	priv->data_low_tx[index] = skb;
skb               195 drivers/net/wireless/intersil/prism54/islpci_eth.c 	frame_size = skb->len;
skb               216 drivers/net/wireless/intersil/prism54/islpci_eth.c 	ndev->stats.tx_bytes += skb->len;
skb               229 drivers/net/wireless/intersil/prism54/islpci_eth.c 	dev_kfree_skb(skb);
skb               234 drivers/net/wireless/intersil/prism54/islpci_eth.c islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
skb               239 drivers/net/wireless/intersil/prism54/islpci_eth.c 	struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;
skb               252 drivers/net/wireless/intersil/prism54/islpci_eth.c 		skb_pull(*skb, sizeof (struct rfmon_header));
skb               254 drivers/net/wireless/intersil/prism54/islpci_eth.c 		if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) {
skb               255 drivers/net/wireless/intersil/prism54/islpci_eth.c 			struct sk_buff *newskb = skb_copy_expand(*skb,
skb               260 drivers/net/wireless/intersil/prism54/islpci_eth.c 				dev_kfree_skb_irq(*skb);
skb               261 drivers/net/wireless/intersil/prism54/islpci_eth.c 				*skb = newskb;
skb               268 drivers/net/wireless/intersil/prism54/islpci_eth.c 		avs = skb_push(*skb, sizeof(struct avs_80211_1_header));
skb               285 drivers/net/wireless/intersil/prism54/islpci_eth.c 		skb_pull(*skb, sizeof (struct rfmon_header));
skb               287 drivers/net/wireless/intersil/prism54/islpci_eth.c 	(*skb)->protocol = htons(ETH_P_802_2);
skb               288 drivers/net/wireless/intersil/prism54/islpci_eth.c 	skb_reset_mac_header(*skb);
skb               289 drivers/net/wireless/intersil/prism54/islpci_eth.c 	(*skb)->pkt_type = PACKET_OTHERHOST;
skb               299 drivers/net/wireless/intersil/prism54/islpci_eth.c 	struct sk_buff *skb;
skb               313 drivers/net/wireless/intersil/prism54/islpci_eth.c 	skb = priv->data_low_rx[index];
skb               316 drivers/net/wireless/intersil/prism54/islpci_eth.c 		  (unsigned long) skb->data) & 3;
skb               321 drivers/net/wireless/intersil/prism54/islpci_eth.c 	      control_block->rx_data_low[priv->free_data_rx].address, skb->data,
skb               322 drivers/net/wireless/intersil/prism54/islpci_eth.c 	      skb->len, offset, skb->truesize);
skb               331 drivers/net/wireless/intersil/prism54/islpci_eth.c 	skb_put(skb, size);
skb               334 drivers/net/wireless/intersil/prism54/islpci_eth.c 		skb_pull(skb, 2);
skb               335 drivers/net/wireless/intersil/prism54/islpci_eth.c 		skb_put(skb, 2);
skb               339 drivers/net/wireless/intersil/prism54/islpci_eth.c 	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
skb               340 drivers/net/wireless/intersil/prism54/islpci_eth.c 	display_buffer((char *) skb->data, skb->len);
skb               347 drivers/net/wireless/intersil/prism54/islpci_eth.c 		src = skb->data + 6;
skb               348 drivers/net/wireless/intersil/prism54/islpci_eth.c 		memmove(skb->data, src, skb->len - 6);
skb               349 drivers/net/wireless/intersil/prism54/islpci_eth.c 		skb_trim(skb, skb->len - 6);
skb               352 drivers/net/wireless/intersil/prism54/islpci_eth.c 	DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
skb               353 drivers/net/wireless/intersil/prism54/islpci_eth.c 	DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);
skb               356 drivers/net/wireless/intersil/prism54/islpci_eth.c 	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
skb               357 drivers/net/wireless/intersil/prism54/islpci_eth.c 	display_buffer((char *) skb->data, skb->len);
skb               361 drivers/net/wireless/intersil/prism54/islpci_eth.c 		skb->dev = ndev;
skb               362 drivers/net/wireless/intersil/prism54/islpci_eth.c 		discard = islpci_monitor_rx(priv, &skb);
skb               364 drivers/net/wireless/intersil/prism54/islpci_eth.c 		if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
skb               370 drivers/net/wireless/intersil/prism54/islpci_eth.c 			    (struct rx_annex_header *) skb->data;
skb               380 drivers/net/wireless/intersil/prism54/islpci_eth.c 			skb_copy_from_linear_data(skb,
skb               381 drivers/net/wireless/intersil/prism54/islpci_eth.c 						  (skb->data +
skb               384 drivers/net/wireless/intersil/prism54/islpci_eth.c 			skb_pull(skb, sizeof (struct rfmon_header));
skb               386 drivers/net/wireless/intersil/prism54/islpci_eth.c 		skb->protocol = eth_type_trans(skb, ndev);
skb               388 drivers/net/wireless/intersil/prism54/islpci_eth.c 	skb->ip_summed = CHECKSUM_NONE;
skb               396 drivers/net/wireless/intersil/prism54/islpci_eth.c 	     skb->data[0], skb->data[1], skb->data[2], skb->data[3],
skb               397 drivers/net/wireless/intersil/prism54/islpci_eth.c 	     skb->data[4], skb->data[5]);
skb               400 drivers/net/wireless/intersil/prism54/islpci_eth.c 		dev_kfree_skb_irq(skb);
skb               401 drivers/net/wireless/intersil/prism54/islpci_eth.c 		skb = NULL;
skb               403 drivers/net/wireless/intersil/prism54/islpci_eth.c 		netif_rx(skb);
skb               415 drivers/net/wireless/intersil/prism54/islpci_eth.c 		skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
skb               416 drivers/net/wireless/intersil/prism54/islpci_eth.c 		if (unlikely(skb == NULL)) {
skb               421 drivers/net/wireless/intersil/prism54/islpci_eth.c 		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
skb               424 drivers/net/wireless/intersil/prism54/islpci_eth.c 		priv->data_low_rx[index] = skb;
skb               429 drivers/net/wireless/intersil/prism54/islpci_eth.c 		      skb, skb->data, skb->len, index, skb->truesize);
skb               434 drivers/net/wireless/intersil/prism54/islpci_eth.c 		    pci_map_single(priv->pdev, (void *) skb->data,
skb               444 drivers/net/wireless/intersil/prism54/islpci_eth.c 			dev_kfree_skb_irq(skb);
skb               445 drivers/net/wireless/intersil/prism54/islpci_eth.c 			skb = NULL;
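
The islpci_eth.c entries above follow the classic PCI RX-ring refill pattern: allocate a buffer, align skb->data, DMA-map it, and park it in the ring. A minimal sketch of that pattern, reusing the driver's MAX_FRAGMENT_SIZE_RX constant and 4-byte alignment trick from the excerpts (ring bookkeeping elided):

#include <linux/skbuff.h>
#include <linux/pci.h>

/* Sketch: refill one RX slot with a freshly mapped skb. */
static struct sk_buff *rx_refill_one(struct pci_dev *pdev, dma_addr_t *dma)
{
	struct sk_buff *skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);

	if (!skb)
		return NULL;

	/* Round skb->data up to a 4-byte boundary, as the driver does. */
	skb_reserve(skb, (4 - (long)skb->data) & 0x03);

	*dma = pci_map_single(pdev, skb->data, MAX_FRAGMENT_SIZE_RX + 2,
			      PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *dma)) {
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}
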
skb               404 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb;
skb               425 drivers/net/wireless/mac80211_hwsim.c 	skb = cfg80211_vendor_event_alloc(wiphy, wdev, 100, 0, GFP_KERNEL);
skb               426 drivers/net/wireless/mac80211_hwsim.c 	if (skb) {
skb               432 drivers/net/wireless/mac80211_hwsim.c 		nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1);
skb               435 drivers/net/wireless/mac80211_hwsim.c 		cfg80211_vendor_event(skb, GFP_KERNEL);
skb               439 drivers/net/wireless/mac80211_hwsim.c 	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 10);
skb               440 drivers/net/wireless/mac80211_hwsim.c 	if (!skb)
skb               446 drivers/net/wireless/mac80211_hwsim.c 	nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 2);
skb               448 drivers/net/wireless/mac80211_hwsim.c 	return cfg80211_vendor_cmd_reply(skb);
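
The mac80211_hwsim vendor-command entries show the standard cfg80211 reply sequence: allocate a reply skb sized for the payload, append attributes, and hand it back. A condensed sketch, reusing the attribute ID from the excerpt; cfg80211_vendor_cmd_reply() consumes the skb:

#include <net/cfg80211.h>

static int vendor_cmd_reply_val(struct wiphy *wiphy, u32 val)
{
	struct sk_buff *skb;

	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 10);
	if (!skb)
		return -ENOMEM;

	if (nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}
	return cfg80211_vendor_cmd_reply(skb);
}
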
skb               625 drivers/net/wireless/mac80211_hwsim.c 				    struct sk_buff *skb,
skb               633 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb;
skb               643 drivers/net/wireless/mac80211_hwsim.c 	skb = dev_alloc_skb(sizeof(*pspoll));
skb               644 drivers/net/wireless/mac80211_hwsim.c 	if (!skb)
skb               646 drivers/net/wireless/mac80211_hwsim.c 	pspoll = skb_put(skb, sizeof(*pspoll));
skb               655 drivers/net/wireless/mac80211_hwsim.c 	mac80211_hwsim_tx_frame(data->hw, skb,
skb               664 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb;
skb               674 drivers/net/wireless/mac80211_hwsim.c 	skb = dev_alloc_skb(sizeof(*hdr));
skb               675 drivers/net/wireless/mac80211_hwsim.c 	if (!skb)
skb               677 drivers/net/wireless/mac80211_hwsim.c 	hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
skb               688 drivers/net/wireless/mac80211_hwsim.c 	mac80211_hwsim_tx_frame(data->hw, skb,
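
The pspoll/nullfunc entries build small control frames from scratch: dev_alloc_skb() sized for the header, skb_put() to claim it, then fill the fields. A sketch of the PS-Poll case mirroring the excerpts (struct ieee80211_pspoll is the standard header from linux/ieee80211.h):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* Sketch: build a PS-Poll control frame. */
static struct sk_buff *build_pspoll(const u8 *bssid, const u8 *ta, u16 aid)
{
	struct ieee80211_pspoll *pspoll;
	struct sk_buff *skb = dev_alloc_skb(sizeof(*pspoll));

	if (!skb)
		return NULL;

	pspoll = skb_put(skb, sizeof(*pspoll));
	pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					    IEEE80211_STYPE_PSPOLL |
					    IEEE80211_FCTL_PM);
	pspoll->aid = cpu_to_le16(0xc000 | aid);
	memcpy(pspoll->bssid, bssid, ETH_ALEN);
	memcpy(pspoll->ta, ta, ETH_ALEN);
	return skb;
}
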
skb               785 drivers/net/wireless/mac80211_hwsim.c static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
skb               789 drivers/net/wireless/mac80211_hwsim.c 	dev_kfree_skb(skb);
skb               834 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb;
skb               846 drivers/net/wireless/mac80211_hwsim.c 	skb = skb_copy_expand(tx_skb, sizeof(*hdr), 0, GFP_ATOMIC);
skb               847 drivers/net/wireless/mac80211_hwsim.c 	if (skb == NULL)
skb               850 drivers/net/wireless/mac80211_hwsim.c 	hdr = skb_push(skb, sizeof(*hdr));
skb               869 drivers/net/wireless/mac80211_hwsim.c 	skb->dev = hwsim_mon;
skb               870 drivers/net/wireless/mac80211_hwsim.c 	skb_reset_mac_header(skb);
skb               871 drivers/net/wireless/mac80211_hwsim.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               872 drivers/net/wireless/mac80211_hwsim.c 	skb->pkt_type = PACKET_OTHERHOST;
skb               873 drivers/net/wireless/mac80211_hwsim.c 	skb->protocol = htons(ETH_P_802_2);
skb               874 drivers/net/wireless/mac80211_hwsim.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb               875 drivers/net/wireless/mac80211_hwsim.c 	netif_rx(skb);
skb               882 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb;
skb               890 drivers/net/wireless/mac80211_hwsim.c 	skb = dev_alloc_skb(100);
skb               891 drivers/net/wireless/mac80211_hwsim.c 	if (skb == NULL)
skb               894 drivers/net/wireless/mac80211_hwsim.c 	hdr = skb_put(skb, sizeof(*hdr));
skb               906 drivers/net/wireless/mac80211_hwsim.c 	hdr11 = skb_put(skb, 10);
skb               912 drivers/net/wireless/mac80211_hwsim.c 	skb->dev = hwsim_mon;
skb               913 drivers/net/wireless/mac80211_hwsim.c 	skb_reset_mac_header(skb);
skb               914 drivers/net/wireless/mac80211_hwsim.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               915 drivers/net/wireless/mac80211_hwsim.c 	skb->pkt_type = PACKET_OTHERHOST;
skb               916 drivers/net/wireless/mac80211_hwsim.c 	skb->protocol = htons(ETH_P_802_2);
skb               917 drivers/net/wireless/mac80211_hwsim.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb               918 drivers/net/wireless/mac80211_hwsim.c 	netif_rx(skb);
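
Both hwsim monitor paths above end with the same handoff: point skb->dev at the monitor netdev, reset the MAC header, mark checksum, packet type, and protocol, clear the control buffer, and deliver via netif_rx(). A sketch of that injection step:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Sketch: inject a fully built radiotap frame into a monitor netdev. */
static void monitor_inject(struct net_device *mon, struct sk_buff *skb)
{
	skb->dev = mon;
	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));
	netif_rx(skb);
}
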
skb               956 drivers/net/wireless/mac80211_hwsim.c 			   struct sk_buff *skb)
skb               971 drivers/net/wireless/mac80211_hwsim.c 		    mac80211_hwsim_addr_match(data, skb->data + 4)) {
skb               982 drivers/net/wireless/mac80211_hwsim.c 				  struct sk_buff *skb, int portid)
skb               991 drivers/net/wireless/mac80211_hwsim.c 			res = genlmsg_unicast(net, skb, portid);
skb               999 drivers/net/wireless/mac80211_hwsim.c 		nlmsg_free(skb);
skb              1038 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb;
skb              1060 drivers/net/wireless/mac80211_hwsim.c 	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb              1061 drivers/net/wireless/mac80211_hwsim.c 	if (skb == NULL)
skb              1064 drivers/net/wireless/mac80211_hwsim.c 	msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
skb              1071 drivers/net/wireless/mac80211_hwsim.c 	if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
skb              1076 drivers/net/wireless/mac80211_hwsim.c 	if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
skb              1088 drivers/net/wireless/mac80211_hwsim.c 	if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
skb              1091 drivers/net/wireless/mac80211_hwsim.c 	if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq))
skb              1105 drivers/net/wireless/mac80211_hwsim.c 	if (nla_put(skb, HWSIM_ATTR_TX_INFO,
skb              1110 drivers/net/wireless/mac80211_hwsim.c 	if (nla_put(skb, HWSIM_ATTR_TX_INFO_FLAGS,
skb              1119 drivers/net/wireless/mac80211_hwsim.c 	if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
skb              1122 drivers/net/wireless/mac80211_hwsim.c 	genlmsg_end(skb, msg_head);
skb              1123 drivers/net/wireless/mac80211_hwsim.c 	if (hwsim_unicast_netgroup(data, skb, dst_portid))
skb              1133 drivers/net/wireless/mac80211_hwsim.c 	nlmsg_free(skb);
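
The frame-to-userspace path above is a textbook generic-netlink construction: genlmsg_new(), genlmsg_put(), a run of checked nla_put*() calls, genlmsg_end(), then unicast; on any failure the half-built message is dropped with nlmsg_free(). A sketch under that shape, with placeholder command and attribute numbers (not hwsim's):

#include <net/genetlink.h>

#define MY_CMD_FRAME	1	/* placeholder command number */
#define MY_ATTR_FRAME	2	/* placeholder attribute number */

static int send_frame_nl(struct genl_family *family, struct net *net,
			 u32 portid, const void *data, int len)
{
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hdr = genlmsg_put(skb, 0, 0, family, 0, MY_CMD_FRAME);
	if (!hdr)
		goto fail;
	if (nla_put(skb, MY_ATTR_FRAME, len, data))
		goto fail;

	genlmsg_end(skb, hdr);
	return genlmsg_unicast(net, skb, portid);

fail:
	nlmsg_free(skb);
	return -ENOBUFS;
}
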
skb              1169 drivers/net/wireless/mac80211_hwsim.c static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb)
skb              1188 drivers/net/wireless/mac80211_hwsim.c 	rtap = skb_push(skb, sizeof(*rtap) + 8 + 4);
skb              1212 drivers/net/wireless/mac80211_hwsim.c 	IEEE80211_SKB_RXCB(skb)->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
skb              1217 drivers/net/wireless/mac80211_hwsim.c 					  struct sk_buff *skb,
skb              1222 drivers/net/wireless/mac80211_hwsim.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1223 drivers/net/wireless/mac80211_hwsim.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1261 drivers/net/wireless/mac80211_hwsim.c 	skb_orphan(skb);
skb              1262 drivers/net/wireless/mac80211_hwsim.c 	skb_dst_drop(skb);
skb              1263 drivers/net/wireless/mac80211_hwsim.c 	skb->mark = 0;
skb              1264 drivers/net/wireless/mac80211_hwsim.c 	skb_ext_reset(skb);
skb              1265 drivers/net/wireless/mac80211_hwsim.c 	nf_reset_ct(skb);
skb              1295 drivers/net/wireless/mac80211_hwsim.c 		    !hwsim_ps_rx_ok(data2, skb))
skb              1317 drivers/net/wireless/mac80211_hwsim.c 		if (skb->len < PAGE_SIZE && paged_rx) {
skb              1329 drivers/net/wireless/mac80211_hwsim.c 			memcpy(page_address(page), skb->data, skb->len);
skb              1330 drivers/net/wireless/mac80211_hwsim.c 			skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len);
skb              1332 drivers/net/wireless/mac80211_hwsim.c 			nskb = skb_copy(skb, GFP_ATOMIC);
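
When delivering a frame to another radio, hwsim either copies the payload into a page fragment of a fresh skb (alloc_page + memcpy + skb_add_rx_frag) or falls back to a plain skb_copy(). A sketch of the paged variant, assuming the payload fits in one page:

#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Sketch: copy skb payload into a single page fragment of a new skb. */
static struct sk_buff *copy_as_paged(struct sk_buff *skb)
{
	struct sk_buff *nskb = dev_alloc_skb(128);
	struct page *page;

	if (!nskb)
		return NULL;
	page = alloc_page(GFP_ATOMIC);
	if (!page) {
		dev_kfree_skb(nskb);
		return NULL;
	}
	memcpy(page_address(page), skb->data, skb->len);
	skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len);
	return nskb;
}
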
skb              1357 drivers/net/wireless/mac80211_hwsim.c 			      struct sk_buff *skb)
skb              1360 drivers/net/wireless/mac80211_hwsim.c 	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
skb              1361 drivers/net/wireless/mac80211_hwsim.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb              1367 drivers/net/wireless/mac80211_hwsim.c 	if (WARN_ON(skb->len < 10)) {
skb              1369 drivers/net/wireless/mac80211_hwsim.c 		ieee80211_free_txskb(hw, skb);
skb              1386 drivers/net/wireless/mac80211_hwsim.c 		ieee80211_free_txskb(hw, skb);
skb              1392 drivers/net/wireless/mac80211_hwsim.c 		ieee80211_free_txskb(hw, skb);
skb              1402 drivers/net/wireless/mac80211_hwsim.c 		ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
skb              1406 drivers/net/wireless/mac80211_hwsim.c 	if (skb->len >= 24 + 8 &&
skb              1413 drivers/net/wireless/mac80211_hwsim.c 		mgmt = (struct ieee80211_mgmt *)skb->data;
skb              1421 drivers/net/wireless/mac80211_hwsim.c 	mac80211_hwsim_monitor_rx(hw, skb, channel);
skb              1427 drivers/net/wireless/mac80211_hwsim.c 		return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
skb              1431 drivers/net/wireless/mac80211_hwsim.c 	data->tx_bytes += skb->len;
skb              1432 drivers/net/wireless/mac80211_hwsim.c 	ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
skb              1434 drivers/net/wireless/mac80211_hwsim.c 	if (ack && skb->len >= 16)
skb              1445 drivers/net/wireless/mac80211_hwsim.c 	ieee80211_tx_status_irqsafe(hw, skb);
skb              1517 drivers/net/wireless/mac80211_hwsim.c 				    struct sk_buff *skb,
skb              1524 drivers/net/wireless/mac80211_hwsim.c 		struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
skb              1525 drivers/net/wireless/mac80211_hwsim.c 		ieee80211_get_tx_rates(txi->control.vif, NULL, skb,
skb              1530 drivers/net/wireless/mac80211_hwsim.c 	mac80211_hwsim_monitor_rx(hw, skb, chan);
skb              1533 drivers/net/wireless/mac80211_hwsim.c 		return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
skb              1535 drivers/net/wireless/mac80211_hwsim.c 	mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
skb              1536 drivers/net/wireless/mac80211_hwsim.c 	dev_kfree_skb(skb);
skb              1547 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb;
skb              1556 drivers/net/wireless/mac80211_hwsim.c 	skb = ieee80211_beacon_get(hw, vif);
skb              1557 drivers/net/wireless/mac80211_hwsim.c 	if (skb == NULL)
skb              1559 drivers/net/wireless/mac80211_hwsim.c 	info = IEEE80211_SKB_CB(skb);
skb              1561 drivers/net/wireless/mac80211_hwsim.c 		ieee80211_get_tx_rates(vif, NULL, skb,
skb              1567 drivers/net/wireless/mac80211_hwsim.c 	mgmt = (struct ieee80211_mgmt *) skb->data;
skb              1574 drivers/net/wireless/mac80211_hwsim.c 	mac80211_hwsim_tx_frame(hw, skb,
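
The beacon entries pull a ready-made beacon from mac80211 with ieee80211_beacon_get(), stamp rates into its control block, and feed it to the normal TX-frame helper. A sketch of the fetch-and-stamp step; the actual transmit is elided:

#include <net/mac80211.h>
#include <linux/kernel.h>

/* Sketch: fetch one beacon for a vif and fill its rate table. */
static void beacon_tick(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;		/* vif is not beaconing */

	info = IEEE80211_SKB_CB(skb);
	ieee80211_get_tx_rates(vif, NULL, skb, info->control.rates,
			       ARRAY_SIZE(info->control.rates));
	/* ... hand skb to the driver's TX-frame path ... */
	dev_kfree_skb(skb);	/* sketch only: consume the beacon */
}
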
skb              1931 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb;
skb              1949 drivers/net/wireless/mac80211_hwsim.c 		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
skb              1951 drivers/net/wireless/mac80211_hwsim.c 		if (!skb)
skb              1953 drivers/net/wireless/mac80211_hwsim.c 		if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
skb              1955 drivers/net/wireless/mac80211_hwsim.c 		return cfg80211_testmode_reply(skb);
skb              1967 drivers/net/wireless/mac80211_hwsim.c 	kfree_skb(skb);
skb              2409 drivers/net/wireless/mac80211_hwsim.c static int append_radio_msg(struct sk_buff *skb, int id,
skb              2414 drivers/net/wireless/mac80211_hwsim.c 	ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id);
skb              2419 drivers/net/wireless/mac80211_hwsim.c 		ret = nla_put_u32(skb, HWSIM_ATTR_CHANNELS, param->channels);
skb              2425 drivers/net/wireless/mac80211_hwsim.c 		ret = nla_put(skb, HWSIM_ATTR_REG_HINT_ALPHA2, 2,
skb              2438 drivers/net/wireless/mac80211_hwsim.c 			ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i);
skb              2446 drivers/net/wireless/mac80211_hwsim.c 		ret = nla_put_flag(skb, HWSIM_ATTR_REG_STRICT_REG);
skb              2452 drivers/net/wireless/mac80211_hwsim.c 		ret = nla_put_flag(skb, HWSIM_ATTR_SUPPORT_P2P_DEVICE);
skb              2458 drivers/net/wireless/mac80211_hwsim.c 		ret = nla_put_flag(skb, HWSIM_ATTR_USE_CHANCTX);
skb              2464 drivers/net/wireless/mac80211_hwsim.c 		ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME,
skb              3092 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb;
skb              3096 drivers/net/wireless/mac80211_hwsim.c 	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb              3097 drivers/net/wireless/mac80211_hwsim.c 	if (!skb)
skb              3100 drivers/net/wireless/mac80211_hwsim.c 	data = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
skb              3105 drivers/net/wireless/mac80211_hwsim.c 	ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id);
skb              3109 drivers/net/wireless/mac80211_hwsim.c 	ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(hwname),
skb              3114 drivers/net/wireless/mac80211_hwsim.c 	genlmsg_end(skb, data);
skb              3116 drivers/net/wireless/mac80211_hwsim.c 	hwsim_mcast_config_msg(skb, info);
skb              3121 drivers/net/wireless/mac80211_hwsim.c 	nlmsg_free(skb);
skb              3136 drivers/net/wireless/mac80211_hwsim.c static int mac80211_hwsim_get_radio(struct sk_buff *skb,
skb              3145 drivers/net/wireless/mac80211_hwsim.c 	hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags,
skb              3165 drivers/net/wireless/mac80211_hwsim.c 	res = append_radio_msg(skb, data->idx, &param);
skb              3169 drivers/net/wireless/mac80211_hwsim.c 	genlmsg_end(skb, hdr);
skb              3173 drivers/net/wireless/mac80211_hwsim.c 	genlmsg_cancel(skb, hdr);
skb              3242 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb, *tmp;
skb              3270 drivers/net/wireless/mac80211_hwsim.c 	skb_queue_walk_safe(&data2->pending, skb, tmp) {
skb              3273 drivers/net/wireless/mac80211_hwsim.c 		txi = IEEE80211_SKB_CB(skb);
skb              3277 drivers/net/wireless/mac80211_hwsim.c 			skb_unlink(skb, &data2->pending);
skb              3294 drivers/net/wireless/mac80211_hwsim.c 	txi = IEEE80211_SKB_CB(skb);
skb              3307 drivers/net/wireless/mac80211_hwsim.c 		if (skb->len >= 16) {
skb              3308 drivers/net/wireless/mac80211_hwsim.c 			hdr = (struct ieee80211_hdr *) skb->data;
skb              3314 drivers/net/wireless/mac80211_hwsim.c 	ieee80211_tx_status_irqsafe(data2->hw, skb);
skb              3330 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb = NULL;
skb              3343 drivers/net/wireless/mac80211_hwsim.c 	skb = alloc_skb(frame_data_len, GFP_KERNEL);
skb              3344 drivers/net/wireless/mac80211_hwsim.c 	if (skb == NULL)
skb              3351 drivers/net/wireless/mac80211_hwsim.c 	skb_put_data(skb, frame_data, frame_data_len);
skb              3393 drivers/net/wireless/mac80211_hwsim.c 	hdr = (void *)skb->data;
skb              3399 drivers/net/wireless/mac80211_hwsim.c 	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
skb              3401 drivers/net/wireless/mac80211_hwsim.c 	data2->rx_bytes += skb->len;
skb              3402 drivers/net/wireless/mac80211_hwsim.c 	ieee80211_rx_irqsafe(data2->hw, skb);
skb              3408 drivers/net/wireless/mac80211_hwsim.c 	dev_kfree_skb(skb);
skb              3641 drivers/net/wireless/mac80211_hwsim.c 	struct sk_buff *skb;
skb              3656 drivers/net/wireless/mac80211_hwsim.c 		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb              3657 drivers/net/wireless/mac80211_hwsim.c 		if (!skb) {
skb              3662 drivers/net/wireless/mac80211_hwsim.c 		res = mac80211_hwsim_get_radio(skb, data, info->snd_portid,
skb              3665 drivers/net/wireless/mac80211_hwsim.c 			nlmsg_free(skb);
skb              3669 drivers/net/wireless/mac80211_hwsim.c 		res = genlmsg_reply(skb, info);
skb              3679 drivers/net/wireless/mac80211_hwsim.c static int hwsim_dump_radio_nl(struct sk_buff *skb,
skb              3697 drivers/net/wireless/mac80211_hwsim.c 		if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk)))
skb              3700 drivers/net/wireless/mac80211_hwsim.c 		res = mac80211_hwsim_get_radio(skb, data,
skb              3701 drivers/net/wireless/mac80211_hwsim.c 					       NETLINK_CB(cb->skb).portid,
skb              3713 drivers/net/wireless/mac80211_hwsim.c 	if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
skb              3714 drivers/net/wireless/mac80211_hwsim.c 		hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
skb              3719 drivers/net/wireless/mac80211_hwsim.c 			genlmsg_end(skb, hdr);
skb              3727 drivers/net/wireless/mac80211_hwsim.c 	return res ?: skb->len;
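
The dump entries above illustrate the genetlink dumpit convention: append one message per radio until the skb fills up, remember the resume index in cb->args, and return skb->len (or the error). A sketch under that convention; n_radios and put_one_radio() are hypothetical stand-ins for the per-radio builder:

#include <net/genetlink.h>

extern int n_radios;					/* placeholder */
extern int put_one_radio(struct sk_buff *skb, int idx,	/* placeholder */
			 u32 portid, u32 seq);

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx = cb->args[0];
	int res = 0;

	for (; idx < n_radios; idx++) {
		res = put_one_radio(skb, idx, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq);
		if (res < 0)
			break;		/* skb full: resume here next call */
	}
	cb->args[0] = idx;
	/* Convention: return bytes written, or the error. */
	return res ?: skb->len;
}
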
skb                38 drivers/net/wireless/marvell/libertas/decl.h netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb,
skb               449 drivers/net/wireless/marvell/libertas/if_cs.c 	struct sk_buff *skb = NULL;
skb               462 drivers/net/wireless/marvell/libertas/if_cs.c 	skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + 2);
skb               463 drivers/net/wireless/marvell/libertas/if_cs.c 	if (!skb)
skb               465 drivers/net/wireless/marvell/libertas/if_cs.c 	skb_put(skb, len);
skb               466 drivers/net/wireless/marvell/libertas/if_cs.c 	skb_reserve(skb, 2);/* 16 byte align */
skb               467 drivers/net/wireless/marvell/libertas/if_cs.c 	data = skb->data;
skb               479 drivers/net/wireless/marvell/libertas/if_cs.c 	return skb;
skb               504 drivers/net/wireless/marvell/libertas/if_cs.c 		struct sk_buff *skb;
skb               506 drivers/net/wireless/marvell/libertas/if_cs.c 		skb = if_cs_receive_data(priv);
skb               507 drivers/net/wireless/marvell/libertas/if_cs.c 		if (skb)
skb               508 drivers/net/wireless/marvell/libertas/if_cs.c 			lbs_process_rxed_packet(priv, skb);
skb               237 drivers/net/wireless/marvell/libertas/if_sdio.c 	struct sk_buff *skb;
skb               246 drivers/net/wireless/marvell/libertas/if_sdio.c 	skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + NET_IP_ALIGN);
skb               247 drivers/net/wireless/marvell/libertas/if_sdio.c 	if (!skb) {
skb               252 drivers/net/wireless/marvell/libertas/if_sdio.c 	skb_reserve(skb, NET_IP_ALIGN);
skb               254 drivers/net/wireless/marvell/libertas/if_sdio.c 	skb_put_data(skb, buffer, size);
skb               256 drivers/net/wireless/marvell/libertas/if_sdio.c 	lbs_process_rxed_packet(card->priv, skb);
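
The if_sdio entries show the canonical IP-alignment recipe: allocate with NET_IP_ALIGN slack, reserve it so the IP header after the 14-byte Ethernet header lands on a 4-byte boundary, then copy the payload in with skb_put_data(). A minimal sketch:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Sketch: wrap a received buffer in an IP-aligned skb. */
static struct sk_buff *wrap_rx(const void *buf, u16 size, u16 bufsz)
{
	struct sk_buff *skb = dev_alloc_skb(bufsz + NET_IP_ALIGN);

	if (!skb)
		return NULL;
	skb_reserve(skb, NET_IP_ALIGN);
	skb_put_data(skb, buf, size);
	return skb;
}
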
skb               736 drivers/net/wireless/marvell/libertas/if_spi.c 	struct sk_buff *skb;
skb               759 drivers/net/wireless/marvell/libertas/if_spi.c 	skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
skb               760 drivers/net/wireless/marvell/libertas/if_spi.c 	if (!skb) {
skb               764 drivers/net/wireless/marvell/libertas/if_spi.c 	skb_reserve(skb, IPFIELD_ALIGN_OFFSET);
skb               765 drivers/net/wireless/marvell/libertas/if_spi.c 	data = skb_put(skb, len);
skb               770 drivers/net/wireless/marvell/libertas/if_spi.c 		dev_kfree_skb(skb);
skb               775 drivers/net/wireless/marvell/libertas/if_spi.c 	err = lbs_process_rxed_packet(card->priv, skb);
skb               447 drivers/net/wireless/marvell/libertas/if_usb.c 	struct sk_buff *skb;
skb               450 drivers/net/wireless/marvell/libertas/if_usb.c 	if (!(skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE))) {
skb               455 drivers/net/wireless/marvell/libertas/if_usb.c 	cardp->rx_skb = skb;
skb               460 drivers/net/wireless/marvell/libertas/if_usb.c 			  skb->data + IPFIELD_ALIGN_OFFSET,
skb               467 drivers/net/wireless/marvell/libertas/if_usb.c 		kfree_skb(skb);
skb               492 drivers/net/wireless/marvell/libertas/if_usb.c 	struct sk_buff *skb = cardp->rx_skb;
skb               499 drivers/net/wireless/marvell/libertas/if_usb.c 		kfree_skb(skb);
skb               504 drivers/net/wireless/marvell/libertas/if_usb.c 		__le32 *tmp = (__le32 *)(skb->data + IPFIELD_ALIGN_OFFSET);
skb               515 drivers/net/wireless/marvell/libertas/if_usb.c 		kfree_skb(skb);
skb               519 drivers/net/wireless/marvell/libertas/if_usb.c 		memcpy (&bootcmdresp, skb->data + IPFIELD_ALIGN_OFFSET,
skb               523 drivers/net/wireless/marvell/libertas/if_usb.c 			kfree_skb(skb);
skb               554 drivers/net/wireless/marvell/libertas/if_usb.c 		kfree_skb(skb);
skb               559 drivers/net/wireless/marvell/libertas/if_usb.c 	syncfwheader = kmemdup(skb->data + IPFIELD_ALIGN_OFFSET,
skb               563 drivers/net/wireless/marvell/libertas/if_usb.c 		kfree_skb(skb);
skb               577 drivers/net/wireless/marvell/libertas/if_usb.c 	kfree_skb(skb);
skb               597 drivers/net/wireless/marvell/libertas/if_usb.c static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
skb               604 drivers/net/wireless/marvell/libertas/if_usb.c 		kfree_skb(skb);
skb               608 drivers/net/wireless/marvell/libertas/if_usb.c 	skb_reserve(skb, IPFIELD_ALIGN_OFFSET);
skb               609 drivers/net/wireless/marvell/libertas/if_usb.c 	skb_put(skb, recvlength);
skb               610 drivers/net/wireless/marvell/libertas/if_usb.c 	skb_pull(skb, MESSAGE_HEADER_LEN);
skb               612 drivers/net/wireless/marvell/libertas/if_usb.c 	lbs_process_rxed_packet(priv, skb);
skb               616 drivers/net/wireless/marvell/libertas/if_usb.c 				      struct sk_buff *skb,
skb               626 drivers/net/wireless/marvell/libertas/if_usb.c 		kfree_skb(skb);
skb               637 drivers/net/wireless/marvell/libertas/if_usb.c 	kfree_skb(skb);
skb               656 drivers/net/wireless/marvell/libertas/if_usb.c 	struct sk_buff *skb = cardp->rx_skb;
skb               661 drivers/net/wireless/marvell/libertas/if_usb.c 	__le32 *pkt = (__le32 *)(skb->data + IPFIELD_ALIGN_OFFSET);
skb               668 drivers/net/wireless/marvell/libertas/if_usb.c 			kfree_skb(skb);
skb               672 drivers/net/wireless/marvell/libertas/if_usb.c 		recvbuff = skb->data + IPFIELD_ALIGN_OFFSET;
skb               678 drivers/net/wireless/marvell/libertas/if_usb.c 		kfree_skb(skb);
skb               684 drivers/net/wireless/marvell/libertas/if_usb.c 		process_cmdtypedata(recvlength, skb, cardp, priv);
skb               688 drivers/net/wireless/marvell/libertas/if_usb.c 		process_cmdrequest(recvlength, recvbuff, skb, cardp, priv);
skb               695 drivers/net/wireless/marvell/libertas/if_usb.c 		kfree_skb(skb);
skb               709 drivers/net/wireless/marvell/libertas/if_usb.c 		kfree_skb(skb);
skb                47 drivers/net/wireless/marvell/libertas/rx.c 	struct sk_buff *skb);
skb                57 drivers/net/wireless/marvell/libertas/rx.c int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
skb                69 drivers/net/wireless/marvell/libertas/rx.c 	BUG_ON(!skb);
skb                71 drivers/net/wireless/marvell/libertas/rx.c 	skb->ip_summed = CHECKSUM_NONE;
skb                74 drivers/net/wireless/marvell/libertas/rx.c 		ret = process_rxed_802_11_packet(priv, skb);
skb                78 drivers/net/wireless/marvell/libertas/rx.c 	p_rx_pd = (struct rxpd *) skb->data;
skb                84 drivers/net/wireless/marvell/libertas/rx.c 	lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data,
skb                85 drivers/net/wireless/marvell/libertas/rx.c 		 min_t(unsigned int, skb->len, 100));
skb                87 drivers/net/wireless/marvell/libertas/rx.c 	if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
skb                91 drivers/net/wireless/marvell/libertas/rx.c 		dev_kfree_skb(skb);
skb                96 drivers/net/wireless/marvell/libertas/rx.c 		skb->len, (size_t)le32_to_cpu(p_rx_pd->pkt_ptr),
skb                97 drivers/net/wireless/marvell/libertas/rx.c 		skb->len - (size_t)le32_to_cpu(p_rx_pd->pkt_ptr));
skb               144 drivers/net/wireless/marvell/libertas/rx.c 	skb_pull(skb, hdrchop);
skb               148 drivers/net/wireless/marvell/libertas/rx.c 	lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
skb               149 drivers/net/wireless/marvell/libertas/rx.c 	dev->stats.rx_bytes += skb->len;
skb               152 drivers/net/wireless/marvell/libertas/rx.c 	skb->protocol = eth_type_trans(skb, dev);
skb               154 drivers/net/wireless/marvell/libertas/rx.c 		netif_rx(skb);
skb               156 drivers/net/wireless/marvell/libertas/rx.c 		netif_rx_ni(skb);
skb               213 drivers/net/wireless/marvell/libertas/rx.c 	struct sk_buff *skb)
skb               222 drivers/net/wireless/marvell/libertas/rx.c 	p_rx_pkt = (struct rx80211packethdr *) skb->data;
skb               227 drivers/net/wireless/marvell/libertas/rx.c 	if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
skb               231 drivers/net/wireless/marvell/libertas/rx.c 		kfree_skb(skb);
skb               236 drivers/net/wireless/marvell/libertas/rx.c 	       skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
skb               250 drivers/net/wireless/marvell/libertas/rx.c 	skb_pull(skb, sizeof(struct rxpd));
skb               253 drivers/net/wireless/marvell/libertas/rx.c 	if ((skb_headroom(skb) < sizeof(struct rx_radiotap_hdr)) &&
skb               254 drivers/net/wireless/marvell/libertas/rx.c 	    pskb_expand_head(skb, sizeof(struct rx_radiotap_hdr), 0, GFP_ATOMIC)) {
skb               257 drivers/net/wireless/marvell/libertas/rx.c 		kfree_skb(skb);
skb               261 drivers/net/wireless/marvell/libertas/rx.c 	pradiotap_hdr = skb_push(skb, sizeof(struct rx_radiotap_hdr));
skb               266 drivers/net/wireless/marvell/libertas/rx.c 	lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
skb               267 drivers/net/wireless/marvell/libertas/rx.c 	dev->stats.rx_bytes += skb->len;
skb               270 drivers/net/wireless/marvell/libertas/rx.c 	skb->protocol = eth_type_trans(skb, priv->dev);
skb               273 drivers/net/wireless/marvell/libertas/rx.c 		netif_rx(skb);
skb               275 drivers/net/wireless/marvell/libertas/rx.c 		netif_rx_ni(skb);
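
Both libertas rx paths share one shape: reject frames shorter than the hardware descriptor plus an Ethernet header, chop the descriptor off the front with skb_pull(), resolve skb->protocol with eth_type_trans(), bump the stats, and deliver with netif_rx() from interrupt context or netif_rx_ni() otherwise. A sketch of that delivery step, with hdr_len standing in for the driver's rxpd size:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/preempt.h>

/* Sketch: strip a hw descriptor of hdr_len bytes and deliver upstream. */
static int deliver_rx(struct net_device *dev, struct sk_buff *skb,
		      unsigned int hdr_len)
{
	if (skb->len < hdr_len + ETH_HLEN) {
		dev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return -EINVAL;
	}
	skb_pull(skb, hdr_len);
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;
	skb->protocol = eth_type_trans(skb, dev);
	if (in_interrupt())
		netif_rx(skb);
	else
		netif_rx_ni(skb);
	return 0;
}
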
skb                65 drivers/net/wireless/marvell/libertas/tx.c netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb                81 drivers/net/wireless/marvell/libertas/tx.c 	if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) {
skb                83 drivers/net/wireless/marvell/libertas/tx.c 		       skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE);
skb               108 drivers/net/wireless/marvell/libertas/tx.c 	lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));
skb               113 drivers/net/wireless/marvell/libertas/tx.c 	p802x_hdr = skb->data;
skb               114 drivers/net/wireless/marvell/libertas/tx.c 	pkt_len = skb->len;
skb               117 drivers/net/wireless/marvell/libertas/tx.c 		struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data;
skb               150 drivers/net/wireless/marvell/libertas/tx.c 	dev->stats.tx_bytes += skb->len;
skb               155 drivers/net/wireless/marvell/libertas/tx.c 		skb_orphan(skb);
skb               158 drivers/net/wireless/marvell/libertas/tx.c 		priv->currenttxskb = skb;
skb               161 drivers/net/wireless/marvell/libertas/tx.c 		dev_kfree_skb_any(skb);
skb               413 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	struct sk_buff *skb;
skb               418 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
skb               419 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	if (!skb) {
skb               425 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	cardp->rx_skb = skb;
skb               430 drivers/net/wireless/marvell/libertas_tf/if_usb.c 			  skb_tail_pointer(skb),
skb               439 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		kfree_skb(skb);
skb               463 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	struct sk_buff *skb = cardp->rx_skb;
skb               471 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		kfree_skb(skb);
skb               477 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		__le32 *tmp = (__le32 *)(skb->data);
skb               489 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		kfree_skb(skb);
skb               494 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		memcpy(&bcmdresp, skb->data, sizeof(bcmdresp));
skb               497 drivers/net/wireless/marvell/libertas_tf/if_usb.c 			kfree_skb(skb);
skb               529 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		kfree_skb(skb);
skb               535 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	syncfwheader = kmemdup(skb->data, sizeof(struct fwsyncheader),
skb               540 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		kfree_skb(skb);
skb               558 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	kfree_skb(skb);
skb               580 drivers/net/wireless/marvell/libertas_tf/if_usb.c static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
skb               587 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		kfree_skb(skb);
skb               591 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	skb_put(skb, recvlength);
skb               592 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	skb_pull(skb, MESSAGE_HEADER_LEN);
skb               593 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	lbtf_rx(priv, skb);
skb               597 drivers/net/wireless/marvell/libertas_tf/if_usb.c 				      struct sk_buff *skb,
skb               607 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		kfree_skb(skb);
skb               614 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	kfree_skb(skb);
skb               627 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	struct sk_buff *skb = cardp->rx_skb;
skb               632 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	__le32 *pkt = (__le32 *) skb->data;
skb               640 drivers/net/wireless/marvell/libertas_tf/if_usb.c 			kfree_skb(skb);
skb               644 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		recvbuff = skb->data;
skb               650 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		kfree_skb(skb);
skb               657 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		process_cmdtypedata(recvlength, skb, cardp, priv);
skb               661 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		process_cmdrequest(recvlength, recvbuff, skb, cardp, priv);
skb               687 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		kfree_skb(skb);
skb               693 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		kfree_skb(skb);
skb               492 drivers/net/wireless/marvell/libertas_tf/libertas_tf.h int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb);
skb               184 drivers/net/wireless/marvell/libertas_tf/main.c 		       struct sk_buff *skb)
skb               188 drivers/net/wireless/marvell/libertas_tf/main.c 	priv->skb_to_tx = skb;
skb               204 drivers/net/wireless/marvell/libertas_tf/main.c 	struct sk_buff *skb = NULL;
skb               211 drivers/net/wireless/marvell/libertas_tf/main.c 		skb = skb_dequeue(&priv->bc_ps_buf);
skb               213 drivers/net/wireless/marvell/libertas_tf/main.c 		skb = priv->skb_to_tx;
skb               220 drivers/net/wireless/marvell/libertas_tf/main.c 	len = skb->len;
skb               221 drivers/net/wireless/marvell/libertas_tf/main.c 	info  = IEEE80211_SKB_CB(skb);
skb               222 drivers/net/wireless/marvell/libertas_tf/main.c 	txpd = skb_push(skb, sizeof(struct txpd));
skb               225 drivers/net/wireless/marvell/libertas_tf/main.c 		dev_kfree_skb_any(skb);
skb               236 drivers/net/wireless/marvell/libertas_tf/main.c 	memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4,
skb               240 drivers/net/wireless/marvell/libertas_tf/main.c 	lbtf_deb_hex(LBTF_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));
skb               243 drivers/net/wireless/marvell/libertas_tf/main.c 	priv->tx_skb = skb;
skb               244 drivers/net/wireless/marvell/libertas_tf/main.c 	err = priv->ops->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len);
skb               247 drivers/net/wireless/marvell/libertas_tf/main.c 		dev_kfree_skb_any(skb);
skb               274 drivers/net/wireless/marvell/libertas_tf/main.c 	struct sk_buff *skb;
skb               291 drivers/net/wireless/marvell/libertas_tf/main.c 	while ((skb = skb_dequeue(&priv->bc_ps_buf)))
skb               292 drivers/net/wireless/marvell/libertas_tf/main.c 		dev_kfree_skb_any(skb);
skb               488 drivers/net/wireless/marvell/libertas_tf/main.c int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
skb               502 drivers/net/wireless/marvell/libertas_tf/main.c 	prxpd = (struct rxpd *) skb->data;
skb               515 drivers/net/wireless/marvell/libertas_tf/main.c 	skb_pull(skb, sizeof(struct rxpd));
skb               517 drivers/net/wireless/marvell/libertas_tf/main.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               526 drivers/net/wireless/marvell/libertas_tf/main.c 		memmove(skb->data + 2, skb->data, skb->len);
skb               527 drivers/net/wireless/marvell/libertas_tf/main.c 		skb_reserve(skb, 2);
skb               530 drivers/net/wireless/marvell/libertas_tf/main.c 	memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
skb               533 drivers/net/wireless/marvell/libertas_tf/main.c 	       skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
skb               534 drivers/net/wireless/marvell/libertas_tf/main.c 	lbtf_deb_hex(LBTF_DEB_RX, "RX Data", skb->data,
skb               535 drivers/net/wireless/marvell/libertas_tf/main.c 	             min_t(unsigned int, skb->len, 100));
skb               537 drivers/net/wireless/marvell/libertas_tf/main.c 	ieee80211_rx_irqsafe(priv->hw, skb);
skb               678 drivers/net/wireless/marvell/libertas_tf/main.c 	struct sk_buff *skb = NULL;
skb               686 drivers/net/wireless/marvell/libertas_tf/main.c 		while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
skb               687 drivers/net/wireless/marvell/libertas_tf/main.c 			skb_queue_tail(&priv->bc_ps_buf, skb);
skb               696 drivers/net/wireless/marvell/libertas_tf/main.c 	skb = ieee80211_beacon_get(priv->hw, priv->vif);
skb               698 drivers/net/wireless/marvell/libertas_tf/main.c 	if (skb) {
skb               699 drivers/net/wireless/marvell/libertas_tf/main.c 		lbtf_beacon_set(priv, skb);
skb               700 drivers/net/wireless/marvell/libertas_tf/main.c 		kfree_skb(skb);
skb               206 drivers/net/wireless/marvell/mwifiex/11h.c 				     struct sk_buff *skb)
skb               213 drivers/net/wireless/marvell/mwifiex/11h.c 	rpt_event = (void *)(skb->data + sizeof(u32));
skb               214 drivers/net/wireless/marvell/mwifiex/11h.c 	event_len = skb->len - (sizeof(struct host_cmd_ds_chan_rpt_event)+
skb               255 drivers/net/wireless/marvell/mwifiex/11h.c 				      struct sk_buff *skb)
skb               259 drivers/net/wireless/marvell/mwifiex/11h.c 	rdr_event = (void *)(skb->data + sizeof(u32));
skb               100 drivers/net/wireless/marvell/mwifiex/11n_aggr.c 			    struct sk_buff *skb)
skb               103 drivers/net/wireless/marvell/mwifiex/11n_aggr.c 	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
skb               105 drivers/net/wireless/marvell/mwifiex/11n_aggr.c 	skb_push(skb, sizeof(*local_tx_pd));
skb               107 drivers/net/wireless/marvell/mwifiex/11n_aggr.c 	local_tx_pd = (struct txpd *) skb->data;
skb               111 drivers/net/wireless/marvell/mwifiex/11n_aggr.c 	local_tx_pd->priority = (u8) skb->priority;
skb               113 drivers/net/wireless/marvell/mwifiex/11n_aggr.c 		mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
skb               119 drivers/net/wireless/marvell/mwifiex/11n_aggr.c 	local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
skb                27 drivers/net/wireless/marvell/mwifiex/11n_aggr.h 				struct sk_buff *skb);
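
The 11n_aggr entries prepend the firmware TX descriptor in-place: skb_push() claims room at the head, the descriptor is filled from skb fields, and tx_pkt_length is the skb length minus the descriptor just pushed. A sketch with an illustrative descriptor layout (struct my_txpd is not mwifiex's struct txpd):

#include <linux/skbuff.h>
#include <linux/string.h>

struct my_txpd {			/* illustrative layout only */
	u8     priority;
	u8     pad[3];
	__le16 tx_pkt_length;
	__le16 tx_pkt_offset;
} __packed;

/* Sketch: prepend a firmware TX descriptor to an outgoing skb. */
static void prepend_txpd(struct sk_buff *skb)
{
	struct my_txpd *txpd = skb_push(skb, sizeof(*txpd));

	memset(txpd, 0, sizeof(*txpd));
	txpd->priority = (u8)skb->priority;
	txpd->tx_pkt_offset = cpu_to_le16(sizeof(*txpd));
	txpd->tx_pkt_length = cpu_to_le16(skb->len - sizeof(*txpd));
}
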
skb                33 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 					  struct sk_buff *skb)
skb                35 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 	struct rxpd *local_rx_pd = (struct rxpd *)(skb->data);
skb                44 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 		skb_pull(skb, le16_to_cpu(local_rx_pd->rx_pkt_offset));
skb                45 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 		skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
skb                47 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
skb                59 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 								  skb->len);
skb               114 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 	struct sk_buff *skb;
skb               126 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 			skb = tbl->rx_reorder_ptr[i];
skb               127 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 			__skb_queue_tail(&list, skb);
skb               144 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 	while ((skb = __skb_dequeue(&list)))
skb               145 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 		mwifiex_11n_dispatch_pkt(priv, skb);
skb               161 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 	struct sk_buff *skb;
skb               170 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 		skb = tbl->rx_reorder_ptr[i];
skb               171 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 		__skb_queue_tail(&list, skb);
skb               190 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 	while ((skb = __skb_dequeue(&list)))
skb               191 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c 		mwifiex_11n_dispatch_pkt(priv, skb);
skb               175 drivers/net/wireless/marvell/mwifiex/cfg80211.c mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
skb               183 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN +
skb               185 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	memcpy(skb_push(skb, sizeof(pkt_len)), &pkt_len, sizeof(pkt_len));
skb               187 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	memcpy(skb_push(skb, sizeof(tx_control)),
skb               190 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	memcpy(skb_push(skb, sizeof(pkt_type)), &pkt_type, sizeof(pkt_type));
skb               193 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	skb_put_data(skb, buf, sizeof(struct ieee80211_hdr_3addr));
skb               194 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	skb_put_data(skb, addr, ETH_ALEN);
skb               195 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	skb_put_data(skb, buf + sizeof(struct ieee80211_hdr_3addr),
skb               198 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	skb->priority = LOW_PRIO_TID;
skb               199 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	__net_timestamp(skb);
skb               213 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	struct sk_buff *skb;
skb               235 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN +
skb               239 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	if (!skb) {
skb               245 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb               251 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	mwifiex_form_mgmt_frame(skb, buf, len);
skb               255 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		skb = mwifiex_clone_skb_for_tx_status(priv,
skb               256 drivers/net/wireless/marvell/mwifiex/cfg80211.c 						      skb,
skb               262 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	mwifiex_queue_tx_pkt(priv, skb);
skb              3149 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	struct sk_buff *skb, *tmp;
skb              3160 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
skb              3161 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		skb_unlink(skb, &priv->bypass_txq);
skb              3162 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
skb              4053 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	struct sk_buff *skb;
skb              4087 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
skb              4088 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		if (!skb) {
skb              4092 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
skb              4096 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			kfree_skb(skb);
skb              4100 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		err = cfg80211_testmode_reply(skb);
skb                53 drivers/net/wireless/marvell/mwifiex/cmdevt.c 	cmd_node->cmd_skb = cmd_node->skb;
skb               416 drivers/net/wireless/marvell/mwifiex/cmdevt.c 		cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
skb               417 drivers/net/wireless/marvell/mwifiex/cmdevt.c 		if (!cmd_array[i].skb) {
skb               452 drivers/net/wireless/marvell/mwifiex/cmdevt.c 		if (cmd_array[i].skb) {
skb               455 drivers/net/wireless/marvell/mwifiex/cmdevt.c 			dev_kfree_skb_any(cmd_array[i].skb);
skb               490 drivers/net/wireless/marvell/mwifiex/cmdevt.c 	struct sk_buff *skb = adapter->event_skb;
skb               524 drivers/net/wireless/marvell/mwifiex/cmdevt.c 	if (skb) {
skb               525 drivers/net/wireless/marvell/mwifiex/cmdevt.c 		rx_info = MWIFIEX_SKB_RXCB(skb);
skb               530 drivers/net/wireless/marvell/mwifiex/cmdevt.c 				 skb->data, skb->len);
skb               542 drivers/net/wireless/marvell/mwifiex/cmdevt.c 	adapter->if_ops.event_complete(adapter, skb);
skb               626 drivers/net/wireless/marvell/mwifiex/init.c 	struct sk_buff *skb;
skb               657 drivers/net/wireless/marvell/mwifiex/init.c 	while ((skb = skb_dequeue(&adapter->tx_data_q)))
skb               658 drivers/net/wireless/marvell/mwifiex/init.c 		mwifiex_write_data_complete(adapter, skb, 0, 0);
skb               662 drivers/net/wireless/marvell/mwifiex/init.c 	while ((skb = skb_dequeue(&adapter->rx_data_q))) {
skb               663 drivers/net/wireless/marvell/mwifiex/init.c 		struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
skb               670 drivers/net/wireless/marvell/mwifiex/init.c 		dev_kfree_skb_any(skb);
skb               187 drivers/net/wireless/marvell/mwifiex/main.c 	struct sk_buff *skb;
skb               200 drivers/net/wireless/marvell/mwifiex/main.c 	while ((skb = skb_dequeue(&adapter->rx_data_q))) {
skb               210 drivers/net/wireless/marvell/mwifiex/main.c 		rx_info = MWIFIEX_SKB_RXCB(skb);
skb               213 drivers/net/wireless/marvell/mwifiex/main.c 				adapter->if_ops.deaggr_pkt(adapter, skb);
skb               214 drivers/net/wireless/marvell/mwifiex/main.c 			dev_kfree_skb_any(skb);
skb               216 drivers/net/wireless/marvell/mwifiex/main.c 			mwifiex_handle_rx_packet(adapter, skb);
skb               768 drivers/net/wireless/marvell/mwifiex/main.c 			struct sk_buff *skb)
skb               770 drivers/net/wireless/marvell/mwifiex/main.c 	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
skb               773 drivers/net/wireless/marvell/mwifiex/main.c 	    mwifiex_is_skb_mgmt_frame(skb) ||
skb               780 drivers/net/wireless/marvell/mwifiex/main.c 			     mwifiex_is_skb_mgmt_frame(skb));
skb               789 drivers/net/wireless/marvell/mwifiex/main.c int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
skb               792 drivers/net/wireless/marvell/mwifiex/main.c 	int index = mwifiex_1d_to_wmm_queue[skb->priority];
skb               803 drivers/net/wireless/marvell/mwifiex/main.c 	if (mwifiex_bypass_tx_queue(priv, skb)) {
skb               806 drivers/net/wireless/marvell/mwifiex/main.c 		mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
skb               809 drivers/net/wireless/marvell/mwifiex/main.c 		mwifiex_wmm_add_buf_txqueue(priv, skb);
skb               819 drivers/net/wireless/marvell/mwifiex/main.c 				struct sk_buff *skb, u8 flag, u64 *cookie)
skb               821 drivers/net/wireless/marvell/mwifiex/main.c 	struct sk_buff *orig_skb = skb;
skb               824 drivers/net/wireless/marvell/mwifiex/main.c 	skb = skb_clone(skb, GFP_ATOMIC);
skb               825 drivers/net/wireless/marvell/mwifiex/main.c 	if (skb) {
skb               834 drivers/net/wireless/marvell/mwifiex/main.c 			tx_info = MWIFIEX_SKB_TXCB(skb);
skb               844 drivers/net/wireless/marvell/mwifiex/main.c 		} else if (skb_shared(skb)) {
skb               847 drivers/net/wireless/marvell/mwifiex/main.c 			kfree_skb(skb);
skb               848 drivers/net/wireless/marvell/mwifiex/main.c 			skb = orig_skb;
skb               852 drivers/net/wireless/marvell/mwifiex/main.c 		skb = orig_skb;
skb               855 drivers/net/wireless/marvell/mwifiex/main.c 	return skb;
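
mwifiex_clone_skb_for_tx_status() above keeps the caller's skb alive for a later TX-status report and transmits a clone in its place, falling back to the original when cloning fails. A sketch of that ownership split; the cookie/IDR bookkeeping from the excerpts is elided:

#include <linux/skbuff.h>

/* Sketch: keep the caller's skb for status reporting, transmit a clone. */
static struct sk_buff *clone_for_status(struct sk_buff *orig,
					struct sk_buff **keep)
{
	struct sk_buff *clone = skb_clone(orig, GFP_ATOMIC);

	if (!clone) {
		*keep = NULL;		/* no status report possible */
		return orig;
	}
	*keep = orig;			/* report status against this one */
	return clone;			/* shares payload data with orig */
}
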
skb               862 drivers/net/wireless/marvell/mwifiex/main.c mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               874 drivers/net/wireless/marvell/mwifiex/main.c 		kfree_skb(skb);
skb               878 drivers/net/wireless/marvell/mwifiex/main.c 	if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
skb               880 drivers/net/wireless/marvell/mwifiex/main.c 			    "Tx: bad skb len %d\n", skb->len);
skb               881 drivers/net/wireless/marvell/mwifiex/main.c 		kfree_skb(skb);
skb               885 drivers/net/wireless/marvell/mwifiex/main.c 	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
skb               888 drivers/net/wireless/marvell/mwifiex/main.c 			    skb_headroom(skb));
skb               891 drivers/net/wireless/marvell/mwifiex/main.c 			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
skb               895 drivers/net/wireless/marvell/mwifiex/main.c 			kfree_skb(skb);
skb               899 drivers/net/wireless/marvell/mwifiex/main.c 		kfree_skb(skb);
skb               900 drivers/net/wireless/marvell/mwifiex/main.c 		skb = new_skb;
skb               903 drivers/net/wireless/marvell/mwifiex/main.c 			    skb_headroom(skb));
skb               906 drivers/net/wireless/marvell/mwifiex/main.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb               910 drivers/net/wireless/marvell/mwifiex/main.c 	tx_info->pkt_len = skb->len;
skb               912 drivers/net/wireless/marvell/mwifiex/main.c 	multicast = is_multicast_ether_addr(skb->data);
skb               914 drivers/net/wireless/marvell/mwifiex/main.c 	if (unlikely(!multicast && skb->sk &&
skb               915 drivers/net/wireless/marvell/mwifiex/main.c 		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS &&
skb               917 drivers/net/wireless/marvell/mwifiex/main.c 		skb = mwifiex_clone_skb_for_tx_status(priv,
skb               918 drivers/net/wireless/marvell/mwifiex/main.c 						      skb,
skb               928 drivers/net/wireless/marvell/mwifiex/main.c 	__net_timestamp(skb);
skb               932 drivers/net/wireless/marvell/mwifiex/main.c 	    !ether_addr_equal_unaligned(priv->cfg_bssid, skb->data)) {
skb               934 drivers/net/wireless/marvell/mwifiex/main.c 			mwifiex_tdls_check_tx(priv, skb);
skb               937 drivers/net/wireless/marvell/mwifiex/main.c 	mwifiex_queue_tx_pkt(priv, skb);
skb              1281 drivers/net/wireless/marvell/mwifiex/main.c mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
skb              1284 drivers/net/wireless/marvell/mwifiex/main.c 	skb->priority = cfg80211_classify8021d(skb, NULL);
skb              1285 drivers/net/wireless/marvell/mwifiex/main.c 	return mwifiex_1d_to_wmm_queue[skb->priority];
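
The hard_start_xmit entries guard against skbs arriving with too little headroom for the driver descriptor: skb_realloc_headroom() produces a private copy with the requested headroom and the cramped original is freed. A sketch of that check, with MWIFIEX_MIN_DATA_HEADER_LEN generalized to a `need` parameter:

#include <linux/skbuff.h>

/* Sketch: ensure at least `need` bytes of headroom before pushing a
 * driver descriptor, copying the skb when necessary. */
static struct sk_buff *ensure_headroom(struct sk_buff *skb, unsigned int need)
{
	struct sk_buff *new_skb;

	if (skb_headroom(skb) >= need)
		return skb;

	new_skb = skb_realloc_headroom(skb, need);
	kfree_skb(skb);			/* drop the cramped original */
	return new_skb;			/* NULL on allocation failure */
}
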
skb               757 drivers/net/wireless/marvell/mwifiex/main.h 	struct sk_buff *skb;
skb              1081 drivers/net/wireless/marvell/mwifiex/main.h int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
skb              1083 drivers/net/wireless/marvell/mwifiex/main.h 			    struct sk_buff *skb);
skb              1086 drivers/net/wireless/marvell/mwifiex/main.h 				struct sk_buff *skb);
skb              1117 drivers/net/wireless/marvell/mwifiex/main.h 			     struct sk_buff *skb);
skb              1118 drivers/net/wireless/marvell/mwifiex/main.h int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
skb              1122 drivers/net/wireless/marvell/mwifiex/main.h 				struct sk_buff *skb, int aggr, int status);
skb              1143 drivers/net/wireless/marvell/mwifiex/main.h 			      struct sk_buff *skb);
skb              1153 drivers/net/wireless/marvell/mwifiex/main.h 				  struct sk_buff *skb);
skb              1155 drivers/net/wireless/marvell/mwifiex/main.h 				  struct sk_buff *skb);
skb              1157 drivers/net/wireless/marvell/mwifiex/main.h 				  struct sk_buff *skb);
skb              1163 drivers/net/wireless/marvell/mwifiex/main.h void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
skb              1164 drivers/net/wireless/marvell/mwifiex/main.h void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
skb              1396 drivers/net/wireless/marvell/mwifiex/main.h static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
skb              1398 drivers/net/wireless/marvell/mwifiex/main.h 	return (get_unaligned_le32(skb->data) == PKT_TYPE_MGMT);
skb              1549 drivers/net/wireless/marvell/mwifiex/main.h int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb);
skb              1635 drivers/net/wireless/marvell/mwifiex/main.h int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb);
skb              1656 drivers/net/wireless/marvell/mwifiex/main.h 				     struct sk_buff *skb);
skb              1663 drivers/net/wireless/marvell/mwifiex/main.h 				struct sk_buff *skb, u8 flag, u64 *cookie);
skb              1670 drivers/net/wireless/marvell/mwifiex/main.h 				      struct sk_buff *skb);
skb                55 drivers/net/wireless/marvell/mwifiex/pcie.c mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
skb                61 drivers/net/wireless/marvell/mwifiex/pcie.c 	mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
skb                67 drivers/net/wireless/marvell/mwifiex/pcie.c 	mwifiex_store_mapping(skb, &mapping);
skb                72 drivers/net/wireless/marvell/mwifiex/pcie.c 				     struct sk_buff *skb, int flags)
skb                77 drivers/net/wireless/marvell/mwifiex/pcie.c 	mwifiex_get_mapping(skb, &mapping);
skb               614 drivers/net/wireless/marvell/mwifiex/pcie.c 	struct sk_buff *skb;
skb               622 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
skb               624 drivers/net/wireless/marvell/mwifiex/pcie.c 		if (!skb) {
skb               631 drivers/net/wireless/marvell/mwifiex/pcie.c 		if (mwifiex_map_pci_memory(adapter, skb,
skb               636 drivers/net/wireless/marvell/mwifiex/pcie.c 		buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
skb               640 drivers/net/wireless/marvell/mwifiex/pcie.c 			    skb, skb->len, skb->data, (u32)buf_pa,
skb               643 drivers/net/wireless/marvell/mwifiex/pcie.c 		card->rx_buf_list[i] = skb;
skb               649 drivers/net/wireless/marvell/mwifiex/pcie.c 			desc2->len = (u16)skb->len;
skb               650 drivers/net/wireless/marvell/mwifiex/pcie.c 			desc2->frag_len = (u16)skb->len;
skb               658 drivers/net/wireless/marvell/mwifiex/pcie.c 			desc->len = (u16)skb->len;
skb               674 drivers/net/wireless/marvell/mwifiex/pcie.c 	struct sk_buff *skb;
skb               680 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb = dev_alloc_skb(MAX_EVENT_SIZE);
skb               681 drivers/net/wireless/marvell/mwifiex/pcie.c 		if (!skb) {
skb               687 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb_put(skb, MAX_EVENT_SIZE);
skb               689 drivers/net/wireless/marvell/mwifiex/pcie.c 		if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
skb               691 drivers/net/wireless/marvell/mwifiex/pcie.c 			kfree_skb(skb);
skb               696 drivers/net/wireless/marvell/mwifiex/pcie.c 		buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
skb               700 drivers/net/wireless/marvell/mwifiex/pcie.c 			    skb, skb->len, skb->data, (u32)buf_pa,
skb               703 drivers/net/wireless/marvell/mwifiex/pcie.c 		card->evt_buf_list[i] = skb;
skb               708 drivers/net/wireless/marvell/mwifiex/pcie.c 		desc->len = (u16)skb->len;
skb               722 drivers/net/wireless/marvell/mwifiex/pcie.c 	struct sk_buff *skb;
skb               731 drivers/net/wireless/marvell/mwifiex/pcie.c 				skb = card->tx_buf_list[i];
skb               732 drivers/net/wireless/marvell/mwifiex/pcie.c 				mwifiex_unmap_pci_memory(adapter, skb,
skb               734 drivers/net/wireless/marvell/mwifiex/pcie.c 				dev_kfree_skb_any(skb);
skb               740 drivers/net/wireless/marvell/mwifiex/pcie.c 				skb = card->tx_buf_list[i];
skb               741 drivers/net/wireless/marvell/mwifiex/pcie.c 				mwifiex_unmap_pci_memory(adapter, skb,
skb               743 drivers/net/wireless/marvell/mwifiex/pcie.c 				dev_kfree_skb_any(skb);
skb               763 drivers/net/wireless/marvell/mwifiex/pcie.c 	struct sk_buff *skb;
skb               770 drivers/net/wireless/marvell/mwifiex/pcie.c 				skb = card->rx_buf_list[i];
skb               771 drivers/net/wireless/marvell/mwifiex/pcie.c 				mwifiex_unmap_pci_memory(adapter, skb,
skb               773 drivers/net/wireless/marvell/mwifiex/pcie.c 				dev_kfree_skb_any(skb);
skb               779 drivers/net/wireless/marvell/mwifiex/pcie.c 				skb = card->rx_buf_list[i];
skb               780 drivers/net/wireless/marvell/mwifiex/pcie.c 				mwifiex_unmap_pci_memory(adapter, skb,
skb               782 drivers/net/wireless/marvell/mwifiex/pcie.c 				dev_kfree_skb_any(skb);
skb               799 drivers/net/wireless/marvell/mwifiex/pcie.c 	struct sk_buff *skb;
skb               805 drivers/net/wireless/marvell/mwifiex/pcie.c 			skb = card->evt_buf_list[i];
skb               806 drivers/net/wireless/marvell/mwifiex/pcie.c 			mwifiex_unmap_pci_memory(adapter, skb,
skb               808 drivers/net/wireless/marvell/mwifiex/pcie.c 			dev_kfree_skb_any(skb);
skb              1024 drivers/net/wireless/marvell/mwifiex/pcie.c 	struct sk_buff *skb;
skb              1027 drivers/net/wireless/marvell/mwifiex/pcie.c 	skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
skb              1028 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (!skb) {
skb              1033 drivers/net/wireless/marvell/mwifiex/pcie.c 	skb_put(skb, MWIFIEX_UPLD_SIZE);
skb              1034 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
skb              1036 drivers/net/wireless/marvell/mwifiex/pcie.c 		kfree_skb(skb);
skb              1040 drivers/net/wireless/marvell/mwifiex/pcie.c 	card->cmdrsp_buf = skb;
skb              1149 drivers/net/wireless/marvell/mwifiex/pcie.c 	struct sk_buff *skb;
skb              1179 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb = card->tx_buf_list[wrdoneidx];
skb              1181 drivers/net/wireless/marvell/mwifiex/pcie.c 		if (skb) {
skb              1184 drivers/net/wireless/marvell/mwifiex/pcie.c 				    skb, wrdoneidx);
skb              1185 drivers/net/wireless/marvell/mwifiex/pcie.c 			mwifiex_unmap_pci_memory(adapter, skb,
skb              1191 drivers/net/wireless/marvell/mwifiex/pcie.c 				mwifiex_write_data_complete(adapter, skb, 0,
skb              1194 drivers/net/wireless/marvell/mwifiex/pcie.c 				mwifiex_write_data_complete(adapter, skb, 0, 0);
skb              1245 drivers/net/wireless/marvell/mwifiex/pcie.c mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
skb              1256 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (!(skb->data && skb->len)) {
skb              1259 drivers/net/wireless/marvell/mwifiex/pcie.c 			    __func__, skb->data, skb->len);
skb              1274 drivers/net/wireless/marvell/mwifiex/pcie.c 		payload = skb->data;
skb              1275 drivers/net/wireless/marvell/mwifiex/pcie.c 		put_unaligned_le16((u16)skb->len, payload + 0);
skb              1278 drivers/net/wireless/marvell/mwifiex/pcie.c 		if (mwifiex_map_pci_memory(adapter, skb, skb->len,
skb              1283 drivers/net/wireless/marvell/mwifiex/pcie.c 		buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
skb              1284 drivers/net/wireless/marvell/mwifiex/pcie.c 		card->tx_buf_list[wrindx] = skb;
skb              1290 drivers/net/wireless/marvell/mwifiex/pcie.c 			desc2->len = (u16)skb->len;
skb              1291 drivers/net/wireless/marvell/mwifiex/pcie.c 			desc2->frag_len = (u16)skb->len;
skb              1298 drivers/net/wireless/marvell/mwifiex/pcie.c 			desc->len = (u16)skb->len;
skb              1361 drivers/net/wireless/marvell/mwifiex/pcie.c 	mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
skb              1517 drivers/net/wireless/marvell/mwifiex/pcie.c mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
skb              1523 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (!(skb->data && skb->len)) {
skb              1526 drivers/net/wireless/marvell/mwifiex/pcie.c 			    __func__, skb->data, skb->len);
skb              1530 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
skb              1533 drivers/net/wireless/marvell/mwifiex/pcie.c 	buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
skb              1542 drivers/net/wireless/marvell/mwifiex/pcie.c 		mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
skb              1554 drivers/net/wireless/marvell/mwifiex/pcie.c 		mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
skb              1559 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) {
skb              1563 drivers/net/wireless/marvell/mwifiex/pcie.c 		mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
skb              1572 drivers/net/wireless/marvell/mwifiex/pcie.c 		mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
skb              1601 drivers/net/wireless/marvell/mwifiex/pcie.c mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
skb              1607 drivers/net/wireless/marvell/mwifiex/pcie.c 	u8 *payload = (u8 *)skb->data;
skb              1609 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (!(skb->data && skb->len)) {
skb              1612 drivers/net/wireless/marvell/mwifiex/pcie.c 			    __func__, skb->data, skb->len);
skb              1628 drivers/net/wireless/marvell/mwifiex/pcie.c 	put_unaligned_le16((u16)skb->len, &payload[0]);
skb              1631 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
skb              1634 drivers/net/wireless/marvell/mwifiex/pcie.c 	card->cmd_buf = skb;
skb              1639 drivers/net/wireless/marvell/mwifiex/pcie.c 	skb_get(skb);
skb              1723 drivers/net/wireless/marvell/mwifiex/pcie.c 	struct sk_buff *skb = card->cmdrsp_buf;
skb              1731 drivers/net/wireless/marvell/mwifiex/pcie.c 		mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
skb              1734 drivers/net/wireless/marvell/mwifiex/pcie.c 					    MWIFIEX_SKB_DMA_ADDR(skb),
skb              1746 drivers/net/wireless/marvell/mwifiex/pcie.c 	rx_len = get_unaligned_le16(skb->data);
skb              1747 drivers/net/wireless/marvell/mwifiex/pcie.c 	skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
skb              1748 drivers/net/wireless/marvell/mwifiex/pcie.c 	skb_trim(skb, rx_len);
skb              1753 drivers/net/wireless/marvell/mwifiex/pcie.c 						MWIFIEX_SKB_DMA_ADDR(skb),
skb              1765 drivers/net/wireless/marvell/mwifiex/pcie.c 			mwifiex_unmap_pci_memory(adapter, skb,
skb              1767 drivers/net/wireless/marvell/mwifiex/pcie.c 			skb_pull(skb, adapter->intf_hdr_len);
skb              1772 drivers/net/wireless/marvell/mwifiex/pcie.c 			mwifiex_process_sleep_confirm_resp(adapter, skb->data,
skb              1773 drivers/net/wireless/marvell/mwifiex/pcie.c 							   skb->len);
skb              1778 drivers/net/wireless/marvell/mwifiex/pcie.c 		memcpy(adapter->upld_buf, skb->data,
skb              1779 drivers/net/wireless/marvell/mwifiex/pcie.c 		       min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
skb              1780 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb_push(skb, adapter->intf_hdr_len);
skb              1781 drivers/net/wireless/marvell/mwifiex/pcie.c 		if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
skb              1785 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb_pull(skb, adapter->intf_hdr_len);
skb              1786 drivers/net/wireless/marvell/mwifiex/pcie.c 		adapter->curr_cmd->resp_skb = skb;
skb              1816 drivers/net/wireless/marvell/mwifiex/pcie.c 					struct sk_buff *skb)
skb              1820 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (skb) {
skb              1821 drivers/net/wireless/marvell/mwifiex/pcie.c 		card->cmdrsp_buf = skb;
skb              1823 drivers/net/wireless/marvell/mwifiex/pcie.c 		if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
skb              1928 drivers/net/wireless/marvell/mwifiex/pcie.c 				       struct sk_buff *skb)
skb              1937 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (!skb)
skb              1955 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb_push(skb, adapter->intf_hdr_len);
skb              1956 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb_put(skb, MAX_EVENT_SIZE - skb->len);
skb              1957 drivers/net/wireless/marvell/mwifiex/pcie.c 		if (mwifiex_map_pci_memory(adapter, skb,
skb              1961 drivers/net/wireless/marvell/mwifiex/pcie.c 		card->evt_buf_list[rdptr] = skb;
skb              1963 drivers/net/wireless/marvell/mwifiex/pcie.c 		desc->paddr = MWIFIEX_SKB_DMA_ADDR(skb);
skb              1964 drivers/net/wireless/marvell/mwifiex/pcie.c 		desc->len = (u16)skb->len;
skb              1966 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb = NULL;
skb              1970 drivers/net/wireless/marvell/mwifiex/pcie.c 			    rdptr, card->evt_buf_list[rdptr], skb);
skb              2112 drivers/net/wireless/marvell/mwifiex/pcie.c 	struct sk_buff *skb;
skb              2134 drivers/net/wireless/marvell/mwifiex/pcie.c 	skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
skb              2135 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (!skb) {
skb              2218 drivers/net/wireless/marvell/mwifiex/pcie.c 			memmove(skb->data, &firmware[offset], txlen);
skb              2221 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
skb              2222 drivers/net/wireless/marvell/mwifiex/pcie.c 		skb_trim(skb, tx_blocks * card->pcie.blksz_fw_dl);
skb              2225 drivers/net/wireless/marvell/mwifiex/pcie.c 		if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
skb              2240 drivers/net/wireless/marvell/mwifiex/pcie.c 				mwifiex_unmap_pci_memory(adapter, skb,
skb              2252 drivers/net/wireless/marvell/mwifiex/pcie.c 			mwifiex_unmap_pci_memory(adapter, skb,
skb              2258 drivers/net/wireless/marvell/mwifiex/pcie.c 		mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
skb              2269 drivers/net/wireless/marvell/mwifiex/pcie.c 	dev_kfree_skb_any(skb);
skb              2554 drivers/net/wireless/marvell/mwifiex/pcie.c 				     struct sk_buff *skb,
skb              2557 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (!skb) {
skb              2564 drivers/net/wireless/marvell/mwifiex/pcie.c 		return mwifiex_pcie_send_data(adapter, skb, tx_param);
skb              2566 drivers/net/wireless/marvell/mwifiex/pcie.c 		return mwifiex_pcie_send_cmd(adapter, skb);
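
The pcie.c hits above all trace one bookkeeping pattern: a buffer is DMA-mapped and remembered in a ring slot (tx_buf_list/rx_buf_list/evt_buf_list, with the bus address stashed via MWIFIEX_SKB_DMA_ADDR) before it is handed to the card, and the same slot is unmapped and freed on completion or teardown. A minimal sketch of that pattern, assuming a hypothetical demo_ring type (not a driver structure) and using the generic DMA API in place of the driver's mwifiex_map_pci_memory()/mwifiex_unmap_pci_memory() wrappers:

        #include <linux/skbuff.h>
        #include <linux/netdevice.h>
        #include <linux/dma-mapping.h>

        /* Illustrative only: 'demo_ring' is not part of mwifiex. */
        struct demo_ring {
                struct device *dev;
                struct sk_buff *buf_list[32];   /* mirrors card->tx_buf_list[] */
                dma_addr_t dma_list[32];        /* mwifiex keeps this in skb->cb */
        };

        static int demo_ring_attach(struct demo_ring *r, int idx,
                                    struct sk_buff *skb)
        {
                dma_addr_t pa = dma_map_single(r->dev, skb->data, skb->len,
                                               DMA_TO_DEVICE);

                if (dma_mapping_error(r->dev, pa))
                        return -ENOMEM;
                r->buf_list[idx] = skb;         /* remembered for completion */
                r->dma_list[idx] = pa;
                return 0;
        }

        static void demo_ring_complete(struct demo_ring *r, int idx)
        {
                struct sk_buff *skb = r->buf_list[idx];

                if (!skb)                       /* slot may already be empty */
                        return;
                dma_unmap_single(r->dev, r->dma_list[idx], skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);         /* usable from any context */
                r->buf_list[idx] = NULL;
        }

The dev_kfree_skb_any() calls in the cleanup paths above exist for the same reason as in this sketch: ring teardown can run in interrupt context, where plain kfree_skb() must not be used.
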
skb              1141 drivers/net/wireless/marvell/mwifiex/sdio.c 				    struct sk_buff *skb)
skb              1149 drivers/net/wireless/marvell/mwifiex/sdio.c 	data = skb->data;
skb              1150 drivers/net/wireless/marvell/mwifiex/sdio.c 	total_pkt_len = skb->len;
skb              1195 drivers/net/wireless/marvell/mwifiex/sdio.c 				    struct sk_buff *skb, u32 upld_typ)
skb              1201 drivers/net/wireless/marvell/mwifiex/sdio.c 	pkt_len = get_unaligned_le16(skb->data);
skb              1204 drivers/net/wireless/marvell/mwifiex/sdio.c 		skb_trim(skb, pkt_len);
skb              1205 drivers/net/wireless/marvell/mwifiex/sdio.c 		skb_pull(skb, adapter->intf_hdr_len);
skb              1212 drivers/net/wireless/marvell/mwifiex/sdio.c 		rx_info = MWIFIEX_SKB_RXCB(skb);
skb              1215 drivers/net/wireless/marvell/mwifiex/sdio.c 			skb_queue_tail(&adapter->rx_data_q, skb);
skb              1219 drivers/net/wireless/marvell/mwifiex/sdio.c 			mwifiex_deaggr_sdio_pkt(adapter, skb);
skb              1220 drivers/net/wireless/marvell/mwifiex/sdio.c 			dev_kfree_skb_any(skb);
skb              1228 drivers/net/wireless/marvell/mwifiex/sdio.c 			skb_queue_tail(&adapter->rx_data_q, skb);
skb              1232 drivers/net/wireless/marvell/mwifiex/sdio.c 			mwifiex_handle_rx_packet(adapter, skb);
skb              1245 drivers/net/wireless/marvell/mwifiex/sdio.c 								   skb->data,
skb              1246 drivers/net/wireless/marvell/mwifiex/sdio.c 								   skb->len);
skb              1248 drivers/net/wireless/marvell/mwifiex/sdio.c 			memcpy(cmd_buf, skb->data,
skb              1250 drivers/net/wireless/marvell/mwifiex/sdio.c 				     skb->len));
skb              1252 drivers/net/wireless/marvell/mwifiex/sdio.c 			dev_kfree_skb_any(skb);
skb              1255 drivers/net/wireless/marvell/mwifiex/sdio.c 			adapter->curr_cmd->resp_skb = skb;
skb              1262 drivers/net/wireless/marvell/mwifiex/sdio.c 		adapter->event_cause = get_unaligned_le32(skb->data);
skb              1264 drivers/net/wireless/marvell/mwifiex/sdio.c 		if ((skb->len > 0) && (skb->len  < MAX_EVENT_SIZE))
skb              1266 drivers/net/wireless/marvell/mwifiex/sdio.c 			       skb->data + MWIFIEX_EVENT_HEADER_LEN,
skb              1267 drivers/net/wireless/marvell/mwifiex/sdio.c 			       skb->len);
skb              1271 drivers/net/wireless/marvell/mwifiex/sdio.c 		adapter->event_skb = skb;
skb              1278 drivers/net/wireless/marvell/mwifiex/sdio.c 		dev_kfree_skb_any(skb);
skb              1303 drivers/net/wireless/marvell/mwifiex/sdio.c 	struct sk_buff *skb = NULL;
skb              1466 drivers/net/wireless/marvell/mwifiex/sdio.c 		skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL);
skb              1467 drivers/net/wireless/marvell/mwifiex/sdio.c 		if (!skb) {
skb              1478 drivers/net/wireless/marvell/mwifiex/sdio.c 		skb_put(skb, rx_len);
skb              1481 drivers/net/wireless/marvell/mwifiex/sdio.c 					      skb->data, skb->len,
skb              1489 drivers/net/wireless/marvell/mwifiex/sdio.c 			dev_kfree_skb_any(skb);
skb              1493 drivers/net/wireless/marvell/mwifiex/sdio.c 		mwifiex_decode_rx_packet(adapter, skb, pkt_type);
skb              1507 drivers/net/wireless/marvell/mwifiex/sdio.c 	if (f_do_rx_cur && skb)
skb              1509 drivers/net/wireless/marvell/mwifiex/sdio.c 		dev_kfree_skb_any(skb);
skb              1535 drivers/net/wireless/marvell/mwifiex/sdio.c 	struct sk_buff *skb;
skb              1571 drivers/net/wireless/marvell/mwifiex/sdio.c 		skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL);
skb              1572 drivers/net/wireless/marvell/mwifiex/sdio.c 		if (!skb)
skb              1575 drivers/net/wireless/marvell/mwifiex/sdio.c 		skb_put(skb, rx_len);
skb              1577 drivers/net/wireless/marvell/mwifiex/sdio.c 		if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data,
skb              1578 drivers/net/wireless/marvell/mwifiex/sdio.c 					      skb->len, adapter->ioport |
skb              1582 drivers/net/wireless/marvell/mwifiex/sdio.c 			dev_kfree_skb_any(skb);
skb              1592 drivers/net/wireless/marvell/mwifiex/sdio.c 		mwifiex_decode_rx_packet(adapter, skb, pkt_type);
skb              1887 drivers/net/wireless/marvell/mwifiex/sdio.c 				     u8 type, struct sk_buff *skb,
skb              1895 drivers/net/wireless/marvell/mwifiex/sdio.c 	u8 *payload = (u8 *)skb->data;
skb              1896 drivers/net/wireless/marvell/mwifiex/sdio.c 	u32 pkt_len = skb->len;
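
mwifiex_decode_rx_packet() above reads a little-endian length word from the front of each SDIO upload, trims off the block-size padding, strips the interface header, and then dispatches on the upload type. A compilable sketch of that decode step, with DEMO_* constants standing in for the real values and the dispatch targets stubbed out:

        #include <linux/skbuff.h>
        #include <linux/netdevice.h>
        #include <asm/unaligned.h>

        #define DEMO_TYPE_DATA  0       /* stand-ins for the real upld types */
        #define DEMO_TYPE_CMD   1
        #define DEMO_INTF_HDR   4       /* stands in for adapter->intf_hdr_len */

        static int demo_decode(struct sk_buff *skb, u32 upld_typ)
        {
                u16 pkt_len = get_unaligned_le16(skb->data);

                if (pkt_len < DEMO_INTF_HDR || pkt_len > skb->len)
                        return -EINVAL;         /* malformed upload */

                skb_trim(skb, pkt_len);         /* drop block-size padding */
                skb_pull(skb, DEMO_INTF_HDR);   /* strip interface header */

                switch (upld_typ) {
                case DEMO_TYPE_DATA:
                        /* real driver: queue to rx_data_q or handle inline */
                        return 0;
                case DEMO_TYPE_CMD:
                        /* real driver: hand to the command-response path */
                        return 0;
                default:
                        dev_kfree_skb_any(skb); /* unknown type: drop */
                        return -EINVAL;
                }
        }

Ownership transfers on every branch: after decode runs, the skb has either been queued onward or freed, which is why the callers at sdio.c lines 1493 and 1592 never touch it again.
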
skb               724 drivers/net/wireless/marvell/mwifiex/sdio.h 					       struct sk_buff *skb)
skb               726 drivers/net/wireless/marvell/mwifiex/sdio.h 	dev_kfree_skb_any(skb);
skb               734 drivers/net/wireless/marvell/mwifiex/sdio.h 					      struct sk_buff *skb)
skb               736 drivers/net/wireless/marvell/mwifiex/sdio.h 	dev_kfree_skb_any(skb);
skb                36 drivers/net/wireless/marvell/mwifiex/sta_rx.c 			       struct sk_buff *skb)
skb                43 drivers/net/wireless/marvell/mwifiex/sta_rx.c 	eth = (struct ethhdr *)skb->data;
skb                46 drivers/net/wireless/marvell/mwifiex/sta_rx.c 		arp = (void *)(skb->data + sizeof(struct ethhdr));
skb                54 drivers/net/wireless/marvell/mwifiex/sta_rx.c 		ipv6 = (void *)(skb->data + sizeof(struct ethhdr));
skb                55 drivers/net/wireless/marvell/mwifiex/sta_rx.c 		icmpv6 = (void *)(skb->data + sizeof(struct ethhdr) +
skb                84 drivers/net/wireless/marvell/mwifiex/sta_rx.c 			      struct sk_buff *skb)
skb                95 drivers/net/wireless/marvell/mwifiex/sta_rx.c 	local_rx_pd = (struct rxpd *) (skb->data);
skb               140 drivers/net/wireless/marvell/mwifiex/sta_rx.c 	skb_pull(skb, hdr_chop);
skb               143 drivers/net/wireless/marvell/mwifiex/sta_rx.c 	    mwifiex_discard_gratuitous_arp(priv, skb)) {
skb               145 drivers/net/wireless/marvell/mwifiex/sta_rx.c 		dev_kfree_skb_any(skb);
skb               170 drivers/net/wireless/marvell/mwifiex/sta_rx.c 	ret = mwifiex_recv_packet(priv, skb);
skb               191 drivers/net/wireless/marvell/mwifiex/sta_rx.c 				  struct sk_buff *skb)
skb               201 drivers/net/wireless/marvell/mwifiex/sta_rx.c 	local_rx_pd = (struct rxpd *) (skb->data);
skb               209 drivers/net/wireless/marvell/mwifiex/sta_rx.c 	if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
skb               212 drivers/net/wireless/marvell/mwifiex/sta_rx.c 			    skb->len, rx_pkt_offset, rx_pkt_length);
skb               214 drivers/net/wireless/marvell/mwifiex/sta_rx.c 		dev_kfree_skb_any(skb);
skb               219 drivers/net/wireless/marvell/mwifiex/sta_rx.c 		ret = mwifiex_process_mgmt_packet(priv, skb);
skb               222 drivers/net/wireless/marvell/mwifiex/sta_rx.c 		dev_kfree_skb_any(skb);
skb               234 drivers/net/wireless/marvell/mwifiex/sta_rx.c 		mwifiex_process_rx_packet(priv, skb);
skb               262 drivers/net/wireless/marvell/mwifiex/sta_rx.c 					 ta, (u8) rx_pkt_type, skb);
skb               265 drivers/net/wireless/marvell/mwifiex/sta_rx.c 		dev_kfree_skb_any(skb);
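
The check at sta_rx.c line 209 above is the guard that keeps a malformed rxpd from walking past the end of the buffer. Both fields are 16-bit, so the driver relies on integer promotion to keep the sum from wrapping; a sketch making that widening explicit, with demo_rxpd abbreviating the real struct rxpd layout:

        #include <linux/skbuff.h>

        /* Abbreviated stand-in for struct rxpd: only the two fields the
         * bounds check needs. */
        struct demo_rxpd {
                __le16 rx_pkt_offset;
                __le16 rx_pkt_length;
        } __packed;

        static bool demo_rxpd_sane(const struct sk_buff *skb)
        {
                const struct demo_rxpd *pd = (const void *)skb->data;
                u32 off = le16_to_cpu(pd->rx_pkt_offset);
                u32 len = le16_to_cpu(pd->rx_pkt_length);

                /* compute in u32 so the sum cannot wrap a 16-bit value */
                return skb->len >= sizeof(*pd) && off + len <= skb->len;
        }
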
skb                45 drivers/net/wireless/marvell/mwifiex/sta_tx.c 				struct sk_buff *skb)
skb                49 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
skb                54 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	if (!skb->len) {
skb                56 drivers/net/wireless/marvell/mwifiex/sta_tx.c 			    "Tx: bad packet length: %d\n", skb->len);
skb                58 drivers/net/wireless/marvell/mwifiex/sta_tx.c 		return skb->data;
skb                61 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);
skb                63 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
skb                65 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	pad = ((void *)skb->data - (sizeof(*local_tx_pd) + hroom)-
skb                67 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	skb_push(skb, sizeof(*local_tx_pd) + pad);
skb                69 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	local_tx_pd = (struct txpd *) skb->data;
skb                73 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len -
skb                77 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	local_tx_pd->priority = (u8) skb->priority;
skb                79 drivers/net/wireless/marvell/mwifiex/sta_tx.c 				mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
skb               119 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	skb_push(skb, hroom);
skb               125 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	return skb->data;
skb               142 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	struct sk_buff *skb;
skb               159 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	skb = dev_alloc_skb(data_len);
skb               160 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	if (!skb)
skb               163 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb               169 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	skb_reserve(skb, sizeof(struct txpd) + adapter->intf_hdr_len);
skb               170 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	skb_push(skb, sizeof(struct txpd));
skb               172 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	local_tx_pd = (struct txpd *) skb->data;
skb               180 drivers/net/wireless/marvell/mwifiex/sta_tx.c 	skb_push(skb, adapter->intf_hdr_len);
skb               183 drivers/net/wireless/marvell/mwifiex/sta_tx.c 						   skb, NULL);
skb               187 drivers/net/wireless/marvell/mwifiex/sta_tx.c 						   skb, &tx_param);
skb               191 drivers/net/wireless/marvell/mwifiex/sta_tx.c 		dev_kfree_skb_any(skb);
skb               198 drivers/net/wireless/marvell/mwifiex/sta_tx.c 		dev_kfree_skb_any(skb);
skb               205 drivers/net/wireless/marvell/mwifiex/sta_tx.c 		dev_kfree_skb_any(skb);
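
mwifiex_process_sta_txpd() above prepends the txpd in the skb's headroom and pads it so that, after the caller also pushes hroom bytes of bus header (line 119), the final buffer starts on a DMA-aligned address; the curious "- NULL" in the pad expression is just a pointer-to-integer conversion. A sketch of that arithmetic, with demo_txpd and DEMO_DMA_ALIGN as illustrative stand-ins:

        #include <linux/skbuff.h>
        #include <linux/string.h>

        #define DEMO_DMA_ALIGN  8       /* stands in for MWIFIEX_DMA_ALIGN_SZ */

        struct demo_txpd {              /* abbreviated stand-in for struct txpd */
                __le16 tx_pkt_length;
                u8 priority;
                u8 reserved;
        } __packed;

        static struct demo_txpd *demo_push_txpd(struct sk_buff *skb, int hroom)
        {
                struct demo_txpd *pd;
                int pad = ((uintptr_t)skb->data - (sizeof(*pd) + hroom)) &
                          (DEMO_DMA_ALIGN - 1);

                skb_push(skb, sizeof(*pd) + pad);
                pd = (struct demo_txpd *)skb->data;
                memset(pd, 0, sizeof(*pd) + pad);
                pd->tx_pkt_length = cpu_to_le16((u16)(skb->len -
                                                      (sizeof(*pd) + pad)));
                pd->priority = (u8)skb->priority;
                /* the caller then does skb_push(skb, hroom) for the bus
                 * header, which is what makes the final start aligned */
                return pd;
        }
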
skb                34 drivers/net/wireless/marvell/mwifiex/tdls.c 	struct sk_buff *skb, *tmp;
skb                42 drivers/net/wireless/marvell/mwifiex/tdls.c 	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
skb                43 drivers/net/wireless/marvell/mwifiex/tdls.c 		if (!ether_addr_equal(mac, skb->data))
skb                46 drivers/net/wireless/marvell/mwifiex/tdls.c 		__skb_unlink(skb, &priv->tdls_txq);
skb                47 drivers/net/wireless/marvell/mwifiex/tdls.c 		tx_info = MWIFIEX_SKB_TXCB(skb);
skb                48 drivers/net/wireless/marvell/mwifiex/tdls.c 		tid = skb->priority;
skb                63 drivers/net/wireless/marvell/mwifiex/tdls.c 			mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
skb                67 drivers/net/wireless/marvell/mwifiex/tdls.c 		skb_queue_tail(&ra_list->skb_head, skb);
skb                89 drivers/net/wireless/marvell/mwifiex/tdls.c 	struct sk_buff *skb, *tmp;
skb                99 drivers/net/wireless/marvell/mwifiex/tdls.c 				skb_queue_walk_safe(&ra_list->skb_head, skb,
skb               101 drivers/net/wireless/marvell/mwifiex/tdls.c 					if (!ether_addr_equal(mac, skb->data))
skb               103 drivers/net/wireless/marvell/mwifiex/tdls.c 					__skb_unlink(skb, &ra_list->skb_head);
skb               106 drivers/net/wireless/marvell/mwifiex/tdls.c 					skb_queue_tail(&priv->tdls_txq, skb);
skb               119 drivers/net/wireless/marvell/mwifiex/tdls.c 			     struct sk_buff *skb)
skb               129 drivers/net/wireless/marvell/mwifiex/tdls.c 	if (skb_tailroom(skb) < rates_size + 4) {
skb               135 drivers/net/wireless/marvell/mwifiex/tdls.c 	pos = skb_put(skb, supp_rates_size + 2);
skb               142 drivers/net/wireless/marvell/mwifiex/tdls.c 		pos = skb_put(skb, ext_rates_size + 2);
skb               153 drivers/net/wireless/marvell/mwifiex/tdls.c 				struct sk_buff *skb)
skb               159 drivers/net/wireless/marvell/mwifiex/tdls.c 	pos = skb_put(skb, 4);
skb               168 drivers/net/wireless/marvell/mwifiex/tdls.c 				      struct sk_buff *skb)
skb               173 drivers/net/wireless/marvell/mwifiex/tdls.c 	pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
skb               187 drivers/net/wireless/marvell/mwifiex/tdls.c 			 u8 vht_enabled, struct sk_buff *skb)
skb               208 drivers/net/wireless/marvell/mwifiex/tdls.c 	pos = skb_put(skb, sizeof(struct ieee80211_ht_operation) + 2);
skb               234 drivers/net/wireless/marvell/mwifiex/tdls.c 				     const u8 *mac, struct sk_buff *skb)
skb               273 drivers/net/wireless/marvell/mwifiex/tdls.c 	pos = skb_put(skb, sizeof(struct ieee80211_vht_operation) + 2);
skb               356 drivers/net/wireless/marvell/mwifiex/tdls.c 				       struct sk_buff *skb)
skb               360 drivers/net/wireless/marvell/mwifiex/tdls.c 	extcap = skb_put(skb, sizeof(struct ieee_types_extcap));
skb               371 drivers/net/wireless/marvell/mwifiex/tdls.c static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
skb               373 drivers/net/wireless/marvell/mwifiex/tdls.c 	u8 *pos = skb_put(skb, 3);
skb               381 drivers/net/wireless/marvell/mwifiex/tdls.c mwifiex_tdls_add_wmm_param_ie(struct mwifiex_private *priv, struct sk_buff *skb)
skb               389 drivers/net/wireless/marvell/mwifiex/tdls.c 	wmm = skb_put_zero(skb, sizeof(*wmm));
skb               409 drivers/net/wireless/marvell/mwifiex/tdls.c mwifiex_add_wmm_info_ie(struct mwifiex_private *priv, struct sk_buff *skb,
skb               414 drivers/net/wireless/marvell/mwifiex/tdls.c 	buf = skb_put(skb,
skb               428 drivers/net/wireless/marvell/mwifiex/tdls.c static void mwifiex_tdls_add_bss_co_2040(struct sk_buff *skb)
skb               432 drivers/net/wireless/marvell/mwifiex/tdls.c 	bssco = skb_put(skb, sizeof(struct ieee_types_bss_co_2040));
skb               439 drivers/net/wireless/marvell/mwifiex/tdls.c static void mwifiex_tdls_add_supported_chan(struct sk_buff *skb)
skb               444 drivers/net/wireless/marvell/mwifiex/tdls.c 	supp_chan = skb_put(skb,
skb               451 drivers/net/wireless/marvell/mwifiex/tdls.c static void mwifiex_tdls_add_oper_class(struct sk_buff *skb)
skb               456 drivers/net/wireless/marvell/mwifiex/tdls.c 	reg_class = skb_put(skb,
skb               466 drivers/net/wireless/marvell/mwifiex/tdls.c 					u16 status_code, struct sk_buff *skb)
skb               476 drivers/net/wireless/marvell/mwifiex/tdls.c 	tf = skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
skb               486 drivers/net/wireless/marvell/mwifiex/tdls.c 		skb_put(skb, sizeof(tf->u.setup_req));
skb               489 drivers/net/wireless/marvell/mwifiex/tdls.c 		ret = mwifiex_tdls_append_rates_ie(priv, skb);
skb               491 drivers/net/wireless/marvell/mwifiex/tdls.c 			dev_kfree_skb_any(skb);
skb               495 drivers/net/wireless/marvell/mwifiex/tdls.c 		pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
skb               502 drivers/net/wireless/marvell/mwifiex/tdls.c 			dev_kfree_skb_any(skb);
skb               507 drivers/net/wireless/marvell/mwifiex/tdls.c 			ret = mwifiex_tdls_add_vht_capab(priv, skb);
skb               509 drivers/net/wireless/marvell/mwifiex/tdls.c 				dev_kfree_skb_any(skb);
skb               512 drivers/net/wireless/marvell/mwifiex/tdls.c 			mwifiex_tdls_add_aid(priv, skb);
skb               515 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_ext_capab(priv, skb);
skb               516 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_bss_co_2040(skb);
skb               517 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_supported_chan(skb);
skb               518 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_oper_class(skb);
skb               519 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_add_wmm_info_ie(priv, skb, 0);
skb               525 drivers/net/wireless/marvell/mwifiex/tdls.c 		skb_put(skb, sizeof(tf->u.setup_resp));
skb               529 drivers/net/wireless/marvell/mwifiex/tdls.c 		ret = mwifiex_tdls_append_rates_ie(priv, skb);
skb               531 drivers/net/wireless/marvell/mwifiex/tdls.c 			dev_kfree_skb_any(skb);
skb               535 drivers/net/wireless/marvell/mwifiex/tdls.c 		pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
skb               542 drivers/net/wireless/marvell/mwifiex/tdls.c 			dev_kfree_skb_any(skb);
skb               547 drivers/net/wireless/marvell/mwifiex/tdls.c 			ret = mwifiex_tdls_add_vht_capab(priv, skb);
skb               549 drivers/net/wireless/marvell/mwifiex/tdls.c 				dev_kfree_skb_any(skb);
skb               552 drivers/net/wireless/marvell/mwifiex/tdls.c 			mwifiex_tdls_add_aid(priv, skb);
skb               555 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_ext_capab(priv, skb);
skb               556 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_bss_co_2040(skb);
skb               557 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_supported_chan(skb);
skb               558 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_oper_class(skb);
skb               559 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_add_wmm_info_ie(priv, skb, 0);
skb               565 drivers/net/wireless/marvell/mwifiex/tdls.c 		skb_put(skb, sizeof(tf->u.setup_cfm));
skb               569 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_wmm_param_ie(priv, skb);
skb               571 drivers/net/wireless/marvell/mwifiex/tdls.c 			ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
skb               573 drivers/net/wireless/marvell/mwifiex/tdls.c 				dev_kfree_skb_any(skb);
skb               576 drivers/net/wireless/marvell/mwifiex/tdls.c 			ret = mwifiex_tdls_add_ht_oper(priv, peer, 1, skb);
skb               578 drivers/net/wireless/marvell/mwifiex/tdls.c 				dev_kfree_skb_any(skb);
skb               582 drivers/net/wireless/marvell/mwifiex/tdls.c 			ret = mwifiex_tdls_add_ht_oper(priv, peer, 0, skb);
skb               584 drivers/net/wireless/marvell/mwifiex/tdls.c 				dev_kfree_skb_any(skb);
skb               593 drivers/net/wireless/marvell/mwifiex/tdls.c 		skb_put(skb, sizeof(tf->u.teardown));
skb               600 drivers/net/wireless/marvell/mwifiex/tdls.c 		skb_put(skb, sizeof(tf->u.discover_req));
skb               612 drivers/net/wireless/marvell/mwifiex/tdls.c mwifiex_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
skb               617 drivers/net/wireless/marvell/mwifiex/tdls.c 	lnkid = skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
skb               632 drivers/net/wireless/marvell/mwifiex/tdls.c 	struct sk_buff *skb;
skb               659 drivers/net/wireless/marvell/mwifiex/tdls.c 	skb = dev_alloc_skb(skb_len);
skb               660 drivers/net/wireless/marvell/mwifiex/tdls.c 	if (!skb) {
skb               665 drivers/net/wireless/marvell/mwifiex/tdls.c 	skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
skb               674 drivers/net/wireless/marvell/mwifiex/tdls.c 						   skb);
skb               676 drivers/net/wireless/marvell/mwifiex/tdls.c 			dev_kfree_skb_any(skb);
skb               680 drivers/net/wireless/marvell/mwifiex/tdls.c 			skb_put_data(skb, extra_ies, extra_ies_len);
skb               681 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_link_ie(skb, priv->curr_addr, peer,
skb               687 drivers/net/wireless/marvell/mwifiex/tdls.c 						   skb);
skb               689 drivers/net/wireless/marvell/mwifiex/tdls.c 			dev_kfree_skb_any(skb);
skb               693 drivers/net/wireless/marvell/mwifiex/tdls.c 			skb_put_data(skb, extra_ies, extra_ies_len);
skb               694 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
skb               702 drivers/net/wireless/marvell/mwifiex/tdls.c 		skb->priority = MWIFIEX_PRIO_BK;
skb               705 drivers/net/wireless/marvell/mwifiex/tdls.c 		skb->priority = MWIFIEX_PRIO_VI;
skb               709 drivers/net/wireless/marvell/mwifiex/tdls.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb               714 drivers/net/wireless/marvell/mwifiex/tdls.c 	__net_timestamp(skb);
skb               715 drivers/net/wireless/marvell/mwifiex/tdls.c 	mwifiex_queue_tx_pkt(priv, skb);
skb               731 drivers/net/wireless/marvell/mwifiex/tdls.c 				    u16 status_code, struct sk_buff *skb)
skb               741 drivers/net/wireless/marvell/mwifiex/tdls.c 	mgmt = skb_put(skb, offsetof(struct ieee80211_mgmt, u));
skb               751 drivers/net/wireless/marvell/mwifiex/tdls.c 	pos = skb_put(skb, ETH_ALEN);
skb               755 drivers/net/wireless/marvell/mwifiex/tdls.c 		skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
skb               769 drivers/net/wireless/marvell/mwifiex/tdls.c 		ret = mwifiex_tdls_append_rates_ie(priv, skb);
skb               771 drivers/net/wireless/marvell/mwifiex/tdls.c 			dev_kfree_skb_any(skb);
skb               775 drivers/net/wireless/marvell/mwifiex/tdls.c 		pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
skb               782 drivers/net/wireless/marvell/mwifiex/tdls.c 			dev_kfree_skb_any(skb);
skb               787 drivers/net/wireless/marvell/mwifiex/tdls.c 			ret = mwifiex_tdls_add_vht_capab(priv, skb);
skb               789 drivers/net/wireless/marvell/mwifiex/tdls.c 				dev_kfree_skb_any(skb);
skb               792 drivers/net/wireless/marvell/mwifiex/tdls.c 			mwifiex_tdls_add_aid(priv, skb);
skb               795 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_ext_capab(priv, skb);
skb               796 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_bss_co_2040(skb);
skb               797 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_supported_chan(skb);
skb               798 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_qos_capab(skb);
skb               799 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_oper_class(skb);
skb               814 drivers/net/wireless/marvell/mwifiex/tdls.c 	struct sk_buff *skb;
skb               839 drivers/net/wireless/marvell/mwifiex/tdls.c 	skb = dev_alloc_skb(skb_len);
skb               840 drivers/net/wireless/marvell/mwifiex/tdls.c 	if (!skb) {
skb               846 drivers/net/wireless/marvell/mwifiex/tdls.c 	skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
skb               850 drivers/net/wireless/marvell/mwifiex/tdls.c 	pos = skb_put_zero(skb,
skb               857 drivers/net/wireless/marvell/mwifiex/tdls.c 						skb)) {
skb               858 drivers/net/wireless/marvell/mwifiex/tdls.c 		dev_kfree_skb_any(skb);
skb               863 drivers/net/wireless/marvell/mwifiex/tdls.c 		skb_put_data(skb, extra_ies, extra_ies_len);
skb               867 drivers/net/wireless/marvell/mwifiex/tdls.c 	mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
skb               870 drivers/net/wireless/marvell/mwifiex/tdls.c 	skb->priority = MWIFIEX_PRIO_VI;
skb               872 drivers/net/wireless/marvell/mwifiex/tdls.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb               878 drivers/net/wireless/marvell/mwifiex/tdls.c 	pkt_len = skb->len - MWIFIEX_MGMT_FRAME_HEADER_SIZE - sizeof(pkt_len);
skb               879 drivers/net/wireless/marvell/mwifiex/tdls.c 	memcpy(skb->data + MWIFIEX_MGMT_FRAME_HEADER_SIZE, &pkt_len,
skb               881 drivers/net/wireless/marvell/mwifiex/tdls.c 	__net_timestamp(skb);
skb               882 drivers/net/wireless/marvell/mwifiex/tdls.c 	mwifiex_queue_tx_pkt(priv, skb);
skb              1282 drivers/net/wireless/marvell/mwifiex/tdls.c int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb)
skb              1287 drivers/net/wireless/marvell/mwifiex/tdls.c 	ether_addr_copy(mac, skb->data);
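
Nearly every tdls.c hit above is the same move: grow the frame with skb_put() and fill an information element in place. A sketch in the style of mwifiex_tdls_add_qos_capab() (the 3-byte put at line 373), with an example QoS info value:

        #include <linux/skbuff.h>
        #include <linux/ieee80211.h>

        static void demo_add_qos_capab(struct sk_buff *skb)
        {
                u8 *pos = skb_put(skb, 3);      /* tag + len + 1-byte value */

                *pos++ = WLAN_EID_QOS_CAPA;
                *pos++ = 1;                     /* IE payload length */
                *pos++ = 0x0f;                  /* example QoS info field */
        }

skb_put() BUG()s when the tailroom runs out, which is why the frame builders size the allocation up front and why mwifiex_tdls_append_rates_ie() checks skb_tailroom() before writing (line 129).
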
skb                37 drivers/net/wireless/marvell/mwifiex/txrx.c 			     struct sk_buff *skb)
skb                42 drivers/net/wireless/marvell/mwifiex/txrx.c 	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
skb                45 drivers/net/wireless/marvell/mwifiex/txrx.c 	local_rx_pd = (struct rxpd *) (skb->data);
skb                55 drivers/net/wireless/marvell/mwifiex/txrx.c 		dev_kfree_skb_any(skb);
skb                59 drivers/net/wireless/marvell/mwifiex/txrx.c 	mwifiex_dbg_dump(adapter, DAT_D, "rx pkt:", skb->data,
skb                60 drivers/net/wireless/marvell/mwifiex/txrx.c 			 min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));
skb                67 drivers/net/wireless/marvell/mwifiex/txrx.c 		ret = mwifiex_process_uap_rx_packet(priv, skb);
skb                69 drivers/net/wireless/marvell/mwifiex/txrx.c 		ret = mwifiex_process_sta_rx_packet(priv, skb);
skb                84 drivers/net/wireless/marvell/mwifiex/txrx.c int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
skb                92 drivers/net/wireless/marvell/mwifiex/txrx.c 	struct ethhdr *hdr = (void *)skb->data;
skb                99 drivers/net/wireless/marvell/mwifiex/txrx.c 			dest_node->stats.tx_bytes += skb->len;
skb               103 drivers/net/wireless/marvell/mwifiex/txrx.c 		head_ptr = mwifiex_process_uap_txpd(priv, skb);
skb               105 drivers/net/wireless/marvell/mwifiex/txrx.c 		head_ptr = mwifiex_process_sta_txpd(priv, skb);
skb               109 drivers/net/wireless/marvell/mwifiex/txrx.c 		skb_queue_tail(&adapter->tx_data_q, skb);
skb               120 drivers/net/wireless/marvell/mwifiex/txrx.c 							   skb, tx_param);
skb               124 drivers/net/wireless/marvell/mwifiex/txrx.c 							   skb, tx_param);
skb               127 drivers/net/wireless/marvell/mwifiex/txrx.c 	mwifiex_dbg_dump(adapter, DAT_D, "tx pkt:", skb->data,
skb               128 drivers/net/wireless/marvell/mwifiex/txrx.c 			 min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));
skb               148 drivers/net/wireless/marvell/mwifiex/txrx.c 		mwifiex_write_data_complete(adapter, skb, 0, ret);
skb               153 drivers/net/wireless/marvell/mwifiex/txrx.c 		mwifiex_write_data_complete(adapter, skb, 0, ret);
skb               163 drivers/net/wireless/marvell/mwifiex/txrx.c 				struct sk_buff *skb,
skb               167 drivers/net/wireless/marvell/mwifiex/txrx.c 	u8 *head_ptr = skb->data;
skb               172 drivers/net/wireless/marvell/mwifiex/txrx.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb               179 drivers/net/wireless/marvell/mwifiex/txrx.c 		mwifiex_write_data_complete(adapter, skb, 0, 0);
skb               188 drivers/net/wireless/marvell/mwifiex/txrx.c 						   skb, tx_param);
skb               192 drivers/net/wireless/marvell/mwifiex/txrx.c 						   skb, tx_param);
skb               206 drivers/net/wireless/marvell/mwifiex/txrx.c 		skb_queue_head(&adapter->tx_data_q, skb);
skb               217 drivers/net/wireless/marvell/mwifiex/txrx.c 		mwifiex_write_data_complete(adapter, skb, 0, ret);
skb               222 drivers/net/wireless/marvell/mwifiex/txrx.c 		mwifiex_write_data_complete(adapter, skb, 0, ret);
skb               233 drivers/net/wireless/marvell/mwifiex/txrx.c 	struct sk_buff *skb, *skb_next;
skb               237 drivers/net/wireless/marvell/mwifiex/txrx.c 	skb = skb_dequeue(&adapter->tx_data_q);
skb               238 drivers/net/wireless/marvell/mwifiex/txrx.c 	if (!skb)
skb               241 drivers/net/wireless/marvell/mwifiex/txrx.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb               256 drivers/net/wireless/marvell/mwifiex/txrx.c 	return mwifiex_host_to_card(adapter, skb, &tx_param);
skb               278 drivers/net/wireless/marvell/mwifiex/txrx.c 				struct sk_buff *skb, int aggr, int status)
skb               285 drivers/net/wireless/marvell/mwifiex/txrx.c 	if (!skb)
skb               288 drivers/net/wireless/marvell/mwifiex/txrx.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb               317 drivers/net/wireless/marvell/mwifiex/txrx.c 	index = mwifiex_1d_to_wmm_queue[skb->priority];
skb               326 drivers/net/wireless/marvell/mwifiex/txrx.c 	dev_kfree_skb_any(skb);
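
The tail of mwifiex_process_tx() and mwifiex_host_to_card() above encodes a small return-code contract: -EBUSY means the card could not take the packet and the untouched skb is re-queued (line 206), -EINPROGRESS means an asynchronous path now owns it, and anything else finishes through mwifiex_write_data_complete(), which frees the buffer. A sketch of that contract with a hypothetical demo_send() and freeing standing in for the completion callback:

        #include <linux/skbuff.h>
        #include <linux/netdevice.h>
        #include <linux/errno.h>

        static int demo_send(struct sk_buff_head *txq, struct sk_buff *skb,
                             int (*host_to_card)(struct sk_buff *skb))
        {
                int ret = host_to_card(skb);

                switch (ret) {
                case -EBUSY:
                        skb_queue_head(txq, skb);  /* retry later, keep order */
                        break;
                case -EINPROGRESS:
                        break;          /* async: completion path frees it */
                case 0:
                        dev_kfree_skb_any(skb);    /* sent: complete + free */
                        break;
                default:
                        dev_kfree_skb_any(skb);    /* error: count and drop */
                        break;
                }
                return ret;
        }
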
skb                38 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	struct sk_buff *skb, *tmp;
skb                47 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
skb                48 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			tx_info = MWIFIEX_SKB_TXCB(skb);
skb                50 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 				__skb_unlink(skb, &ra_list->skb_head);
skb                51 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 				mwifiex_write_data_complete(adapter, skb, 0,
skb                94 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 					 struct sk_buff *skb)
skb               106 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	uap_rx_pd = (struct uap_rxpd *)(skb->data);
skb               113 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		kfree_skb(skb);
skb               158 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	skb_pull(skb, hdr_chop);
skb               160 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
skb               163 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			    skb_headroom(skb));
skb               166 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
skb               170 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			kfree_skb(skb);
skb               175 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		kfree_skb(skb);
skb               176 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		skb = new_skb;
skb               179 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			    skb_headroom(skb));
skb               182 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb               191 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		src_node->stats.rx_bytes += skb->len;
skb               201 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		priv->stats.rx_bytes += skb->len;
skb               207 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		tx_info->pkt_len = skb->len;
skb               210 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	__net_timestamp(skb);
skb               212 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	index = mwifiex_1d_to_wmm_queue[skb->priority];
skb               214 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	mwifiex_wmm_add_buf_txqueue(priv, skb);
skb               235 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 				  struct sk_buff *skb)
skb               243 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	uap_rx_pd = (struct uap_rxpd *)(skb->data);
skb               250 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		dev_kfree_skb_any(skb);
skb               257 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		skb_uap = skb_copy(skb, GFP_ATOMIC);
skb               262 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			mwifiex_uap_queue_bridged_pkt(priv, skb);
skb               268 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	return mwifiex_process_rx_packet(priv, skb);
skb               272 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			    struct sk_buff *skb)
skb               280 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	if (!skb)
skb               283 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	p_ethhdr = (void *)skb->data;
skb               287 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		src_node->stats.rx_bytes += skb->len;
skb               293 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
skb               295 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
skb               297 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			skb_uap = skb_copy(skb, GFP_ATOMIC);
skb               326 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	skb->dev = priv->netdev;
skb               327 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	skb->protocol = eth_type_trans(skb, priv->netdev);
skb               328 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	skb->ip_summed = CHECKSUM_NONE;
skb               349 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	    skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)
skb               350 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
skb               354 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		netif_rx(skb);
skb               356 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		netif_rx_ni(skb);
skb               372 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 				  struct sk_buff *skb)
skb               382 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	uap_rx_pd = (struct uap_rxpd *)(skb->data);
skb               389 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	     le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
skb               392 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
skb               400 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		dev_kfree_skb_any(skb);
skb               405 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		ret = mwifiex_process_mgmt_packet(priv, skb);
skb               408 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		dev_kfree_skb_any(skb);
skb               425 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		ret = mwifiex_handle_uap_rx_forward(priv, skb);
skb               433 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 					 skb);
skb               436 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		dev_kfree_skb_any(skb);
skb               462 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			       struct sk_buff *skb)
skb               466 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
skb               471 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	if (!skb->len) {
skb               473 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 			    "Tx: bad packet length: %d\n", skb->len);
skb               475 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 		return skb->data;
skb               478 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);
skb               480 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
skb               482 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) &
skb               485 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	skb_push(skb, sizeof(*txpd) + pad);
skb               487 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	txpd = (struct uap_txpd *)skb->data;
skb               491 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - (sizeof(*txpd) +
skb               493 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	txpd->priority = (u8)skb->priority;
skb               495 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
skb               522 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	skb_push(skb, hroom);
skb               528 drivers/net/wireless/marvell/mwifiex/uap_txrx.c 	return skb->data;
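
Bridging a received packet back out (uap_txrx.c lines 160-176 above) requires MWIFIEX_MIN_DATA_HEADER_LEN of headroom for the txpd that will be pushed later; an RX buffer rarely has that, so the driver copies it with skb_realloc_headroom(). A sketch of that fix-up, with DEMO_MIN_HEADROOM as a stand-in:

        #include <linux/skbuff.h>

        #define DEMO_MIN_HEADROOM 32    /* stands in for
                                         * MWIFIEX_MIN_DATA_HEADER_LEN */

        static struct sk_buff *demo_ensure_headroom(struct sk_buff *skb)
        {
                struct sk_buff *new_skb;

                if (skb_headroom(skb) >= DEMO_MIN_HEADROOM)
                        return skb;     /* enough room, reuse in place */

                new_skb = skb_realloc_headroom(skb, DEMO_MIN_HEADROOM);
                kfree_skb(skb);         /* realloc copied; drop the old one */
                return new_skb;         /* NULL on allocation failure */
        }
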
skb                59 drivers/net/wireless/marvell/mwifiex/usb.c 			    struct sk_buff *skb, u8 ep)
skb                68 drivers/net/wireless/marvell/mwifiex/usb.c 	if (skb->len < INTF_HEADER_LEN) {
skb                78 drivers/net/wireless/marvell/mwifiex/usb.c 		skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
skb                80 drivers/net/wireless/marvell/mwifiex/usb.c 		skb_pull(skb, INTF_HEADER_LEN);
skb                84 drivers/net/wireless/marvell/mwifiex/usb.c 			if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
skb                93 drivers/net/wireless/marvell/mwifiex/usb.c 							adapter, skb->data,
skb                94 drivers/net/wireless/marvell/mwifiex/usb.c 							skb->len);
skb               102 drivers/net/wireless/marvell/mwifiex/usb.c 			adapter->curr_cmd->resp_skb = skb;
skb               106 drivers/net/wireless/marvell/mwifiex/usb.c 			if (skb->len < sizeof(u32)) {
skb               112 drivers/net/wireless/marvell/mwifiex/usb.c 			skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
skb               117 drivers/net/wireless/marvell/mwifiex/usb.c 			if (skb->len > MAX_EVENT_SIZE) {
skb               124 drivers/net/wireless/marvell/mwifiex/usb.c 			memcpy(adapter->event_body, skb->data +
skb               125 drivers/net/wireless/marvell/mwifiex/usb.c 			       MWIFIEX_EVENT_HEADER_LEN, skb->len);
skb               128 drivers/net/wireless/marvell/mwifiex/usb.c 			adapter->event_skb = skb;
skb               138 drivers/net/wireless/marvell/mwifiex/usb.c 		if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
skb               144 drivers/net/wireless/marvell/mwifiex/usb.c 		skb_queue_tail(&adapter->rx_data_q, skb);
skb               158 drivers/net/wireless/marvell/mwifiex/usb.c 	skb_push(skb, INTF_HEADER_LEN);
skb               167 drivers/net/wireless/marvell/mwifiex/usb.c 	struct sk_buff *skb = context->skb;
skb               190 drivers/net/wireless/marvell/mwifiex/usb.c 				dev_kfree_skb_any(skb);
skb               193 drivers/net/wireless/marvell/mwifiex/usb.c 		if (skb->len > recv_length)
skb               194 drivers/net/wireless/marvell/mwifiex/usb.c 			skb_trim(skb, recv_length);
skb               196 drivers/net/wireless/marvell/mwifiex/usb.c 			skb_put(skb, recv_length - skb->len);
skb               198 drivers/net/wireless/marvell/mwifiex/usb.c 		status = mwifiex_usb_recv(adapter, skb, context->ep);
skb               219 drivers/net/wireless/marvell/mwifiex/usb.c 				dev_kfree_skb_any(skb);
skb               227 drivers/net/wireless/marvell/mwifiex/usb.c 		dev_kfree_skb_any(skb);
skb               232 drivers/net/wireless/marvell/mwifiex/usb.c 			dev_kfree_skb_any(skb);
skb               249 drivers/net/wireless/marvell/mwifiex/usb.c 			context->skb = NULL;
skb               275 drivers/net/wireless/marvell/mwifiex/usb.c 		mwifiex_write_data_complete(adapter, context->skb, 0,
skb               305 drivers/net/wireless/marvell/mwifiex/usb.c 			dev_kfree_skb_any(ctx->skb);
skb               306 drivers/net/wireless/marvell/mwifiex/usb.c 			ctx->skb = NULL;
skb               315 drivers/net/wireless/marvell/mwifiex/usb.c 		ctx->skb = dev_alloc_skb(size);
skb               316 drivers/net/wireless/marvell/mwifiex/usb.c 		if (!ctx->skb) {
skb               327 drivers/net/wireless/marvell/mwifiex/usb.c 				 ctx->skb->data, size, mwifiex_usb_rx_complete,
skb               332 drivers/net/wireless/marvell/mwifiex/usb.c 				  ctx->skb->data, size, mwifiex_usb_rx_complete,
skb               342 drivers/net/wireless/marvell/mwifiex/usb.c 		dev_kfree_skb_any(ctx->skb);
skb               343 drivers/net/wireless/marvell/mwifiex/usb.c 		ctx->skb = NULL;
skb               627 drivers/net/wireless/marvell/mwifiex/usb.c 		card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
skb               628 drivers/net/wireless/marvell/mwifiex/usb.c 		if (card->rx_cmd.skb)
skb               816 drivers/net/wireless/marvell/mwifiex/usb.c 	context->skb = skb_send;
skb               952 drivers/net/wireless/marvell/mwifiex/usb.c 				    struct sk_buff *skb,
skb               961 drivers/net/wireless/marvell/mwifiex/usb.c 		(struct txpd *)((u8 *)skb->data + adapter->intf_hdr_len);
skb               970 drivers/net/wireless/marvell/mwifiex/usb.c 	pad = (align - (skb->len & (align - 1))) % align;
skb               974 drivers/net/wireless/marvell/mwifiex/usb.c 		if (port->tx_aggr.aggr_len + skb->len + pad >
skb               982 drivers/net/wireless/marvell/mwifiex/usb.c 			if (port->tx_aggr.aggr_len + skb->len + pad +
skb               997 drivers/net/wireless/marvell/mwifiex/usb.c 			if (port->tx_aggr.aggr_len + skb->len + pad >
skb              1034 drivers/net/wireless/marvell/mwifiex/usb.c 		skb_queue_tail(&port->tx_aggr.aggr_list, skb);
skb              1035 drivers/net/wireless/marvell/mwifiex/usb.c 		port->tx_aggr.aggr_len += (skb->len + pad);
skb              1094 drivers/net/wireless/marvell/mwifiex/usb.c 		payload = skb->data;
skb              1097 drivers/net/wireless/marvell/mwifiex/usb.c 		*(u16 *)payload = cpu_to_le16(skb->len);
skb              1098 drivers/net/wireless/marvell/mwifiex/usb.c 		skb_send = skb;
skb              1106 drivers/net/wireless/marvell/mwifiex/usb.c 		skb_queue_tail(&port->tx_aggr.aggr_list, skb);
skb              1107 drivers/net/wireless/marvell/mwifiex/usb.c 		port->tx_aggr.aggr_len += (skb->len + pad);
skb              1165 drivers/net/wireless/marvell/mwifiex/usb.c 				    struct sk_buff *skb,
skb              1213 drivers/net/wireless/marvell/mwifiex/usb.c 			ret =  mwifiex_usb_aggr_tx_data(adapter, ep, skb,
skb              1222 drivers/net/wireless/marvell/mwifiex/usb.c 	return mwifiex_usb_construct_send_urb(adapter, port, ep, context, skb);
skb              1280 drivers/net/wireless/marvell/mwifiex/usb.c 	card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
skb              1281 drivers/net/wireless/marvell/mwifiex/usb.c 	if (!card->rx_cmd.skb)
skb              1537 drivers/net/wireless/marvell/mwifiex/usb.c 	skb_push(card->rx_cmd.skb, INTF_HEADER_LEN);
skb              1547 drivers/net/wireless/marvell/mwifiex/usb.c 				       struct sk_buff *skb)
skb              1573 drivers/net/wireless/marvell/mwifiex/usb.c 		if (card->rx_data_list[i].skb)
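
mwifiex_usb_recv() at the top of usb.c peeks a 32-bit type word from the interface header, pulls the header, and routes the buffer to the command, event, or data path; on failure it pushes the header back (line 158) so the caller sees the buffer unchanged. A rough sketch of that dispatch, with DEMO_* constants standing in for the real header length and type values (the exact header layout here is an assumption):

        #include <linux/skbuff.h>
        #include <asm/unaligned.h>

        #define DEMO_INTF_HDR_LEN 4     /* stands in for INTF_HEADER_LEN */
        #define DEMO_TYPE_CMD     1     /* illustrative type codes */
        #define DEMO_TYPE_EVENT   2
        #define DEMO_TYPE_DATA    3

        static int demo_usb_recv(struct sk_buff *skb)
        {
                u32 type;

                if (skb->len < DEMO_INTF_HDR_LEN)
                        return -EINVAL;         /* runt transfer */

                type = get_unaligned_le32(skb->data);
                skb_pull(skb, DEMO_INTF_HDR_LEN);  /* strip the type header */

                switch (type) {
                case DEMO_TYPE_CMD:
                case DEMO_TYPE_EVENT:
                case DEMO_TYPE_DATA:
                        /* real driver: hand off to the matching queue */
                        return 0;
                default:
                        /* restore the header, as usb.c line 158 does */
                        skb_push(skb, DEMO_INTF_HDR_LEN);
                        return -EINVAL;
                }
        }
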
skb                62 drivers/net/wireless/marvell/mwifiex/usb.h 	struct sk_buff *skb;
skb               391 drivers/net/wireless/marvell/mwifiex/util.c 			    struct sk_buff *skb)
skb               397 drivers/net/wireless/marvell/mwifiex/util.c 	if (!skb)
skb               407 drivers/net/wireless/marvell/mwifiex/util.c 	rx_pd = (struct rxpd *)skb->data;
skb               409 drivers/net/wireless/marvell/mwifiex/util.c 	skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
skb               410 drivers/net/wireless/marvell/mwifiex/util.c 	skb_pull(skb, sizeof(pkt_len));
skb               414 drivers/net/wireless/marvell/mwifiex/util.c 	ieee_hdr = (void *)skb->data;
skb               421 drivers/net/wireless/marvell/mwifiex/util.c 	memmove(skb->data + sizeof(struct ieee80211_hdr_3addr),
skb               422 drivers/net/wireless/marvell/mwifiex/util.c 		skb->data + sizeof(struct ieee80211_hdr),
skb               429 drivers/net/wireless/marvell/mwifiex/util.c 			 CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len,
skb               444 drivers/net/wireless/marvell/mwifiex/util.c int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
skb               449 drivers/net/wireless/marvell/mwifiex/util.c 	if (!skb)
skb               452 drivers/net/wireless/marvell/mwifiex/util.c 	priv->stats.rx_bytes += skb->len;
skb               456 drivers/net/wireless/marvell/mwifiex/util.c 		p_ethhdr = (void *)skb->data;
skb               460 drivers/net/wireless/marvell/mwifiex/util.c 			src_node->stats.rx_bytes += skb->len;
skb               465 drivers/net/wireless/marvell/mwifiex/util.c 	skb->dev = priv->netdev;
skb               466 drivers/net/wireless/marvell/mwifiex/util.c 	skb->protocol = eth_type_trans(skb, priv->netdev);
skb               467 drivers/net/wireless/marvell/mwifiex/util.c 	skb->ip_summed = CHECKSUM_NONE;
skb               488 drivers/net/wireless/marvell/mwifiex/util.c 	    (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb               489 drivers/net/wireless/marvell/mwifiex/util.c 		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
skb               492 drivers/net/wireless/marvell/mwifiex/util.c 		netif_rx(skb);
skb               494 drivers/net/wireless/marvell/mwifiex/util.c 		netif_rx_ni(skb);
skb               739 drivers/net/wireless/marvell/mwifiex/util.c 	struct sk_buff *skb;
skb               744 drivers/net/wireless/marvell/mwifiex/util.c 	skb = __dev_alloc_skb(buf_len, flags);
skb               746 drivers/net/wireless/marvell/mwifiex/util.c 	if (!skb)
skb               749 drivers/net/wireless/marvell/mwifiex/util.c 	skb_reserve(skb, MWIFIEX_RX_HEADROOM);
skb               751 drivers/net/wireless/marvell/mwifiex/util.c 	pad = MWIFIEX_ALIGN_ADDR(skb->data, MWIFIEX_DMA_ALIGN_SZ) -
skb               752 drivers/net/wireless/marvell/mwifiex/util.c 	      (long)skb->data;
skb               754 drivers/net/wireless/marvell/mwifiex/util.c 	skb_reserve(skb, pad);
skb               756 drivers/net/wireless/marvell/mwifiex/util.c 	return skb;
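
mwifiex_alloc_dma_align_buf() (lines 739-756 above) over-allocates by the alignment size, reserves the fixed RX headroom, then reserves a few more bytes so that skb->data lands on a DMA-aligned boundary. A sketch with DEMO_* stand-ins for MWIFIEX_RX_HEADROOM and MWIFIEX_DMA_ALIGN_SZ:

        #include <linux/kernel.h>
        #include <linux/skbuff.h>

        #define DEMO_RX_HEADROOM 64
        #define DEMO_DMA_ALIGN   8

        static struct sk_buff *demo_alloc_dma_align_buf(int rx_len, gfp_t flags)
        {
                struct sk_buff *skb;
                int pad, buf_len;

                /* extra DEMO_DMA_ALIGN bytes guarantee the pad fits */
                buf_len = rx_len + DEMO_RX_HEADROOM + DEMO_DMA_ALIGN;
                skb = __dev_alloc_skb(buf_len, flags);
                if (!skb)
                        return NULL;

                skb_reserve(skb, DEMO_RX_HEADROOM);
                pad = ALIGN((uintptr_t)skb->data, DEMO_DMA_ALIGN) -
                      (uintptr_t)skb->data;
                skb_reserve(skb, pad);  /* 0..DEMO_DMA_ALIGN-1 extra bytes */
                return skb;
        }
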
skb                53 drivers/net/wireless/marvell/mwifiex/util.h static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
skb                55 drivers/net/wireless/marvell/mwifiex/util.h 	struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
skb                57 drivers/net/wireless/marvell/mwifiex/util.h 	BUILD_BUG_ON(sizeof(struct mwifiex_cb) > sizeof(skb->cb));
skb                61 drivers/net/wireless/marvell/mwifiex/util.h static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
skb                63 drivers/net/wireless/marvell/mwifiex/util.h 	struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
skb                68 drivers/net/wireless/marvell/mwifiex/util.h static inline void mwifiex_store_mapping(struct sk_buff *skb,
skb                71 drivers/net/wireless/marvell/mwifiex/util.h 	struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
skb                76 drivers/net/wireless/marvell/mwifiex/util.h static inline void mwifiex_get_mapping(struct sk_buff *skb,
skb                79 drivers/net/wireless/marvell/mwifiex/util.h 	struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
skb                84 drivers/net/wireless/marvell/mwifiex/util.h static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
skb                88 drivers/net/wireless/marvell/mwifiex/util.h 	mwifiex_get_mapping(skb, &mapping);
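
The util.h helpers above overlay a private struct mwifiex_cb on the skb->cb scratch area; that is how the DMA address returned by MWIFIEX_SKB_DMA_ADDR() travels with the packet between the map and unmap sites seen in pcie.c. A sketch of the pattern, with demo_cb abbreviating the real structure:

        #include <linux/kernel.h>
        #include <linux/skbuff.h>

        struct demo_cb {                /* abbreviated struct mwifiex_cb */
                dma_addr_t dma_mapping; /* set by the map helpers */
                u8 flags;               /* per-packet rx/tx bookkeeping */
        };

        static inline struct demo_cb *demo_skb_cb(struct sk_buff *skb)
        {
                /* refuse to build if the struct outgrows skb->cb */
                BUILD_BUG_ON(sizeof(struct demo_cb) > sizeof(skb->cb));
                return (struct demo_cb *)skb->cb;
        }

The BUILD_BUG_ON at util.h line 57 is the important part: skb->cb is a fixed 48-byte shared scratch area, and overflowing it would silently corrupt neighbouring state.
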
skb               504 drivers/net/wireless/marvell/mwifiex/wmm.c 	struct sk_buff *skb, *tmp;
skb               506 drivers/net/wireless/marvell/mwifiex/wmm.c 	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
skb               507 drivers/net/wireless/marvell/mwifiex/wmm.c 		skb_unlink(skb, &ra_list->skb_head);
skb               508 drivers/net/wireless/marvell/mwifiex/wmm.c 		mwifiex_write_data_complete(adapter, skb, 0, -1);
skb               585 drivers/net/wireless/marvell/mwifiex/wmm.c 	struct sk_buff *skb, *tmp;
skb               604 drivers/net/wireless/marvell/mwifiex/wmm.c 	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
skb               605 drivers/net/wireless/marvell/mwifiex/wmm.c 		skb_unlink(skb, &priv->tdls_txq);
skb               606 drivers/net/wireless/marvell/mwifiex/wmm.c 		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
skb               609 drivers/net/wireless/marvell/mwifiex/wmm.c 	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
skb               610 drivers/net/wireless/marvell/mwifiex/wmm.c 		skb_unlink(skb, &priv->bypass_txq);
skb               611 drivers/net/wireless/marvell/mwifiex/wmm.c 		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
skb               794 drivers/net/wireless/marvell/mwifiex/wmm.c 				   struct sk_buff *skb)
skb               796 drivers/net/wireless/marvell/mwifiex/wmm.c 	skb_queue_tail(&priv->bypass_txq, skb);
skb               810 drivers/net/wireless/marvell/mwifiex/wmm.c 			    struct sk_buff *skb)
skb               818 drivers/net/wireless/marvell/mwifiex/wmm.c 	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
skb               819 drivers/net/wireless/marvell/mwifiex/wmm.c 	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
skb               833 drivers/net/wireless/marvell/mwifiex/wmm.c 	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
skb               835 drivers/net/wireless/marvell/mwifiex/wmm.c 		mwifiex_write_data_complete(adapter, skb, 0, -1);
skb               839 drivers/net/wireless/marvell/mwifiex/wmm.c 	tid = skb->priority;
skb               849 drivers/net/wireless/marvell/mwifiex/wmm.c 	    !mwifiex_is_skb_mgmt_frame(skb)) {
skb               860 drivers/net/wireless/marvell/mwifiex/wmm.c 			skb_queue_tail(&priv->tdls_txq, skb);
skb               870 drivers/net/wireless/marvell/mwifiex/wmm.c 		memcpy(ra, skb->data, ETH_ALEN);
skb               871 drivers/net/wireless/marvell/mwifiex/wmm.c 		if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
skb               878 drivers/net/wireless/marvell/mwifiex/wmm.c 		mwifiex_write_data_complete(adapter, skb, 0, -1);
skb               882 drivers/net/wireless/marvell/mwifiex/wmm.c 	skb_queue_tail(&ra_list->skb_head, skb);
skb              1062 drivers/net/wireless/marvell/mwifiex/wmm.c 				  const struct sk_buff *skb)
skb              1064 drivers/net/wireless/marvell/mwifiex/wmm.c 	u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
skb              1206 drivers/net/wireless/marvell/mwifiex/wmm.c 	struct sk_buff *skb, *tmp;
skb              1215 drivers/net/wireless/marvell/mwifiex/wmm.c 	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
skb              1216 drivers/net/wireless/marvell/mwifiex/wmm.c 		total_size += skb->len;
skb              1234 drivers/net/wireless/marvell/mwifiex/wmm.c 	struct sk_buff *skb, *skb_next;
skb              1245 drivers/net/wireless/marvell/mwifiex/wmm.c 	skb = skb_dequeue(&ptr->skb_head);
skb              1247 drivers/net/wireless/marvell/mwifiex/wmm.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb              1249 drivers/net/wireless/marvell/mwifiex/wmm.c 		    "data: dequeuing the packet %p %p\n", ptr, skb);
skb              1263 drivers/net/wireless/marvell/mwifiex/wmm.c 	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
skb              1269 drivers/net/wireless/marvell/mwifiex/wmm.c 			mwifiex_write_data_complete(adapter, skb, 0, -1);
skb              1273 drivers/net/wireless/marvell/mwifiex/wmm.c 		skb_queue_tail(&ptr->skb_head, skb);
skb              1293 drivers/net/wireless/marvell/mwifiex/wmm.c 	struct sk_buff *skb;
skb              1299 drivers/net/wireless/marvell/mwifiex/wmm.c 	skb = skb_peek(&ptr->skb_head);
skb              1301 drivers/net/wireless/marvell/mwifiex/wmm.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb              1320 drivers/net/wireless/marvell/mwifiex/wmm.c 	struct sk_buff *skb, *skb_next;
skb              1328 drivers/net/wireless/marvell/mwifiex/wmm.c 	skb = skb_dequeue(&ptr->skb_head);
skb              1333 drivers/net/wireless/marvell/mwifiex/wmm.c 		skb_queue_tail(&adapter->tx_data_q, skb);
skb              1344 drivers/net/wireless/marvell/mwifiex/wmm.c 	tx_info = MWIFIEX_SKB_TXCB(skb);
skb              1353 drivers/net/wireless/marvell/mwifiex/wmm.c 						   skb, &tx_param);
skb              1356 drivers/net/wireless/marvell/mwifiex/wmm.c 						   skb, &tx_param);
skb              1366 drivers/net/wireless/marvell/mwifiex/wmm.c 			mwifiex_write_data_complete(adapter, skb, 0, -1);
skb              1370 drivers/net/wireless/marvell/mwifiex/wmm.c 		skb_queue_tail(&ptr->skb_head, skb);
skb              1378 drivers/net/wireless/marvell/mwifiex/wmm.c 		mwifiex_write_data_complete(adapter, skb, 0, ret);
skb              1383 drivers/net/wireless/marvell/mwifiex/wmm.c 		mwifiex_write_data_complete(adapter, skb, 0, ret);
skb              1479 drivers/net/wireless/marvell/mwifiex/wmm.c 	struct sk_buff *skb;
skb              1500 drivers/net/wireless/marvell/mwifiex/wmm.c 		skb = skb_dequeue(&priv->bypass_txq);
skb              1501 drivers/net/wireless/marvell/mwifiex/wmm.c 		tx_info = MWIFIEX_SKB_TXCB(skb);
skb              1506 drivers/net/wireless/marvell/mwifiex/wmm.c 		if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
skb              1507 drivers/net/wireless/marvell/mwifiex/wmm.c 			skb_queue_head(&priv->bypass_txq, skb);
skb                57 drivers/net/wireless/marvell/mwifiex/wmm.h 	struct sk_buff *skb;
skb                62 drivers/net/wireless/marvell/mwifiex/wmm.h 	skb = skb_peek(&ptr->skb_head);
skb                64 drivers/net/wireless/marvell/mwifiex/wmm.h 	return skb->priority;
skb               101 drivers/net/wireless/marvell/mwifiex/wmm.h 				 struct sk_buff *skb);
skb               103 drivers/net/wireless/marvell/mwifiex/wmm.h 					struct sk_buff *skb);
skb               116 drivers/net/wireless/marvell/mwifiex/wmm.h 				     const struct sk_buff *skb);
skb               149 drivers/net/wireless/marvell/mwl8k.c 		struct sk_buff *skb;
skb               164 drivers/net/wireless/marvell/mwl8k.c 	struct sk_buff **skb;
skb               813 drivers/net/wireless/marvell/mwl8k.c static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
skb               818 drivers/net/wireless/marvell/mwl8k.c 	tr = (struct mwl8k_dma_data *)skb->data;
skb               831 drivers/net/wireless/marvell/mwl8k.c 		skb_pull(skb, sizeof(*tr) - hdrlen);
skb               837 drivers/net/wireless/marvell/mwl8k.c mwl8k_add_dma_header(struct mwl8k_priv *priv, struct sk_buff *skb,
skb               851 drivers/net/wireless/marvell/mwl8k.c 	wh = (struct ieee80211_hdr *)skb->data;
skb               861 drivers/net/wireless/marvell/mwl8k.c 		if (pskb_expand_head(skb, REDUCED_TX_HEADROOM, 0, GFP_ATOMIC)) {
skb               867 drivers/net/wireless/marvell/mwl8k.c 		skb->truesize += REDUCED_TX_HEADROOM;
skb               873 drivers/net/wireless/marvell/mwl8k.c 		skb_push(skb, reqd_hdrlen - hdrlen);
skb               878 drivers/net/wireless/marvell/mwl8k.c 	tr = (struct mwl8k_dma_data *)skb->data;
skb               889 drivers/net/wireless/marvell/mwl8k.c 	tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad);
skb               893 drivers/net/wireless/marvell/mwl8k.c 		struct sk_buff *skb)
skb               901 drivers/net/wireless/marvell/mwl8k.c 	wh = (struct ieee80211_hdr *)skb->data;
skb               903 drivers/net/wireless/marvell/mwl8k.c 	tx_info = IEEE80211_SKB_CB(skb);
skb               934 drivers/net/wireless/marvell/mwl8k.c 	mwl8k_add_dma_header(priv, skb, head_pad, data_pad);
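mwl8k_add_dma_header() above grows the frame in place and falls back to pskb_expand_head() when headroom runs out. A generic sketch of that insert-a-gap-behind-the-header move; the pad width and GFP_ATOMIC context are assumptions, not mwl8k specifics:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Open a @pad-byte gap between an @hdrlen-byte header and the payload. */
static int insert_hdr_pad(struct sk_buff *skb, int hdrlen, int pad)
{
	if (skb_headroom(skb) < pad &&
	    pskb_expand_head(skb, pad, 0, GFP_ATOMIC))
		return -ENOMEM;

	skb_push(skb, pad);
	memmove(skb->data, skb->data + pad, hdrlen);	/* header to front */
	memset(skb->data + hdrlen, 0, pad);		/* zero the gap */
	return 0;
}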
skb              1212 drivers/net/wireless/marvell/mwl8k.c 		struct sk_buff *skb;
skb              1217 drivers/net/wireless/marvell/mwl8k.c 		skb = dev_alloc_skb(MWL8K_RX_MAXSZ);
skb              1218 drivers/net/wireless/marvell/mwl8k.c 		if (skb == NULL)
skb              1221 drivers/net/wireless/marvell/mwl8k.c 		addr = pci_map_single(priv->pdev, skb->data,
skb              1228 drivers/net/wireless/marvell/mwl8k.c 		rxq->buf[rx].skb = skb;
skb              1251 drivers/net/wireless/marvell/mwl8k.c 		if (rxq->buf[i].skb != NULL) {
skb              1257 drivers/net/wireless/marvell/mwl8k.c 			kfree_skb(rxq->buf[i].skb);
skb              1258 drivers/net/wireless/marvell/mwl8k.c 			rxq->buf[i].skb = NULL;
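The mwl8k rxq refill/clean fragments above show the usual RX ring life cycle: allocate, map for DMA, park in a ring slot, and on teardown unmap and free. A sketch using the generic dma_map_single() API rather than the legacy pci_map_single() the excerpt uses; the buffer size and helper names are placeholders:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

#define RX_BUF_SIZE 3840	/* hypothetical, in the spirit of MWL8K_RX_MAXSZ */

static struct sk_buff *rx_refill_one(struct device *dev, dma_addr_t *addr)
{
	struct sk_buff *skb = dev_alloc_skb(RX_BUF_SIZE);

	if (!skb)
		return NULL;

	*addr = dma_map_single(dev, skb->data, RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *addr)) {
		kfree_skb(skb);
		return NULL;
	}
	return skb;	/* caller stores skb and addr in the ring slot */
}

static void rx_clean_one(struct device *dev, struct sk_buff *skb,
			 dma_addr_t addr)
{
	dma_unmap_single(dev, addr, RX_BUF_SIZE, DMA_FROM_DEVICE);
	kfree_skb(skb);
}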
skb              1285 drivers/net/wireless/marvell/mwl8k.c 				     struct sk_buff *skb)
skb              1297 drivers/net/wireless/marvell/mwl8k.c 	priv->beacon_skb = skb_copy(skb, GFP_ATOMIC);
skb              1326 drivers/net/wireless/marvell/mwl8k.c 		struct sk_buff *skb;
skb              1333 drivers/net/wireless/marvell/mwl8k.c 		skb = rxq->buf[rxq->head].skb;
skb              1334 drivers/net/wireless/marvell/mwl8k.c 		if (skb == NULL)
skb              1344 drivers/net/wireless/marvell/mwl8k.c 		rxq->buf[rxq->head].skb = NULL;
skb              1357 drivers/net/wireless/marvell/mwl8k.c 		wh = &((struct mwl8k_dma_data *)skb->data)->wh;
skb              1364 drivers/net/wireless/marvell/mwl8k.c 		if (mwl8k_capture_bssid(priv, (void *)skb->data))
skb              1365 drivers/net/wireless/marvell/mwl8k.c 			mwl8k_save_beacon(hw, skb);
skb              1393 drivers/net/wireless/marvell/mwl8k.c 					tr = (struct mwl8k_dma_data *)skb->data;
skb              1405 drivers/net/wireless/marvell/mwl8k.c 		skb_put(skb, pkt_len);
skb              1406 drivers/net/wireless/marvell/mwl8k.c 		mwl8k_remove_dma_header(skb, qos);
skb              1407 drivers/net/wireless/marvell/mwl8k.c 		memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
skb              1408 drivers/net/wireless/marvell/mwl8k.c 		ieee80211_rx_irqsafe(hw, skb);
skb              1469 drivers/net/wireless/marvell/mwl8k.c 	txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
skb              1470 drivers/net/wireless/marvell/mwl8k.c 	if (txq->skb == NULL) {
skb              1679 drivers/net/wireless/marvell/mwl8k.c 		struct sk_buff *skb;
skb              1706 drivers/net/wireless/marvell/mwl8k.c 		skb = txq->skb[tx];
skb              1707 drivers/net/wireless/marvell/mwl8k.c 		txq->skb[tx] = NULL;
skb              1709 drivers/net/wireless/marvell/mwl8k.c 		BUG_ON(skb == NULL);
skb              1712 drivers/net/wireless/marvell/mwl8k.c 		mwl8k_remove_dma_header(skb, tx_desc->qos_control);
skb              1714 drivers/net/wireless/marvell/mwl8k.c 		wh = (struct ieee80211_hdr *) skb->data;
skb              1720 drivers/net/wireless/marvell/mwl8k.c 		info = IEEE80211_SKB_CB(skb);
skb              1755 drivers/net/wireless/marvell/mwl8k.c 		ieee80211_tx_status_irqsafe(hw, skb);
skb              1774 drivers/net/wireless/marvell/mwl8k.c 	kfree(txq->skb);
skb              1775 drivers/net/wireless/marvell/mwl8k.c 	txq->skb = NULL;
skb              1896 drivers/net/wireless/marvell/mwl8k.c 	       struct sk_buff *skb)
skb              1913 drivers/net/wireless/marvell/mwl8k.c 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
skb              1916 drivers/net/wireless/marvell/mwl8k.c 	wh = (struct ieee80211_hdr *)skb->data;
skb              1922 drivers/net/wireless/marvell/mwl8k.c 	if (skb->protocol == cpu_to_be16(ETH_P_PAE))
skb              1929 drivers/net/wireless/marvell/mwl8k.c 		mwl8k_encapsulate_tx_frame(priv, skb);
skb              1931 drivers/net/wireless/marvell/mwl8k.c 		mwl8k_add_dma_header(priv, skb, 0, 0);
skb              1933 drivers/net/wireless/marvell/mwl8k.c 	wh = &((struct mwl8k_dma_data *)skb->data)->wh;
skb              1935 drivers/net/wireless/marvell/mwl8k.c 	tx_info = IEEE80211_SKB_CB(skb);
skb              2023 drivers/net/wireless/marvell/mwl8k.c 				dev_kfree_skb(skb);
skb              2044 drivers/net/wireless/marvell/mwl8k.c 	dma = pci_map_single(priv->pdev, skb->data,
skb              2045 drivers/net/wireless/marvell/mwl8k.c 				skb->len, PCI_DMA_TODEVICE);
skb              2055 drivers/net/wireless/marvell/mwl8k.c 		dev_kfree_skb(skb);
skb              2080 drivers/net/wireless/marvell/mwl8k.c 			pci_unmap_single(priv->pdev, dma, skb->len,
skb              2082 drivers/net/wireless/marvell/mwl8k.c 			dev_kfree_skb(skb);
skb              2087 drivers/net/wireless/marvell/mwl8k.c 	BUG_ON(txq->skb[txq->tail] != NULL);
skb              2088 drivers/net/wireless/marvell/mwl8k.c 	txq->skb[txq->tail] = skb;
skb              2095 drivers/net/wireless/marvell/mwl8k.c 	tx->pkt_len = cpu_to_le16(skb->len);
skb              4686 drivers/net/wireless/marvell/mwl8k.c 		     struct sk_buff *skb)
skb              4689 drivers/net/wireless/marvell/mwl8k.c 	int index = skb_get_queue_mapping(skb);
skb              4694 drivers/net/wireless/marvell/mwl8k.c 		dev_kfree_skb(skb);
skb              4698 drivers/net/wireless/marvell/mwl8k.c 	mwl8k_txq_xmit(hw, index, control->sta, skb);
skb              5141 drivers/net/wireless/marvell/mwl8k.c 		struct sk_buff *skb;
skb              5143 drivers/net/wireless/marvell/mwl8k.c 		skb = ieee80211_beacon_get(hw, vif);
skb              5144 drivers/net/wireless/marvell/mwl8k.c 		if (skb != NULL) {
skb              5145 drivers/net/wireless/marvell/mwl8k.c 			mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
skb              5146 drivers/net/wireless/marvell/mwl8k.c 			kfree_skb(skb);
skb              5630 drivers/net/wireless/marvell/mwl8k.c 	struct sk_buff *skb = priv->beacon_skb;
skb              5631 drivers/net/wireless/marvell/mwl8k.c 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
skb              5632 drivers/net/wireless/marvell/mwl8k.c 	int len = skb->len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
skb              5640 drivers/net/wireless/marvell/mwl8k.c 	mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len, dtim_period);
skb              5642 drivers/net/wireless/marvell/mwl8k.c 	dev_kfree_skb(skb);
skb                12 drivers/net/wireless/mediatek/mt76/agg-rx.c 	struct sk_buff *skb;
skb                16 drivers/net/wireless/mediatek/mt76/agg-rx.c 	skb = tid->reorder_buf[idx];
skb                17 drivers/net/wireless/mediatek/mt76/agg-rx.c 	if (!skb)
skb                22 drivers/net/wireless/mediatek/mt76/agg-rx.c 	__skb_queue_tail(frames, skb);
skb                53 drivers/net/wireless/mediatek/mt76/agg-rx.c 	struct sk_buff *skb;
skb                67 drivers/net/wireless/mediatek/mt76/agg-rx.c 		skb = tid->reorder_buf[idx];
skb                68 drivers/net/wireless/mediatek/mt76/agg-rx.c 		if (!skb)
skb                72 drivers/net/wireless/mediatek/mt76/agg-rx.c 		status = (struct mt76_rx_status *)skb->cb;
skb               112 drivers/net/wireless/mediatek/mt76/agg-rx.c mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
skb               114 drivers/net/wireless/mediatek/mt76/agg-rx.c 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
skb               115 drivers/net/wireless/mediatek/mt76/agg-rx.c 	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
skb               138 drivers/net/wireless/mediatek/mt76/agg-rx.c void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
skb               140 drivers/net/wireless/mediatek/mt76/agg-rx.c 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
skb               141 drivers/net/wireless/mediatek/mt76/agg-rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               149 drivers/net/wireless/mediatek/mt76/agg-rx.c 	__skb_queue_tail(frames, skb);
skb               156 drivers/net/wireless/mediatek/mt76/agg-rx.c 		mt76_rx_aggr_check_ctl(skb, frames);
skb               189 drivers/net/wireless/mediatek/mt76/agg-rx.c 		__skb_unlink(skb, frames);
skb               190 drivers/net/wireless/mediatek/mt76/agg-rx.c 		dev_kfree_skb(skb);
skb               201 drivers/net/wireless/mediatek/mt76/agg-rx.c 	__skb_unlink(skb, frames);
skb               216 drivers/net/wireless/mediatek/mt76/agg-rx.c 		dev_kfree_skb(skb);
skb               221 drivers/net/wireless/mediatek/mt76/agg-rx.c 	tid->reorder_buf[idx] = skb;
skb               266 drivers/net/wireless/mediatek/mt76/agg-rx.c 		struct sk_buff *skb = tid->reorder_buf[i];
skb               268 drivers/net/wireless/mediatek/mt76/agg-rx.c 		if (!skb)
skb               272 drivers/net/wireless/mediatek/mt76/agg-rx.c 		dev_kfree_skb(skb);
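agg-rx.c above parks out-of-order A-MPDU subframes in tid->reorder_buf, indexed by sequence number, and releases in-order frames onto a local list. A compact sketch of the core release step under assumed types; the window size and struct layout are placeholders, not mt76's:

#include <linux/skbuff.h>

#define REORDER_WIN 64	/* hypothetical reorder window size */

struct reorder_tid {
	struct sk_buff *buf[REORDER_WIN];
	u16 head;	/* sequence number of the head slot */
};

/* Release the in-order head frame, if present, and slide the window. */
static void reorder_release_head(struct reorder_tid *tid,
				 struct sk_buff_head *frames)
{
	int idx = tid->head % REORDER_WIN;
	struct sk_buff *skb = tid->buf[idx];

	tid->head++;
	if (!skb)
		return;

	tid->buf[idx] = NULL;
	__skb_queue_tail(frames, skb);	/* caller hands frames to mac80211 */
}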
skb                50 drivers/net/wireless/mediatek/mt76/dma.c 		 struct sk_buff *skb, void *txwi)
skb                89 drivers/net/wireless/mediatek/mt76/dma.c 	q->entry[idx].skb = skb;
skb               121 drivers/net/wireless/mediatek/mt76/dma.c 	if (e->skb == DMA_DUMMY_DATA)
skb               122 drivers/net/wireless/mediatek/mt76/dma.c 		e->skb = NULL;
skb               165 drivers/net/wireless/mediatek/mt76/dma.c 		if (entry.skb)
skb               258 drivers/net/wireless/mediatek/mt76/dma.c 			  struct sk_buff *skb, u32 tx_info)
skb               264 drivers/net/wireless/mediatek/mt76/dma.c 	addr = dma_map_single(dev->dev, skb->data, skb->len,
skb               270 drivers/net/wireless/mediatek/mt76/dma.c 	buf.len = skb->len;
skb               273 drivers/net/wireless/mediatek/mt76/dma.c 	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
skb               282 drivers/net/wireless/mediatek/mt76/dma.c 		      struct sk_buff *skb, struct mt76_wcid *wcid,
skb               287 drivers/net/wireless/mediatek/mt76/dma.c 		.skb = skb,
skb               298 drivers/net/wireless/mediatek/mt76/dma.c 		ieee80211_free_txskb(dev->hw, skb);
skb               303 drivers/net/wireless/mediatek/mt76/dma.c 	skb->prev = skb->next = NULL;
skb               305 drivers/net/wireless/mediatek/mt76/dma.c 		mt76_insert_hdr_pad(skb);
skb               307 drivers/net/wireless/mediatek/mt76/dma.c 	len = skb_headlen(skb);
skb               308 drivers/net/wireless/mediatek/mt76/dma.c 	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
skb               317 drivers/net/wireless/mediatek/mt76/dma.c 	skb_walk_frags(skb, iter) {
skb               345 drivers/net/wireless/mediatek/mt76/dma.c 				tx_info.info, tx_info.skb, t);
skb               353 drivers/net/wireless/mediatek/mt76/dma.c 	e.skb = tx_info.skb;
skb               450 drivers/net/wireless/mediatek/mt76/dma.c 	struct sk_buff *skb = q->rx_head;
skb               451 drivers/net/wireless/mediatek/mt76/dma.c 	struct skb_shared_info *shinfo = skb_shinfo(skb);
skb               455 drivers/net/wireless/mediatek/mt76/dma.c 		skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
skb               463 drivers/net/wireless/mediatek/mt76/dma.c 	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
skb               470 drivers/net/wireless/mediatek/mt76/dma.c 	struct sk_buff *skb;
skb               499 drivers/net/wireless/mediatek/mt76/dma.c 		skb = build_skb(data, q->buf_size);
skb               500 drivers/net/wireless/mediatek/mt76/dma.c 		if (!skb) {
skb               504 drivers/net/wireless/mediatek/mt76/dma.c 		skb_reserve(skb, q->buf_offset);
skb               507 drivers/net/wireless/mediatek/mt76/dma.c 			u32 *rxfce = (u32 *)skb->cb;
skb               511 drivers/net/wireless/mediatek/mt76/dma.c 		__skb_put(skb, len);
skb               515 drivers/net/wireless/mediatek/mt76/dma.c 			q->rx_head = skb;
skb               519 drivers/net/wireless/mediatek/mt76/dma.c 		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
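The mt76 DMA RX path above wraps a pre-mapped buffer with build_skb() instead of copying into a fresh allocation. A sketch of that zero-copy receive, assuming @data was sized to leave room for struct skb_shared_info as build_skb() requires:

#include <linux/skbuff.h>

/* Wrap a DMA buffer in an skb: no copy, headroom carved out up front. */
static struct sk_buff *wrap_rx_buf(void *data, unsigned int buf_size,
				   unsigned int headroom, unsigned int len)
{
	struct sk_buff *skb = build_skb(data, buf_size);

	if (!skb)
		return NULL;		/* caller frees the raw buffer */

	skb_reserve(skb, headroom);	/* e.g. the queue's buf_offset */
	__skb_put(skb, len);		/* payload already written by the device */
	return skb;
}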
skb               375 drivers/net/wireless/mediatek/mt76/mac80211.c void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
skb               378 drivers/net/wireless/mediatek/mt76/mac80211.c 		dev_kfree_skb(skb);
skb               382 drivers/net/wireless/mediatek/mt76/mac80211.c 	__skb_queue_tail(&dev->rx_skb[q], skb);
skb               487 drivers/net/wireless/mediatek/mt76/mac80211.c static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
skb               489 drivers/net/wireless/mediatek/mt76/mac80211.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb               492 drivers/net/wireless/mediatek/mt76/mac80211.c 	mstat = *((struct mt76_rx_status *)skb->cb);
skb               506 drivers/net/wireless/mediatek/mt76/mac80211.c 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
skb               516 drivers/net/wireless/mediatek/mt76/mac80211.c mt76_check_ccmp_pn(struct sk_buff *skb)
skb               518 drivers/net/wireless/mediatek/mt76/mac80211.c 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
skb               534 drivers/net/wireless/mediatek/mt76/mac80211.c 		hdr = (struct ieee80211_hdr *)skb->data;
skb               555 drivers/net/wireless/mediatek/mt76/mac80211.c mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
skb               557 drivers/net/wireless/mediatek/mt76/mac80211.c 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
skb               558 drivers/net/wireless/mediatek/mt76/mac80211.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               629 drivers/net/wireless/mediatek/mt76/mac80211.c 	struct sk_buff *skb;
skb               632 drivers/net/wireless/mediatek/mt76/mac80211.c 	while ((skb = __skb_dequeue(frames)) != NULL) {
skb               633 drivers/net/wireless/mediatek/mt76/mac80211.c 		if (mt76_check_ccmp_pn(skb)) {
skb               634 drivers/net/wireless/mediatek/mt76/mac80211.c 			dev_kfree_skb(skb);
skb               638 drivers/net/wireless/mediatek/mt76/mac80211.c 		sta = mt76_rx_convert(skb);
skb               639 drivers/net/wireless/mediatek/mt76/mac80211.c 		ieee80211_rx_napi(dev->hw, sta, skb, napi);
skb               648 drivers/net/wireless/mediatek/mt76/mac80211.c 	struct sk_buff *skb;
skb               652 drivers/net/wireless/mediatek/mt76/mac80211.c 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
skb               653 drivers/net/wireless/mediatek/mt76/mac80211.c 		mt76_check_sta(dev, skb);
skb               654 drivers/net/wireless/mediatek/mt76/mac80211.c 		mt76_rx_aggr_reorder(skb, &frames);
skb               826 drivers/net/wireless/mediatek/mt76/mac80211.c void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
skb               828 drivers/net/wireless/mediatek/mt76/mac80211.c 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
skb               829 drivers/net/wireless/mediatek/mt76/mac80211.c 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
skb               832 drivers/net/wireless/mediatek/mt76/mac80211.c 	__skb_push(skb, 8);
skb               833 drivers/net/wireless/mediatek/mt76/mac80211.c 	memmove(skb->data, skb->data + 8, hdr_len);
skb               834 drivers/net/wireless/mediatek/mt76/mac80211.c 	hdr = skb->data + hdr_len;
skb                12 drivers/net/wireless/mediatek/mt76/mcu.c 	struct sk_buff *skb;
skb                14 drivers/net/wireless/mediatek/mt76/mcu.c 	skb = alloc_skb(head_len + data_len + tail_len,
skb                16 drivers/net/wireless/mediatek/mt76/mcu.c 	if (!skb)
skb                19 drivers/net/wireless/mediatek/mt76/mcu.c 	skb_reserve(skb, head_len);
skb                21 drivers/net/wireless/mediatek/mt76/mcu.c 		skb_put_data(skb, data, data_len);
skb                23 drivers/net/wireless/mediatek/mt76/mcu.c 	return skb;
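The mcu.c allocator above reserves headroom so the transport can later skb_push() its command descriptor without reallocating. A sketch of the same allocation idiom with hypothetical size parameters:

#include <linux/skbuff.h>

/* Allocate a message with room reserved for a header pushed later. */
static struct sk_buff *msg_alloc(const void *data, int head_len,
				 int data_len, int tail_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(head_len + data_len + tail_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_len);	/* headroom for a later skb_push() */
	if (data && data_len)
		skb_put_data(skb, data, data_len);
	return skb;
}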
skb                44 drivers/net/wireless/mediatek/mt76/mcu.c void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb)
skb                46 drivers/net/wireless/mediatek/mt76/mcu.c 	skb_queue_tail(&dev->mmio.mcu.res_q, skb);
skb                81 drivers/net/wireless/mediatek/mt76/mt76.h 	struct sk_buff *skb;
skb                89 drivers/net/wireless/mediatek/mt76/mt76.h 		struct sk_buff *skb;
skb               157 drivers/net/wireless/mediatek/mt76/mt76.h 		       struct sk_buff *skb, void *txwi);
skb               160 drivers/net/wireless/mediatek/mt76/mt76.h 			    struct sk_buff *skb, struct mt76_wcid *wcid,
skb               164 drivers/net/wireless/mediatek/mt76/mt76.h 				struct sk_buff *skb, u32 tx_info);
skb               231 drivers/net/wireless/mediatek/mt76/mt76.h 	struct sk_buff *skb;
skb               305 drivers/net/wireless/mediatek/mt76/mt76.h 		       struct sk_buff *skb);
skb               674 drivers/net/wireless/mediatek/mt76/mt76.h static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
skb               677 drivers/net/wireless/mediatek/mt76/mt76.h 		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
skb               678 drivers/net/wireless/mediatek/mt76/mt76.h 	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
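mt76_tx_skb_cb() above overlays driver state on the control-buffer bytes mac80211 leaves to drivers, guarding the size at compile time. A generic sketch of the skb->cb stashing pattern with a hypothetical state struct (skb->cb is 48 bytes):

#include <linux/bug.h>
#include <linux/skbuff.h>

struct my_tx_cb {		/* hypothetical per-packet driver state */
	unsigned long jiffies;
	u8 wcid;
	u8 pktid;
};

static inline struct my_tx_cb *my_tx_cb(struct sk_buff *skb)
{
	/* Refuse to build if the state ever outgrows the control buffer. */
	BUILD_BUG_ON(sizeof(struct my_tx_cb) > sizeof(skb->cb));
	return (struct my_tx_cb *)skb->cb;
}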
skb               681 drivers/net/wireless/mediatek/mt76/mt76.h static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
skb               683 drivers/net/wireless/mediatek/mt76/mt76.h 	int len = ieee80211_get_hdrlen_from_skb(skb);
skb               688 drivers/net/wireless/mediatek/mt76/mt76.h 	skb_push(skb, 2);
skb               689 drivers/net/wireless/mediatek/mt76/mt76.h 	memmove(skb->data, skb->data + 2, len);
skb               691 drivers/net/wireless/mediatek/mt76/mt76.h 	skb->data[len] = 0;
skb               692 drivers/net/wireless/mediatek/mt76/mt76.h 	skb->data[len + 1] = 0;
skb               703 drivers/net/wireless/mediatek/mt76/mt76.h void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
skb               705 drivers/net/wireless/mediatek/mt76/mt76.h 	     struct mt76_wcid *wcid, struct sk_buff *skb);
skb               738 drivers/net/wireless/mediatek/mt76/mt76.h 			   struct sk_buff *skb);
skb               742 drivers/net/wireless/mediatek/mt76/mt76.h void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
skb               744 drivers/net/wireless/mediatek/mt76/mt76.h void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
skb               763 drivers/net/wireless/mediatek/mt76/mt76.h void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
skb               780 drivers/net/wireless/mediatek/mt76/mt76.h void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
skb               830 drivers/net/wireless/mediatek/mt76/mt76.h void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
skb                17 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	struct sk_buff *skb = NULL;
skb                22 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
skb                23 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	if (!skb)
skb                26 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	mt76_tx_queue_skb(dev, MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
skb                49 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	struct sk_buff *skb;
skb                54 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
skb                55 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	if (!skb)
skb                58 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	info = IEEE80211_SKB_CB(skb);
skb                61 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	mt76_skb_set_moredata(skb, true);
skb                62 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	__skb_queue_tail(&data->q, skb);
skb                63 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	data->tail[mvif->idx] = skb;
skb                72 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	struct sk_buff *skb;
skb               118 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 	while ((skb = __skb_dequeue(&data.q)) != NULL) {
skb               119 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               123 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c 		mt76_tx_queue_skb(dev, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
skb                31 drivers/net/wireless/mediatek/mt76/mt7603/dma.c mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
skb                33 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 	__le32 *txd = (__le32 *)skb->data;
skb                43 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 	if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
skb                48 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 	skb->priority = FIELD_GET(MT_TXD1_TID, val);
skb                59 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 	skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
skb                66 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 	hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
skb                71 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 	__skb_queue_tail(&msta->psq, skb);
skb                73 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 		skb = __skb_dequeue(&msta->psq);
skb                74 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 		dev_kfree_skb(skb);
skb                80 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 	dev_kfree_skb(skb);
skb                84 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 			 struct sk_buff *skb)
skb                87 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 	__le32 *rxd = (__le32 *)skb->data;
skb                88 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 	__le32 *end = (__le32 *)&skb->data[skb->len];
skb                95 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 			mt76_mcu_rx_event(&dev->mt76, skb);
skb                97 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 			mt7603_rx_loopback_skb(dev, skb);
skb               105 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 		dev_kfree_skb(skb);
skb               108 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 		mt76_mcu_rx_event(&dev->mt76, skb);
skb               111 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 		if (mt7603_mac_fill_rx(dev, skb) == 0) {
skb               112 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 			mt76_rx(&dev->mt76, q, skb);
skb               117 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 		dev_kfree_skb(skb);
skb               397 drivers/net/wireless/mediatek/mt76/mt7603/mac.c mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
skb               399 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
skb               402 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	__le32 *rxd = (__le32 *)skb->data;
skb               449 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		if ((u8 *)rxd - skb->data >= skb->len)
skb               467 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		if ((u8 *)rxd - skb->data >= skb->len)
skb               472 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		if ((u8 *)rxd - skb->data >= skb->len)
skb               523 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		if ((u8 *)rxd - skb->data >= skb->len)
skb               529 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
skb               534 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		mt76_insert_ccmp_hdr(skb, key_id);
skb               537 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	hdr = (struct ieee80211_hdr *)skb->data;
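mt7603_mac_fill_rx() above advances an __le32 cursor through optional RX descriptor groups, bailing out whenever the cursor would pass skb->len, then skb_pull()s the consumed bytes. A sketch of that bounded walk; the group sizes and flag bit are illustrative only:

#include <linux/skbuff.h>

/* Walk a fixed group plus one optional group; 0 on success. */
static int parse_rx_desc(struct sk_buff *skb, u32 flags)
{
	__le32 *rxd = (__le32 *)skb->data;

	rxd += 4;			/* fixed part, hypothetical size */
	if (flags & BIT(0)) {		/* optional group present? */
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;	/* truncated frame */
	}

	skb_pull(skb, (u8 *)rxd - skb->data);
	return 0;
}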
skb               779 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		      struct sk_buff *skb, enum mt76_txq_id qid,
skb               783 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               785 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               786 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
skb               791 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
skb               821 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
skb               828 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
skb               914 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
skb               930 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
skb               940 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
skb              1087 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct sk_buff *skb;
skb              1093 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
skb              1094 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	if (skb) {
skb              1095 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1102 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		mt76_tx_status_skb_done(mdev, skb, &list);
skb              1106 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	return !!skb;
skb              1157 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct sk_buff *skb = e->skb;
skb              1160 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		dev_kfree_skb_any(skb);
skb              1167 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	mt76_tx_complete_skb(mdev, skb);
skb               369 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	struct sk_buff *skb;
skb               371 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	while ((skb = __skb_dequeue(list)) != NULL)
skb               372 drivers/net/wireless/mediatek/mt76/mt7603/main.c 		mt76_tx_queue_skb_raw(dev, skb_get_queue_mapping(skb),
skb               373 drivers/net/wireless/mediatek/mt76/mt7603/main.c 				      skb, 0);
skb               398 drivers/net/wireless/mediatek/mt76/mt7603/main.c mt7603_ps_set_more_data(struct sk_buff *skb)
skb               402 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
skb               416 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	struct sk_buff *skb, *tmp;
skb               423 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	skb_queue_walk_safe(&msta->psq, skb, tmp) {
skb               427 drivers/net/wireless/mediatek/mt76/mt7603/main.c 		if (!(tids & BIT(skb->priority)))
skb               430 drivers/net/wireless/mediatek/mt76/mt7603/main.c 		skb_set_queue_mapping(skb, MT_TXQ_PSD);
skb               431 drivers/net/wireless/mediatek/mt76/mt7603/main.c 		__skb_unlink(skb, &msta->psq);
skb               432 drivers/net/wireless/mediatek/mt76/mt7603/main.c 		mt7603_ps_set_more_data(skb);
skb               433 drivers/net/wireless/mediatek/mt76/mt7603/main.c 		__skb_queue_tail(&list, skb);
skb               634 drivers/net/wireless/mediatek/mt76/mt7603/main.c 		      struct sk_buff *skb)
skb               636 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               653 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	mt76_tx(&dev->mt76, control->sta, wcid, skb);
skb                17 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
skb                29 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	txd = (struct mt7603_mcu_txd *)skb_push(skb, hdrlen);
skb                32 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	txd->len = cpu_to_le16(skb->len);
skb                53 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	return mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0);
skb                63 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	struct sk_buff *skb;
skb                66 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	skb = mt7603_mcu_msg_alloc(data, len);
skb                67 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	if (!skb)
skb                72 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
skb                79 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 		skb = mt76_mcu_get_response(&dev->mt76, expires);
skb                80 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 		if (!skb) {
skb                89 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 		rxd = (struct mt7603_mcu_rxd *)skb->data;
skb                93 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 		dev_kfree_skb(skb);
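The mt7603 MCU send path above pushes a descriptor in front of the payload, queues it to the firmware, and polls mt76_mcu_get_response() for an skb whose sequence number matches. A condensed sketch of the send side with a hypothetical 4-byte descriptor, relying on headroom reserved at allocation time:

#include <linux/skbuff.h>

struct mcu_txd {		/* hypothetical firmware descriptor */
	__le16 len;
	u8 cmd;
	u8 seq;
};

/* Prepend the descriptor; len covers descriptor plus payload. */
static void mcu_push_txd(struct sk_buff *skb, u8 cmd, u8 seq)
{
	struct mcu_txd *txd = skb_push(skb, sizeof(*txd));

	txd->len = cpu_to_le16(skb->len);
	txd->cmd = cmd;
	txd->seq = seq;
}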
skb               200 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
skb               236 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h 			 struct sk_buff *skb);
skb                60 drivers/net/wireless/mediatek/mt76/mt7615/dma.c 			 struct sk_buff *skb)
skb                63 drivers/net/wireless/mediatek/mt76/mt7615/dma.c 	__le32 *rxd = (__le32 *)skb->data;
skb                64 drivers/net/wireless/mediatek/mt76/mt7615/dma.c 	__le32 *end = (__le32 *)&skb->data[skb->len];
skb                73 drivers/net/wireless/mediatek/mt76/mt7615/dma.c 		dev_kfree_skb(skb);
skb                76 drivers/net/wireless/mediatek/mt76/mt7615/dma.c 		mt7615_mac_tx_free(dev, skb);
skb                79 drivers/net/wireless/mediatek/mt76/mt7615/dma.c 		mt7615_mcu_rx_event(dev, skb);
skb                82 drivers/net/wireless/mediatek/mt76/mt7615/dma.c 		if (!mt7615_mac_fill_rx(dev, skb)) {
skb                83 drivers/net/wireless/mediatek/mt76/mt7615/dma.c 			mt76_rx(&dev->mt76, q, skb);
skb                88 drivers/net/wireless/mediatek/mt76/mt7615/dma.c 		dev_kfree_skb(skb);
skb               316 drivers/net/wireless/mediatek/mt76/mt7615/init.c 		if (txwi->skb)
skb               317 drivers/net/wireless/mediatek/mt76/mt7615/init.c 			dev_kfree_skb_any(txwi->skb);
skb                44 drivers/net/wireless/mediatek/mt76/mt7615/mac.c int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
skb                46 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
skb                49 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	__le32 *rxd = (__le32 *)skb->data;
skb                97 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		if ((u8 *)rxd - skb->data >= skb->len)
skb               115 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		if ((u8 *)rxd - skb->data >= skb->len)
skb               121 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		if ((u8 *)rxd - skb->data >= skb->len)
skb               194 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		if ((u8 *)rxd - skb->data >= skb->len)
skb               198 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
skb               203 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		mt76_insert_ccmp_hdr(skb, key_id);
skb               206 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               226 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		dev_kfree_skb_any(e->skb);
skb               231 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	if (e->skb == DMA_DUMMY_DATA) {
skb               242 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		e->skb = t ? t->skb : NULL;
skb               245 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	if (e->skb)
skb               246 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		mt76_tx_complete_skb(mdev, e->skb);
skb               306 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 			  struct sk_buff *skb, struct mt76_wcid *wcid,
skb               310 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               312 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               339 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 			skb_get_queue_mapping(skb);
skb               349 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
skb               358 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
skb               360 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
skb               369 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
skb               428 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
skb               767 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
skb               770 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
skb               781 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
skb               791 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
skb               820 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	t->skb = tx_info->skb;
skb               830 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	tx_info->skb = DMA_DUMMY_DATA;
skb               978 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct sk_buff *skb;
skb               984 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
skb               985 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	if (skb) {
skb               986 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               993 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		mt76_tx_status_skb_done(mdev, skb, &list);
skb               997 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	return !!skb;
skb              1044 drivers/net/wireless/mediatek/mt76/mt7615/mac.c void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
skb              1046 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
skb              1061 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		if (txwi->skb) {
skb              1062 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 			mt76_tx_complete_skb(mdev, txwi->skb);
skb              1063 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 			txwi->skb = NULL;
skb              1068 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	dev_kfree_skb(skb);
skb               405 drivers/net/wireless/mediatek/mt76/mt7615/main.c 		      struct sk_buff *skb)
skb               408 drivers/net/wireless/mediatek/mt76/mt7615/main.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               426 drivers/net/wireless/mediatek/mt76/mt7615/main.c 	mt76_tx(&dev->mt76, control->sta, wcid, skb);
skb                51 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
skb                64 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	mcu_txd = (struct mt7615_mcu_txd *)skb_push(skb,
skb                78 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
skb                88 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
skb               112 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	return mt76_tx_queue_skb_raw(dev, qid, skb, 0);
skb               117 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 			  struct sk_buff *skb, int seq)
skb               119 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
skb               127 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		skb_pull(skb, sizeof(*rxd) - 4);
skb               128 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		ret = *skb->data;
skb               131 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		skb_pull(skb, sizeof(*rxd));
skb               132 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		ret = le32_to_cpu(*(__le32 *)skb->data);
skb               137 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	dev_kfree_skb(skb);
skb               148 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	struct sk_buff *skb;
skb               151 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	skb = mt7615_mcu_msg_alloc(data, len);
skb               152 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	if (!skb)
skb               157 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	ret = __mt7615_mcu_msg_send(dev, skb, cmd, &seq);
skb               162 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		skb = mt76_mcu_get_response(mdev, expires);
skb               163 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		if (!skb) {
skb               170 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		ret = mt7615_mcu_parse_response(dev, cmd, skb, seq);
skb               189 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb)
skb               191 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
skb               209 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb)
skb               211 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
skb               215 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		mt7615_mcu_rx_ext_event(dev, skb);
skb               220 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	dev_kfree_skb(skb);
skb               223 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb)
skb               225 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
skb               232 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		mt7615_mcu_rx_unsolicited_event(dev, skb);
skb               234 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		mt76_mcu_rx_event(&dev->mt76, skb);
skb              1121 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	struct sk_buff *skb;
skb              1123 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
skb              1124 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	if (!skb)
skb              1127 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	if (skb->len > 512 - MT_TXD_SIZE) {
skb              1129 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		dev_kfree_skb(skb);
skb              1133 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
skb              1135 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
skb              1136 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
skb              1143 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		req.csa_cnt = skb->data[offs.csa_counter_offs[0]];
skb              1145 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	dev_kfree_skb(skb);
skb               197 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
skb               235 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h 			  struct sk_buff *skb, struct mt76_wcid *wcid,
skb               238 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
skb               240 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
skb               262 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h 			 struct sk_buff *skb);
skb               168 drivers/net/wireless/mediatek/mt76/mt76x02.h void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
skb               171 drivers/net/wireless/mediatek/mt76/mt76x02.h 			  struct sk_buff *skb);
skb               175 drivers/net/wireless/mediatek/mt76/mt76x02.h 		struct sk_buff *skb);
skb                26 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
skb                31 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
skb                34 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
skb                39 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	mt76_wr_copy(dev, offset, skb->data, skb->len);
skb                45 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 			 struct sk_buff *skb)
skb                55 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	if (skb) {
skb                56 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 		ret = mt76x02_write_beacon(dev, beacon_addr, skb);
skb                71 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 			   struct sk_buff *skb)
skb                79 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 			force_update = !!dev->beacons[i] ^ !!skb;
skb                84 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 			dev->beacons[i] = skb;
skb                85 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 			__mt76x02_mac_set_beacon(dev, bcn_idx, skb);
skb               179 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	struct sk_buff *skb = NULL;
skb               184 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
skb               185 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	if (!skb)
skb               188 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	mt76x02_mac_set_beacon(dev, mvif->idx, skb);
skb               199 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	struct sk_buff *skb;
skb               204 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
skb               205 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	if (!skb)
skb               208 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	info = IEEE80211_SKB_CB(skb);
skb               211 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	mt76_skb_set_moredata(skb, true);
skb               212 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	__skb_queue_tail(&data->q, skb);
skb               213 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c 	data->tail[mvif->idx] = skb;
skb               314 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 			    struct sk_buff *skb, struct mt76_wcid *wcid,
skb               317 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               318 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               333 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 		ieee80211_get_tx_rates(info->control.vif, sta, skb,
skb               544 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 			status.skb = mt76_tx_status_skb_get(mdev, wcid,
skb               546 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 		if (status.skb)
skb               547 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 			status.info = IEEE80211_SKB_CB(status.skb);
skb               550 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
skb               556 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	if (msta && stat->aggr && !status.skb) {
skb               583 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	if (status.skb)
skb               584 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 		mt76_tx_status_skb_done(mdev, status.skb, &list);
skb               587 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	if (!status.skb)
skb               707 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
skb               710 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
skb               744 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
skb               745 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 		u8 *data = skb->data + offset;
skb               766 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	mt76x02_remove_hdr_pad(skb, pad_len);
skb               771 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	if (WARN_ON_ONCE(len > skb->len))
skb               774 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	pskb_trim(skb, len);
skb               834 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 		dev_kfree_skb_any(e->skb);
skb               844 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	mt76_tx_complete_skb(mdev, e->skb);
skb               180 drivers/net/wireless/mediatek/mt76/mt76x02_mac.h int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
skb               187 drivers/net/wireless/mediatek/mt76/mt76x02_mac.h 			    struct sk_buff *skb, struct mt76_wcid *wcid,
skb               197 drivers/net/wireless/mediatek/mt76/mt76x02_mac.h 			   struct sk_buff *skb);
skb                18 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 	struct sk_buff *skb;
skb                23 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 	skb = mt76x02_mcu_msg_alloc(data, len);
skb                24 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 	if (!skb)
skb                37 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 		  FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
skb                39 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 	ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, tx_info);
skb                47 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 		skb = mt76_mcu_get_response(&dev->mt76, expires);
skb                48 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 		if (!skb) {
skb                57 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 		rxfce = (u32 *)skb->cb;
skb                62 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 		dev_kfree_skb(skb);
skb               139 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 	struct sk_buff *skb;
skb               144 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 	while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
skb               145 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 		dev_kfree_skb(skb);
skb                19 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c 	struct sk_buff *skb;
skb                49 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c 	while ((skb = __skb_dequeue(&data.q)) != NULL) {
skb                50 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb                54 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c 		mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
skb                12 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 		struct sk_buff *skb)
skb                14 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb                31 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	mt76_tx(&dev->mt76, control->sta, wcid, skb);
skb                36 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 			  struct sk_buff *skb)
skb                39 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	void *rxwi = skb->data;
skb                43 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 		mt76_mcu_rx_event(&dev->mt76, skb);
skb                47 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	skb_pull(skb, sizeof(struct mt76x02_rxwi));
skb                48 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	if (mt76x02_mac_process_rx(dev, skb, rxwi)) {
skb                49 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 		dev_kfree_skb(skb);
skb                53 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	mt76_rx(mdev, q, skb);
skb               144 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
skb               146 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
skb               153 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	len = tx_info->skb->len - (hdrlen & 2);
skb               154 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
skb               156 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
skb                16 drivers/net/wireless/mediatek/mt76/mt76x02_usb.h int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
skb                 8 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
skb                12 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN);
skb                13 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	hdr_len = ieee80211_get_hdrlen_from_skb(skb);
skb                15 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 		mt76x02_remove_hdr_pad(skb, 2);
skb                21 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	mt76x02u_remove_dma_hdr(e->skb);
skb                22 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	mt76_tx_complete_skb(mdev, e->skb);
skb                26 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
skb                28 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	struct sk_buff *iter, *last = skb;
skb                37 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
skb                39 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
skb                42 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	pad = round_up(skb->len, 4) + 4 - skb->len;
skb                47 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	skb_walk_frags(skb, iter) {
skb                50 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 			skb->data_len += pad;
skb                51 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 			skb->len += pad;
skb                69 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
skb                71 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
skb                75 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	mt76_insert_hdr_pad(tx_info->skb);
skb                77 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	txwi = (struct mt76x02_txwi *)(tx_info->skb->data - sizeof(*txwi));
skb                78 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
skb                79 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	skb_push(tx_info->skb, sizeof(*txwi));
skb                81 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
skb               100 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
skb               171 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	struct sk_buff *skb;
skb               190 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 		skb = __skb_dequeue(&data.q);
skb               191 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 		mt76x02_mac_set_beacon(dev, i, skb);
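
The mt76x02_usb_core.c entries above wrap each USB frame in a DMA info header: a 4-byte little-endian word pushed in front, then zero padding to a dword boundary plus a 4-byte terminator, with the pad accounted on the last fragment when the skb carries a frag list. A sketch of that wrap, with illustrative field masks standing in for the MT_TXD_INFO_* definitions:

	#include <linux/bitfield.h>
	#include <linux/skbuff.h>
	#include <asm/unaligned.h>

	#define DMA_INFO_LEN	GENMASK(15, 0)	/* illustrative masks */
	#define DMA_INFO_PORT	GENMASK(30, 27)

	static int usb_dma_wrap(struct sk_buff *skb, u32 port)
	{
		struct sk_buff *iter, *last = skb;
		u32 info, pad;

		info = FIELD_PREP(DMA_INFO_LEN, round_up(skb->len, 4)) |
		       FIELD_PREP(DMA_INFO_PORT, port);
		put_unaligned_le32(info, skb_push(skb, sizeof(info)));

		/* Pad to a dword boundary plus a 4-byte zero terminator. */
		pad = round_up(skb->len, 4) + 4 - skb->len;
		skb_walk_frags(skb, iter) {
			last = iter;
			if (!iter->next) {
				/* Parent lengths must track the fragment's pad. */
				skb->data_len += pad;
				skb->len += pad;
				break;
			}
		}

		if (pad) {
			/* skb_pad() zeroes the tail but frees 'last' on failure. */
			if (skb_pad(last, pad))
				return -ENOMEM;
			__skb_put(last, pad);
		}
		return 0;
	}
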
skb                82 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
skb               102 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
skb               106 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500);
skb               113 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	consume_skb(skb);
skb               123 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	struct sk_buff *skb;
skb               126 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	skb = mt76_mcu_msg_alloc(data, MT_CMD_HDR_LEN, len, 8);
skb               127 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	if (!skb)
skb               131 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
skb               137 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c static inline void skb_put_le32(struct sk_buff *skb, u32 val)
skb               139 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	put_unaligned_le32(val, skb_put(skb, 4));
skb               149 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	struct sk_buff *skb;
skb               157 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
skb               158 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	if (!skb)
skb               160 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	skb_reserve(skb, MT_DMA_HDR_LEN);
skb               163 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 		skb_put_le32(skb, base + data[i].reg);
skb               164 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 		skb_put_le32(skb, data[i].value);
skb               168 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
skb               183 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	struct sk_buff *skb;
skb               193 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
skb               194 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	if (!skb)
skb               196 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	skb_reserve(skb, MT_DMA_HDR_LEN);
skb               199 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 		skb_put_le32(skb, base + data[i].reg);
skb               200 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 		skb_put_le32(skb, data[i].value);
skb               210 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);
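
The mt76x02_usb_mcu.c entries above batch register writes by appending (address, value) pairs as little-endian words; the `cnt == n` argument visible above requests a response only for the final chunk of a burst. A sketch of the append side, with an illustrative pair type:

	#include <linux/skbuff.h>
	#include <asm/unaligned.h>

	struct reg_pair {		/* illustrative; mt76_reg_pair in the driver */
		u32 reg;
		u32 value;
	};

	static void skb_put_le32(struct sk_buff *skb, u32 val)
	{
		put_unaligned_le32(val, skb_put(skb, 4));
	}

	/* Append cnt pairs; the caller sized the skb for cnt * 8 bytes. */
	static void mcu_put_reg_pairs(struct sk_buff *skb, u32 base,
				      const struct reg_pair *data, int cnt)
	{
		int i;

		for (i = 0; i < cnt; i++) {
			skb_put_le32(skb, base + data[i].reg);
			skb_put_le32(skb, data[i].value);
		}
	}
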
skb               592 drivers/net/wireless/mediatek/mt76/mt76x02_util.c void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
skb               599 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
skb               600 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	memmove(skb->data + len, skb->data, hdrlen);
skb               601 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	skb_pull(skb, len);
skb                41 drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
skb                87 drivers/net/wireless/mediatek/mt76/tx.c mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
skb                89 drivers/net/wireless/mediatek/mt76/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               112 drivers/net/wireless/mediatek/mt76/tx.c 	struct sk_buff *skb;
skb               117 drivers/net/wireless/mediatek/mt76/tx.c 	while ((skb = __skb_dequeue(list)) != NULL)
skb               118 drivers/net/wireless/mediatek/mt76/tx.c 		ieee80211_tx_status(dev->hw, skb);
skb               123 drivers/net/wireless/mediatek/mt76/tx.c __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
skb               126 drivers/net/wireless/mediatek/mt76/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               127 drivers/net/wireless/mediatek/mt76/tx.c 	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
skb               136 drivers/net/wireless/mediatek/mt76/tx.c 	__skb_unlink(skb, &dev->status_list);
skb               145 drivers/net/wireless/mediatek/mt76/tx.c 	__skb_queue_tail(list, skb);
skb               149 drivers/net/wireless/mediatek/mt76/tx.c mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
skb               152 drivers/net/wireless/mediatek/mt76/tx.c 	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
skb               158 drivers/net/wireless/mediatek/mt76/tx.c 		       struct sk_buff *skb)
skb               160 drivers/net/wireless/mediatek/mt76/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               161 drivers/net/wireless/mediatek/mt76/tx.c 	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
skb               187 drivers/net/wireless/mediatek/mt76/tx.c 	__skb_queue_tail(&dev->status_list, skb);
skb               198 drivers/net/wireless/mediatek/mt76/tx.c 	struct sk_buff *skb, *tmp;
skb               200 drivers/net/wireless/mediatek/mt76/tx.c 	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
skb               201 drivers/net/wireless/mediatek/mt76/tx.c 		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
skb               207 drivers/net/wireless/mediatek/mt76/tx.c 			return skb;
skb               213 drivers/net/wireless/mediatek/mt76/tx.c 		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
skb               232 drivers/net/wireless/mediatek/mt76/tx.c void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
skb               236 drivers/net/wireless/mediatek/mt76/tx.c 	if (!skb->prev) {
skb               237 drivers/net/wireless/mediatek/mt76/tx.c 		ieee80211_free_txskb(dev->hw, skb);
skb               242 drivers/net/wireless/mediatek/mt76/tx.c 	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
skb               249 drivers/net/wireless/mediatek/mt76/tx.c 	struct mt76_wcid *wcid, struct sk_buff *skb)
skb               251 drivers/net/wireless/mediatek/mt76/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               252 drivers/net/wireless/mediatek/mt76/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               254 drivers/net/wireless/mediatek/mt76/tx.c 	int qid = skb_get_queue_mapping(skb);
skb               258 drivers/net/wireless/mediatek/mt76/tx.c 		skb_set_queue_mapping(skb, qid);
skb               262 drivers/net/wireless/mediatek/mt76/tx.c 		ieee80211_get_tx_rates(info->control.vif, sta, skb,
skb               270 drivers/net/wireless/mediatek/mt76/tx.c 		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
skb               275 drivers/net/wireless/mediatek/mt76/tx.c 			mt76_check_agg_ssn(mtxq, skb);
skb               281 drivers/net/wireless/mediatek/mt76/tx.c 	dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
skb               285 drivers/net/wireless/mediatek/mt76/tx.c 		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
skb               297 drivers/net/wireless/mediatek/mt76/tx.c 	struct sk_buff *skb;
skb               299 drivers/net/wireless/mediatek/mt76/tx.c 	skb = skb_dequeue(&mtxq->retry_q);
skb               300 drivers/net/wireless/mediatek/mt76/tx.c 	if (skb) {
skb               301 drivers/net/wireless/mediatek/mt76/tx.c 		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
skb               306 drivers/net/wireless/mediatek/mt76/tx.c 		return skb;
skb               309 drivers/net/wireless/mediatek/mt76/tx.c 	skb = ieee80211_tx_dequeue(dev->hw, txq);
skb               310 drivers/net/wireless/mediatek/mt76/tx.c 	if (!skb)
skb               313 drivers/net/wireless/mediatek/mt76/tx.c 	return skb;
skb               318 drivers/net/wireless/mediatek/mt76/tx.c 		  struct sk_buff *skb, bool last)
skb               321 drivers/net/wireless/mediatek/mt76/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               328 drivers/net/wireless/mediatek/mt76/tx.c 	mt76_skb_set_moredata(skb, !last);
skb               329 drivers/net/wireless/mediatek/mt76/tx.c 	dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
skb               347 drivers/net/wireless/mediatek/mt76/tx.c 		struct sk_buff *skb;
skb               353 drivers/net/wireless/mediatek/mt76/tx.c 			skb = mt76_txq_dequeue(dev, mtxq, true);
skb               354 drivers/net/wireless/mediatek/mt76/tx.c 			if (!skb)
skb               358 drivers/net/wireless/mediatek/mt76/tx.c 				mt76_check_agg_ssn(mtxq, skb);
skb               364 drivers/net/wireless/mediatek/mt76/tx.c 			last_skb = skb;
skb               388 drivers/net/wireless/mediatek/mt76/tx.c 	struct sk_buff *skb;
skb               400 drivers/net/wireless/mediatek/mt76/tx.c 	skb = mt76_txq_dequeue(dev, mtxq, false);
skb               401 drivers/net/wireless/mediatek/mt76/tx.c 	if (!skb) {
skb               406 drivers/net/wireless/mediatek/mt76/tx.c 	info = IEEE80211_SKB_CB(skb);
skb               408 drivers/net/wireless/mediatek/mt76/tx.c 		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
skb               413 drivers/net/wireless/mediatek/mt76/tx.c 	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
skb               417 drivers/net/wireless/mediatek/mt76/tx.c 		mt76_check_agg_ssn(mtxq, skb);
skb               419 drivers/net/wireless/mediatek/mt76/tx.c 	idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
skb               433 drivers/net/wireless/mediatek/mt76/tx.c 		skb = mt76_txq_dequeue(dev, mtxq, false);
skb               434 drivers/net/wireless/mediatek/mt76/tx.c 		if (!skb) {
skb               439 drivers/net/wireless/mediatek/mt76/tx.c 		info = IEEE80211_SKB_CB(skb);
skb               444 drivers/net/wireless/mediatek/mt76/tx.c 			skb_queue_tail(&mtxq->retry_q, skb);
skb               451 drivers/net/wireless/mediatek/mt76/tx.c 			mt76_check_agg_ssn(mtxq, skb);
skb               453 drivers/net/wireless/mediatek/mt76/tx.c 		idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
skb               602 drivers/net/wireless/mediatek/mt76/tx.c 	struct sk_buff *skb;
skb               609 drivers/net/wireless/mediatek/mt76/tx.c 	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
skb               610 drivers/net/wireless/mediatek/mt76/tx.c 		ieee80211_free_txskb(dev->hw, skb);
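
The tx.c entries above are mt76's tx-status machinery: mt76_tx_status_skb_add() stamps a packet id into the frame's control block and parks the skb on dev->status_list; a later status report or timeout unlinks it and hands it to mac80211. A minimal sketch of the parking step, assuming a driver-private control block in skb->cb (the real driver nests its mt76_tx_cb inside the ieee80211_tx_info driver data, so the struct below is illustrative):

	#include <linux/build_bug.h>
	#include <linux/jiffies.h>
	#include <linux/skbuff.h>

	struct tx_cb {
		unsigned long jiffies;	/* for timing out lost status reports */
		u8 wcid;
		u8 pktid;
		u8 flags;
	};

	static struct tx_cb *tx_skb_cb(struct sk_buff *skb)
	{
		BUILD_BUG_ON(sizeof(struct tx_cb) > sizeof(skb->cb));
		return (struct tx_cb *)skb->cb;
	}

	/* Park a frame until the hardware reports a matching packet id. */
	static void tx_status_park(struct sk_buff_head *status_list,
				   struct sk_buff *skb, u8 wcid, u8 pktid)
	{
		struct tx_cb *cb = tx_skb_cb(skb);

		cb->jiffies = jiffies;
		cb->wcid = wcid;
		cb->pktid = pktid;
		cb->flags = 0;

		__skb_queue_tail(status_list, skb);	/* caller holds the lock */
	}
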
skb               426 drivers/net/wireless/mediatek/mt76/usb.c 	struct sk_buff *skb;
skb               434 drivers/net/wireless/mediatek/mt76/usb.c 		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
skb               435 drivers/net/wireless/mediatek/mt76/usb.c 		if (!skb)
skb               438 drivers/net/wireless/mediatek/mt76/usb.c 		skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
skb               441 drivers/net/wireless/mediatek/mt76/usb.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb               445 drivers/net/wireless/mediatek/mt76/usb.c 		return skb;
skb               449 drivers/net/wireless/mediatek/mt76/usb.c 	skb = build_skb(data, buf_size);
skb               450 drivers/net/wireless/mediatek/mt76/usb.c 	if (!skb)
skb               453 drivers/net/wireless/mediatek/mt76/usb.c 	skb_reserve(skb, MT_DMA_HDR_LEN);
skb               454 drivers/net/wireless/mediatek/mt76/usb.c 	__skb_put(skb, len);
skb               456 drivers/net/wireless/mediatek/mt76/usb.c 	return skb;
skb               466 drivers/net/wireless/mediatek/mt76/usb.c 	struct sk_buff *skb;
skb               476 drivers/net/wireless/mediatek/mt76/usb.c 	skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
skb               477 drivers/net/wireless/mediatek/mt76/usb.c 	if (!skb)
skb               483 drivers/net/wireless/mediatek/mt76/usb.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb               490 drivers/net/wireless/mediatek/mt76/usb.c 	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
skb               749 drivers/net/wireless/mediatek/mt76/usb.c mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
skb               752 drivers/net/wireless/mediatek/mt76/usb.c 	urb->transfer_buffer_length = skb->len;
skb               755 drivers/net/wireless/mediatek/mt76/usb.c 		urb->transfer_buffer = skb->data;
skb               760 drivers/net/wireless/mediatek/mt76/usb.c 	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
skb               769 drivers/net/wireless/mediatek/mt76/usb.c 		   struct sk_buff *skb, struct mt76_wcid *wcid,
skb               774 drivers/net/wireless/mediatek/mt76/usb.c 		.skb = skb,
skb               782 drivers/net/wireless/mediatek/mt76/usb.c 	skb->prev = skb->next = NULL;
skb               787 drivers/net/wireless/mediatek/mt76/usb.c 	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
skb               796 drivers/net/wireless/mediatek/mt76/usb.c 	q->entry[idx].skb = tx_info.skb;
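
The usb.c entries above choose between two rx-skb strategies: build_skb() wraps the DMA buffer directly when the frame plus skb_shared_info fit, otherwise a short head is copied and the remainder is attached as a page fragment. Reassembled as one sketch, with the DMA header length and head-copy size stubbed in as illustrative constants:

	#include <linux/mm.h>
	#include <linux/skbuff.h>

	#define DMA_HDR_LEN	4	/* stands in for MT_DMA_HDR_LEN */
	#define SKB_HEAD_COPY	128	/* stands in for MT_SKB_HEAD_LEN */

	static struct sk_buff *build_rx_skb(void *data, int len, int buf_size)
	{
		struct sk_buff *skb;

		if (SKB_WITH_OVERHEAD(buf_size) < DMA_HDR_LEN + len) {
			struct page *page;

			/* Not enough room for data + skb_shared_info: copy a
			 * short head and attach the rest as a page fragment. */
			skb = alloc_skb(SKB_HEAD_COPY, GFP_ATOMIC);
			if (!skb)
				return NULL;

			skb_put_data(skb, data + DMA_HDR_LEN, SKB_HEAD_COPY);
			data += DMA_HDR_LEN + SKB_HEAD_COPY;
			page = virt_to_head_page(data);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, data - page_address(page),
					len - SKB_HEAD_COPY, buf_size);
			return skb;
		}

		/* Fast path: wrap the receive buffer itself, no copy. */
		skb = build_skb(data, buf_size);
		if (!skb)
			return NULL;

		skb_reserve(skb, DMA_HDR_LEN);
		__skb_put(skb, len);
		return skb;
	}
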
skb                26 drivers/net/wireless/mediatek/mt76/util.h mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
skb                28 drivers/net/wireless/mediatek/mt76/util.h 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb                31 drivers/net/wireless/mediatek/mt7601u/dma.c 	struct sk_buff *skb;
skb                34 drivers/net/wireless/mediatek/mt7601u/dma.c 	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
skb                35 drivers/net/wireless/mediatek/mt7601u/dma.c 	if (!skb)
skb                38 drivers/net/wireless/mediatek/mt7601u/dma.c 	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
skb                47 drivers/net/wireless/mediatek/mt7601u/dma.c 		skb_put_data(skb, data, hdr_len);
skb                55 drivers/net/wireless/mediatek/mt7601u/dma.c 	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
skb                58 drivers/net/wireless/mediatek/mt7601u/dma.c 	skb_put_data(skb, data, copy);
skb                62 drivers/net/wireless/mediatek/mt7601u/dma.c 		skb_add_rx_frag(skb, 0, p, data - page_address(p),
skb                67 drivers/net/wireless/mediatek/mt7601u/dma.c 	return skb;
skb                72 drivers/net/wireless/mediatek/mt7601u/dma.c 	dev_kfree_skb(skb);
skb                79 drivers/net/wireless/mediatek/mt7601u/dma.c 	struct sk_buff *skb;
skb               103 drivers/net/wireless/mediatek/mt7601u/dma.c 	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
skb               104 drivers/net/wireless/mediatek/mt7601u/dma.c 	if (!skb)
skb               108 drivers/net/wireless/mediatek/mt7601u/dma.c 	ieee80211_rx(dev->hw, skb);
skb               233 drivers/net/wireless/mediatek/mt7601u/dma.c 	struct sk_buff *skb;
skb               253 drivers/net/wireless/mediatek/mt7601u/dma.c 	skb = q->e[q->start].skb;
skb               254 drivers/net/wireless/mediatek/mt7601u/dma.c 	q->e[q->start].skb = NULL;
skb               255 drivers/net/wireless/mediatek/mt7601u/dma.c 	trace_mt_tx_dma_done(dev, skb);
skb               257 drivers/net/wireless/mediatek/mt7601u/dma.c 	__skb_queue_tail(&dev->tx_skb_done, skb);
skb               261 drivers/net/wireless/mediatek/mt7601u/dma.c 		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
skb               289 drivers/net/wireless/mediatek/mt7601u/dma.c 		struct sk_buff *skb = __skb_dequeue(&skbs);
skb               291 drivers/net/wireless/mediatek/mt7601u/dma.c 		mt7601u_tx_status(dev, skb);
skb               296 drivers/net/wireless/mediatek/mt7601u/dma.c 				 struct sk_buff *skb, u8 ep)
skb               313 drivers/net/wireless/mediatek/mt7601u/dma.c 	e->skb = skb;
skb               314 drivers/net/wireless/mediatek/mt7601u/dma.c 	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
skb               333 drivers/net/wireless/mediatek/mt7601u/dma.c 		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
skb               355 drivers/net/wireless/mediatek/mt7601u/dma.c int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
skb               366 drivers/net/wireless/mediatek/mt7601u/dma.c 	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
skb               370 drivers/net/wireless/mediatek/mt7601u/dma.c 	ret = mt7601u_dma_submit_tx(dev, skb, ep);
skb               372 drivers/net/wireless/mediatek/mt7601u/dma.c 		ieee80211_free_txskb(dev->hw, skb);
skb               456 drivers/net/wireless/mediatek/mt7601u/dma.c 		if (q->e[i].skb)
skb               457 drivers/net/wireless/mediatek/mt7601u/dma.c 			mt7601u_tx_status(q->dev, q->e[i].skb);
skb                58 drivers/net/wireless/mediatek/mt7601u/dma.h static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
skb                72 drivers/net/wireless/mediatek/mt7601u/dma.h 		FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
skb                76 drivers/net/wireless/mediatek/mt7601u/dma.h 	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
skb                77 drivers/net/wireless/mediatek/mt7601u/dma.h 	return skb_put_padto(skb, round_up(skb->len, 4) + 4);
skb                81 drivers/net/wireless/mediatek/mt7601u/dma.h mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
skb                84 drivers/net/wireless/mediatek/mt7601u/dma.h 	return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
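
The dma.h entries above show the whole mt7601u wrap in three lines: push one LE32 info word, then pad to a dword boundary plus four terminator bytes. As a self-contained sketch (mask name stubbed); note that skb_put_padto() frees the skb when it cannot extend it, so the caller must not touch the skb after an error:

	#include <linux/bitfield.h>
	#include <linux/skbuff.h>
	#include <asm/unaligned.h>

	#define TXD_INFO_LEN	GENMASK(15, 0)	/* stands in for MT_TXD_INFO_LEN */

	static int dma_skb_wrap(struct sk_buff *skb, u32 port_type_flags)
	{
		u32 info = FIELD_PREP(TXD_INFO_LEN, round_up(skb->len, 4)) |
			   port_type_flags;

		put_unaligned_le32(info, skb_push(skb, sizeof(info)));

		/* Zero-fill to a dword boundary plus the 4-byte terminator. */
		return skb_put_padto(skb, round_up(skb->len, 4) + 4);
	}
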
skb               460 drivers/net/wireless/mediatek/mt7601u/mac.c u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
skb               463 drivers/net/wireless/mediatek/mt7601u/mac.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb               155 drivers/net/wireless/mediatek/mt7601u/mac.h u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
skb                29 drivers/net/wireless/mediatek/mt7601u/mcu.c static inline void skb_put_le32(struct sk_buff *skb, u32 val)
skb                31 drivers/net/wireless/mediatek/mt7601u/mcu.c 	put_unaligned_le32(val, skb_put(skb, 4));
skb                34 drivers/net/wireless/mediatek/mt7601u/mcu.c static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
skb                37 drivers/net/wireless/mediatek/mt7601u/mcu.c 	WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
skb                43 drivers/net/wireless/mediatek/mt7601u/mcu.c 					    struct sk_buff *skb, bool need_resp)
skb                47 drivers/net/wireless/mediatek/mt7601u/mcu.c 	for (i = 0; i < skb->len / 4; i++)
skb                48 drivers/net/wireless/mediatek/mt7601u/mcu.c 		csum ^= get_unaligned_le32(skb->data + i * 4);
skb                50 drivers/net/wireless/mediatek/mt7601u/mcu.c 	trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
skb                55 drivers/net/wireless/mediatek/mt7601u/mcu.c 	struct sk_buff *skb;
skb                59 drivers/net/wireless/mediatek/mt7601u/mcu.c 	skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
skb                60 drivers/net/wireless/mediatek/mt7601u/mcu.c 	if (skb) {
skb                61 drivers/net/wireless/mediatek/mt7601u/mcu.c 		skb_reserve(skb, MT_DMA_HDR_LEN);
skb                62 drivers/net/wireless/mediatek/mt7601u/mcu.c 		skb_put_data(skb, data, len);
skb                65 drivers/net/wireless/mediatek/mt7601u/mcu.c 	return skb;
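
mt7601u_mcu_msg_alloc() above sizes the skb for both ends of the later DMA wrap: headroom for the 4-byte DMA header that will be pushed in front, tailroom for the trailing 4-byte zero pad. A sketch with the header length stubbed:

	#include <linux/skbuff.h>

	#define DMA_HDR_LEN	4	/* stands in for MT_DMA_HDR_LEN */

	static struct sk_buff *mcu_msg_alloc(const void *data, int len)
	{
		struct sk_buff *skb;

		/* Head room for the DMA header pushed later, tail room for
		 * the trailing 4-byte zero pad added by the DMA wrap. */
		skb = alloc_skb(len + DMA_HDR_LEN + 4, GFP_KERNEL);
		if (!skb)
			return NULL;

		skb_reserve(skb, DMA_HDR_LEN);
		skb_put_data(skb, data, len);
		return skb;
	}
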
skb               110 drivers/net/wireless/mediatek/mt7601u/mcu.c mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
skb               128 drivers/net/wireless/mediatek/mt7601u/mcu.c 	mt7601u_dma_skb_wrap_cmd(skb, seq, cmd);
skb               133 drivers/net/wireless/mediatek/mt7601u/mcu.c 	trace_mt_mcu_msg_send_cs(dev, skb, wait_resp);
skb               134 drivers/net/wireless/mediatek/mt7601u/mcu.c 	trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len);
skb               135 drivers/net/wireless/mediatek/mt7601u/mcu.c 	ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
skb               140 drivers/net/wireless/mediatek/mt7601u/mcu.c 	if (sent != skb->len)
skb               148 drivers/net/wireless/mediatek/mt7601u/mcu.c 	consume_skb(skb);
skb               156 drivers/net/wireless/mediatek/mt7601u/mcu.c 	struct sk_buff *skb;
skb               165 drivers/net/wireless/mediatek/mt7601u/mcu.c 	skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg));
skb               166 drivers/net/wireless/mediatek/mt7601u/mcu.c 	if (!skb)
skb               168 drivers/net/wireless/mediatek/mt7601u/mcu.c 	return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
skb               193 drivers/net/wireless/mediatek/mt7601u/mcu.c 	struct sk_buff *skb;
skb               202 drivers/net/wireless/mediatek/mt7601u/mcu.c 	skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg));
skb               203 drivers/net/wireless/mediatek/mt7601u/mcu.c 	if (!skb)
skb               205 drivers/net/wireless/mediatek/mt7601u/mcu.c 	return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
skb               212 drivers/net/wireless/mediatek/mt7601u/mcu.c 	struct sk_buff *skb;
skb               220 drivers/net/wireless/mediatek/mt7601u/mcu.c 	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
skb               221 drivers/net/wireless/mediatek/mt7601u/mcu.c 	if (!skb)
skb               223 drivers/net/wireless/mediatek/mt7601u/mcu.c 	skb_reserve(skb, MT_DMA_HDR_LEN);
skb               226 drivers/net/wireless/mediatek/mt7601u/mcu.c 		skb_put_le32(skb, base + data[i].reg);
skb               227 drivers/net/wireless/mediatek/mt7601u/mcu.c 		skb_put_le32(skb, data[i].value);
skb               230 drivers/net/wireless/mediatek/mt7601u/mcu.c 	ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
skb               241 drivers/net/wireless/mediatek/mt7601u/mcu.c 	struct sk_buff *skb;
skb               249 drivers/net/wireless/mediatek/mt7601u/mcu.c 	skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
skb               250 drivers/net/wireless/mediatek/mt7601u/mcu.c 	if (!skb)
skb               252 drivers/net/wireless/mediatek/mt7601u/mcu.c 	skb_reserve(skb, MT_DMA_HDR_LEN);
skb               254 drivers/net/wireless/mediatek/mt7601u/mcu.c 	skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
skb               256 drivers/net/wireless/mediatek/mt7601u/mcu.c 		skb_put_le32(skb, data[i]);
skb               258 drivers/net/wireless/mediatek/mt7601u/mcu.c 	ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
skb                88 drivers/net/wireless/mediatek/mt7601u/mt7601u.h 		struct sk_buff *skb;
skb               369 drivers/net/wireless/mediatek/mt7601u/mt7601u.h 		struct sk_buff *skb);
skb               372 drivers/net/wireless/mediatek/mt7601u/mt7601u.h void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
skb               376 drivers/net/wireless/mediatek/mt7601u/mt7601u.h void mt76_remove_hdr_pad(struct sk_buff *skb);
skb               377 drivers/net/wireless/mediatek/mt7601u/mt7601u.h int mt76_insert_hdr_pad(struct sk_buff *skb);
skb               389 drivers/net/wireless/mediatek/mt7601u/mt7601u.h int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
skb                80 drivers/net/wireless/mediatek/mt7601u/trace.h 		 struct sk_buff *skb, u32 csum, bool resp),
skb                81 drivers/net/wireless/mediatek/mt7601u/trace.h 	TP_ARGS(dev, skb, csum, resp),
skb                90 drivers/net/wireless/mediatek/mt7601u/trace.h 		__entry->info = *(u32 *)skb->data;
skb               278 drivers/net/wireless/mediatek/mt7601u/trace.h 	TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb,
skb               280 drivers/net/wireless/mediatek/mt7601u/trace.h 	TP_ARGS(dev, skb, sta, h),
skb               284 drivers/net/wireless/mediatek/mt7601u/trace.h 		__field(struct sk_buff *, skb)
skb               290 drivers/net/wireless/mediatek/mt7601u/trace.h 		__entry->skb = skb;
skb               295 drivers/net/wireless/mediatek/mt7601u/trace.h 		  __entry->skb, __entry->sta,
skb               303 drivers/net/wireless/mediatek/mt7601u/trace.h 	TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb),
skb               304 drivers/net/wireless/mediatek/mt7601u/trace.h 	TP_ARGS(dev, skb),
skb               307 drivers/net/wireless/mediatek/mt7601u/trace.h 		__field(struct sk_buff *, skb)
skb               311 drivers/net/wireless/mediatek/mt7601u/trace.h 		__entry->skb = skb;
skb               313 drivers/net/wireless/mediatek/mt7601u/trace.h 	TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
skb                27 drivers/net/wireless/mediatek/mt7601u/tx.c static u8 skb2q(struct sk_buff *skb)
skb                29 drivers/net/wireless/mediatek/mt7601u/tx.c 	int qid = skb_get_queue_mapping(skb);
skb                33 drivers/net/wireless/mediatek/mt7601u/tx.c 		skb_set_queue_mapping(skb, qid);
skb                90 drivers/net/wireless/mediatek/mt7601u/tx.c static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
skb                95 drivers/net/wireless/mediatek/mt7601u/tx.c 	skb_pull(skb, sizeof(struct mt76_txwi) + 4);
skb                96 drivers/net/wireless/mediatek/mt7601u/tx.c 	if (ieee80211_get_hdrlen_from_skb(skb) % 4)
skb                97 drivers/net/wireless/mediatek/mt7601u/tx.c 		mt76_remove_hdr_pad(skb);
skb                99 drivers/net/wireless/mediatek/mt7601u/tx.c 	skb_trim(skb, pkt_len);
skb               102 drivers/net/wireless/mediatek/mt7601u/tx.c void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
skb               104 drivers/net/wireless/mediatek/mt7601u/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               106 drivers/net/wireless/mediatek/mt7601u/tx.c 	mt7601u_tx_skb_remove_dma_overhead(skb, info);
skb               113 drivers/net/wireless/mediatek/mt7601u/tx.c 	ieee80211_tx_status(dev->hw, skb);
skb               117 drivers/net/wireless/mediatek/mt7601u/tx.c static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
skb               119 drivers/net/wireless/mediatek/mt7601u/tx.c 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
skb               126 drivers/net/wireless/mediatek/mt7601u/tx.c 	return skb_cow(skb, need_head);
skb               130 drivers/net/wireless/mediatek/mt7601u/tx.c mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
skb               134 drivers/net/wireless/mediatek/mt7601u/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               143 drivers/net/wireless/mediatek/mt7601u/tx.c 	txwi = skb_push(skb, sizeof(struct mt76_txwi));
skb               147 drivers/net/wireless/mediatek/mt7601u/tx.c 		ieee80211_get_tx_rates(info->control.vif, sta, skb,
skb               191 drivers/net/wireless/mediatek/mt7601u/tx.c 		struct sk_buff *skb)
skb               193 drivers/net/wireless/mediatek/mt7601u/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               200 drivers/net/wireless/mediatek/mt7601u/tx.c 	int pkt_len = skb->len;
skb               201 drivers/net/wireless/mediatek/mt7601u/tx.c 	int hw_q = skb2q(skb);
skb               206 drivers/net/wireless/mediatek/mt7601u/tx.c 	if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
skb               207 drivers/net/wireless/mediatek/mt7601u/tx.c 		ieee80211_free_txskb(dev->hw, skb);
skb               220 drivers/net/wireless/mediatek/mt7601u/tx.c 	txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);
skb               222 drivers/net/wireless/mediatek/mt7601u/tx.c 	if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
skb               225 drivers/net/wireless/mediatek/mt7601u/tx.c 	trace_mt_tx(dev, skb, msta, txwi);
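
mt7601u_skb_rooms() above guards the two skb_push() calls that follow (TXWI, then DMA header) by growing headroom up front. A sketch of the computation, with the TXWI size stubbed and the extra two bytes covering the header-alignment pad:

	#include <linux/skbuff.h>
	#include <net/mac80211.h>

	#define TXWI_SIZE	16	/* stands in for sizeof(struct mt76_txwi) */
	#define DMA_HDR_LEN	4

	/* Make sure headroom exists before any skb_push() on the tx path. */
	static int skb_tx_rooms(struct sk_buff *skb)
	{
		int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
		int need_head = TXWI_SIZE + DMA_HDR_LEN;

		if (hdr_len % 4)
			need_head += 2;	/* room for the alignment pad, too */

		return skb_cow(skb, need_head);
	}
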
skb                 8 drivers/net/wireless/mediatek/mt7601u/util.c void mt76_remove_hdr_pad(struct sk_buff *skb)
skb                10 drivers/net/wireless/mediatek/mt7601u/util.c 	int len = ieee80211_get_hdrlen_from_skb(skb);
skb                12 drivers/net/wireless/mediatek/mt7601u/util.c 	memmove(skb->data + 2, skb->data, len);
skb                13 drivers/net/wireless/mediatek/mt7601u/util.c 	skb_pull(skb, 2);
skb                16 drivers/net/wireless/mediatek/mt7601u/util.c int mt76_insert_hdr_pad(struct sk_buff *skb)
skb                18 drivers/net/wireless/mediatek/mt7601u/util.c 	int len = ieee80211_get_hdrlen_from_skb(skb);
skb                24 drivers/net/wireless/mediatek/mt7601u/util.c 	ret = skb_cow(skb, 2);
skb                28 drivers/net/wireless/mediatek/mt7601u/util.c 	skb_push(skb, 2);
skb                29 drivers/net/wireless/mediatek/mt7601u/util.c 	memmove(skb->data, skb->data + 2, len);
skb                31 drivers/net/wireless/mediatek/mt7601u/util.c 	skb->data[len] = 0;
skb                32 drivers/net/wireless/mediatek/mt7601u/util.c 	skb->data[len + 1] = 0;
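
The util.c entries above are the canonical 2-byte header-pad pair: on tx, shift the 802.11 header back two bytes so the payload lands dword-aligned; on rx/status, shift it forward again. Restated as a self-contained pair (return conventions simplified to 0/-errno):

	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <net/mac80211.h>

	static int insert_hdr_pad(struct sk_buff *skb)
	{
		int len = ieee80211_get_hdrlen_from_skb(skb);
		int ret;

		if (len % 4 == 0)
			return 0;	/* header already dword-aligned */

		ret = skb_cow(skb, 2);	/* guarantee 2 bytes of headroom */
		if (ret)
			return ret;

		skb_push(skb, 2);
		memmove(skb->data, skb->data + 2, len);
		skb->data[len] = 0;	/* zero the pad itself */
		skb->data[len + 1] = 0;
		return 0;
	}

	static void remove_hdr_pad(struct sk_buff *skb)
	{
		int len = ieee80211_get_hdrlen_from_skb(skb);

		memmove(skb->data + 2, skb->data, len);
		skb_pull(skb, 2);
	}
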
skb               102 drivers/net/wireless/quantenna/qtnfmac/bus.h static inline int qtnf_bus_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
skb               104 drivers/net/wireless/quantenna/qtnfmac/bus.h 	return bus->bus_ops->data_tx(bus, skb);
skb               113 drivers/net/wireless/quantenna/qtnfmac/bus.h static inline int qtnf_bus_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
skb               115 drivers/net/wireless/quantenna/qtnfmac/bus.h 	return bus->bus_ops->control_tx(bus, skb);
skb               147 drivers/net/wireless/quantenna/qtnfmac/cfg80211.c 	struct sk_buff *skb;
skb               161 drivers/net/wireless/quantenna/qtnfmac/cfg80211.c 	while ((skb = skb_dequeue(&vif->high_pri_tx_queue)))
skb               162 drivers/net/wireless/quantenna/qtnfmac/cfg80211.c 		dev_kfree_skb_any(skb);
skb                73 drivers/net/wireless/quantenna/qtnfmac/core.c qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb                80 drivers/net/wireless/quantenna/qtnfmac/core.c 	if (unlikely(skb->dev != ndev)) {
skb                82 drivers/net/wireless/quantenna/qtnfmac/core.c 		dev_kfree_skb_any(skb);
skb                88 drivers/net/wireless/quantenna/qtnfmac/core.c 		dev_kfree_skb_any(skb);
skb                95 drivers/net/wireless/quantenna/qtnfmac/core.c 		dev_kfree_skb_any(skb);
skb                99 drivers/net/wireless/quantenna/qtnfmac/core.c 	if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
skb               101 drivers/net/wireless/quantenna/qtnfmac/core.c 				   skb->len);
skb               102 drivers/net/wireless/quantenna/qtnfmac/core.c 		dev_kfree_skb_any(skb);
skb               110 drivers/net/wireless/quantenna/qtnfmac/core.c 	return qtnf_bus_data_tx(mac->bus, skb);
skb               381 drivers/net/wireless/quantenna/qtnfmac/core.c 	struct sk_buff *skb;
skb               387 drivers/net/wireless/quantenna/qtnfmac/core.c 	while ((skb = skb_dequeue(&vif->high_pri_tx_queue))) {
skb               389 drivers/net/wireless/quantenna/qtnfmac/core.c 				    0, skb->data, skb->len);
skb               390 drivers/net/wireless/quantenna/qtnfmac/core.c 		dev_kfree_skb_any(skb);
skb               719 drivers/net/wireless/quantenna/qtnfmac/core.c struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb)
skb               730 drivers/net/wireless/quantenna/qtnfmac/core.c 		(skb_tail_pointer(skb) - sizeof(*meta));
skb               770 drivers/net/wireless/quantenna/qtnfmac/core.c 	__skb_trim(skb, skb->len - sizeof(*meta));
skb               804 drivers/net/wireless/quantenna/qtnfmac/core.c void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb)
skb               811 drivers/net/wireless/quantenna/qtnfmac/core.c 		ndev->stats.rx_bytes += skb->len;
skb               819 drivers/net/wireless/quantenna/qtnfmac/core.c 	stats64->rx_bytes += skb->len;
skb               824 drivers/net/wireless/quantenna/qtnfmac/core.c void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
skb               831 drivers/net/wireless/quantenna/qtnfmac/core.c 		ndev->stats.tx_bytes += skb->len;
skb               839 drivers/net/wireless/quantenna/qtnfmac/core.c 	stats64->tx_bytes += skb->len;
skb               844 drivers/net/wireless/quantenna/qtnfmac/core.c void qtnf_packet_send_hi_pri(struct sk_buff *skb)
skb               846 drivers/net/wireless/quantenna/qtnfmac/core.c 	struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
skb               848 drivers/net/wireless/quantenna/qtnfmac/core.c 	skb_queue_tail(&vif->high_pri_tx_queue, skb);
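
The core.c entries above drain vif->high_pri_tx_queue from workqueue context; the topaz_pcie.c entries later in this listing show the enqueue side, which diverts EAPOL frames around the data ring. A sketch of both halves with an illustrative queue parameter:

	#include <linux/if_ether.h>
	#include <linux/skbuff.h>

	/* Divert frames that must not be starved by a full data ring. */
	static bool try_send_hi_pri(struct sk_buff_head *hi_pri_q,
				    struct sk_buff *skb)
	{
		if (skb->protocol != htons(ETH_P_PAE))
			return false;

		skb_queue_tail(hi_pri_q, skb);	/* lock-taking, irq-safe variant */
		return true;
	}

	static void flush_hi_pri(struct sk_buff_head *hi_pri_q)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(hi_pri_q)) != NULL)
			dev_kfree_skb_any(skb);	/* safe in any context */
	}
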
skb               146 drivers/net/wireless/quantenna/qtnfmac/core.h struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
skb               148 drivers/net/wireless/quantenna/qtnfmac/core.h void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb);
skb               149 drivers/net/wireless/quantenna/qtnfmac/core.h void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb);
skb               155 drivers/net/wireless/quantenna/qtnfmac/core.h void qtnf_packet_send_hi_pri(struct sk_buff *skb);
skb               692 drivers/net/wireless/quantenna/qtnfmac/event.c 				  const struct sk_buff *skb)
skb               698 drivers/net/wireless/quantenna/qtnfmac/event.c 	if (unlikely(!skb || skb->len < sizeof(*event))) {
skb               703 drivers/net/wireless/quantenna/qtnfmac/event.c 	event = (struct qlink_event *)skb->data;
skb               715 drivers/net/wireless/quantenna/qtnfmac/event.c 	res = qtnf_event_parse(mac, skb);
skb                50 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
skb                55 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c 	ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len);
skb               239 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c 	struct sk_buff *skb;
skb               246 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c 	skb = __dev_alloc_skb(len, GFP_KERNEL);
skb               248 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c 	if (unlikely(!skb)) {
skb               253 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c 	memcpy_fromio(skb_put(skb, len), buf, len);
skb               255 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c 	qtnf_trans_handle_rx_ctl_packet(bus, skb);
skb                72 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb);
skb               287 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	struct sk_buff *skb;
skb               290 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
skb               291 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	if (!skb) {
skb               296 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	priv->rx_skb[index] = skb;
skb               299 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	paddr = pci_map_single(priv->pdev, skb->data,
skb               350 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	struct sk_buff *skb;
skb               358 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			skb = priv->rx_skb[i];
skb               363 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			dev_kfree_skb_any(skb);
skb               372 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			skb = priv->tx_skb[i];
skb               375 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			pci_unmap_single(priv->pdev, paddr, skb->len,
skb               377 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			dev_kfree_skb_any(skb);
skb               467 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	struct sk_buff *skb;
skb               482 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 		skb = priv->tx_skb[i];
skb               483 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 		if (likely(skb)) {
skb               487 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			pci_unmap_single(priv->pdev, paddr, skb->len,
skb               490 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			if (skb->dev) {
skb               491 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 				qtnf_update_tx_stats(skb->dev, skb);
skb               493 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 					qtnf_wake_all_queues(skb->dev);
skb               498 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			dev_kfree_skb_any(skb);
skb               534 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
skb               548 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 		if (skb->dev) {
skb               549 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			netif_tx_stop_all_queues(skb->dev);
skb               558 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	priv->tx_skb[i] = skb;
skb               559 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	len = skb->len;
skb               561 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	skb_paddr = pci_map_single(priv->pdev, skb->data,
skb               562 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 				   skb->len, PCI_DMA_TODEVICE);
skb               595 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	if (ret && skb) {
skb               597 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 		if (skb->dev)
skb               598 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			skb->dev->stats.tx_dropped++;
skb               599 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 		dev_kfree_skb_any(skb);
skb               676 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	struct sk_buff *skb = NULL;
skb               695 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 		skb = priv->rx_skb[r_idx];
skb               704 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 		if (!skb) {
skb               709 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	if (skb && (skb_tailroom(skb) < psize)) {
skb               711 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			       psize, skb_tailroom(skb));
skb               715 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 		if (skb) {
skb               723 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			skb_put(skb, psize);
skb               724 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			ndev = qtnf_classify_skb(bus, skb);
skb               726 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 				qtnf_update_rx_stats(ndev, skb);
skb               727 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 				skb->protocol = eth_type_trans(skb, ndev);
skb               728 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 				napi_gro_receive(napi, skb);
skb               732 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 				dev_kfree_skb_any(skb);
skb               735 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 			if (skb) {
skb               737 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 				dev_kfree_skb_any(skb);
skb               874 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	struct sk_buff *skb = NULL;
skb               878 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
skb               879 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	if (!skb)
skb               882 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	skb->len = QTN_PCIE_FW_BUFSZ;
skb               883 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	skb->dev = NULL;
skb               885 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	hdr = (struct qtnf_pearl_fw_hdr *)skb->data;
skb               895 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	pdata = skb->data + hds;
skb               907 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c 	ret = qtnf_pcie_data_tx(bus, skb);
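
The pearl_pcie.c entries above keep per-descriptor rx_skb/tx_skb arrays and use the legacy pci_* DMA API. A minimal rx-refill sketch under the same API; the buffer size is stubbed and only the mapping-failure path is handled:

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	#define RX_BUF_SIZE	2048	/* stands in for SKB_BUF_SIZE */

	/* Allocate one IP-aligned rx buffer and map it for device writes. */
	static struct sk_buff *rx_buf_alloc(struct pci_dev *pdev,
					    dma_addr_t *paddr)
	{
		struct sk_buff *skb;

		skb = __netdev_alloc_skb_ip_align(NULL, RX_BUF_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		*paddr = pci_map_single(pdev, skb->data, RX_BUF_SIZE,
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, *paddr)) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
		return skb;
	}
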
skb               248 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	struct sk_buff *skb;
skb               251 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
skb               252 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	if (!skb) {
skb               257 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	ts->base.rx_skb[index] = skb;
skb               259 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	paddr = pci_map_single(ts->base.pdev, skb->data,
skb               300 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	struct sk_buff *skb;
skb               308 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			skb = priv->rx_skb[i];
skb               312 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			dev_kfree_skb_any(skb);
skb               323 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			skb = priv->tx_skb[i];
skb               327 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			dev_kfree_skb_any(skb);
skb               384 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	struct sk_buff *skb;
skb               401 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		skb = priv->tx_skb[i];
skb               403 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		if (likely(skb)) {
skb               406 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			pci_unmap_single(priv->pdev, paddr, skb->len,
skb               409 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			if (skb->dev) {
skb               410 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 				qtnf_update_tx_stats(skb->dev, skb);
skb               412 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 					qtnf_wake_all_queues(skb->dev);
skb               417 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			dev_kfree_skb_any(skb);
skb               489 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
skb               501 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
skb               502 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		qtnf_packet_send_hi_pri(skb);
skb               503 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		qtnf_update_tx_stats(skb->dev, skb);
skb               511 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		qtnf_try_stop_xmit(bus, skb->dev);
skb               517 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	priv->tx_skb[i] = skb;
skb               518 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	len = skb->len;
skb               520 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	skb_paddr = pci_map_single(priv->pdev, skb->data,
skb               521 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 				   skb->len, PCI_DMA_TODEVICE);
skb               547 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		if (skb->dev)
skb               548 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			skb->dev->stats.tx_dropped++;
skb               549 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		dev_kfree_skb_any(skb);
skb               608 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	struct sk_buff *skb = NULL;
skb               628 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		skb = priv->rx_skb[r_idx];
skb               638 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		if (!skb) {
skb               643 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 	if (skb && (skb_tailroom(skb) < psize)) {
skb               645 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			       psize, skb_tailroom(skb));
skb               649 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 		if (skb) {
skb               656 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			skb_reserve(skb, poffset);
skb               657 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			skb_put(skb, psize);
skb               658 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			ndev = qtnf_classify_skb(bus, skb);
skb               660 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 				qtnf_update_rx_stats(ndev, skb);
skb               661 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 				skb->protocol = eth_type_trans(skb, ndev);
skb               662 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 				netif_receive_skb(skb);
skb               666 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 				dev_kfree_skb_any(skb);
skb               669 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 			if (skb) {
skb               671 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c 				dev_kfree_skb_any(skb);
skb                14 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h qtnf_cmd_skb_put_buffer(struct sk_buff *skb, const u8 *buf_src, size_t len)
skb                16 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h 	skb_put_data(skb, buf_src, len);
skb                19 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb,
skb                23 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h 	struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + arr_len);
skb                30 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h static inline void qtnf_cmd_skb_put_tlv_tag(struct sk_buff *skb, u16 tlv_id)
skb                32 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h 	struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr));
skb                38 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h static inline void qtnf_cmd_skb_put_tlv_u8(struct sk_buff *skb, u16 tlv_id,
skb                41 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h 	struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
skb                48 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb,
skb                51 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h 	struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
skb                59 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h static inline void qtnf_cmd_skb_put_tlv_u32(struct sk_buff *skb,
skb                62 drivers/net/wireless/quantenna/qtnfmac/qlink_util.h 	struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
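
The qlink_util.h entries above all build the same wire format: a little-endian TLV header followed by the value bytes. One generic sketch covering the u8/u16/u32/array variants, assuming the caller reserved enough tailroom; the header layout mirrors struct qlink_tlv_hdr:

	#include <linux/skbuff.h>
	#include <linux/string.h>

	struct tlv_hdr {		/* illustrative mirror of qlink_tlv_hdr */
		__le16 type;
		__le16 len;
		u8 val[];
	} __packed;

	static void skb_put_tlv(struct sk_buff *skb, u16 tlv_id,
				const void *buf, u16 len)
	{
		struct tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + len);

		hdr->type = cpu_to_le16(tlv_id);
		hdr->len = cpu_to_le16(len);
		memcpy(hdr->val, buf, len);
	}
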
skb                81 drivers/net/wireless/quantenna/qtnfmac/trans.c static void qtnf_trans_signal_cmdresp(struct qtnf_bus *bus, struct sk_buff *skb)
skb                84 drivers/net/wireless/quantenna/qtnfmac/trans.c 	const struct qlink_resp *resp = (const struct qlink_resp *)skb->data;
skb                99 drivers/net/wireless/quantenna/qtnfmac/trans.c 	ctl_node->resp_skb = skb;
skb               109 drivers/net/wireless/quantenna/qtnfmac/trans.c 	dev_kfree_skb(skb);
skb               112 drivers/net/wireless/quantenna/qtnfmac/trans.c static int qtnf_trans_event_enqueue(struct qtnf_bus *bus, struct sk_buff *skb)
skb               118 drivers/net/wireless/quantenna/qtnfmac/trans.c 		skb_queue_tail(&trans->event_queue, skb);
skb               122 drivers/net/wireless/quantenna/qtnfmac/trans.c 		dev_kfree_skb(skb);
skb               168 drivers/net/wireless/quantenna/qtnfmac/trans.c int qtnf_trans_handle_rx_ctl_packet(struct qtnf_bus *bus, struct sk_buff *skb)
skb               170 drivers/net/wireless/quantenna/qtnfmac/trans.c 	const struct qlink_msg_header *header = (void *)skb->data;
skb               173 drivers/net/wireless/quantenna/qtnfmac/trans.c 	if (unlikely(skb->len < sizeof(*header))) {
skb               174 drivers/net/wireless/quantenna/qtnfmac/trans.c 		pr_warn("packet is too small: %u\n", skb->len);
skb               175 drivers/net/wireless/quantenna/qtnfmac/trans.c 		dev_kfree_skb(skb);
skb               179 drivers/net/wireless/quantenna/qtnfmac/trans.c 	if (unlikely(skb->len != le16_to_cpu(header->len))) {
skb               181 drivers/net/wireless/quantenna/qtnfmac/trans.c 			skb->len, le16_to_cpu(header->len));
skb               182 drivers/net/wireless/quantenna/qtnfmac/trans.c 		dev_kfree_skb(skb);
skb               188 drivers/net/wireless/quantenna/qtnfmac/trans.c 		if (unlikely(skb->len < sizeof(struct qlink_cmd))) {
skb               189 drivers/net/wireless/quantenna/qtnfmac/trans.c 			pr_warn("cmd reply too short: %u\n", skb->len);
skb               190 drivers/net/wireless/quantenna/qtnfmac/trans.c 			dev_kfree_skb(skb);
skb               194 drivers/net/wireless/quantenna/qtnfmac/trans.c 		qtnf_trans_signal_cmdresp(bus, skb);
skb               197 drivers/net/wireless/quantenna/qtnfmac/trans.c 		if (unlikely(skb->len < sizeof(struct qlink_event))) {
skb               198 drivers/net/wireless/quantenna/qtnfmac/trans.c 			pr_warn("event too short: %u\n", skb->len);
skb               199 drivers/net/wireless/quantenna/qtnfmac/trans.c 			dev_kfree_skb(skb);
skb               203 drivers/net/wireless/quantenna/qtnfmac/trans.c 		ret = qtnf_trans_event_enqueue(bus, skb);
skb               207 drivers/net/wireless/quantenna/qtnfmac/trans.c 		dev_kfree_skb(skb);
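
qtnf_trans_handle_rx_ctl_packet() above is a good example of validating lengths before trusting skb->data. The sketch below condenses the two checks, with an illustrative header type standing in for qlink_msg_header:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	struct msg_header {		/* illustrative stand-in */
		__le16 type;
		__le16 len;
	};

	/* Reject a control packet before dereferencing any wire field. */
	static int ctl_rx_validate(const struct sk_buff *skb)
	{
		const struct msg_header *header = (const void *)skb->data;

		if (skb->len < sizeof(*header))
			return -EINVAL;	/* too short to hold the header */

		if (skb->len != le16_to_cpu(header->len))
			return -EINVAL;	/* wire length disagrees with the skb */

		return 0;
	}
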
skb                39 drivers/net/wireless/quantenna/qtnfmac/trans.h int qtnf_trans_handle_rx_ctl_packet(struct qtnf_bus *bus, struct sk_buff *skb);
skb               734 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb               739 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, entry->skb->len);
skb              1097 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb               823 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb              1250 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb              1059 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb              1060 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	__le32 *txd = (__le32 *) entry->skb->data;
skb              1139 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	skb_push(entry->skb, TXD_DESC_SIZE);
skb              1140 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	memset(entry->skb->data, 0, TXD_DESC_SIZE);
skb              1160 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 			  entry->skb->data, length, rt2500usb_beacondone,
skb              1207 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	length = roundup(entry->skb->len, 2);
skb              1221 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb              1223 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	    (__le32 *)(entry->skb->data +
skb              1286 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	skb_trim(entry->skb, rxdesc->size);
skb              1309 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		dev_kfree_skb(entry->skb);
skb              1310 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		entry->skb = NULL;
skb               880 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	__le32 *rxwi = (__le32 *) entry->skb->data;
skb               918 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	skb_pull(entry->skb, entry->queue->winfo_size);
skb              1001 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb              1323 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb              1341 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	memset(skb_push(entry->skb, txwi_desc_size), 0, txwi_desc_size);
skb              1347 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	skbdesc->desc = entry->skb->data;
skb              1363 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
skb              1364 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	if (padding_len && skb_pad(entry->skb, padding_len)) {
skb              1367 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		entry->skb = NULL;
skb              1374 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
skb              1375 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 				   entry->skb->len + padding_len);
skb              1391 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	dev_kfree_skb_any(entry->skb);
skb              1392 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	entry->skb = NULL;
skb                63 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	return (__le32 *) entry->skb->data;
skb                70 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb                92 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
skb               627 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb               396 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		return (__le32 *) (entry->skb->data);
skb               398 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		return (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE);
skb               404 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb               405 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	__le32 *txi = (__le32 *) entry->skb->data;
skb               419 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 			   roundup(entry->skb->len, 4) - TXINFO_DESC_SIZE);
skb               450 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	return roundup(entry->skb->len, 4) + 4;
skb               484 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb               485 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	__le32 *rxi = (__le32 *)entry->skb->data;
skb               507 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	skb_pull(entry->skb, RXINFO_DESC_SIZE);
skb               520 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
skb               571 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	skb_trim(entry->skb, rx_pkt_len);
skb              1439 drivers/net/wireless/ralink/rt2x00/rt2x00.h 		  struct sk_buff *skb);
skb                36 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 				       struct sk_buff *skb,
skb                39 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb                64 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 				      struct sk_buff *skb)
skb                66 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb                91 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
skb                93 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
skb                99 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len);
skb               102 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
skb               104 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
skb               110 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len);
skb               113 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	memmove(skb->data + txdesc->iv_len, skb->data, txdesc->iv_offset);
skb               116 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	skb_pull(skb, txdesc->iv_len);
skb               123 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, unsigned int header_length)
skb               125 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
skb               132 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	skb_push(skb, iv_len);
skb               135 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	memmove(skb->data, skb->data + iv_len, header_length);
skb               138 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	memcpy(skb->data + header_length, skbdesc->iv, iv_len);
skb               144 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
skb               149 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	unsigned int align = ALIGN_SIZE(skb, header_length);
skb               190 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 		skb_push(skb, iv_len - align);
skb               191 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 		skb_put(skb, icv_len);
skb               194 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 		memmove(skb->data + transfer,
skb               195 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 			skb->data + transfer + (iv_len - align),
skb               199 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 		skb_push(skb, iv_len + align);
skb               201 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 			skb_put(skb, icv_len - align);
skb               203 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 			skb_trim(skb, rxdesc->size + iv_len + icv_len);
skb               206 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 		memmove(skb->data + transfer,
skb               207 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 			skb->data + transfer + iv_len + align,
skb               213 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	memcpy(skb->data + transfer, rxdesc->iv, iv_len);
skb               221 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 		memmove(skb->data + transfer,
skb               222 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 			skb->data + transfer + align,
skb               239 drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c 	memcpy(skb->data + transfer, &rxdesc->icv, 4);
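The rt2x00crypto lines above move the security IV out of, and back into, the gap between the 802.11 header and the payload. A hedged sketch of both directions; the iv save buffer, offsets and lengths are supplied by the caller (compare rt2x00crypto_tx_remove_iv() and rt2x00crypto_rx_insert_iv() above):

	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Removal slides the header toward the payload and pulls; insertion
	 * pushes, slides the header back, then restores the saved IV.
	 */
	static void iv_remove(struct sk_buff *skb, u8 *iv,
			      unsigned int iv_offset, unsigned int iv_len)
	{
		memcpy(iv, skb->data + iv_offset, iv_len);	   /* save IV */
		memmove(skb->data + iv_len, skb->data, iv_offset); /* slide hdr */
		skb_pull(skb, iv_len);				   /* close gap */
	}

	static void iv_insert(struct sk_buff *skb, const u8 *iv,
			      unsigned int header_length, unsigned int iv_len)
	{
		skb_push(skb, iv_len);				   /* open gap */
		memmove(skb->data, skb->data + iv_len, header_length);
		memcpy(skb->data + header_length, iv, iv_len);	   /* restore IV */
	}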
skb               134 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	struct sk_buff *skb = entry->skb;
skb               135 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
skb               151 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	data_len = skb->len;
skb               179 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	skb_put_data(skbcopy, skb->data, skb->len);
skb               247 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	struct sk_buff *skb;
skb               256 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 				     (skb =
skb               261 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	status = min_t(size_t, skb->len, length);
skb               262 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	if (copy_to_user(buf, skb->data, status)) {
skb               270 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	kfree_skb(skb);
skb               172 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct sk_buff *skb;
skb               183 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
skb               184 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	while (skb) {
skb               185 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 		rt2x00mac_tx(rt2x00dev->hw, &control, skb);
skb               186 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 		skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
skb               264 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct ieee80211_bar *bar = (void *) entry->skb->data;
skb               390 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	entry->skb = NULL;
skb               414 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb               455 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	dev_kfree_skb_any(entry->skb);
skb               464 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
skb               465 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb               478 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	skb_pull(entry->skb, rt2x00dev->extra_tx_headroom);
skb               488 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
skb               494 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 		rt2x00queue_remove_l2pad(entry->skb, header_length);
skb               503 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 		rt2x00crypto_tx_insert_iv(entry->skb, header_length);
skb               537 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 			ieee80211_tx_status(rt2x00dev->hw, entry->skb);
skb               539 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 			ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
skb               541 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 		dev_kfree_skb_any(entry->skb);
skb               598 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 				      struct sk_buff *skb,
skb               602 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct ieee80211_bar *ba = (void *)skb->data;
skb               642 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 				      struct sk_buff *skb,
skb               645 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct ieee80211_hdr *hdr = (void *) skb->data;
skb               659 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	if (skb->len <= 40 + FCS_LEN)
skb               669 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	tim = rt2x00lib_find_ie(skb->data, skb->len - FCS_LEN, WLAN_EID_TIM);
skb               741 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct sk_buff *skb;
skb               757 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	skb = rt2x00queue_alloc_rxskb(entry, gfp);
skb               758 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	if (!skb)
skb               780 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 		dev_kfree_skb(entry->skb);
skb               788 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
skb               798 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 		rt2x00crypto_rx_insert_iv(entry->skb, header_length,
skb               803 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 		rt2x00queue_remove_l2pad(entry->skb, header_length);
skb               806 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	skb_trim(entry->skb, rxdesc.size);
skb               820 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc);
skb               826 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	rt2x00lib_rxdone_check_ba(rt2x00dev, entry->skb, &rxdesc);
skb               831 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
skb               839 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	rx_status = IEEE80211_SKB_RXCB(entry->skb);
skb               859 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	ieee80211_rx_ni(rt2x00dev->hw, entry->skb);
skb               865 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	entry->skb = skb;
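The rt2x00dev rxdone lines above end with the entry being handed a fresh buffer (entry->skb = skb). The order matters: a replacement is allocated before the received frame goes to mac80211, so the ring entry never sits empty. A sketch under assumed names (rx_entry and alloc_rx_skb() stand in for rt2x00's queue_entry and rt2x00queue_alloc_rxskb()):

	#include <linux/skbuff.h>
	#include <net/mac80211.h>

	struct rx_entry {		/* stand-in for rt2x00's queue_entry */
		struct sk_buff *skb;
	};

	static struct sk_buff *alloc_rx_skb(void)
	{
		return dev_alloc_skb(2048);	/* size is an assumption */
	}

	static void rxdone_refill(struct ieee80211_hw *hw, struct rx_entry *entry)
	{
		struct sk_buff *skb = alloc_rx_skb();

		if (!skb)
			return;		/* keep the old buffer, retry later */

		ieee80211_rx_ni(hw, entry->skb);  /* old frame to the stack */
		entry->skb = skb;		  /* ring owns the fresh one */
	}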
skb               109 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h void rt2x00queue_align_frame(struct sk_buff *skb);
skb               118 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length);
skb               128 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length);
skb               136 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
skb               201 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h 			     struct sk_buff *skb,
skb               317 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h 				       struct sk_buff *skb,
skb               320 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h 				      struct sk_buff *skb);
skb               321 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h void rt2x00crypto_tx_copy_iv(struct sk_buff *skb,
skb               323 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h void rt2x00crypto_tx_remove_iv(struct sk_buff *skb,
skb               325 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, unsigned int header_length);
skb               326 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
skb               336 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h 						     struct sk_buff *skb,
skb               342 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h 						    struct sk_buff *skb)
skb               347 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h static inline void rt2x00crypto_tx_copy_iv(struct sk_buff *skb,
skb               352 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h static inline void rt2x00crypto_tx_remove_iv(struct sk_buff *skb,
skb               357 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h static inline void rt2x00crypto_tx_insert_iv(struct sk_buff *skb,
skb               362 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
skb               180 drivers/net/wireless/ralink/rt2x00/rt2x00link.c 			     struct sk_buff *skb,
skb               186 drivers/net/wireless/ralink/rt2x00/rt2x00link.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb                25 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	struct sk_buff *skb;
skb                34 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
skb                35 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	if (unlikely(!skb)) {
skb                40 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
skb                41 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	skb_put(skb, data_length);
skb                52 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
skb                53 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	rts_info = IEEE80211_SKB_CB(skb);
skb                69 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
skb                74 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 					(struct ieee80211_cts *)(skb->data));
skb                78 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 				  (struct ieee80211_rts *)(skb->data));
skb                80 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
skb                82 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		dev_kfree_skb_any(skb);
skb                91 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		  struct sk_buff *skb)
skb                94 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb                95 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	enum data_queue_qid qid = skb_get_queue_mapping(skb);
skb               147 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
skb               151 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
skb               157 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	ieee80211_free_txskb(hw, skb);
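rt2x00mac_tx_rts_cts() above builds its own RTS/CTS frame, reserving hw->extra_tx_headroom up front so the driver can later skb_push() its TX descriptor without reallocating. A sketch of that allocation shape, with frame_len standing in for the computed control-frame length:

	#include <linux/skbuff.h>
	#include <net/mac80211.h>

	static struct sk_buff *alloc_ctl_frame(struct ieee80211_hw *hw,
					       unsigned int frame_len)
	{
		struct sk_buff *skb;

		skb = dev_alloc_skb(frame_len + hw->extra_tx_headroom);
		if (!skb)
			return NULL;

		skb_reserve(skb, hw->extra_tx_headroom); /* room for TX desc */
		skb_put(skb, frame_len);		 /* the frame body */
		return skb;
	}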
skb                67 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 		skbdesc = get_skb_frame_desc(entry->skb);
skb                27 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct sk_buff *skb;
skb                59 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
skb                60 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (!skb)
skb                67 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skb_reserve(skb, head_size);
skb                68 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skb_put(skb, frame_size);
skb                73 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skbdesc = get_skb_frame_desc(skb);
skb                79 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
skb                82 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			dev_kfree_skb_any(skb);
skb                90 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	return skb;
skb                96 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb                99 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
skb               113 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb               116 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
skb               120 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
skb               129 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (!entry->skb)
skb               133 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	dev_kfree_skb_any(entry->skb);
skb               134 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	entry->skb = NULL;
skb               137 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c void rt2x00queue_align_frame(struct sk_buff *skb)
skb               139 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	unsigned int frame_length = skb->len;
skb               140 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	unsigned int align = ALIGN_SIZE(skb, 0);
skb               145 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skb_push(skb, align);
skb               146 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	memmove(skb->data, skb->data + align, frame_length);
skb               147 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skb_trim(skb, frame_length);
skb               154 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
skb               156 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
skb               161 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skb_push(skb, l2pad);
skb               162 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	memmove(skb->data, skb->data + l2pad, hdr_len);
skb               165 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
skb               167 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
skb               172 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	memmove(skb->data + l2pad, skb->data, hdr_len);
skb               173 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skb_pull(skb, l2pad);
skb               177 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 						 struct sk_buff *skb,
skb               180 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               181 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               228 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 						  struct sk_buff *skb,
skb               232 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               249 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	data_length = skb->len + 4;
skb               250 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
skb               292 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 						struct sk_buff *skb,
skb               297 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               299 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               385 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 					     struct sk_buff *skb,
skb               389 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               390 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               400 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	txdesc->length = skb->len;
skb               401 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);
skb               476 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
skb               477 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
skb               480 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
skb               483 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
skb               509 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
skb               510 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);
skb               562 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct ieee80211_bar *bar = (void *) (entry->skb->data +
skb               601 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
skb               616 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
skb               623 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	tx_info = IEEE80211_SKB_CB(skb);
skb               626 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skbdesc = get_skb_frame_desc(skb);
skb               642 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			rt2x00crypto_tx_copy_iv(skb, &txdesc);
skb               644 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			rt2x00crypto_tx_remove_iv(skb, &txdesc);
skb               656 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
skb               658 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_align_frame(skb);
skb               684 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	entry->skb = skb;
skb               693 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		entry->skb = NULL;
skb               760 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
skb               761 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (!intf->beacon->skb)
skb               769 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
skb               774 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skbdesc = get_skb_frame_desc(intf->beacon->skb);
skb              1121 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		entries[i].skb = NULL;
skb              1150 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct sk_buff *skb;
skb              1153 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
skb              1154 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		if (!skb)
skb              1156 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->entries[i].skb = skb;
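rt2x00queue_alloc_rxskb() above over-allocates for alignment head/tail room, then DMA-maps the buffer and checks the mapping before committing it to the ring. A condensed sketch; the 2/4-byte margins are assumptions in place of rt2x00's computed head_size/tail_size:

	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	static struct sk_buff *alloc_mapped_rx_skb(struct device *dev,
						   unsigned int frame_size,
						   dma_addr_t *dma, gfp_t gfp)
	{
		struct sk_buff *skb = __dev_alloc_skb(frame_size + 4, gfp);

		if (!skb)
			return NULL;

		skb_reserve(skb, 2);		/* align the payload */
		skb_put(skb, frame_size);

		*dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *dma)) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
		return skb;
	}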
skb               115 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
skb               119 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h 	return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
skb               374 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h 	struct sk_buff *skb;
skb               312 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	status = skb_padto(entry->skb, length);
skb               324 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			  entry->skb->data, length,
skb               358 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		skbdesc = get_skb_frame_desc(entry->skb);
skb               410 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			  entry->skb->data, entry->skb->len,
skb              1307 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb              1778 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb              1903 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
skb              1904 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	if (padding_len && skb_pad(entry->skb, padding_len)) {
skb              1907 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		entry->skb = NULL;
skb              1916 drivers/net/wireless/ralink/rt2x00/rt61pci.c 				       entry->skb->data,
skb              1917 drivers/net/wireless/ralink/rt2x00/rt61pci.c 				       entry->skb->len + padding_len);
skb              1933 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	dev_kfree_skb_any(entry->skb);
skb              1934 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	entry->skb = NULL;
skb              1447 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb              1448 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	__le32 *txd = (__le32 *) entry->skb->data;
skb              1541 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	skb_push(entry->skb, TXD_DESC_SIZE);
skb              1542 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	memset(entry->skb->data, 0, TXD_DESC_SIZE);
skb              1557 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
skb              1558 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	if (padding_len && skb_pad(entry->skb, padding_len)) {
skb              1561 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		entry->skb = NULL;
skb              1567 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
skb              1568 drivers/net/wireless/ralink/rt2x00/rt73usb.c 				      entry->skb->len + padding_len);
skb              1584 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	dev_kfree_skb(entry->skb);
skb              1585 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	entry->skb = NULL;
skb              1623 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	length = roundup(entry->skb->len, 4);
skb              1671 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skb              1672 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	__le32 *rxd = (__le32 *)entry->skb->data;
skb              1742 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	skb_pull(entry->skb, entry->queue->desc_size);
skb              1743 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	skb_trim(entry->skb, rxdesc->size);
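The rt61pci and rt73usb beacon paths above pad the frame to a 4-byte multiple before writing it to the chip. One subtlety worth a sketch: skb_pad() frees the skb on failure, which is why those error paths only NULL entry->skb rather than freeing it again.

	#include <linux/skbuff.h>
	#include <linux/kernel.h>

	static int pad_beacon(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;
		unsigned int pad = roundup(skb->len, 4) - skb->len;

		if (pad && skb_pad(skb, pad)) {
			*pskb = NULL;	/* skb_pad() already freed it */
			return -ENOMEM;
		}
		return 0;
	}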
skb                68 drivers/net/wireless/ray_cs.c static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
skb                76 drivers/net/wireless/ray_cs.c static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len);
skb               817 drivers/net/wireless/ray_cs.c static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
skb               822 drivers/net/wireless/ray_cs.c 	short length = skb->len;
skb               826 drivers/net/wireless/ray_cs.c 		dev_kfree_skb(skb);
skb               830 drivers/net/wireless/ray_cs.c 	dev_dbg(&link->dev, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev);
skb               841 drivers/net/wireless/ray_cs.c 		if (skb_padto(skb, ETH_ZLEN))
skb               845 drivers/net/wireless/ray_cs.c 	switch (ray_hw_xmit(skb->data, length, dev, DATA_TYPE)) {
skb               854 drivers/net/wireless/ray_cs.c 		dev_kfree_skb(skb);
skb              2099 drivers/net/wireless/ray_cs.c 	struct sk_buff *skb = NULL;
skb              2166 drivers/net/wireless/ray_cs.c 	skb = dev_alloc_skb(total_len + 5);
skb              2167 drivers/net/wireless/ray_cs.c 	if (skb == NULL) {
skb              2174 drivers/net/wireless/ray_cs.c 	skb_reserve(skb, 2);	/* Align IP on 16 byte (TBD check this) */
skb              2181 drivers/net/wireless/ray_cs.c 	rx_ptr = skb_put(skb, total_len);
skb              2187 drivers/net/wireless/ray_cs.c 	skb_copy_from_linear_data_offset(skb,
skb              2196 drivers/net/wireless/ray_cs.c 			skb_pull(skb, RX_MAC_HEADER_LENGTH);
skb              2199 drivers/net/wireless/ray_cs.c 			untranslate(local, skb, total_len);
skb              2235 drivers/net/wireless/ray_cs.c 	skb->protocol = eth_type_trans(skb, dev);
skb              2236 drivers/net/wireless/ray_cs.c 	netif_rx(skb);
skb              2266 drivers/net/wireless/ray_cs.c static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
skb              2268 drivers/net/wireless/ray_cs.c 	snaphdr_t *psnap = (snaphdr_t *) (skb->data + RX_MAC_HEADER_LENGTH);
skb              2269 drivers/net/wireless/ray_cs.c 	struct ieee80211_hdr *pmac = (struct ieee80211_hdr *)skb->data;
skb              2285 drivers/net/wireless/ray_cs.c 			       skb->data, 64, true);
skb              2290 drivers/net/wireless/ray_cs.c 		printk(KERN_DEBUG "untranslate skb->data = %p\n", skb->data);
skb              2300 drivers/net/wireless/ray_cs.c 		peth = (struct ethhdr *)(skb->data + delta);
skb              2308 drivers/net/wireless/ray_cs.c 			peth = (struct ethhdr *)(skb->data + delta);
skb              2316 drivers/net/wireless/ray_cs.c 				peth = (struct ethhdr *)(skb->data + delta);
skb              2324 drivers/net/wireless/ray_cs.c 				peth = (struct ethhdr *)(skb->data + delta);
skb              2331 drivers/net/wireless/ray_cs.c 			peth = (struct ethhdr *)(skb->data + delta);
skb              2336 drivers/net/wireless/ray_cs.c 	skb_pull(skb, delta);
skb              2338 drivers/net/wireless/ray_cs.c 	      skb->data);
skb              2346 drivers/net/wireless/ray_cs.c 			printk("%02x ", skb->data[i]);
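The ray_cs receive path above is the classic copy-based PCMCIA hand-off. A minimal sketch of the same shape; the 2-byte skb_reserve() puts the IP header on a 16-byte boundary once the 14-byte Ethernet header is in place, and eth_type_trans() fills in skb->protocol:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	static void rx_to_stack(struct net_device *dev, const u8 *buf,
				unsigned int len)
	{
		struct sk_buff *skb = dev_alloc_skb(len + 2);

		if (!skb)
			return;

		skb_reserve(skb, 2);		/* align IP header */
		skb_put_data(skb, buf, len);	/* copy the frame in */
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
	}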
skb               222 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
skb               275 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 					 *((dma_addr_t *)skb->cb),
skb               277 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 			skb_put(skb, flags & 0xFFF);
skb               322 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
skb               323 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 			ieee80211_rx_irqsafe(dev, skb);
skb               325 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 			skb = new_skb;
skb               326 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 			priv->rx_buf[priv->rx_idx] = skb;
skb               327 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 			*((dma_addr_t *) skb->cb) = mapping;
skb               332 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		cmd_desc->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
skb               349 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		struct sk_buff *skb;
skb               357 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		skb = __skb_dequeue(&ring->queue);
skb               359 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				 skb->len, PCI_DMA_TODEVICE);
skb               361 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		info = IEEE80211_SKB_CB(skb);
skb               370 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		ieee80211_tx_status_irqsafe(dev, skb);
skb               456 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		       struct sk_buff *skb)
skb               458 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               459 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               473 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	prio = skb_get_queue_mapping(skb);
skb               476 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	mapping = pci_map_single(priv->pdev, skb->data,
skb               477 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				 skb->len, PCI_DMA_TODEVICE);
skb               480 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		kfree_skb(skb);
skb               488 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		   skb->len;
skb               504 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 						skb->len, info);
skb               509 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 						skb->len, info);
skb               515 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		plcp_len = DIV_ROUND_UP(16 * (skb->len + 4),
skb               517 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		remainder = (16 * (skb->len + 4)) %
skb               529 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 					NL80211_BAND_2GHZ, skb->len,
skb               549 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		entry->frame_len_se = cpu_to_le16(skb->len);
skb               554 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		entry->frame_len = cpu_to_le32(skb->len);
skb               573 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	__skb_queue_tail(&ring->queue, skb);
skb              1017 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		struct sk_buff *skb = dev_alloc_skb(MAX_RX_SIZE);
skb              1020 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		if (!skb) {
skb              1026 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		priv->rx_buf[i] = skb;
skb              1027 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		mapping = (dma_addr_t *)skb->cb;
skb              1028 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		*mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
skb              1032 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 			kfree_skb(skb);
skb              1053 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		struct sk_buff *skb = priv->rx_buf[i];
skb              1054 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		if (!skb)
skb              1058 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				 *((dma_addr_t *)skb->cb),
skb              1060 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		kfree_skb(skb);
skb              1104 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
skb              1107 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 				 skb->len, PCI_DMA_TODEVICE);
skb              1108 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		kfree_skb(skb);
skb              1288 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	struct sk_buff *skb;
skb              1295 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	skb = ieee80211_beacon_get(dev, vif);
skb              1296 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	if (!skb)
skb              1303 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	mgmt = (struct ieee80211_mgmt *)skb->data;
skb              1307 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	skb_set_queue_mapping(skb, 0);
skb              1309 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	rtl8180_tx(dev, NULL, skb);
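rtl8180 above stashes each RX buffer's DMA address inside the skb's own control block (skb->cb), avoiding a parallel mapping array. A sketch of filling one ring slot that way, using the same legacy PCI DMA API the listed driver uses; buf_size stands in for MAX_RX_SIZE:

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static int rx_ring_fill_one(struct pci_dev *pdev, struct sk_buff *skb,
				    unsigned int buf_size)
	{
		dma_addr_t *mapping = (dma_addr_t *)skb->cb;

		*mapping = pci_map_single(pdev, skb_tail_pointer(skb),
					  buf_size, PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, *mapping))
			return -ENOMEM;
		return 0;
	}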
skb               188 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	struct sk_buff *skb = (struct sk_buff *)urb->context;
skb               189 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               193 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	skb_pull(skb, priv->is_rtl8187b ? sizeof(struct rtl8187b_tx_hdr) :
skb               199 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			skb_queue_tail(&priv->b_tx_status.queue, skb);
skb               217 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		ieee80211_tx_status_irqsafe(hw, skb);
skb               222 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		skb_queue_tail(&priv->b_tx_status.queue, skb);
skb               229 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		       struct sk_buff *skb)
skb               232 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               233 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	struct ieee80211_hdr *tx_hdr =	(struct ieee80211_hdr *)(skb->data);
skb               243 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		kfree_skb(skb);
skb               247 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	flags = skb->len;
skb               262 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 						 skb->len, info);
skb               267 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 						 skb->len, info);
skb               278 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		struct rtl8187_tx_hdr *hdr = skb_push(skb, sizeof(*hdr));
skb               291 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		struct rtl8187b_tx_hdr *hdr = skb_push(skb, sizeof(*hdr));
skb               301 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 							 skb->len, txrate);
skb               307 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			ep = epmap[skb_get_queue_mapping(skb)];
skb               314 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			  buf, skb->len, rtl8187_tx_cb, skb);
skb               320 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		kfree_skb(skb);
skb               327 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	struct sk_buff *skb = (struct sk_buff *)urb->context;
skb               328 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	struct rtl8187_rx_info *info = (struct rtl8187_rx_info *)skb->cb;
skb               337 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	__skb_unlink(skb, &priv->rx_queue);
skb               339 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	skb_put(skb, urb->actual_length);
skb               342 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		dev_kfree_skb_irq(skb);
skb               348 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			(typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr));
skb               359 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			(typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr));
skb               381 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	skb_trim(skb, flags & 0x0FFF);
skb               390 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
skb               391 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	ieee80211_rx_irqsafe(dev, skb);
skb               393 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	skb = dev_alloc_skb(RTL8187_MAX_RX);
skb               394 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	if (unlikely(!skb)) {
skb               399 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	info = (struct rtl8187_rx_info *)skb->cb;
skb               402 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	urb->transfer_buffer = skb_tail_pointer(skb);
skb               403 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	urb->context = skb;
skb               404 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	skb_queue_tail(&priv->rx_queue, skb);
skb               409 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		skb_unlink(skb, &priv->rx_queue);
skb               410 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		dev_kfree_skb_irq(skb);
skb               418 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	struct sk_buff *skb;
skb               423 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL);
skb               424 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		if (!skb) {
skb               436 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 				  skb_tail_pointer(skb),
skb               437 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 				  RTL8187_MAX_RX, rtl8187_rx_cb, skb);
skb               438 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		info = (struct rtl8187_rx_info *)skb->cb;
skb               441 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		skb_queue_tail(&priv->rx_queue, skb);
skb               445 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			skb_unlink(skb, &priv->rx_queue);
skb               455 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	kfree_skb(skb);
skb               500 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		struct sk_buff *skb, *iter;
skb               509 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		skb = NULL;
skb               525 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 				skb = iter;
skb               529 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		if (skb) {
skb               530 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               532 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			__skb_unlink(skb, &priv->b_tx_status.queue);
skb               537 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			ieee80211_tx_status_irqsafe(hw, skb);
skb              1025 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	struct sk_buff *skb;
skb              1044 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	while ((skb = skb_dequeue(&priv->b_tx_status.queue)))
skb              1045 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		dev_kfree_skb_any(skb);
skb              1071 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	struct sk_buff *skb;
skb              1078 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	skb = ieee80211_beacon_get(dev, vif);
skb              1079 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	if (!skb)
skb              1086 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	mgmt = (struct ieee80211_mgmt *)skb->data;
skb              1090 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	skb_set_queue_mapping(skb, 0);
skb              1092 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	rtl8187_tx(dev, NULL, skb);
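rtl8187_tx() above prepends a device TX header with skb_push() and passes the skb as the URB context so the completion callback can recover it. A hedged sketch of one submission; the header size, endpoint pipe, and the trivial completion stub are assumptions:

	#include <linux/usb.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>

	static void tx_complete_cb(struct urb *urb)
	{
		dev_kfree_skb_irq(urb->context);	/* minimal stub */
	}

	static int usb_tx_one(struct usb_device *udev, unsigned int pipe,
			      struct sk_buff *skb, unsigned int tx_hdr_size)
	{
		struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
		void *hdr;
		int rc;

		if (!urb)
			return -ENOMEM;

		hdr = skb_push(skb, tx_hdr_size);	/* device TX header */
		memset(hdr, 0, tx_hdr_size);

		usb_fill_bulk_urb(urb, udev, pipe, skb->data, skb->len,
				  tx_complete_cb, skb);
		rc = usb_submit_urb(urb, GFP_ATOMIC);
		usb_free_urb(urb);	/* drop our ref; core holds its own */
		return rc;
	}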
skb              1320 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h 	int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb);
skb              1426 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
skb              1427 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
skb              4605 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              4607 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              4613 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		queue = rtl8xxxu_80211_to_rtl_queue(skb_get_queue_mapping(skb));
skb              4701 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	struct sk_buff *skb = (struct sk_buff *)urb->context;
skb              4708 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	tx_info = IEEE80211_SKB_CB(skb);
skb              4712 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	skb_pull(skb, priv->fops->tx_desc_size);
skb              4721 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	ieee80211_tx_status_irqsafe(hw, skb);
skb              4909 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			struct sk_buff *skb)
skb              4911 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              4912 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb              4920 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	u16 pktlen = skb->len;
skb              4926 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	if (skb_headroom(skb) < tx_desc_size) {
skb              4929 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			 __func__, skb_headroom(skb));
skb              4933 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	if (unlikely(skb->len > (65535 - tx_desc_size))) {
skb              4935 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			 __func__, skb->len);
skb              4953 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	tx_desc = skb_push(skb, tx_desc_size);
skb              4965 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	queue = rtl8xxxu_queue_select(hw, skb);
skb              5021 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			  skb->data, skb->len, rtl8xxxu_tx_complete, skb);
skb              5032 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	dev_kfree_skb(skb);
skb              5089 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	struct sk_buff *skb;
skb              5100 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		skb = (struct sk_buff *)rx_urb->urb.context;
skb              5101 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		dev_kfree_skb(skb);
skb              5116 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	struct sk_buff *skb;
skb              5147 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			skb = (struct sk_buff *)rx_urb->urb.context;
skb              5148 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			dev_kfree_skb(skb);
skb              5155 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 				 struct sk_buff *skb)
skb              5157 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data;
skb              5161 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	len = skb->len - 2;
skb              5199 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
skb              5211 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	urb_len = skb->len;
skb              5215 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		rx_desc = (struct rtl8xxxu_rxdesc16 *)skb->data;
skb              5216 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		_rx_desc_le = (__le32 *)skb->data;
skb              5217 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		_rx_desc = (u32 *)skb->data;
skb              5242 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			next_skb = skb_clone(skb, GFP_ATOMIC);
skb              5244 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		rx_status = IEEE80211_SKB_RXCB(skb);
skb              5247 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16));
skb              5249 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		phy_stats = (struct rtl8723au_phy_stats *)skb->data;
skb              5251 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		skb_pull(skb, drvinfo_sz + desc_shift);
skb              5253 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		skb_trim(skb, pkt_len);
skb              5279 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		ieee80211_rx_irqsafe(hw, skb);
skb              5281 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		skb = next_skb;
skb              5282 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		if (skb)
skb              5287 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	} while (skb && urb_len > 0 && pkt_cnt > 0);
skb              5292 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
skb              5295 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb              5297 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		(struct rtl8xxxu_rxdesc24 *)skb->data;
skb              5299 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	__le32 *_rx_desc_le = (__le32 *)skb->data;
skb              5300 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	u32 *_rx_desc = (u32 *)skb->data;
skb              5309 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc24));
skb              5311 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	phy_stats = (struct rtl8723au_phy_stats *)skb->data;
skb              5315 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	skb_pull(skb, drvinfo_sz + desc_shift);
skb              5320 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		rtl8723bu_handle_c2h(priv, skb);
skb              5321 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		dev_kfree_skb(skb);
skb              5349 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	ieee80211_rx_irqsafe(hw, skb);
skb              5359 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	struct sk_buff *skb = (struct sk_buff *)urb->context;
skb              5362 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	skb_put(skb, urb->actual_length);
skb              5365 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		priv->fops->parse_rx_desc(priv, skb);
skb              5367 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		skb = NULL;
skb              5378 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	dev_kfree_skb(skb);
skb              5386 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	struct sk_buff *skb;
skb              5399 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
skb              5400 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	if (!skb)
skb              5403 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	memset(skb->data, 0, rx_desc_sz);
skb              5404 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data,
skb              5405 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			  skb_size, rtl8xxxu_rx_complete, skb);
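rtl8xxxu's TX path above checks skb_headroom() before prepending its descriptor, since mac80211 only guarantees hw->extra_tx_headroom bytes of headroom ahead of the frame. A sketch of that guard:

	#include <linux/skbuff.h>
	#include <linux/string.h>

	static void *push_tx_desc(struct sk_buff *skb, unsigned int desc_size)
	{
		void *desc;

		if (skb_headroom(skb) < desc_size)
			return NULL;		/* caller drops the frame */

		desc = skb_push(skb, desc_size);
		memset(desc, 0, desc_size);	/* start from a clean desc */
		return desc;
	}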
skb              1225 drivers/net/wireless/realtek/rtlwifi/base.c 		      struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
skb              1237 drivers/net/wireless/realtek/rtlwifi/base.c 	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
skb              1241 drivers/net/wireless/realtek/rtlwifi/base.c 	__le16 fc = rtl_get_fc(skb);
skb              1245 drivers/net/wireless/realtek/rtlwifi/base.c 	if (rtl_is_tx_report_skb(hw, skb))
skb              1314 drivers/net/wireless/realtek/rtlwifi/base.c bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              1318 drivers/net/wireless/realtek/rtlwifi/base.c 	__le16 fc = rtl_get_fc(skb);
skb              1367 drivers/net/wireless/realtek/rtlwifi/base.c bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
skb              1370 drivers/net/wireless/realtek/rtlwifi/base.c 	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
skb              1372 drivers/net/wireless/realtek/rtlwifi/base.c 	__le16 fc = rtl_get_fc(skb);
skb              1373 drivers/net/wireless/realtek/rtlwifi/base.c 	u8 *act = (u8 *)(((u8 *)skb->data + MAC80211_3ADDR_LEN));
skb              1392 drivers/net/wireless/realtek/rtlwifi/base.c 				skb->data, skb->len);
skb              1397 drivers/net/wireless/realtek/rtlwifi/base.c 				struct ieee80211_mgmt *mgmt = (void *)skb->data;
skb              1463 drivers/net/wireless/realtek/rtlwifi/base.c 					struct sk_buff *skb, bool is_enc)
skb              1466 drivers/net/wireless/realtek/rtlwifi/base.c 	u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
skb              1489 drivers/net/wireless/realtek/rtlwifi/base.c 	return skb->data + offset;
skb              1493 drivers/net/wireless/realtek/rtlwifi/base.c u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
skb              1498 drivers/net/wireless/realtek/rtlwifi/base.c 	__le16 fc = rtl_get_fc(skb);
skb              1506 drivers/net/wireless/realtek/rtlwifi/base.c 	ether_type_ptr = rtl_skb_ether_type_ptr(hw, skb, is_enc);
skb              1568 drivers/net/wireless/realtek/rtlwifi/base.c void rtl_tx_ackqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              1573 drivers/net/wireless/realtek/rtlwifi/base.c 	__skb_queue_tail(&tx_report->queue, skb);
skb              1577 drivers/net/wireless/realtek/rtlwifi/base.c static void rtl_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
skb              1583 drivers/net/wireless/realtek/rtlwifi/base.c 	info = IEEE80211_SKB_CB(skb);
skb              1594 drivers/net/wireless/realtek/rtlwifi/base.c 	ieee80211_tx_status_irqsafe(hw, skb);
skb              1597 drivers/net/wireless/realtek/rtlwifi/base.c bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              1601 drivers/net/wireless/realtek/rtlwifi/base.c 	__le16 fc = rtl_get_fc(skb);
skb              1603 drivers/net/wireless/realtek/rtlwifi/base.c 	ether_type_ptr = rtl_skb_ether_type_ptr(hw, skb, true);
skb              1653 drivers/net/wireless/realtek/rtlwifi/base.c 	struct sk_buff *skb;
skb              1669 drivers/net/wireless/realtek/rtlwifi/base.c 	skb_queue_walk(queue, skb) {
skb              1670 drivers/net/wireless/realtek/rtlwifi/base.c 		tx_info = rtl_tx_skb_cb_info(skb);
skb              1672 drivers/net/wireless/realtek/rtlwifi/base.c 			skb_unlink(skb, queue);
skb              1673 drivers/net/wireless/realtek/rtlwifi/base.c 			rtl_tx_status(hw, skb, st == 0);
skb              1905 drivers/net/wireless/realtek/rtlwifi/base.c void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              1908 drivers/net/wireless/realtek/rtlwifi/base.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1922 drivers/net/wireless/realtek/rtlwifi/base.c 	if (skb->len <= 40 + FCS_LEN)
skb              1951 drivers/net/wireless/realtek/rtlwifi/base.c 	struct sk_buff *skb, *tmp;
skb              1954 drivers/net/wireless/realtek/rtlwifi/base.c 	skb_queue_walk_safe(queue, skb, tmp) {
skb              1955 drivers/net/wireless/realtek/rtlwifi/base.c 		tx_info = rtl_tx_skb_cb_info(skb);
skb              1959 drivers/net/wireless/realtek/rtlwifi/base.c 		skb_unlink(skb, queue);
skb              1960 drivers/net/wireless/realtek/rtlwifi/base.c 		rtl_tx_status(hw, skb, false);
skb              1991 drivers/net/wireless/realtek/rtlwifi/base.c void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              1994 drivers/net/wireless/realtek/rtlwifi/base.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2254 drivers/net/wireless/realtek/rtlwifi/base.c 				    struct sk_buff *skb);
skb              2256 drivers/net/wireless/realtek/rtlwifi/base.c static bool rtl_c2h_fast_cmd(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              2258 drivers/net/wireless/realtek/rtlwifi/base.c 	u8 cmd_id = GET_C2H_CMD_ID(skb->data);
skb              2270 drivers/net/wireless/realtek/rtlwifi/base.c void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              2275 drivers/net/wireless/realtek/rtlwifi/base.c 	if (rtl_c2h_fast_cmd(hw, skb)) {
skb              2276 drivers/net/wireless/realtek/rtlwifi/base.c 		rtl_c2h_content_parsing(hw, skb);
skb              2277 drivers/net/wireless/realtek/rtlwifi/base.c 		kfree_skb(skb);
skb              2284 drivers/net/wireless/realtek/rtlwifi/base.c 	__skb_queue_tail(&rtlpriv->c2hcmd_queue, skb);
skb              2294 drivers/net/wireless/realtek/rtlwifi/base.c 				    struct sk_buff *skb)
skb              2302 drivers/net/wireless/realtek/rtlwifi/base.c 	cmd_id = GET_C2H_CMD_ID(skb->data);
skb              2303 drivers/net/wireless/realtek/rtlwifi/base.c 	cmd_len = skb->len - C2H_DATA_OFFSET;
skb              2304 drivers/net/wireless/realtek/rtlwifi/base.c 	cmd_buf = GET_C2H_DATA_PTR(skb->data);
skb              2343 drivers/net/wireless/realtek/rtlwifi/base.c 	struct sk_buff *skb;
skb              2351 drivers/net/wireless/realtek/rtlwifi/base.c 		skb = __skb_dequeue(&rtlpriv->c2hcmd_queue);
skb              2356 drivers/net/wireless/realtek/rtlwifi/base.c 		if (!skb)
skb              2360 drivers/net/wireless/realtek/rtlwifi/base.c 			 *((u8 *)skb->cb));
skb              2362 drivers/net/wireless/realtek/rtlwifi/base.c 			      "C2H data: ", skb->data, skb->len);
skb              2365 drivers/net/wireless/realtek/rtlwifi/base.c 			rtl_c2h_content_parsing(hw, skb);
skb              2368 drivers/net/wireless/realtek/rtlwifi/base.c 		dev_kfree_skb_any(skb);
skb              2426 drivers/net/wireless/realtek/rtlwifi/base.c 	struct sk_buff *skb;
skb              2430 drivers/net/wireless/realtek/rtlwifi/base.c 	skb = dev_alloc_skb(27 + hw->extra_tx_headroom);
skb              2431 drivers/net/wireless/realtek/rtlwifi/base.c 	if (!skb)
skb              2434 drivers/net/wireless/realtek/rtlwifi/base.c 	skb_reserve(skb, hw->extra_tx_headroom);
skb              2435 drivers/net/wireless/realtek/rtlwifi/base.c 	action_frame = skb_put_zero(skb, 27);
skb              2462 drivers/net/wireless/realtek/rtlwifi/base.c 	return skb;
skb              2472 drivers/net/wireless/realtek/rtlwifi/base.c 	struct sk_buff *skb = NULL;
skb              2495 drivers/net/wireless/realtek/rtlwifi/base.c 	skb = rtl_make_smps_action(hw, smps, sta->addr, bssid);
skb              2497 drivers/net/wireless/realtek/rtlwifi/base.c 	if (skb) {
skb              2498 drivers/net/wireless/realtek/rtlwifi/base.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              2506 drivers/net/wireless/realtek/rtlwifi/base.c 		rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
skb              2552 drivers/net/wireless/realtek/rtlwifi/base.c 	struct sk_buff *skb;
skb              2557 drivers/net/wireless/realtek/rtlwifi/base.c 	skb = dev_alloc_skb(34 + hw->extra_tx_headroom);
skb              2558 drivers/net/wireless/realtek/rtlwifi/base.c 	if (!skb)
skb              2561 drivers/net/wireless/realtek/rtlwifi/base.c 	skb_reserve(skb, hw->extra_tx_headroom);
skb              2562 drivers/net/wireless/realtek/rtlwifi/base.c 	action_frame = skb_put_zero(skb, 34);
skb              2577 drivers/net/wireless/realtek/rtlwifi/base.c 	return skb;
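rtl_c2hcmd_enqueue() above defers C2H firmware events from interrupt context to later parsing by parking them on an sk_buff_head. A sketch of the same producer/consumer shape using the locked queue helpers (the listed code uses the __-prefixed variants under its own spinlock):

	#include <linux/skbuff.h>

	static void c2h_enqueue(struct sk_buff_head *q, struct sk_buff *skb)
	{
		skb_queue_tail(q, skb);		/* takes q->lock internally */
	}

	static void c2h_drain(struct sk_buff_head *q,
			      void (*parse)(struct sk_buff *skb))
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(q)) != NULL) {
			parse(skb);		/* process context */
			dev_kfree_skb_any(skb);
		}
	}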
skb                77 drivers/net/wireless/realtek/rtlwifi/base.h bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
skb                80 drivers/net/wireless/realtek/rtlwifi/base.h bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
skb                81 drivers/net/wireless/realtek/rtlwifi/base.h u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
skb                84 drivers/net/wireless/realtek/rtlwifi/base.h void rtl_tx_ackqueue(struct ieee80211_hw *hw, struct sk_buff *skb);
skb                85 drivers/net/wireless/realtek/rtlwifi/base.h bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb);
skb                97 drivers/net/wireless/realtek/rtlwifi/base.h void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
skb                98 drivers/net/wireless/realtek/rtlwifi/base.h void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb);
skb               115 drivers/net/wireless/realtek/rtlwifi/base.h void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb);
skb               122 drivers/net/wireless/realtek/rtlwifi/base.h 		      struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc);
skb               185 drivers/net/wireless/realtek/rtlwifi/core.c 		      struct sk_buff *skb)
skb               200 drivers/net/wireless/realtek/rtlwifi/core.c 	if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
skb               201 drivers/net/wireless/realtek/rtlwifi/core.c 		rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
skb               205 drivers/net/wireless/realtek/rtlwifi/core.c 	dev_kfree_skb_any(skb);
skb              1010 drivers/net/wireless/realtek/rtlwifi/core.c 	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
skb              1013 drivers/net/wireless/realtek/rtlwifi/core.c 	if (skb) {
skb              1015 drivers/net/wireless/realtek/rtlwifi/core.c 		rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
skb              1846 drivers/net/wireless/realtek/rtlwifi/core.c bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              1865 drivers/net/wireless/realtek/rtlwifi/core.c 	rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
skb              1867 drivers/net/wireless/realtek/rtlwifi/core.c 	__skb_queue_tail(&ring->queue, skb);
skb                60 drivers/net/wireless/realtek/rtlwifi/core.h bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
skb                34 drivers/net/wireless/realtek/rtlwifi/pci.c static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
skb                37 drivers/net/wireless/realtek/rtlwifi/pci.c 	__le16 fc = rtl_get_fc(skb);
skb                38 drivers/net/wireless/realtek/rtlwifi/pci.c 	u8 queue_index = skb_get_queue_mapping(skb);
skb                49 drivers/net/wireless/realtek/rtlwifi/pci.c 		hdr = rtl_get_hdr(skb);
skb               432 drivers/net/wireless/realtek/rtlwifi/pci.c 				       struct sk_buff *skb,
skb               436 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               478 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct sk_buff *skb = NULL;
skb               506 drivers/net/wireless/realtek/rtlwifi/pci.c 				skb = skb_dequeue(&mac->skb_waitq[tid]);
skb               516 drivers/net/wireless/realtek/rtlwifi/pci.c 			info = IEEE80211_SKB_CB(skb);
skb               518 drivers/net/wireless/realtek/rtlwifi/pci.c 				_rtl_update_earlymode_info(hw, skb,
skb               521 drivers/net/wireless/realtek/rtlwifi/pci.c 			rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
skb               534 drivers/net/wireless/realtek/rtlwifi/pci.c 		struct sk_buff *skb;
skb               549 drivers/net/wireless/realtek/rtlwifi/pci.c 		skb = __skb_dequeue(&ring->queue);
skb               554 drivers/net/wireless/realtek/rtlwifi/pci.c 				 skb->len, PCI_DMA_TODEVICE);
skb               558 drivers/net/wireless/realtek/rtlwifi/pci.c 			skb_pull(skb, EM_HDR_LEN);
skb               564 drivers/net/wireless/realtek/rtlwifi/pci.c 			 *(u16 *)(skb->data + 22));
skb               567 drivers/net/wireless/realtek/rtlwifi/pci.c 			dev_kfree_skb(skb);
skb               575 drivers/net/wireless/realtek/rtlwifi/pci.c 		fc = rtl_get_fc(skb);
skb               586 drivers/net/wireless/realtek/rtlwifi/pci.c 				(struct ieee80211_mgmt *)skb->data;
skb               589 drivers/net/wireless/realtek/rtlwifi/pci.c 				dev_kfree_skb(skb);
skb               595 drivers/net/wireless/realtek/rtlwifi/pci.c 		tid = rtl_get_tid(skb);
skb               599 drivers/net/wireless/realtek/rtlwifi/pci.c 		info = IEEE80211_SKB_CB(skb);
skb               605 drivers/net/wireless/realtek/rtlwifi/pci.c 			ieee80211_tx_status_irqsafe(hw, skb);
skb               607 drivers/net/wireless/realtek/rtlwifi/pci.c 			rtl_tx_ackqueue(hw, skb);
skb               616 drivers/net/wireless/realtek/rtlwifi/pci.c 			ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
skb               619 drivers/net/wireless/realtek/rtlwifi/pci.c 		skb = NULL;
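
The TX-completion lines above end by handing the skb back to mac80211 with an ACK status. A minimal sketch of that reporting step, assuming the frame was acked; report_tx_done() is a hypothetical name:

	#include <linux/skbuff.h>
	#include <net/mac80211.h>

	static void report_tx_done(struct ieee80211_hw *hw, struct sk_buff *skb)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		ieee80211_tx_info_clear_status(info);	/* wipe driver/rate-control state */
		info->flags |= IEEE80211_TX_STAT_ACK;	/* assumed: frame was acked */
		ieee80211_tx_status_irqsafe(hw, skb);	/* mac80211 now owns the skb */
	}
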
skb               636 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct sk_buff *skb;
skb               639 drivers/net/wireless/realtek/rtlwifi/pci.c 		skb = new_skb;
skb               642 drivers/net/wireless/realtek/rtlwifi/pci.c 	skb = dev_alloc_skb(rtlpci->rxbuffersize);
skb               643 drivers/net/wireless/realtek/rtlwifi/pci.c 	if (!skb)
skb               648 drivers/net/wireless/realtek/rtlwifi/pci.c 	*((dma_addr_t *)skb->cb) =
skb               649 drivers/net/wireless/realtek/rtlwifi/pci.c 		pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
skb               651 drivers/net/wireless/realtek/rtlwifi/pci.c 	bufferaddress = *((dma_addr_t *)skb->cb);
skb               654 drivers/net/wireless/realtek/rtlwifi/pci.c 	rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
skb               659 drivers/net/wireless/realtek/rtlwifi/pci.c 					    (u8 *)(dma_addr_t *)skb->cb);
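
_rtl_pci_init_one_rxdesc() above parks the DMA handle inside skb->cb so the completion path can unmap the buffer later. A minimal sketch of that idiom, using the same legacy pci_map_single() API as the driver; rx_refill() and its parameters are illustrative:

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static struct sk_buff *rx_refill(struct pci_dev *pdev, unsigned int bufsize)
	{
		struct sk_buff *skb = dev_alloc_skb(bufsize);
		dma_addr_t mapping;

		if (!skb)
			return NULL;

		mapping = pci_map_single(pdev, skb_tail_pointer(skb), bufsize,
					 PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, mapping)) {
			kfree_skb(skb);
			return NULL;
		}
		*((dma_addr_t *)skb->cb) = mapping;	/* read back on RX completion */
		return skb;
	}
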
skb               688 drivers/net/wireless/realtek/rtlwifi/pci.c 				    struct sk_buff *skb,
skb               691 drivers/net/wireless/realtek/rtlwifi/pci.c 	if (unlikely(!rtl_action_proc(hw, skb, false))) {
skb               692 drivers/net/wireless/realtek/rtlwifi/pci.c 		dev_kfree_skb_any(skb);
skb               696 drivers/net/wireless/realtek/rtlwifi/pci.c 		uskb = dev_alloc_skb(skb->len + 128);
skb               700 drivers/net/wireless/realtek/rtlwifi/pci.c 			skb_put_data(uskb, skb->data, skb->len);
skb               701 drivers/net/wireless/realtek/rtlwifi/pci.c 			dev_kfree_skb_any(skb);
skb               704 drivers/net/wireless/realtek/rtlwifi/pci.c 			ieee80211_rx_irqsafe(hw, skb);
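
_rtl_pci_rx_to_mac80211() above copies the DMA buffer into a fresh skb before handing it to mac80211, so the ring buffer can be recycled immediately. A minimal sketch under the same assumptions (the 128-byte slack mirrors the allocation above; rx_to_mac80211() is illustrative):

	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <net/mac80211.h>

	static void rx_to_mac80211(struct ieee80211_hw *hw, struct sk_buff *skb,
				   struct ieee80211_rx_status *rx_status)
	{
		struct sk_buff *uskb = dev_alloc_skb(skb->len + 128);

		if (!uskb) {
			dev_kfree_skb_any(skb);
			return;
		}
		memcpy(IEEE80211_SKB_RXCB(uskb), rx_status, sizeof(*rx_status));
		skb_put_data(uskb, skb->data, skb->len);
		dev_kfree_skb_any(skb);		/* DMA buffer can be reused */
		ieee80211_rx_irqsafe(hw, uskb);	/* mac80211 frees uskb when done */
	}
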
skb               747 drivers/net/wireless/realtek/rtlwifi/pci.c 		struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
skb               760 drivers/net/wireless/realtek/rtlwifi/pci.c 			pdesc = (struct rtl_rx_desc *)skb->data;
skb               776 drivers/net/wireless/realtek/rtlwifi/pci.c 		pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
skb               785 drivers/net/wireless/realtek/rtlwifi/pci.c 						 &rx_status, (u8 *)pdesc, skb);
skb               795 drivers/net/wireless/realtek/rtlwifi/pci.c 		if (skb->end - skb->tail > len) {
skb               796 drivers/net/wireless/realtek/rtlwifi/pci.c 			skb_put(skb, len);
skb               798 drivers/net/wireless/realtek/rtlwifi/pci.c 				skb_reserve(skb, stats.rx_drvinfo_size +
skb               801 drivers/net/wireless/realtek/rtlwifi/pci.c 				skb_reserve(skb, stats.rx_drvinfo_size +
skb               806 drivers/net/wireless/realtek/rtlwifi/pci.c 				 skb->end - skb->tail, len);
skb               807 drivers/net/wireless/realtek/rtlwifi/pci.c 			dev_kfree_skb_any(skb);
skb               812 drivers/net/wireless/realtek/rtlwifi/pci.c 			rtl_c2hcmd_enqueue(hw, skb);
skb               822 drivers/net/wireless/realtek/rtlwifi/pci.c 		hdr = rtl_get_hdr(skb);
skb               823 drivers/net/wireless/realtek/rtlwifi/pci.c 		fc = rtl_get_fc(skb);
skb               825 drivers/net/wireless/realtek/rtlwifi/pci.c 		if (!stats.crc && !stats.hwerror && (skb->len > FCS_LEN)) {
skb               826 drivers/net/wireless/realtek/rtlwifi/pci.c 			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
skb               835 drivers/net/wireless/realtek/rtlwifi/pci.c 				rtlpriv->stats.rxbytesunicast += skb->len;
skb               837 drivers/net/wireless/realtek/rtlwifi/pci.c 			rtl_is_special_data(hw, skb, false, true);
skb               845 drivers/net/wireless/realtek/rtlwifi/pci.c 			rtl_collect_scan_list(hw, skb);
skb               848 drivers/net/wireless/realtek/rtlwifi/pci.c 			rtl_beacon_statistic(hw, skb);
skb               849 drivers/net/wireless/realtek/rtlwifi/pci.c 			rtl_p2p_info(hw, (void *)skb->data, skb->len);
skb               851 drivers/net/wireless/realtek/rtlwifi/pci.c 			rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
skb               852 drivers/net/wireless/realtek/rtlwifi/pci.c 			rtl_recognize_peer(hw, (void *)skb->data, skb->len);
skb               857 drivers/net/wireless/realtek/rtlwifi/pci.c 				dev_kfree_skb_any(skb);
skb               859 drivers/net/wireless/realtek/rtlwifi/pci.c 				_rtl_pci_rx_to_mac80211(hw, skb, rx_status);
skb               863 drivers/net/wireless/realtek/rtlwifi/pci.c 			dev_kfree_skb_any(skb);
skb               879 drivers/net/wireless/realtek/rtlwifi/pci.c 		skb = new_skb;
skb               882 drivers/net/wireless/realtek/rtlwifi/pci.c 			_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
skb               886 drivers/net/wireless/realtek/rtlwifi/pci.c 			_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
skb              1343 drivers/net/wireless/realtek/rtlwifi/pci.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
skb              1354 drivers/net/wireless/realtek/rtlwifi/pci.c 				 skb->len, PCI_DMA_TODEVICE);
skb              1355 drivers/net/wireless/realtek/rtlwifi/pci.c 		kfree_skb(skb);
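
The teardown lines above drain a TX ring by dequeueing, unmapping, and freeing each skb. A minimal sketch of that loop, assuming (as in the RX path above, not the real TX descriptors) that the DMA handle was stashed in skb->cb; drain_tx_queue() is illustrative, and the caller is assumed to hold the queue lock since the lockless __skb_dequeue() is used:

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static void drain_tx_queue(struct pci_dev *pdev, struct sk_buff_head *queue)
	{
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(queue)) != NULL) {
			pci_unmap_single(pdev, *((dma_addr_t *)skb->cb),
					 skb->len, PCI_DMA_TODEVICE);
			kfree_skb(skb);
		}
	}
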
skb              1380 drivers/net/wireless/realtek/rtlwifi/pci.c 		struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i];
skb              1382 drivers/net/wireless/realtek/rtlwifi/pci.c 		if (!skb)
skb              1384 drivers/net/wireless/realtek/rtlwifi/pci.c 		pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
skb              1386 drivers/net/wireless/realtek/rtlwifi/pci.c 		kfree_skb(skb);
skb              1522 drivers/net/wireless/realtek/rtlwifi/pci.c 				struct sk_buff *skb =
skb              1536 drivers/net/wireless/realtek/rtlwifi/pci.c 						 skb->len, PCI_DMA_TODEVICE);
skb              1537 drivers/net/wireless/realtek/rtlwifi/pci.c 				dev_kfree_skb_irq(skb);
skb              1557 drivers/net/wireless/realtek/rtlwifi/pci.c 					struct sk_buff *skb)
skb              1561 drivers/net/wireless/realtek/rtlwifi/pci.c 	u8 tid = rtl_get_tid(skb);
skb              1562 drivers/net/wireless/realtek/rtlwifi/pci.c 	__le16 fc = rtl_get_fc(skb);
skb              1578 drivers/net/wireless/realtek/rtlwifi/pci.c 	if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
skb              1588 drivers/net/wireless/realtek/rtlwifi/pci.c 	skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
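
The waitq lines above defer QoS data frames into a per-TID software queue (mac80211.skb_waitq[tid]) instead of writing them straight to the ring. A minimal sketch of the queueing decision, with an assumed queue count and a stand-in struct; waitq_insert_demo() is illustrative:

	#include <linux/ieee80211.h>
	#include <linux/skbuff.h>

	#define MAX_TID_COUNT 9	/* assumed number of per-TID queues */

	struct demo_mac {
		struct sk_buff_head skb_waitq[MAX_TID_COUNT];
	};

	static bool waitq_insert_demo(struct demo_mac *mac, struct sk_buff *skb)
	{
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		if (!ieee80211_is_data_qos(hdr->frame_control))
			return false;	/* non-QoS frames bypass the wait queue */

		skb_queue_tail(&mac->skb_waitq[ieee80211_get_tid(hdr)], skb);
		return true;
	}
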
skb              1596 drivers/net/wireless/realtek/rtlwifi/pci.c 		      struct sk_buff *skb,
skb              1600 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1605 drivers/net/wireless/realtek/rtlwifi/pci.c 	u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
skb              1607 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
skb              1608 drivers/net/wireless/realtek/rtlwifi/pci.c 	__le16 fc = rtl_get_fc(skb);
skb              1615 drivers/net/wireless/realtek/rtlwifi/pci.c 		rtl_tx_mgmt_proc(hw, skb);
skb              1623 drivers/net/wireless/realtek/rtlwifi/pci.c 	rtl_action_proc(hw, skb, true);
skb              1626 drivers/net/wireless/realtek/rtlwifi/pci.c 		rtlpriv->stats.txbytesmulticast += skb->len;
skb              1628 drivers/net/wireless/realtek/rtlwifi/pci.c 		rtlpriv->stats.txbytesbroadcast += skb->len;
skb              1630 drivers/net/wireless/realtek/rtlwifi/pci.c 		rtlpriv->stats.txbytesunicast += skb->len;
skb              1659 drivers/net/wireless/realtek/rtlwifi/pci.c 			return skb->len;
skb              1668 drivers/net/wireless/realtek/rtlwifi/pci.c 		return skb->len;
skb              1675 drivers/net/wireless/realtek/rtlwifi/pci.c 			(u8 *)ptx_bd_desc, info, sta, skb, hw_queue, ptcb_desc);
skb              1677 drivers/net/wireless/realtek/rtlwifi/pci.c 	__skb_queue_tail(&ring->queue, skb);
skb              1694 drivers/net/wireless/realtek/rtlwifi/pci.c 		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
skb                17 drivers/net/wireless/realtek/rtlwifi/rc.c 				  struct sk_buff *skb, bool not_data)
skb                54 drivers/net/wireless/realtek/rtlwifi/rc.c 	if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true, false) ||
skb               169 drivers/net/wireless/realtek/rtlwifi/rc.c 	struct sk_buff *skb = txrc->skb;
skb               170 drivers/net/wireless/realtek/rtlwifi/rc.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
skb               172 drivers/net/wireless/realtek/rtlwifi/rc.c 	__le16 fc = rtl_get_fc(skb);
skb               176 drivers/net/wireless/realtek/rtlwifi/rc.c 	rix = _rtl_rc_get_highest_rix(rtlpriv, sta, skb, not_data);
skb               211 drivers/net/wireless/realtek/rtlwifi/rc.c 			  struct sk_buff *skb)
skb               215 drivers/net/wireless/realtek/rtlwifi/rc.c 	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
skb               216 drivers/net/wireless/realtek/rtlwifi/rc.c 	__le16 fc = rtl_get_fc(skb);
skb               222 drivers/net/wireless/realtek/rtlwifi/rc.c 	if (rtl_is_special_data(mac->hw, skb, true, true))
skb               233 drivers/net/wireless/realtek/rtlwifi/rc.c 		    !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
skb               235 drivers/net/wireless/realtek/rtlwifi/rc.c 				u8 tid = rtl_get_tid(skb);
skb               542 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 	struct sk_buff *skb = NULL;
skb               602 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 	skb = dev_alloc_skb(totalpacketlen);
skb               603 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 	if (!skb)
skb               605 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 	skb_put_data(skb, &reserved_page_packet, totalpacketlen);
skb               607 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c 	rtstatus = rtl_cmd_send_packet(hw, skb);
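
The reserved-page download above recurs almost verbatim in the rtl8192c, rtl8723 and rtl8821ae fw.c files: copy the prebuilt page image into an skb and push it through the command TX path. A minimal sketch, where the page/len parameters and download_reserved_page() are illustrative and the send hook matches the rtl_cmd_send_packet() signature shown in core.c above:

	#include <linux/skbuff.h>
	#include <net/mac80211.h>

	static bool download_reserved_page(struct ieee80211_hw *hw,
					   const void *page, unsigned int len,
					   bool (*send)(struct ieee80211_hw *,
							struct sk_buff *))
	{
		struct sk_buff *skb = dev_alloc_skb(len);

		if (!skb)
			return false;
		skb_put_data(skb, page, len);	/* the reserved_page_packet image */
		return send(hw, skb);		/* e.g. rtl_cmd_send_packet() */
	}
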
skb                76 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
skb                82 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 				 skb->len, PCI_DMA_TODEVICE);
skb                83 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		kfree_skb(skb);
skb                15 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
skb                17 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	__le16 fc = rtl_get_fc(skb);
skb                24 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	return skb->priority;
skb               272 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 					       struct sk_buff *skb,
skb               286 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
skb               370 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 			   u8 *pdesc8, struct sk_buff *skb)
skb               419 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size
skb               459 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 		p_drvinfo = (struct rx_fwinfo_88e *)(skb->data +
skb               463 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 						   skb, status, pdesc,
skb               480 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 			  struct sk_buff *skb,
skb               491 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	unsigned int skb_len = skb->len;
skb               492 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	u8 fw_qsel = _rtl88ee_map_hwqueue_to_fwqueue(skb, hw_queue);
skb               511 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
skb               514 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 		skb_push(skb, EM_HDR_LEN);
skb               515 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 		memset(skb->data, 0, EM_HDR_LEN);
skb               517 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	buf_len = skb->len;
skb               518 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
skb               540 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 							 (__le32 *)(skb->data));
skb               670 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 			     bool lastseg, struct sk_buff *skb)
skb               678 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 					    skb->data, skb->len,
skb               681 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
skb               705 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
skb               714 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
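
Several fill_tx_desc() implementations above push an extra zeroed "early mode" header in front of the 802.11 frame before DMA-mapping the buffer. A minimal sketch; the 8-byte EM_HDR_LEN is an assumption, and the caller must already have guaranteed the headroom:

	#include <linux/skbuff.h>
	#include <linux/string.h>

	#define EM_HDR_LEN 8	/* assumed early-mode header size */

	static void push_earlymode_hdr(struct sk_buff *skb)
	{
		skb_push(skb, EM_HDR_LEN);	/* headroom reserved at allocation */
		memset(skb->data, 0, EM_HDR_LEN);
	}
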
skb               786 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h 			  struct sk_buff *skb,
skb               791 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h 			   u8 *pdesc, struct sk_buff *skb);
skb               801 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h 			     struct sk_buff *skb);
skb               568 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 	struct sk_buff *skb = NULL;
skb               625 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 	skb = dev_alloc_skb(totalpacketlen);
skb               626 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 	if (!skb)
skb               628 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 	skb_put_data(skb, &reserved_page_packet, totalpacketlen);
skb               631 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 		rtstatus = cmd_send_packet(hw, skb);
skb               633 drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c 		rtstatus = rtl_cmd_send_packet(hw, skb);
skb                14 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c static u8 _rtl92ce_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
skb                16 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	__le16 fc = rtl_get_fc(skb);
skb                23 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	return skb->priority;
skb               252 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 					       struct sk_buff *skb,
skb               267 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
skb               299 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 			   u8 *p_desc8, struct sk_buff *skb)
skb               329 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	hdr = (struct ieee80211_hdr *)(skb->data + stats->rx_drvinfo_size
skb               368 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 		p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
skb               372 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 						   skb, stats, pdesc,
skb               386 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 			  struct sk_buff *skb,
skb               397 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue);
skb               405 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 					    skb->data, skb->len,
skb               428 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
skb               482 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 		set_tx_desc_pkt_size(pdesc, (u16)skb->len);
skb               532 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len);
skb               564 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 			     bool lastseg, struct sk_buff *skb)
skb               572 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 					    skb->data, skb->len,
skb               575 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
skb               599 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
skb               608 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
skb               516 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h 			  struct sk_buff *skb, u8 hw_queue,
skb               521 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h 			   u8 *pdesc, struct sk_buff *skb);
skb               531 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h 			     struct sk_buff *skb);
skb              1532 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              1543 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c 	dev_kfree_skb(skb);
skb               750 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c 					       struct sk_buff *skb,
skb               764 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c 	tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
skb               123 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h 				      struct sk_buff *skb,
skb               285 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 			   u8 *pdesc8, struct sk_buff *skb)
skb               325 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 		p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
skb               327 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 		rtl92c_translate_rx_signal_stuff(hw, skb, stats, p_desc,
skb               337 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
skb               340 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 		 (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb);
skb               354 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	rxdesc	= (__le32 *)skb->data;
skb               355 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	skb_len	= skb->len;
skb               393 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 		rtl92c_translate_rx_signal_stuff(hw, skb, &stats,
skb               396 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	skb_pull(skb, (drvinfo_len + RTL_RX_DESC_SIZE));
skb               397 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	hdr = (struct ieee80211_hdr *)(skb->data);
skb               413 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	ieee80211_rx(hw, skb);
skb               416 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff *skb)
skb               418 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	_rtl_rx_process(hw, skb);
skb               426 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb)
skb               431 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 			 struct sk_buff *skb)
skb               471 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 			  struct sk_buff *skb,
skb               484 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	u16 pktlen = skb->len;
skb               486 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 						skb_get_queue_mapping(skb));
skb               491 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
skb               492 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	txdesc8 = skb_push(skb, RTL_TX_HEADER_SIZE);
skb               631 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 			     bool lastseg, struct sk_buff *skb)
skb               635 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
skb               651 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	set_tx_desc_pkt_size(pdesc, (u16)skb->len);
skb               383 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h 			   u8 *p_desc, struct sk_buff *skb);
skb               384 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff *skb);
skb               385 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb);
skb               387 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h 			 struct sk_buff *skb);
skb               394 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h 			  struct sk_buff *skb,
skb               401 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h 			     bool b_lastseg, struct sk_buff *skb);
skb               455 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 				    struct sk_buff *skb)
skb               472 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
skb               473 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	__skb_queue_tail(&ring->queue, skb);
skb               599 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	struct sk_buff *skb = NULL;
skb               645 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	skb = dev_alloc_skb(totalpacketlen);
skb               646 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	if (!skb) {
skb               649 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 		skb_put_data(skb, &reserved_page_packet, totalpacketlen);
skb               650 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 		rtstatus = _rtl92d_cmd_send_packet(hw, skb);
skb                14 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c static u8 _rtl92de_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
skb                16 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	__le16 fc = rtl_get_fc(skb);
skb                23 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	return skb->priority;
skb               403 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 					       struct sk_buff *skb,
skb               417 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
skb               441 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 		u8 *p_desc, struct sk_buff *skb)
skb               480 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 		p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
skb               483 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 						   skb, stats, pdesc,
skb               509 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 			  struct sk_buff *skb,
skb               521 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	unsigned int skb_len = skb->len;
skb               522 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	u8 fw_qsel = _rtl92de_map_hwqueue_to_fwqueue(skb, hw_queue);
skb               538 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
skb               541 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 		skb_push(skb, EM_HDR_LEN);
skb               542 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 		memset(skb->data, 0, EM_HDR_LEN);
skb               544 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	buf_len = skb->len;
skb               545 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
skb               567 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 							  (u8 *)(skb->data));
skb               693 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 			     bool lastseg, struct sk_buff *skb)
skb               701 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 		    skb->data, skb->len, PCI_DMA_TODEVICE);
skb               702 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
skb               727 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)skb->len);
skb               731 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
skb               708 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h 			  struct sk_buff *skb, u8 hw_queue,
skb               713 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h 			   u8 *pdesc, struct sk_buff *skb);
skb               723 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h 			     struct sk_buff *skb);
skb               662 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 	struct sk_buff *skb = NULL;
skb               746 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 	skb = dev_alloc_skb(totalpacketlen);
skb               747 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 	if (!skb)
skb               749 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 	skb_put_data(skb, &reserved_page_packet, totalpacketlen);
skb               751 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c 	rtstatus = rtl_cmd_send_packet(hw, skb);
skb                16 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c static u8 _rtl92ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
skb                18 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	__le16 fc = rtl_get_fc(skb);
skb                25 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	return skb->priority;
skb               215 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 					       struct sk_buff *skb,
skb               229 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	tmp_buf = skb->data + pstatus->rx_drvinfo_size +
skb               330 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 			   u8 *pdesc8, struct sk_buff *skb)
skb               372 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size +
skb               412 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 		p_drvinfo = (struct rx_fwinfo *)(skb->data +
skb               415 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 		_rtl92ee_translate_rx_signal_stuff(hw, skb, status, pdesc8,
skb               555 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 				 struct sk_buff *skb, dma_addr_t addr)
skb               559 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	u32 pkt_len = skb->len;
skb               644 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 			  struct sk_buff *skb,
skb               651 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb);
skb               655 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	u8 fw_qsel = _rtl92ee_map_hwqueue_to_fwqueue(skb, hw_queue);
skb               674 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
skb               677 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 		skb_push(skb, EM_HDR_LEN);
skb               678 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 		memset(skb->data, 0, EM_HDR_LEN);
skb               680 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	buf_len = skb->len;
skb               681 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
skb               691 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 					    skb, mapping);
skb               707 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 							  (u8 *)(skb->data));
skb               840 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 			     bool lastseg, struct sk_buff *skb)
skb               846 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 					    skb->data, skb->len,
skb               872 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
skb               881 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
skb               725 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h 				 struct sk_buff *skb, dma_addr_t addr);
skb               732 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h 			  struct sk_buff *skb,
skb               737 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h 			   u8 *pdesc, struct sk_buff *skb);
skb               747 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h 			     struct sk_buff *skb);
skb               110 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 		struct sk_buff *skb, u8 last)
skb               125 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
skb               126 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	__skb_queue_tail(&ring->queue, skb);
skb               137 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	struct sk_buff *skb;
skb               164 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 		skb = dev_alloc_skb(frag_length);
skb               165 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 		if (!skb)
skb               167 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 		skb_reserve(skb, extra_descoffset);
skb               168 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 		skb_put_data(skb, code_virtual_address + frag_offset,
skb               171 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 		tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
skb               176 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 		_rtl92s_cmd_send_packet(hw, skb, last_inipkt);
skb               423 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c static u32 _rtl92s_fill_h2c_cmd(struct sk_buff *skb, u32 h2cbufferlen,
skb               441 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 		ph2c_buffer = skb_put(skb, (u32)len);
skb               501 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	struct sk_buff *skb;
skb               532 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	skb = dev_alloc_skb(len);
skb               533 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	if (!skb)
skb               535 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	cb_desc = (struct rtl_tcb_desc *)(skb->cb);
skb               540 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	_rtl92s_fill_h2c_cmd(skb, MAX_TRANSMIT_BUFFER_SIZE, 1, &element_id,
skb               542 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	_rtl92s_cmd_send_packet(hw, skb, false);
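
The 92se firmware loader above cuts the image into fragments, each carried by its own skb with space reserved for the command descriptor. A minimal sketch of that loop under the same structure; all names and the send callback are illustrative:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/skbuff.h>
	#include <net/mac80211.h>

	static int send_fw_fragments(struct ieee80211_hw *hw, const u8 *image,
				     u32 image_len, u32 frag_length,
				     u32 extra_descoffset,
				     void (*send)(struct ieee80211_hw *,
						  struct sk_buff *, bool last))
	{
		u32 frag_offset = 0;

		while (frag_offset < image_len) {
			u32 chunk = min(frag_length - extra_descoffset,
					image_len - frag_offset);
			struct sk_buff *skb = dev_alloc_skb(frag_length);

			if (!skb)
				return -ENOMEM;
			skb_reserve(skb, extra_descoffset);	/* descriptor slot */
			skb_put_data(skb, image + frag_offset, chunk);
			frag_offset += chunk;
			send(hw, skb, frag_offset >= image_len);	/* last_inipkt */
		}
		return 0;
	}
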
skb                15 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
skb                17 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	__le16 fc = rtl_get_fc(skb);
skb                30 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	if (skb->priority == 7)
skb                32 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	return skb->priority;
skb               195 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		struct sk_buff *skb, struct rtl_stats *pstats,
skb               208 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
skb               236 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 			   struct sk_buff *skb)
skb               285 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		hdr = (struct ieee80211_hdr *)(skb->data +
skb               300 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		p_drvinfo = (struct rx_fwinfo *)(skb->data +
skb               302 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		_rtl92se_translate_rx_signal_stuff(hw, skb, stats, pdesc,
skb               316 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		struct sk_buff *skb,
skb               327 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	u8 fw_qsel = _rtl92se_map_hwqueue_to_fwqueue(skb, hw_queue);
skb               331 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
skb               350 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
skb               431 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb->len);
skb               485 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)skb->len);
skb               494 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	bool firstseg, bool lastseg, struct sk_buff *skb)
skb               499 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
skb               501 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 	dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
skb               522 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
skb               523 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
skb               536 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
skb               540 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
skb               542 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c 		SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
skb                11 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h 			  struct sk_buff *skb, u8 hw_queue,
skb                14 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h 			     bool lastseg, struct sk_buff *skb);
skb                17 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h 			   struct sk_buff *skb);
skb               390 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c 	struct sk_buff *skb = NULL;
skb               450 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c 	skb = dev_alloc_skb(totalpacketlen);
skb               451 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c 	if (!skb)
skb               453 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c 	skb_put_data(skb, &reserved_page_packet, totalpacketlen);
skb               455 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c 	rtstatus = rtl_cmd_send_packet(hw, skb);
skb                14 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c static u8 _rtl8723e_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
skb                16 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	__le16 fc = rtl_get_fc(skb);
skb                23 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	return skb->priority;
skb               218 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 				      struct sk_buff *skb,
skb               231 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
skb               263 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 			    u8 *pdesc8, struct sk_buff *skb)
skb               292 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size
skb               332 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 		p_drvinfo = (struct rx_fwinfo_8723e *)(skb->data +
skb               335 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 		translate_rx_signal_stuff(hw, skb, status, pdesc8, p_drvinfo);
skb               345 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 			   struct sk_buff *skb,
skb               358 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	u8 fw_qsel = _rtl8723e_map_hwqueue_to_fwqueue(skb, hw_queue);
skb               366 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 					    skb->data, skb->len,
skb               386 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
skb               443 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 		set_tx_desc_pkt_size(pdesc, (u16)skb->len);
skb               491 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len);
skb               525 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 			      bool lastseg, struct sk_buff *skb)
skb               533 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 					    skb->data, skb->len,
skb               536 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
skb               560 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
skb               569 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
skb               519 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h 			   struct sk_buff *skb, u8 hw_queue,
skb               524 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h 			    u8 *pdesc, struct sk_buff *skb);
skb               534 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h 			      struct sk_buff *skb);
skb               479 drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c 	struct sk_buff *skb = NULL;
skb               564 drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c 	skb = dev_alloc_skb(totalpacketlen);
skb               565 drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c 	if (!skb)
skb               567 drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c 	skb_put_data(skb, &reserved_page_packet, totalpacketlen);
skb               569 drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c 	rtstatus = rtl_cmd_send_packet(hw, skb);
skb                38 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
skb                44 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 				 skb->len, PCI_DMA_TODEVICE);
skb                45 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		kfree_skb(skb);
skb                16 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
skb                18 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	__le16 fc = rtl_get_fc(skb);
skb                25 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	return skb->priority;
skb               191 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 					struct sk_buff *skb,
skb               205 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
skb               299 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 			     u8 *pdesc8, struct sk_buff *skb)
skb               349 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size +
skb               388 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 		p_drvinfo = (struct rx_fwinfo_8723be *)(skb->data +
skb               391 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 		_rtl8723be_translate_rx_signal_stuff(hw, skb, status,
skb               407 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 			    struct ieee80211_sta *sta, struct sk_buff *skb,
skb               414 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb);
skb               419 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	unsigned int skb_len = skb->len;
skb               420 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	u8 fw_qsel = _rtl8723be_map_hwqueue_to_fwqueue(skb, hw_queue);
skb               438 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
skb               441 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 		skb_push(skb, EM_HDR_LEN);
skb               442 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 		memset(skb->data, 0, EM_HDR_LEN);
skb               444 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	buf_len = skb->len;
skb               445 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
skb               466 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 							    (__le32 *)(skb->data));
skb               591 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 			       struct sk_buff *skb)
skb               599 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 					    skb->data, skb->len,
skb               622 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
skb               631 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
skb               631 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h 			    struct ieee80211_sta *sta, struct sk_buff *skb,
skb               636 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h 			     u8 *pdesc, struct sk_buff *skb);
skb               646 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h 			       struct sk_buff *skb);
skb               219 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 			     struct sk_buff *skb)
skb               239 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
skb               241 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	__skb_queue_tail(&ring->queue, skb);
skb                70 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h 			     struct sk_buff *skb);
skb              1521 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	struct sk_buff *skb = NULL;
skb              1625 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	skb = dev_alloc_skb(totalpacketlen);
skb              1626 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	if (!skb)
skb              1628 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen);
skb              1630 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	rtstatus = rtl_cmd_send_packet(hw, skb);
skb              1658 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	struct sk_buff *skb = NULL;
skb              1763 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	skb = dev_alloc_skb(totalpacketlen);
skb              1764 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	if (!skb)
skb              1766 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen);
skb              1768 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c 	rtstatus = rtl_cmd_send_packet(hw, skb);
skb                34 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
skb                40 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 				 skb->len, PCI_DMA_TODEVICE);
skb                41 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		kfree_skb(skb);
skb                16 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
skb                18 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	__le16 fc = rtl_get_fc(skb);
skb                25 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	return skb->priority;
skb               276 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 				      struct sk_buff *skb,
skb               289 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
skb               434 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 			     u8 *pdesc8, struct sk_buff *skb)
skb               492 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	hdr = (struct ieee80211_hdr *)(skb->data +
skb               539 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 		p_drvinfo = (struct rx_fwinfo_8821ae *)(skb->data +
skb               542 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 		translate_rx_signal_stuff(hw, skb, status, pdesc, p_drvinfo);
skb               663 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 			    struct sk_buff *skb,
skb               670 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb);
skb               674 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	unsigned int skb_len = skb->len;
skb               675 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	u8 fw_qsel = _rtl8821ae_map_hwqueue_to_fwqueue(skb, hw_queue);
skb               686 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
skb               689 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 		skb_push(skb, EM_HDR_LEN);
skb               690 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 		memset(skb->data, 0, EM_HDR_LEN);
skb               692 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	buf_len = skb->len;
skb               693 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
skb               715 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 							   (__le32 *)skb->data);
skb               830 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 			       bool lastseg, struct sk_buff *skb)
skb               838 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 					    skb->data, skb->len,
skb               851 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
skb               865 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	set_tx_desc_tx_buffer_size(pdesc, skb->len);
skb               637 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h 			    struct sk_buff *skb,
skb               642 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h 			     u8 *pdesc, struct sk_buff *skb);
skb               652 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h 			       struct sk_buff *skb);
skb               421 drivers/net/wireless/realtek/rtlwifi/usb.c 				    struct sk_buff *skb)
skb               424 drivers/net/wireless/realtek/rtlwifi/usb.c 	u8 *rxdesc = skb->data;
skb               434 drivers/net/wireless/realtek/rtlwifi/usb.c 	skb_pull(skb, RTL_RX_DESC_SIZE);
skb               435 drivers/net/wireless/realtek/rtlwifi/usb.c 	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
skb               436 drivers/net/wireless/realtek/rtlwifi/usb.c 	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
skb               437 drivers/net/wireless/realtek/rtlwifi/usb.c 	hdr = (struct ieee80211_hdr *)(skb->data);
skb               440 drivers/net/wireless/realtek/rtlwifi/usb.c 		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
skb               448 drivers/net/wireless/realtek/rtlwifi/usb.c 			rtlpriv->stats.rxbytesunicast += skb->len;
skb               458 drivers/net/wireless/realtek/rtlwifi/usb.c 		rtl_beacon_statistic(hw, skb);
skb               463 drivers/net/wireless/realtek/rtlwifi/usb.c 				      struct sk_buff *skb)
skb               466 drivers/net/wireless/realtek/rtlwifi/usb.c 	u8 *rxdesc = skb->data;
skb               476 drivers/net/wireless/realtek/rtlwifi/usb.c 	skb_pull(skb, RTL_RX_DESC_SIZE);
skb               477 drivers/net/wireless/realtek/rtlwifi/usb.c 	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
skb               478 drivers/net/wireless/realtek/rtlwifi/usb.c 	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
skb               479 drivers/net/wireless/realtek/rtlwifi/usb.c 	hdr = (struct ieee80211_hdr *)(skb->data);
skb               482 drivers/net/wireless/realtek/rtlwifi/usb.c 		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
skb               490 drivers/net/wireless/realtek/rtlwifi/usb.c 			rtlpriv->stats.rxbytesunicast += skb->len;
skb               501 drivers/net/wireless/realtek/rtlwifi/usb.c 		rtl_beacon_statistic(hw, skb);
skb               503 drivers/net/wireless/realtek/rtlwifi/usb.c 		if (likely(rtl_action_proc(hw, skb, false)))
skb               504 drivers/net/wireless/realtek/rtlwifi/usb.c 			ieee80211_rx(hw, skb);
skb               506 drivers/net/wireless/realtek/rtlwifi/usb.c 			dev_kfree_skb_any(skb);
skb               508 drivers/net/wireless/realtek/rtlwifi/usb.c 		dev_kfree_skb_any(skb);
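
Both USB RX handlers above unwrap the frame in two skb_pull() steps: first the hardware RX descriptor, then the driver-info area, leaving skb->data on the 802.11 header. A minimal sketch; the 24-byte descriptor size is an assumption and unwrap_rx() is illustrative:

	#include <linux/ieee80211.h>
	#include <linux/skbuff.h>

	#define RTL_RX_DESC_SIZE 24	/* assumed hardware RX descriptor size */

	static struct ieee80211_hdr *unwrap_rx(struct sk_buff *skb,
					       unsigned int drvinfo_size,
					       unsigned int bufshift)
	{
		skb_pull(skb, RTL_RX_DESC_SIZE);	/* raw RX descriptor, already parsed */
		skb_pull(skb, drvinfo_size + bufshift);	/* PHY status area */
		return (struct ieee80211_hdr *)skb->data;
	}
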
skb               512 drivers/net/wireless/realtek/rtlwifi/usb.c static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
skb               520 drivers/net/wireless/realtek/rtlwifi/usb.c 		rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
skb               535 drivers/net/wireless/realtek/rtlwifi/usb.c 	struct sk_buff *skb;
skb               537 drivers/net/wireless/realtek/rtlwifi/usb.c 	while ((skb = skb_dequeue(&rtlusb->rx_queue))) {
skb               539 drivers/net/wireless/realtek/rtlwifi/usb.c 			dev_kfree_skb_any(skb);
skb               544 drivers/net/wireless/realtek/rtlwifi/usb.c 			_rtl_usb_rx_process_noagg(hw, skb);
skb               547 drivers/net/wireless/realtek/rtlwifi/usb.c 			_rtl_rx_pre_process(hw, skb);
skb               600 drivers/net/wireless/realtek/rtlwifi/usb.c 		struct sk_buff *skb;
skb               621 drivers/net/wireless/realtek/rtlwifi/usb.c 		skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
skb               622 drivers/net/wireless/realtek/rtlwifi/usb.c 		if (!skb) {
skb               627 drivers/net/wireless/realtek/rtlwifi/usb.c 		_rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);
skb               630 drivers/net/wireless/realtek/rtlwifi/usb.c 		skb_reserve(skb, padding);
skb               633 drivers/net/wireless/realtek/rtlwifi/usb.c 		skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
skb               635 drivers/net/wireless/realtek/rtlwifi/usb.c 		skb_put_data(skb, _urb->transfer_buffer, size);
skb               637 drivers/net/wireless/realtek/rtlwifi/usb.c 		skb_queue_tail(&rtlusb->rx_queue, skb);
skb               827 drivers/net/wireless/realtek/rtlwifi/usb.c 		struct sk_buff *skb;
skb               831 drivers/net/wireless/realtek/rtlwifi/usb.c 		skb = (struct sk_buff *)_urb->context;
skb               832 drivers/net/wireless/realtek/rtlwifi/usb.c 		kfree_skb(skb);
skb               838 drivers/net/wireless/realtek/rtlwifi/usb.c 			struct sk_buff *skb)
skb               843 drivers/net/wireless/realtek/rtlwifi/usb.c 	rtlusb->usb_tx_post_hdl(hw, urb, skb);
skb               844 drivers/net/wireless/realtek/rtlwifi/usb.c 	skb_pull(skb, RTL_TX_HEADER_SIZE);
skb               845 drivers/net/wireless/realtek/rtlwifi/usb.c 	txinfo = IEEE80211_SKB_CB(skb);
skb               855 drivers/net/wireless/realtek/rtlwifi/usb.c 	ieee80211_tx_status_irqsafe(hw, skb);
skb               861 drivers/net/wireless/realtek/rtlwifi/usb.c 	struct sk_buff *skb = (struct sk_buff *)urb->context;
skb               862 drivers/net/wireless/realtek/rtlwifi/usb.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               869 drivers/net/wireless/realtek/rtlwifi/usb.c 	err = _usb_tx_post(hw, urb, skb);
skb               877 drivers/net/wireless/realtek/rtlwifi/usb.c 				struct sk_buff *skb, u32 ep_num)
skb               882 drivers/net/wireless/realtek/rtlwifi/usb.c 	WARN_ON(!skb);
skb               885 drivers/net/wireless/realtek/rtlwifi/usb.c 		kfree_skb(skb);
skb               888 drivers/net/wireless/realtek/rtlwifi/usb.c 	_rtl_install_trx_info(rtlusb, skb, ep_num);
skb               890 drivers/net/wireless/realtek/rtlwifi/usb.c 			  ep_num), skb->data, skb->len, _rtl_tx_complete, skb);
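
_rtl_usb_tx_urb_setup() above uses skb->data as the URB transfer buffer and the skb itself as urb->context, so the completion handler can recover and free it. A minimal sketch of submitting one such URB; submit_tx_skb() is illustrative, and the post-submit usb_free_urb() relies on the USB core holding its own reference while the URB is queued:

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/usb.h>

	static int submit_tx_skb(struct usb_device *udev, unsigned int ep_num,
				 struct sk_buff *skb, usb_complete_t done)
	{
		struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
		int err;

		if (!urb) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep_num),
				  skb->data, skb->len, done, skb);	/* skb as context */
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err)
			kfree_skb(skb);	/* completion will never run */
		usb_free_urb(urb);	/* drop our reference; core keeps its own */
		return err;
	}
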
skb               895 drivers/net/wireless/realtek/rtlwifi/usb.c static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
skb               906 drivers/net/wireless/realtek/rtlwifi/usb.c 		kfree_skb(skb);
skb               910 drivers/net/wireless/realtek/rtlwifi/usb.c 	_skb = skb;
skb               914 drivers/net/wireless/realtek/rtlwifi/usb.c 		kfree_skb(skb);
skb               922 drivers/net/wireless/realtek/rtlwifi/usb.c 				   struct sk_buff *skb,
skb               926 drivers/net/wireless/realtek/rtlwifi/usb.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               929 drivers/net/wireless/realtek/rtlwifi/usb.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
skb               944 drivers/net/wireless/realtek/rtlwifi/usb.c 	rtl_action_proc(hw, skb, true);
skb               946 drivers/net/wireless/realtek/rtlwifi/usb.c 		rtlpriv->stats.txbytesmulticast += skb->len;
skb               948 drivers/net/wireless/realtek/rtlwifi/usb.c 		rtlpriv->stats.txbytesbroadcast += skb->len;
skb               950 drivers/net/wireless/realtek/rtlwifi/usb.c 		rtlpriv->stats.txbytesunicast += skb->len;
skb               951 drivers/net/wireless/realtek/rtlwifi/usb.c 	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, NULL, info, sta, skb,
skb               959 drivers/net/wireless/realtek/rtlwifi/usb.c 		      struct sk_buff *skb,
skb               964 drivers/net/wireless/realtek/rtlwifi/usb.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
skb               970 drivers/net/wireless/realtek/rtlwifi/usb.c 	hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
skb               971 drivers/net/wireless/realtek/rtlwifi/usb.c 	_rtl_usb_tx_preprocess(hw, sta, skb, hw_queue);
skb               972 drivers/net/wireless/realtek/rtlwifi/usb.c 	_rtl_usb_transmit(hw, skb, hw_queue);
skb               976 drivers/net/wireless/realtek/rtlwifi/usb.c 	dev_kfree_skb_any(skb);
skb               982 drivers/net/wireless/realtek/rtlwifi/usb.c 					struct sk_buff *skb)
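
A pattern worth noting in the rtlwifi USB TX entries above: the skb itself is handed to the URB as its completion context. The submit path points urb->transfer_buffer at skb->data and passes the skb as the final argument of usb_fill_bulk_urb(); the completion handler then recovers it from urb->context, strips the device header, and reports status. A minimal sketch of that round trip, assuming hypothetical names (my_tx_complete, my_tx_submit, MY_TX_HDR_LEN) in place of the driver's own:

	#include <linux/usb.h>
	#include <linux/skbuff.h>

	#define MY_TX_HDR_LEN 40	/* hypothetical device TX header size */

	static void my_tx_complete(struct urb *urb)
	{
		/* Recover the skb stashed as the URB context at submit time. */
		struct sk_buff *skb = (struct sk_buff *)urb->context;

		skb_pull(skb, MY_TX_HDR_LEN);	/* drop the device header */
		dev_kfree_skb_irq(skb);		/* completion may run in IRQ context */
	}

	static int my_tx_submit(struct usb_device *udev, struct urb *urb,
				struct sk_buff *skb, unsigned int ep)
	{
		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep),
				  skb->data, skb->len, my_tx_complete, skb);
		return usb_submit_urb(urb, GFP_ATOMIC);
	}
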
skb                51 drivers/net/wireless/realtek/rtlwifi/usb.h 					 struct sk_buff *skb,
skb                54 drivers/net/wireless/realtek/rtlwifi/usb.h 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1032 drivers/net/wireless/realtek/rtlwifi/wifi.h static inline struct rtlwifi_tx_info *rtl_tx_skb_cb_info(struct sk_buff *skb)
skb              1034 drivers/net/wireless/realtek/rtlwifi/wifi.h 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              2250 drivers/net/wireless/realtek/rtlwifi/wifi.h 				    struct sk_buff *skb, dma_addr_t addr);
skb              2261 drivers/net/wireless/realtek/rtlwifi/wifi.h 			     struct sk_buff *skb, u8 hw_queue,
skb              2267 drivers/net/wireless/realtek/rtlwifi/wifi.h 				struct sk_buff *skb);
skb              2270 drivers/net/wireless/realtek/rtlwifi/wifi.h 				     struct sk_buff *skb, u8 hw_queue);
skb              2274 drivers/net/wireless/realtek/rtlwifi/wifi.h 			      u8 *pdesc, struct sk_buff *skb);
skb              2346 drivers/net/wireless/realtek/rtlwifi/wifi.h 			  struct sk_buff *skb,
skb              2352 drivers/net/wireless/realtek/rtlwifi/wifi.h 			     struct sk_buff *skb);
skb              2723 drivers/net/wireless/realtek/rtlwifi/wifi.h 			 struct sk_buff *skb);
skb              3198 drivers/net/wireless/realtek/rtlwifi/wifi.h static inline struct ieee80211_hdr *rtl_get_hdr(struct sk_buff *skb)
skb              3200 drivers/net/wireless/realtek/rtlwifi/wifi.h 	return (struct ieee80211_hdr *)(skb->data);
skb              3203 drivers/net/wireless/realtek/rtlwifi/wifi.h static inline __le16 rtl_get_fc(struct sk_buff *skb)
skb              3205 drivers/net/wireless/realtek/rtlwifi/wifi.h 	return rtl_get_hdr(skb)->frame_control;
skb              3213 drivers/net/wireless/realtek/rtlwifi/wifi.h static inline u16 rtl_get_tid(struct sk_buff *skb)
skb              3215 drivers/net/wireless/realtek/rtlwifi/wifi.h 	return rtl_get_tid_h(rtl_get_hdr(skb));
skb               447 drivers/net/wireless/realtek/rtw88/coex.c void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb)
skb               450 drivers/net/wireless/realtek/rtw88/coex.c 	u8 *payload = get_payload_from_coex_resp(skb);
skb               455 drivers/net/wireless/realtek/rtw88/coex.c 	skb_queue_tail(&coex->queue, skb);
skb               489 drivers/net/wireless/realtek/rtw88/coex.c 	struct sk_buff *skb;
skb               494 drivers/net/wireless/realtek/rtw88/coex.c 	skb = rtw_coex_info_request(rtwdev, &req);
skb               495 drivers/net/wireless/realtek/rtw88/coex.c 	if (!skb)
skb               498 drivers/net/wireless/realtek/rtw88/coex.c 	payload = get_payload_from_coex_resp(skb);
skb               500 drivers/net/wireless/realtek/rtw88/coex.c 	dev_kfree_skb_any(skb);
skb               511 drivers/net/wireless/realtek/rtw88/coex.c 	struct sk_buff *skb;
skb               516 drivers/net/wireless/realtek/rtw88/coex.c 	skb = rtw_coex_info_request(rtwdev, &req);
skb               517 drivers/net/wireless/realtek/rtw88/coex.c 	if (!skb)
skb               520 drivers/net/wireless/realtek/rtw88/coex.c 	dev_kfree_skb_any(skb);
skb               348 drivers/net/wireless/realtek/rtw88/coex.h void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb);
skb                13 drivers/net/wireless/realtek/rtw88/fw.c 				      struct sk_buff *skb)
skb                18 drivers/net/wireless/realtek/rtw88/fw.c 	c2h = get_c2h_from_skb(skb);
skb                23 drivers/net/wireless/realtek/rtw88/fw.c 		rtw_tx_report_handle(rtwdev, skb);
skb                30 drivers/net/wireless/realtek/rtw88/fw.c void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
skb                36 drivers/net/wireless/realtek/rtw88/fw.c 	pkt_offset = *((u32 *)skb->cb);
skb                37 drivers/net/wireless/realtek/rtw88/fw.c 	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
skb                38 drivers/net/wireless/realtek/rtw88/fw.c 	len = skb->len - pkt_offset - 2;
skb                50 drivers/net/wireless/realtek/rtw88/fw.c 		rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
skb                60 drivers/net/wireless/realtek/rtw88/fw.c 			       struct sk_buff *skb)
skb                65 drivers/net/wireless/realtek/rtw88/fw.c 	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
skb                66 drivers/net/wireless/realtek/rtw88/fw.c 	len = skb->len - pkt_offset - 2;
skb                67 drivers/net/wireless/realtek/rtw88/fw.c 	*((u32 *)skb->cb) = pkt_offset;
skb                74 drivers/net/wireless/realtek/rtw88/fw.c 		rtw_coex_info_response(rtwdev, skb);
skb                78 drivers/net/wireless/realtek/rtw88/fw.c 		*((u32 *)skb->cb) = pkt_offset;
skb                79 drivers/net/wireless/realtek/rtw88/fw.c 		skb_queue_tail(&rtwdev->c2h_queue, skb);
skb               477 drivers/net/wireless/realtek/rtw88/fw.c static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb)
skb               484 drivers/net/wireless/realtek/rtw88/fw.c 	rtw_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb);
skb               485 drivers/net/wireless/realtek/rtw88/fw.c 	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
skb               487 drivers/net/wireless/realtek/rtw88/fw.c 	rtw_tx_fill_tx_desc(&pkt_info, skb);
skb               499 drivers/net/wireless/realtek/rtw88/fw.c 	struct sk_buff *skb = rsvd_pkt->skb;
skb               503 drivers/net/wireless/realtek/rtw88/fw.c 		       skb->data, skb->len);
skb               505 drivers/net/wireless/realtek/rtw88/fw.c 		memcpy(buf, skb->data, skb->len);
skb               635 drivers/net/wireless/realtek/rtw88/fw.c 		rsvd_pkt->skb = iter;
skb               679 drivers/net/wireless/realtek/rtw88/fw.c 			page += rtw_len_to_page(rsvd_pkt->skb->len +
skb               682 drivers/net/wireless/realtek/rtw88/fw.c 			page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
skb               684 drivers/net/wireless/realtek/rtw88/fw.c 		kfree_skb(rsvd_pkt->skb);
skb               691 drivers/net/wireless/realtek/rtw88/fw.c 		kfree_skb(rsvd_pkt->skb);
skb               700 drivers/net/wireless/realtek/rtw88/fw.c 	struct sk_buff *skb;
skb               703 drivers/net/wireless/realtek/rtw88/fw.c 	skb = rtw_beacon_get(hw, vif);
skb               704 drivers/net/wireless/realtek/rtw88/fw.c 	if (!skb) {
skb               710 drivers/net/wireless/realtek/rtw88/fw.c 	ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
skb               714 drivers/net/wireless/realtek/rtw88/fw.c 	dev_kfree_skb(skb);
skb                91 drivers/net/wireless/realtek/rtw88/fw.h 	struct sk_buff *skb;
skb               257 drivers/net/wireless/realtek/rtw88/fw.h static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
skb               261 drivers/net/wireless/realtek/rtw88/fw.h 	pkt_offset = *((u32 *)skb->cb);
skb               262 drivers/net/wireless/realtek/rtw88/fw.h 	return (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
skb               266 drivers/net/wireless/realtek/rtw88/fw.h 			       struct sk_buff *skb);
skb               267 drivers/net/wireless/realtek/rtw88/fw.h void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
skb                12 drivers/net/wireless/realtek/rtw88/hci.h 		  struct sk_buff *skb);
skb                30 drivers/net/wireless/realtek/rtw88/hci.h 			     struct sk_buff *skb)
skb                32 drivers/net/wireless/realtek/rtw88/hci.h 	return rtwdev->hci.ops->tx(rtwdev, pkt_info, skb);
skb                17 drivers/net/wireless/realtek/rtw88/mac80211.c 		       struct sk_buff *skb)
skb                25 drivers/net/wireless/realtek/rtw88/mac80211.c 	rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
skb                26 drivers/net/wireless/realtek/rtw88/mac80211.c 	if (rtw_hci_tx(rtwdev, &pkt_info, skb))
skb                32 drivers/net/wireless/realtek/rtw88/mac80211.c 	ieee80211_free_txskb(hw, skb);
skb               197 drivers/net/wireless/realtek/rtw88/main.c 	struct sk_buff *skb, *tmp;
skb               199 drivers/net/wireless/realtek/rtw88/main.c 	skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
skb               200 drivers/net/wireless/realtek/rtw88/main.c 		skb_unlink(skb, &rtwdev->c2h_queue);
skb               201 drivers/net/wireless/realtek/rtw88/main.c 		rtw_fw_c2h_cmd_handle(rtwdev, skb);
skb               202 drivers/net/wireless/realtek/rtw88/main.c 		dev_kfree_skb_any(skb);
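
The rtw88 C2H entries above illustrate a deferral idiom: the RX path records a parsing offset in the skb's 48-byte control buffer (skb->cb), queues the skb, and a later work item drains the queue and re-reads the offset. A minimal sketch under those assumptions; my_queue, my_rx_defer, my_rx_work, and my_handle_c2h are illustrative names, not the driver's:

	#include <linux/skbuff.h>

	static struct sk_buff_head my_queue;	/* skb_queue_head_init() elsewhere */

	static void my_handle_c2h(const u8 *c2h, u32 len);	/* hypothetical */

	static void my_rx_defer(struct sk_buff *skb, u32 pkt_offset)
	{
		/* skb->cb is scratch space owned by whoever currently holds
		 * the skb; a u32 offset fits with plenty of room to spare. */
		*((u32 *)skb->cb) = pkt_offset;
		skb_queue_tail(&my_queue, skb);
	}

	static void my_rx_work(void)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&my_queue)) != NULL) {
			u32 pkt_offset = *((u32 *)skb->cb);

			my_handle_c2h(skb->data + pkt_offset,
				      skb->len - pkt_offset);
			dev_kfree_skb_any(skb);
		}
	}
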
skb                28 drivers/net/wireless/realtek/rtw88/pci.c static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
skb                40 drivers/net/wireless/realtek/rtw88/pci.c 		return skb->priority;
skb                98 drivers/net/wireless/realtek/rtw88/pci.c 	struct sk_buff *skb, *tmp;
skb               102 drivers/net/wireless/realtek/rtw88/pci.c 	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
skb               103 drivers/net/wireless/realtek/rtw88/pci.c 		__skb_unlink(skb, &tx_ring->queue);
skb               104 drivers/net/wireless/realtek/rtw88/pci.c 		tx_data = rtw_pci_get_tx_data(skb);
skb               107 drivers/net/wireless/realtek/rtw88/pci.c 		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
skb               108 drivers/net/wireless/realtek/rtw88/pci.c 		dev_kfree_skb_any(skb);
skb               131 drivers/net/wireless/realtek/rtw88/pci.c 	struct sk_buff *skb;
skb               137 drivers/net/wireless/realtek/rtw88/pci.c 		skb = rx_ring->buf[i];
skb               138 drivers/net/wireless/realtek/rtw88/pci.c 		if (!skb)
skb               141 drivers/net/wireless/realtek/rtw88/pci.c 		dma = *((dma_addr_t *)skb->cb);
skb               143 drivers/net/wireless/realtek/rtw88/pci.c 		dev_kfree_skb(skb);
skb               204 drivers/net/wireless/realtek/rtw88/pci.c static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
skb               213 drivers/net/wireless/realtek/rtw88/pci.c 	if (!skb)
skb               216 drivers/net/wireless/realtek/rtw88/pci.c 	dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
skb               220 drivers/net/wireless/realtek/rtw88/pci.c 	*((dma_addr_t *)skb->cb) = dma;
skb               252 drivers/net/wireless/realtek/rtw88/pci.c 	struct sk_buff *skb = NULL;
skb               268 drivers/net/wireless/realtek/rtw88/pci.c 		skb = dev_alloc_skb(buf_sz);
skb               269 drivers/net/wireless/realtek/rtw88/pci.c 		if (!skb) {
skb               275 drivers/net/wireless/realtek/rtw88/pci.c 		memset(skb->data, 0, buf_sz);
skb               276 drivers/net/wireless/realtek/rtw88/pci.c 		rx_ring->buf[i] = skb;
skb               277 drivers/net/wireless/realtek/rtw88/pci.c 		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
skb               280 drivers/net/wireless/realtek/rtw88/pci.c 			dev_kfree_skb_any(skb);
skb               295 drivers/net/wireless/realtek/rtw88/pci.c 		skb = rx_ring->buf[i];
skb               296 drivers/net/wireless/realtek/rtw88/pci.c 		if (!skb)
skb               298 drivers/net/wireless/realtek/rtw88/pci.c 		dma = *((dma_addr_t *)skb->cb);
skb               300 drivers/net/wireless/realtek/rtw88/pci.c 		dev_kfree_skb_any(skb);
skb               546 drivers/net/wireless/realtek/rtw88/pci.c static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
skb               548 drivers/net/wireless/realtek/rtw88/pci.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               550 drivers/net/wireless/realtek/rtw88/pci.c 	u8 q_mapping = skb_get_queue_mapping(skb);
skb               605 drivers/net/wireless/realtek/rtw88/pci.c 			struct sk_buff *skb, u8 queue)
skb               622 drivers/net/wireless/realtek/rtw88/pci.c 	size = skb->len;
skb               629 drivers/net/wireless/realtek/rtw88/pci.c 	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
skb               631 drivers/net/wireless/realtek/rtw88/pci.c 	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
skb               632 drivers/net/wireless/realtek/rtw88/pci.c 	rtw_tx_fill_tx_desc(pkt_info, skb);
skb               633 drivers/net/wireless/realtek/rtw88/pci.c 	dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
skb               641 drivers/net/wireless/realtek/rtw88/pci.c 	psb_len = (skb->len - 1) / 128 + 1;
skb               651 drivers/net/wireless/realtek/rtw88/pci.c 	tx_data = rtw_pci_get_tx_data(skb);
skb               654 drivers/net/wireless/realtek/rtw88/pci.c 	skb_queue_tail(&ring->queue, skb);
skb               676 drivers/net/wireless/realtek/rtw88/pci.c 	struct sk_buff *skb;
skb               683 drivers/net/wireless/realtek/rtw88/pci.c 	skb = dev_alloc_skb(length);
skb               684 drivers/net/wireless/realtek/rtw88/pci.c 	if (!skb)
skb               687 drivers/net/wireless/realtek/rtw88/pci.c 	skb_reserve(skb, tx_pkt_desc_sz);
skb               688 drivers/net/wireless/realtek/rtw88/pci.c 	memcpy((u8 *)skb_put(skb, size), buf, size);
skb               693 drivers/net/wireless/realtek/rtw88/pci.c 	return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
skb               698 drivers/net/wireless/realtek/rtw88/pci.c 	struct sk_buff *skb;
skb               705 drivers/net/wireless/realtek/rtw88/pci.c 	skb = dev_alloc_skb(length);
skb               706 drivers/net/wireless/realtek/rtw88/pci.c 	if (!skb)
skb               709 drivers/net/wireless/realtek/rtw88/pci.c 	skb_reserve(skb, tx_pkt_desc_sz);
skb               710 drivers/net/wireless/realtek/rtw88/pci.c 	memcpy((u8 *)skb_put(skb, size), buf, size);
skb               714 drivers/net/wireless/realtek/rtw88/pci.c 	return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
skb               719 drivers/net/wireless/realtek/rtw88/pci.c 		      struct sk_buff *skb)
skb               723 drivers/net/wireless/realtek/rtw88/pci.c 	u8 queue = rtw_hw_queue_mapping(skb);
skb               726 drivers/net/wireless/realtek/rtw88/pci.c 	ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
skb               732 drivers/net/wireless/realtek/rtw88/pci.c 		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
skb               746 drivers/net/wireless/realtek/rtw88/pci.c 	struct sk_buff *skb;
skb               764 drivers/net/wireless/realtek/rtw88/pci.c 		skb = skb_dequeue(&ring->queue);
skb               765 drivers/net/wireless/realtek/rtw88/pci.c 		if (!skb) {
skb               770 drivers/net/wireless/realtek/rtw88/pci.c 		tx_data = rtw_pci_get_tx_data(skb);
skb               771 drivers/net/wireless/realtek/rtw88/pci.c 		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
skb               776 drivers/net/wireless/realtek/rtw88/pci.c 			dev_kfree_skb_irq(skb);
skb               782 drivers/net/wireless/realtek/rtw88/pci.c 			q_map = skb_get_queue_mapping(skb);
skb               787 drivers/net/wireless/realtek/rtw88/pci.c 		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
skb               789 drivers/net/wireless/realtek/rtw88/pci.c 		info = IEEE80211_SKB_CB(skb);
skb               793 drivers/net/wireless/realtek/rtw88/pci.c 			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
skb               804 drivers/net/wireless/realtek/rtw88/pci.c 		ieee80211_tx_status_irqsafe(hw, skb);
skb               817 drivers/net/wireless/realtek/rtw88/pci.c 	struct sk_buff *skb, *new;
skb               840 drivers/net/wireless/realtek/rtw88/pci.c 		skb = ring->buf[cur_rp];
skb               841 drivers/net/wireless/realtek/rtw88/pci.c 		dma = *((dma_addr_t *)skb->cb);
skb               844 drivers/net/wireless/realtek/rtw88/pci.c 		rx_desc = skb->data;
skb               860 drivers/net/wireless/realtek/rtw88/pci.c 		skb_put_data(new, skb->data, new_len);
skb               217 drivers/net/wireless/realtek/rtw88/pci.h rtw_pci_tx_data *rtw_pci_get_tx_data(struct sk_buff *skb)
skb               219 drivers/net/wireless/realtek/rtw88/pci.h 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
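
The rtw88 PCI entries above reuse skb->cb for a different purpose: caching the DMA bus address of each RX ring buffer so it can be unmapped at teardown. A hedged sketch of that bookkeeping with the legacy PCI DMA API the driver itself uses; my_map_rx_skb, my_unmap_rx_skb, and buf_sz are assumptions:

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static int my_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				 unsigned int buf_sz)
	{
		dma_addr_t dma;

		dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, dma))
			return -ENOMEM;

		*((dma_addr_t *)skb->cb) = dma;	/* remember it for unmap */
		return 0;
	}

	static void my_unmap_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				    unsigned int buf_sz)
	{
		dma_addr_t dma = *((dma_addr_t *)skb->cb);

		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
	}
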
skb                10 drivers/net/wireless/realtek/rtw88/rx.c 		  struct sk_buff *skb)
skb                15 drivers/net/wireless/realtek/rtw88/rx.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb                22 drivers/net/wireless/realtek/rtw88/rx.c 		rtwdev->stats.rx_unicast += skb->len;
skb                26 drivers/net/wireless/realtek/rtw88/rx.c 			rtwvif->stats.rx_unicast += skb->len;
skb                34 drivers/net/wireless/realtek/rtw88/rx.h 		  struct sk_buff *skb);
skb                12 drivers/net/wireless/realtek/rtw88/tx.c 		  struct sk_buff *skb)
skb                17 drivers/net/wireless/realtek/rtw88/tx.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb                24 drivers/net/wireless/realtek/rtw88/tx.c 		rtwdev->stats.tx_unicast += skb->len;
skb                28 drivers/net/wireless/realtek/rtw88/tx.c 			rtwvif->stats.tx_unicast += skb->len;
skb                36 drivers/net/wireless/realtek/rtw88/tx.c void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
skb                38 drivers/net/wireless/realtek/rtw88/tx.c 	__le32 *txdesc = (__le32 *)skb->data;
skb               163 drivers/net/wireless/realtek/rtw88/tx.c void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn)
skb               170 drivers/net/wireless/realtek/rtw88/tx.c 	drv_data = (u8 *)IEEE80211_SKB_CB(skb)->status.status_driver_data;
skb               174 drivers/net/wireless/realtek/rtw88/tx.c 	__skb_queue_tail(&tx_report->queue, skb);
skb               182 drivers/net/wireless/realtek/rtw88/tx.c 				    struct sk_buff *skb, bool acked)
skb               186 drivers/net/wireless/realtek/rtw88/tx.c 	info = IEEE80211_SKB_CB(skb);
skb               193 drivers/net/wireless/realtek/rtw88/tx.c 	ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
skb               196 drivers/net/wireless/realtek/rtw88/tx.c void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
skb               205 drivers/net/wireless/realtek/rtw88/tx.c 	c2h = get_c2h_from_skb(skb);
skb               225 drivers/net/wireless/realtek/rtw88/tx.c 					struct sk_buff *skb)
skb               235 drivers/net/wireless/realtek/rtw88/tx.c 					struct sk_buff *skb)
skb               238 drivers/net/wireless/realtek/rtw88/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               239 drivers/net/wireless/realtek/rtw88/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               294 drivers/net/wireless/realtek/rtw88/tx.c 			    struct sk_buff *skb)
skb               297 drivers/net/wireless/realtek/rtw88/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               298 drivers/net/wireless/realtek/rtw88/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               311 drivers/net/wireless/realtek/rtw88/tx.c 		rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, control, skb);
skb               313 drivers/net/wireless/realtek/rtw88/tx.c 		rtw_tx_data_pkt_info_update(rtwdev, pkt_info, control, skb);
skb               340 drivers/net/wireless/realtek/rtw88/tx.c 	pkt_info->tx_pkt_size = skb->len;
skb               342 drivers/net/wireless/realtek/rtw88/tx.c 	pkt_info->qsel = skb->priority;
skb               346 drivers/net/wireless/realtek/rtw88/tx.c 	rtw_tx_stats(rtwdev, vif, skb);
skb               351 drivers/net/wireless/realtek/rtw88/tx.c 				   struct sk_buff *skb)
skb               354 drivers/net/wireless/realtek/rtw88/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               363 drivers/net/wireless/realtek/rtw88/tx.c 	pkt_info->tx_pkt_size = skb->len;
skb                81 drivers/net/wireless/realtek/rtw88/tx.h 			    struct sk_buff *skb);
skb                82 drivers/net/wireless/realtek/rtw88/tx.h void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb);
skb                83 drivers/net/wireless/realtek/rtw88/tx.h void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn);
skb                84 drivers/net/wireless/realtek/rtw88/tx.h void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
skb                87 drivers/net/wireless/realtek/rtw88/tx.h 				   struct sk_buff *skb);
skb                40 drivers/net/wireless/rsi/rsi_91x_coex.c 	struct sk_buff *skb;
skb                47 drivers/net/wireless/rsi/rsi_91x_coex.c 			skb = skb_dequeue(&coex_cb->coex_tx_qs[RSI_COEX_Q_BT]);
skb                48 drivers/net/wireless/rsi/rsi_91x_coex.c 			rsi_send_bt_pkt(coex_cb->priv, skb);
skb               101 drivers/net/wireless/rsi/rsi_91x_coex.c int rsi_coex_send_pkt(void *priv, struct sk_buff *skb, u8 hal_queue)
skb               117 drivers/net/wireless/rsi/rsi_91x_coex.c 		skb_queue_tail(&coex_cb->coex_tx_qs[coex_q], skb);
skb               123 drivers/net/wireless/rsi/rsi_91x_coex.c 			(struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data;
skb               126 drivers/net/wireless/rsi/rsi_91x_coex.c 			rsi_indicate_tx_status(common->priv, skb, -EINVAL);
skb               132 drivers/net/wireless/rsi/rsi_91x_coex.c 	if (skb->priority == MGMT_SOFT_Q)
skb               133 drivers/net/wireless/rsi/rsi_91x_coex.c 		status = rsi_send_mgmt_pkt(common, skb);
skb               135 drivers/net/wireless/rsi/rsi_91x_coex.c 		status = rsi_send_data_pkt(common, skb);
skb                94 drivers/net/wireless/rsi/rsi_91x_core.c 	struct sk_buff *skb;
skb               107 drivers/net/wireless/rsi/rsi_91x_core.c 		skb = skb_peek(&common->tx_queue[q_num]);
skb               112 drivers/net/wireless/rsi/rsi_91x_core.c 		wh = (struct ieee80211_hdr *)skb->data;
skb               117 drivers/net/wireless/rsi/rsi_91x_core.c 							  skb->len, &rate);
skb               122 drivers/net/wireless/rsi/rsi_91x_core.c 			skb = skb->next;
skb               220 drivers/net/wireless/rsi/rsi_91x_core.c 			       struct sk_buff *skb)
skb               222 drivers/net/wireless/rsi/rsi_91x_core.c 	u8 q_num = skb->priority;
skb               226 drivers/net/wireless/rsi/rsi_91x_core.c 		dev_kfree_skb(skb);
skb               230 drivers/net/wireless/rsi/rsi_91x_core.c 	skb_queue_tail(&common->tx_queue[q_num], skb);
skb               265 drivers/net/wireless/rsi/rsi_91x_core.c 	struct sk_buff *skb;
skb               299 drivers/net/wireless/rsi/rsi_91x_core.c 		skb = rsi_core_dequeue_pkt(common, q_num);
skb               300 drivers/net/wireless/rsi/rsi_91x_core.c 		if (skb == NULL) {
skb               306 drivers/net/wireless/rsi/rsi_91x_core.c 			status = rsi_send_pkt_to_bus(common, skb);
skb               307 drivers/net/wireless/rsi/rsi_91x_core.c 			dev_kfree_skb(skb);
skb               311 drivers/net/wireless/rsi/rsi_91x_core.c 				status = rsi_coex_send_pkt(common, skb,
skb               316 drivers/net/wireless/rsi/rsi_91x_core.c 					status = rsi_send_mgmt_pkt(common, skb);
skb               318 drivers/net/wireless/rsi/rsi_91x_core.c 					status = rsi_send_data_pkt(common, skb);
skb               375 drivers/net/wireless/rsi/rsi_91x_core.c void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
skb               385 drivers/net/wireless/rsi/rsi_91x_core.c 	if ((!skb) || (!skb->len)) {
skb               401 drivers/net/wireless/rsi/rsi_91x_core.c 	info = IEEE80211_SKB_CB(skb);
skb               403 drivers/net/wireless/rsi/rsi_91x_core.c 	wh = (struct ieee80211_hdr *)&skb->data[0];
skb               427 drivers/net/wireless/rsi/rsi_91x_core.c 		skb->priority = q_num;
skb               429 drivers/net/wireless/rsi/rsi_91x_core.c 		if (rsi_prepare_mgmt_desc(common, skb)) {
skb               438 drivers/net/wireless/rsi/rsi_91x_core.c 			skb->priority = TID_TO_WME_AC(tid);
skb               441 drivers/net/wireless/rsi/rsi_91x_core.c 			skb->priority = BE_Q;
skb               444 drivers/net/wireless/rsi/rsi_91x_core.c 		q_num = skb->priority;
skb               467 drivers/net/wireless/rsi/rsi_91x_core.c 		if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
skb               469 drivers/net/wireless/rsi/rsi_91x_core.c 			skb->priority = q_num;
skb               471 drivers/net/wireless/rsi/rsi_91x_core.c 		if (rsi_prepare_data_desc(common, skb)) {
skb               487 drivers/net/wireless/rsi/rsi_91x_core.c 	rsi_core_queue_pkt(common, skb);
skb               496 drivers/net/wireless/rsi/rsi_91x_core.c 	ieee80211_free_txskb(common->priv->hw, skb);
skb                41 drivers/net/wireless/rsi/rsi_91x_hal.c int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb)
skb                50 drivers/net/wireless/rsi/rsi_91x_hal.c 						   skb->data, skb->len);
skb                58 drivers/net/wireless/rsi/rsi_91x_hal.c int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
skb                71 drivers/net/wireless/rsi/rsi_91x_hal.c 	if (skb->len > MAX_MGMT_PKT_SIZE) {
skb                76 drivers/net/wireless/rsi/rsi_91x_hal.c 	info = IEEE80211_SKB_CB(skb);
skb                82 drivers/net/wireless/rsi/rsi_91x_hal.c 	if (header_size > skb_headroom(skb)) {
skb                88 drivers/net/wireless/rsi/rsi_91x_hal.c 	skb_push(skb, header_size);
skb                89 drivers/net/wireless/rsi/rsi_91x_hal.c 	dword_align_bytes = ((unsigned long)skb->data & 0x3f);
skb                90 drivers/net/wireless/rsi/rsi_91x_hal.c 	if (dword_align_bytes > skb_headroom(skb)) {
skb                95 drivers/net/wireless/rsi/rsi_91x_hal.c 	skb_push(skb, dword_align_bytes);
skb                99 drivers/net/wireless/rsi/rsi_91x_hal.c 	memset(&skb->data[0], 0, header_size);
skb               100 drivers/net/wireless/rsi/rsi_91x_hal.c 	wh = (struct ieee80211_hdr *)&skb->data[header_size];
skb               102 drivers/net/wireless/rsi/rsi_91x_hal.c 	mgmt_desc = (struct rsi_mgmt_desc *)skb->data;
skb               103 drivers/net/wireless/rsi/rsi_91x_hal.c 	xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ];
skb               105 drivers/net/wireless/rsi/rsi_91x_hal.c 	rsi_set_len_qno(&mgmt_desc->len_qno, (skb->len - FRAME_DESC_SZ),
skb               152 drivers/net/wireless/rsi/rsi_91x_hal.c int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
skb               167 drivers/net/wireless/rsi/rsi_91x_hal.c 	info = IEEE80211_SKB_CB(skb);
skb               172 drivers/net/wireless/rsi/rsi_91x_hal.c 	if (header_size > skb_headroom(skb)) {
skb               176 drivers/net/wireless/rsi/rsi_91x_hal.c 	skb_push(skb, header_size);
skb               177 drivers/net/wireless/rsi/rsi_91x_hal.c 	dword_align_bytes = ((unsigned long)skb->data & 0x3f);
skb               178 drivers/net/wireless/rsi/rsi_91x_hal.c 	if (dword_align_bytes > skb_headroom(skb)) {
skb               182 drivers/net/wireless/rsi/rsi_91x_hal.c 	skb_push(skb, dword_align_bytes);
skb               186 drivers/net/wireless/rsi/rsi_91x_hal.c 	data_desc = (struct rsi_data_desc *)skb->data;
skb               189 drivers/net/wireless/rsi/rsi_91x_hal.c 	xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ];
skb               190 drivers/net/wireless/rsi/rsi_91x_hal.c 	wh = (struct ieee80211_hdr *)&skb->data[header_size];
skb               213 drivers/net/wireless/rsi/rsi_91x_hal.c 	rsi_set_len_qno(&data_desc->len_qno, (skb->len - FRAME_DESC_SZ),
skb               232 drivers/net/wireless/rsi/rsi_91x_hal.c 	if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
skb               246 drivers/net/wireless/rsi/rsi_91x_hal.c 			skb->priority = VO_Q;
skb               249 drivers/net/wireless/rsi/rsi_91x_hal.c 					(skb->len - FRAME_DESC_SZ),
skb               251 drivers/net/wireless/rsi/rsi_91x_hal.c 		if ((skb->len - header_size) == EAPOL4_PACKET_LEN) {
skb               259 drivers/net/wireless/rsi/rsi_91x_hal.c 	data_desc->qid_tid = ((skb->priority & 0xf) |
skb               290 drivers/net/wireless/rsi/rsi_91x_hal.c int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
skb               298 drivers/net/wireless/rsi/rsi_91x_hal.c 	if (!skb)
skb               303 drivers/net/wireless/rsi/rsi_91x_hal.c 	info = IEEE80211_SKB_CB(skb);
skb               314 drivers/net/wireless/rsi/rsi_91x_hal.c 	status = rsi_send_pkt_to_bus(common, skb);
skb               319 drivers/net/wireless/rsi/rsi_91x_hal.c 	++common->tx_stats.total_tx_pkt_freed[skb->priority];
skb               320 drivers/net/wireless/rsi/rsi_91x_hal.c 	rsi_indicate_tx_status(adapter, skb, status);
skb               333 drivers/net/wireless/rsi/rsi_91x_hal.c 		      struct sk_buff *skb)
skb               345 drivers/net/wireless/rsi/rsi_91x_hal.c 	info = IEEE80211_SKB_CB(skb);
skb               351 drivers/net/wireless/rsi/rsi_91x_hal.c 							   (u8 *)skb->data,
skb               352 drivers/net/wireless/rsi/rsi_91x_hal.c 							   skb->len);
skb               357 drivers/net/wireless/rsi/rsi_91x_hal.c 		dev_kfree_skb(skb);
skb               362 drivers/net/wireless/rsi/rsi_91x_hal.c 	wh = (struct ieee80211_hdr *)&skb->data[header_size];
skb               363 drivers/net/wireless/rsi/rsi_91x_hal.c 	mgmt_desc = (struct rsi_mgmt_desc *)skb->data;
skb               364 drivers/net/wireless/rsi/rsi_91x_hal.c 	xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ];
skb               376 drivers/net/wireless/rsi/rsi_91x_hal.c 	status = rsi_send_pkt_to_bus(common, skb);
skb               380 drivers/net/wireless/rsi/rsi_91x_hal.c 	rsi_indicate_tx_status(common->priv, skb, status);
skb               384 drivers/net/wireless/rsi/rsi_91x_hal.c int rsi_send_bt_pkt(struct rsi_common *common, struct sk_buff *skb)
skb               389 drivers/net/wireless/rsi/rsi_91x_hal.c 	u8 queueno = ((skb->data[1] >> 4) & 0xf);
skb               392 drivers/net/wireless/rsi/rsi_91x_hal.c 		status = rsi_send_pkt_to_bus(common, skb);
skb               399 drivers/net/wireless/rsi/rsi_91x_hal.c 	if (header_size > skb_headroom(skb)) {
skb               404 drivers/net/wireless/rsi/rsi_91x_hal.c 	skb_push(skb, header_size);
skb               405 drivers/net/wireless/rsi/rsi_91x_hal.c 	memset(skb->data, 0, header_size);
skb               406 drivers/net/wireless/rsi/rsi_91x_hal.c 	bt_desc = (struct rsi_bt_desc *)skb->data;
skb               408 drivers/net/wireless/rsi/rsi_91x_hal.c 	rsi_set_len_qno(&bt_desc->len_qno, (skb->len - FRAME_DESC_SZ),
skb               410 drivers/net/wireless/rsi/rsi_91x_hal.c 	bt_desc->bt_pkt_type = cpu_to_le16(bt_cb(skb)->pkt_type);
skb               412 drivers/net/wireless/rsi/rsi_91x_hal.c 	status = rsi_send_pkt_to_bus(common, skb);
skb               417 drivers/net/wireless/rsi/rsi_91x_hal.c 	dev_kfree_skb(skb);
skb               421 drivers/net/wireless/rsi/rsi_91x_hal.c int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
skb               451 drivers/net/wireless/rsi/rsi_91x_hal.c 	bcn_frm = (struct rsi_data_desc *)skb->data;
skb               479 drivers/net/wireless/rsi/rsi_91x_hal.c 	memcpy(&skb->data[FRAME_DESC_SZ], mac_bcn->data, mac_bcn->len);
skb               480 drivers/net/wireless/rsi/rsi_91x_hal.c 	skb_put(skb, mac_bcn->len + FRAME_DESC_SZ);
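
The rsi_91x_hal.c entries above follow one recipe for prepending a frame descriptor: check headroom before each skb_push(), first for the descriptor itself and then for the bytes needed to 64-byte-align skb->data. A minimal sketch of that recipe; my_push_desc and header_size are illustrative names, not the driver's:

	#include <linux/skbuff.h>
	#include <linux/errno.h>

	static int my_push_desc(struct sk_buff *skb, unsigned int header_size)
	{
		unsigned int align_bytes;

		if (header_size > skb_headroom(skb))
			return -ENOSPC;
		skb_push(skb, header_size);

		/* (data & 0x3f) is the misalignment; pushing that many bytes
		 * lands skb->data on a 64-byte boundary. The headroom check
		 * here must be against the alignment bytes, not header_size. */
		align_bytes = (unsigned long)skb->data & 0x3f;
		if (align_bytes > skb_headroom(skb))
			return -ENOSPC;
		skb_push(skb, align_bytes);

		memset(skb->data, 0, header_size + align_bytes);
		return 0;
	}
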
skb               338 drivers/net/wireless/rsi/rsi_91x_mac80211.c 			    struct sk_buff *skb,
skb               341 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               353 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	skb_pull(skb, tx_params->internal_hdr_size);
skb               356 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	ieee80211_tx_status_irqsafe(adapter->hw, skb);
skb               371 drivers/net/wireless/rsi/rsi_91x_mac80211.c 			    struct sk_buff *skb)
skb               375 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	struct ieee80211_hdr *wlh = (struct ieee80211_hdr *)skb->data;
skb               380 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	rsi_core_xmit(common, skb);
skb              1278 drivers/net/wireless/rsi/rsi_91x_mac80211.c 			       struct sk_buff *skb,
skb              1285 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1294 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	hdr = ((struct ieee80211_hdr *)(skb->data));
skb              1310 drivers/net/wireless/rsi/rsi_91x_mac80211.c 			memmove(skb->data + 4, skb->data, hdrlen);
skb              1311 drivers/net/wireless/rsi/rsi_91x_mac80211.c 			skb_pull(skb, 4);
skb              1313 drivers/net/wireless/rsi/rsi_91x_mac80211.c 			memmove(skb->data + 8, skb->data, hdrlen);
skb              1314 drivers/net/wireless/rsi/rsi_91x_mac80211.c 			skb_pull(skb, 8);
skb              1349 drivers/net/wireless/rsi/rsi_91x_mac80211.c 			    struct sk_buff *skb)
skb              1353 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb              1356 drivers/net/wireless/rsi/rsi_91x_mac80211.c 		dev_kfree_skb(skb);
skb              1361 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	rsi_fill_rx_status(hw, skb, common, rx_status);
skb              1363 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	ieee80211_rx_irqsafe(hw, skb);
skb               124 drivers/net/wireless/rsi/rsi_91x_main.c 	struct sk_buff *skb = NULL;
skb               137 drivers/net/wireless/rsi/rsi_91x_main.c 	skb = dev_alloc_skb(pkt_len + FRAME_DESC_SZ);
skb               138 drivers/net/wireless/rsi/rsi_91x_main.c 	if (skb == NULL)
skb               142 drivers/net/wireless/rsi/rsi_91x_main.c 	skb_put(skb, pkt_len);
skb               143 drivers/net/wireless/rsi/rsi_91x_main.c 	memcpy(skb->data, buffer + payload_offset, skb->len);
skb               145 drivers/net/wireless/rsi/rsi_91x_main.c 	return skb;
skb               160 drivers/net/wireless/rsi/rsi_91x_main.c 	struct sk_buff *skb = NULL;
skb               191 drivers/net/wireless/rsi/rsi_91x_main.c 			skb = rsi_prepare_skb(common,
skb               195 drivers/net/wireless/rsi/rsi_91x_main.c 			if (skb == NULL)
skb               198 drivers/net/wireless/rsi/rsi_91x_main.c 			rsi_indicate_pkt_to_os(common, skb);
skb               344 drivers/net/wireless/rsi/rsi_91x_mgmt.c 					struct sk_buff *skb)
skb               349 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (skb == NULL) {
skb               353 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	desc = (struct rsi_cmd_desc *)skb->data;
skb               355 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb->priority = MGMT_SOFT_Q;
skb               356 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	tx_params = (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data;
skb               358 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_queue_tail(&common->tx_queue[MGMT_SOFT_Q], skb);
skb               382 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb               387 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb               389 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb               395 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb               396 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	radio_caps = (struct rsi_radio_caps *)skb->data;
skb               470 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb               472 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb               492 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb               505 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(msg_len);
skb               506 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb)
skb               509 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put_data(skb,
skb               513 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	info = IEEE80211_SKB_CB(skb);
skb               517 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	rsi_indicate_pkt_to_os(common, skb);
skb               539 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb = NULL;
skb               547 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb               549 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb               555 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb               556 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	peer_notify = (struct rsi_peer_notify *)skb->data;
skb               586 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb               588 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	status = rsi_send_internal_mgmt_frame(common, skb);
skb               616 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb = NULL;
skb               620 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb               622 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb               628 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb               629 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	aggr_params = (struct rsi_aggr_params *)skb->data;
skb               650 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb               652 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb               664 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb               670 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb               671 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb               677 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb               678 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	bb_rf_prog = (struct rsi_bb_rf_prog *)skb->data;
skb               694 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb               696 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb               712 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb = NULL;
skb               721 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb               722 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb               728 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb               729 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	vap_caps = (struct rsi_vap_caps *)skb->data;
skb               770 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb               772 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb               795 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb = NULL;
skb               802 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb               803 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb               809 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb               810 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	set_key = (struct rsi_set_key *)skb->data;
skb               850 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb               852 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb               862 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb               869 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb               870 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb               875 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb               877 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	dev_cfgs = (struct rsi_config_vals *)skb->data;
skb               897 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb               899 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb               910 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb               914 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(sizeof(struct rsi_boot_params));
skb               915 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb               921 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, sizeof(struct rsi_boot_params));
skb               922 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	boot_params = (struct rsi_boot_params *)skb->data;
skb               958 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, sizeof(struct rsi_boot_params));
skb               960 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb               965 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb               970 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(sizeof(struct rsi_boot_params_9116));
skb               971 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb)
skb               973 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, sizeof(struct rsi_boot_params_9116));
skb               974 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	boot_params = (struct rsi_boot_params_9116 *)skb->data;
skb              1002 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, sizeof(struct rsi_boot_params_9116));
skb              1004 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1016 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1021 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(FRAME_DESC_SZ);
skb              1022 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb              1028 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, FRAME_DESC_SZ);
skb              1029 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	mgmt_frame = (struct rsi_mac_frame *)skb->data;
skb              1040 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, FRAME_DESC_SZ);
skb              1042 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1115 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb = NULL;
skb              1122 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb              1123 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb              1130 drivers/net/wireless/rsi/rsi_91x_mgmt.c 		dev_kfree_skb(skb);
skb              1133 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb              1134 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	chan_cfg = (struct rsi_chan_config *)skb->data;
skb              1159 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb              1161 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1175 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb = NULL;
skb              1180 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(FRAME_DESC_SZ);
skb              1181 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb              1187 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, FRAME_DESC_SZ);
skb              1188 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	cmd_frame = (struct rsi_mac_frame *)skb->data;
skb              1196 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, FRAME_DESC_SZ);
skb              1198 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1204 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1210 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(sizeof(struct rsi_dynamic_s));
skb              1211 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb)
skb              1214 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, sizeof(struct rsi_dynamic_s));
skb              1215 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	dynamic_frame = (struct rsi_dynamic_s *)skb->data;
skb              1236 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, sizeof(struct rsi_dynamic_s));
skb              1238 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1300 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1315 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb              1316 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb              1322 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb              1327 drivers/net/wireless/rsi/rsi_91x_mgmt.c 		dev_kfree_skb(skb);
skb              1331 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	auto_rate = (struct rsi_auto_rate *)skb->data;
skb              1431 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb              1434 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1501 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1505 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(FRAME_DESC_SZ);
skb              1506 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb              1512 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, FRAME_DESC_SZ);
skb              1513 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	mgmt_frame = (struct rsi_eeprom_read_frame *)skb->data;
skb              1529 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, FRAME_DESC_SZ);
skb              1531 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1545 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1549 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(FRAME_DESC_SZ);
skb              1550 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb              1556 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, FRAME_DESC_SZ);
skb              1557 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	mgmt_frame = (struct rsi_block_unblock_data *)skb->data;
skb              1573 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, FRAME_DESC_SZ);
skb              1575 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1589 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1593 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(FRAME_DESC_SZ);
skb              1594 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb              1600 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, FRAME_DESC_SZ);
skb              1601 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	cmd_frame = (struct rsi_mac_frame *)skb->data;
skb              1607 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, FRAME_DESC_SZ);
skb              1609 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1619 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1622 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb              1623 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb)
skb              1625 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb              1627 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	ps = (struct rsi_request_ps *)skb->data;
skb              1663 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb              1665 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1672 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1677 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb              1678 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb)
skb              1680 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb              1682 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	w9116_features = (struct rsi_wlan_9116_features *)skb->data;
skb              1704 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb              1706 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1721 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1723 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(FRAME_DESC_SZ);
skb              1724 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb) {
skb              1730 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, FRAME_DESC_SZ);
skb              1732 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	ant_sel_frame = (struct rsi_ant_sel_frame *)skb->data;
skb              1738 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, FRAME_DESC_SZ);
skb              1740 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1745 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb = NULL;
skb              1748 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(MAX_MGMT_PKT_SIZE);
skb              1749 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb)
skb              1752 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, MAX_MGMT_PKT_SIZE);
skb              1754 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	dword_align_bytes = ((unsigned long)skb->data & 0x3f);
skb              1756 drivers/net/wireless/rsi/rsi_91x_mgmt.c 		skb_pull(skb, (64 - dword_align_bytes));
skb              1757 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (rsi_prepare_beacon(common, skb)) {
skb              1759 drivers/net/wireless/rsi/rsi_91x_mgmt.c 		dev_kfree_skb(skb);
skb              1762 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb);
skb              1774 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1780 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(length);
skb              1781 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb)
skb              1783 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, length);
skb              1784 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	cmd_frame = (struct rsi_wowlan_req *)skb->data;
skb              1799 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, length);
skb              1801 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1810 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1816 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len);
skb              1817 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb)
skb              1819 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len);
skb              1821 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	bgscan = (struct rsi_bgscan_config *)skb->data;
skb              1841 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len);
skb              1843 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
skb              1854 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	struct sk_buff *skb;
skb              1871 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb = dev_alloc_skb(frame_len + MAX_BGSCAN_PROBE_REQ_LEN);
skb              1872 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	if (!skb)
skb              1874 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memset(skb->data, 0, frame_len + MAX_BGSCAN_PROBE_REQ_LEN);
skb              1876 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	bgscan = (struct rsi_bgscan_probe *)skb->data;
skb              1891 drivers/net/wireless/rsi/rsi_91x_mgmt.c 		dev_kfree_skb(skb);
skb              1895 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	memcpy(&skb->data[frame_len], probereq_skb->data, probereq_skb->len);
skb              1903 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	skb_put(skb, frame_len + probereq_skb->len);
skb              1907 drivers/net/wireless/rsi/rsi_91x_mgmt.c 	return rsi_send_internal_mgmt_frame(common, skb);
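
Nearly every rsi_91x_mgmt.c entry above follows the same template: allocate a frame-sized skb, zero it, overlay a descriptor struct on skb->data, fill the fields, then skb_put() the final length before handing the skb to rsi_send_internal_mgmt_frame(). A stripped-down sketch of that template, using a hypothetical struct my_cmd_frame rather than any real rsi descriptor:

	#include <linux/skbuff.h>

	struct my_cmd_frame {
		__le16 desc_word[8];	/* hypothetical 16-byte descriptor */
	};

	static struct sk_buff *my_build_cmd(u16 frame_type)
	{
		struct my_cmd_frame *cmd;
		struct sk_buff *skb;

		skb = dev_alloc_skb(sizeof(*cmd));
		if (!skb)
			return NULL;

		memset(skb->data, 0, sizeof(*cmd));
		cmd = (struct my_cmd_frame *)skb->data;
		cmd->desc_word[1] = cpu_to_le16(frame_type);

		skb_put(skb, sizeof(*cmd));	/* commit the frame length */
		return skb;
	}
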
skb                67 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 	struct sk_buff *skb;
skb                78 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 			skb = skb_dequeue(&sdev->rx_q.head);
skb                79 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 			if (!skb)
skb                83 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 			status = rsi_read_pkt(common, skb->data, skb->len);
skb                86 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 				dev_kfree_skb(skb);
skb                89 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 			dev_kfree_skb(skb);
skb               116 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 	struct sk_buff *skb;
skb               147 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 	skb = dev_alloc_skb(rcv_pkt_len);
skb               148 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 	if (!skb)
skb               151 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 	status = rsi_sdio_host_intf_read_pkt(adapter, skb->data, rcv_pkt_len);
skb               155 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 		dev_kfree_skb(skb);
skb               158 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 	skb_put(skb, rcv_pkt_len);
skb               159 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c 	skb_queue_tail(&dev->rx_q.head, skb);
skb               317 drivers/net/wireless/rsi/rsi_91x_usb.c 	struct sk_buff *skb;
skb               321 drivers/net/wireless/rsi/rsi_91x_usb.c 	skb = dev_alloc_skb(RSI_MAX_RX_USB_PKT_SIZE);
skb               322 drivers/net/wireless/rsi/rsi_91x_usb.c 	if (!skb)
skb               324 drivers/net/wireless/rsi/rsi_91x_usb.c 	skb_reserve(skb, MAX_DWORD_ALIGN_BYTES);
skb               325 drivers/net/wireless/rsi/rsi_91x_usb.c 	skb_put(skb, RSI_MAX_RX_USB_PKT_SIZE - MAX_DWORD_ALIGN_BYTES);
skb               326 drivers/net/wireless/rsi/rsi_91x_usb.c 	dword_align_bytes = (unsigned long)skb->data & 0x3f;
skb               328 drivers/net/wireless/rsi/rsi_91x_usb.c 		skb_push(skb, dword_align_bytes);
skb               329 drivers/net/wireless/rsi/rsi_91x_usb.c 	urb->transfer_buffer = skb->data;
skb               330 drivers/net/wireless/rsi/rsi_91x_usb.c 	rx_cb->rx_skb = skb;
skb               337 drivers/net/wireless/rsi/rsi_91x_usb.c 			  skb->len,
skb               344 drivers/net/wireless/rsi/rsi_91x_usb.c 		dev_kfree_skb(skb);
skb                33 drivers/net/wireless/rsi/rsi_91x_usb_ops.c 	struct sk_buff *skb;
skb                43 drivers/net/wireless/rsi/rsi_91x_usb_ops.c 			skb = skb_dequeue(&dev->rx_q);
skb                44 drivers/net/wireless/rsi/rsi_91x_usb_ops.c 			if (!skb)
skb                46 drivers/net/wireless/rsi/rsi_91x_usb_ops.c 			status = rsi_read_pkt(common, skb->data, 0);
skb                52 drivers/net/wireless/rsi/rsi_91x_usb_ops.c 			dev_kfree_skb(skb);
skb                34 drivers/net/wireless/rsi/rsi_coex.h int rsi_coex_send_pkt(void *priv, struct sk_buff *skb, u8 proto_type);
skb               213 drivers/net/wireless/rsi/rsi_hal.h int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb);
skb               214 drivers/net/wireless/rsi/rsi_hal.h int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb);
skb               215 drivers/net/wireless/rsi/rsi_hal.h int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb);
skb               216 drivers/net/wireless/rsi/rsi_hal.h int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb);
skb               217 drivers/net/wireless/rsi/rsi_hal.h int rsi_send_bt_pkt(struct rsi_common *common, struct sk_buff *skb);
skb               735 drivers/net/wireless/rsi/rsi_mgmt.h void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb);
skb               737 drivers/net/wireless/rsi/rsi_mgmt.h void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb,
skb               741 drivers/net/wireless/rsi/rsi_mgmt.h void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb);
skb               742 drivers/net/wireless/rsi/rsi_mgmt.h int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb);
skb               743 drivers/net/wireless/rsi/rsi_mgmt.h int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb);
skb                18 drivers/net/wireless/st/cw1200/queue.c 	struct sk_buff		*skb;
skb                73 drivers/net/wireless/st/cw1200/queue.c 		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
skb               109 drivers/net/wireless/st/cw1200/queue.c 		item->skb = NULL;
skb               211 drivers/net/wireless/st/cw1200/queue.c 		WARN_ON(!item->skb);
skb               213 drivers/net/wireless/st/cw1200/queue.c 		item->skb = NULL;
skb               279 drivers/net/wireless/st/cw1200/queue.c 		     struct sk_buff *skb,
skb               292 drivers/net/wireless/st/cw1200/queue.c 		BUG_ON(item->skb);
skb               295 drivers/net/wireless/st/cw1200/queue.c 		item->skb = skb;
skb               349 drivers/net/wireless/st/cw1200/queue.c 		*tx = (struct wsm_tx *)item->skb->data;
skb               350 drivers/net/wireless/st/cw1200/queue.c 		*tx_info = IEEE80211_SKB_CB(item->skb);
skb               465 drivers/net/wireless/st/cw1200/queue.c 		gc_skb = item->skb;
skb               466 drivers/net/wireless/st/cw1200/queue.c 		item->skb = NULL;
skb               491 drivers/net/wireless/st/cw1200/queue.c 			 struct sk_buff **skb,
skb               513 drivers/net/wireless/st/cw1200/queue.c 		*skb = item->skb;
skb                23 drivers/net/wireless/st/cw1200/queue.h 					struct sk_buff *skb,
skb                80 drivers/net/wireless/st/cw1200/queue.h 		     struct sk_buff *skb,
skb                92 drivers/net/wireless/st/cw1200/queue.h 			 struct sk_buff **skb,
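
In the cw1200 queue code above, each queue item owns at most one skb: put() stores the buffer behind a BUG_ON that guards against a busy slot, get() hands out the WSM header living at item->skb->data, and clearing a slot NULLs the pointer before the stats destructor releases the buffer. A hedged sketch of that ownership rule (the real item struct carries more fields):

	#include <linux/skbuff.h>
	#include <linux/bug.h>

	struct queue_item {			/* hypothetical mirror of the cw1200 item */
		struct sk_buff *skb;
	};

	static void item_put(struct queue_item *item, struct sk_buff *skb)
	{
		BUG_ON(item->skb);		/* a slot never holds two buffers */
		item->skb = skb;
	}

	static struct sk_buff *item_take(struct queue_item *item)
	{
		struct sk_buff *skb = item->skb;

		item->skb = NULL;		/* ownership moves to the caller */
		return skb;
	}
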
skb                82 drivers/net/wireless/st/cw1200/scan.c 	frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
skb                84 drivers/net/wireless/st/cw1200/scan.c 	if (!frame.skb) {
skb                91 drivers/net/wireless/st/cw1200/scan.c 		skb_put_data(frame.skb, req->ie, req->ie_len);
skb                99 drivers/net/wireless/st/cw1200/scan.c 		dev_kfree_skb(frame.skb);
skb               123 drivers/net/wireless/st/cw1200/scan.c 	if (frame.skb)
skb               124 drivers/net/wireless/st/cw1200/scan.c 		dev_kfree_skb(frame.skb);
skb               400 drivers/net/wireless/st/cw1200/scan.c 				 &frame.skb, &txpriv)) {
skb               406 drivers/net/wireless/st/cw1200/scan.c 	wsm = (struct wsm_tx *)frame.skb->data;
skb               417 drivers/net/wireless/st/cw1200/scan.c 	skb_pull(frame.skb, txpriv->offset);
skb               419 drivers/net/wireless/st/cw1200/scan.c 	ies = &frame.skb->data[sizeof(struct ieee80211_hdr_3addr)];
skb               420 drivers/net/wireless/st/cw1200/scan.c 	ies_len = frame.skb->len - sizeof(struct ieee80211_hdr_3addr);
skb               439 drivers/net/wireless/st/cw1200/scan.c 			skb_trim(frame.skb, frame.skb->len - ssids[0].length);
skb               454 drivers/net/wireless/st/cw1200/scan.c 	skb_push(frame.skb, txpriv->offset);
skb               456 drivers/net/wireless/st/cw1200/scan.c 		IEEE80211_SKB_CB(frame.skb)->flags |= IEEE80211_TX_STAT_ACK;
skb               194 drivers/net/wireless/st/cw1200/sta.c 		struct sk_buff *skb;
skb               198 drivers/net/wireless/st/cw1200/sta.c 		skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
skb               199 drivers/net/wireless/st/cw1200/sta.c 		WARN_ON(!skb);
skb               200 drivers/net/wireless/st/cw1200/sta.c 		if (skb)
skb               201 drivers/net/wireless/st/cw1200/sta.c 			cw1200_tx(priv->hw, NULL, skb);
skb              1546 drivers/net/wireless/st/cw1200/sta.c 	struct sk_buff *skb;
skb              1564 drivers/net/wireless/st/cw1200/sta.c 	while ((skb = skb_dequeue(&entry->rx_queue)))
skb              1565 drivers/net/wireless/st/cw1200/sta.c 		ieee80211_rx_irqsafe(priv->hw, skb);
skb              1664 drivers/net/wireless/st/cw1200/sta.c 	struct sk_buff *skb;
skb              1673 drivers/net/wireless/st/cw1200/sta.c 	skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
skb              1675 drivers/net/wireless/st/cw1200/sta.c 	if (!skb) {
skb              1685 drivers/net/wireless/st/cw1200/sta.c 		skb->data[tim_offset + 2] = 0;
skb              1689 drivers/net/wireless/st/cw1200/sta.c 			skb->data[tim_offset + 4] |= 1;
skb              1691 drivers/net/wireless/st/cw1200/sta.c 			skb->data[tim_offset + 4] &= ~1;
skb              1694 drivers/net/wireless/st/cw1200/sta.c 	update_ie.ies = &skb->data[tim_offset];
skb              1698 drivers/net/wireless/st/cw1200/sta.c 	dev_kfree_skb(skb);
skb              2205 drivers/net/wireless/st/cw1200/sta.c 	frame.skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
skb              2207 drivers/net/wireless/st/cw1200/sta.c 	if (!frame.skb)
skb              2218 drivers/net/wireless/st/cw1200/sta.c 	mgmt = (void *)frame.skb->data;
skb              2232 drivers/net/wireless/st/cw1200/sta.c 	dev_kfree_skb(frame.skb);
skb              2246 drivers/net/wireless/st/cw1200/sta.c 	frame.skb = ieee80211_pspoll_get(priv->hw, priv->vif);
skb              2247 drivers/net/wireless/st/cw1200/sta.c 	if (!frame.skb)
skb              2252 drivers/net/wireless/st/cw1200/sta.c 	dev_kfree_skb(frame.skb);
skb              2265 drivers/net/wireless/st/cw1200/sta.c 	frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
skb              2266 drivers/net/wireless/st/cw1200/sta.c 	if (!frame.skb)
skb              2271 drivers/net/wireless/st/cw1200/sta.c 	dev_kfree_skb(frame.skb);
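
The sta.c lines above patch the TIM element inside a beacon obtained from ieee80211_beacon_get_tim(): the DTIM count is zeroed (the firmware maintains it) and bit 0 of the bitmap-control byte signals buffered group traffic. The byte layout is fixed by IEEE 802.11, so the offsets can be annotated directly:

	#include <linux/types.h>
	#include <linux/skbuff.h>

	/* TIM element layout at tim_offset inside the beacon (IEEE 802.11):
	 *   +0 element ID (5)   +1 length
	 *   +2 DTIM count       +3 DTIM period
	 *   +4 bitmap control, bit 0 = group-addressed traffic buffered
	 */
	static void patch_tim(struct sk_buff *beacon, int tim_offset, bool mcast)
	{
		beacon->data[tim_offset + 2] = 0;	/* count kept by firmware */
		if (mcast)
			beacon->data[tim_offset + 4] |= 1;
		else
			beacon->data[tim_offset + 4] &= ~1;
	}
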
skb                22 drivers/net/wireless/st/cw1200/txrx.c 				   struct sk_buff *skb);
skb               401 drivers/net/wireless/st/cw1200/txrx.c 	struct sk_buff *skb;
skb               508 drivers/net/wireless/st/cw1200/txrx.c 	skb_put(t->skb, t->tx_info->control.hw_key->icv_len);
skb               511 drivers/net/wireless/st/cw1200/txrx.c 		skb_put(t->skb, 8); /* MIC space */
skb               521 drivers/net/wireless/st/cw1200/txrx.c 	size_t offset = (size_t)t->skb->data & 3;
skb               533 drivers/net/wireless/st/cw1200/txrx.c 	if (skb_headroom(t->skb) < offset) {
skb               536 drivers/net/wireless/st/cw1200/txrx.c 			  skb_headroom(t->skb));
skb               539 drivers/net/wireless/st/cw1200/txrx.c 	skb_push(t->skb, offset);
skb               567 drivers/net/wireless/st/cw1200/txrx.c 	if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
skb               570 drivers/net/wireless/st/cw1200/txrx.c 			  skb_headroom(t->skb));
skb               574 drivers/net/wireless/st/cw1200/txrx.c 	wsm = skb_push(t->skb, sizeof(struct wsm_tx));
skb               577 drivers/net/wireless/st/cw1200/txrx.c 	wsm->hdr.len = __cpu_to_le16(t->skb->len);
skb               598 drivers/net/wireless/st/cw1200/txrx.c 		u8 *payload = &t->skb->data[t->hdrlen];
skb               705 drivers/net/wireless/st/cw1200/txrx.c 	       struct sk_buff *skb)
skb               709 drivers/net/wireless/st/cw1200/txrx.c 		.skb = skb,
skb               710 drivers/net/wireless/st/cw1200/txrx.c 		.queue = skb_get_queue_mapping(skb),
skb               711 drivers/net/wireless/st/cw1200/txrx.c 		.tx_info = IEEE80211_SKB_CB(skb),
skb               712 drivers/net/wireless/st/cw1200/txrx.c 		.hdr = (struct ieee80211_hdr *)skb->data,
skb               740 drivers/net/wireless/st/cw1200/txrx.c 		 skb->len, t.queue, t.txpriv.link_id,
skb               772 drivers/net/wireless/st/cw1200/txrx.c 					t.skb, &t.txpriv));
skb               786 drivers/net/wireless/st/cw1200/txrx.c 	cw1200_skb_dtor(priv, skb, &t.txpriv);
skb               793 drivers/net/wireless/st/cw1200/txrx.c 				   struct sk_buff *skb)
skb               795 drivers/net/wireless/st/cw1200/txrx.c 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
skb               805 drivers/net/wireless/st/cw1200/txrx.c 				struct sk_buff *skb)
skb               808 drivers/net/wireless/st/cw1200/txrx.c 	struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
skb               856 drivers/net/wireless/st/cw1200/txrx.c 	struct sk_buff *skb;
skb               897 drivers/net/wireless/st/cw1200/txrx.c 					 &skb, &txpriv)) {
skb               898 drivers/net/wireless/st/cw1200/txrx.c 		struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb);
skb               952 drivers/net/wireless/st/cw1200/txrx.c 			skb_trim(skb, skb->len - tx->control.hw_key->icv_len);
skb               954 drivers/net/wireless/st/cw1200/txrx.c 				skb_trim(skb, skb->len - 8); /* MIC space */
skb               963 drivers/net/wireless/st/cw1200/txrx.c 			       struct sk_buff *skb, int link_id, int tid)
skb               980 drivers/net/wireless/st/cw1200/txrx.c 			hdr = (struct ieee80211_hdr *)skb->data;
skb               991 drivers/net/wireless/st/cw1200/txrx.c 		     struct sk_buff *skb,
skb               994 drivers/net/wireless/st/cw1200/txrx.c 	skb_pull(skb, txpriv->offset);
skb               996 drivers/net/wireless/st/cw1200/txrx.c 		cw1200_notify_buffered_tx(priv, skb,
skb              1000 drivers/net/wireless/st/cw1200/txrx.c 	ieee80211_tx_status(priv->hw, skb);
skb              1008 drivers/net/wireless/st/cw1200/txrx.c 	struct sk_buff *skb = *skb_p;
skb              1009 drivers/net/wireless/st/cw1200/txrx.c 	struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
skb              1010 drivers/net/wireless/st/cw1200/txrx.c 	struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
skb              1011 drivers/net/wireless/st/cw1200/txrx.c 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
skb              1068 drivers/net/wireless/st/cw1200/txrx.c 	if (skb->len < sizeof(struct ieee80211_pspoll)) {
skb              1074 drivers/net/wireless/st/cw1200/txrx.c 		if (cw1200_handle_pspoll(priv, skb))
skb              1135 drivers/net/wireless/st/cw1200/txrx.c 		if (skb->len < hdrlen + iv_len + icv_len) {
skb              1141 drivers/net/wireless/st/cw1200/txrx.c 		skb_trim(skb, skb->len - icv_len);
skb              1142 drivers/net/wireless/st/cw1200/txrx.c 		memmove(skb->data + iv_len, skb->data, hdrlen);
skb              1143 drivers/net/wireless/st/cw1200/txrx.c 		skb_pull(skb, iv_len);
skb              1148 drivers/net/wireless/st/cw1200/txrx.c 		memcpy(&hdr->mactime, skb->data + skb->len - 8, 8);
skb              1150 drivers/net/wireless/st/cw1200/txrx.c 		if (skb->len >= 8)
skb              1151 drivers/net/wireless/st/cw1200/txrx.c 			skb_trim(skb, skb->len - 8);
skb              1162 drivers/net/wireless/st/cw1200/txrx.c 		if (cw1200_handle_action_rx(priv, skb))
skb              1169 drivers/net/wireless/st/cw1200/txrx.c 			  (skb->data))->u.beacon.variable;
skb              1170 drivers/net/wireless/st/cw1200/txrx.c 		size_t ies_len = skb->len - (ies - (u8 *)(skb->data));
skb              1210 drivers/net/wireless/st/cw1200/txrx.c 			skb_queue_tail(&entry->rx_queue, skb);
skb              1212 drivers/net/wireless/st/cw1200/txrx.c 			ieee80211_rx_irqsafe(priv->hw, skb);
skb              1215 drivers/net/wireless/st/cw1200/txrx.c 		ieee80211_rx_irqsafe(priv->hw, skb);
skb                63 drivers/net/wireless/st/cw1200/txrx.h 	       struct sk_buff *skb);
skb                65 drivers/net/wireless/st/cw1200/txrx.h 		     struct sk_buff *skb,
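
The txrx.c receive lines above show the usual software de-encapsulation for hardware that leaves the security trailer in place: validate the minimum length, trim the ICV off the tail, slide the 802.11 header forward over the IV, then pull the IV length. A compact sketch:

	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	static int strip_iv_icv(struct sk_buff *skb, unsigned int hdrlen,
				unsigned int iv_len, unsigned int icv_len)
	{
		if (skb->len < hdrlen + iv_len + icv_len)
			return -EINVAL;			/* malformed frame */

		skb_trim(skb, skb->len - icv_len);	/* drop trailing ICV */
		memmove(skb->data + iv_len, skb->data, hdrlen);
		skb_pull(skb, iv_len);			/* header contiguous again */
		return 0;
	}
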
skb              1462 drivers/net/wireless/st/cw1200/wsm.h 	struct sk_buff *skb;
skb              1469 drivers/net/wireless/st/cw1200/wsm.h 	u8 *p = skb_push(arg->skb, 4);
skb              1472 drivers/net/wireless/st/cw1200/wsm.h 	((__le16 *)p)[1] = __cpu_to_le16(arg->skb->len - 4);
skb              1473 drivers/net/wireless/st/cw1200/wsm.h 	ret = wsm_write_mib(priv, WSM_MIB_ID_TEMPLATE_FRAME, p, arg->skb->len);
skb              1474 drivers/net/wireless/st/cw1200/wsm.h 	skb_pull(arg->skb, 4);
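
wsm.h above shows an in-place framing trick: a 4-byte template header is pushed onto the caller's skb, the whole buffer is written out, and the header is pulled back off so the skb returns unchanged. A sketch under stated assumptions; write_mib() and TEMPLATE_FRAME_ID are hypothetical stand-ins for the WSM MIB writer and its id, and the meaning of byte 0 is an assumption:

	#include <linux/skbuff.h>
	#include <linux/types.h>

	/* hypothetical: the real driver calls wsm_write_mib() here */
	extern int write_mib(void *priv, u16 id, void *buf, size_t len);
	#define TEMPLATE_FRAME_ID 0x1001	/* placeholder MIB id */

	static int set_template_frame(void *priv, u8 frame_type, struct sk_buff *skb)
	{
		u8 *p = skb_push(skb, 4);	/* transient template header */
		int ret;

		p[0] = frame_type;		/* assumption: byte 0 = frame type */
		((__le16 *)p)[1] = cpu_to_le16(skb->len - 4);
		ret = write_mib(priv, TEMPLATE_FRAME_ID, p, skb->len);
		skb_pull(skb, 4);		/* restore the caller's view */
		return ret;
	}
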
skb               340 drivers/net/wireless/ti/wl1251/main.c 			 struct sk_buff *skb)
skb               345 drivers/net/wireless/ti/wl1251/main.c 	skb_queue_tail(&wl->tx_queue, skb);
skb               540 drivers/net/wireless/ti/wl1251/main.c 	struct sk_buff *skb = NULL;
skb               549 drivers/net/wireless/ti/wl1251/main.c 		skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
skb               550 drivers/net/wireless/ti/wl1251/main.c 		if (!skb)
skb               552 drivers/net/wireless/ti/wl1251/main.c 		size = skb->len;
skb               553 drivers/net/wireless/ti/wl1251/main.c 		ptr = skb->data;
skb               559 drivers/net/wireless/ti/wl1251/main.c 	dev_kfree_skb(skb);
skb               978 drivers/net/wireless/ti/wl1251/main.c 	struct sk_buff *skb;
skb              1012 drivers/net/wireless/ti/wl1251/main.c 	skb = ieee80211_probereq_get(wl->hw, wl->vif->addr, ssid, ssid_len,
skb              1014 drivers/net/wireless/ti/wl1251/main.c 	if (!skb) {
skb              1019 drivers/net/wireless/ti/wl1251/main.c 		skb_put_data(skb, req->ie, req->ie_len);
skb              1021 drivers/net/wireless/ti/wl1251/main.c 	ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
skb              1022 drivers/net/wireless/ti/wl1251/main.c 				      skb->len);
skb              1023 drivers/net/wireless/ti/wl1251/main.c 	dev_kfree_skb(skb);
skb              1083 drivers/net/wireless/ti/wl1251/main.c 	struct sk_buff *beacon, *skb;
skb              1129 drivers/net/wireless/ti/wl1251/main.c 			skb = ieee80211_pspoll_get(wl->hw, wl->vif);
skb              1130 drivers/net/wireless/ti/wl1251/main.c 			if (!skb)
skb              1134 drivers/net/wireless/ti/wl1251/main.c 						      skb->data,
skb              1135 drivers/net/wireless/ti/wl1251/main.c 						      skb->len);
skb              1136 drivers/net/wireless/ti/wl1251/main.c 			dev_kfree_skb(skb);
skb               136 drivers/net/wireless/ti/wl1251/rx.c 	struct sk_buff *skb;
skb               159 drivers/net/wireless/ti/wl1251/rx.c 	skb = __dev_alloc_skb(length, GFP_KERNEL);
skb               160 drivers/net/wireless/ti/wl1251/rx.c 	if (!skb) {
skb               165 drivers/net/wireless/ti/wl1251/rx.c 	rx_buffer = skb_put(skb, length);
skb               169 drivers/net/wireless/ti/wl1251/rx.c 	skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);
skb               171 drivers/net/wireless/ti/wl1251/rx.c 	fc = (u16 *)skb->data;
skb               178 drivers/net/wireless/ti/wl1251/rx.c 	wl1251_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
skb               181 drivers/net/wireless/ti/wl1251/rx.c 	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
skb               182 drivers/net/wireless/ti/wl1251/rx.c 	ieee80211_rx_ni(wl->hw, skb);
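
wl1251's receive path above builds the mac80211 receive status separately and copies it into the skb's control block just before delivery; ieee80211_rx_ni() is the process-context variant. A compressed sketch of the hand-off:

	#include <net/mac80211.h>
	#include <linux/string.h>

	/* deliver one frame; 'status' was filled from the rx descriptor */
	static void deliver_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
			       struct ieee80211_rx_status *status)
	{
		memcpy(IEEE80211_SKB_RXCB(skb), status, sizeof(*status));
		ieee80211_rx_ni(hw, skb);	/* process context, not IRQ */
	}
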
skb                56 drivers/net/wireless/ti/wl1251/tx.c static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
skb                62 drivers/net/wireless/ti/wl1251/tx.c 			wl->tx_frames[i] = skb;
skb               134 drivers/net/wireless/ti/wl1251/tx.c static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
skb               142 drivers/net/wireless/ti/wl1251/tx.c 	if (!skb)
skb               145 drivers/net/wireless/ti/wl1251/tx.c 	id = wl1251_tx_id(wl, skb);
skb               149 drivers/net/wireless/ti/wl1251/tx.c 	fc = *(u16 *)skb->data;
skb               150 drivers/net/wireless/ti/wl1251/tx.c 	tx_hdr = skb_push(skb, sizeof(*tx_hdr));
skb               152 drivers/net/wireless/ti/wl1251/tx.c 	tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
skb               158 drivers/net/wireless/ti/wl1251/tx.c 	tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
skb               167 drivers/net/wireless/ti/wl1251/tx.c static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
skb               174 drivers/net/wireless/ti/wl1251/tx.c 	if (!skb)
skb               177 drivers/net/wireless/ti/wl1251/tx.c 	tx_hdr = (struct tx_double_buffer_desc *) skb->data;
skb               186 drivers/net/wireless/ti/wl1251/tx.c 		fc = *(__le16 *)(skb->data + sizeof(*tx_hdr));
skb               192 drivers/net/wireless/ti/wl1251/tx.c 		pos = skb_push(skb, WL1251_TKIP_IV_SPACE);
skb               201 drivers/net/wireless/ti/wl1251/tx.c 	if (unlikely((long)skb->data & 0x03)) {
skb               202 drivers/net/wireless/ti/wl1251/tx.c 		int offset = (4 - (long)skb->data) & 0x03;
skb               206 drivers/net/wireless/ti/wl1251/tx.c 		if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
skb               207 drivers/net/wireless/ti/wl1251/tx.c 			struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
skb               215 drivers/net/wireless/ti/wl1251/tx.c 			dev_kfree_skb_any(skb);
skb               216 drivers/net/wireless/ti/wl1251/tx.c 			wl->tx_frames[tx_hdr->id] = skb = newskb;
skb               218 drivers/net/wireless/ti/wl1251/tx.c 			offset = (4 - (long)skb->data) & 0x03;
skb               224 drivers/net/wireless/ti/wl1251/tx.c 			unsigned char *src = skb->data;
skb               225 drivers/net/wireless/ti/wl1251/tx.c 			skb_reserve(skb, offset);
skb               226 drivers/net/wireless/ti/wl1251/tx.c 			memmove(skb->data, src, skb->len);
skb               227 drivers/net/wireless/ti/wl1251/tx.c 			tx_hdr = (struct tx_double_buffer_desc *) skb->data;
skb               232 drivers/net/wireless/ti/wl1251/tx.c 	len = WL1251_TX_ALIGN(skb->len);
skb               240 drivers/net/wireless/ti/wl1251/tx.c 	wl1251_mem_write(wl, addr, skb->data, len);
skb               243 drivers/net/wireless/ti/wl1251/tx.c 		     "queue %d", tx_hdr->id, skb, tx_hdr->length,
skb               289 drivers/net/wireless/ti/wl1251/tx.c static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
skb               295 drivers/net/wireless/ti/wl1251/tx.c 	info = IEEE80211_SKB_CB(skb);
skb               317 drivers/net/wireless/ti/wl1251/tx.c 	ret = wl1251_tx_fill_hdr(wl, skb, info);
skb               321 drivers/net/wireless/ti/wl1251/tx.c 	ret = wl1251_tx_send_packet(wl, skb, info);
skb               333 drivers/net/wireless/ti/wl1251/tx.c 	struct sk_buff *skb;
skb               342 drivers/net/wireless/ti/wl1251/tx.c 	while ((skb = skb_dequeue(&wl->tx_queue))) {
skb               350 drivers/net/wireless/ti/wl1251/tx.c 		ret = wl1251_tx_frame(wl, skb);
skb               352 drivers/net/wireless/ti/wl1251/tx.c 			skb_queue_head(&wl->tx_queue, skb);
skb               355 drivers/net/wireless/ti/wl1251/tx.c 			dev_kfree_skb(skb);
skb               399 drivers/net/wireless/ti/wl1251/tx.c 	struct sk_buff *skb;
skb               403 drivers/net/wireless/ti/wl1251/tx.c 	skb = wl->tx_frames[result->id];
skb               404 drivers/net/wireless/ti/wl1251/tx.c 	if (skb == NULL) {
skb               409 drivers/net/wireless/ti/wl1251/tx.c 	info = IEEE80211_SKB_CB(skb);
skb               423 drivers/net/wireless/ti/wl1251/tx.c 	frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
skb               426 drivers/net/wireless/ti/wl1251/tx.c 		hdrlen = ieee80211_get_hdrlen_from_skb(skb);
skb               428 drivers/net/wireless/ti/wl1251/tx.c 		skb_pull(skb, WL1251_TKIP_IV_SPACE);
skb               433 drivers/net/wireless/ti/wl1251/tx.c 		     result->id, skb, result->ack_failures, result->rate,
skb               437 drivers/net/wireless/ti/wl1251/tx.c 	ieee80211_tx_status(wl->hw, skb);
skb               548 drivers/net/wireless/ti/wl1251/tx.c 	struct sk_buff *skb;
skb               554 drivers/net/wireless/ti/wl1251/tx.c 	while ((skb = skb_dequeue(&wl->tx_queue))) {
skb               555 drivers/net/wireless/ti/wl1251/tx.c 		info = IEEE80211_SKB_CB(skb);
skb               557 drivers/net/wireless/ti/wl1251/tx.c 		wl1251_debug(DEBUG_TX, "flushing skb 0x%p", skb);
skb               562 drivers/net/wireless/ti/wl1251/tx.c 		ieee80211_tx_status(wl->hw, skb);
skb               567 drivers/net/wireless/ti/wl1251/tx.c 			skb = wl->tx_frames[i];
skb               568 drivers/net/wireless/ti/wl1251/tx.c 			info = IEEE80211_SKB_CB(skb);
skb               573 drivers/net/wireless/ti/wl1251/tx.c 			ieee80211_tx_status(wl->hw, skb);
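
The wl1251 transmit path above must hand the device a 4-byte-aligned buffer, and it distinguishes two cases: a cloned skb (or one without tailroom) is replaced via skb_copy_expand(), otherwise the payload is slid forward in place with skb_reserve() plus memmove(). A hedged sketch of that decision, mirroring wl1251_tx_send_packet():

	#include <linux/skbuff.h>
	#include <linux/string.h>

	/* returns the (possibly replaced) skb, or NULL on allocation failure */
	static struct sk_buff *align_for_tx(struct sk_buff *skb)
	{
		int offset = (4 - (long)skb->data) & 0x03;
		unsigned char *src;

		if (!offset)
			return skb;

		if (skb_cloned(skb) || skb_tailroom(skb) < offset) {
			/* can't shift in place: take an expanded private copy */
			struct sk_buff *n = skb_copy_expand(skb, 0, 3, GFP_KERNEL);

			if (!n)
				return NULL;
			dev_kfree_skb_any(skb);
			skb = n;
			offset = (4 - (long)skb->data) & 0x03;
			if (!offset)
				return skb;
		}

		/* shift data forward into the tailroom we just verified */
		src = skb->data;
		skb_reserve(skb, offset);	/* moves data and tail pointers */
		memmove(skb->data, src, skb->len);
		return skb;
	}
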
skb              1311 drivers/net/wireless/ti/wl12xx/main.c 			    struct sk_buff *skb)
skb              1313 drivers/net/wireless/ti/wl12xx/main.c 	u32 aligned_len = wlcore_calc_packet_alignment(wl, skb->len);
skb              1316 drivers/net/wireless/ti/wl12xx/main.c 		desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
skb              1328 drivers/net/wireless/ti/wl12xx/main.c 		int pad = aligned_len - skb->len;
skb              1560 drivers/net/wireless/ti/wl12xx/main.c 				    struct sk_buff *skb)
skb                53 drivers/net/wireless/ti/wl18xx/event.c 	struct sk_buff *skb;
skb                67 drivers/net/wireless/ti/wl18xx/event.c 	skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, NULL, 20,
skb                71 drivers/net/wireless/ti/wl18xx/event.c 	if (nla_put_u32(skb, WLCORE_VENDOR_ATTR_FREQ, freq)) {
skb                72 drivers/net/wireless/ti/wl18xx/event.c 		kfree_skb(skb);
skb                75 drivers/net/wireless/ti/wl18xx/event.c 	cfg80211_vendor_event(skb, GFP_KERNEL);
skb                83 drivers/net/wireless/ti/wl18xx/event.c 	struct sk_buff *skb;
skb                88 drivers/net/wireless/ti/wl18xx/event.c 	skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, NULL,
skb                93 drivers/net/wireless/ti/wl18xx/event.c 	if (nla_put(skb, WLCORE_VENDOR_ATTR_SSID, ssid_len, ssid) ||
skb                94 drivers/net/wireless/ti/wl18xx/event.c 	    nla_put(skb, WLCORE_VENDOR_ATTR_PSK, pwd_len, pwd)) {
skb                95 drivers/net/wireless/ti/wl18xx/event.c 		kfree_skb(skb);
skb                98 drivers/net/wireless/ti/wl18xx/event.c 	cfg80211_vendor_event(skb, GFP_KERNEL);
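
wl18xx's event code above follows the standard cfg80211 vendor-event discipline: the skb comes from cfg80211_vendor_event_alloc(), a failed nla_put*() means the caller still owns (and must kfree_skb) the buffer, and cfg80211_vendor_event() consumes it. Sketch, with the attribute and event ids as placeholders:

	#include <net/cfg80211.h>

	#define MY_VENDOR_EVENT_IDX 0	/* placeholder index into vendor_events[] */
	#define MY_ATTR_FREQ 1		/* placeholder attribute id */

	static void report_freq(struct wiphy *wiphy, u32 freq)
	{
		struct sk_buff *skb;

		/* ~20 bytes of payload headroom, as in the driver above */
		skb = cfg80211_vendor_event_alloc(wiphy, NULL, 20,
						  MY_VENDOR_EVENT_IDX, GFP_KERNEL);
		if (!skb)
			return;

		if (nla_put_u32(skb, MY_ATTR_FREQ, freq)) {
			kfree_skb(skb);		/* still ours on nla_put failure */
			return;
		}
		cfg80211_vendor_event(skb, GFP_KERNEL);	/* consumes the skb */
	}
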
skb              1081 drivers/net/wireless/ti/wl18xx/main.c 			    struct sk_buff *skb)
skb              1083 drivers/net/wireless/ti/wl18xx/main.c 	desc->length = cpu_to_le16(skb->len);
skb              1219 drivers/net/wireless/ti/wl18xx/main.c 				    struct sk_buff *skb)
skb              1229 drivers/net/wireless/ti/wl18xx/main.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb              1234 drivers/net/wireless/ti/wl18xx/main.c 	ip_hdr_offset = skb_network_header(skb) - skb_mac_header(skb);
skb              1243 drivers/net/wireless/ti/wl18xx/main.c 	ip_hdr = (void *)skb_network_header(skb);
skb              1249 drivers/net/wireless/ti/wl18xx/main.c 			       struct sk_buff *skb)
skb              1252 drivers/net/wireless/ti/wl18xx/main.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
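
The wl18xx checksum hooks above capture both directions of offload in a few lines: on transmit, only frames mac80211 marked CHECKSUM_PARTIAL need hardware help, and the IP header offset is recovered from the skb's header pointers; on receive, a verified frame is marked CHECKSUM_UNNECESSARY so the stack skips its own check. A minimal sketch:

	#include <linux/skbuff.h>
	#include <linux/ip.h>

	static void tx_csum_setup(struct sk_buff *skb)
	{
		int ip_hdr_offset;
		struct iphdr *ip;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return;				/* nothing to offload */

		ip_hdr_offset = skb_network_header(skb) - skb_mac_header(skb);
		ip = (struct iphdr *)skb_network_header(skb);
		/* ...program the tx descriptor from ip_hdr_offset / ip->protocol... */
		(void)ip_hdr_offset;
		(void)ip;
	}

	static void rx_csum_done(struct sk_buff *skb)
	{
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* device verified it */
	}
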
skb                65 drivers/net/wireless/ti/wl18xx/tx.c 	struct sk_buff *skb;
skb                79 drivers/net/wireless/ti/wl18xx/tx.c 	skb = wl->tx_frames[id];
skb                80 drivers/net/wireless/ti/wl18xx/tx.c 	info = IEEE80211_SKB_CB(skb);
skb                81 drivers/net/wireless/ti/wl18xx/tx.c 	tx_desc = (struct wl1271_tx_hw_descr *)skb->data;
skb                83 drivers/net/wireless/ti/wl18xx/tx.c 	if (wl12xx_is_dummy_packet(wl, skb)) {
skb               112 drivers/net/wireless/ti/wl18xx/tx.c 	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
skb               118 drivers/net/wireless/ti/wl18xx/tx.c 		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
skb               119 drivers/net/wireless/ti/wl18xx/tx.c 		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, hdrlen);
skb               120 drivers/net/wireless/ti/wl18xx/tx.c 		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
skb               124 drivers/net/wireless/ti/wl18xx/tx.c 		     id, skb, tx_success);
skb               127 drivers/net/wireless/ti/wl18xx/tx.c 	skb_queue_tail(&wl->deferred_tx_queue, skb);
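
The wl18xx tx-completion lines above undo everything the driver added at transmit time before the skb goes back to mac80211: the hardware descriptor is pulled off, and for TKIP the 802.11 header is moved back over the IV space that was reserved in front of the payload. A sketch, with desc_len/tkip_space standing in for sizeof(struct wl1271_tx_hw_descr) and WL1271_EXTRA_SPACE_TKIP:

	#include <net/mac80211.h>
	#include <linux/string.h>

	static void undo_tx_additions(struct sk_buff *skb, size_t desc_len,
				      unsigned int tkip_space)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		skb_pull(skb, desc_len);		/* drop the hw descriptor */

		if (info->control.hw_key &&
		    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			int hdrlen = ieee80211_get_hdrlen_from_skb(skb);

			/* slide the header back over the reserved IV space */
			memmove(skb->data + tkip_space, skb->data, hdrlen);
			skb_pull(skb, tkip_space);
		}
	}
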
skb              1057 drivers/net/wireless/ti/wlcore/cmd.c 	struct sk_buff *skb = NULL;
skb              1067 drivers/net/wireless/ti/wlcore/cmd.c 		skb = ieee80211_nullfunc_get(wl->hw,
skb              1070 drivers/net/wireless/ti/wlcore/cmd.c 		if (!skb)
skb              1072 drivers/net/wireless/ti/wlcore/cmd.c 		size = skb->len;
skb              1073 drivers/net/wireless/ti/wlcore/cmd.c 		ptr = skb->data;
skb              1081 drivers/net/wireless/ti/wlcore/cmd.c 	dev_kfree_skb(skb);
skb              1093 drivers/net/wireless/ti/wlcore/cmd.c 	struct sk_buff *skb = NULL;
skb              1096 drivers/net/wireless/ti/wlcore/cmd.c 	skb = ieee80211_nullfunc_get(wl->hw, vif, false);
skb              1097 drivers/net/wireless/ti/wlcore/cmd.c 	if (!skb)
skb              1101 drivers/net/wireless/ti/wlcore/cmd.c 				      skb->data, skb->len,
skb              1106 drivers/net/wireless/ti/wlcore/cmd.c 	dev_kfree_skb(skb);
skb              1118 drivers/net/wireless/ti/wlcore/cmd.c 	struct sk_buff *skb;
skb              1121 drivers/net/wireless/ti/wlcore/cmd.c 	skb = ieee80211_pspoll_get(wl->hw, vif);
skb              1122 drivers/net/wireless/ti/wlcore/cmd.c 	if (!skb)
skb              1126 drivers/net/wireless/ti/wlcore/cmd.c 				      CMD_TEMPL_PS_POLL, skb->data,
skb              1127 drivers/net/wireless/ti/wlcore/cmd.c 				      skb->len, 0, wlvif->basic_rate_set);
skb              1130 drivers/net/wireless/ti/wlcore/cmd.c 	dev_kfree_skb(skb);
skb              1141 drivers/net/wireless/ti/wlcore/cmd.c 	struct sk_buff *skb;
skb              1149 drivers/net/wireless/ti/wlcore/cmd.c 	skb = ieee80211_probereq_get(wl->hw, vif->addr, ssid, ssid_len,
skb              1151 drivers/net/wireless/ti/wlcore/cmd.c 	if (!skb) {
skb              1156 drivers/net/wireless/ti/wlcore/cmd.c 		skb_put_data(skb, ie0, ie0_len);
skb              1158 drivers/net/wireless/ti/wlcore/cmd.c 		skb_put_data(skb, ie1, ie1_len);
skb              1170 drivers/net/wireless/ti/wlcore/cmd.c 					      skb->data, skb->len, 0, rate);
skb              1174 drivers/net/wireless/ti/wlcore/cmd.c 					      skb->data, skb->len, 0, rate);
skb              1177 drivers/net/wireless/ti/wlcore/cmd.c 	dev_kfree_skb(skb);
skb              1184 drivers/net/wireless/ti/wlcore/cmd.c 					      struct sk_buff *skb)
skb              1190 drivers/net/wireless/ti/wlcore/cmd.c 	if (!skb)
skb              1191 drivers/net/wireless/ti/wlcore/cmd.c 		skb = ieee80211_ap_probereq_get(wl->hw, vif);
skb              1192 drivers/net/wireless/ti/wlcore/cmd.c 	if (!skb)
skb              1201 drivers/net/wireless/ti/wlcore/cmd.c 					      skb->data, skb->len, 0, rate);
skb              1205 drivers/net/wireless/ti/wlcore/cmd.c 					      skb->data, skb->len, 0, rate);
skb              1211 drivers/net/wireless/ti/wlcore/cmd.c 	return skb;
skb              1219 drivers/net/wireless/ti/wlcore/cmd.c 	struct sk_buff *skb;
skb              1224 drivers/net/wireless/ti/wlcore/cmd.c 	skb = dev_alloc_skb(sizeof(*hdr) + sizeof(__le16) + sizeof(*tmpl) +
skb              1226 drivers/net/wireless/ti/wlcore/cmd.c 	if (!skb) {
skb              1231 drivers/net/wireless/ti/wlcore/cmd.c 	skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX);
skb              1233 drivers/net/wireless/ti/wlcore/cmd.c 	tmpl = skb_put_zero(skb, sizeof(*tmpl));
skb              1273 drivers/net/wireless/ti/wlcore/cmd.c 		u8 *space = skb_push(skb, extra);
skb              1279 drivers/net/wireless/ti/wlcore/cmd.c 		memset(skb_push(skb, sizeof(__le16)), 0, sizeof(__le16));
skb              1282 drivers/net/wireless/ti/wlcore/cmd.c 	hdr = skb_push(skb, sizeof(*hdr));
skb              1298 drivers/net/wireless/ti/wlcore/cmd.c 				      skb->data, skb->len, 0,
skb              1301 drivers/net/wireless/ti/wlcore/cmd.c 	dev_kfree_skb(skb);
skb                55 drivers/net/wireless/ti/wlcore/cmd.h 					      struct sk_buff *skb);
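
The wlcore template builder above assembles a frame back to front: reserve worst-case headroom, skb_put_zero() the body, then skb_push() optional extras (cipher IV space, a QoS field) and finally the 802.11 header. That ordering keeps every prepend O(1); the caller must have allocated the skb large enough for all parts. A sketch of the shape:

	#include <linux/skbuff.h>
	#include <linux/string.h>

	static void build_back_to_front(struct sk_buff *skb, size_t hdr_len,
					size_t body_len, size_t extra)
	{
		void *body, *hdr;

		skb_reserve(skb, hdr_len + extra);	/* worst-case headroom */
		body = skb_put_zero(skb, body_len);	/* payload first */

		if (extra)				/* e.g. cipher IV space */
			memset(skb_push(skb, extra), 0, extra);

		hdr = skb_push(skb, hdr_len);		/* 802.11 header last */
		(void)body;
		(void)hdr;
	}
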
skb                36 drivers/net/wireless/ti/wlcore/hw_ops.h 			       struct sk_buff *skb)
skb                41 drivers/net/wireless/ti/wlcore/hw_ops.h 	wl->ops->set_tx_desc_data_len(wl, desc, skb);
skb               124 drivers/net/wireless/ti/wlcore/hw_ops.h 			   struct sk_buff *skb)
skb               129 drivers/net/wireless/ti/wlcore/hw_ops.h 	wl->ops->set_tx_desc_csum(wl, desc, skb);
skb               135 drivers/net/wireless/ti/wlcore/hw_ops.h 		      struct sk_buff *skb)
skb               138 drivers/net/wireless/ti/wlcore/hw_ops.h 		wl->ops->set_rx_csum(wl, desc, skb);
skb               496 drivers/net/wireless/ti/wlcore/main.c 	struct sk_buff *skb;
skb               499 drivers/net/wireless/ti/wlcore/main.c 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
skb               500 drivers/net/wireless/ti/wlcore/main.c 		ieee80211_rx_ni(wl->hw, skb);
skb               503 drivers/net/wireless/ti/wlcore/main.c 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
skb               504 drivers/net/wireless/ti/wlcore/main.c 		ieee80211_tx_status_ni(wl->hw, skb);
skb              1204 drivers/net/wireless/ti/wlcore/main.c 			 struct sk_buff *skb)
skb              1207 drivers/net/wireless/ti/wlcore/main.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1216 drivers/net/wireless/ti/wlcore/main.c 		ieee80211_free_txskb(hw, skb);
skb              1221 drivers/net/wireless/ti/wlcore/main.c 	mapping = skb_get_queue_mapping(skb);
skb              1224 drivers/net/wireless/ti/wlcore/main.c 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
skb              1239 drivers/net/wireless/ti/wlcore/main.c 		ieee80211_free_txskb(hw, skb);
skb              1244 drivers/net/wireless/ti/wlcore/main.c 		     hlid, q, skb->len);
skb              1245 drivers/net/wireless/ti/wlcore/main.c 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
skb              1311 drivers/net/wireless/ti/wlcore/main.c 	struct sk_buff *skb;
skb              1318 drivers/net/wireless/ti/wlcore/main.c 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
skb              1319 drivers/net/wireless/ti/wlcore/main.c 	if (!skb) {
skb              1324 drivers/net/wireless/ti/wlcore/main.c 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
skb              1326 drivers/net/wireless/ti/wlcore/main.c 	hdr = skb_put_zero(skb, sizeof(*hdr));
skb              1331 drivers/net/wireless/ti/wlcore/main.c 	skb_put_zero(skb, dummy_packet_size);
skb              1334 drivers/net/wireless/ti/wlcore/main.c 	skb->priority = WL1271_TID_MGMT;
skb              1337 drivers/net/wireless/ti/wlcore/main.c 	skb_set_queue_mapping(skb, 0);
skb              1338 drivers/net/wireless/ti/wlcore/main.c 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
skb              1340 drivers/net/wireless/ti/wlcore/main.c 	return skb;
skb              2884 drivers/net/wireless/ti/wlcore/main.c static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
skb              2888 drivers/net/wireless/ti/wlcore/main.c 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
skb              2889 drivers/net/wireless/ti/wlcore/main.c 					 skb->len - offset);
skb              2910 drivers/net/wireless/ti/wlcore/main.c 	struct sk_buff *skb;
skb              2917 drivers/net/wireless/ti/wlcore/main.c 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
skb              2918 drivers/net/wireless/ti/wlcore/main.c 	if (!skb)
skb              2923 drivers/net/wireless/ti/wlcore/main.c 	wl1271_ssid_set(wlvif, skb, ieoffset);
skb              2924 drivers/net/wireless/ti/wlcore/main.c 	dev_kfree_skb(skb);
skb              3913 drivers/net/wireless/ti/wlcore/main.c static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
skb              3916 drivers/net/wireless/ti/wlcore/main.c 	const u8 *next, *end = skb->data + skb->len;
skb              3917 drivers/net/wireless/ti/wlcore/main.c 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
skb              3918 drivers/net/wireless/ti/wlcore/main.c 					skb->len - ieoffset);
skb              3924 drivers/net/wireless/ti/wlcore/main.c 	skb_trim(skb, skb->len - len);
skb              3927 drivers/net/wireless/ti/wlcore/main.c static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
skb              3932 drivers/net/wireless/ti/wlcore/main.c 	const u8 *next, *end = skb->data + skb->len;
skb              3934 drivers/net/wireless/ti/wlcore/main.c 					       skb->data + ieoffset,
skb              3935 drivers/net/wireless/ti/wlcore/main.c 					       skb->len - ieoffset);
skb              3941 drivers/net/wireless/ti/wlcore/main.c 	skb_trim(skb, skb->len - len);
skb              3948 drivers/net/wireless/ti/wlcore/main.c 	struct sk_buff *skb;
skb              3951 drivers/net/wireless/ti/wlcore/main.c 	skb = ieee80211_proberesp_get(wl->hw, vif);
skb              3952 drivers/net/wireless/ti/wlcore/main.c 	if (!skb)
skb              3957 drivers/net/wireless/ti/wlcore/main.c 				      skb->data,
skb              3958 drivers/net/wireless/ti/wlcore/main.c 				      skb->len, 0,
skb              3960 drivers/net/wireless/ti/wlcore/main.c 	dev_kfree_skb(skb);
skb                80 drivers/net/wireless/ti/wlcore/ps.c 	struct sk_buff *skb;
skb                89 drivers/net/wireless/ti/wlcore/ps.c 		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
skb                92 drivers/net/wireless/ti/wlcore/ps.c 			if (WARN_ON(wl12xx_is_dummy_packet(wl, skb)))
skb                95 drivers/net/wireless/ti/wlcore/ps.c 			info = IEEE80211_SKB_CB(skb);
skb                98 drivers/net/wireless/ti/wlcore/ps.c 			ieee80211_tx_status_ni(wl->hw, skb);
skb               107 drivers/net/wireless/ti/wlcore/rx.c 	struct sk_buff *skb;
skb               156 drivers/net/wireless/ti/wlcore/rx.c 	skb = __dev_alloc_skb(pkt_data_len + reserved, GFP_KERNEL);
skb               157 drivers/net/wireless/ti/wlcore/rx.c 	if (!skb) {
skb               163 drivers/net/wireless/ti/wlcore/rx.c 	skb_reserve(skb, reserved);
skb               171 drivers/net/wireless/ti/wlcore/rx.c 	skb_put_data(skb, data + sizeof(*desc), pkt_data_len);
skb               173 drivers/net/wireless/ti/wlcore/rx.c 		skb_pull(skb, RX_BUF_ALIGN);
skb               177 drivers/net/wireless/ti/wlcore/rx.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               183 drivers/net/wireless/ti/wlcore/rx.c 	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
skb               185 drivers/net/wireless/ti/wlcore/rx.c 	wlcore_hw_set_rx_csum(wl, desc, skb);
skb               188 drivers/net/wireless/ti/wlcore/rx.c 	wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
skb               189 drivers/net/wireless/ti/wlcore/rx.c 		     skb->len - desc->pad_len,
skb               193 drivers/net/wireless/ti/wlcore/rx.c 	skb_queue_tail(&wl->deferred_rx_queue, skb);
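
wlcore's main.c and rx.c lines above split delivery in two: the interrupt-side code only appends to deferred_rx_queue/deferred_tx_queue (skb_queue_tail takes the queue lock with IRQs disabled), and a worker later drains both with the _ni process-context variants. This keeps the IO interrupt path short. The drain loop is essentially:

	#include <net/mac80211.h>

	static void drain_deferred(struct ieee80211_hw *hw,
				   struct sk_buff_head *rx_q,
				   struct sk_buff_head *tx_q)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(rx_q)))
			ieee80211_rx_ni(hw, skb);		/* deliver frames */
		while ((skb = skb_dequeue(tx_q)))
			ieee80211_tx_status_ni(hw, skb);	/* complete tx */
	}
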
skb                61 drivers/net/wireless/ti/wlcore/testmode.c 	struct sk_buff *skb;
skb               114 drivers/net/wireless/ti/wlcore/testmode.c 		skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
skb               115 drivers/net/wireless/ti/wlcore/testmode.c 		if (!skb) {
skb               120 drivers/net/wireless/ti/wlcore/testmode.c 		if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf)) {
skb               121 drivers/net/wireless/ti/wlcore/testmode.c 			kfree_skb(skb);
skb               126 drivers/net/wireless/ti/wlcore/testmode.c 		ret = cfg80211_testmode_reply(skb);
skb               144 drivers/net/wireless/ti/wlcore/testmode.c 	struct sk_buff *skb;
skb               180 drivers/net/wireless/ti/wlcore/testmode.c 	skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
skb               181 drivers/net/wireless/ti/wlcore/testmode.c 	if (!skb) {
skb               186 drivers/net/wireless/ti/wlcore/testmode.c 	if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd)) {
skb               187 drivers/net/wireless/ti/wlcore/testmode.c 		kfree_skb(skb);
skb               192 drivers/net/wireless/ti/wlcore/testmode.c 	ret = cfg80211_testmode_reply(skb);
skb               243 drivers/net/wireless/ti/wlcore/testmode.c 	struct sk_buff *skb;
skb               252 drivers/net/wireless/ti/wlcore/testmode.c 	skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
skb               253 drivers/net/wireless/ti/wlcore/testmode.c 	if (!skb) {
skb               258 drivers/net/wireless/ti/wlcore/testmode.c 	if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(wl->fem_manuf),
skb               260 drivers/net/wireless/ti/wlcore/testmode.c 		kfree_skb(skb);
skb               265 drivers/net/wireless/ti/wlcore/testmode.c 	ret = cfg80211_testmode_reply(skb);
skb               309 drivers/net/wireless/ti/wlcore/testmode.c 	struct sk_buff *skb;
skb               332 drivers/net/wireless/ti/wlcore/testmode.c 	skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, ETH_ALEN);
skb               333 drivers/net/wireless/ti/wlcore/testmode.c 	if (!skb) {
skb               338 drivers/net/wireless/ti/wlcore/testmode.c 	if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr)) {
skb               339 drivers/net/wireless/ti/wlcore/testmode.c 		kfree_skb(skb);
skb               344 drivers/net/wireless/ti/wlcore/testmode.c 	ret = cfg80211_testmode_reply(skb);
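
The testmode handlers above all share one reply shape, mirroring the vendor-event flow: allocate with cfg80211_testmode_alloc_reply_skb(), kfree_skb() if nla_put() fails, otherwise cfg80211_testmode_reply() consumes the buffer. A sketch with a placeholder attribute id:

	#include <net/cfg80211.h>

	#define MY_TM_ATTR_DATA 1	/* placeholder testmode attribute id */

	static int reply_blob(struct wiphy *wiphy, const void *buf, int len)
	{
		struct sk_buff *skb;

		skb = cfg80211_testmode_alloc_reply_skb(wiphy, len);
		if (!skb)
			return -ENOMEM;

		if (nla_put(skb, MY_TM_ATTR_DATA, len, buf)) {
			kfree_skb(skb);		/* still ours on failure */
			return -EMSGSIZE;
		}
		return cfg80211_testmode_reply(skb);	/* consumes the skb */
	}
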
skb                49 drivers/net/wireless/ti/wlcore/tx.c static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
skb                58 drivers/net/wireless/ti/wlcore/tx.c 	wl->tx_frames[id] = skb;
skb                77 drivers/net/wireless/ti/wlcore/tx.c 						 struct sk_buff *skb)
skb                81 drivers/net/wireless/ti/wlcore/tx.c 	hdr = (struct ieee80211_hdr *)(skb->data +
skb               134 drivers/net/wireless/ti/wlcore/tx.c bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
skb               136 drivers/net/wireless/ti/wlcore/tx.c 	return wl->dummy_packet == skb;
skb               141 drivers/net/wireless/ti/wlcore/tx.c 				struct sk_buff *skb, struct ieee80211_sta *sta)
skb               154 drivers/net/wireless/ti/wlcore/tx.c 		hdr = (struct ieee80211_hdr *)skb->data;
skb               163 drivers/net/wireless/ti/wlcore/tx.c 		      struct sk_buff *skb, struct ieee80211_sta *sta)
skb               168 drivers/net/wireless/ti/wlcore/tx.c 		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
skb               170 drivers/net/wireless/ti/wlcore/tx.c 	control = IEEE80211_SKB_CB(skb);
skb               191 drivers/net/wireless/ti/wlcore/tx.c 			      struct sk_buff *skb, u32 extra, u32 buf_offset,
skb               195 drivers/net/wireless/ti/wlcore/tx.c 	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
skb               206 drivers/net/wireless/ti/wlcore/tx.c 	id = wl1271_alloc_tx_id(wl, skb);
skb               213 drivers/net/wireless/ti/wlcore/tx.c 		desc = skb_push(skb, total_len - skb->len);
skb               232 drivers/net/wireless/ti/wlcore/tx.c 		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
skb               251 drivers/net/wireless/ti/wlcore/tx.c 			       struct sk_buff *skb, u32 extra,
skb               263 drivers/net/wireless/ti/wlcore/tx.c 	desc = (struct wl1271_tx_hw_descr *) skb->data;
skb               272 drivers/net/wireless/ti/wlcore/tx.c 		skb_set_network_header(skb, skb_network_offset(skb) + extra);
skb               279 drivers/net/wireless/ti/wlcore/tx.c 	is_dummy = wl12xx_is_dummy_packet(wl, skb);
skb               286 drivers/net/wireless/ti/wlcore/tx.c 	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
skb               287 drivers/net/wireless/ti/wlcore/tx.c 	desc->tid = skb->priority;
skb               319 drivers/net/wireless/ti/wlcore/tx.c 		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
skb               331 drivers/net/wireless/ti/wlcore/tx.c 			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
skb               355 drivers/net/wireless/ti/wlcore/tx.c 	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
skb               356 drivers/net/wireless/ti/wlcore/tx.c 	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
skb               361 drivers/net/wireless/ti/wlcore/tx.c 				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
skb               370 drivers/net/wireless/ti/wlcore/tx.c 	if (!skb) {
skb               376 drivers/net/wireless/ti/wlcore/tx.c 		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
skb               380 drivers/net/wireless/ti/wlcore/tx.c 	info = IEEE80211_SKB_CB(skb);
skb               382 drivers/net/wireless/ti/wlcore/tx.c 	is_dummy = wl12xx_is_dummy_packet(wl, skb);
skb               407 drivers/net/wireless/ti/wlcore/tx.c 	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
skb               412 drivers/net/wireless/ti/wlcore/tx.c 	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
skb               415 drivers/net/wireless/ti/wlcore/tx.c 		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
skb               427 drivers/net/wireless/ti/wlcore/tx.c 	total_len = wlcore_calc_packet_alignment(wl, skb->len);
skb               429 drivers/net/wireless/ti/wlcore/tx.c 	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
skb               430 drivers/net/wireless/ti/wlcore/tx.c 	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
skb               434 drivers/net/wireless/ti/wlcore/tx.c 		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
skb               510 drivers/net/wireless/ti/wlcore/tx.c 	struct sk_buff *skb;
skb               513 drivers/net/wireless/ti/wlcore/tx.c 	skb = skb_dequeue(&lnk->tx_queue[q]);
skb               514 drivers/net/wireless/ti/wlcore/tx.c 	if (skb) {
skb               525 drivers/net/wireless/ti/wlcore/tx.c 	return skb;
skb               552 drivers/net/wireless/ti/wlcore/tx.c 	struct sk_buff *skb = NULL;
skb               566 drivers/net/wireless/ti/wlcore/tx.c 		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
skb               568 drivers/net/wireless/ti/wlcore/tx.c 		if (!skb)
skb               575 drivers/net/wireless/ti/wlcore/tx.c 	if (!skb)
skb               579 drivers/net/wireless/ti/wlcore/tx.c 	return skb;
skb               586 drivers/net/wireless/ti/wlcore/tx.c 	struct sk_buff *skb = NULL;
skb               600 drivers/net/wireless/ti/wlcore/tx.c 			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
skb               602 drivers/net/wireless/ti/wlcore/tx.c 			if (!skb)
skb               611 drivers/net/wireless/ti/wlcore/tx.c 	if (!skb) {
skb               612 drivers/net/wireless/ti/wlcore/tx.c 		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
skb               614 drivers/net/wireless/ti/wlcore/tx.c 		if (skb) {
skb               622 drivers/net/wireless/ti/wlcore/tx.c 	if (!skb) {
skb               627 drivers/net/wireless/ti/wlcore/tx.c 			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
skb               629 drivers/net/wireless/ti/wlcore/tx.c 			if (skb) {
skb               641 drivers/net/wireless/ti/wlcore/tx.c 	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
skb               643 drivers/net/wireless/ti/wlcore/tx.c 		skb = wlcore_lnk_dequeue(wl, lnk, ac);
skb               645 drivers/net/wireless/ti/wlcore/tx.c 		WARN_ON(!skb); /* we checked this before */
skb               656 drivers/net/wireless/ti/wlcore/tx.c 	if (!skb &&
skb               660 drivers/net/wireless/ti/wlcore/tx.c 		skb = wl->dummy_packet;
skb               662 drivers/net/wireless/ti/wlcore/tx.c 		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
skb               669 drivers/net/wireless/ti/wlcore/tx.c 	return skb;
skb               673 drivers/net/wireless/ti/wlcore/tx.c 				  struct sk_buff *skb, u8 hlid)
skb               676 drivers/net/wireless/ti/wlcore/tx.c 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
skb               678 drivers/net/wireless/ti/wlcore/tx.c 	if (wl12xx_is_dummy_packet(wl, skb)) {
skb               681 drivers/net/wireless/ti/wlcore/tx.c 		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
skb               695 drivers/net/wireless/ti/wlcore/tx.c static bool wl1271_tx_is_data_present(struct sk_buff *skb)
skb               697 drivers/net/wireless/ti/wlcore/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
skb               751 drivers/net/wireless/ti/wlcore/tx.c 	struct sk_buff *skb;
skb               763 drivers/net/wireless/ti/wlcore/tx.c 	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
skb               764 drivers/net/wireless/ti/wlcore/tx.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               768 drivers/net/wireless/ti/wlcore/tx.c 		if (!wl12xx_is_dummy_packet(wl, skb))
skb               773 drivers/net/wireless/ti/wlcore/tx.c 		has_data = wlvif && wl1271_tx_is_data_present(skb);
skb               774 drivers/net/wireless/ti/wlcore/tx.c 		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
skb               781 drivers/net/wireless/ti/wlcore/tx.c 			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
skb               798 drivers/net/wireless/ti/wlcore/tx.c 			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
skb               803 drivers/net/wireless/ti/wlcore/tx.c 			if (wl12xx_is_dummy_packet(wl, skb))
skb               808 drivers/net/wireless/ti/wlcore/tx.c 				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
skb               810 drivers/net/wireless/ti/wlcore/tx.c 				ieee80211_free_txskb(wl->hw, skb);
skb               817 drivers/net/wireless/ti/wlcore/tx.c 			desc = (struct wl1271_tx_hw_descr *) skb->data;
skb               903 drivers/net/wireless/ti/wlcore/tx.c 	struct sk_buff *skb;
skb               915 drivers/net/wireless/ti/wlcore/tx.c 	skb = wl->tx_frames[id];
skb               916 drivers/net/wireless/ti/wlcore/tx.c 	info = IEEE80211_SKB_CB(skb);
skb               918 drivers/net/wireless/ti/wlcore/tx.c 	if (wl12xx_is_dummy_packet(wl, skb)) {
skb               948 drivers/net/wireless/ti/wlcore/tx.c 	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
skb               954 drivers/net/wireless/ti/wlcore/tx.c 		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
skb               955 drivers/net/wireless/ti/wlcore/tx.c 		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
skb               957 drivers/net/wireless/ti/wlcore/tx.c 		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
skb               962 drivers/net/wireless/ti/wlcore/tx.c 		     result->id, skb, result->ack_failures,
skb               966 drivers/net/wireless/ti/wlcore/tx.c 	skb_queue_tail(&wl->deferred_tx_queue, skb);
skb              1020 drivers/net/wireless/ti/wlcore/tx.c 	struct sk_buff *skb;
skb              1029 drivers/net/wireless/ti/wlcore/tx.c 		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
skb              1030 drivers/net/wireless/ti/wlcore/tx.c 			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
skb              1032 drivers/net/wireless/ti/wlcore/tx.c 			if (!wl12xx_is_dummy_packet(wl, skb)) {
skb              1033 drivers/net/wireless/ti/wlcore/tx.c 				info = IEEE80211_SKB_CB(skb);
skb              1036 drivers/net/wireless/ti/wlcore/tx.c 				ieee80211_tx_status_ni(wl->hw, skb);
skb              1079 drivers/net/wireless/ti/wlcore/tx.c 	struct sk_buff *skb;
skb              1102 drivers/net/wireless/ti/wlcore/tx.c 		skb = wl->tx_frames[i];
skb              1104 drivers/net/wireless/ti/wlcore/tx.c 		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
skb              1106 drivers/net/wireless/ti/wlcore/tx.c 		if (!wl12xx_is_dummy_packet(wl, skb)) {
skb              1111 drivers/net/wireless/ti/wlcore/tx.c 			info = IEEE80211_SKB_CB(skb);
skb              1112 drivers/net/wireless/ti/wlcore/tx.c 			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
skb              1117 drivers/net/wireless/ti/wlcore/tx.c 				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
skb              1118 drivers/net/wireless/ti/wlcore/tx.c 				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
skb              1119 drivers/net/wireless/ti/wlcore/tx.c 					skb->data, hdrlen);
skb              1120 drivers/net/wireless/ti/wlcore/tx.c 				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
skb              1126 drivers/net/wireless/ti/wlcore/tx.c 			ieee80211_tx_status_ni(wl->hw, skb);
skb               240 drivers/net/wireless/ti/wlcore/tx.h 		      struct sk_buff *skb, struct ieee80211_sta *sta);
skb               243 drivers/net/wireless/ti/wlcore/tx.h bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
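
wl1271_tx_allocate() above sizes the firmware block as skb->len plus the descriptor plus any cipher extra space, then claims all of it in one skb_push(). A sketch of that front-growth, hedged since the real function also programs memory-block accounting:

	#include <linux/skbuff.h>
	#include <linux/types.h>

	static void *push_tx_descr(struct sk_buff *skb, size_t descr_len, u32 extra)
	{
		/* mirrors the driver's arithmetic: total_len counts the frame,
		 * the hw descriptor, and cipher extra space */
		u32 total_len = skb->len + descr_len + extra;

		/* one push covers descriptor + extra (== total_len - skb->len) */
		return skb_push(skb, total_len - skb->len);
	}
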
skb                55 drivers/net/wireless/ti/wlcore/wlcore.h 				     struct sk_buff *skb);
skb                73 drivers/net/wireless/ti/wlcore/wlcore.h 				 struct sk_buff *skb);
skb                76 drivers/net/wireless/ti/wlcore/wlcore.h 			    struct sk_buff *skb);
skb               398 drivers/net/wireless/virt_wifi.c static netdev_tx_t virt_wifi_start_xmit(struct sk_buff *skb,
skb               409 drivers/net/wireless/virt_wifi.c 	skb->dev = priv->lowerdev;
skb               410 drivers/net/wireless/virt_wifi.c 	return dev_queue_xmit(skb);
skb               466 drivers/net/wireless/virt_wifi.c 	struct sk_buff *skb = *pskb;
skb               468 drivers/net/wireless/virt_wifi.c 		rcu_dereference(skb->dev->rx_handler_data);
skb               474 drivers/net/wireless/virt_wifi.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               475 drivers/net/wireless/virt_wifi.c 	if (!skb) {
skb               480 drivers/net/wireless/virt_wifi.c 	*pskb = skb;
skb               481 drivers/net/wireless/virt_wifi.c 	skb->dev = priv->upperdev;
skb               482 drivers/net/wireless/virt_wifi.c 	skb->pkt_type = PACKET_HOST;
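
virt_wifi's rx handler above is a textbook netdev rx_handler: unshare the skb before writing to it, propagate the possibly-new pointer back through *pskb, retarget skb->dev at the upper device, and return RX_HANDLER_ANOTHER so the stack reruns receive on that device. A sketch assuming the upper device was passed as rx_handler_data when netdev_rx_handler_register() was called (the real driver stores a priv struct there instead):

	#include <linux/netdevice.h>

	static rx_handler_result_t upper_rx_handler(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;
		struct net_device *upper_dev =
			rcu_dereference(skb->dev->rx_handler_data);

		skb = skb_share_check(skb, GFP_ATOMIC);	/* unshare before edits */
		if (!skb)
			return RX_HANDLER_CONSUMED;	/* dropped on alloc failure */

		*pskb = skb;				/* caller must see new skb */
		skb->dev = upper_dev;
		skb->pkt_type = PACKET_HOST;
		return RX_HANDLER_ANOTHER;		/* re-enter rx on upper_dev */
	}
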
skb               952 drivers/net/wireless/wl3501_cs.c 	struct sk_buff *skb;
skb               974 drivers/net/wireless/wl3501_cs.c 	skb = dev_alloc_skb(pkt_len + 5);
skb               976 drivers/net/wireless/wl3501_cs.c 	if (!skb) {
skb               981 drivers/net/wireless/wl3501_cs.c 		skb->dev = dev;
skb               982 drivers/net/wireless/wl3501_cs.c 		skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
skb               983 drivers/net/wireless/wl3501_cs.c 		skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
skb               984 drivers/net/wireless/wl3501_cs.c 		wl3501_receive(this, skb->data, pkt_len);
skb               985 drivers/net/wireless/wl3501_cs.c 		skb_put(skb, pkt_len);
skb               986 drivers/net/wireless/wl3501_cs.c 		skb->protocol	= eth_type_trans(skb, dev);
skb               988 drivers/net/wireless/wl3501_cs.c 		dev->stats.rx_bytes += skb->len;
skb               989 drivers/net/wireless/wl3501_cs.c 		netif_rx(skb);
skb              1309 drivers/net/wireless/wl3501_cs.c static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
skb              1318 drivers/net/wireless/wl3501_cs.c 	rc = wl3501_send_pkt(this, skb->data, skb->len);
skb              1326 drivers/net/wireless/wl3501_cs.c 		dev->stats.tx_bytes += skb->len;
skb              1327 drivers/net/wireless/wl3501_cs.c 		kfree_skb(skb);
skb               197 drivers/net/wireless/zydas/zd1201.c 	struct sk_buff *skb;
skb               322 drivers/net/wireless/zydas/zd1201.c 			if (!(skb = dev_alloc_skb(datalen+24)))
skb               325 drivers/net/wireless/zydas/zd1201.c 			skb_put_data(skb, &data[datalen - 16], 2);
skb               326 drivers/net/wireless/zydas/zd1201.c 			skb_put_data(skb, &data[datalen - 2], 2);
skb               327 drivers/net/wireless/zydas/zd1201.c 			skb_put_data(skb, &data[datalen - 14], 6);
skb               328 drivers/net/wireless/zydas/zd1201.c 			skb_put_data(skb, &data[datalen - 22], 6);
skb               329 drivers/net/wireless/zydas/zd1201.c 			skb_put_data(skb, &data[datalen - 8], 6);
skb               330 drivers/net/wireless/zydas/zd1201.c 			skb_put_data(skb, &data[datalen - 24], 2);
skb               331 drivers/net/wireless/zydas/zd1201.c 			skb_put_data(skb, data, len);
skb               332 drivers/net/wireless/zydas/zd1201.c 			skb->protocol = eth_type_trans(skb, zd->dev);
skb               334 drivers/net/wireless/zydas/zd1201.c 			zd->dev->stats.rx_bytes += skb->len;
skb               335 drivers/net/wireless/zydas/zd1201.c 			netif_rx(skb);
skb               350 drivers/net/wireless/zydas/zd1201.c 				skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN +14+2);
skb               351 drivers/net/wireless/zydas/zd1201.c 				if (!skb) {
skb               355 drivers/net/wireless/zydas/zd1201.c 				frag->skb = skb;
skb               357 drivers/net/wireless/zydas/zd1201.c 				skb_reserve(skb, 2);
skb               358 drivers/net/wireless/zydas/zd1201.c 				skb_put_data(skb, &data[datalen - 14], 12);
skb               359 drivers/net/wireless/zydas/zd1201.c 				skb_put_data(skb, &data[6], 2);
skb               360 drivers/net/wireless/zydas/zd1201.c 				skb_put_data(skb, data + 8, len);
skb               369 drivers/net/wireless/zydas/zd1201.c 			skb = frag->skb;
skb               370 drivers/net/wireless/zydas/zd1201.c 			ptr = skb_put(skb, len);
skb               380 drivers/net/wireless/zydas/zd1201.c 			skb = dev_alloc_skb(len + 14 + 2);
skb               381 drivers/net/wireless/zydas/zd1201.c 			if (!skb)
skb               383 drivers/net/wireless/zydas/zd1201.c 			skb_reserve(skb, 2);
skb               384 drivers/net/wireless/zydas/zd1201.c 			skb_put_data(skb, &data[datalen - 14], 12);
skb               385 drivers/net/wireless/zydas/zd1201.c 			skb_put_data(skb, &data[6], 2);
skb               386 drivers/net/wireless/zydas/zd1201.c 			skb_put_data(skb, data + 8, len);
skb               388 drivers/net/wireless/zydas/zd1201.c 		skb->protocol = eth_type_trans(skb, zd->dev);
skb               390 drivers/net/wireless/zydas/zd1201.c 		zd->dev->stats.rx_bytes += skb->len;
skb               391 drivers/net/wireless/zydas/zd1201.c 		netif_rx(skb);
skb               783 drivers/net/wireless/zydas/zd1201.c static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb,
skb               793 drivers/net/wireless/zydas/zd1201.c 		kfree_skb(skb);
skb               798 drivers/net/wireless/zydas/zd1201.c 	txbuflen = skb->len + 8 + 1;
skb               810 drivers/net/wireless/zydas/zd1201.c 	skb_copy_from_linear_data_offset(skb, 12, txbuf + 6, skb->len - 12);
skb               812 drivers/net/wireless/zydas/zd1201.c 		txbuf[skb->len-12+6]=0;
skb               813 drivers/net/wireless/zydas/zd1201.c 	skb_copy_from_linear_data(skb, txbuf + skb->len - 12 + 6 + pad, 12);
skb               814 drivers/net/wireless/zydas/zd1201.c 	*(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6);
skb               826 drivers/net/wireless/zydas/zd1201.c 		dev->stats.tx_bytes += skb->len;
skb               828 drivers/net/wireless/zydas/zd1201.c 	kfree_skb(skb);
skb              1841 drivers/net/wireless/zydas/zd1201.c 		kfree_skb(frag->skb);
skb                56 drivers/net/wireless/zydas/zd1201.h 	struct sk_buff		*skb;
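
zd1201's receive path above reassembles fragmented frames by hand: the first fragment allocates a maximum-size skb (IEEE80211_MAX_DATA_LEN plus a 14-byte Ethernet header and 2 alignment bytes) and parks it in a fragment slot; follow-on fragments append with skb_put(). A hedged sketch of the two cases (the real first-fragment path also rebuilds the Ethernet header from pieces of the 802.11 frame):

	#include <linux/skbuff.h>
	#include <linux/ieee80211.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	struct frag_slot {			/* stands in for the driver's frag entry */
		struct sk_buff *skb;
	};

	static int rx_fragment(struct frag_slot *frag, bool first,
			       const u8 *data, int len)
	{
		struct sk_buff *skb;

		if (first) {
			skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN + 14 + 2);
			if (!skb)
				return -ENOMEM;
			skb_reserve(skb, 2);	/* 16-byte align the IP header */
			frag->skb = skb;	/* remember for later fragments */
		} else {
			skb = frag->skb;
		}
		memcpy(skb_put(skb, len), data, len);	/* append this piece */
		return 0;
	}
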
skb               333 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct sk_buff *skb;
skb               353 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	while ((skb = skb_dequeue(ack_wait_queue)))
skb               354 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		dev_kfree_skb_any(skb);
skb               431 drivers/net/wireless/zydas/zd1211rw/zd_mac.c static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
skb               434 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               475 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	ieee80211_tx_status_irqsafe(hw, skb);
skb               491 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct sk_buff *skb;
skb               501 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	skb_queue_walk(q, skb) {
skb               514 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		    skb_queue_is_first(q, skb)) {
skb               518 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		tx_hdr = (struct ieee80211_hdr *)skb->data;
skb               526 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		info = IEEE80211_SKB_CB(skb);
skb               546 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 			skb = __skb_dequeue(q);
skb               547 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 			zd_mac_tx_status(hw, skb,
skb               567 drivers/net/wireless/zydas/zd1211rw/zd_mac.c void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
skb               569 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               575 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	skb_pull(skb, sizeof(struct zd_ctrlset));
skb               581 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		ieee80211_tx_status_irqsafe(hw, skb);
skb               585 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		skb_queue_tail(q, skb);
skb               851 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 			struct sk_buff *skb)
skb               854 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               855 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	unsigned int frag_len = skb->len + FCS_LEN;
skb               858 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct zd_ctrlset *cs = skb_push(skb, sizeof(struct zd_ctrlset));
skb               859 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               926 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		     struct sk_buff *skb)
skb               929 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               932 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	r = fill_ctrlset(mac, skb);
skb               938 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	r = zd_usb_tx(&mac->chip.usb, skb);
skb               944 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	dev_kfree_skb(skb);
skb               965 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct sk_buff *skb;
skb               976 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	skb_queue_walk(q, skb) {
skb               981 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		if (mac->ack_pending && skb_queue_is_first(q, skb))
skb               984 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		tx_hdr = (struct ieee80211_hdr *)skb->data;
skb               994 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 			skb = __skb_dequeue(q);
skb               995 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 			zd_mac_tx_status(hw, skb,
skb              1006 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 			skb = __skb_dequeue(q);
skb              1007 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 			zd_mac_tx_status(hw, skb, mac->ack_signal, NULL);
skb              1021 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct sk_buff *skb;
skb              1083 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	skb = dev_alloc_skb(length + (need_padding ? 2 : 0));
skb              1084 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	if (skb == NULL)
skb              1088 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		skb_reserve(skb, 2);
skb              1092 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	skb_put_data(skb, buffer, length);
skb              1094 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
skb              1095 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	ieee80211_rx_irqsafe(hw, skb);
skb              1151 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 	struct sk_buff *skb, *beacon;
skb              1162 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
skb              1163 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		if (!skb)
skb              1165 drivers/net/wireless/zydas/zd1211rw/zd_mac.c 		zd_op_tx(mac->hw, NULL, skb);
skb               303 drivers/net/wireless/zydas/zd1211rw/zd_mac.h void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
skb               962 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	struct sk_buff *skb;
skb               967 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	skb = (struct sk_buff *)urb->context;
skb               968 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	info = IEEE80211_SKB_CB(skb);
skb               992 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	skb_unlink(skb, &usb->tx.submitted_skbs);
skb               993 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	zd_mac_tx_to_dev(skb, urb->status);
skb              1019 drivers/net/wireless/zydas/zd1211rw/zd_usb.c int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
skb              1022 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1039 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		          skb->data, skb->len, tx_urb_complete, skb);
skb              1042 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	skb_queue_tail(&tx->submitted_skbs, skb);
skb              1049 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		skb_unlink(skb, &tx->submitted_skbs);
skb              1064 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	struct sk_buff *skb, *skbnext;
skb              1070 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	skb_queue_walk_safe(q, skb, skbnext) {
skb              1071 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		info = IEEE80211_SKB_CB(skb);
skb               256 drivers/net/wireless/zydas/zd1211rw/zd_usb.h int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb);
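The zd1211rw entries above revolve around one idiom: transmitted frames sit on an skb queue (ack_wait_queue, tx.submitted_skbs) until their fate is known, then each is handed back to mac80211. A minimal sketch of the drain step, using only the generic mac80211/skb APIs visible above; flush_tx_queue is an illustrative name, not the driver's:

#include <linux/skbuff.h>
#include <net/mac80211.h>

static void flush_tx_queue(struct ieee80211_hw *hw, struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		/* Wipe driver-private status fields before reporting. */
		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}
}

The _irqsafe variant matters here because, as the entries show, completion is reported from USB/interrupt context rather than from process context.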
skb               322 drivers/net/xen-netback/common.h #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
skb               379 drivers/net/xen-netback/common.h void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
skb               409 drivers/net/xen-netback/common.h 				 struct sk_buff *skb);
skb               428 drivers/net/xen-netback/common.h void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
skb               149 drivers/net/xen-netback/hash.c void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
skb               161 drivers/net/xen-netback/hash.c 	switch (skb->protocol) {
skb               181 drivers/net/xen-netback/hash.c 	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
skb               187 drivers/net/xen-netback/hash.c 	switch (skb->protocol) {
skb               239 drivers/net/xen-netback/hash.c 		skb_clear_hash(skb);
skb               241 drivers/net/xen-netback/hash.c 		__skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
skb                56 drivers/net/xen-netback/interface.c 				 struct sk_buff *skb)
skb                58 drivers/net/xen-netback/interface.c 	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
skb               150 drivers/net/xen-netback/interface.c static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               164 drivers/net/xen-netback/interface.c 		return netdev_pick_tx(dev, skb, NULL) %
skb               167 drivers/net/xen-netback/interface.c 	xenvif_set_skb_hash(vif, skb);
skb               170 drivers/net/xen-netback/interface.c 		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
skb               173 drivers/net/xen-netback/interface.c 				[skb_get_hash_raw(skb) % size];
skb               177 drivers/net/xen-netback/interface.c xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               185 drivers/net/xen-netback/interface.c 	BUG_ON(skb->dev != dev);
skb               196 drivers/net/xen-netback/interface.c 	index = skb_get_queue_mapping(skb);
skb               210 drivers/net/xen-netback/interface.c 	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
skb               211 drivers/net/xen-netback/interface.c 		struct ethhdr *eth = (struct ethhdr *)skb->data;
skb               217 drivers/net/xen-netback/interface.c 	cb = XENVIF_RX_CB(skb);
skb               225 drivers/net/xen-netback/interface.c 		skb_clear_hash(skb);
skb               227 drivers/net/xen-netback/interface.c 	xenvif_rx_queue_tail(queue, skb);
skb               234 drivers/net/xen-netback/interface.c 	dev_kfree_skb(skb);
skb               325 drivers/net/xen-netback/netback.c #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
skb               345 drivers/net/xen-netback/netback.c 	struct sk_buff *skb =
skb               348 drivers/net/xen-netback/netback.c 	if (unlikely(skb == NULL))
skb               352 drivers/net/xen-netback/netback.c 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
skb               355 drivers/net/xen-netback/netback.c 	skb_shinfo(skb)->destructor_arg = NULL;
skb               357 drivers/net/xen-netback/netback.c 	return skb;
skb               361 drivers/net/xen-netback/netback.c 							struct sk_buff *skb,
skb               367 drivers/net/xen-netback/netback.c 	struct skb_shared_info *shinfo = skb_shinfo(skb);
skb               369 drivers/net/xen-netback/netback.c 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
skb               402 drivers/net/xen-netback/netback.c 		skb_shinfo(skb)->frag_list = nskb;
skb               436 drivers/net/xen-netback/netback.c 			       struct sk_buff *skb,
skb               441 drivers/net/xen-netback/netback.c 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
skb               445 drivers/net/xen-netback/netback.c 	struct skb_shared_info *shinfo = skb_shinfo(skb);
skb               521 drivers/net/xen-netback/netback.c 					   XENVIF_TX_CB(skb)->pending_idx,
skb               548 drivers/net/xen-netback/netback.c 	if (skb_has_frag_list(skb) && !first_shinfo) {
skb               549 drivers/net/xen-netback/netback.c 		first_shinfo = skb_shinfo(skb);
skb               550 drivers/net/xen-netback/netback.c 		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
skb               560 drivers/net/xen-netback/netback.c static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
skb               562 drivers/net/xen-netback/netback.c 	struct skb_shared_info *shinfo = skb_shinfo(skb);
skb               577 drivers/net/xen-netback/netback.c 			skb_shinfo(skb)->destructor_arg =
skb               588 drivers/net/xen-netback/netback.c 		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb               589 drivers/net/xen-netback/netback.c 		skb->len += txp->size;
skb               590 drivers/net/xen-netback/netback.c 		skb->data_len += txp->size;
skb               591 drivers/net/xen-netback/netback.c 		skb->truesize += txp->size;
skb               633 drivers/net/xen-netback/netback.c 			      struct sk_buff *skb,
skb               644 drivers/net/xen-netback/netback.c 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
skb               647 drivers/net/xen-netback/netback.c 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb               655 drivers/net/xen-netback/netback.c 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
skb               661 drivers/net/xen-netback/netback.c static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
skb               670 drivers/net/xen-netback/netback.c 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
skb               672 drivers/net/xen-netback/netback.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb               677 drivers/net/xen-netback/netback.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb               680 drivers/net/xen-netback/netback.c 	return skb_checksum_setup(skb, recalculate_partial_csum);
skb               794 drivers/net/xen-netback/netback.c 	struct sk_buff *skb, *nskb;
skb               906 drivers/net/xen-netback/netback.c 		skb = xenvif_alloc_skb(data_len);
skb               907 drivers/net/xen-netback/netback.c 		if (unlikely(skb == NULL)) {
skb               914 drivers/net/xen-netback/netback.c 		skb_shinfo(skb)->nr_frags = ret;
skb               916 drivers/net/xen-netback/netback.c 			skb_shinfo(skb)->nr_frags++;
skb               922 drivers/net/xen-netback/netback.c 		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
skb               923 drivers/net/xen-netback/netback.c 			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
skb               925 drivers/net/xen-netback/netback.c 			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
skb               928 drivers/net/xen-netback/netback.c 				skb_shinfo(skb)->nr_frags = 0;
skb               929 drivers/net/xen-netback/netback.c 				kfree_skb(skb);
skb               942 drivers/net/xen-netback/netback.c 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
skb               944 drivers/net/xen-netback/netback.c 				skb_shinfo(skb)->nr_frags = 0;
skb               945 drivers/net/xen-netback/netback.c 				kfree_skb(skb);
skb               973 drivers/net/xen-netback/netback.c 				skb_set_hash(skb,
skb               978 drivers/net/xen-netback/netback.c 		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
skb               980 drivers/net/xen-netback/netback.c 		__skb_put(skb, data_len);
skb               986 drivers/net/xen-netback/netback.c 			virt_to_gfn(skb->data);
skb               989 drivers/net/xen-netback/netback.c 			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
skb               997 drivers/net/xen-netback/netback.c 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
skb              1003 drivers/net/xen-netback/netback.c 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
skb              1013 drivers/net/xen-netback/netback.c 		gop = xenvif_get_requests(queue, skb, txfrags, gop,
skb              1016 drivers/net/xen-netback/netback.c 		__skb_queue_tail(&queue->tx_queue, skb);
skb              1032 drivers/net/xen-netback/netback.c static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
skb              1034 drivers/net/xen-netback/netback.c 	unsigned int offset = skb_headlen(skb);
skb              1038 drivers/net/xen-netback/netback.c 	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
skb              1045 drivers/net/xen-netback/netback.c 	skb->truesize -= skb->data_len;
skb              1046 drivers/net/xen-netback/netback.c 	skb->len += nskb->len;
skb              1047 drivers/net/xen-netback/netback.c 	skb->data_len += nskb->len;
skb              1050 drivers/net/xen-netback/netback.c 	for (i = 0; offset < skb->len; i++) {
skb              1058 drivers/net/xen-netback/netback.c 			skb->truesize += skb->data_len;
skb              1064 drivers/net/xen-netback/netback.c 		if (offset + PAGE_SIZE < skb->len)
skb              1067 drivers/net/xen-netback/netback.c 			len = skb->len - offset;
skb              1068 drivers/net/xen-netback/netback.c 		if (skb_copy_bits(skb, offset, page_address(page), len))
skb              1078 drivers/net/xen-netback/netback.c 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
skb              1079 drivers/net/xen-netback/netback.c 		skb_frag_unref(skb, f);
skb              1080 drivers/net/xen-netback/netback.c 	uarg = skb_shinfo(skb)->destructor_arg;
skb              1084 drivers/net/xen-netback/netback.c 	skb_shinfo(skb)->destructor_arg = NULL;
skb              1087 drivers/net/xen-netback/netback.c 	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
skb              1088 drivers/net/xen-netback/netback.c 	skb_shinfo(skb)->nr_frags = i;
skb              1089 drivers/net/xen-netback/netback.c 	skb->truesize += i * PAGE_SIZE;
skb              1098 drivers/net/xen-netback/netback.c 	struct sk_buff *skb;
skb              1101 drivers/net/xen-netback/netback.c 	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
skb              1106 drivers/net/xen-netback/netback.c 		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
skb              1110 drivers/net/xen-netback/netback.c 		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
skb              1115 drivers/net/xen-netback/netback.c 			skb_shinfo(skb)->nr_frags = 0;
skb              1116 drivers/net/xen-netback/netback.c 			if (skb_has_frag_list(skb)) {
skb              1118 drivers/net/xen-netback/netback.c 						skb_shinfo(skb)->frag_list;
skb              1121 drivers/net/xen-netback/netback.c 			kfree_skb(skb);
skb              1125 drivers/net/xen-netback/netback.c 		data_len = skb->len;
skb              1138 drivers/net/xen-netback/netback.c 			skb->ip_summed = CHECKSUM_PARTIAL;
skb              1140 drivers/net/xen-netback/netback.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1142 drivers/net/xen-netback/netback.c 		xenvif_fill_frags(queue, skb);
skb              1144 drivers/net/xen-netback/netback.c 		if (unlikely(skb_has_frag_list(skb))) {
skb              1145 drivers/net/xen-netback/netback.c 			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
skb              1147 drivers/net/xen-netback/netback.c 			if (xenvif_handle_frag_list(queue, skb)) {
skb              1151 drivers/net/xen-netback/netback.c 				xenvif_skb_zerocopy_prepare(queue, skb);
skb              1152 drivers/net/xen-netback/netback.c 				kfree_skb(skb);
skb              1156 drivers/net/xen-netback/netback.c 			skb_frag_list_init(skb);
skb              1160 drivers/net/xen-netback/netback.c 		skb->dev      = queue->vif->dev;
skb              1161 drivers/net/xen-netback/netback.c 		skb->protocol = eth_type_trans(skb, skb->dev);
skb              1162 drivers/net/xen-netback/netback.c 		skb_reset_network_header(skb);
skb              1164 drivers/net/xen-netback/netback.c 		if (checksum_setup(queue, skb)) {
skb              1168 drivers/net/xen-netback/netback.c 			if (skb_shinfo(skb)->destructor_arg)
skb              1169 drivers/net/xen-netback/netback.c 				xenvif_skb_zerocopy_prepare(queue, skb);
skb              1170 drivers/net/xen-netback/netback.c 			kfree_skb(skb);
skb              1174 drivers/net/xen-netback/netback.c 		skb_probe_transport_header(skb);
skb              1180 drivers/net/xen-netback/netback.c 		if (skb_is_gso(skb)) {
skb              1184 drivers/net/xen-netback/netback.c 			WARN_ON_ONCE(!skb_transport_header_was_set(skb));
skb              1185 drivers/net/xen-netback/netback.c 			if (unlikely(!skb_transport_header_was_set(skb))) {
skb              1186 drivers/net/xen-netback/netback.c 				kfree_skb(skb);
skb              1190 drivers/net/xen-netback/netback.c 			mss = skb_shinfo(skb)->gso_size;
skb              1191 drivers/net/xen-netback/netback.c 			hdrlen = skb_transport_header(skb) -
skb              1192 drivers/net/xen-netback/netback.c 				skb_mac_header(skb) +
skb              1193 drivers/net/xen-netback/netback.c 				tcp_hdrlen(skb);
skb              1195 drivers/net/xen-netback/netback.c 			skb_shinfo(skb)->gso_segs =
skb              1196 drivers/net/xen-netback/netback.c 				DIV_ROUND_UP(skb->len - hdrlen, mss);
skb              1199 drivers/net/xen-netback/netback.c 		queue->stats.rx_bytes += skb->len;
skb              1209 drivers/net/xen-netback/netback.c 		if (skb_shinfo(skb)->destructor_arg) {
skb              1210 drivers/net/xen-netback/netback.c 			xenvif_skb_zerocopy_prepare(queue, skb);
skb              1214 drivers/net/xen-netback/netback.c 		netif_receive_skb(skb);
skb                39 drivers/net/xen-netback/rx.c 	struct sk_buff *skb;
skb                42 drivers/net/xen-netback/rx.c 	skb = skb_peek(&queue->rx_queue);
skb                43 drivers/net/xen-netback/rx.c 	if (!skb)
skb                46 drivers/net/xen-netback/rx.c 	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
skb                47 drivers/net/xen-netback/rx.c 	if (skb_is_gso(skb))
skb                49 drivers/net/xen-netback/rx.c 	if (skb->sw_hash)
skb                70 drivers/net/xen-netback/rx.c void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
skb                76 drivers/net/xen-netback/rx.c 	__skb_queue_tail(&queue->rx_queue, skb);
skb                78 drivers/net/xen-netback/rx.c 	queue->rx_queue_len += skb->len;
skb                90 drivers/net/xen-netback/rx.c 	struct sk_buff *skb;
skb                94 drivers/net/xen-netback/rx.c 	skb = __skb_dequeue(&queue->rx_queue);
skb                95 drivers/net/xen-netback/rx.c 	if (skb) {
skb                96 drivers/net/xen-netback/rx.c 		queue->rx_queue_len -= skb->len;
skb               107 drivers/net/xen-netback/rx.c 	return skb;
skb               112 drivers/net/xen-netback/rx.c 	struct sk_buff *skb;
skb               114 drivers/net/xen-netback/rx.c 	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
skb               115 drivers/net/xen-netback/rx.c 		kfree_skb(skb);
skb               120 drivers/net/xen-netback/rx.c 	struct sk_buff *skb;
skb               123 drivers/net/xen-netback/rx.c 		skb = skb_peek(&queue->rx_queue);
skb               124 drivers/net/xen-netback/rx.c 		if (!skb)
skb               126 drivers/net/xen-netback/rx.c 		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
skb               129 drivers/net/xen-netback/rx.c 		kfree_skb(skb);
skb               204 drivers/net/xen-netback/rx.c static unsigned int xenvif_gso_type(struct sk_buff *skb)
skb               206 drivers/net/xen-netback/rx.c 	if (skb_is_gso(skb)) {
skb               207 drivers/net/xen-netback/rx.c 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
skb               216 drivers/net/xen-netback/rx.c 	struct sk_buff *skb;
skb               229 drivers/net/xen-netback/rx.c 	struct sk_buff *skb;
skb               232 drivers/net/xen-netback/rx.c 	skb = xenvif_rx_dequeue(queue);
skb               234 drivers/net/xen-netback/rx.c 	queue->stats.tx_bytes += skb->len;
skb               240 drivers/net/xen-netback/rx.c 	pkt->skb = skb;
skb               241 drivers/net/xen-netback/rx.c 	pkt->frag_iter = skb;
skb               242 drivers/net/xen-netback/rx.c 	pkt->remaining_len = skb->len;
skb               245 drivers/net/xen-netback/rx.c 	gso_type = xenvif_gso_type(skb);
skb               252 drivers/net/xen-netback/rx.c 		extra->u.gso.size = skb_shinfo(skb)->gso_size;
skb               261 drivers/net/xen-netback/rx.c 	if (skb->sw_hash) {
skb               269 drivers/net/xen-netback/rx.c 		if (skb->l4_hash)
skb               271 drivers/net/xen-netback/rx.c 				skb->protocol == htons(ETH_P_IP) ?
skb               276 drivers/net/xen-netback/rx.c 				skb->protocol == htons(ETH_P_IP) ?
skb               280 drivers/net/xen-netback/rx.c 		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);
skb               295 drivers/net/xen-netback/rx.c 	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
skb               307 drivers/net/xen-netback/rx.c 		if (frag_iter == pkt->skb)
skb               380 drivers/net/xen-netback/rx.c 		struct sk_buff *skb = pkt->skb;
skb               382 drivers/net/xen-netback/rx.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               385 drivers/net/xen-netback/rx.c 		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
skb               505 drivers/net/xen-netback/rx.c 	struct sk_buff *skb;
skb               508 drivers/net/xen-netback/rx.c 	skb = skb_peek(&queue->rx_queue);
skb               509 drivers/net/xen-netback/rx.c 	if (!skb)
skb               512 drivers/net/xen-netback/rx.c 	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
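A pattern worth calling out in the xen-netback entries: XENVIF_RX_CB/XENVIF_TX_CB (and NETFRONT_SKB_CB below) stash per-packet driver state in the skb's 48-byte control block through a cast macro. A minimal, hypothetical sketch of the same technique; demo_rx_cb and the demo_ helpers are illustrative names:

#include <linux/skbuff.h>
#include <linux/jiffies.h>

struct demo_rx_cb {
	unsigned long expires;	/* drop deadline, cf. the expires checks above */
};

#define DEMO_RX_CB(skb) ((struct demo_rx_cb *)(skb)->cb)

static void demo_stamp(struct sk_buff *skb, unsigned long timeout)
{
	/* The private struct must fit in the 48-byte cb[] area. */
	BUILD_BUG_ON(sizeof(struct demo_rx_cb) > sizeof_field(struct sk_buff, cb));
	DEMO_RX_CB(skb)->expires = jiffies + timeout;
}

static bool demo_expired(struct sk_buff *skb)
{
	return time_before(DEMO_RX_CB(skb)->expires, jiffies);
}

The cb area is owned by whichever layer currently holds the skb, which is why netback and netfront can each reuse it for their own bookkeeping.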
skb                72 drivers/net/xen-netfront.c #define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
skb               130 drivers/net/xen-netfront.c 		struct sk_buff *skb;
skb               177 drivers/net/xen-netfront.c 	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
skb               178 drivers/net/xen-netfront.c 	return (unsigned long)list->skb < PAGE_OFFSET;
skb               209 drivers/net/xen-netfront.c 	struct sk_buff *skb = queue->rx_skbs[i];
skb               211 drivers/net/xen-netfront.c 	return skb;
skb               259 drivers/net/xen-netfront.c 	struct sk_buff *skb;
skb               262 drivers/net/xen-netfront.c 	skb = __netdev_alloc_skb(queue->info->netdev,
skb               265 drivers/net/xen-netfront.c 	if (unlikely(!skb))
skb               270 drivers/net/xen-netfront.c 		kfree_skb(skb);
skb               273 drivers/net/xen-netfront.c 	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
skb               276 drivers/net/xen-netfront.c 	skb_reserve(skb, NET_IP_ALIGN);
skb               277 drivers/net/xen-netfront.c 	skb->dev = queue->info->netdev;
skb               279 drivers/net/xen-netfront.c 	return skb;
skb               295 drivers/net/xen-netfront.c 		struct sk_buff *skb;
skb               301 drivers/net/xen-netfront.c 		skb = xennet_alloc_one_rx_buffer(queue);
skb               302 drivers/net/xen-netfront.c 		if (!skb) {
skb               310 drivers/net/xen-netfront.c 		queue->rx_skbs[id] = skb;
skb               316 drivers/net/xen-netfront.c 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
skb               378 drivers/net/xen-netfront.c 	struct sk_buff *skb;
skb               395 drivers/net/xen-netfront.c 			skb = queue->tx_skbs[id].skb;
skb               409 drivers/net/xen-netfront.c 			dev_kfree_skb_irq(skb);
skb               422 drivers/net/xen-netfront.c 	struct sk_buff *skb;
skb               438 drivers/net/xen-netfront.c 	struct sk_buff *skb = info->skb;
skb               448 drivers/net/xen-netfront.c 	queue->tx_skbs[id].skb = skb;
skb               463 drivers/net/xen-netfront.c 	struct netfront_queue *queue, struct sk_buff *skb,
skb               468 drivers/net/xen-netfront.c 		.skb = skb,
skb               484 drivers/net/xen-netfront.c 	skb_get(info->skb);
skb               490 drivers/net/xen-netfront.c 	struct sk_buff *skb, struct page *page,
skb               495 drivers/net/xen-netfront.c 		.skb = skb,
skb               523 drivers/net/xen-netfront.c static int xennet_count_skb_slots(struct sk_buff *skb)
skb               525 drivers/net/xen-netfront.c 	int i, frags = skb_shinfo(skb)->nr_frags;
skb               528 drivers/net/xen-netfront.c 	slots = gnttab_count_grant(offset_in_page(skb->data),
skb               529 drivers/net/xen-netfront.c 				   skb_headlen(skb));
skb               532 drivers/net/xen-netfront.c 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
skb               545 drivers/net/xen-netfront.c static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               556 drivers/net/xen-netfront.c 		hash = skb_get_hash(skb);
skb               565 drivers/net/xen-netfront.c static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               586 drivers/net/xen-netfront.c 	queue_index = skb_get_queue_mapping(skb);
skb               592 drivers/net/xen-netfront.c 	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
skb               595 drivers/net/xen-netfront.c 			skb->len);
skb               599 drivers/net/xen-netfront.c 	slots = xennet_count_skb_slots(skb);
skb               602 drivers/net/xen-netfront.c 				    slots, skb->len);
skb               603 drivers/net/xen-netfront.c 		if (skb_linearize(skb))
skb               607 drivers/net/xen-netfront.c 	page = virt_to_page(skb->data);
skb               608 drivers/net/xen-netfront.c 	offset = offset_in_page(skb->data);
skb               614 drivers/net/xen-netfront.c 		nskb = skb_copy(skb, GFP_ATOMIC);
skb               617 drivers/net/xen-netfront.c 		dev_consume_skb_any(skb);
skb               618 drivers/net/xen-netfront.c 		skb = nskb;
skb               619 drivers/net/xen-netfront.c 		page = virt_to_page(skb->data);
skb               620 drivers/net/xen-netfront.c 		offset = offset_in_page(skb->data);
skb               623 drivers/net/xen-netfront.c 	len = skb_headlen(skb);
skb               629 drivers/net/xen-netfront.c 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
skb               635 drivers/net/xen-netfront.c 	first_tx = tx = xennet_make_first_txreq(queue, skb,
skb               644 drivers/net/xen-netfront.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               647 drivers/net/xen-netfront.c 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
skb               652 drivers/net/xen-netfront.c 	if (skb_shinfo(skb)->gso_size) {
skb               660 drivers/net/xen-netfront.c 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
skb               661 drivers/net/xen-netfront.c 		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
skb               672 drivers/net/xen-netfront.c 	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
skb               675 drivers/net/xen-netfront.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               676 drivers/net/xen-netfront.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               677 drivers/net/xen-netfront.c 		tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
skb               683 drivers/net/xen-netfront.c 	first_tx->size = skb->len;
skb               690 drivers/net/xen-netfront.c 	tx_stats->bytes += skb->len;
skb               706 drivers/net/xen-netfront.c 	dev_kfree_skb_any(skb);
skb               724 drivers/net/xen-netfront.c static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
skb               730 drivers/net/xen-netfront.c 	queue->rx_skbs[new] = skb;
skb               748 drivers/net/xen-netfront.c 		struct sk_buff *skb;
skb               772 drivers/net/xen-netfront.c 		skb = xennet_get_rx_skb(queue, cons);
skb               774 drivers/net/xen-netfront.c 		xennet_move_rx_slot(queue, skb, ref);
skb               789 drivers/net/xen-netfront.c 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
skb               807 drivers/net/xen-netfront.c 			xennet_move_rx_slot(queue, skb, ref);
skb               830 drivers/net/xen-netfront.c 		__skb_queue_tail(list, skb);
skb               844 drivers/net/xen-netfront.c 		skb = xennet_get_rx_skb(queue, cons + slots);
skb               861 drivers/net/xen-netfront.c static int xennet_set_skb_gso(struct sk_buff *skb,
skb               877 drivers/net/xen-netfront.c 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
skb               878 drivers/net/xen-netfront.c 	skb_shinfo(skb)->gso_type =
skb               884 drivers/net/xen-netfront.c 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb               885 drivers/net/xen-netfront.c 	skb_shinfo(skb)->gso_segs = 0;
skb               891 drivers/net/xen-netfront.c 			     struct sk_buff *skb,
skb               902 drivers/net/xen-netfront.c 		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
skb               903 drivers/net/xen-netfront.c 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
skb               905 drivers/net/xen-netfront.c 			BUG_ON(pull_to < skb_headlen(skb));
skb               906 drivers/net/xen-netfront.c 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
skb               908 drivers/net/xen-netfront.c 		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
skb               914 drivers/net/xen-netfront.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb               927 drivers/net/xen-netfront.c static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
skb               937 drivers/net/xen-netfront.c 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
skb               940 drivers/net/xen-netfront.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb               945 drivers/net/xen-netfront.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb               948 drivers/net/xen-netfront.c 	return skb_checksum_setup(skb, recalculate_partial_csum);
skb               956 drivers/net/xen-netfront.c 	struct sk_buff *skb;
skb               958 drivers/net/xen-netfront.c 	while ((skb = __skb_dequeue(rxq)) != NULL) {
skb               959 drivers/net/xen-netfront.c 		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
skb               961 drivers/net/xen-netfront.c 		if (pull_to > skb_headlen(skb))
skb               962 drivers/net/xen-netfront.c 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
skb               965 drivers/net/xen-netfront.c 		skb->protocol = eth_type_trans(skb, queue->info->netdev);
skb               966 drivers/net/xen-netfront.c 		skb_reset_network_header(skb);
skb               968 drivers/net/xen-netfront.c 		if (checksum_setup(queue->info->netdev, skb)) {
skb               969 drivers/net/xen-netfront.c 			kfree_skb(skb);
skb               977 drivers/net/xen-netfront.c 		rx_stats->bytes += skb->len;
skb               981 drivers/net/xen-netfront.c 		napi_gro_receive(&queue->napi, skb);
skb               991 drivers/net/xen-netfront.c 	struct sk_buff *skb;
skb              1021 drivers/net/xen-netfront.c 			while ((skb = __skb_dequeue(&tmpq)))
skb              1022 drivers/net/xen-netfront.c 				__skb_queue_tail(&errq, skb);
skb              1028 drivers/net/xen-netfront.c 		skb = __skb_dequeue(&tmpq);
skb              1034 drivers/net/xen-netfront.c 			if (unlikely(xennet_set_skb_gso(skb, gso))) {
skb              1035 drivers/net/xen-netfront.c 				__skb_queue_head(&tmpq, skb);
skb              1041 drivers/net/xen-netfront.c 		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
skb              1042 drivers/net/xen-netfront.c 		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
skb              1043 drivers/net/xen-netfront.c 			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
skb              1045 drivers/net/xen-netfront.c 		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
skb              1046 drivers/net/xen-netfront.c 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
skb              1047 drivers/net/xen-netfront.c 		skb->data_len = rx->status;
skb              1048 drivers/net/xen-netfront.c 		skb->len += rx->status;
skb              1050 drivers/net/xen-netfront.c 		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
skb              1054 drivers/net/xen-netfront.c 			skb->ip_summed = CHECKSUM_PARTIAL;
skb              1056 drivers/net/xen-netfront.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1058 drivers/net/xen-netfront.c 		__skb_queue_tail(&rxq, skb);
skb              1131 drivers/net/xen-netfront.c 	struct sk_buff *skb;
skb              1139 drivers/net/xen-netfront.c 		skb = queue->tx_skbs[i].skb;
skb              1147 drivers/net/xen-netfront.c 		dev_kfree_skb_irq(skb);
skb              1158 drivers/net/xen-netfront.c 		struct sk_buff *skb;
skb              1161 drivers/net/xen-netfront.c 		skb = queue->rx_skbs[id];
skb              1162 drivers/net/xen-netfront.c 		if (!skb)
skb              1169 drivers/net/xen-netfront.c 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
skb              1179 drivers/net/xen-netfront.c 		kfree_skb(skb);
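The xen-netfront receive entries end in the canonical delivery sequence most virtual NICs use: pull enough bytes into the linear area, derive the protocol, normalize checksum state, then feed GRO. A minimal sketch under those assumptions (deliver_rx_queue is an illustrative name; the real driver also updates per-queue stats):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void deliver_rx_queue(struct napi_struct *napi,
			     struct net_device *dev,
			     struct sk_buff_head *rxq,
			     unsigned int pull_to)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		/* Ensure the headers live in the linear area. */
		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		skb->protocol = eth_type_trans(skb, dev);
		skb_reset_network_header(skb);

		/* Backend-supplied checksum metadata may be bogus. */
		if (skb_checksum_setup(skb, true)) {
			kfree_skb(skb);
			continue;
		}

		napi_gro_receive(napi, skb);
	}
}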
skb               185 drivers/nfc/fdp/fdp.c 	struct sk_buff *skb;
skb               212 drivers/nfc/fdp/fdp.c 		skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
skb               214 drivers/nfc/fdp/fdp.c 		if (!skb) {
skb               220 drivers/nfc/fdp/fdp.c 		skb_reserve(skb, NCI_CTRL_HDR_SIZE);
skb               222 drivers/nfc/fdp/fdp.c 		skb_put_data(skb, fw->data + (fw->size - len), payload_size);
skb               224 drivers/nfc/fdp/fdp.c 		rc = nci_send_data(ndev, conn_id, skb);
skb               259 drivers/nfc/fdp/fdp.c static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
skb               269 drivers/nfc/fdp/fdp.c 	return info->phy_ops->write(info->phy, skb);
skb               272 drivers/nfc/fdp/fdp.c int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
skb               278 drivers/nfc/fdp/fdp.c 	return nci_recv_frame(ndev, skb);
skb               598 drivers/nfc/fdp/fdp.c 					  struct sk_buff *skb)
skb               611 drivers/nfc/fdp/fdp.c 					  struct sk_buff *skb)
skb               618 drivers/nfc/fdp/fdp.c 	info->setup_patch_status = skb->data[0];
skb               625 drivers/nfc/fdp/fdp.c 					  struct sk_buff *skb)
skb               629 drivers/nfc/fdp/fdp.c 	u8 status = skb->data[0];
skb               638 drivers/nfc/fdp/fdp.c 							struct sk_buff *skb)
skb               642 drivers/nfc/fdp/fdp.c 	u8 status = skb->data[0];
skb               651 drivers/nfc/fdp/fdp.c 						struct sk_buff *skb)
skb               655 drivers/nfc/fdp/fdp.c 	struct nci_core_get_config_rsp *rsp = (void *) skb->data;
skb                28 drivers/nfc/fdp/fdp.h int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
skb                35 drivers/nfc/fdp/i2c.c #define fdp_nci_i2c_dump_skb(dev, prefix, skb)				\
skb                37 drivers/nfc/fdp/i2c.c 		       16, 1, (skb)->data, (skb)->len, 0)
skb                66 drivers/nfc/fdp/i2c.c static void fdp_nci_i2c_add_len_lrc(struct sk_buff *skb)
skb                72 drivers/nfc/fdp/i2c.c 	len = skb->len;
skb                73 drivers/nfc/fdp/i2c.c 	*(u8 *)skb_push(skb, 1) = len & 0xff;
skb                74 drivers/nfc/fdp/i2c.c 	*(u8 *)skb_push(skb, 1) = len >> 8;
skb                78 drivers/nfc/fdp/i2c.c 		lrc ^= skb->data[i];
skb                80 drivers/nfc/fdp/i2c.c 	skb_put_u8(skb, lrc);
skb                83 drivers/nfc/fdp/i2c.c static void fdp_nci_i2c_remove_len_lrc(struct sk_buff *skb)
skb                85 drivers/nfc/fdp/i2c.c 	skb_pull(skb, FDP_FRAME_HEADROOM);
skb                86 drivers/nfc/fdp/i2c.c 	skb_trim(skb, skb->len - FDP_FRAME_TAILROOM);
skb                89 drivers/nfc/fdp/i2c.c static int fdp_nci_i2c_write(void *phy_id, struct sk_buff *skb)
skb                98 drivers/nfc/fdp/i2c.c 	fdp_nci_i2c_add_len_lrc(skb);
skb                99 drivers/nfc/fdp/i2c.c 	fdp_nci_i2c_dump_skb(&client->dev, "fdp_wr", skb);
skb               101 drivers/nfc/fdp/i2c.c 	r = i2c_master_send(client, skb->data, skb->len);
skb               104 drivers/nfc/fdp/i2c.c 		r = i2c_master_send(client, skb->data, skb->len);
skb               107 drivers/nfc/fdp/i2c.c 	if (r < 0 || r != skb->len)
skb               109 drivers/nfc/fdp/i2c.c 			__func__, r, skb->len);
skb               112 drivers/nfc/fdp/i2c.c 		if (r != skb->len) {
skb               120 drivers/nfc/fdp/i2c.c 	fdp_nci_i2c_remove_len_lrc(skb);
skb               131 drivers/nfc/fdp/i2c.c static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb)
skb               138 drivers/nfc/fdp/i2c.c 	*skb = NULL;
skb               174 drivers/nfc/fdp/i2c.c 			*skb = alloc_skb(len, GFP_KERNEL);
skb               175 drivers/nfc/fdp/i2c.c 			if (*skb == NULL) {
skb               180 drivers/nfc/fdp/i2c.c 			skb_put_data(*skb, tmp, len);
skb               181 drivers/nfc/fdp/i2c.c 			fdp_nci_i2c_dump_skb(&client->dev, "fdp_rd", *skb);
skb               183 drivers/nfc/fdp/i2c.c 			fdp_nci_i2c_remove_len_lrc(*skb);
skb               201 drivers/nfc/fdp/i2c.c 	struct sk_buff *skb;
skb               212 drivers/nfc/fdp/i2c.c 	r = fdp_nci_i2c_read(phy, &skb);
skb               219 drivers/nfc/fdp/i2c.c 	if (skb != NULL)
skb               220 drivers/nfc/fdp/i2c.c 		fdp_nci_recv_frame(phy->ndev, skb);
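The fdp I2C entries above frame every NCI packet with a 2-byte big-endian length plus a trailing XOR LRC, and strip both again on receive. A minimal sketch of that framing, assuming the same 2-byte headroom / 1-byte tailroom layout (the demo_ names are illustrative):

#include <linux/skbuff.h>

static void demo_add_len_lrc(struct sk_buff *skb)
{
	u8 lrc = 0;
	u16 len = skb->len;
	int i;

	*(u8 *)skb_push(skb, 1) = len & 0xff;	/* length, low byte  */
	*(u8 *)skb_push(skb, 1) = len >> 8;	/* length, high byte */

	for (i = 0; i < skb->len; i++)		/* LRC covers header + payload */
		lrc ^= skb->data[i];
	skb_put_u8(skb, lrc);
}

static void demo_remove_len_lrc(struct sk_buff *skb)
{
	skb_pull(skb, 2);			/* drop the 2-byte length     */
	skb_trim(skb, skb->len - 1);		/* drop the trailing LRC byte */
}

An XOR LRC is about the cheapest integrity check available, which suits a low-speed I2C link.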
skb                70 drivers/nfc/mei_phy.c #define MEI_DUMP_SKB_IN(info, skb)				\
skb                74 drivers/nfc/mei_phy.c 			16, 1, (skb)->data, (skb)->len, false);	\
skb                77 drivers/nfc/mei_phy.c #define MEI_DUMP_SKB_OUT(info, skb)				\
skb                81 drivers/nfc/mei_phy.c 			16, 1, (skb)->data, (skb)->len, false);	\
skb               251 drivers/nfc/mei_phy.c static int nfc_mei_phy_write(void *phy_id, struct sk_buff *skb)
skb               256 drivers/nfc/mei_phy.c 	MEI_DUMP_SKB_OUT("mei frame sent", skb);
skb               258 drivers/nfc/mei_phy.c 	r = mei_nfc_send(phy, skb->data, skb->len);
skb               291 drivers/nfc/mei_phy.c 	struct sk_buff *skb;
skb               300 drivers/nfc/mei_phy.c 	skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
skb               301 drivers/nfc/mei_phy.c 	if (!skb)
skb               304 drivers/nfc/mei_phy.c 	reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ);
skb               306 drivers/nfc/mei_phy.c 		kfree_skb(skb);
skb               310 drivers/nfc/mei_phy.c 	skb_put(skb, reply_size);
skb               311 drivers/nfc/mei_phy.c 	skb_pull(skb, MEI_NFC_HEADER_SIZE);
skb               313 drivers/nfc/mei_phy.c 	MEI_DUMP_SKB_IN("mei frame read", skb);
skb               315 drivers/nfc/mei_phy.c 	nfc_hci_recv_frame(phy->hdev, skb);
skb                48 drivers/nfc/microread/i2c.c #define I2C_DUMP_SKB(info, skb)					\
skb                52 drivers/nfc/microread/i2c.c 		       16, 1, (skb)->data, (skb)->len, 0);	\
skb                55 drivers/nfc/microread/i2c.c static void microread_i2c_add_len_crc(struct sk_buff *skb)
skb                61 drivers/nfc/microread/i2c.c 	len = skb->len;
skb                62 drivers/nfc/microread/i2c.c 	*(u8 *)skb_push(skb, 1) = len;
skb                64 drivers/nfc/microread/i2c.c 	for (i = 0; i < skb->len; i++)
skb                65 drivers/nfc/microread/i2c.c 		crc = crc ^ skb->data[i];
skb                67 drivers/nfc/microread/i2c.c 	skb_put_u8(skb, crc);
skb                70 drivers/nfc/microread/i2c.c static void microread_i2c_remove_len_crc(struct sk_buff *skb)
skb                72 drivers/nfc/microread/i2c.c 	skb_pull(skb, MICROREAD_I2C_FRAME_HEADROOM);
skb                73 drivers/nfc/microread/i2c.c 	skb_trim(skb, MICROREAD_I2C_FRAME_TAILROOM);
skb                76 drivers/nfc/microread/i2c.c static int check_crc(struct sk_buff *skb)
skb                81 drivers/nfc/microread/i2c.c 	for (i = 0; i < skb->len - 1; i++)
skb                82 drivers/nfc/microread/i2c.c 		crc = crc ^ skb->data[i];
skb                84 drivers/nfc/microread/i2c.c 	if (crc != skb->data[skb->len-1]) {
skb                85 drivers/nfc/microread/i2c.c 		pr_err("CRC error 0x%x != 0x%x\n", crc, skb->data[skb->len-1]);
skb               103 drivers/nfc/microread/i2c.c static int microread_i2c_write(void *phy_id, struct sk_buff *skb)
skb               114 drivers/nfc/microread/i2c.c 	microread_i2c_add_len_crc(skb);
skb               116 drivers/nfc/microread/i2c.c 	I2C_DUMP_SKB("i2c frame written", skb);
skb               118 drivers/nfc/microread/i2c.c 	r = i2c_master_send(client, skb->data, skb->len);
skb               122 drivers/nfc/microread/i2c.c 		r = i2c_master_send(client, skb->data, skb->len);
skb               126 drivers/nfc/microread/i2c.c 		if (r != skb->len)
skb               132 drivers/nfc/microread/i2c.c 	microread_i2c_remove_len_crc(skb);
skb               139 drivers/nfc/microread/i2c.c 			      struct sk_buff **skb)
skb               159 drivers/nfc/microread/i2c.c 	*skb = alloc_skb(1 + len, GFP_KERNEL);
skb               160 drivers/nfc/microread/i2c.c 	if (*skb == NULL) {
skb               165 drivers/nfc/microread/i2c.c 	skb_put_u8(*skb, len);
skb               167 drivers/nfc/microread/i2c.c 	r = i2c_master_recv(client, skb_put(*skb, len), len);
skb               169 drivers/nfc/microread/i2c.c 		kfree_skb(*skb);
skb               173 drivers/nfc/microread/i2c.c 	I2C_DUMP_SKB("cc frame read", *skb);
skb               175 drivers/nfc/microread/i2c.c 	r = check_crc(*skb);
skb               177 drivers/nfc/microread/i2c.c 		kfree_skb(*skb);
skb               182 drivers/nfc/microread/i2c.c 	skb_pull(*skb, 1);
skb               183 drivers/nfc/microread/i2c.c 	skb_trim(*skb, (*skb)->len - MICROREAD_I2C_FRAME_TAILROOM);
skb               201 drivers/nfc/microread/i2c.c 	struct sk_buff *skb = NULL;
skb               212 drivers/nfc/microread/i2c.c 	r = microread_i2c_read(phy, &skb);
skb               223 drivers/nfc/microread/i2c.c 	nfc_hci_recv_frame(phy->hdev, skb);
skb               218 drivers/nfc/microread/microread.c static int microread_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
skb               222 drivers/nfc/microread/microread.c 	return info->phy_ops->write(info->phy_id, skb);
skb               358 drivers/nfc/microread/microread.c static void microread_im_transceive_cb(void *context, struct sk_buff *skb,
skb               366 drivers/nfc/microread/microread.c 			if (skb->len == 0) {
skb               368 drivers/nfc/microread/microread.c 				kfree_skb(skb);
skb               374 drivers/nfc/microread/microread.c 			if (skb->data[skb->len - 1] != 0) {
skb               376 drivers/nfc/microread/microread.c 						       skb->data[skb->len - 1]);
skb               377 drivers/nfc/microread/microread.c 				kfree_skb(skb);
skb               383 drivers/nfc/microread/microread.c 			skb_trim(skb, skb->len - 1);	/* RF Error ind. */
skb               385 drivers/nfc/microread/microread.c 		info->async_cb(info->async_cb_context, skb, err);
skb               389 drivers/nfc/microread/microread.c 			kfree_skb(skb);
skb               401 drivers/nfc/microread/microread.c 				   struct sk_buff *skb, data_exchange_cb_t cb,
skb               411 drivers/nfc/microread/microread.c 		*(u8 *)skb_push(skb, 1) = 0;
skb               415 drivers/nfc/microread/microread.c 				     skb->data, skb->len);
skb               431 drivers/nfc/microread/microread.c 		crc = crc_ccitt(0xffff, skb->data, skb->len);
skb               433 drivers/nfc/microread/microread.c 		skb_put_u8(skb, crc & 0xff);
skb               434 drivers/nfc/microread/microread.c 		skb_put_u8(skb, crc >> 8);
skb               445 drivers/nfc/microread/microread.c 	*(u8 *)skb_push(skb, 1) = control_bits;
skb               453 drivers/nfc/microread/microread.c 				      skb->data, skb->len,
skb               457 drivers/nfc/microread/microread.c static int microread_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
skb               463 drivers/nfc/microread/microread.c 			       skb->data, skb->len);
skb               465 drivers/nfc/microread/microread.c 	kfree_skb(skb);
skb               471 drivers/nfc/microread/microread.c 					struct sk_buff *skb)
skb               489 drivers/nfc/microread/microread.c 		      nfc_hci_sak_to_protocol(skb->data[MICROREAD_EMCF_A_SAK]);
skb               491 drivers/nfc/microread/microread.c 			 be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]);
skb               492 drivers/nfc/microread/microread.c 		targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK];
skb               493 drivers/nfc/microread/microread.c 		targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN];
skb               498 drivers/nfc/microread/microread.c 		memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
skb               503 drivers/nfc/microread/microread.c 		      nfc_hci_sak_to_protocol(skb->data[MICROREAD_EMCF_A3_SAK]);
skb               505 drivers/nfc/microread/microread.c 			 be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]);
skb               506 drivers/nfc/microread/microread.c 		targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK];
skb               507 drivers/nfc/microread/microread.c 		targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN];
skb               512 drivers/nfc/microread/microread.c 		memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
skb               517 drivers/nfc/microread/microread.c 		memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_B_UID], 4);
skb               523 drivers/nfc/microread/microread.c 			le16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_T1_ATQA]);
skb               524 drivers/nfc/microread/microread.c 		memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_T1_UID], 4);
skb               529 drivers/nfc/microread/microread.c 		memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_T3_UID], 8);
skb               543 drivers/nfc/microread/microread.c 	kfree_skb(skb);
skb               550 drivers/nfc/microread/microread.c 				     u8 event, struct sk_buff *skb)
skb               560 drivers/nfc/microread/microread.c 		microread_target_discovered(hdev, gate, skb);
skb               564 drivers/nfc/microread/microread.c 		if (skb->len < 1) {
skb               565 drivers/nfc/microread/microread.c 			kfree_skb(skb);
skb               569 drivers/nfc/microread/microread.c 		if (skb->data[skb->len - 1]) {
skb               570 drivers/nfc/microread/microread.c 			kfree_skb(skb);
skb               574 drivers/nfc/microread/microread.c 		skb_trim(skb, skb->len - 1);
skb               576 drivers/nfc/microread/microread.c 		r = nfc_tm_data_received(hdev->ndev, skb);
skb               581 drivers/nfc/microread/microread.c 		kfree_skb(skb);
skb               586 drivers/nfc/microread/microread.c 				     NFC_COMM_PASSIVE, skb->data,
skb               587 drivers/nfc/microread/microread.c 				     skb->len);
skb               589 drivers/nfc/microread/microread.c 		kfree_skb(skb);
skb               593 drivers/nfc/microread/microread.c 		if (skb->len < 1) {
skb               594 drivers/nfc/microread/microread.c 			kfree_skb(skb);
skb               598 drivers/nfc/microread/microread.c 		if (skb->data[skb->len-1]) {
skb               599 drivers/nfc/microread/microread.c 			kfree_skb(skb);
skb               603 drivers/nfc/microread/microread.c 		skb_trim(skb, skb->len - 1);
skb               605 drivers/nfc/microread/microread.c 		r = nfc_tm_data_received(hdev->ndev, skb);
skb               609 drivers/nfc/microread/microread.c 		kfree_skb(skb);
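microread's transceive path (around the crc_ccitt call above) instead appends a CRC-CCITT over the payload, low byte first, before handing the frame to the chip. A minimal sketch of just that append step, using the kernel's crc-ccitt helper; demo_append_crc_ccitt is an illustrative name:

#include <linux/crc-ccitt.h>
#include <linux/skbuff.h>

static void demo_append_crc_ccitt(struct sk_buff *skb)
{
	u16 crc = crc_ccitt(0xffff, skb->data, skb->len);

	skb_put_u8(skb, crc & 0xff);	/* low byte first, as above */
	skb_put_u8(skb, crc >> 8);
}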
skb                86 drivers/nfc/nfcmrvl/fw_dnld.c 	struct sk_buff *skb;
skb                89 drivers/nfc/nfcmrvl/fw_dnld.c 	skb = nci_skb_alloc(priv->ndev, (NCI_DATA_HDR_SIZE + plen), GFP_KERNEL);
skb                90 drivers/nfc/nfcmrvl/fw_dnld.c 	if (!skb) {
skb                95 drivers/nfc/nfcmrvl/fw_dnld.c 	hdr = skb_put(skb, NCI_DATA_HDR_SIZE);
skb               103 drivers/nfc/nfcmrvl/fw_dnld.c 	return skb;
skb               143 drivers/nfc/nfcmrvl/fw_dnld.c 			       struct sk_buff *skb)
skb               145 drivers/nfc/nfcmrvl/fw_dnld.c 	if (sizeof(nci_pattern_core_reset_ntf) != skb->len ||
skb               146 drivers/nfc/nfcmrvl/fw_dnld.c 	    memcmp(skb->data, nci_pattern_core_reset_ntf,
skb               159 drivers/nfc/nfcmrvl/fw_dnld.c static int process_state_init(struct nfcmrvl_private *priv, struct sk_buff *skb)
skb               163 drivers/nfc/nfcmrvl/fw_dnld.c 	if (sizeof(nci_pattern_core_init_rsp) >= skb->len ||
skb               164 drivers/nfc/nfcmrvl/fw_dnld.c 	    memcmp(skb->data, nci_pattern_core_init_rsp,
skb               189 drivers/nfc/nfcmrvl/fw_dnld.c 				       struct sk_buff *skb)
skb               193 drivers/nfc/nfcmrvl/fw_dnld.c 	if (sizeof(nci_pattern_core_set_config_rsp) != skb->len ||
skb               194 drivers/nfc/nfcmrvl/fw_dnld.c 	    memcmp(skb->data, nci_pattern_core_set_config_rsp, skb->len))
skb               235 drivers/nfc/nfcmrvl/fw_dnld.c 				       struct sk_buff *skb)
skb               237 drivers/nfc/nfcmrvl/fw_dnld.c 	if (sizeof(nci_pattern_core_set_config_rsp) != skb->len ||
skb               238 drivers/nfc/nfcmrvl/fw_dnld.c 	    memcmp(skb->data, nci_pattern_core_set_config_rsp, skb->len))
skb               246 drivers/nfc/nfcmrvl/fw_dnld.c 				 struct sk_buff *skb)
skb               248 drivers/nfc/nfcmrvl/fw_dnld.c 	if (sizeof(nci_pattern_core_conn_create_rsp) >= skb->len ||
skb               249 drivers/nfc/nfcmrvl/fw_dnld.c 	    memcmp(skb->data, nci_pattern_core_conn_create_rsp,
skb               260 drivers/nfc/nfcmrvl/fw_dnld.c 				 struct sk_buff *skb)
skb               278 drivers/nfc/nfcmrvl/fw_dnld.c 		skb_pull(skb, 3);
skb               279 drivers/nfc/nfcmrvl/fw_dnld.c 		if (skb->data[0] != HELPER_CMD_PACKET_FORMAT || skb->len != 5) {
skb               283 drivers/nfc/nfcmrvl/fw_dnld.c 		skb_pull(skb, 1);
skb               284 drivers/nfc/nfcmrvl/fw_dnld.c 		len = get_unaligned_le16(skb->data);
skb               285 drivers/nfc/nfcmrvl/fw_dnld.c 		skb_pull(skb, 2);
skb               286 drivers/nfc/nfcmrvl/fw_dnld.c 		comp_len = get_unaligned_le16(skb->data);
skb               287 drivers/nfc/nfcmrvl/fw_dnld.c 		memcpy(&comp_len, skb->data, 2);
skb               288 drivers/nfc/nfcmrvl/fw_dnld.c 		skb_pull(skb, 2);
skb               310 drivers/nfc/nfcmrvl/fw_dnld.c 		if (sizeof(nci_pattern_core_conn_credits_ntf) != skb->len ||
skb               311 drivers/nfc/nfcmrvl/fw_dnld.c 		    memcmp(nci_pattern_core_conn_credits_ntf, skb->data,
skb               312 drivers/nfc/nfcmrvl/fw_dnld.c 			   skb->len)) {
skb               336 drivers/nfc/nfcmrvl/fw_dnld.c 		if (sizeof(nci_pattern_core_conn_credits_ntf) != skb->len ||
skb               337 drivers/nfc/nfcmrvl/fw_dnld.c 		    memcmp(nci_pattern_core_conn_credits_ntf, skb->data,
skb               338 drivers/nfc/nfcmrvl/fw_dnld.c 			    skb->len)) {
skb               348 drivers/nfc/nfcmrvl/fw_dnld.c 		if (sizeof(nci_pattern_core_conn_credits_ntf) != skb->len ||
skb               349 drivers/nfc/nfcmrvl/fw_dnld.c 		    memcmp(nci_pattern_core_conn_credits_ntf, skb->data,
skb               350 drivers/nfc/nfcmrvl/fw_dnld.c 			    skb->len)) {
skb               361 drivers/nfc/nfcmrvl/fw_dnld.c 				  struct sk_buff *skb)
skb               363 drivers/nfc/nfcmrvl/fw_dnld.c 	if (sizeof(nci_pattern_core_conn_close_rsp) != skb->len ||
skb               364 drivers/nfc/nfcmrvl/fw_dnld.c 	    memcmp(skb->data, nci_pattern_core_conn_close_rsp, skb->len))
skb               372 drivers/nfc/nfcmrvl/fw_dnld.c static int process_state_boot(struct nfcmrvl_private *priv, struct sk_buff *skb)
skb               374 drivers/nfc/nfcmrvl/fw_dnld.c 	if (sizeof(nci_pattern_proprietary_boot_rsp) != skb->len ||
skb               375 drivers/nfc/nfcmrvl/fw_dnld.c 	    memcmp(skb->data, nci_pattern_proprietary_boot_rsp, skb->len))
skb               404 drivers/nfc/nfcmrvl/fw_dnld.c 	struct sk_buff *skb;
skb               412 drivers/nfc/nfcmrvl/fw_dnld.c 	while ((skb = skb_dequeue(&fw_dnld->rx_q))) {
skb               413 drivers/nfc/nfcmrvl/fw_dnld.c 		nfc_send_to_raw_sock(priv->ndev->nfc_dev, skb,
skb               417 drivers/nfc/nfcmrvl/fw_dnld.c 			ret = process_state_reset(priv, skb);
skb               420 drivers/nfc/nfcmrvl/fw_dnld.c 			ret = process_state_init(priv, skb);
skb               423 drivers/nfc/nfcmrvl/fw_dnld.c 			ret = process_state_set_ref_clock(priv, skb);
skb               426 drivers/nfc/nfcmrvl/fw_dnld.c 			ret = process_state_set_hi_config(priv, skb);
skb               429 drivers/nfc/nfcmrvl/fw_dnld.c 			ret = process_state_open_lc(priv, skb);
skb               432 drivers/nfc/nfcmrvl/fw_dnld.c 			ret = process_state_fw_dnld(priv, skb);
skb               435 drivers/nfc/nfcmrvl/fw_dnld.c 			ret = process_state_close_lc(priv, skb);
skb               438 drivers/nfc/nfcmrvl/fw_dnld.c 			ret = process_state_boot(priv, skb);
skb               444 drivers/nfc/nfcmrvl/fw_dnld.c 		kfree_skb(skb);
skb               474 drivers/nfc/nfcmrvl/fw_dnld.c 				   struct sk_buff *skb)
skb               484 drivers/nfc/nfcmrvl/fw_dnld.c 	skb_queue_tail(&priv->fw_dnld.rx_q, skb);
skb                96 drivers/nfc/nfcmrvl/fw_dnld.h 				struct sk_buff *skb);
skb                40 drivers/nfc/nfcmrvl/i2c.c 			    struct sk_buff **skb)
skb                57 drivers/nfc/nfcmrvl/i2c.c 	*skb = nci_skb_alloc(drv_data->priv->ndev,
skb                59 drivers/nfc/nfcmrvl/i2c.c 	if (!*skb)
skb                63 drivers/nfc/nfcmrvl/i2c.c 	skb_put_data(*skb, &nci_hdr, NCI_CTRL_HDR_SIZE);
skb                68 drivers/nfc/nfcmrvl/i2c.c 				      skb_put(*skb, nci_hdr.plen),
skb                75 drivers/nfc/nfcmrvl/i2c.c 			kfree_skb(*skb);
skb                86 drivers/nfc/nfcmrvl/i2c.c 	struct sk_buff *skb = NULL;
skb                95 drivers/nfc/nfcmrvl/i2c.c 	ret = nfcmrvl_i2c_read(drv_data, &skb);
skb               106 drivers/nfc/nfcmrvl/i2c.c 		if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0)
skb               129 drivers/nfc/nfcmrvl/i2c.c 				struct sk_buff *skb)
skb               137 drivers/nfc/nfcmrvl/i2c.c 	ret = i2c_master_send(drv_data->i2c, skb->data, skb->len);
skb               143 drivers/nfc/nfcmrvl/i2c.c 		ret = i2c_master_send(drv_data->i2c, skb->data, skb->len);
skb               147 drivers/nfc/nfcmrvl/i2c.c 		if (ret != skb->len) {
skb               150 drivers/nfc/nfcmrvl/i2c.c 				ret, skb->len);
skb               154 drivers/nfc/nfcmrvl/i2c.c 		kfree_skb(skb);
skb                59 drivers/nfc/nfcmrvl/main.c static int nfcmrvl_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
skb                63 drivers/nfc/nfcmrvl/main.c 	nfc_info(priv->dev, "send entry, len %d\n", skb->len);
skb                65 drivers/nfc/nfcmrvl/main.c 	skb->dev = (void *)ndev;
skb                69 drivers/nfc/nfcmrvl/main.c 		unsigned char len = skb->len;
skb                71 drivers/nfc/nfcmrvl/main.c 		hdr = skb_push(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE);
skb                78 drivers/nfc/nfcmrvl/main.c 	return priv->if_ops->nci_send(priv, skb);
skb               211 drivers/nfc/nfcmrvl/main.c int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb)
skb               214 drivers/nfc/nfcmrvl/main.c 		if (skb->data[0] == NFCMRVL_HCI_EVENT_CODE &&
skb               215 drivers/nfc/nfcmrvl/main.c 		    skb->data[1] == NFCMRVL_HCI_NFC_EVENT_CODE) {
skb               217 drivers/nfc/nfcmrvl/main.c 			skb_pull(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE);
skb               220 drivers/nfc/nfcmrvl/main.c 			kfree_skb(skb);
skb               226 drivers/nfc/nfcmrvl/main.c 		nfcmrvl_fw_dnld_recv_frame(priv, skb);
skb               231 drivers/nfc/nfcmrvl/main.c 		nci_recv_frame(priv->ndev, skb);
skb               234 drivers/nfc/nfcmrvl/main.c 		kfree_skb(skb);
skb                97 drivers/nfc/nfcmrvl/nfcmrvl.h 	int (*nci_send) (struct nfcmrvl_private *priv, struct sk_buff *skb);
skb               103 drivers/nfc/nfcmrvl/nfcmrvl.h int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb);
skb                44 drivers/nfc/nfcmrvl/spi.c 	struct sk_buff *skb;
skb                57 drivers/nfc/nfcmrvl/spi.c 	skb = nci_spi_read(drv_data->nci_spi);
skb                58 drivers/nfc/nfcmrvl/spi.c 	if (!skb) {
skb                63 drivers/nfc/nfcmrvl/spi.c 	if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0)
skb                80 drivers/nfc/nfcmrvl/spi.c 				struct sk_buff *skb)
skb                93 drivers/nfc/nfcmrvl/spi.c 	skb_put(skb, 1);
skb                97 drivers/nfc/nfcmrvl/spi.c 			   skb);
skb                46 drivers/nfc/nfcmrvl/uart.c 				 struct sk_buff *skb)
skb                50 drivers/nfc/nfcmrvl/uart.c 	return nu->ops.send(nu, skb);
skb               153 drivers/nfc/nfcmrvl/uart.c static int nfcmrvl_nci_uart_recv(struct nci_uart *nu, struct sk_buff *skb)
skb               156 drivers/nfc/nfcmrvl/uart.c 				      skb);
skb                71 drivers/nfc/nfcmrvl/usb.c 	struct sk_buff *skb;
skb                81 drivers/nfc/nfcmrvl/usb.c 		skb = nci_skb_alloc(drv_data->priv->ndev, urb->actual_length,
skb                83 drivers/nfc/nfcmrvl/usb.c 		if (!skb) {
skb                86 drivers/nfc/nfcmrvl/usb.c 			skb_put_data(skb, urb->transfer_buffer,
skb                88 drivers/nfc/nfcmrvl/usb.c 			if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0)
skb               159 drivers/nfc/nfcmrvl/usb.c 	struct sk_buff *skb = urb->context;
skb               160 drivers/nfc/nfcmrvl/usb.c 	struct nci_dev *ndev = (struct nci_dev *)skb->dev;
skb               173 drivers/nfc/nfcmrvl/usb.c 	kfree_skb(skb);
skb               231 drivers/nfc/nfcmrvl/usb.c 				struct sk_buff *skb)
skb               248 drivers/nfc/nfcmrvl/usb.c 	usb_fill_bulk_urb(urb, drv_data->udev, pipe, skb->data, skb->len,
skb               249 drivers/nfc/nfcmrvl/usb.c 			  nfcmrvl_tx_complete, skb);
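Both the fdp and nfcmrvl firmware-download entries build raw NCI data PDUs the same way: reserve the 3-byte NCI data header with skb_put, fill it, then append the payload chunk. A minimal sketch of that construction, assuming the standard struct nci_data_hdr layout (conn_id/rfu/plen); demo_alloc_data_pdu is an illustrative name:

#include <net/nfc/nci_core.h>

static struct sk_buff *demo_alloc_data_pdu(struct nci_dev *ndev, u8 conn_id,
					   const u8 *payload, u8 plen)
{
	struct nci_data_hdr *hdr;
	struct sk_buff *skb;

	skb = nci_skb_alloc(ndev, NCI_DATA_HDR_SIZE + plen, GFP_KERNEL);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, NCI_DATA_HDR_SIZE);
	hdr->conn_id = conn_id;
	hdr->rfu = 0;
	hdr->plen = plen;

	skb_put_data(skb, payload, plen);
	return skb;
}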
skb                60 drivers/nfc/nfcsim.c 	struct sk_buff *skb;
skb                81 drivers/nfc/nfcsim.c 	dev_kfree_skb(link->skb);
skb                91 drivers/nfc/nfcsim.c static void nfcsim_link_set_skb(struct nfcsim_link *link, struct sk_buff *skb,
skb                96 drivers/nfc/nfcsim.c 	dev_kfree_skb(link->skb);
skb                97 drivers/nfc/nfcsim.c 	link->skb = skb;
skb               131 drivers/nfc/nfcsim.c 	struct sk_buff *skb;
skb               139 drivers/nfc/nfcsim.c 	skb = link->skb;
skb               140 drivers/nfc/nfcsim.c 	link->skb = NULL;
skb               147 drivers/nfc/nfcsim.c 	if (!skb || link->rf_tech != rf_tech || link->mode == mode) {
skb               161 drivers/nfc/nfcsim.c 		dev_kfree_skb(skb);
skb               162 drivers/nfc/nfcsim.c 		skb = ERR_PTR(rc);
skb               167 drivers/nfc/nfcsim.c 	return skb;
skb               185 drivers/nfc/nfcsim.c 	struct sk_buff *skb;
skb               187 drivers/nfc/nfcsim.c 	skb = nfcsim_link_recv_skb(dev->link_in, dev->recv_timeout,
skb               193 drivers/nfc/nfcsim.c 		if (!IS_ERR(skb))
skb               194 drivers/nfc/nfcsim.c 			dev_kfree_skb(skb);
skb               196 drivers/nfc/nfcsim.c 		skb = ERR_PTR(-ENODEV);
skb               199 drivers/nfc/nfcsim.c 	dev->cb(dev->nfc_digital_dev, dev->arg, skb);
skb               202 drivers/nfc/nfcsim.c static int nfcsim_send(struct nfc_digital_dev *ddev, struct sk_buff *skb,
skb               221 drivers/nfc/nfcsim.c 		dev_kfree_skb(skb);
skb               227 drivers/nfc/nfcsim.c 	if (skb) {
skb               228 drivers/nfc/nfcsim.c 		nfcsim_link_set_skb(dev->link_out, skb, dev->rf_tech,
skb               281 drivers/nfc/nfcsim.c 			       struct sk_buff *skb, u16 timeout,
skb               284 drivers/nfc/nfcsim.c 	return nfcsim_send(ddev, skb, timeout, cb, arg);
skb               311 drivers/nfc/nfcsim.c 			       struct sk_buff *skb, u16 timeout,
skb               314 drivers/nfc/nfcsim.c 	return nfcsim_send(ddev, skb, timeout, cb, arg);
skb                68 drivers/nfc/nxp-nci/core.c static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
skb                83 drivers/nfc/nxp-nci/core.c 	r = info->phy_ops->write(info->phy_id, skb);
skb                85 drivers/nfc/nxp-nci/core.c 		kfree_skb(skb);
skb                92 drivers/nfc/nxp-nci/firmware.c 	struct sk_buff *skb;
skb                97 drivers/nfc/nxp-nci/firmware.c 	skb = nci_skb_alloc(info->ndev, info->max_payload, GFP_KERNEL);
skb                98 drivers/nfc/nxp-nci/firmware.c 	if (!skb) {
skb               114 drivers/nfc/nxp-nci/firmware.c 	put_unaligned_be16(header, skb_put(skb, NXP_NCI_FW_HDR_LEN));
skb               116 drivers/nfc/nxp-nci/firmware.c 	skb_put_data(skb, fw_info->data + fw_info->written, chunk_len);
skb               118 drivers/nfc/nxp-nci/firmware.c 	crc = nxp_nci_fw_crc(skb->data, chunk_len + NXP_NCI_FW_HDR_LEN);
skb               119 drivers/nfc/nxp-nci/firmware.c 	put_unaligned_be16(crc, skb_put(skb, NXP_NCI_FW_CRC_LEN));
skb               121 drivers/nfc/nxp-nci/firmware.c 	r = info->phy_ops->write(info->phy_id, skb);
skb               125 drivers/nfc/nxp-nci/firmware.c 	kfree_skb(skb);
skb               281 drivers/nfc/nxp-nci/firmware.c static u16 nxp_nci_fw_check_crc(struct sk_buff *skb)
skb               284 drivers/nfc/nxp-nci/firmware.c 	size_t len = skb->len - NXP_NCI_FW_CRC_LEN;
skb               286 drivers/nfc/nxp-nci/firmware.c 	crc = nxp_nci_fw_crc(skb->data, len);
skb               287 drivers/nfc/nxp-nci/firmware.c 	frame_crc = get_unaligned_be16(skb->data + len);
skb               292 drivers/nfc/nxp-nci/firmware.c void nxp_nci_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
skb               299 drivers/nfc/nxp-nci/firmware.c 	if (skb) {
skb               300 drivers/nfc/nxp-nci/firmware.c 		if (nxp_nci_fw_check_crc(skb) != 0x00)
skb               303 drivers/nfc/nxp-nci/firmware.c 			fw_info->cmd_result = nxp_nci_fw_read_status(*(u8 *)skb_pull(skb, NXP_NCI_FW_HDR_LEN));
skb               304 drivers/nfc/nxp-nci/firmware.c 		kfree_skb(skb);
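
The nxp-nci firmware entries show each download chunk framed as a big-endian 2-byte header, the payload, and a big-endian 2-byte CRC computed over header plus payload; nxp_nci_fw_check_crc() recomputes the CRC over everything but the trailing two bytes and compares. A self-contained sketch of that framing, using a generic CRC-16 (poly 0x1021, init 0xffff, MSB-first) as a stand-in for the driver's own nxp_nci_fw_crc(), whose parameters may differ:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum { HDR_LEN = 2, CRC_LEN = 2 };

    /* Stand-in CRC; the driver ships its own table-driven nxp_nci_fw_crc(). */
    static uint16_t crc16(const uint8_t *p, size_t len)
    {
        uint16_t crc = 0xffff;
        while (len--) {
            crc ^= (uint16_t)*p++ << 8;
            for (int i = 0; i < 8; i++)
                crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
        }
        return crc;
    }

    /* Build: [BE16 header][payload][BE16 crc over header+payload]. */
    static size_t fw_frame_build(uint8_t *out, uint16_t header,
                                 const uint8_t *data, size_t len)
    {
        out[0] = header >> 8; out[1] = header & 0xff;
        memcpy(out + HDR_LEN, data, len);
        uint16_t crc = crc16(out, HDR_LEN + len);
        out[HDR_LEN + len] = crc >> 8;
        out[HDR_LEN + len + 1] = crc & 0xff;
        return HDR_LEN + len + CRC_LEN;
    }

    /* Check mirrors nxp_nci_fw_check_crc(): recompute over all but the CRC. */
    static int fw_frame_crc_ok(const uint8_t *frame, size_t total)
    {
        size_t len = total - CRC_LEN;
        uint16_t want = (uint16_t)frame[len] << 8 | frame[len + 1];
        return crc16(frame, len) == want;
    }

    int main(void)
    {
        uint8_t frame[64];
        const uint8_t chunk[] = { 0xde, 0xad, 0xbe, 0xef };
        size_t n = fw_frame_build(frame, sizeof(chunk), chunk, sizeof(chunk));
        printf("crc ok: %d\n", fw_frame_crc_ok(frame, n));
        return 0;
    }
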
skb                60 drivers/nfc/nxp-nci/i2c.c static int nxp_nci_i2c_write(void *phy_id, struct sk_buff *skb)
skb                69 drivers/nfc/nxp-nci/i2c.c 	r = i2c_master_send(client, skb->data, skb->len);
skb                73 drivers/nfc/nxp-nci/i2c.c 		r = i2c_master_send(client, skb->data, skb->len);
skb                78 drivers/nfc/nxp-nci/i2c.c 	} else if (r != skb->len) {
skb                81 drivers/nfc/nxp-nci/i2c.c 			r, skb->len);
skb                97 drivers/nfc/nxp-nci/i2c.c 			       struct sk_buff **skb)
skb               116 drivers/nfc/nxp-nci/i2c.c 	*skb = alloc_skb(NXP_NCI_FW_HDR_LEN + frame_len, GFP_KERNEL);
skb               117 drivers/nfc/nxp-nci/i2c.c 	if (*skb == NULL) {
skb               122 drivers/nfc/nxp-nci/i2c.c 	skb_put_data(*skb, &header, NXP_NCI_FW_HDR_LEN);
skb               124 drivers/nfc/nxp-nci/i2c.c 	r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len);
skb               136 drivers/nfc/nxp-nci/i2c.c 	kfree_skb(*skb);
skb               142 drivers/nfc/nxp-nci/i2c.c 				struct sk_buff **skb)
skb               157 drivers/nfc/nxp-nci/i2c.c 	*skb = alloc_skb(NCI_CTRL_HDR_SIZE + header.plen, GFP_KERNEL);
skb               158 drivers/nfc/nxp-nci/i2c.c 	if (*skb == NULL) {
skb               163 drivers/nfc/nxp-nci/i2c.c 	skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE);
skb               165 drivers/nfc/nxp-nci/i2c.c 	r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen);
skb               177 drivers/nfc/nxp-nci/i2c.c 	kfree_skb(*skb);
skb               188 drivers/nfc/nxp-nci/i2c.c 	struct sk_buff *skb = NULL;
skb               211 drivers/nfc/nxp-nci/i2c.c 		r = nxp_nci_i2c_nci_read(phy, &skb);
skb               214 drivers/nfc/nxp-nci/i2c.c 		r = nxp_nci_i2c_fw_read(phy, &skb);
skb               233 drivers/nfc/nxp-nci/i2c.c 		nci_recv_frame(phy->ndev, skb);
skb               236 drivers/nfc/nxp-nci/i2c.c 		nxp_nci_fw_recv_frame(phy->ndev, skb);
skb                33 drivers/nfc/nxp-nci/nxp-nci.h 	int (*write)(void *id, struct sk_buff *skb);
skb                69 drivers/nfc/nxp-nci/nxp-nci.h void nxp_nci_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
skb                98 drivers/nfc/pn533/i2c.c static int pn533_i2c_read(struct pn533_i2c_phy *phy, struct sk_buff **skb)
skb               106 drivers/nfc/pn533/i2c.c 	*skb = alloc_skb(len, GFP_KERNEL);
skb               107 drivers/nfc/pn533/i2c.c 	if (*skb == NULL)
skb               110 drivers/nfc/pn533/i2c.c 	r = i2c_master_recv(client, skb_put(*skb, len), len);
skb               113 drivers/nfc/pn533/i2c.c 		kfree_skb(*skb);
skb               117 drivers/nfc/pn533/i2c.c 	if (!((*skb)->data[0] & 0x01)) {
skb               119 drivers/nfc/pn533/i2c.c 		kfree_skb(*skb);
skb               124 drivers/nfc/pn533/i2c.c 	skb_pull(*skb, 1);
skb               126 drivers/nfc/pn533/i2c.c 	skb_trim(*skb, phy->priv->ops->rx_frame_size((*skb)->data));
skb               135 drivers/nfc/pn533/i2c.c 	struct sk_buff *skb = NULL;
skb               149 drivers/nfc/pn533/i2c.c 	r = pn533_i2c_read(phy, &skb);
skb               161 drivers/nfc/pn533/i2c.c 		pn533_recv_frame(phy->priv, skb, 0);
skb               356 drivers/nfc/pn533/pn533.c 				  struct sk_buff *skb)
skb               359 drivers/nfc/pn533/pn533.c 	int payload_len = skb->len;
skb               363 drivers/nfc/pn533/pn533.c 	skb_push(skb, ops->tx_header_len);
skb               364 drivers/nfc/pn533/pn533.c 	skb_put(skb, ops->tx_tail_len);
skb               366 drivers/nfc/pn533/pn533.c 	ops->tx_frame_init(skb->data, cmd_code);
skb               367 drivers/nfc/pn533/pn533.c 	ops->tx_update_payload_len(skb->data, payload_len);
skb               368 drivers/nfc/pn533/pn533.c 	ops->tx_frame_finish(skb->data);
skb               620 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb               622 drivers/nfc/pn533/pn533.c 	skb = alloc_skb(dev->ops->tx_header_len +
skb               626 drivers/nfc/pn533/pn533.c 	if (skb)
skb               627 drivers/nfc/pn533/pn533.c 		skb_reserve(skb, dev->ops->tx_header_len);
skb               629 drivers/nfc/pn533/pn533.c 	return skb;
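
pn533_alloc_skb() above allocates tx_header_len + payload + tx_tail_len up front and immediately skb_reserve()s the header, so that pn533_build_frame() can later skb_push() the frame header and skb_put() the tail without any reallocation. A minimal sketch of those sk_buff primitives over a flat buffer (the toy struct here is illustrative, not the kernel's):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy sk_buff: one allocation, with data/tail cursors inside it. */
    struct buf { unsigned char *head, *data, *tail, *end; };

    static struct buf *buf_alloc(size_t size)
    {
        struct buf *b = malloc(sizeof(*b));
        b->head = b->data = b->tail = malloc(size);
        b->end = b->head + size;
        return b;
    }

    /* skb_reserve(): move both cursors forward, creating headroom. */
    static void buf_reserve(struct buf *b, size_t n)
    { b->data += n; b->tail += n; }

    /* skb_put(): extend at the tail, returning where to write. */
    static unsigned char *buf_put(struct buf *b, size_t n)
    { unsigned char *p = b->tail; b->tail += n; assert(b->tail <= b->end); return p; }

    /* skb_push(): extend at the head, consuming the reserved headroom. */
    static unsigned char *buf_push(struct buf *b, size_t n)
    { b->data -= n; assert(b->data >= b->head); return b->data; }

    int main(void)
    {
        enum { HDR = 6, TAIL = 2, PAYLOAD = 16 };
        struct buf *b = buf_alloc(HDR + PAYLOAD + TAIL);
        buf_reserve(b, HDR);                        /* as in pn533_alloc_skb() */
        memset(buf_put(b, PAYLOAD), 0xab, PAYLOAD); /* command payload */
        memset(buf_push(b, HDR), 0, HDR);           /* frame header, no realloc */
        buf_put(b, TAIL);                           /* checksum/postamble space */
        free(b->head);
        free(b);
        return 0;
    }
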
skb               996 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1018 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, skb_len);
skb              1019 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              1023 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, PN533_INIT_TARGET_DEP);
skb              1026 drivers/nfc/pn533/pn533.c 	skb_put_data(skb, mifare_params, 6);
skb              1029 drivers/nfc/pn533/pn533.c 	felica = skb_put_data(skb, felica_params, 18);
skb              1033 drivers/nfc/pn533/pn533.c 	nfcid3 = skb_put_zero(skb, 10);
skb              1037 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, gbytes_len);
skb              1039 drivers/nfc/pn533/pn533.c 	skb_put_data(skb, gbytes, gbytes_len);
skb              1042 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, 0);
skb              1044 drivers/nfc/pn533/pn533.c 	return skb;
skb              1053 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1083 drivers/nfc/pn533/pn533.c 	skb = pn533_build_response(dev);
skb              1084 drivers/nfc/pn533/pn533.c 	if (!skb) {
skb              1089 drivers/nfc/pn533/pn533.c 	return nfc_tm_data_received(dev->nfc_dev, skb);
skb              1103 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1108 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, 0);
skb              1109 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              1114 drivers/nfc/pn533/pn533.c 					skb,
skb              1119 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(skb);
skb              1127 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1133 drivers/nfc/pn533/pn533.c 	skb = skb_dequeue(&dev->fragment_skb);
skb              1134 drivers/nfc/pn533/pn533.c 	if (skb == NULL) {	/* No more data */
skb              1143 drivers/nfc/pn533/pn533.c 					skb, pn533_tm_send_complete, NULL);
skb              1147 drivers/nfc/pn533/pn533.c 					skb, pn533_tm_send_complete, NULL);
skb              1155 drivers/nfc/pn533/pn533.c 	dev_kfree_skb(skb);
skb              1165 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1170 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, 0);
skb              1171 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              1174 drivers/nfc/pn533/pn533.c 	rc = pn533_send_data_async(dev, PN533_CMD_TG_GET_DATA, skb,
skb              1178 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(skb);
skb              1262 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1267 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, 2);
skb              1268 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              1271 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, PN533_CFGITEM_RF_FIELD);
skb              1272 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, PN533_CFGITEM_RF_FIELD_AUTO_RFCA);
skb              1274 drivers/nfc/pn533/pn533.c 	rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb,
skb              1277 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(skb);
skb              1337 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1362 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, skb_len);
skb              1363 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              1366 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, 0x01);  /* Active */
skb              1367 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, 0x02);  /* 424 kbps */
skb              1369 drivers/nfc/pn533/pn533.c 	next = skb_put(skb, 1);  /* Next */
skb              1373 drivers/nfc/pn533/pn533.c 	skb_put_data(skb, passive_data, PASSIVE_DATA_LEN);
skb              1377 drivers/nfc/pn533/pn533.c 	skb_put_data(skb, nfcid3, NFC_NFCID3_MAXSIZE);
skb              1380 drivers/nfc/pn533/pn533.c 	skb_put_data(skb, dev->gb, dev->gb_len);
skb              1383 drivers/nfc/pn533/pn533.c 	rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
skb              1387 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(skb);
skb              1454 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1456 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, mod->len);
skb              1457 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              1460 drivers/nfc/pn533/pn533.c 	skb_put_data(skb, &mod->data, mod->len);
skb              1462 drivers/nfc/pn533/pn533.c 	return skb;
skb              1468 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1484 drivers/nfc/pn533/pn533.c 		skb = pn533_alloc_poll_tg_frame(dev);
skb              1487 drivers/nfc/pn533/pn533.c 		skb = pn533_alloc_poll_in_frame(dev, mod);
skb              1490 drivers/nfc/pn533/pn533.c 	if (!skb) {
skb              1495 drivers/nfc/pn533/pn533.c 	rc = pn533_send_cmd_async(dev, cmd_code, skb, pn533_poll_complete,
skb              1498 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(skb);
skb              1602 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1607 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
skb              1608 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              1611 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, 1); /* TG */
skb              1612 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, 0); /* Next */
skb              1614 drivers/nfc/pn533/pn533.c 	resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_ATR, skb);
skb              1711 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1724 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, sizeof(u8));
skb              1725 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              1728 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, 1); /* TG*/
skb              1730 drivers/nfc/pn533/pn533.c 	rc = pn533_send_cmd_async(dev, PN533_CMD_IN_RELEASE, skb,
skb              1733 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(skb);
skb              1805 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1835 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, skb_len);
skb              1836 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              1839 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, !comm_mode);  /* ActPass */
skb              1840 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, 0x02);  /* 424 kbps */
skb              1842 drivers/nfc/pn533/pn533.c 	next = skb_put(skb, 1);  /* Next */
skb              1846 drivers/nfc/pn533/pn533.c 	skb_put_data(skb, passive_data, PASSIVE_DATA_LEN);
skb              1851 drivers/nfc/pn533/pn533.c 		memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2,
skb              1854 drivers/nfc/pn533/pn533.c 		skb_put_data(skb, nfcid3, NFC_NFCID3_MAXSIZE);
skb              1858 drivers/nfc/pn533/pn533.c 		skb_put_data(skb, gb, gb_len);
skb              1866 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(skb);
skb              1874 drivers/nfc/pn533/pn533.c 	rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
skb              1878 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(skb);
skb              1911 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb, *tmp, *t;
skb              1920 drivers/nfc/pn533/pn533.c 		skb = skb_dequeue(&dev->resp_q);
skb              1930 drivers/nfc/pn533/pn533.c 	skb = alloc_skb(skb_len, GFP_KERNEL);
skb              1931 drivers/nfc/pn533/pn533.c 	if (skb == NULL)
skb              1934 drivers/nfc/pn533/pn533.c 	skb_put(skb, skb_len);
skb              1937 drivers/nfc/pn533/pn533.c 		memcpy(skb->data + tmp_len, tmp->data, tmp->len);
skb              1944 drivers/nfc/pn533/pn533.c 	return skb;
skb              1951 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              1991 drivers/nfc/pn533/pn533.c 	skb = pn533_build_response(dev);
skb              1992 drivers/nfc/pn533/pn533.c 	if (!skb) {
skb              1997 drivers/nfc/pn533/pn533.c 	arg->cb(arg->cb_context, skb, 0);
skb              2014 drivers/nfc/pn533/pn533.c void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status)
skb              2026 drivers/nfc/pn533/pn533.c 	if (skb == NULL) {
skb              2031 drivers/nfc/pn533/pn533.c 	if (pn533_rx_frame_is_ack(skb->data)) {
skb              2033 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(skb);
skb              2037 drivers/nfc/pn533/pn533.c 	print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, skb->data,
skb              2038 drivers/nfc/pn533/pn533.c 			     dev->ops->rx_frame_size(skb->data), false);
skb              2040 drivers/nfc/pn533/pn533.c 	if (!dev->ops->rx_is_frame_valid(skb->data, dev)) {
skb              2043 drivers/nfc/pn533/pn533.c 	} else if (!pn533_rx_frame_is_cmd_response(dev, skb->data)) {
skb              2048 drivers/nfc/pn533/pn533.c 	dev->cmd->resp = skb;
skb              2056 drivers/nfc/pn533/pn533.c static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
skb              2063 drivers/nfc/pn533/pn533.c 		if (skb->len > PN533_CMD_DATAFRAME_MAXLEN)
skb              2066 drivers/nfc/pn533/pn533.c 			frag_size = skb->len;
skb              2087 drivers/nfc/pn533/pn533.c 		skb_put_data(frag, skb->data, frag_size);
skb              2090 drivers/nfc/pn533/pn533.c 		skb_pull(skb, frag_size);
skb              2095 drivers/nfc/pn533/pn533.c 	} while (skb->len > 0);
skb              2097 drivers/nfc/pn533/pn533.c 	dev_kfree_skb(skb);
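
pn533_fill_fragment_skbs() above slices an oversized payload into frame-sized pieces, flags every fragment but the last so the peer keeps reassembling, queues the fragments, and frees the original skb. A hedged sketch of that chunking loop; the size constant and the "more information" flag position are illustrative, not the driver's exact wire format:

    #include <stdio.h>
    #include <string.h>

    enum { FRAG_MAX = 262 };   /* illustrative, cf. PN533_CMD_DATAFRAME_MAXLEN */
    #define MI_BIT 0x40        /* "more information" flag, position illustrative */

    /* Emit [ctrl][chunk] fragments; every fragment except the last carries
     * MI_BIT so the receiver knows more data follows. */
    static void fragment(const unsigned char *data, size_t len)
    {
        while (len > 0) {
            size_t frag = len > FRAG_MAX ? FRAG_MAX : len;
            unsigned char ctrl = 1;            /* TG byte, as in the driver */
            if (len > frag)
                ctrl |= MI_BIT;                /* more fragments follow */
            printf("fragment: ctrl=0x%02x len=%zu\n", ctrl, frag);
            data += frag;
            len -= frag;
        }
    }

    int main(void)
    {
        unsigned char payload[600];
        memset(payload, 0, sizeof(payload));
        fragment(payload, sizeof(payload));    /* -> 262 + 262 + 76 */
        return 0;
    }
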
skb              2103 drivers/nfc/pn533/pn533.c 			    struct nfc_target *target, struct sk_buff *skb,
skb              2132 drivers/nfc/pn533/pn533.c 						   skb,
skb              2141 drivers/nfc/pn533/pn533.c 		if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
skb              2142 drivers/nfc/pn533/pn533.c 			rc = pn533_fill_fragment_skbs(dev, skb);
skb              2146 drivers/nfc/pn533/pn533.c 			skb = skb_dequeue(&dev->fragment_skb);
skb              2147 drivers/nfc/pn533/pn533.c 			if (!skb) {
skb              2152 drivers/nfc/pn533/pn533.c 			*(u8 *)skb_push(skb, sizeof(u8)) =  1; /* TG */
skb              2156 drivers/nfc/pn533/pn533.c 					   skb, pn533_data_exchange_complete,
skb              2169 drivers/nfc/pn533/pn533.c 	dev_kfree_skb(skb);
skb              2205 drivers/nfc/pn533/pn533.c static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
skb              2213 drivers/nfc/pn533/pn533.c 	if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
skb              2214 drivers/nfc/pn533/pn533.c 		rc = pn533_fill_fragment_skbs(dev, skb);
skb              2219 drivers/nfc/pn533/pn533.c 		skb = skb_dequeue(&dev->fragment_skb);
skb              2220 drivers/nfc/pn533/pn533.c 		if (!skb) {
skb              2225 drivers/nfc/pn533/pn533.c 		rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_META_DATA, skb,
skb              2229 drivers/nfc/pn533/pn533.c 		rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
skb              2235 drivers/nfc/pn533/pn533.c 		dev_kfree_skb(skb);
skb              2245 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              2250 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
skb              2251 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              2259 drivers/nfc/pn533/pn533.c 						skb,
skb              2267 drivers/nfc/pn533/pn533.c 		skb_put_u8(skb, 1); /*TG*/
skb              2271 drivers/nfc/pn533/pn533.c 						 skb,
skb              2284 drivers/nfc/pn533/pn533.c 	dev_kfree_skb(skb);
skb              2295 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              2301 drivers/nfc/pn533/pn533.c 	skb = skb_dequeue(&dev->fragment_skb);
skb              2303 drivers/nfc/pn533/pn533.c 	if (skb == NULL) {	/* No more data */
skb              2317 drivers/nfc/pn533/pn533.c 						 skb,
skb              2327 drivers/nfc/pn533/pn533.c 						 skb,
skb              2340 drivers/nfc/pn533/pn533.c 	dev_kfree_skb(skb);
skb              2351 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              2359 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, skb_len);
skb              2360 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              2363 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, cfgitem);
skb              2364 drivers/nfc/pn533/pn533.c 	skb_put_data(skb, cfgdata, cfgdata_len);
skb              2366 drivers/nfc/pn533/pn533.c 	resp = pn533_send_cmd_sync(dev, PN533_CMD_RF_CONFIGURATION, skb);
skb              2377 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              2380 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, 0);
skb              2381 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              2384 drivers/nfc/pn533/pn533.c 	resp = pn533_send_cmd_sync(dev, PN533_CMD_GET_FIRMWARE_VERSION, skb);
skb              2399 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              2404 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, sizeof(u8));
skb              2405 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              2408 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, 0x1);
skb              2410 drivers/nfc/pn533/pn533.c 	resp = pn533_send_cmd_sync(dev, 0x18, skb);
skb              2440 drivers/nfc/pn533/pn533.c 	struct sk_buff *skb;
skb              2443 drivers/nfc/pn533/pn533.c 	skb = pn533_alloc_skb(dev, 1);
skb              2444 drivers/nfc/pn533/pn533.c 	if (!skb)
skb              2447 drivers/nfc/pn533/pn533.c 	skb_put_u8(skb, 0x01);
skb              2449 drivers/nfc/pn533/pn533.c 	resp = pn533_send_cmd_sync(dev, PN533_CMD_SAM_CONFIGURATION, skb);
skb               224 drivers/nfc/pn533/pn533.h void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status);
skb                62 drivers/nfc/pn533/usb.c 	struct sk_buff *skb = NULL;
skb                65 drivers/nfc/pn533/usb.c 		skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
skb                66 drivers/nfc/pn533/usb.c 		if (!skb) {
skb                69 drivers/nfc/pn533/usb.c 			skb_put_data(skb, urb->transfer_buffer,
skb                74 drivers/nfc/pn533/usb.c 	pn533_recv_frame(phy->priv, skb, urb->status);
skb               181 drivers/nfc/pn544/i2c.c #define I2C_DUMP_SKB(info, skb)					\
skb               185 drivers/nfc/pn544/i2c.c 		       16, 1, (skb)->data, (skb)->len, 0);	\
skb               270 drivers/nfc/pn544/i2c.c static void pn544_hci_i2c_add_len_crc(struct sk_buff *skb)
skb               275 drivers/nfc/pn544/i2c.c 	len = skb->len + 2;
skb               276 drivers/nfc/pn544/i2c.c 	*(u8 *)skb_push(skb, 1) = len;
skb               278 drivers/nfc/pn544/i2c.c 	crc = crc_ccitt(0xffff, skb->data, skb->len);
skb               280 drivers/nfc/pn544/i2c.c 	skb_put_u8(skb, crc & 0xff);
skb               281 drivers/nfc/pn544/i2c.c 	skb_put_u8(skb, crc >> 8);
skb               284 drivers/nfc/pn544/i2c.c static void pn544_hci_i2c_remove_len_crc(struct sk_buff *skb)
skb               286 drivers/nfc/pn544/i2c.c 	skb_pull(skb, PN544_I2C_FRAME_HEADROOM);
skb               287 drivers/nfc/pn544/i2c.c 	skb_trim(skb, PN544_I2C_FRAME_TAILROOM);
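
The pn544 i2c entries show each outgoing HCI frame wrapped in place: pn544_hci_i2c_add_len_crc() pushes a length byte in front (counting the payload plus the two CRC bytes) and appends a CRC-CCITT over length+payload, low byte first; pn544_hci_i2c_remove_len_crc() strips both again after the write so the upper layer gets its skb back untouched. A self-contained sketch; the bitwise CRC here (reflected poly 0x8408, init 0xffff) should match the kernel's table-driven crc_ccitt():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint16_t crc_ccitt(uint16_t crc, const uint8_t *p, size_t len)
    {
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
        }
        return crc;
    }

    /* Wrap as in pn544_hci_i2c_add_len_crc(): the length byte counts the
     * payload plus the two CRC bytes, and the CRC covers len+payload. */
    static size_t add_len_crc(uint8_t *frame, const uint8_t *payload, size_t plen)
    {
        frame[0] = (uint8_t)(plen + 2);
        memcpy(frame + 1, payload, plen);
        uint16_t crc = crc_ccitt(0xffff, frame, 1 + plen);
        frame[1 + plen] = crc & 0xff;        /* low byte first */
        frame[2 + plen] = crc >> 8;
        return plen + 3;
    }

    int main(void)
    {
        uint8_t frame[64];
        const uint8_t hci[] = { 0x81, 0x03 };
        size_t n = add_len_crc(frame, hci, sizeof(hci));
        for (size_t i = 0; i < n; i++)
            printf("%02x ", frame[i]);
        putchar('\n');
        return 0;
    }
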
skb               295 drivers/nfc/pn544/i2c.c static int pn544_hci_i2c_write(void *phy_id, struct sk_buff *skb)
skb               306 drivers/nfc/pn544/i2c.c 	pn544_hci_i2c_add_len_crc(skb);
skb               308 drivers/nfc/pn544/i2c.c 	I2C_DUMP_SKB("i2c frame written", skb);
skb               310 drivers/nfc/pn544/i2c.c 	r = i2c_master_send(client, skb->data, skb->len);
skb               314 drivers/nfc/pn544/i2c.c 		r = i2c_master_send(client, skb->data, skb->len);
skb               318 drivers/nfc/pn544/i2c.c 		if (r != skb->len)
skb               324 drivers/nfc/pn544/i2c.c 	pn544_hci_i2c_remove_len_crc(skb);
skb               358 drivers/nfc/pn544/i2c.c static int pn544_hci_i2c_read(struct pn544_i2c_phy *phy, struct sk_buff **skb)
skb               378 drivers/nfc/pn544/i2c.c 	*skb = alloc_skb(1 + len, GFP_KERNEL);
skb               379 drivers/nfc/pn544/i2c.c 	if (*skb == NULL) {
skb               384 drivers/nfc/pn544/i2c.c 	skb_put_u8(*skb, len);
skb               386 drivers/nfc/pn544/i2c.c 	r = i2c_master_recv(client, skb_put(*skb, len), len);
skb               388 drivers/nfc/pn544/i2c.c 		kfree_skb(*skb);
skb               392 drivers/nfc/pn544/i2c.c 	I2C_DUMP_SKB("i2c frame read", *skb);
skb               394 drivers/nfc/pn544/i2c.c 	r = check_crc((*skb)->data, (*skb)->len);
skb               396 drivers/nfc/pn544/i2c.c 		kfree_skb(*skb);
skb               401 drivers/nfc/pn544/i2c.c 	skb_pull(*skb, 1);
skb               402 drivers/nfc/pn544/i2c.c 	skb_trim(*skb, (*skb)->len - 2);
skb               486 drivers/nfc/pn544/i2c.c 	struct sk_buff *skb = NULL;
skb               504 drivers/nfc/pn544/i2c.c 		r = pn544_hci_i2c_read(phy, &skb);
skb               515 drivers/nfc/pn544/i2c.c 		nfc_hci_recv_frame(phy->hdev, skb);
skb               168 drivers/nfc/pn544/pn544.c 	struct sk_buff *skb;
skb               308 drivers/nfc/pn544/pn544.c 			      PN544_ID_MGMT_FULL_VERSION_SW, &skb);
skb               312 drivers/nfc/pn544/pn544.c 	if (skb->len != FULL_VERSION_LEN) {
skb               313 drivers/nfc/pn544/pn544.c 		kfree_skb(skb);
skb               319 drivers/nfc/pn544/pn544.c 		       skb->data, FULL_VERSION_LEN, false);
skb               321 drivers/nfc/pn544/pn544.c 	kfree_skb(skb);
skb               326 drivers/nfc/pn544/pn544.c static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
skb               330 drivers/nfc/pn544/pn544.c 	return info->phy_ops->write(info->phy_id, skb);
skb               574 drivers/nfc/pn544/pn544.c static void pn544_hci_data_exchange_cb(void *context, struct sk_buff *skb,
skb               582 drivers/nfc/pn544/pn544.c 			skb_pull(skb, 1);
skb               583 drivers/nfc/pn544/pn544.c 		info->async_cb(info->async_cb_context, skb, err);
skb               587 drivers/nfc/pn544/pn544.c 			kfree_skb(skb);
skb               605 drivers/nfc/pn544/pn544.c 				   struct sk_buff *skb, data_exchange_cb_t cb,
skb               620 drivers/nfc/pn544/pn544.c 			if (skb->len == MIFARE_CMD_LEN &&
skb               621 drivers/nfc/pn544/pn544.c 			    (skb->data[0] == MIFARE_CMD_AUTH_KEY_A ||
skb               622 drivers/nfc/pn544/pn544.c 			     skb->data[0] == MIFARE_CMD_AUTH_KEY_B)) {
skb               624 drivers/nfc/pn544/pn544.c 				u8 *data = skb->data + MIFARE_CMD_HEADER;
skb               636 drivers/nfc/pn544/pn544.c 						      skb->data, skb->len,
skb               641 drivers/nfc/pn544/pn544.c 		*(u8 *)skb_push(skb, 1) = 0;
skb               642 drivers/nfc/pn544/pn544.c 		*(u8 *)skb_push(skb, 1) = 0;
skb               649 drivers/nfc/pn544/pn544.c 					      PN544_FELICA_RAW, skb->data,
skb               650 drivers/nfc/pn544/pn544.c 					      skb->len,
skb               654 drivers/nfc/pn544/pn544.c 					      PN544_JEWEL_RAW_CMD, skb->data,
skb               655 drivers/nfc/pn544/pn544.c 					      skb->len, cb, cb_context);
skb               657 drivers/nfc/pn544/pn544.c 		*(u8 *)skb_push(skb, 1) = 0;
skb               660 drivers/nfc/pn544/pn544.c 					PN544_HCI_EVT_SND_DATA, skb->data,
skb               661 drivers/nfc/pn544/pn544.c 					skb->len);
skb               667 drivers/nfc/pn544/pn544.c static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
skb               672 drivers/nfc/pn544/pn544.c 	*(u8 *)skb_push(skb, 1) = 0;
skb               675 drivers/nfc/pn544/pn544.c 			       PN544_HCI_EVT_SND_DATA, skb->data, skb->len);
skb               677 drivers/nfc/pn544/pn544.c 	kfree_skb(skb);
skb               717 drivers/nfc/pn544/pn544.c 				    struct sk_buff *skb)
skb               748 drivers/nfc/pn544/pn544.c 		if (skb->len < 2) {
skb               753 drivers/nfc/pn544/pn544.c 		if (skb->data[0] != 0) {
skb               754 drivers/nfc/pn544/pn544.c 			pr_debug("data0 %d\n", skb->data[0]);
skb               759 drivers/nfc/pn544/pn544.c 		skb_pull(skb, 2);
skb               760 drivers/nfc/pn544/pn544.c 		return nfc_tm_data_received(hdev->ndev, skb);
skb               766 drivers/nfc/pn544/pn544.c 	kfree_skb(skb);
skb               795 drivers/nfc/port100.c 				    struct sk_buff *skb)
skb               798 drivers/nfc/port100.c 	int payload_len = skb->len;
skb               800 drivers/nfc/port100.c 	skb_push(skb, PORT100_FRAME_HEADER_LEN);
skb               801 drivers/nfc/port100.c 	skb_put(skb, PORT100_FRAME_TAIL_LEN);
skb               803 drivers/nfc/port100.c 	port100_tx_frame_init(skb->data, cmd_code);
skb               804 drivers/nfc/port100.c 	port100_tx_update_payload_len(skb->data, payload_len);
skb               805 drivers/nfc/port100.c 	port100_tx_frame_finish(skb->data);
skb               965 drivers/nfc/port100.c 	struct sk_buff *skb;
skb               967 drivers/nfc/port100.c 	skb = alloc_skb(dev->skb_headroom + dev->skb_tailroom + size,
skb               969 drivers/nfc/port100.c 	if (skb)
skb               970 drivers/nfc/port100.c 		skb_reserve(skb, dev->skb_headroom);
skb               972 drivers/nfc/port100.c 	return skb;
skb               977 drivers/nfc/port100.c 	struct sk_buff *skb;
skb               981 drivers/nfc/port100.c 	skb = port100_alloc_skb(dev, 1);
skb               982 drivers/nfc/port100.c 	if (!skb)
skb               985 drivers/nfc/port100.c 	skb_put_u8(skb, command_type);
skb               987 drivers/nfc/port100.c 	resp = port100_send_cmd_sync(dev, PORT100_CMD_SET_COMMAND_TYPE, skb);
skb              1000 drivers/nfc/port100.c 	struct sk_buff *skb;
skb              1004 drivers/nfc/port100.c 	skb = port100_alloc_skb(dev, 0);
skb              1005 drivers/nfc/port100.c 	if (!skb)
skb              1008 drivers/nfc/port100.c 	resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
skb              1024 drivers/nfc/port100.c 	struct sk_buff *skb;
skb              1028 drivers/nfc/port100.c 	skb = port100_alloc_skb(dev, 0);
skb              1029 drivers/nfc/port100.c 	if (!skb)
skb              1033 drivers/nfc/port100.c 				     skb);
skb              1047 drivers/nfc/port100.c 	struct sk_buff *skb, *resp;
skb              1049 drivers/nfc/port100.c 	skb = port100_alloc_skb(dev, 1);
skb              1050 drivers/nfc/port100.c 	if (!skb)
skb              1053 drivers/nfc/port100.c 	skb_put_u8(skb, on ? 1 : 0);
skb              1059 drivers/nfc/port100.c 	resp = port100_send_cmd_sync(dev, PORT100_CMD_SWITCH_RF, skb);
skb              1072 drivers/nfc/port100.c 	struct sk_buff *skb;
skb              1079 drivers/nfc/port100.c 	skb = port100_alloc_skb(dev, sizeof(struct port100_in_rf_setting));
skb              1080 drivers/nfc/port100.c 	if (!skb)
skb              1083 drivers/nfc/port100.c 	skb_put_data(skb, &in_rf_settings[rf],
skb              1086 drivers/nfc/port100.c 	resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_RF, skb);
skb              1102 drivers/nfc/port100.c 	struct sk_buff *skb;
skb              1122 drivers/nfc/port100.c 	skb = port100_alloc_skb(dev, size);
skb              1123 drivers/nfc/port100.c 	if (!skb)
skb              1126 drivers/nfc/port100.c 	skb_put_data(skb, protocols, size);
skb              1128 drivers/nfc/port100.c 	resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_PROTOCOL, skb);
skb              1204 drivers/nfc/port100.c 			       struct sk_buff *skb, u16 _timeout,
skb              1220 drivers/nfc/port100.c 	memcpy(skb_push(skb, sizeof(__le16)), &timeout, sizeof(__le16));
skb              1222 drivers/nfc/port100.c 	return port100_send_cmd_async(dev, PORT100_CMD_IN_COMM_RF, skb,
skb              1229 drivers/nfc/port100.c 	struct sk_buff *skb;
skb              1236 drivers/nfc/port100.c 	skb = port100_alloc_skb(dev, sizeof(struct port100_tg_rf_setting));
skb              1237 drivers/nfc/port100.c 	if (!skb)
skb              1240 drivers/nfc/port100.c 	skb_put_data(skb, &tg_rf_settings[rf],
skb              1243 drivers/nfc/port100.c 	resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_RF, skb);
skb              1259 drivers/nfc/port100.c 	struct sk_buff *skb;
skb              1279 drivers/nfc/port100.c 	skb = port100_alloc_skb(dev, size);
skb              1280 drivers/nfc/port100.c 	if (!skb)
skb              1283 drivers/nfc/port100.c 	skb_put_data(skb, protocols, size);
skb              1285 drivers/nfc/port100.c 	resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_PROTOCOL, skb);
skb              1370 drivers/nfc/port100.c 			       struct sk_buff *skb, u16 timeout,
skb              1384 drivers/nfc/port100.c 	skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd));
skb              1386 drivers/nfc/port100.c 	hdr = (struct port100_tg_comm_rf_cmd *)skb->data;
skb              1393 drivers/nfc/port100.c 	return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb,
skb              1405 drivers/nfc/port100.c 	struct sk_buff *skb;
skb              1426 drivers/nfc/port100.c 	skb = port100_alloc_skb(dev, 0);
skb              1427 drivers/nfc/port100.c 	if (!skb) {
skb              1432 drivers/nfc/port100.c 	skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd));
skb              1433 drivers/nfc/port100.c 	hdr = (struct port100_tg_comm_rf_cmd *)skb->data;
skb              1449 drivers/nfc/port100.c 	return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb,
skb              1457 drivers/nfc/port100.c 	struct sk_buff *skb;
skb              1459 drivers/nfc/port100.c 	skb = port100_alloc_skb(dev, 0);
skb              1460 drivers/nfc/port100.c 	if (!skb)
skb              1463 drivers/nfc/port100.c 	return port100_tg_send_cmd(ddev, skb, timeout, cb, arg);
skb                87 drivers/nfc/s3fwrn5/core.c static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
skb                99 drivers/nfc/s3fwrn5/core.c 	ret = s3fwrn5_write(info, skb);
skb               101 drivers/nfc/s3fwrn5/core.c 		kfree_skb(skb);
skb               192 drivers/nfc/s3fwrn5/core.c int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
skb               197 drivers/nfc/s3fwrn5/core.c 		return nci_recv_frame(ndev, skb);
skb               199 drivers/nfc/s3fwrn5/core.c 		return s3fwrn5_fw_recv_frame(ndev, skb);
skb                57 drivers/nfc/s3fwrn5/firmware.c 	struct sk_buff *skb;
skb                64 drivers/nfc/s3fwrn5/firmware.c 	skb = alloc_skb(S3FWRN5_FW_HDR_SIZE + len, GFP_KERNEL);
skb                65 drivers/nfc/s3fwrn5/firmware.c 	if (!skb)
skb                68 drivers/nfc/s3fwrn5/firmware.c 	skb_put_data(skb, &hdr, S3FWRN5_FW_HDR_SIZE);
skb                70 drivers/nfc/s3fwrn5/firmware.c 		skb_put_data(skb, data, len);
skb                72 drivers/nfc/s3fwrn5/firmware.c 	*msg = skb;
skb               505 drivers/nfc/s3fwrn5/firmware.c int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
skb               512 drivers/nfc/s3fwrn5/firmware.c 	fw_info->rsp = skb;
skb                98 drivers/nfc/s3fwrn5/firmware.h int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
skb                90 drivers/nfc/s3fwrn5/i2c.c static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb)
skb                99 drivers/nfc/s3fwrn5/i2c.c 	ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
skb               103 drivers/nfc/s3fwrn5/i2c.c 		ret  = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
skb               111 drivers/nfc/s3fwrn5/i2c.c 	if (ret != skb->len)
skb               126 drivers/nfc/s3fwrn5/i2c.c 	struct sk_buff *skb;
skb               145 drivers/nfc/s3fwrn5/i2c.c 	skb = alloc_skb(hdr_size + data_len, GFP_KERNEL);
skb               146 drivers/nfc/s3fwrn5/i2c.c 	if (!skb)
skb               149 drivers/nfc/s3fwrn5/i2c.c 	skb_put_data(skb, hdr, hdr_size);
skb               154 drivers/nfc/s3fwrn5/i2c.c 	ret = i2c_master_recv(phy->i2c_dev, skb_put(skb, data_len), data_len);
skb               156 drivers/nfc/s3fwrn5/i2c.c 		kfree_skb(skb);
skb               161 drivers/nfc/s3fwrn5/i2c.c 	return s3fwrn5_recv_frame(phy->ndev, skb, phy->mode);
skb                15 drivers/nfc/s3fwrn5/nci.c static int s3fwrn5_nci_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
skb                17 drivers/nfc/s3fwrn5/nci.c 	__u8 status = skb->data[0];
skb                28 drivers/nfc/s3fwrn5/s3fwrn5.h 	int (*write)(void *id, struct sk_buff *skb);
skb                73 drivers/nfc/s3fwrn5/s3fwrn5.h static inline int s3fwrn5_write(struct s3fwrn5_info *info, struct sk_buff *skb)
skb                78 drivers/nfc/s3fwrn5/s3fwrn5.h 	return info->phy_ops->write(info->phy_id, skb);
skb                85 drivers/nfc/s3fwrn5/s3fwrn5.h int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
skb                61 drivers/nfc/st-nci/core.c static int st_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
skb                65 drivers/nfc/st-nci/core.c 	skb->dev = (void *)ndev;
skb                70 drivers/nfc/st-nci/core.c 	return ndlc_send(info->ndlc, skb);
skb                81 drivers/nfc/st-nci/core.c 					struct sk_buff *skb)
skb                83 drivers/nfc/st-nci/core.c 	__u8 status = skb->data[0];
skb                73 drivers/nfc/st-nci/i2c.c static int st_nci_i2c_write(void *phy_id, struct sk_buff *skb)
skb                82 drivers/nfc/st-nci/i2c.c 	r = i2c_master_send(client, skb->data, skb->len);
skb                85 drivers/nfc/st-nci/i2c.c 		r = i2c_master_send(client, skb->data, skb->len);
skb                89 drivers/nfc/st-nci/i2c.c 		if (r != skb->len)
skb               107 drivers/nfc/st-nci/i2c.c 				 struct sk_buff **skb)
skb               129 drivers/nfc/st-nci/i2c.c 	*skb = alloc_skb(ST_NCI_I2C_MIN_SIZE + len, GFP_KERNEL);
skb               130 drivers/nfc/st-nci/i2c.c 	if (*skb == NULL)
skb               133 drivers/nfc/st-nci/i2c.c 	skb_reserve(*skb, ST_NCI_I2C_MIN_SIZE);
skb               134 drivers/nfc/st-nci/i2c.c 	skb_put(*skb, ST_NCI_I2C_MIN_SIZE);
skb               135 drivers/nfc/st-nci/i2c.c 	memcpy((*skb)->data, buf, ST_NCI_I2C_MIN_SIZE);
skb               142 drivers/nfc/st-nci/i2c.c 		kfree_skb(*skb);
skb               146 drivers/nfc/st-nci/i2c.c 	skb_put(*skb, len);
skb               147 drivers/nfc/st-nci/i2c.c 	memcpy((*skb)->data + ST_NCI_I2C_MIN_SIZE, buf, len);
skb               161 drivers/nfc/st-nci/i2c.c 	struct sk_buff *skb = NULL;
skb               180 drivers/nfc/st-nci/i2c.c 	r = st_nci_i2c_read(phy, &skb);
skb               184 drivers/nfc/st-nci/i2c.c 	ndlc_recv(phy->ndlc, skb);
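
st_nci_i2c_read() above is a two-phase read: fetch a fixed ST_NCI_I2C_MIN_SIZE header, derive the payload length from it, then issue a second read for exactly that many bytes and append them behind the header in the freshly allocated skb. A sketch of the same pattern against an arbitrary byte stream; the header layout here (one control byte plus a length byte) is hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum { MIN_SIZE = 2 };   /* cf. ST_NCI_I2C_MIN_SIZE; layout hypothetical */

    struct stream { const uint8_t *p; size_t left; };

    static int stream_read(struct stream *s, uint8_t *dst, size_t n)
    {
        if (s->left < n)
            return -1;
        memcpy(dst, s->p, n);
        s->p += n; s->left -= n;
        return 0;
    }

    /* Two-phase read: header first, then exactly the advertised payload. */
    static int read_frame(struct stream *s, uint8_t *frame, size_t *total)
    {
        if (stream_read(s, frame, MIN_SIZE))
            return -1;
        size_t len = frame[1];                      /* length from header */
        if (stream_read(s, frame + MIN_SIZE, len))  /* append payload */
            return -1;
        *total = MIN_SIZE + len;
        return 0;
    }

    int main(void)
    {
        const uint8_t wire[] = { 0x80, 0x03, 0xaa, 0xbb, 0xcc };
        struct stream s = { wire, sizeof(wire) };
        uint8_t frame[64]; size_t n;
        if (!read_frame(&s, frame, &n))
            printf("frame of %zu bytes\n", n);
        return 0;
    }
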
skb                39 drivers/nfc/st-nci/ndlc.c #define NDLC_DUMP_SKB(info, skb)                                 \
skb                43 drivers/nfc/st-nci/ndlc.c 			16, 1, skb->data, skb->len, 0);          \
skb                73 drivers/nfc/st-nci/ndlc.c int ndlc_send(struct llt_ndlc *ndlc, struct sk_buff *skb)
skb                79 drivers/nfc/st-nci/ndlc.c 	*(u8 *)skb_push(skb, 1) = pcb;
skb                80 drivers/nfc/st-nci/ndlc.c 	skb_queue_tail(&ndlc->send_q, skb);
skb                90 drivers/nfc/st-nci/ndlc.c 	struct sk_buff *skb;
skb                99 drivers/nfc/st-nci/ndlc.c 		skb = skb_dequeue(&ndlc->send_q);
skb               100 drivers/nfc/st-nci/ndlc.c 		NDLC_DUMP_SKB("ndlc frame written", skb);
skb               101 drivers/nfc/st-nci/ndlc.c 		r = ndlc->ops->write(ndlc->phy_id, skb);
skb               107 drivers/nfc/st-nci/ndlc.c 		*(unsigned long *)skb->cb = time_sent;
skb               109 drivers/nfc/st-nci/ndlc.c 		skb_queue_tail(&ndlc->ack_pending_q, skb);
skb               124 drivers/nfc/st-nci/ndlc.c 	struct sk_buff *skb;
skb               127 drivers/nfc/st-nci/ndlc.c 	while ((skb = skb_dequeue_tail(&ndlc->ack_pending_q))) {
skb               128 drivers/nfc/st-nci/ndlc.c 		pcb = skb->data[0];
skb               131 drivers/nfc/st-nci/ndlc.c 			skb->data[0] = (pcb & ~PCB_SUPERVISOR_RETRANSMIT_MASK) |
skb               135 drivers/nfc/st-nci/ndlc.c 			skb->data[0] = (pcb & ~PCB_DATAFRAME_RETRANSMIT_MASK) |
skb               140 drivers/nfc/st-nci/ndlc.c 			kfree_skb(skb);
skb               143 drivers/nfc/st-nci/ndlc.c 		skb_queue_head(&ndlc->send_q, skb);
skb               149 drivers/nfc/st-nci/ndlc.c 	struct sk_buff *skb;
skb               156 drivers/nfc/st-nci/ndlc.c 	while ((skb = skb_dequeue(&ndlc->rcv_q)) != NULL) {
skb               157 drivers/nfc/st-nci/ndlc.c 		pcb = skb->data[0];
skb               158 drivers/nfc/st-nci/ndlc.c 		skb_pull(skb, 1);
skb               162 drivers/nfc/st-nci/ndlc.c 				skb = skb_dequeue(&ndlc->ack_pending_q);
skb               163 drivers/nfc/st-nci/ndlc.c 				kfree_skb(skb);
skb               185 drivers/nfc/st-nci/ndlc.c 				kfree_skb(skb);
skb               189 drivers/nfc/st-nci/ndlc.c 			nci_recv_frame(ndlc->ndev, skb);
skb               191 drivers/nfc/st-nci/ndlc.c 			kfree_skb(skb);
skb               223 drivers/nfc/st-nci/ndlc.c void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb)
skb               225 drivers/nfc/st-nci/ndlc.c 	if (skb == NULL) {
skb               230 drivers/nfc/st-nci/ndlc.c 		NDLC_DUMP_SKB("incoming frame", skb);
skb               231 drivers/nfc/st-nci/ndlc.c 		skb_queue_tail(&ndlc->rcv_q, skb);
skb                46 drivers/nfc/st-nci/ndlc.h int ndlc_send(struct llt_ndlc *ndlc, struct sk_buff *skb);
skb                47 drivers/nfc/st-nci/ndlc.h void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb);
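
The ndlc entries sketch a small retransmission layer: ndlc_send() prepends a PCB control byte and queues the frame; the worker writes it out, timestamps it via skb->cb, and parks it on an ack-pending queue; if no acknowledgment arrives in time, the PCB's retransmit bits are set and the frame goes back to the head of the send queue. A compact sketch of that bookkeeping (queue sizes and flag values are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define RETRANSMIT 0x02        /* illustrative PCB retransmit bit */

    struct frame { unsigned char pcb; unsigned char payload[32]; int sent_at; };

    /* Toy FIFO of frames; index 0 is the next frame to send. */
    struct queue { struct frame f[8]; int n; };

    static void q_push_tail(struct queue *q, struct frame fr)
    { q->f[q->n++] = fr; }

    static void q_push_head(struct queue *q, struct frame fr)
    {
        memmove(&q->f[1], &q->f[0], q->n * sizeof(fr));
        q->f[0] = fr; q->n++;
    }

    static struct frame q_pop(struct queue *q)
    {
        struct frame fr = q->f[0];
        memmove(&q->f[0], &q->f[1], --q->n * sizeof(fr));
        return fr;
    }

    int main(void)
    {
        struct queue send_q = { .n = 0 }, ack_pending_q = { .n = 0 };
        int now = 0;

        /* ndlc_send(): prepend PCB, queue for the worker. */
        struct frame fr = { .pcb = 0x80 };
        q_push_tail(&send_q, fr);

        /* worker: write, timestamp, park until ACKed. */
        fr = q_pop(&send_q);
        fr.sent_at = now;
        q_push_tail(&ack_pending_q, fr);

        /* timeout: mark for retransmission, requeue at the head. */
        fr = q_pop(&ack_pending_q);
        fr.pcb |= RETRANSMIT;
        q_push_head(&send_q, fr);

        printf("resend pcb=0x%02x\n", send_q.f[0].pcb);
        return 0;
    }
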
skb               136 drivers/nfc/st-nci/se.c 	struct sk_buff *skb;
skb               139 drivers/nfc/st-nci/se.c 				NCI_HCI_APDU_PARAM_ATR, &skb);
skb               143 drivers/nfc/st-nci/se.c 	if (skb->len <= ST_NCI_ESE_MAX_LENGTH) {
skb               144 drivers/nfc/st-nci/se.c 		memcpy(info->se_info.atr, skb->data, skb->len);
skb               149 drivers/nfc/st-nci/se.c 	kfree_skb(skb);
skb               252 drivers/nfc/st-nci/se.c 					      u8 event, struct sk_buff *skb)
skb               259 drivers/nfc/st-nci/se.c 			if (!ST_NCI_EVT_HOT_PLUG_IS_INHIBITED(skb)) {
skb               277 drivers/nfc/st-nci/se.c 						   struct sk_buff *skb)
skb               289 drivers/nfc/st-nci/se.c 				 skb->data, skb->len, 0);
skb               300 drivers/nfc/st-nci/se.c 	kfree_skb(skb);
skb               311 drivers/nfc/st-nci/se.c 						struct sk_buff *skb)
skb               330 drivers/nfc/st-nci/se.c 		if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
skb               331 drivers/nfc/st-nci/se.c 		    skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
skb               335 drivers/nfc/st-nci/se.c 					    skb->len - 2, GFP_KERNEL);
skb               339 drivers/nfc/st-nci/se.c 		transaction->aid_len = skb->data[1];
skb               340 drivers/nfc/st-nci/se.c 		memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
skb               343 drivers/nfc/st-nci/se.c 		if (skb->data[transaction->aid_len + 2] !=
skb               347 drivers/nfc/st-nci/se.c 		transaction->params_len = skb->data[transaction->aid_len + 3];
skb               348 drivers/nfc/st-nci/se.c 		memcpy(transaction->params, skb->data +
skb               357 drivers/nfc/st-nci/se.c 	kfree_skb(skb);
skb               362 drivers/nfc/st-nci/se.c 				 u8 event, struct sk_buff *skb)
skb               369 drivers/nfc/st-nci/se.c 		st_nci_hci_admin_event_received(ndev, event, skb);
skb               372 drivers/nfc/st-nci/se.c 		st_nci_hci_apdu_reader_event_received(ndev, event, skb);
skb               375 drivers/nfc/st-nci/se.c 		st_nci_hci_connectivity_event_received(ndev, host, event, skb);
skb               382 drivers/nfc/st-nci/se.c 			       struct sk_buff *skb)
skb                74 drivers/nfc/st-nci/spi.c static int st_nci_spi_write(void *phy_id, struct sk_buff *skb)
skb                83 drivers/nfc/st-nci/spi.c 		.tx_buf = skb->data,
skb                85 drivers/nfc/st-nci/spi.c 		.len = skb->len,
skb                97 drivers/nfc/st-nci/spi.c 		skb_rx = alloc_skb(skb->len, GFP_KERNEL);
skb               103 drivers/nfc/st-nci/spi.c 		skb_put(skb_rx, skb->len);
skb               104 drivers/nfc/st-nci/spi.c 		memcpy(skb_rx->data, buf, skb->len);
skb               121 drivers/nfc/st-nci/spi.c 			struct sk_buff **skb)
skb               143 drivers/nfc/st-nci/spi.c 	*skb = alloc_skb(ST_NCI_SPI_MIN_SIZE + len, GFP_KERNEL);
skb               144 drivers/nfc/st-nci/spi.c 	if (*skb == NULL)
skb               147 drivers/nfc/st-nci/spi.c 	skb_reserve(*skb, ST_NCI_SPI_MIN_SIZE);
skb               148 drivers/nfc/st-nci/spi.c 	skb_put(*skb, ST_NCI_SPI_MIN_SIZE);
skb               149 drivers/nfc/st-nci/spi.c 	memcpy((*skb)->data, buf, ST_NCI_SPI_MIN_SIZE);
skb               157 drivers/nfc/st-nci/spi.c 		kfree_skb(*skb);
skb               161 drivers/nfc/st-nci/spi.c 	skb_put(*skb, len);
skb               162 drivers/nfc/st-nci/spi.c 	memcpy((*skb)->data + ST_NCI_SPI_MIN_SIZE, buf, len);
skb               176 drivers/nfc/st-nci/spi.c 	struct sk_buff *skb = NULL;
skb               195 drivers/nfc/st-nci/spi.c 	r = st_nci_spi_read(phy, &skb);
skb               199 drivers/nfc/st-nci/spi.c 	ndlc_recv(phy->ndlc, skb);
skb               136 drivers/nfc/st-nci/st-nci.h 					u8 event, struct sk_buff *skb);
skb               138 drivers/nfc/st-nci/st-nci.h 						struct sk_buff *skb);
skb                95 drivers/nfc/st-nci/vendor_cmds.c 	struct sk_buff *msg, *skb;
skb                99 drivers/nfc/st-nci/vendor_cmds.c 			     data, data_len, &skb);
skb               104 drivers/nfc/st-nci/vendor_cmds.c 					     HCI_DM_GET_INFO, skb->len);
skb               110 drivers/nfc/st-nci/vendor_cmds.c 	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
skb               119 drivers/nfc/st-nci/vendor_cmds.c 	kfree_skb(skb);
skb               128 drivers/nfc/st-nci/vendor_cmds.c 	struct sk_buff *msg, *skb;
skb               132 drivers/nfc/st-nci/vendor_cmds.c 			     data, data_len, &skb);
skb               137 drivers/nfc/st-nci/vendor_cmds.c 					     HCI_DM_GET_DATA, skb->len);
skb               143 drivers/nfc/st-nci/vendor_cmds.c 	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
skb               152 drivers/nfc/st-nci/vendor_cmds.c 	kfree_skb(skb);
skb               210 drivers/nfc/st-nci/vendor_cmds.c 	struct sk_buff *msg, *skb;
skb               217 drivers/nfc/st-nci/vendor_cmds.c 	r = nci_hci_get_param(ndev, param->gate, param->data, &skb);
skb               222 drivers/nfc/st-nci/vendor_cmds.c 					     HCI_GET_PARAM, skb->len);
skb               228 drivers/nfc/st-nci/vendor_cmds.c 	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
skb               237 drivers/nfc/st-nci/vendor_cmds.c 	kfree_skb(skb);
skb               255 drivers/nfc/st-nci/vendor_cmds.c 	struct sk_buff *msg, *skb;
skb               263 drivers/nfc/st-nci/vendor_cmds.c 			     data, data_len, &skb);
skb               268 drivers/nfc/st-nci/vendor_cmds.c 				HCI_DM_VDC_MEASUREMENT_VALUE, skb->len);
skb               274 drivers/nfc/st-nci/vendor_cmds.c 	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
skb               283 drivers/nfc/st-nci/vendor_cmds.c 	kfree_skb(skb);
skb               292 drivers/nfc/st-nci/vendor_cmds.c 	struct sk_buff *msg, *skb;
skb               300 drivers/nfc/st-nci/vendor_cmds.c 			     data, data_len, &skb);
skb               305 drivers/nfc/st-nci/vendor_cmds.c 					HCI_DM_VDC_VALUE_COMPARISON, skb->len);
skb               311 drivers/nfc/st-nci/vendor_cmds.c 	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
skb               320 drivers/nfc/st-nci/vendor_cmds.c 	kfree_skb(skb);
skb               329 drivers/nfc/st-nci/vendor_cmds.c 	struct sk_buff *msg, *skb;
skb               335 drivers/nfc/st-nci/vendor_cmds.c 	r = nci_nfcc_loopback(ndev, data, data_len, &skb);
skb               340 drivers/nfc/st-nci/vendor_cmds.c 					     LOOPBACK, skb->len);
skb               346 drivers/nfc/st-nci/vendor_cmds.c 	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
skb               354 drivers/nfc/st-nci/vendor_cmds.c 	kfree_skb(skb);
skb               247 drivers/nfc/st21nfca/core.c 	struct sk_buff *skb;
skb               269 drivers/nfc/st21nfca/core.c 			      ST21NFCA_NFC_MODE, &skb);
skb               273 drivers/nfc/st21nfca/core.c 	param = skb->data[0];
skb               274 drivers/nfc/st21nfca/core.c 	kfree_skb(skb);
skb               290 drivers/nfc/st21nfca/core.c 			      NFC_HCI_ID_MGMT_VERSION_SW, &skb);
skb               294 drivers/nfc/st21nfca/core.c 	if (skb->len != FULL_VERSION_LEN) {
skb               295 drivers/nfc/st21nfca/core.c 		kfree_skb(skb);
skb               301 drivers/nfc/st21nfca/core.c 		       skb->data, FULL_VERSION_LEN, false);
skb               303 drivers/nfc/st21nfca/core.c 	kfree_skb(skb);
skb               308 drivers/nfc/st21nfca/core.c static int st21nfca_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
skb               312 drivers/nfc/st21nfca/core.c 	return info->phy_ops->write(info->phy_id, skb);
skb               737 drivers/nfc/st21nfca/core.c static void st21nfca_hci_data_exchange_cb(void *context, struct sk_buff *skb,
skb               745 drivers/nfc/st21nfca/core.c 			skb_trim(skb, skb->len - 1);
skb               746 drivers/nfc/st21nfca/core.c 		info->async_cb(info->async_cb_context, skb, err);
skb               750 drivers/nfc/st21nfca/core.c 			kfree_skb(skb);
skb               762 drivers/nfc/st21nfca/core.c 				      struct sk_buff *skb,
skb               768 drivers/nfc/st21nfca/core.c 		target->hci_reader_gate, skb->len);
skb               773 drivers/nfc/st21nfca/core.c 			return st21nfca_im_send_dep_req(hdev, skb);
skb               775 drivers/nfc/st21nfca/core.c 		*(u8 *)skb_push(skb, 1) = 0x1a;
skb               777 drivers/nfc/st21nfca/core.c 					      ST21NFCA_WR_XCHG_DATA, skb->data,
skb               778 drivers/nfc/st21nfca/core.c 					      skb->len, cb, cb_context);
skb               780 drivers/nfc/st21nfca/core.c 		*(u8 *)skb_push(skb, 1) = 0x1a;	/* CTR, see spec:10.2.2.1 */
skb               783 drivers/nfc/st21nfca/core.c 					      ST21NFCA_WR_XCHG_DATA, skb->data,
skb               784 drivers/nfc/st21nfca/core.c 					      skb->len, cb, cb_context);
skb               790 drivers/nfc/st21nfca/core.c 		*(u8 *)skb_push(skb, 1) = 0x17;
skb               793 drivers/nfc/st21nfca/core.c 					      ST21NFCA_WR_XCHG_DATA, skb->data,
skb               794 drivers/nfc/st21nfca/core.c 					      skb->len,
skb               803 drivers/nfc/st21nfca/core.c static int st21nfca_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
skb               805 drivers/nfc/st21nfca/core.c 	return st21nfca_tm_send_dep_res(hdev, skb);
skb               835 drivers/nfc/st21nfca/core.c 				struct sk_buff *skb)
skb               859 drivers/nfc/st21nfca/core.c 					struct sk_buff *skb)
skb               868 drivers/nfc/st21nfca/core.c 			if (!ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(skb)) {
skb               882 drivers/nfc/st21nfca/core.c 	kfree_skb(skb);
skb               892 drivers/nfc/st21nfca/core.c 				       u8 event, struct sk_buff *skb)
skb               901 drivers/nfc/st21nfca/core.c 		return st21nfca_admin_event_received(hdev, event, skb);
skb               903 drivers/nfc/st21nfca/core.c 		return st21nfca_dep_event_received(hdev, event, skb);
skb               906 drivers/nfc/st21nfca/core.c 							event, skb);
skb               908 drivers/nfc/st21nfca/core.c 		return st21nfca_apdu_reader_event_received(hdev, event, skb);
skb               910 drivers/nfc/st21nfca/core.c 		return st21nfca_hci_loopback_event_received(hdev, event, skb);
skb               117 drivers/nfc/st21nfca/dep.c 	struct sk_buff *skb;
skb               121 drivers/nfc/st21nfca/dep.c 		skb = info->dep_info.tx_pending;
skb               126 drivers/nfc/st21nfca/dep.c 				ST21NFCA_WR_XCHG_DATA, skb->data, skb->len,
skb               129 drivers/nfc/st21nfca/dep.c 		kfree_skb(skb);
skb               134 drivers/nfc/st21nfca/dep.c 						struct sk_buff *skb)
skb               136 drivers/nfc/st21nfca/dep.c 	info->dep_info.tx_pending = skb;
skb               144 drivers/nfc/st21nfca/dep.c 	struct sk_buff *skb;
skb               150 drivers/nfc/st21nfca/dep.c 	skb = alloc_skb(atr_req->length + 1, GFP_KERNEL);
skb               151 drivers/nfc/st21nfca/dep.c 	if (!skb)
skb               154 drivers/nfc/st21nfca/dep.c 	skb_put(skb, sizeof(struct st21nfca_atr_res));
skb               156 drivers/nfc/st21nfca/dep.c 	atr_res = (struct st21nfca_atr_res *)skb->data;
skb               170 drivers/nfc/st21nfca/dep.c 		skb_put(skb, gb_len);
skb               177 drivers/nfc/st21nfca/dep.c 			kfree_skb(skb);
skb               185 drivers/nfc/st21nfca/dep.c 				ST21NFCA_EVT_SEND_DATA, skb->data, skb->len);
skb               186 drivers/nfc/st21nfca/dep.c 	kfree_skb(skb);
skb               191 drivers/nfc/st21nfca/dep.c 				    struct sk_buff *skb)
skb               197 drivers/nfc/st21nfca/dep.c 	skb_trim(skb, skb->len - 1);
skb               199 drivers/nfc/st21nfca/dep.c 	if (!skb->len) {
skb               204 drivers/nfc/st21nfca/dep.c 	if (skb->len < ST21NFCA_ATR_REQ_MIN_SIZE) {
skb               209 drivers/nfc/st21nfca/dep.c 	atr_req = (struct st21nfca_atr_req *)skb->data;
skb               220 drivers/nfc/st21nfca/dep.c 	gb_len = skb->len - sizeof(struct st21nfca_atr_req);
skb               237 drivers/nfc/st21nfca/dep.c 	struct sk_buff *skb;
skb               241 drivers/nfc/st21nfca/dep.c 	skb = alloc_skb(sizeof(struct st21nfca_psl_res), GFP_KERNEL);
skb               242 drivers/nfc/st21nfca/dep.c 	if (!skb)
skb               244 drivers/nfc/st21nfca/dep.c 	skb_put(skb, sizeof(struct st21nfca_psl_res));
skb               246 drivers/nfc/st21nfca/dep.c 	psl_res = (struct st21nfca_psl_res *)skb->data;
skb               254 drivers/nfc/st21nfca/dep.c 				ST21NFCA_EVT_SEND_DATA, skb->data, skb->len);
skb               275 drivers/nfc/st21nfca/dep.c 	kfree_skb(skb);
skb               280 drivers/nfc/st21nfca/dep.c 				    struct sk_buff *skb)
skb               285 drivers/nfc/st21nfca/dep.c 	skb_trim(skb, skb->len - 1);
skb               287 drivers/nfc/st21nfca/dep.c 	if (!skb->len) {
skb               292 drivers/nfc/st21nfca/dep.c 	psl_req = (struct st21nfca_psl_req *)skb->data;
skb               294 drivers/nfc/st21nfca/dep.c 	if (skb->len < sizeof(struct st21nfca_psl_req)) {
skb               304 drivers/nfc/st21nfca/dep.c int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb)
skb               309 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni;
skb               310 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_RES;
skb               311 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_RES;
skb               312 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = skb->len;
skb               315 drivers/nfc/st21nfca/dep.c 			ST21NFCA_EVT_SEND_DATA, skb->data, skb->len);
skb               316 drivers/nfc/st21nfca/dep.c 	kfree_skb(skb);
skb               323 drivers/nfc/st21nfca/dep.c 				    struct sk_buff *skb)
skb               330 drivers/nfc/st21nfca/dep.c 	skb_trim(skb, skb->len - 1);
skb               334 drivers/nfc/st21nfca/dep.c 	dep_req = (struct st21nfca_dep_req_res *)skb->data;
skb               335 drivers/nfc/st21nfca/dep.c 	if (skb->len < size) {
skb               345 drivers/nfc/st21nfca/dep.c 	if (skb->len < size) {
skb               364 drivers/nfc/st21nfca/dep.c 	skb_pull(skb, size);
skb               366 drivers/nfc/st21nfca/dep.c 	return nfc_tm_data_received(hdev->ndev, skb);
skb               372 drivers/nfc/st21nfca/dep.c 				struct sk_buff *skb)
skb               377 drivers/nfc/st21nfca/dep.c 	cmd0 = skb->data[1];
skb               380 drivers/nfc/st21nfca/dep.c 		cmd1 = skb->data[2];
skb               383 drivers/nfc/st21nfca/dep.c 			r = st21nfca_tm_recv_atr_req(hdev, skb);
skb               386 drivers/nfc/st21nfca/dep.c 			r = st21nfca_tm_recv_psl_req(hdev, skb);
skb               389 drivers/nfc/st21nfca/dep.c 			r = st21nfca_tm_recv_dep_req(hdev, skb);
skb               407 drivers/nfc/st21nfca/dep.c 				u8 event, struct sk_buff *skb)
skb               425 drivers/nfc/st21nfca/dep.c 		r = st21nfca_tm_event_send_data(hdev, skb);
skb               433 drivers/nfc/st21nfca/dep.c 	kfree_skb(skb);
skb               441 drivers/nfc/st21nfca/dep.c 	struct sk_buff *skb;
skb               445 drivers/nfc/st21nfca/dep.c 	skb =
skb               447 drivers/nfc/st21nfca/dep.c 	if (!skb)
skb               449 drivers/nfc/st21nfca/dep.c 	skb_reserve(skb, 1);
skb               451 drivers/nfc/st21nfca/dep.c 	skb_put(skb, sizeof(struct st21nfca_psl_req));
skb               452 drivers/nfc/st21nfca/dep.c 	psl_req = (struct st21nfca_psl_req *) skb->data;
skb               461 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10;
skb               463 drivers/nfc/st21nfca/dep.c 	st21nfca_im_send_pdu(info, skb);
skb               467 drivers/nfc/st21nfca/dep.c static void st21nfca_im_recv_atr_res_cb(void *context, struct sk_buff *skb,
skb               477 drivers/nfc/st21nfca/dep.c 	if (!skb)
skb               482 drivers/nfc/st21nfca/dep.c 		skb_trim(skb, skb->len - 1);
skb               483 drivers/nfc/st21nfca/dep.c 		atr_res = (struct st21nfca_atr_res *)skb->data;
skb               486 drivers/nfc/st21nfca/dep.c 				skb->len - sizeof(struct st21nfca_atr_res));
skb               509 drivers/nfc/st21nfca/dep.c 		kfree_skb(skb);
skb               516 drivers/nfc/st21nfca/dep.c 	struct sk_buff *skb;
skb               529 drivers/nfc/st21nfca/dep.c 	skb =
skb               531 drivers/nfc/st21nfca/dep.c 	if (!skb)
skb               534 drivers/nfc/st21nfca/dep.c 	skb_reserve(skb, 1);
skb               536 drivers/nfc/st21nfca/dep.c 	skb_put(skb, sizeof(struct st21nfca_atr_req));
skb               538 drivers/nfc/st21nfca/dep.c 	atr_req = (struct st21nfca_atr_req *)skb->data;
skb               559 drivers/nfc/st21nfca/dep.c 		skb_put_data(skb, gb, gb_len);
skb               563 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10; /* timeout */
skb               573 drivers/nfc/st21nfca/dep.c 				ST21NFCA_WR_XCHG_DATA, skb->data,
skb               574 drivers/nfc/st21nfca/dep.c 				skb->len, info->async_cb, info);
skb               578 drivers/nfc/st21nfca/dep.c static void st21nfca_im_recv_dep_res_cb(void *context, struct sk_buff *skb,
skb               589 drivers/nfc/st21nfca/dep.c 	if (!skb)
skb               594 drivers/nfc/st21nfca/dep.c 		dep_res = (struct st21nfca_dep_req_res *)skb->data;
skb               597 drivers/nfc/st21nfca/dep.c 		if (skb->len < size)
skb               605 drivers/nfc/st21nfca/dep.c 		if (skb->len < size)
skb               608 drivers/nfc/st21nfca/dep.c 		skb_trim(skb, skb->len - 1);
skb               619 drivers/nfc/st21nfca/dep.c 			skb_pull(skb, size);
skb               620 drivers/nfc/st21nfca/dep.c 			nfc_tm_data_received(info->hdev->ndev, skb);
skb               624 drivers/nfc/st21nfca/dep.c 			skb_pull(skb, size);
skb               625 drivers/nfc/st21nfca/dep.c 			*(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ;
skb               626 drivers/nfc/st21nfca/dep.c 			*(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ;
skb               627 drivers/nfc/st21nfca/dep.c 			*(u8 *)skb_push(skb, 1) = skb->len;
skb               628 drivers/nfc/st21nfca/dep.c 			*(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10;
skb               630 drivers/nfc/st21nfca/dep.c 			st21nfca_im_send_pdu(info, skb);
skb               640 drivers/nfc/st21nfca/dep.c 	kfree_skb(skb);
skb               643 drivers/nfc/st21nfca/dep.c int st21nfca_im_send_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb)
skb               651 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni;
skb               652 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ;
skb               653 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ;
skb               654 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = skb->len;
skb               656 drivers/nfc/st21nfca/dep.c 	*(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10;
skb               660 drivers/nfc/st21nfca/dep.c 				      skb->data, skb->len,
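The dep.c lines above build NFCIP-1 frames by repeatedly prepending single bytes with skb_push(), innermost field first, so the length byte written near the end covers everything already in the buffer. A minimal userspace sketch of that headroom-and-push pattern; struct buf, buf_push() and the byte values are illustrative stand-ins, not kernel API:

#include <stdio.h>
#include <string.h>

struct buf {
	unsigned char mem[64];
	unsigned char *data;	/* current start of valid data */
	size_t len;
};

static unsigned char *buf_push(struct buf *b, size_t n)
{
	b->data -= n;		/* grow the frame into the headroom */
	b->len += n;
	return b->data;
}

int main(void)
{
	struct buf b;
	unsigned char payload[] = { 0xca, 0xfe };
	unsigned char total;

	b.data = b.mem + 16;	/* like skb_reserve(): keep headroom */
	b.len = sizeof(payload);
	memcpy(b.data, payload, b.len);

	*buf_push(&b, 1) = 0x00;	/* PNI (illustrative value) */
	*buf_push(&b, 1) = 0x06;	/* DEP_REQ command (illustrative) */
	*buf_push(&b, 1) = 0xd4;	/* NFCIP-1 request byte (illustrative) */
	total = (unsigned char)(b.len + 1);
	*buf_push(&b, 1) = total;	/* LEN counts itself, too */

	for (size_t i = 0; i < b.len; i++)
		printf("%02x ", b.data[i]);
	printf("\n");
	return 0;
}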
skb                82 drivers/nfc/st21nfca/i2c.c #define I2C_DUMP_SKB(info, skb)					\
skb                86 drivers/nfc/st21nfca/i2c.c 		       16, 1, (skb)->data, (skb)->len, 0);	\
skb               156 drivers/nfc/st21nfca/i2c.c static void st21nfca_hci_add_len_crc(struct sk_buff *skb)
skb               161 drivers/nfc/st21nfca/i2c.c 	*(u8 *)skb_push(skb, 1) = 0;
skb               163 drivers/nfc/st21nfca/i2c.c 	crc = crc_ccitt(0xffff, skb->data, skb->len);
skb               167 drivers/nfc/st21nfca/i2c.c 	skb_put_u8(skb, tmp);
skb               170 drivers/nfc/st21nfca/i2c.c 	skb_put_u8(skb, tmp);
skb               173 drivers/nfc/st21nfca/i2c.c static void st21nfca_hci_remove_len_crc(struct sk_buff *skb)
skb               175 drivers/nfc/st21nfca/i2c.c 	skb_pull(skb, ST21NFCA_FRAME_HEADROOM);
skb               176 drivers/nfc/st21nfca/i2c.c 	skb_trim(skb, skb->len - ST21NFCA_FRAME_TAILROOM);
skb               184 drivers/nfc/st21nfca/i2c.c static int st21nfca_hci_i2c_write(void *phy_id, struct sk_buff *skb)
skb               191 drivers/nfc/st21nfca/i2c.c 	I2C_DUMP_SKB("st21nfca_hci_i2c_write", skb);
skb               201 drivers/nfc/st21nfca/i2c.c 	st21nfca_hci_add_len_crc(skb);
skb               204 drivers/nfc/st21nfca/i2c.c 	skb_put_u8(skb, ST21NFCA_SOF_EOF);
skb               206 drivers/nfc/st21nfca/i2c.c 	*(u8 *)skb_push(skb, 1) = ST21NFCA_SOF_EOF;
skb               214 drivers/nfc/st21nfca/i2c.c 	tmp[0] = skb->data[0];
skb               215 drivers/nfc/st21nfca/i2c.c 	for (i = 1, j = 1; i < skb->len - 1; i++, j++) {
skb               216 drivers/nfc/st21nfca/i2c.c 		if (skb->data[i] == ST21NFCA_SOF_EOF
skb               217 drivers/nfc/st21nfca/i2c.c 		    || skb->data[i] == ST21NFCA_ESCAPE_BYTE_STUFFING) {
skb               220 drivers/nfc/st21nfca/i2c.c 			tmp[j] = skb->data[i] ^ ST21NFCA_BYTE_STUFFING_MASK;
skb               222 drivers/nfc/st21nfca/i2c.c 			tmp[j] = skb->data[i];
skb               225 drivers/nfc/st21nfca/i2c.c 	tmp[j] = skb->data[i];
skb               247 drivers/nfc/st21nfca/i2c.c 	st21nfca_hci_remove_len_crc(skb);
skb               293 drivers/nfc/st21nfca/i2c.c static int st21nfca_hci_i2c_repack(struct sk_buff *skb)
skb               297 drivers/nfc/st21nfca/i2c.c 	if (skb->len < 1 || (skb->len > 1 && skb->data[1] != 0))
skb               300 drivers/nfc/st21nfca/i2c.c 	size = get_frame_size(skb->data, skb->len);
skb               302 drivers/nfc/st21nfca/i2c.c 		skb_trim(skb, size);
skb               304 drivers/nfc/st21nfca/i2c.c 		for (i = 1, j = 0; i < skb->len; i++) {
skb               305 drivers/nfc/st21nfca/i2c.c 			if (skb->data[i + j] ==
skb               307 drivers/nfc/st21nfca/i2c.c 				skb->data[i] = skb->data[i + j + 1]
skb               312 drivers/nfc/st21nfca/i2c.c 			skb->data[i] = skb->data[i + j];
skb               315 drivers/nfc/st21nfca/i2c.c 		skb_trim(skb, i - j);
skb               317 drivers/nfc/st21nfca/i2c.c 		skb_pull(skb, 1);
skb               319 drivers/nfc/st21nfca/i2c.c 		r = check_crc(skb->data, skb->len);
skb               326 drivers/nfc/st21nfca/i2c.c 		skb_pull(skb, 1);
skb               328 drivers/nfc/st21nfca/i2c.c 		skb_trim(skb, skb->len - 2);
skb               329 drivers/nfc/st21nfca/i2c.c 		return skb->len;
skb               350 drivers/nfc/st21nfca/i2c.c 				 struct sk_buff *skb)
skb               384 drivers/nfc/st21nfca/i2c.c 			skb_trim(skb, 0);
skb               393 drivers/nfc/st21nfca/i2c.c 			skb_trim(skb, 0);
skb               397 drivers/nfc/st21nfca/i2c.c 		skb_put_data(skb, buf, len);
skb               399 drivers/nfc/st21nfca/i2c.c 		if (skb->data[skb->len - 1] == ST21NFCA_SOF_EOF) {
skb               401 drivers/nfc/st21nfca/i2c.c 			return st21nfca_hci_i2c_repack(skb);
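st21nfca_hci_i2c_write() above escapes payload bytes that collide with the frame delimiter before they hit the wire, and st21nfca_hci_i2c_repack() reverses the transformation on receive. A hedged userspace sketch of that HDLC-style byte stuffing, assuming the conventional 0x7e/0x7d/0x20 delimiter, escape and XOR-mask values rather than the driver's actual constants:

#include <stdio.h>

#define SOF_EOF 0x7e	/* frame delimiter (assumed value) */
#define ESCAPE  0x7d	/* escape marker (assumed value) */
#define MASK    0x20	/* XOR mask for escaped bytes (assumed value) */

static size_t stuff(const unsigned char *in, size_t n, unsigned char *out)
{
	size_t j = 0;
	for (size_t i = 0; i < n; i++) {
		if (in[i] == SOF_EOF || in[i] == ESCAPE) {
			out[j++] = ESCAPE;
			out[j++] = in[i] ^ MASK;	/* transmitted form */
		} else {
			out[j++] = in[i];
		}
	}
	return j;
}

static size_t unstuff(const unsigned char *in, size_t n, unsigned char *out)
{
	size_t j = 0;
	for (size_t i = 0; i < n; i++) {
		if (in[i] == ESCAPE && i + 1 < n)
			out[j++] = in[++i] ^ MASK;	/* undo the escape */
		else
			out[j++] = in[i];
	}
	return j;
}

int main(void)
{
	unsigned char msg[] = { 0x01, 0x7e, 0x02, 0x7d, 0x03 };
	unsigned char wire[2 * sizeof(msg)], back[sizeof(msg)];
	size_t w = stuff(msg, sizeof(msg), wire);
	size_t b = unstuff(wire, w, back);
	printf("%zu payload -> %zu on the wire -> %zu back\n",
	       sizeof(msg), w, b);
	return 0;
}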
skb                70 drivers/nfc/st21nfca/se.c 	struct sk_buff *skb;
skb                74 drivers/nfc/st21nfca/se.c 			ST21NFCA_PARAM_ATR, &skb);
skb                78 drivers/nfc/st21nfca/se.c 	if (skb->len <= ST21NFCA_ESE_MAX_LENGTH) {
skb                79 drivers/nfc/st21nfca/se.c 		memcpy(info->se_info.atr, skb->data, skb->len);
skb                83 drivers/nfc/st21nfca/se.c 	kfree_skb(skb);
skb               294 drivers/nfc/st21nfca/se.c 				u8 event, struct sk_buff *skb)
skb               314 drivers/nfc/st21nfca/se.c 		if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
skb               315 drivers/nfc/st21nfca/se.c 		    skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
skb               319 drivers/nfc/st21nfca/se.c 						   skb->len - 2, GFP_KERNEL);
skb               323 drivers/nfc/st21nfca/se.c 		transaction->aid_len = skb->data[1];
skb               324 drivers/nfc/st21nfca/se.c 		memcpy(transaction->aid, &skb->data[2],
skb               328 drivers/nfc/st21nfca/se.c 		if (skb->data[transaction->aid_len + 2] !=
skb               332 drivers/nfc/st21nfca/se.c 		transaction->params_len = skb->data[transaction->aid_len + 3];
skb               333 drivers/nfc/st21nfca/se.c 		memcpy(transaction->params, skb->data +
skb               342 drivers/nfc/st21nfca/se.c 	kfree_skb(skb);
skb               348 drivers/nfc/st21nfca/se.c 					u8 event, struct sk_buff *skb)
skb               365 drivers/nfc/st21nfca/se.c 			skb->data, skb->len, 0);
skb               377 drivers/nfc/st21nfca/se.c 	kfree_skb(skb);
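The EVT_TRANSACTION handler above walks a tag/length/value layout: an AID tag and length, the AID bytes, then a parameters tag, length and bytes. A bounds-checked sketch of that walk; the tag constants here are hypothetical placeholders, and the real handler additionally checks against NFC_MIN_AID_LENGTH:

#include <stdio.h>

#define AID_TAG    0x81	/* hypothetical */
#define PARAMS_TAG 0x82	/* hypothetical */

static int parse_transaction(const unsigned char *p, size_t len)
{
	if (len < 2 || p[0] != AID_TAG)
		return -1;
	unsigned aid_len = p[1];
	if (len < 2 + aid_len + 2)
		return -1;
	const unsigned char *aid = p + 2;
	if (p[2 + aid_len] != PARAMS_TAG)
		return -1;
	unsigned params_len = p[2 + aid_len + 1];
	if (len < 2 + aid_len + 2 + params_len)
		return -1;
	printf("aid_len=%u params_len=%u first aid byte=%02x\n",
	       aid_len, params_len, aid_len ? aid[0] : 0);
	return 0;
}

int main(void)
{
	unsigned char evt[] = { AID_TAG, 2, 0xa0, 0x00, PARAMS_TAG, 1, 0x42 };
	return parse_transaction(evt, sizeof(evt));
}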
skb               173 drivers/nfc/st21nfca/st21nfca.h 				u8 event, struct sk_buff *skb);
skb               174 drivers/nfc/st21nfca/st21nfca.h int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb);
skb               177 drivers/nfc/st21nfca/st21nfca.h int st21nfca_im_send_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb);
skb               182 drivers/nfc/st21nfca/st21nfca.h 					u8 event, struct sk_buff *skb);
skb               184 drivers/nfc/st21nfca/st21nfca.h 					u8 event, struct sk_buff *skb);
skb               197 drivers/nfc/st21nfca/st21nfca.h 					 struct sk_buff *skb);
skb                90 drivers/nfc/st21nfca/vendor_cmds.c 	struct sk_buff *msg, *skb;
skb                96 drivers/nfc/st21nfca/vendor_cmds.c 			     data, data_len, &skb);
skb               101 drivers/nfc/st21nfca/vendor_cmds.c 					     HCI_DM_GET_INFO, skb->len);
skb               107 drivers/nfc/st21nfca/vendor_cmds.c 	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
skb               116 drivers/nfc/st21nfca/vendor_cmds.c 	kfree_skb(skb);
skb               125 drivers/nfc/st21nfca/vendor_cmds.c 	struct sk_buff *msg, *skb;
skb               131 drivers/nfc/st21nfca/vendor_cmds.c 			     data, data_len, &skb);
skb               136 drivers/nfc/st21nfca/vendor_cmds.c 					     HCI_DM_GET_DATA, skb->len);
skb               142 drivers/nfc/st21nfca/vendor_cmds.c 	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
skb               151 drivers/nfc/st21nfca/vendor_cmds.c 	kfree_skb(skb);
skb               187 drivers/nfc/st21nfca/vendor_cmds.c 	struct sk_buff *msg, *skb;
skb               194 drivers/nfc/st21nfca/vendor_cmds.c 	r = nfc_hci_get_param(hdev, param->gate, param->data, &skb);
skb               199 drivers/nfc/st21nfca/vendor_cmds.c 					     HCI_GET_PARAM, skb->len);
skb               205 drivers/nfc/st21nfca/vendor_cmds.c 	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
skb               214 drivers/nfc/st21nfca/vendor_cmds.c 	kfree_skb(skb);
skb               231 drivers/nfc/st21nfca/vendor_cmds.c 					 struct sk_buff *skb)
skb               237 drivers/nfc/st21nfca/vendor_cmds.c 		info->vendor_info.rx_skb = skb;
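The three vendor command handlers above share one shape: fetch a result buffer, build a reply, copy the result in, and free the result buffer on every exit path. A userspace sketch of that goto-style cleanup; get_info() and handle_cmd() are stand-ins, not kernel or netlink API:

#include <stdlib.h>
#include <string.h>

struct blob { unsigned char *data; size_t len; };

static int get_info(struct blob *out)	/* stand-in for the HCI query */
{
	out->len = 4;
	out->data = malloc(out->len);
	if (!out->data)
		return -1;
	memset(out->data, 0xab, out->len);
	return 0;
}

static int handle_cmd(unsigned char *reply, size_t cap, size_t *reply_len)
{
	struct blob skb;
	int r = get_info(&skb);
	if (r)
		return r;

	if (skb.len > cap) {		/* stand-in for nla_put() failing */
		r = -2;
		goto free_skb;
	}
	memcpy(reply, skb.data, skb.len);
	*reply_len = skb.len;
	r = 0;

free_skb:
	free(skb.data);			/* freed on success and failure alike */
	return r;
}

int main(void)
{
	unsigned char reply[16];
	size_t n;
	return handle_cmd(reply, sizeof(reply), &n);
}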
skb               918 drivers/nfc/st95hf/core.c 			      struct sk_buff *skb,
skb               936 drivers/nfc/st95hf/core.c 		len_data_to_tag = skb->len + 1;
skb               937 drivers/nfc/st95hf/core.c 		skb_put_u8(skb, stcontext->sendrcv_trflag);
skb               941 drivers/nfc/st95hf/core.c 		len_data_to_tag = skb->len;
skb               948 drivers/nfc/st95hf/core.c 	skb_push(skb, 3);
skb               949 drivers/nfc/st95hf/core.c 	skb->data[0] = ST95HF_COMMAND_SEND;
skb               950 drivers/nfc/st95hf/core.c 	skb->data[1] = SEND_RECEIVE_CMD;
skb               951 drivers/nfc/st95hf/core.c 	skb->data[2] = len_data_to_tag;
skb               957 drivers/nfc/st95hf/core.c 	if ((skb->data[3] == ISO14443A_RATS_REQ) &&
skb               972 drivers/nfc/st95hf/core.c 	rc = st95hf_spi_send(&stcontext->spicontext, skb->data,
skb               973 drivers/nfc/st95hf/core.c 			     skb->len,
skb               983 drivers/nfc/st95hf/core.c 	kfree_skb(skb);
skb              1002 drivers/nfc/st95hf/core.c 			      struct sk_buff *skb,
skb               645 drivers/nfc/trf7970a.c static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb,
skb               655 drivers/nfc/trf7970a.c 			     16, 1, skb->data, len, false);
skb               665 drivers/nfc/trf7970a.c 	t[1].tx_buf = skb->data;
skb               676 drivers/nfc/trf7970a.c 	skb_pull(skb, len);
skb               678 drivers/nfc/trf7970a.c 	if (skb->len > 0) {
skb               705 drivers/nfc/trf7970a.c 	struct sk_buff *skb = trf->tx_skb;
skb               729 drivers/nfc/trf7970a.c 	len = min(skb->len, len);
skb               733 drivers/nfc/trf7970a.c 	ret = trf7970a_transmit(trf, skb, len, &prefix, sizeof(prefix));
skb               740 drivers/nfc/trf7970a.c 	struct sk_buff *skb = trf->rx_skb;
skb               762 drivers/nfc/trf7970a.c 	if (fifo_bytes > skb_tailroom(skb)) {
skb               763 drivers/nfc/trf7970a.c 		skb = skb_copy_expand(skb, skb_headroom(skb),
skb               767 drivers/nfc/trf7970a.c 		if (!skb) {
skb               773 drivers/nfc/trf7970a.c 		trf->rx_skb = skb;
skb               777 drivers/nfc/trf7970a.c 				 skb_put(skb, fifo_bytes), fifo_bytes);
skb               784 drivers/nfc/trf7970a.c 	if ((trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T) && (skb->len == 1) &&
skb               786 drivers/nfc/trf7970a.c 		skb->data[0] >>= 4;
skb              1391 drivers/nfc/trf7970a.c static int trf7970a_per_cmd_config(struct trf7970a *trf, struct sk_buff *skb)
skb              1393 drivers/nfc/trf7970a.c 	u8 *req = skb->data;
skb              1460 drivers/nfc/trf7970a.c 			     struct sk_buff *skb, u16 timeout,
skb              1470 drivers/nfc/trf7970a.c 		trf->state, timeout, skb->len);
skb              1472 drivers/nfc/trf7970a.c 	if (skb->len > TRF7970A_TX_MAX)
skb              1511 drivers/nfc/trf7970a.c 		ret = trf7970a_per_cmd_config(trf, skb);
skb              1517 drivers/nfc/trf7970a.c 	trf->tx_skb = skb;
skb              1523 drivers/nfc/trf7970a.c 	len = skb->len;
skb              1545 drivers/nfc/trf7970a.c 	len = min_t(int, skb->len, TRF7970A_FIFO_SIZE);
skb              1552 drivers/nfc/trf7970a.c 	ret = trf7970a_transmit(trf, skb, len, prefix, sizeof(prefix));
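trf7970a_transmit() above can only feed the chip's FIFO a chunk at a time: it sends min(skb->len, FIFO size) bytes, skb_pull()s them off the front, and resumes from the FIFO-low interrupt. Sketch of that chunking loop, with the 127-byte FIFO depth and spi_send() as illustrative assumptions:

#include <stdio.h>
#include <string.h>

#define FIFO_SIZE 127	/* assumed FIFO depth, for illustration */

static void spi_send(const unsigned char *p, size_t n)	/* stand-in */
{
	printf("spi: %zu bytes\n", n);
}

int main(void)
{
	unsigned char frame[300];
	const unsigned char *data;
	size_t len;

	memset(frame, 0x55, sizeof(frame));
	data = frame;		/* plays skb->data */
	len = sizeof(frame);	/* plays skb->len  */

	while (len) {
		size_t chunk = len < FIFO_SIZE ? len : FIFO_SIZE;
		spi_send(data, chunk);
		data += chunk;	/* like skb_pull(skb, chunk) */
		len -= chunk;
		/* the real driver returns here and continues from the
		 * FIFO-low interrupt instead of looping synchronously */
	}
	return 0;
}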
skb               576 drivers/nvme/host/tcp.c static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
skb               584 drivers/nvme/host/tcp.c 	ret = skb_copy_bits(skb, *offset,
skb               633 drivers/nvme/host/tcp.c static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
skb               678 drivers/nvme/host/tcp.c 			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
skb               681 drivers/nvme/host/tcp.c 			ret = skb_copy_datagram_iter(skb, *offset,
skb               712 drivers/nvme/host/tcp.c 		struct sk_buff *skb, unsigned int *offset, size_t *len)
skb               720 drivers/nvme/host/tcp.c 	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
skb               750 drivers/nvme/host/tcp.c static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
skb               760 drivers/nvme/host/tcp.c 			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
skb               763 drivers/nvme/host/tcp.c 			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
skb               766 drivers/nvme/host/tcp.c 			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
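nvme_tcp_recv_skb() above drains each skb through a small receive state machine: PDU header, then payload, then data digest, carrying partial progress across skbs via the offset/length pair. A compact sketch of that dispatch; the element sizes are made up for illustration:

#include <stdio.h>

enum state { RECV_PDU, RECV_DATA, RECV_DDGST };

int main(void)
{
	enum state st = RECV_PDU;
	size_t need = 24;	/* bytes still missing for the current element */
	size_t len = 100;	/* bytes available in this "skb" */

	while (len) {
		size_t take = need < len ? need : len;	/* skb_copy_bits() */
		need -= take;
		len -= take;
		if (need)	/* element incomplete: wait for the next */
			break;	/* skb; state and progress are preserved  */
		switch (st) {
		case RECV_PDU:   st = RECV_DATA;  need = 72; break;
		case RECV_DATA:  st = RECV_DDGST; need = 4;  break;
		case RECV_DDGST: printf("request complete\n"); return 0;
		}
		printf("advanced to state %d\n", st);
	}
	return 0;
}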
skb               105 drivers/rpmsg/rpmsg_char.c 	struct sk_buff *skb;
skb               107 drivers/rpmsg/rpmsg_char.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb               108 drivers/rpmsg/rpmsg_char.c 	if (!skb)
skb               111 drivers/rpmsg/rpmsg_char.c 	skb_put_data(skb, buf, len);
skb               114 drivers/rpmsg/rpmsg_char.c 	skb_queue_tail(&eptdev->queue, skb);
skb               149 drivers/rpmsg/rpmsg_char.c 	struct sk_buff *skb;
skb               161 drivers/rpmsg/rpmsg_char.c 		skb = skb_dequeue(&eptdev->queue);
skb               162 drivers/rpmsg/rpmsg_char.c 		kfree_skb(skb);
skb               175 drivers/rpmsg/rpmsg_char.c 	struct sk_buff *skb;
skb               203 drivers/rpmsg/rpmsg_char.c 	skb = skb_dequeue(&eptdev->queue);
skb               205 drivers/rpmsg/rpmsg_char.c 	if (!skb)
skb               208 drivers/rpmsg/rpmsg_char.c 	use = min_t(size_t, iov_iter_count(to), skb->len);
skb               209 drivers/rpmsg/rpmsg_char.c 	if (copy_to_iter(skb->data, use, to) != use)
skb               212 drivers/rpmsg/rpmsg_char.c 	kfree_skb(skb);
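rpmsg_char above copies every received message into a fresh skb and queues it; read() then dequeues exactly one message and copies at most the caller's buffer size, silently discarding any tail. A single-slot sketch of that one-message-per-read contract (ept_cb() and ept_read() are stand-ins for the rx callback and read path):

#include <stdio.h>
#include <string.h>

static unsigned char slot[64];	/* stands in for the skb queue */
static size_t slot_len;

static void ept_cb(const void *buf, size_t len)
{
	if (len > sizeof(slot))
		return;			/* drop, like a failed alloc_skb() */
	memcpy(slot, buf, len);		/* skb_put_data() */
	slot_len = len;			/* skb_queue_tail() */
}

static size_t ept_read(void *to, size_t count)
{
	size_t use = count < slot_len ? count : slot_len;
	memcpy(to, slot, use);		/* copy_to_iter() */
	slot_len = 0;			/* kfree_skb(): the rest is gone */
	return use;
}

int main(void)
{
	char out[8];
	ept_cb("hello rpmsg", 11);
	printf("read %zu bytes\n", ept_read(out, sizeof(out)));
	return 0;
}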
skb               216 drivers/s390/net/ctcm_fsms.c 	struct sk_buff *skb;
skb               220 drivers/s390/net/ctcm_fsms.c 	while ((skb = skb_dequeue(q))) {
skb               221 drivers/s390/net/ctcm_fsms.c 		refcount_dec(&skb->users);
skb               222 drivers/s390/net/ctcm_fsms.c 		dev_kfree_skb_any(skb);
skb               251 drivers/s390/net/ctcm_fsms.c 	struct sk_buff *skb;
skb               268 drivers/s390/net/ctcm_fsms.c 	while ((skb = skb_dequeue(&ch->io_queue))) {
skb               270 drivers/s390/net/ctcm_fsms.c 		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
skb               275 drivers/s390/net/ctcm_fsms.c 		refcount_dec(&skb->users);
skb               276 drivers/s390/net/ctcm_fsms.c 		dev_kfree_skb_irq(skb);
skb               296 drivers/s390/net/ctcm_fsms.c 		while ((skb = skb_dequeue(&ch->collect_queue))) {
skb               297 drivers/s390/net/ctcm_fsms.c 			skb_copy_from_linear_data(skb,
skb               298 drivers/s390/net/ctcm_fsms.c 				skb_put(ch->trans_skb, skb->len), skb->len);
skb               300 drivers/s390/net/ctcm_fsms.c 			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
skb               301 drivers/s390/net/ctcm_fsms.c 			refcount_dec(&skb->users);
skb               302 drivers/s390/net/ctcm_fsms.c 			dev_kfree_skb_irq(skb);
skb               361 drivers/s390/net/ctcm_fsms.c 	struct sk_buff *skb = ch->trans_skb;
skb               362 drivers/s390/net/ctcm_fsms.c 	__u16 block_len = *((__u16 *)skb->data);
skb               401 drivers/s390/net/ctcm_fsms.c 			ctcmpc_dump_skb(skb, 0);
skb               403 drivers/s390/net/ctcm_fsms.c 		*((__u16 *)skb->data) = len;
skb               409 drivers/s390/net/ctcm_fsms.c 		*((__u16 *)skb->data) = block_len - 2;
skb               410 drivers/s390/net/ctcm_fsms.c 		ctcm_unpack_skb(ch, skb);
skb               413 drivers/s390/net/ctcm_fsms.c 	skb->data = ch->trans_skb_data;
skb               414 drivers/s390/net/ctcm_fsms.c 	skb_reset_tail_pointer(skb);
skb               415 drivers/s390/net/ctcm_fsms.c 	skb->len = 0;
skb               993 drivers/s390/net/ctcm_fsms.c 	struct sk_buff *skb;
skb              1015 drivers/s390/net/ctcm_fsms.c 	skb = skb_peek(&ch->io_queue);
skb              1016 drivers/s390/net/ctcm_fsms.c 	if (skb) {
skb              1020 drivers/s390/net/ctcm_fsms.c 		ch->ccw[4].count = skb->len;
skb              1021 drivers/s390/net/ctcm_fsms.c 		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
skb              1216 drivers/s390/net/ctcm_fsms.c 	struct sk_buff		*skb;
skb              1239 drivers/s390/net/ctcm_fsms.c 	while ((skb = skb_dequeue(&ch->io_queue))) {
skb              1241 drivers/s390/net/ctcm_fsms.c 		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
skb              1246 drivers/s390/net/ctcm_fsms.c 		refcount_dec(&skb->users);
skb              1247 drivers/s390/net/ctcm_fsms.c 		dev_kfree_skb_irq(skb);
skb              1276 drivers/s390/net/ctcm_fsms.c 	while ((skb = skb_dequeue(&ch->collect_queue))) {
skb              1277 drivers/s390/net/ctcm_fsms.c 		skb_put_data(ch->trans_skb, skb->data, skb->len);
skb              1279 drivers/s390/net/ctcm_fsms.c 			(skb_tail_pointer(ch->trans_skb) - skb->len);
skb              1281 drivers/s390/net/ctcm_fsms.c 		if (be16_to_cpu(skb->protocol) == ETH_P_SNAP)
skb              1290 drivers/s390/net/ctcm_fsms.c 		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
skb              1292 drivers/s390/net/ctcm_fsms.c 		ch->collect_len -= skb->len;
skb              1293 drivers/s390/net/ctcm_fsms.c 		data_space -= skb->len;
skb              1295 drivers/s390/net/ctcm_fsms.c 		priv->stats.tx_bytes += skb->len;
skb              1296 drivers/s390/net/ctcm_fsms.c 		refcount_dec(&skb->users);
skb              1297 drivers/s390/net/ctcm_fsms.c 		dev_kfree_skb_any(skb);
skb              1385 drivers/s390/net/ctcm_fsms.c 	struct sk_buff		*skb = ch->trans_skb;
skb              1395 drivers/s390/net/ctcm_fsms.c 	if (skb == NULL) {
skb              1428 drivers/s390/net/ctcm_fsms.c 			skb_put_data(new_skb, skb->data, block_len);
skb              1433 drivers/s390/net/ctcm_fsms.c 			skb_put_data(new_skb, skb->data, len);
skb              1757 drivers/s390/net/ctcm_fsms.c 	struct sk_buff *skb;
skb              1780 drivers/s390/net/ctcm_fsms.c 	skb = skb_dequeue(&wch->sweep_queue);
skb              1781 drivers/s390/net/ctcm_fsms.c 	if (!skb)
skb              1784 drivers/s390/net/ctcm_fsms.c 	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
skb              1787 drivers/s390/net/ctcm_fsms.c 		dev_kfree_skb_any(skb);
skb              1791 drivers/s390/net/ctcm_fsms.c 		refcount_inc(&skb->users);
skb              1792 drivers/s390/net/ctcm_fsms.c 		skb_queue_tail(&wch->io_queue, skb);
skb              1796 drivers/s390/net/ctcm_fsms.c 	wch->ccw[4].count = skb->len;
skb              1798 drivers/s390/net/ctcm_fsms.c 	header = (struct th_sweep *)skb->data;
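The ctcm FSM code above holds an extra reference (refcount_inc(&skb->users)) while a buffer sits on an I/O queue and drops it on completion, so the data cannot be reclaimed mid-transfer. A minimal get/put refcount sketch, not kernel API:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int users;
	unsigned char *data;
};

static void pkt_get(struct pkt *p) { p->users++; }

static void pkt_put(struct pkt *p)
{
	if (--p->users == 0) {		/* last reference frees the data */
		free(p->data);
		free(p);
		printf("freed\n");
	}
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	p->users = 1;			/* caller's reference */
	p->data = malloc(32);

	pkt_get(p);			/* queued for I/O */
	pkt_put(p);			/* caller is done */
	pkt_put(p);			/* I/O completion: actually frees */
	return 0;
}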
skb                86 drivers/s390/net/ctcm_main.c 		struct sk_buff *skb;
skb               145 drivers/s390/net/ctcm_main.c 		skb = dev_alloc_skb(pskb->len);
skb               146 drivers/s390/net/ctcm_main.c 		if (!skb) {
skb               156 drivers/s390/net/ctcm_main.c 		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
skb               158 drivers/s390/net/ctcm_main.c 		skb_reset_mac_header(skb);
skb               159 drivers/s390/net/ctcm_main.c 		skb->dev = pskb->dev;
skb               160 drivers/s390/net/ctcm_main.c 		skb->protocol = pskb->protocol;
skb               162 drivers/s390/net/ctcm_main.c 		skblen = skb->len;
skb               169 drivers/s390/net/ctcm_main.c 		netif_rx_ni(skb);
skb               465 drivers/s390/net/ctcm_main.c static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
skb               481 drivers/s390/net/ctcm_main.c 		int l = skb->len + LL_HEADER_LENGTH;
skb               487 drivers/s390/net/ctcm_main.c 			refcount_inc(&skb->users);
skb               489 drivers/s390/net/ctcm_main.c 			header.type = be16_to_cpu(skb->protocol);
skb               491 drivers/s390/net/ctcm_main.c 			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
skb               493 drivers/s390/net/ctcm_main.c 			skb_queue_tail(&ch->collect_queue, skb);
skb               504 drivers/s390/net/ctcm_main.c 	refcount_inc(&skb->users);
skb               505 drivers/s390/net/ctcm_main.c 	ch->prof.txlen += skb->len;
skb               506 drivers/s390/net/ctcm_main.c 	header.length = skb->len + LL_HEADER_LENGTH;
skb               507 drivers/s390/net/ctcm_main.c 	header.type = be16_to_cpu(skb->protocol);
skb               509 drivers/s390/net/ctcm_main.c 	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
skb               510 drivers/s390/net/ctcm_main.c 	block_len = skb->len + 2;
skb               511 drivers/s390/net/ctcm_main.c 	*((__u16 *)skb_push(skb, 2)) = block_len;
skb               517 drivers/s390/net/ctcm_main.c 	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
skb               519 drivers/s390/net/ctcm_main.c 		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
skb               521 drivers/s390/net/ctcm_main.c 			refcount_dec(&skb->users);
skb               522 drivers/s390/net/ctcm_main.c 			skb_pull(skb, LL_HEADER_LENGTH + 2);
skb               526 drivers/s390/net/ctcm_main.c 			skb_put_data(nskb, skb->data, skb->len);
skb               528 drivers/s390/net/ctcm_main.c 			refcount_dec(&skb->users);
skb               529 drivers/s390/net/ctcm_main.c 			dev_kfree_skb_irq(skb);
skb               530 drivers/s390/net/ctcm_main.c 			skb = nskb;
skb               535 drivers/s390/net/ctcm_main.c 	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
skb               546 drivers/s390/net/ctcm_main.c 			refcount_dec(&skb->users);
skb               547 drivers/s390/net/ctcm_main.c 			skb_pull(skb, LL_HEADER_LENGTH + 2);
skb               554 drivers/s390/net/ctcm_main.c 		ch->ccw[1].count = skb->len;
skb               555 drivers/s390/net/ctcm_main.c 		skb_copy_from_linear_data(skb,
skb               556 drivers/s390/net/ctcm_main.c 				skb_put(ch->trans_skb, skb->len), skb->len);
skb               557 drivers/s390/net/ctcm_main.c 		refcount_dec(&skb->users);
skb               558 drivers/s390/net/ctcm_main.c 		dev_kfree_skb_irq(skb);
skb               561 drivers/s390/net/ctcm_main.c 		skb_queue_tail(&ch->io_queue, skb);
skb               585 drivers/s390/net/ctcm_main.c 		skb_pull(skb, LL_HEADER_LENGTH + 2);
skb               590 drivers/s390/net/ctcm_main.c 		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
skb               663 drivers/s390/net/ctcm_main.c static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
skb               682 drivers/s390/net/ctcm_main.c 		refcount_inc(&skb->users);
skb               690 drivers/s390/net/ctcm_main.c 		p_header->pdu_offset = skb->len;
skb               693 drivers/s390/net/ctcm_main.c 		if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
skb               699 drivers/s390/net/ctcm_main.c 		memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
skb               704 drivers/s390/net/ctcm_main.c 				__func__, dev->name, skb->len);
skb               705 drivers/s390/net/ctcm_main.c 		CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
skb               707 drivers/s390/net/ctcm_main.c 		skb_queue_tail(&ch->collect_queue, skb);
skb               708 drivers/s390/net/ctcm_main.c 		ch->collect_len += skb->len;
skb               719 drivers/s390/net/ctcm_main.c 	refcount_inc(&skb->users);
skb               725 drivers/s390/net/ctcm_main.c 	hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
skb               727 drivers/s390/net/ctcm_main.c 		nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
skb               731 drivers/s390/net/ctcm_main.c 			skb_put_data(nskb, skb->data, skb->len);
skb               733 drivers/s390/net/ctcm_main.c 			refcount_dec(&skb->users);
skb               734 drivers/s390/net/ctcm_main.c 			dev_kfree_skb_irq(skb);
skb               735 drivers/s390/net/ctcm_main.c 			skb = nskb;
skb               744 drivers/s390/net/ctcm_main.c 	p_header->pdu_offset = skb->len;
skb               748 drivers/s390/net/ctcm_main.c 	if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
skb               753 drivers/s390/net/ctcm_main.c 	memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH);
skb               759 drivers/s390/net/ctcm_main.c 		skb_queue_tail(&ch->collect_queue, skb);
skb               760 drivers/s390/net/ctcm_main.c 		ch->collect_len += skb->len;
skb               761 drivers/s390/net/ctcm_main.c 		skb = skb_dequeue(&ch->collect_queue);
skb               762 drivers/s390/net/ctcm_main.c 		ch->collect_len -= skb->len;
skb               766 drivers/s390/net/ctcm_main.c 	p_header = (struct pdu *)skb->data;
skb               769 drivers/s390/net/ctcm_main.c 	ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;
skb               786 drivers/s390/net/ctcm_main.c 	memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH);
skb               792 drivers/s390/net/ctcm_main.c 				__func__, dev->name, skb->len);
skb               793 drivers/s390/net/ctcm_main.c 	CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
skb               795 drivers/s390/net/ctcm_main.c 	ch->ccw[4].count = skb->len;
skb               796 drivers/s390/net/ctcm_main.c 	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
skb               811 drivers/s390/net/ctcm_main.c 		ch->ccw[1].count = skb->len;
skb               812 drivers/s390/net/ctcm_main.c 		skb_put_data(ch->trans_skb, skb->data, skb->len);
skb               813 drivers/s390/net/ctcm_main.c 		refcount_dec(&skb->users);
skb               814 drivers/s390/net/ctcm_main.c 		dev_kfree_skb_irq(skb);
skb               822 drivers/s390/net/ctcm_main.c 		skb_queue_tail(&ch->io_queue, skb);
skb               846 drivers/s390/net/ctcm_main.c 		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
skb               857 drivers/s390/net/ctcm_main.c 	refcount_dec(&skb->users);
skb               858 drivers/s390/net/ctcm_main.c 	dev_kfree_skb_any(skb);
skb               877 drivers/s390/net/ctcm_main.c static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
skb               881 drivers/s390/net/ctcm_main.c 	if (skb == NULL) {
skb               888 drivers/s390/net/ctcm_main.c 	if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
skb               892 drivers/s390/net/ctcm_main.c 		dev_kfree_skb(skb);
skb               903 drivers/s390/net/ctcm_main.c 		dev_kfree_skb(skb);
skb               914 drivers/s390/net/ctcm_main.c 	if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
skb               920 drivers/s390/net/ctcm_main.c static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
skb               930 drivers/s390/net/ctcm_main.c 	if (skb == NULL) {
skb               937 drivers/s390/net/ctcm_main.c 	if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
skb               943 drivers/s390/net/ctcm_main.c 		CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
skb               945 drivers/s390/net/ctcm_main.c 		len =  skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
skb               953 drivers/s390/net/ctcm_main.c 			dev_kfree_skb_any(skb);
skb               960 drivers/s390/net/ctcm_main.c 		newskb->protocol = skb->protocol;
skb               962 drivers/s390/net/ctcm_main.c 		skb_put_data(newskb, skb->data, skb->len);
skb               963 drivers/s390/net/ctcm_main.c 		dev_kfree_skb_any(skb);
skb               964 drivers/s390/net/ctcm_main.c 		skb = newskb;
skb               974 drivers/s390/net/ctcm_main.c 		dev_kfree_skb_any(skb);
skb               988 drivers/s390/net/ctcm_main.c 		dev_kfree_skb_any(skb);
skb               997 drivers/s390/net/ctcm_main.c 	if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
skb              1001 drivers/s390/net/ctcm_main.c 		dev_kfree_skb_any(skb);
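ctcm_transmit_skb() above tests whether the buffer, plus the header it is about to push, would reach addresses the channel cannot DMA to (anything at or above 2 GiB, hence the >> 31) and bounces it into a fresh GFP_DMA allocation if so. Sketch of just the address test:

#include <stdio.h>

static int needs_bounce(unsigned long buf_end, unsigned long hdr_len)
{
	/* any bit at or above 2^31 means the device can't reach it */
	return ((buf_end + hdr_len) >> 31) != 0;
}

int main(void)
{
	printf("%d\n", needs_bounce(0x7ffffff0UL, 8));	/* 0: still below 2G */
	printf("%d\n", needs_bounce(0x7ffffffcUL, 8));	/* 1: would cross   */
	return 0;
}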
skb               228 drivers/s390/net/ctcm_mpc.c void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
skb               230 drivers/s390/net/ctcm_mpc.c 	__u8 *p = skb->data;
skb               233 drivers/s390/net/ctcm_mpc.c 	int bl = skb->len;
skb               243 drivers/s390/net/ctcm_mpc.c 	ctcm_pr_debug("skb len=%d \n", skb->len);
skb               244 drivers/s390/net/ctcm_mpc.c 	if (skb->len > 2) {
skb              1052 drivers/s390/net/ctcm_mpc.c 	struct sk_buff *skb;
skb              1143 drivers/s390/net/ctcm_mpc.c 			skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC);
skb              1145 drivers/s390/net/ctcm_mpc.c 			if (!skb) {
skb              1153 drivers/s390/net/ctcm_mpc.c 			skb_put_data(skb, pskb->data, new_len);
skb              1155 drivers/s390/net/ctcm_mpc.c 			skb_reset_mac_header(skb);
skb              1156 drivers/s390/net/ctcm_mpc.c 			skb->dev = pskb->dev;
skb              1157 drivers/s390/net/ctcm_mpc.c 			skb->protocol = pskb->protocol;
skb              1158 drivers/s390/net/ctcm_mpc.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1159 drivers/s390/net/ctcm_mpc.c 			*((__u32 *) skb_push(skb, 4)) = ch->pdu_seq;
skb              1167 drivers/s390/net/ctcm_mpc.c 					(unsigned long)skb, skb->len);
skb              1170 drivers/s390/net/ctcm_mpc.c 				ctcmpc_dump32((char *)skb->data, skb->len);
skb              1173 drivers/s390/net/ctcm_mpc.c 			skblen = skb->len;
skb              1174 drivers/s390/net/ctcm_mpc.c 			sendrc = netif_rx(skb);
skb              1186 drivers/s390/net/ctcm_mpc.c 		mpcginfo->skb = pskb;
skb              1239 drivers/s390/net/ctcm_mpc.c 	struct sk_buff	  *skb;
skb              1248 drivers/s390/net/ctcm_mpc.c 			(skb = skb_dequeue(&ch->io_queue))) {
skb              1249 drivers/s390/net/ctcm_mpc.c 		ctcmpc_unpack_skb(ch, skb);
skb              1258 drivers/s390/net/ctcm_mpc.c 		if (skb == skb_peek(&ch->io_queue))
skb              2066 drivers/s390/net/ctcm_mpc.c 	struct sk_buff   *skb;
skb              2108 drivers/s390/net/ctcm_mpc.c 		skb = __dev_alloc_skb(new_len, GFP_ATOMIC);
skb              2110 drivers/s390/net/ctcm_mpc.c 		if (skb == NULL) {
skb              2119 drivers/s390/net/ctcm_mpc.c 		skb_put_data(skb, qllcptr, new_len);
skb              2122 drivers/s390/net/ctcm_mpc.c 		if (skb_headroom(skb) < 4) {
skb              2126 drivers/s390/net/ctcm_mpc.c 			dev_kfree_skb_any(skb);
skb              2130 drivers/s390/net/ctcm_mpc.c 		*((__u32 *)skb_push(skb, 4)) =
skb              2139 drivers/s390/net/ctcm_mpc.c 		skb_reset_mac_header(skb);
skb              2140 drivers/s390/net/ctcm_mpc.c 		skb->dev = dev;
skb              2141 drivers/s390/net/ctcm_mpc.c 		skb->protocol = htons(ETH_P_SNAP);
skb              2142 drivers/s390/net/ctcm_mpc.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2144 drivers/s390/net/ctcm_mpc.c 		CTCM_D3_DUMP(skb->data, (sizeof(struct qllc) + 4));
skb              2146 drivers/s390/net/ctcm_mpc.c 		netif_rx(skb);
skb               151 drivers/s390/net/ctcm_mpc.h 	struct sk_buff	*skb;
skb               217 drivers/s390/net/ctcm_mpc.h void ctcmpc_dump_skb(struct sk_buff *skb, int offset);
skb               219 drivers/s390/net/ctcm_mpc.h static inline void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
skb              1522 drivers/s390/net/lcs.c __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
skb              1529 drivers/s390/net/lcs.c 	if (skb == NULL) {
skb              1535 drivers/s390/net/lcs.c 		dev_kfree_skb(skb);
skb              1541 drivers/s390/net/lcs.c 	if (skb->protocol == htons(ETH_P_IPV6)) {
skb              1542 drivers/s390/net/lcs.c 		dev_kfree_skb(skb);
skb              1549 drivers/s390/net/lcs.c 	    skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
skb              1565 drivers/s390/net/lcs.c 	card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
skb              1569 drivers/s390/net/lcs.c 	skb_copy_from_linear_data(skb, header + 1, skb->len);
skb              1571 drivers/s390/net/lcs.c 	card->stats.tx_bytes += skb->len;
skb              1573 drivers/s390/net/lcs.c 	dev_kfree_skb(skb);
skb              1585 drivers/s390/net/lcs.c lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1592 drivers/s390/net/lcs.c 	rc = __lcs_start_xmit(card, skb, dev);
skb              1757 drivers/s390/net/lcs.c 	struct sk_buff *skb;
skb              1765 drivers/s390/net/lcs.c 	skb = dev_alloc_skb(skb_len);
skb              1766 drivers/s390/net/lcs.c 	if (skb == NULL) {
skb              1773 drivers/s390/net/lcs.c 	skb_put_data(skb, skb_data, skb_len);
skb              1774 drivers/s390/net/lcs.c 	skb->protocol =	card->lan_type_trans(skb, card->dev);
skb              1777 drivers/s390/net/lcs.c 	if (skb->protocol == htons(ETH_P_802_2))
skb              1778 drivers/s390/net/lcs.c 		*((__u32 *)skb->cb) = ++card->pkt_seq;
skb              1779 drivers/s390/net/lcs.c 	netif_rx(skb);
skb               309 drivers/s390/net/lcs.h 	__be16 (*lan_type_trans)(struct sk_buff *skb,
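__lcs_start_xmit() above coalesces outgoing frames into one I/O buffer, prefixing each with a small header and advancing the buffer's fill count past header plus payload. A sketch with a two-byte stand-in header (the real struct lcs_header differs):

#include <stdio.h>
#include <string.h>

struct iobuf { unsigned char data[512]; size_t count; };

static int append_frame(struct iobuf *b, const void *pkt, size_t len)
{
	size_t need = sizeof(unsigned short) + len;
	if (b->count + need > sizeof(b->data))
		return -1;		/* caller ships the buffer first */
	unsigned short hdr = (unsigned short)len;	/* stand-in header */
	memcpy(b->data + b->count, &hdr, sizeof(hdr));
	memcpy(b->data + b->count + sizeof(hdr), pkt, len);
	b->count += need;
	return 0;
}

int main(void)
{
	struct iobuf b = { .count = 0 };
	append_frame(&b, "abc", 3);
	append_frame(&b, "defgh", 5);
	printf("buffer now holds %zu bytes\n", b.count);
	return 0;
}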
skb               627 drivers/s390/net/netiucv.c 		struct sk_buff *skb;
skb               644 drivers/s390/net/netiucv.c 		skb = dev_alloc_skb(pskb->len);
skb               645 drivers/s390/net/netiucv.c 		if (!skb) {
skb               651 drivers/s390/net/netiucv.c 		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
skb               653 drivers/s390/net/netiucv.c 		skb_reset_mac_header(skb);
skb               654 drivers/s390/net/netiucv.c 		skb->dev = pskb->dev;
skb               655 drivers/s390/net/netiucv.c 		skb->protocol = pskb->protocol;
skb               658 drivers/s390/net/netiucv.c 		privptr->stats.rx_bytes += skb->len;
skb               663 drivers/s390/net/netiucv.c 		netif_rx_ni(skb);
skb               716 drivers/s390/net/netiucv.c 	struct sk_buff *skb;
skb               731 drivers/s390/net/netiucv.c 		if ((skb = skb_dequeue(&conn->commit_queue))) {
skb               732 drivers/s390/net/netiucv.c 			refcount_dec(&skb->users);
skb               736 drivers/s390/net/netiucv.c 					(skb->len - NETIUCV_HDRLEN
skb               739 drivers/s390/net/netiucv.c 			dev_kfree_skb_any(skb);
skb               746 drivers/s390/net/netiucv.c 	while ((skb = skb_dequeue(&conn->collect_queue))) {
skb               747 drivers/s390/net/netiucv.c 		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
skb               749 drivers/s390/net/netiucv.c 		skb_copy_from_linear_data(skb,
skb               750 drivers/s390/net/netiucv.c 					  skb_put(conn->tx_buff, skb->len),
skb               751 drivers/s390/net/netiucv.c 					  skb->len);
skb               752 drivers/s390/net/netiucv.c 		txbytes += skb->len;
skb               755 drivers/s390/net/netiucv.c 		refcount_dec(&skb->users);
skb               756 drivers/s390/net/netiucv.c 		dev_kfree_skb_any(skb);
skb               944 drivers/s390/net/netiucv.c 	struct sk_buff *skb;
skb               946 drivers/s390/net/netiucv.c 	while ((skb = skb_dequeue(q))) {
skb               947 drivers/s390/net/netiucv.c 		refcount_dec(&skb->users);
skb               948 drivers/s390/net/netiucv.c 		dev_kfree_skb_any(skb);
skb              1148 drivers/s390/net/netiucv.c 				struct sk_buff *skb)
skb              1156 drivers/s390/net/netiucv.c 		int l = skb->len + NETIUCV_HDRLEN;
skb              1165 drivers/s390/net/netiucv.c 			refcount_inc(&skb->users);
skb              1166 drivers/s390/net/netiucv.c 			skb_queue_tail(&conn->collect_queue, skb);
skb              1172 drivers/s390/net/netiucv.c 		struct sk_buff *nskb = skb;
skb              1177 drivers/s390/net/netiucv.c 		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
skb              1180 drivers/s390/net/netiucv.c 		if (hi || (skb_tailroom(skb) < 2)) {
skb              1181 drivers/s390/net/netiucv.c 			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
skb              1189 drivers/s390/net/netiucv.c 				skb_put_data(nskb, skb->data, skb->len);
skb              1209 drivers/s390/net/netiucv.c 		conn->prof.txlen += skb->len;
skb              1227 drivers/s390/net/netiucv.c 				skb_pull(skb, NETIUCV_HDRLEN);
skb              1228 drivers/s390/net/netiucv.c 				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
skb              1233 drivers/s390/net/netiucv.c 				dev_kfree_skb(skb);
skb              1355 drivers/s390/net/netiucv.c static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
skb              1364 drivers/s390/net/netiucv.c 	if (skb == NULL) {
skb              1369 drivers/s390/net/netiucv.c 	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
skb              1372 drivers/s390/net/netiucv.c 		dev_kfree_skb(skb);
skb              1382 drivers/s390/net/netiucv.c 		dev_kfree_skb(skb);
skb              1394 drivers/s390/net/netiucv.c 	rc = netiucv_transmit_skb(privptr->conn, skb);
skb               759 drivers/s390/net/qeth_core.h 	int (*data_cb)(struct sk_buff *skb);
skb               907 drivers/s390/net/qeth_core.h static inline int qeth_get_ip_version(struct sk_buff *skb)
skb               909 drivers/s390/net/qeth_core.h 	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
skb               925 drivers/s390/net/qeth_core.h static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
skb               927 drivers/s390/net/qeth_core.h 	u8 *addr = eth_hdr(skb)->h_dest;
skb               935 drivers/s390/net/qeth_core.h static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
skb               937 drivers/s390/net/qeth_core.h 	struct dst_entry *dst = skb_dst(skb);
skb               946 drivers/s390/net/qeth_core.h static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
skb               951 drivers/s390/net/qeth_core.h 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               954 drivers/s390/net/qeth_core.h 		skb->ip_summed = CHECKSUM_NONE;
skb               958 drivers/s390/net/qeth_core.h static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
skb               961 drivers/s390/net/qeth_core.h 	if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
skb               962 drivers/s390/net/qeth_core.h 	    (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
skb               999 drivers/s390/net/qeth_core.h int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
skb              1072 drivers/s390/net/qeth_core.h unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
skb              1074 drivers/s390/net/qeth_core.h 			struct sk_buff *skb, struct qeth_hdr *hdr,
skb              1087 drivers/s390/net/qeth_core.h netdev_features_t qeth_features_check(struct sk_buff *skb,
skb              1091 drivers/s390/net/qeth_core.h u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              1097 drivers/s390/net/qeth_core.h int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
skb              1100 drivers/s390/net/qeth_core.h 				  struct qeth_hdr *hdr, struct sk_buff *skb,
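qeth_get_ip_version() above classifies traffic by the VLAN-aware ethertype rather than by skb->protocol. The same mapping in plain C, using the standard ethertype values:

#include <stdio.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

static int ip_version(unsigned short ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:   return 4;
	case ETH_P_IPV6: return 6;
	default:         return 0;	/* non-IP traffic */
	}
}

int main(void)
{
	printf("%d %d %d\n", ip_version(0x0800), ip_version(0x86DD),
	       ip_version(0x0806));	/* ARP -> 0 */
	return 0;
}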
skb              1081 drivers/s390/net/qeth_core_main.c 	struct sk_buff *skb;
skb              1083 drivers/s390/net/qeth_core_main.c 	skb_queue_walk(&buf->skb_list, skb) {
skb              1085 drivers/s390/net/qeth_core_main.c 		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
skb              1086 drivers/s390/net/qeth_core_main.c 		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
skb              1087 drivers/s390/net/qeth_core_main.c 			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
skb              1095 drivers/s390/net/qeth_core_main.c 	struct sk_buff *skb;
skb              1109 drivers/s390/net/qeth_core_main.c 	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
skb              1110 drivers/s390/net/qeth_core_main.c 		unsigned int bytes = qdisc_pkt_len(skb);
skb              1111 drivers/s390/net/qeth_core_main.c 		bool is_tso = skb_is_gso(skb);
skb              1114 drivers/s390/net/qeth_core_main.c 		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
skb              1120 drivers/s390/net/qeth_core_main.c 			if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1122 drivers/s390/net/qeth_core_main.c 			if (skb_is_nonlinear(skb))
skb              1130 drivers/s390/net/qeth_core_main.c 		napi_consume_skb(skb, budget);
skb              3532 drivers/s390/net/qeth_core_main.c int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
skb              3534 drivers/s390/net/qeth_core_main.c 	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
skb              3540 drivers/s390/net/qeth_core_main.c 		switch (qeth_get_ip_version(skb)) {
skb              3542 drivers/s390/net/qeth_core_main.c 			tos = ipv4_get_dsfield(ip_hdr(skb));
skb              3545 drivers/s390/net/qeth_core_main.c 			tos = ipv6_get_dsfield(ipv6_hdr(skb));
skb              3562 drivers/s390/net/qeth_core_main.c 		if (skb->priority > 5)
skb              3564 drivers/s390/net/qeth_core_main.c 		return ~skb->priority >> 1 & 3;
skb              3584 drivers/s390/net/qeth_core_main.c static int qeth_get_elements_for_frags(struct sk_buff *skb)
skb              3588 drivers/s390/net/qeth_core_main.c 	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
skb              3589 drivers/s390/net/qeth_core_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
skb              3607 drivers/s390/net/qeth_core_main.c unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
skb              3609 drivers/s390/net/qeth_core_main.c 	unsigned int elements = qeth_get_elements_for_frags(skb);
skb              3610 drivers/s390/net/qeth_core_main.c 	addr_t end = (addr_t)skb->data + skb_headlen(skb);
skb              3611 drivers/s390/net/qeth_core_main.c 	addr_t start = (addr_t)skb->data + data_offset;
skb              3638 drivers/s390/net/qeth_core_main.c 			      struct sk_buff *skb, struct qeth_hdr **hdr,
skb              3650 drivers/s390/net/qeth_core_main.c 	start = (addr_t)skb->data - hdr_len;
skb              3651 drivers/s390/net/qeth_core_main.c 	end = (addr_t)skb->data;
skb              3657 drivers/s390/net/qeth_core_main.c 		if (skb_is_gso(skb))
skb              3658 drivers/s390/net/qeth_core_main.c 			__elements = 1 + qeth_count_elements(skb, proto_len);
skb              3660 drivers/s390/net/qeth_core_main.c 			__elements = qeth_count_elements(skb, 0);
skb              3661 drivers/s390/net/qeth_core_main.c 	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
skb              3664 drivers/s390/net/qeth_core_main.c 		__elements = 1 + qeth_count_elements(skb, 0);
skb              3668 drivers/s390/net/qeth_core_main.c 		__elements = 1 + qeth_count_elements(skb, proto_len);
skb              3673 drivers/s390/net/qeth_core_main.c 		if (!skb_is_nonlinear(skb)) {
skb              3676 drivers/s390/net/qeth_core_main.c 					 max_elements, __elements, skb->len);
skb              3680 drivers/s390/net/qeth_core_main.c 		rc = skb_linearize(skb);
skb              3694 drivers/s390/net/qeth_core_main.c 		*hdr = skb_push(skb, hdr_len);
skb              3704 drivers/s390/net/qeth_core_main.c 	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
skb              3731 drivers/s390/net/qeth_core_main.c static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
skb              3737 drivers/s390/net/qeth_core_main.c 	int length = skb_headlen(skb) - offset;
skb              3738 drivers/s390/net/qeth_core_main.c 	char *data = skb->data + offset;
skb              3751 drivers/s390/net/qeth_core_main.c 			if (length || skb_is_nonlinear(skb))
skb              3767 drivers/s390/net/qeth_core_main.c 	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
skb              3768 drivers/s390/net/qeth_core_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
skb              3803 drivers/s390/net/qeth_core_main.c 				     struct sk_buff *skb, struct qeth_hdr *hdr,
skb              3809 drivers/s390/net/qeth_core_main.c 	__skb_queue_tail(&buf->skb_list, skb);
skb              3820 drivers/s390/net/qeth_core_main.c 		buf->is_header[element] = ((void *)hdr != skb->data);
skb              3824 drivers/s390/net/qeth_core_main.c 	return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
skb              3828 drivers/s390/net/qeth_core_main.c 		       struct sk_buff *skb, unsigned int elements,
skb              3833 drivers/s390/net/qeth_core_main.c 	unsigned int bytes = qdisc_pkt_len(skb);
skb              3839 drivers/s390/net/qeth_core_main.c 	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
skb              3848 drivers/s390/net/qeth_core_main.c 	    !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
skb              3868 drivers/s390/net/qeth_core_main.c 	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
skb              3886 drivers/s390/net/qeth_core_main.c 			struct sk_buff *skb, struct qeth_hdr *hdr,
skb              3914 drivers/s390/net/qeth_core_main.c 	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
skb              3954 drivers/s390/net/qeth_core_main.c 	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
skb              4002 drivers/s390/net/qeth_core_main.c 			      unsigned int payload_len, struct sk_buff *skb,
skb              4013 drivers/s390/net/qeth_core_main.c 	ext->mss = skb_shinfo(skb)->gso_size;
skb              4017 drivers/s390/net/qeth_core_main.c int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
skb              4020 drivers/s390/net/qeth_core_main.c 				  struct qeth_hdr *hdr, struct sk_buff *skb,
skb              4024 drivers/s390/net/qeth_core_main.c 	unsigned int frame_len = skb->len;
skb              4025 drivers/s390/net/qeth_core_main.c 	bool is_tso = skb_is_gso(skb);
skb              4034 drivers/s390/net/qeth_core_main.c 		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb              4040 drivers/s390/net/qeth_core_main.c 	rc = skb_cow_head(skb, hw_hdr_len);
skb              4044 drivers/s390/net/qeth_core_main.c 	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
skb              4054 drivers/s390/net/qeth_core_main.c 	fill_header(queue, hdr, skb, ipv, frame_len);
skb              4057 drivers/s390/net/qeth_core_main.c 				  frame_len - proto_len, skb, proto_len);
skb              4060 drivers/s390/net/qeth_core_main.c 		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
skb              4064 drivers/s390/net/qeth_core_main.c 		skb_orphan(skb);
skb              4065 drivers/s390/net/qeth_core_main.c 		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
skb              5014 drivers/s390/net/qeth_core_main.c 				 struct sk_buff *skb, int offset, int data_len)
skb              5020 drivers/s390/net/qeth_core_main.c 	if (!skb->len) {
skb              5021 drivers/s390/net/qeth_core_main.c 		unsigned int linear = min(data_len, skb_tailroom(skb));
skb              5023 drivers/s390/net/qeth_core_main.c 		skb_put_data(skb, element->addr + offset, linear);
skb              5031 drivers/s390/net/qeth_core_main.c 	next_frag = skb_shinfo(skb)->nr_frags;
skb              5033 drivers/s390/net/qeth_core_main.c 	skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
skb              5049 drivers/s390/net/qeth_core_main.c 	struct sk_buff *skb;
skb              5093 drivers/s390/net/qeth_core_main.c 		skb = qethbuffer->rx_skb;
skb              5098 drivers/s390/net/qeth_core_main.c 		skb = napi_alloc_skb(&card->napi, linear + headroom);
skb              5100 drivers/s390/net/qeth_core_main.c 	if (!skb)
skb              5103 drivers/s390/net/qeth_core_main.c 		skb_reserve(skb, headroom);
skb              5110 drivers/s390/net/qeth_core_main.c 				qeth_create_skb_frag(element, skb, offset,
skb              5113 drivers/s390/net/qeth_core_main.c 				skb_put_data(skb, data_ptr, data_len);
skb              5120 drivers/s390/net/qeth_core_main.c 				dev_kfree_skb_any(skb);
skb              5136 drivers/s390/net/qeth_core_main.c 				   skb_shinfo(skb)->nr_frags);
skb              5138 drivers/s390/net/qeth_core_main.c 	return skb;
skb              6188 drivers/s390/net/qeth_core_main.c netdev_features_t qeth_features_check(struct sk_buff *skb,
skb              6199 drivers/s390/net/qeth_core_main.c 	if (netif_needs_gso(skb, features)) {
skb              6201 drivers/s390/net/qeth_core_main.c 		unsigned int doffset = skb->data - skb_mac_header(skb);
skb              6202 drivers/s390/net/qeth_core_main.c 		unsigned int hsize = skb_shinfo(skb)->gso_size;
skb              6203 drivers/s390/net/qeth_core_main.c 		unsigned int hroom = skb_headroom(skb);
skb              6210 drivers/s390/net/qeth_core_main.c 	return vlan_features_check(skb, features);
skb              6239 drivers/s390/net/qeth_core_main.c u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
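qeth_count_elements() above sizes a transmit buffer in qdio elements, where data that crosses a page boundary needs one element per page touched. Sketch of the page-span arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned int pages_spanned(unsigned long start, unsigned long len)
{
	if (!len)
		return 0;
	unsigned long end = start + len - 1;	/* last byte touched */
	return (unsigned int)((end >> PAGE_SHIFT) - (start >> PAGE_SHIFT)) + 1;
}

int main(void)
{
	printf("%u\n", pages_spanned(0x1000, 0x1000));	/* 1: exactly one page  */
	printf("%u\n", pages_spanned(0x1ff0, 0x20));	/* 2: crosses a boundary */
	return 0;
}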
skb               166 drivers/s390/net/qeth_l2_main.c 				struct qeth_hdr *hdr, struct sk_buff *skb,
skb               169 drivers/s390/net/qeth_l2_main.c 	int cast_type = qeth_get_ether_cast_type(skb);
skb               170 drivers/s390/net/qeth_l2_main.c 	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
skb               174 drivers/s390/net/qeth_l2_main.c 	if (skb_is_gso(skb)) {
skb               178 drivers/s390/net/qeth_l2_main.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               179 drivers/s390/net/qeth_l2_main.c 			qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
skb               305 drivers/s390/net/qeth_l2_main.c 	struct sk_buff *skb;
skb               312 drivers/s390/net/qeth_l2_main.c 		skb = qeth_core_get_next_skb(card,
skb               315 drivers/s390/net/qeth_l2_main.c 		if (!skb) {
skb               321 drivers/s390/net/qeth_l2_main.c 			skb->protocol = eth_type_trans(skb, skb->dev);
skb               322 drivers/s390/net/qeth_l2_main.c 			qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
skb               323 drivers/s390/net/qeth_l2_main.c 			len = skb->len;
skb               324 drivers/s390/net/qeth_l2_main.c 			napi_gro_receive(&card->napi, skb);
skb               328 drivers/s390/net/qeth_l2_main.c 				skb_push(skb, sizeof(struct qeth_hdr));
skb               329 drivers/s390/net/qeth_l2_main.c 				skb_copy_to_linear_data(skb, hdr,
skb               331 drivers/s390/net/qeth_l2_main.c 				len = skb->len;
skb               332 drivers/s390/net/qeth_l2_main.c 				card->osn_info.data_cb(skb);
skb               337 drivers/s390/net/qeth_l2_main.c 			dev_kfree_skb_any(skb);
skb               551 drivers/s390/net/qeth_l2_main.c static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
skb               554 drivers/s390/net/qeth_l2_main.c 	struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
skb               555 drivers/s390/net/qeth_l2_main.c 	addr_t end = (addr_t)(skb->data + sizeof(*hdr));
skb               556 drivers/s390/net/qeth_l2_main.c 	addr_t start = (addr_t)skb->data;
skb               561 drivers/s390/net/qeth_l2_main.c 	if (skb->protocol == htons(ETH_P_IPV6))
skb               570 drivers/s390/net/qeth_l2_main.c 		skb_copy_from_linear_data(skb, (char *)hdr, hd_len);
skb               574 drivers/s390/net/qeth_l2_main.c 	elements += qeth_count_elements(skb, hd_len);
skb               580 drivers/s390/net/qeth_l2_main.c 	rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len,
skb               588 drivers/s390/net/qeth_l2_main.c static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
skb               592 drivers/s390/net/qeth_l2_main.c 	u16 txq = skb_get_queue_mapping(skb);
skb               596 drivers/s390/net/qeth_l2_main.c 	if (!skb_is_gso(skb))
skb               597 drivers/s390/net/qeth_l2_main.c 		qdisc_skb_cb(skb)->pkt_len = skb->len;
skb               603 drivers/s390/net/qeth_l2_main.c 		rc = qeth_l2_xmit_osn(card, skb, queue);
skb               605 drivers/s390/net/qeth_l2_main.c 		rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
skb               612 drivers/s390/net/qeth_l2_main.c 	kfree_skb(skb);
skb               616 drivers/s390/net/qeth_l2_main.c static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               622 drivers/s390/net/qeth_l2_main.c 		return qeth_iqd_select_queue(dev, skb,
skb               623 drivers/s390/net/qeth_l2_main.c 					     qeth_get_ether_cast_type(skb),
skb               625 drivers/s390/net/qeth_l2_main.c 	return qeth_get_priority_queue(card, skb);
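qeth_get_ether_cast_type() used in the l2 path above classifies frames by destination MAC: all-ones is broadcast, a set group bit means multicast, anything else is unicast. Sketch:

#include <stdio.h>
#include <string.h>

enum cast { UNICAST, MULTICAST, BROADCAST };

static enum cast cast_type(const unsigned char mac[6])
{
	static const unsigned char bcast[6] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	if (!memcmp(mac, bcast, 6))
		return BROADCAST;
	if (mac[0] & 0x01)	/* group-address bit */
		return MULTICAST;
	return UNICAST;
}

int main(void)
{
	unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	printf("%d\n", cast_type(mc));	/* MULTICAST */
	return 0;
}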
skb              1311 drivers/s390/net/qeth_l3_main.c static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
skb              1314 drivers/s390/net/qeth_l3_main.c 	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
skb              1315 drivers/s390/net/qeth_l3_main.c 	struct net_device *dev = skb->dev;
skb              1318 drivers/s390/net/qeth_l3_main.c 		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
skb              1319 drivers/s390/net/qeth_l3_main.c 				"FAKELL", skb->len);
skb              1328 drivers/s390/net/qeth_l3_main.c 		skb_reset_network_header(skb);
skb              1332 drivers/s390/net/qeth_l3_main.c 				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
skb              1334 drivers/s390/net/qeth_l3_main.c 				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
skb              1343 drivers/s390/net/qeth_l3_main.c 				skb->pkt_type = PACKET_OTHERHOST;
skb              1348 drivers/s390/net/qeth_l3_main.c 			card->dev->header_ops->create(skb, card->dev, prot,
skb              1350 drivers/s390/net/qeth_l3_main.c 				skb->len);
skb              1352 drivers/s390/net/qeth_l3_main.c 			card->dev->header_ops->create(skb, card->dev, prot,
skb              1353 drivers/s390/net/qeth_l3_main.c 				tg_addr, "FAKELL", skb->len);
skb              1363 drivers/s390/net/qeth_l3_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
skb              1366 drivers/s390/net/qeth_l3_main.c 	qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
skb              1373 drivers/s390/net/qeth_l3_main.c 	struct sk_buff *skb;
skb              1380 drivers/s390/net/qeth_l3_main.c 		skb = qeth_core_get_next_skb(card,
skb              1383 drivers/s390/net/qeth_l3_main.c 		if (!skb) {
skb              1389 drivers/s390/net/qeth_l3_main.c 			qeth_l3_rebuild_skb(card, skb, hdr);
skb              1392 drivers/s390/net/qeth_l3_main.c 			skb->protocol = eth_type_trans(skb, skb->dev);
skb              1393 drivers/s390/net/qeth_l3_main.c 			len = skb->len;
skb              1394 drivers/s390/net/qeth_l3_main.c 			napi_gro_receive(&card->napi, skb);
skb              1397 drivers/s390/net/qeth_l3_main.c 			dev_kfree_skb_any(skb);
skb              1871 drivers/s390/net/qeth_l3_main.c static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
skb              1877 drivers/s390/net/qeth_l3_main.c 		n = dst_neigh_lookup_skb(dst, skb);
skb              1893 drivers/s390/net/qeth_l3_main.c 		if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
skb              1895 drivers/s390/net/qeth_l3_main.c 		return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
skb              1898 drivers/s390/net/qeth_l3_main.c 		return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
skb              1902 drivers/s390/net/qeth_l3_main.c 		return qeth_get_ether_cast_type(skb);
skb              1906 drivers/s390/net/qeth_l3_main.c static int qeth_l3_get_cast_type(struct sk_buff *skb)
skb              1908 drivers/s390/net/qeth_l3_main.c 	int ipv = qeth_get_ip_version(skb);
skb              1913 drivers/s390/net/qeth_l3_main.c 	dst = qeth_dst_check_rcu(skb, ipv);
skb              1914 drivers/s390/net/qeth_l3_main.c 	cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
skb              1932 drivers/s390/net/qeth_l3_main.c 				struct qeth_hdr *hdr, struct sk_buff *skb,
skb              1936 drivers/s390/net/qeth_l3_main.c 	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
skb              1943 drivers/s390/net/qeth_l3_main.c 	if (skb_is_gso(skb)) {
skb              1948 drivers/s390/net/qeth_l3_main.c 		if (skb->protocol == htons(ETH_P_AF_IUCV)) {
skb              1952 drivers/s390/net/qeth_l3_main.c 			       iucv_trans_hdr(skb)->destUserID, 8);
skb              1956 drivers/s390/net/qeth_l3_main.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1957 drivers/s390/net/qeth_l3_main.c 			qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
skb              1966 drivers/s390/net/qeth_l3_main.c 		if (skb_vlan_tag_present(skb)) {
skb              1968 drivers/s390/net/qeth_l3_main.c 			hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
skb              1976 drivers/s390/net/qeth_l3_main.c 	dst = qeth_dst_check_rcu(skb, ipv);
skb              1978 drivers/s390/net/qeth_l3_main.c 	if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ)
skb              1981 drivers/s390/net/qeth_l3_main.c 		cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
skb              1988 drivers/s390/net/qeth_l3_main.c 				rt_nexthop(rt, ip_hdr(skb)->daddr) :
skb              1989 drivers/s390/net/qeth_l3_main.c 				ip_hdr(skb)->daddr;
skb              1996 drivers/s390/net/qeth_l3_main.c 			l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
skb              2008 drivers/s390/net/qeth_l3_main.c static void qeth_l3_fixup_headers(struct sk_buff *skb)
skb              2010 drivers/s390/net/qeth_l3_main.c 	struct iphdr *iph = ip_hdr(skb);
skb              2013 drivers/s390/net/qeth_l3_main.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              2015 drivers/s390/net/qeth_l3_main.c 	if (skb_is_gso(skb)) {
skb              2017 drivers/s390/net/qeth_l3_main.c 		tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr,
skb              2022 drivers/s390/net/qeth_l3_main.c static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
skb              2029 drivers/s390/net/qeth_l3_main.c 	hw_hdr_len = skb_is_gso(skb) ? sizeof(struct qeth_hdr_tso) :
skb              2031 drivers/s390/net/qeth_l3_main.c 	rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
skb              2034 drivers/s390/net/qeth_l3_main.c 	skb_pull(skb, ETH_HLEN);
skb              2036 drivers/s390/net/qeth_l3_main.c 	qeth_l3_fixup_headers(skb);
skb              2037 drivers/s390/net/qeth_l3_main.c 	return qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
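
qeth_l3_xmit() sizes the hardware header (larger for TSO), guarantees that much writable headroom with skb_cow_head(), and strips the dummy ethernet header the stack built. The headroom dance in isolation, as a sketch assuming hdr_len >= ETH_HLEN:

	#include <linux/if_ether.h>
	#include <linux/skbuff.h>

	/* Make room for a device header of hdr_len bytes, reusing the space
	 * freed by discarding the stack-built ethernet header. */
	static int demo_make_room_for_hw_hdr(struct sk_buff *skb, unsigned int hdr_len)
	{
		int rc = skb_cow_head(skb, hdr_len - ETH_HLEN);

		if (rc)
			return rc;		/* caller drops the skb */
		skb_pull(skb, ETH_HLEN);	/* device header is pushed later */
		return 0;
	}
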
skb              2040 drivers/s390/net/qeth_l3_main.c static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
skb              2044 drivers/s390/net/qeth_l3_main.c 	u16 txq = skb_get_queue_mapping(skb);
skb              2045 drivers/s390/net/qeth_l3_main.c 	int ipv = qeth_get_ip_version(skb);
skb              2049 drivers/s390/net/qeth_l3_main.c 	if (!skb_is_gso(skb))
skb              2050 drivers/s390/net/qeth_l3_main.c 		qdisc_skb_cb(skb)->pkt_len = skb->len;
skb              2058 drivers/s390/net/qeth_l3_main.c 		     skb->protocol != htons(ETH_P_AF_IUCV)))
skb              2065 drivers/s390/net/qeth_l3_main.c 	    qeth_l3_get_cast_type(skb) == RTN_BROADCAST)
skb              2069 drivers/s390/net/qeth_l3_main.c 		rc = qeth_l3_xmit(card, skb, queue, ipv);
skb              2071 drivers/s390/net/qeth_l3_main.c 		rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
skb              2078 drivers/s390/net/qeth_l3_main.c 	kfree_skb(skb);
skb              2113 drivers/s390/net/qeth_l3_main.c static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
skb              2117 drivers/s390/net/qeth_l3_main.c 	if (qeth_get_ip_version(skb) != 4)
skb              2119 drivers/s390/net/qeth_l3_main.c 	return qeth_features_check(skb, dev, features);
skb              2122 drivers/s390/net/qeth_l3_main.c static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              2125 drivers/s390/net/qeth_l3_main.c 	return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
skb              2129 drivers/s390/net/qeth_l3_main.c static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
skb              2134 drivers/s390/net/qeth_l3_main.c 	return qeth_get_priority_queue(card, skb);
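
Both the L2 and L3 select_queue hooks above end up steering by cast type so multicast lands on its own IQD queue. A hedged .ndo_select_queue sketch with an invented two-queue split; the signature matches recent kernels, and qeth's real queue layout differs:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define DEMO_MCAST_TXQ	0	/* assumed layout, illustration only */
	#define DEMO_UCAST_TXQ	1

	static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
				     struct net_device *sb_dev)
	{
		const struct ethhdr *eth = (const struct ethhdr *)skb->data;

		return is_multicast_ether_addr(eth->h_dest) ? DEMO_MCAST_TXQ
							    : DEMO_UCAST_TXQ;
	}
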
skb               522 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen);
skb                71 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_recv_frame(struct sk_buff *skb);
skb               152 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct sk_buff *skb, *next;
skb               157 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb_queue_walk_safe(list, skb, next) {
skb               158 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		fr = fcoe_dev_from_skb(skb);
skb               160 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			__skb_unlink(skb, list);
skb               161 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			kfree_skb(skb);
skb               167 drivers/scsi/bnx2fc/bnx2fc_fcoe.c int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
skb               171 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
skb               266 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct sk_buff		*skb;
skb               287 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb = fp_skb(fp);
skb               290 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		kfree_skb(skb);
skb               297 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			kfree_skb(skb);
skb               300 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		if (fcoe_ctlr_els_send(ctlr, lport, skb))
skb               326 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			kfree_skb(skb);
skb               336 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
skb               338 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb->ip_summed = CHECKSUM_NONE;
skb               342 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	if (skb_is_nonlinear(skb)) {
skb               344 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
skb               345 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			kfree_skb(skb);
skb               348 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
skb               351 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		cp = skb_put(skb, tlen);
skb               357 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	if (skb_is_nonlinear(skb)) {
skb               363 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb_push(skb, elen + hlen);
skb               364 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb_reset_mac_header(skb);
skb               365 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb_reset_network_header(skb);
skb               366 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb->mac_len = elen;
skb               367 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb->protocol = htons(ETH_P_FCOE);
skb               368 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb->dev = interface->netdev;
skb               371 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	eh = eth_hdr(skb);
skb               392 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
skb               393 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
skb               395 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		skb_shinfo(skb)->gso_type = 0;
skb               396 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		skb_shinfo(skb)->gso_size = 0;
skb               408 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		fcoe_check_wait_queue(lport, skb);
skb               409 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	else if (fcoe_start_io(skb))
skb               410 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		fcoe_check_wait_queue(lport, skb);
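
bnx2fc_xmit() appends the FC CRC and EOF trailer (paged for nonlinear skbs, linear otherwise) before pushing the ethernet and FCoE headers and optionally marking the frame for FCoE GSO. The linear trailer path as a standalone sketch; struct fcoe_crc_eof comes from <scsi/fc/fc_fcoe.h>, and real drivers also handle hardware CRC offload:

	#include <linux/crc32.h>
	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <scsi/fc/fc_fcoe.h>

	/* Append the CRC + EOF trailer to a linear FCoE frame. */
	static int demo_fcoe_add_trailer(struct sk_buff *skb, u8 eof)
	{
		struct fcoe_crc_eof *cp;

		if (skb_tailroom(skb) < sizeof(*cp))
			return -ENOMEM;
		cp = skb_put(skb, sizeof(*cp));
		cp->fcoe_eof = eof;
		cp->fcoe_crc32 = cpu_to_le32(~crc32(~0, skb->data,
						    skb->len - sizeof(*cp)));
		memset(cp->fcoe_resvd, 0, sizeof(cp->fcoe_resvd));
		return 0;
	}
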
skb               425 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
skb               445 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	tmp_skb = skb_share_check(skb, GFP_ATOMIC);
skb               449 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb = tmp_skb;
skb               451 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
skb               460 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
skb               461 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
skb               464 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
skb               466 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	fr = fcoe_dev_from_skb(skb);
skb               472 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	__skb_queue_tail(&bg->fcoe_rx_list, skb);
skb               480 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	kfree_skb(skb);
skb               487 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct sk_buff *skb;
skb               494 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
skb               496 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			bnx2fc_recv_frame(skb);
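
bnx2fc_rcv() runs in softirq context, so it only share-checks the skb, validates the ethertype and header length, and queues the frame; the kernel thread above dequeues in process context for the expensive parsing. The split, sketched with the locking and thread wakeup elided (demo_rx_list must be skb_queue_head_init()'d at setup):

	#include <linux/errno.h>
	#include <linux/if_ether.h>
	#include <linux/skbuff.h>
	#include <scsi/fc/fc_fcoe.h>

	static struct sk_buff_head demo_rx_list;

	/* Softirq half: cheap sanity checks, then defer to process context. */
	static int demo_fcoe_rcv(struct sk_buff *skb)
	{
		skb = skb_share_check(skb, GFP_ATOMIC);	/* clone if shared */
		if (!skb)
			return -ENOMEM;
		if (eth_hdr(skb)->h_proto != htons(ETH_P_FCOE) ||
		    !pskb_may_pull(skb, sizeof(struct fcoe_hdr)))
			goto drop;
		skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
		skb_queue_tail(&demo_rx_list, skb);	/* locked variant */
		return 0;
	drop:
		kfree_skb(skb);
		return -EINVAL;
	}
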
skb               507 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_recv_frame(struct sk_buff *skb)
skb               524 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	fr = fcoe_dev_from_skb(skb);
skb               528 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		kfree_skb(skb);
skb               532 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	if (skb_is_nonlinear(skb))
skb               533 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		skb_linearize(skb);
skb               534 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	mac = eth_hdr(skb)->h_source;
skb               535 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	dest_mac = eth_hdr(skb)->h_dest;
skb               538 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	hp = (struct fcoe_hdr *) skb_network_header(skb);
skb               539 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	fh = (struct fc_frame_header *) skb_transport_header(skb);
skb               540 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb_pull(skb, sizeof(struct fcoe_hdr));
skb               541 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
skb               543 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	fp = (struct fc_frame *)skb;
skb               547 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
skb               548 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		kfree_skb(skb);
skb               553 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	if (pskb_trim(skb, fr_len)) {
skb               554 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		kfree_skb(skb);
skb               567 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		kfree_skb(skb);
skb               576 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			kfree_skb(skb);
skb               584 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			kfree_skb(skb);
skb               591 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		kfree_skb(skb);
skb               600 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 				kfree_skb(skb);
skb               609 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		kfree_skb(skb);
skb               621 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		kfree_skb(skb);
skb               630 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			~crc32(~0, skb->data, fr_len)) {
skb               635 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		kfree_skb(skb);
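
When the frame was not validated in hardware, bnx2fc_recv_frame() copies the trailer out with skb_copy_bits(), trims it off with pskb_trim(), and re-computes the CRC over the linearized payload. The comparison it ends with, isolated:

	#include <linux/crc32.h>
	#include <linux/skbuff.h>
	#include <scsi/fc/fc_fcoe.h>

	/* True when the trailer CRC matches the (already trimmed) frame. */
	static bool demo_fcoe_crc_ok(const struct sk_buff *skb,
				     const struct fcoe_crc_eof *trailer)
	{
		return le32_to_cpu(trailer->fcoe_crc32) ==
		       ~crc32(~0, skb->data, skb->len);
	}
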
skb              1055 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
skb              1064 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	fcoe_ctlr_recv(ctlr, skb);
skb              1104 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
skb              1111 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
skb              1112 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
skb              1120 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	skb->dev = bnx2fc_from_ctlr(fip)->netdev;
skb              1121 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	dev_queue_xmit(skb);
skb              2767 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct sk_buff *skb;
skb              2800 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
skb              2801 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		kfree_skb(skb);
skb               558 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct sk_buff *skb;
skb               589 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	skb = fp_skb(fp);
skb               603 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				kfree_skb(skb);
skb               621 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		kfree_skb(skb);
skb               157 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
skb               161 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;
skb               163 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb->priority = CPL_PRIORITY_SETUP;
skb               185 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	l2t_send(csk->cdev->lldev, skb, csk->l2t);
skb               188 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
skb               190 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgbi_sock_act_open_req_arp_failure(NULL, skb);
skb               201 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct sk_buff *skb = csk->cpl_close;
skb               202 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
skb               215 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgbi_sock_skb_entail(csk, skb);
skb               227 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
skb               229 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_abort_req *req = cplhdr(skb);
skb               233 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		tdev, GET_TID(req), skb);
skb               235 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgb3_ofld_send(tdev, skb);
skb               240 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct sk_buff *skb = csk->cpl_abort_req;
skb               243 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	if (unlikely(csk->state == CTP_ABORTING || !skb))
skb               251 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req = (struct cpl_abort_req *)skb->head;
skb               252 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb->priority = CPL_PRIORITY_DATA;
skb               253 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	set_arp_failure_handler(skb, abort_arp_failure);
skb               266 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	l2t_send(csk->cdev->lldev, skb, csk->l2t);
skb               276 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct sk_buff *skb = csk->cpl_abort_rpl;
skb               277 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
skb               284 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb->priority = CPL_PRIORITY_DATA;
skb               289 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgb3_ofld_send(csk->cdev->lldev, skb);
skb               299 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct sk_buff *skb;
skb               307 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
skb               308 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	if (!skb) {
skb               312 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req = (struct cpl_rx_data_ack *)skb->head;
skb               317 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb->priority = CPL_PRIORITY_ACK;
skb               318 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgb3_ofld_send(csk->cdev->lldev, skb);
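
Every cxgb3i control message above follows one pattern: allocate an skb big enough for the CPL, build the request at the head of the buffer, set a CPL_PRIORITY_* class, and hand it to cxgb3_ofld_send() or l2t_send(). A generic sketch of the allocation step; alloc_wr() is the driver's own wrapper, so plain alloc_skb() stands in, and struct demo_cpl is a placeholder layout:

	#include <linux/skbuff.h>

	struct demo_cpl {			/* placeholder for a real CPL */
		__be32 wr_hi, wr_lo;
		__be32 word0;
	};

	static struct sk_buff *demo_build_ctrl_skb(gfp_t gfp)
	{
		struct sk_buff *skb = alloc_skb(sizeof(struct demo_cpl), gfp);
		struct demo_cpl *req;

		if (!skb)
			return NULL;
		req = skb_put_zero(skb, sizeof(*req));	/* claim + zero the CPL */
		req->word0 = cpu_to_be32(1);		/* request fields go here */
		return skb;
	}
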
skb               350 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
skb               356 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb_reset_transport_header(skb);
skb               357 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req = __skb_push(skb, sizeof(*req));
skb               364 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
skb               389 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
skb               391 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	kfree_skb(skb);
skb               397 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct sk_buff *skb;
skb               407 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
skb               408 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		int len = skb->len;	/* length before skb_push */
skb               409 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
skb               420 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 				csk, skb->len, skb->data_len, frags,
skb               425 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		__skb_unlink(skb, &csk->write_queue);
skb               426 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		skb->priority = CPL_PRIORITY_DATA;
skb               427 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
skb               430 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		cxgbi_sock_enqueue_wr(csk, skb);
skb               435 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 			csk, skb->len, skb->data_len, frags, skb->csum,
skb               438 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
skb               445 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
skb               446 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 			make_tx_data_wr(csk, skb, len, req_completion);
skb               448 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
skb               450 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		total_size += skb->truesize;
skb               453 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 			csk, csk->tid, skb);
skb               454 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		set_arp_failure_handler(skb, arp_failure_skb_discard);
skb               455 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		l2t_send(csk->cdev->lldev, skb, csk->l2t);
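
push_tx_frames() meters the write queue against work-request credits, stashing each skb's credit cost in skb->csum so the later WR_ACK can refund it, and moving the skb from the write queue to the pending-ack queue as it goes to the wire. The accounting shape, on an invented simplified socket struct:

	#include <linux/skbuff.h>

	struct demo_offload_sock {
		struct sk_buff_head write_queue;	/* not yet sent */
		struct sk_buff_head wr_queue;		/* sent, awaiting WR_ACK */
		unsigned int wr_cred;			/* credits remaining */
	};

	/* Push while credits last; skb->csum is repurposed to remember the
	 * cost so the ack handler can do s->wr_cred += skb->csum. */
	static void demo_push_tx(struct demo_offload_sock *s,
				 void (*hw_send)(struct sk_buff *))
	{
		struct sk_buff *skb;

		while (s->wr_cred && (skb = skb_peek(&s->write_queue)) != NULL) {
			unsigned int need = 1 + skb_shinfo(skb)->nr_frags;

			if (need > s->wr_cred)
				break;			/* wait for credits back */
			__skb_unlink(skb, &s->write_queue);
			skb->csum = need;
			s->wr_cred -= need;
			__skb_queue_tail(&s->wr_queue, skb);
			hw_send(skb);
		}
	}
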
skb               475 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb               478 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_act_establish *req = cplhdr(skb);
skb               494 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	csk->rss_qid = G_QNUM(ntohs(skb->csum));
skb               522 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__kfree_skb(skb);
skb               551 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct sk_buff *skb;
skb               559 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
skb               560 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	if (!skb)
skb               563 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		skb->sk = (struct sock *)csk;
skb               564 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		set_arp_failure_handler(skb, act_open_arp_failure);
skb               565 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		send_act_open_req(csk, skb, csk->l2t);
skb               571 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb               574 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
skb               598 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__kfree_skb(skb);
skb               606 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
skb               615 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__kfree_skb(skb);
skb               623 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
skb               627 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_close_con_rpl *rpl = cplhdr(skb);
skb               634 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__kfree_skb(skb);
skb               661 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
skb               663 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	const struct cpl_abort_req_rss *req = cplhdr(skb);
skb               697 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__kfree_skb(skb);
skb               708 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
skb               710 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
skb               735 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__kfree_skb(skb);
skb               744 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
skb               747 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
skb               756 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		csk, csk->state, csk->flags, csk->tid, skb, skb->len);
skb               770 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
skb               771 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgbi_skcb_flags(skb) = 0;
skb               773 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb_reset_transport_header(skb);
skb               774 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
skb               778 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	if (skb->len <= hdr_len) {
skb               781 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 			skb->len, hdr_len);
skb               784 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);
skb               786 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
skb               791 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 			skb->len, sizeof(ddp_cpl), err);
skb               795 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
skb               796 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
skb               797 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
skb               802 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);
skb               805 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
skb               807 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
skb               809 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
skb               811 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
skb               812 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
skb               816 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 				csk->tid, sizeof(data_cpl), skb->len, err);
skb               822 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 			skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
skb               825 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
skb               827 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
skb               828 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__pskb_trim(skb, len);
skb               829 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__skb_queue_tail(&csk->receive_queue, skb);
skb               839 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__kfree_skb(skb);
skb               848 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
skb               851 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct cpl_wr_ack *hdr = cplhdr(skb);
skb               858 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	__kfree_skb(skb);
skb               961 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct sk_buff *skb = NULL;
skb               987 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
skb               988 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	if (!skb) {
skb               993 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb->sk = (struct sock *)csk;
skb               994 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	set_arp_failure_handler(skb, act_open_arp_failure);
skb              1011 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	send_act_open_req(csk, skb, csk->l2t);
skb              1069 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
skb              1071 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
skb              1100 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
skb              1103 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		if (!skb)
skb              1105 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		ulp_mem_io_set_hdr(skb, pm_addr);
skb              1106 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		req = (struct ulp_mem_io *)skb->head;
skb              1111 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		skb->priority = CPL_PRIORITY_CONTROL;
skb              1112 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		cxgb3_ofld_send(ppm->lldev, skb);
skb              1130 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
skb              1133 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		if (!skb) {
skb              1138 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		ulp_mem_io_set_hdr(skb, pm_addr);
skb              1139 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		skb->priority = CPL_PRIORITY_CONTROL;
skb              1140 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		cxgb3_ofld_send(ppm->lldev, skb);
skb              1147 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
skb              1154 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	if (!skb)
skb              1158 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req = (struct cpl_set_tcb_field *)skb->head;
skb              1166 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb->priority = CPL_PRIORITY_CONTROL;
skb              1168 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgb3_ofld_send(csk->cdev->lldev, skb);
skb              1183 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
skb              1190 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	if (!skb)
skb              1194 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	req = (struct cpl_set_tcb_field *)skb->head;
skb              1202 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb->priority = CPL_PRIORITY_CONTROL;
skb              1204 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	cxgb3_ofld_send(csk->cdev->lldev, skb);
skb               193 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static inline bool is_ofld_imm(const struct sk_buff *skb)
skb               195 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	int len = skb->len;
skb               197 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
skb               203 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
skb               228 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				(struct cpl_act_open_req *)skb->head;
skb               251 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				(struct cpl_t5_act_open_req *)skb->head;
skb               279 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				(struct cpl_t6_act_open_req *)skb->head;
skb               311 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
skb               318 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
skb               322 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
skb               347 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			    (struct cpl_act_open_req6 *)skb->head;
skb               372 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				(struct cpl_t5_act_open_req6 *)skb->head;
skb               395 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				(struct cpl_t6_act_open_req6 *)skb->head;
skb               423 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
skb               432 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
skb               438 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb = csk->cpl_close;
skb               439 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
skb               446 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
skb               451 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_sock_skb_entail(csk, skb);
skb               456 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void abort_arp_failure(void *handle, struct sk_buff *skb)
skb               464 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct cpl_abort_req *)skb->data;
skb               466 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
skb               472 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb = csk->cpl_abort_req;
skb               474 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
skb               487 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct cpl_abort_req *)skb->head;
skb               488 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
skb               490 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
skb               501 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
skb               506 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb = csk->cpl_abort_rpl;
skb               507 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
skb               514 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
skb               518 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
skb               528 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb;
skb               535 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
skb               536 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (!skb) {
skb               540 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct cpl_rx_data_ack *)skb->head;
skb               542 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
skb               548 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
skb               572 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
skb               576 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (is_ofld_imm(skb))
skb               577 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		return DIV_ROUND_UP(skb->len, 8);
skb               578 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	flits = skb_transport_offset(skb) / 8;
skb               579 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cnt = skb_shinfo(skb)->nr_frags;
skb               580 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
skb               611 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb;
skb               619 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
skb               620 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	flowc = (struct fw_flowc_wr *)skb->head;
skb               657 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
skb               665 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
skb               670 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
skb               674 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
skb               676 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	bool imm = is_ofld_imm(skb);
skb               678 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = __skb_push(skb, sizeof(*req));
skb               706 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
skb               708 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	kfree_skb(skb);
skb               714 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb;
skb               725 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
skb               726 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		int dlen = skb->len;
skb               727 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		int len = skb->len;
skb               731 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb_reset_transport_header(skb);
skb               732 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		if (is_ofld_imm(skb))
skb               736 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 						8 * calc_tx_flits_ofld(skb),
skb               739 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
skb               758 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				csk, skb->len, skb->data_len,
skb               762 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		__skb_unlink(skb, &csk->write_queue);
skb               763 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
skb               764 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb->csum = credits_needed + flowclen16;
skb               767 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		cxgbi_sock_enqueue_wr(csk, skb);
skb               771 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			csk, skb->len, skb->data_len, credits_needed,
skb               774 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
skb               775 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
skb               776 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
skb               779 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
skb               780 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		} else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
skb               783 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				(struct cpl_close_con_req *)skb->data;
skb               786 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		total_size += skb->truesize;
skb               787 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
skb               791 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			csk, csk->state, csk->flags, csk->tid, skb, len);
skb               793 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
skb               809 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
skb               812 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
skb               885 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb               908 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb = NULL;
skb               932 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb = alloc_wr(size, 0, GFP_ATOMIC);
skb               936 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb = alloc_wr(size6, 0, GFP_ATOMIC);
skb               940 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (!skb)
skb               943 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb->sk = (struct sock *)csk;
skb               944 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		t4_set_arp_err_handler(skb, csk,
skb               946 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		send_act_open_func(csk, skb, csk->l2t);
skb               961 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
skb               964 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
skb              1007 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1010 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
skb              1013 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
skb              1028 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1031 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
skb              1034 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
skb              1049 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1070 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
skb              1073 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
skb              1115 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1118 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
skb              1121 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
skb              1140 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1143 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
skb              1146 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
skb              1161 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1164 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
skb              1167 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
skb              1181 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		csk, csk->state, csk->flags, csk->tid, skb, skb->len,
skb              1196 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
skb              1197 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_flags(skb) = 0;
skb              1199 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb_reset_transport_header(skb);
skb              1200 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__skb_pull(skb, sizeof(*cpl));
skb              1201 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__pskb_trim(skb, ntohs(cpl->len));
skb              1209 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			csk, csk->state, csk->flags, csk->tid, skb);
skb              1210 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		csk->skb_ulp_lhdr = skb;
skb              1211 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
skb              1214 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		    (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
skb              1216 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				csk->tid, cxgbi_skcb_tcp_seq(skb),
skb              1221 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		bhs = skb->data;
skb              1233 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				cxgbi_skcb_tcp_seq(skb));
skb              1237 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
skb              1239 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
skb              1240 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
skb              1244 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			csk, skb, *bhs, hlen, dlen,
skb              1254 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			csk, csk->state, csk->flags, skb, lskb);
skb              1257 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__skb_queue_tail(&csk->receive_queue, skb);
skb              1266 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
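
do_rx_iscsi_hdr() advances rcv_nxt by the on-wire PDU footprint derived from the BHS: header plus data rounded up to a 4-byte boundary, plus the data digest when negotiated. That arithmetic as a helper (dcrc_len would be 4 with data digests on, 0 otherwise):

	#include <linux/kernel.h>

	/* Wire footprint of one PDU past the offload framing. */
	static unsigned int demo_rx_pdulen(unsigned int hlen, unsigned int dlen,
					   unsigned int dcrc_len)
	{
		return ALIGN(hlen + dlen, 4) + dcrc_len;  /* (h+d+3) & ~3, + digest */
	}
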
skb              1269 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
skb              1272 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
skb              1287 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		  csk, csk->state, csk->flags, csk->tid, skb,
skb              1288 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		  skb->len, pdu_len_ddp);
skb              1303 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
skb              1304 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_flags(skb) = 0;
skb              1306 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb_reset_transport_header(skb);
skb              1307 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__skb_pull(skb, sizeof(*cpl));
skb              1308 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__pskb_trim(skb, ntohs(cpl->len));
skb              1311 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		csk->skb_ulp_lhdr = skb;
skb              1318 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		  csk, csk->state, csk->flags, skb, lskb);
skb              1320 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__skb_queue_tail(&csk->receive_queue, skb);
skb              1329 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1334 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		      struct sk_buff *skb, u32 ddpvld)
skb              1338 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
skb              1339 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
skb              1344 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
skb              1345 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
skb              1351 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			  csk, skb, ddpvld);
skb              1352 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
skb              1356 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	    !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
skb              1359 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			  csk, skb, ddpvld);
skb              1360 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
skb              1365 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				  struct sk_buff *skb)
skb              1369 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
skb              1383 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
skb              1427 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1431 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
skb              1434 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
skb              1452 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
skb              1468 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_tcp_seq(skb) = seq;
skb              1469 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_flags(skb) = 0;
skb              1470 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_rx_pdulen(skb) = 0;
skb              1472 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb_reset_transport_header(skb);
skb              1473 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__skb_pull(skb, sizeof(*rpl));
skb              1474 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__pskb_trim(skb, be16_to_cpu(rpl->len));
skb              1489 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
skb              1491 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		__skb_queue_tail(&csk->receive_queue, skb);
skb              1494 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		 __skb_queue_tail(&csk->receive_queue, skb);
skb              1499 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
skb              1500 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
skb              1501 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
skb              1502 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
skb              1504 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgb4i_process_ddpvld(csk, skb, ddpvld);
skb              1507 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		  csk, skb, cxgbi_skcb_flags(skb));
skb              1519 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1522 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
skb              1525 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
skb              1540 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1543 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
skb              1545 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
skb              1569 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	__kfree_skb(skb);
skb              1690 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb = NULL;
skb              1766 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb = alloc_wr(size, 0, GFP_NOIO);
skb              1769 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb = alloc_wr(size6, 0, GFP_NOIO);
skb              1772 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (!skb)
skb              1774 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb->sk = (struct sock *)csk;
skb              1775 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);
skb              1823 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		send_act_open_req(csk, skb, csk->l2t);
skb              1826 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		send_act_open_req6(csk, skb, csk->l2t);
skb              1841 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (skb)
skb              1842 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		__kfree_skb(skb);
skb              1921 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
skb              1923 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (!skb) {
skb              1929 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
skb              1932 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	return skb;
skb              1942 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
skb              1949 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (!skb)
skb              1952 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct ulp_mem_io *)skb->head;
skb              1959 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
skb              1960 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
skb              1961 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
skb              1964 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgbi_sock_skb_entail(csk, skb);
skb              1999 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb;
skb              2005 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
skb              2006 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (!skb)
skb              2010 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct cpl_set_tcb_field *)skb->head;
skb              2017 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
skb              2023 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
skb              2032 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb;
skb              2038 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
skb              2039 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	if (!skb)
skb              2045 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	req = (struct cpl_set_tcb_field *)skb->head;
skb              2053 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
skb              2059 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
skb              2187 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	struct sk_buff *skb;
skb              2194 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb = alloc_wr(len, 0, GFP_ATOMIC);
skb              2195 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		if (!skb)
skb              2197 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb_copy_to_linear_data(skb, &rsp[1], len);
skb              2206 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
skb              2207 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		if (unlikely(!skb))
skb              2211 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	rpl = (struct cpl_act_establish *)skb->data;
skb              2215 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
skb              2218 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		__kfree_skb(skb);
skb              2220 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		cxgb4i_cplhandlers[opc](cdev, skb);
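
Incoming CPLs are demultiplexed through cxgb4i_cplhandlers[], a table indexed by opcode in which every handler consumes the skb. A toy version of that dispatch; the opcode-in-first-byte layout is illustrative, not the real RSS header:

	#include <linux/skbuff.h>

	#define DEMO_NR_OPCODES	256

	typedef void (*demo_cpl_handler_t)(void *dev, struct sk_buff *skb);

	static demo_cpl_handler_t demo_handlers[DEMO_NR_OPCODES];

	static void demo_dispatch_cpl(void *dev, struct sk_buff *skb)
	{
		u8 opc = skb->data[0];		/* toy: opcode leads the message */

		if (demo_handlers[opc])
			demo_handlers[opc](dev, skb);	/* handler frees the skb */
		else
			kfree_skb(skb);
	}
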
skb               919 drivers/scsi/cxgbi/libcxgbi.c void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
skb               921 drivers/scsi/cxgbi/libcxgbi.c 	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
skb               932 drivers/scsi/cxgbi/libcxgbi.c 	__kfree_skb(skb);
skb              1113 drivers/scsi/cxgbi/libcxgbi.c void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
skb              1115 drivers/scsi/cxgbi/libcxgbi.c 	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
skb              1116 drivers/scsi/cxgbi/libcxgbi.c 	__skb_queue_tail(&csk->write_queue, skb);
skb              1122 drivers/scsi/cxgbi/libcxgbi.c 	struct sk_buff *skb;
skb              1124 drivers/scsi/cxgbi/libcxgbi.c 	while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
skb              1125 drivers/scsi/cxgbi/libcxgbi.c 		kfree_skb(skb);
skb              1139 drivers/scsi/cxgbi/libcxgbi.c static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
skb              1172 drivers/scsi/cxgbi/libcxgbi.c 	while (skb) {
skb              1173 drivers/scsi/cxgbi/libcxgbi.c 		int frags = skb_shinfo(skb)->nr_frags +
skb              1174 drivers/scsi/cxgbi/libcxgbi.c 				(skb->len != skb->data_len);
skb              1176 drivers/scsi/cxgbi/libcxgbi.c 		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
skb              1178 drivers/scsi/cxgbi/libcxgbi.c 				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
skb              1185 drivers/scsi/cxgbi/libcxgbi.c 				csk, skb_shinfo(skb)->nr_frags, skb->len,
skb              1186 drivers/scsi/cxgbi/libcxgbi.c 				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
skb              1191 drivers/scsi/cxgbi/libcxgbi.c 		next = skb->next;
skb              1192 drivers/scsi/cxgbi/libcxgbi.c 		skb->next = NULL;
skb              1193 drivers/scsi/cxgbi/libcxgbi.c 		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
skb              1194 drivers/scsi/cxgbi/libcxgbi.c 		cxgbi_sock_skb_entail(csk, skb);
skb              1195 drivers/scsi/cxgbi/libcxgbi.c 		copied += skb->len;
skb              1196 drivers/scsi/cxgbi/libcxgbi.c 		csk->write_seq += skb->len +
skb              1197 drivers/scsi/cxgbi/libcxgbi.c 				cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
skb              1198 drivers/scsi/cxgbi/libcxgbi.c 		skb = next;
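
cxgbi_sock_send_pdus() walks a chain linked through skb->next, detaching each buffer before entailing it so stale list pointers never reach the write queue, and accumulates the byte count it reports as copied. The walk on its own:

	#include <linux/skbuff.h>

	/* Queue a ->next-linked chain of skbs; return the bytes accepted. */
	static int demo_entail_chain(struct sk_buff_head *q, struct sk_buff *skb)
	{
		int copied = 0;

		while (skb) {
			struct sk_buff *next = skb->next;

			skb->next = NULL;	/* detach before queueing */
			__skb_queue_tail(q, skb);
			copied += skb->len;
			skb = next;
		}
		return copied;
	}
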
skb              1543 drivers/scsi/cxgbi/libcxgbi.c 			       struct sk_buff *skb,
skb              1550 drivers/scsi/cxgbi/libcxgbi.c 	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
skb              1554 drivers/scsi/cxgbi/libcxgbi.c 			  skb, offset, offloaded);
skb              1559 drivers/scsi/cxgbi/libcxgbi.c 			skb, offset, offloaded, bytes_read);
skb              1564 drivers/scsi/cxgbi/libcxgbi.c 			skb, offset, offloaded);
skb              1574 drivers/scsi/cxgbi/libcxgbi.c 			skb, offset, offloaded, bytes_read);
skb              1578 drivers/scsi/cxgbi/libcxgbi.c 			skb, offset, offloaded, status);
skb              1585 drivers/scsi/cxgbi/libcxgbi.c 		 struct sk_buff *skb)
skb              1592 drivers/scsi/cxgbi/libcxgbi.c 		conn, skb, skb->len, cxgbi_skcb_flags(skb));
skb              1595 drivers/scsi/cxgbi/libcxgbi.c 		pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
skb              1601 drivers/scsi/cxgbi/libcxgbi.c 	    cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
skb              1602 drivers/scsi/cxgbi/libcxgbi.c 		pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
skb              1607 drivers/scsi/cxgbi/libcxgbi.c 	if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
skb              1608 drivers/scsi/cxgbi/libcxgbi.c 	    cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
skb              1615 drivers/scsi/cxgbi/libcxgbi.c 		itt_t itt = ((struct iscsi_data *)skb->data)->itt;
skb              1618 drivers/scsi/cxgbi/libcxgbi.c 							skb->data)->datasn);
skb              1626 drivers/scsi/cxgbi/libcxgbi.c 	err = read_pdu_skb(conn, skb, 0, 0);
skb              1628 drivers/scsi/cxgbi/libcxgbi.c 		struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
skb              1639 drivers/scsi/cxgbi/libcxgbi.c 			     struct sk_buff *skb, unsigned int offset)
skb              1647 drivers/scsi/cxgbi/libcxgbi.c 		conn, skb, skb->len, cxgbi_skcb_flags(skb));
skb              1661 drivers/scsi/cxgbi/libcxgbi.c 	if (lskb == skb && conn->hdrdgst_en)
skb              1670 drivers/scsi/cxgbi/libcxgbi.c 			skb, opcode, ntohl(tcp_conn->in.hdr->itt),
skb              1673 drivers/scsi/cxgbi/libcxgbi.c 	return read_pdu_skb(conn, skb, offset, offloaded);
skb              1706 drivers/scsi/cxgbi/libcxgbi.c 	struct sk_buff *skb;
skb              1722 drivers/scsi/cxgbi/libcxgbi.c 		skb = skb_peek(&csk->receive_queue);
skb              1723 drivers/scsi/cxgbi/libcxgbi.c 		if (!skb ||
skb              1724 drivers/scsi/cxgbi/libcxgbi.c 		    !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
skb              1725 drivers/scsi/cxgbi/libcxgbi.c 			if (skb)
skb              1728 drivers/scsi/cxgbi/libcxgbi.c 					skb, cxgbi_skcb_flags(skb));
skb              1731 drivers/scsi/cxgbi/libcxgbi.c 		__skb_unlink(skb, &csk->receive_queue);
skb              1733 drivers/scsi/cxgbi/libcxgbi.c 		read += cxgbi_skcb_rx_pdulen(skb);
skb              1736 drivers/scsi/cxgbi/libcxgbi.c 			csk, skb, skb->len, cxgbi_skcb_flags(skb),
skb              1737 drivers/scsi/cxgbi/libcxgbi.c 			cxgbi_skcb_rx_pdulen(skb));
skb              1739 drivers/scsi/cxgbi/libcxgbi.c 		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
skb              1740 drivers/scsi/cxgbi/libcxgbi.c 			err = skb_read_pdu_bhs(csk, conn, skb);
skb              1744 drivers/scsi/cxgbi/libcxgbi.c 					csk, skb, skb->len,
skb              1745 drivers/scsi/cxgbi/libcxgbi.c 					cxgbi_skcb_flags(skb),
skb              1746 drivers/scsi/cxgbi/libcxgbi.c 					cxgbi_skcb_rx_pdulen(skb));
skb              1749 drivers/scsi/cxgbi/libcxgbi.c 			err = skb_read_pdu_data(conn, skb, skb,
skb              1754 drivers/scsi/cxgbi/libcxgbi.c 					csk, skb, skb->len,
skb              1755 drivers/scsi/cxgbi/libcxgbi.c 					cxgbi_skcb_flags(skb),
skb              1756 drivers/scsi/cxgbi/libcxgbi.c 					cxgbi_skcb_rx_pdulen(skb));
skb              1758 drivers/scsi/cxgbi/libcxgbi.c 			err = skb_read_pdu_bhs(csk, conn, skb);
skb              1762 drivers/scsi/cxgbi/libcxgbi.c 					csk, skb, skb->len,
skb              1763 drivers/scsi/cxgbi/libcxgbi.c 					cxgbi_skcb_flags(skb),
skb              1764 drivers/scsi/cxgbi/libcxgbi.c 					cxgbi_skcb_rx_pdulen(skb));
skb              1768 drivers/scsi/cxgbi/libcxgbi.c 			if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
skb              1775 drivers/scsi/cxgbi/libcxgbi.c 						csk, skb, skb->len,
skb              1776 drivers/scsi/cxgbi/libcxgbi.c 						cxgbi_skcb_flags(skb),
skb              1777 drivers/scsi/cxgbi/libcxgbi.c 						cxgbi_skcb_rx_pdulen(skb));
skb              1783 drivers/scsi/cxgbi/libcxgbi.c 				err = skb_read_pdu_data(conn, skb, dskb, 0);
skb              1788 drivers/scsi/cxgbi/libcxgbi.c 						csk, skb, skb->len,
skb              1789 drivers/scsi/cxgbi/libcxgbi.c 						cxgbi_skcb_flags(skb),
skb              1790 drivers/scsi/cxgbi/libcxgbi.c 						cxgbi_skcb_rx_pdulen(skb),
skb              1794 drivers/scsi/cxgbi/libcxgbi.c 				err = skb_read_pdu_data(conn, skb, skb, 0);
skb              1797 drivers/scsi/cxgbi/libcxgbi.c 		__kfree_skb(skb);
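
The pdu_ready loop above only unlinks an skb once SKCBF_RX_STATUS shows its completion arrived; otherwise it leaves the queue untouched and waits for more. The same stop-at-incomplete shape, sketched with a toy flag byte in skb->cb standing in for the real flag word:

	#include <linux/skbuff.h>

	static bool demo_pdu_complete(const struct sk_buff *skb)
	{
		return skb->cb[0] != 0;	/* toy stand-in for SKCBF_RX_STATUS */
	}

	/* Drain fully-received PDUs from the head of the queue. */
	static unsigned int demo_drain_complete(struct sk_buff_head *q)
	{
		unsigned int read = 0;
		struct sk_buff *skb;

		while ((skb = skb_peek(q)) != NULL) {
			if (!demo_pdu_complete(skb))
				break;		/* PDU still in flight */
			__skb_unlink(skb, q);
			read += skb->len;
			kfree_skb(skb);		/* real code parses it first */
		}
		return read;
	}
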
skb              1911 drivers/scsi/cxgbi/libcxgbi.c 	tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
skb              1912 drivers/scsi/cxgbi/libcxgbi.c 	if (!tdata->skb) {
skb              1917 drivers/scsi/cxgbi/libcxgbi.c 	skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
skb              1920 drivers/scsi/cxgbi/libcxgbi.c 		task->hdr = (struct iscsi_hdr *)tdata->skb->data;
skb              1924 drivers/scsi/cxgbi/libcxgbi.c 			__kfree_skb(tdata->skb);
skb              1925 drivers/scsi/cxgbi/libcxgbi.c 			tdata->skb = NULL;
skb              1938 drivers/scsi/cxgbi/libcxgbi.c 		task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
skb              1945 drivers/scsi/cxgbi/libcxgbi.c static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
skb              1954 drivers/scsi/cxgbi/libcxgbi.c 		cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
skb              1956 drivers/scsi/cxgbi/libcxgbi.c 		cxgbi_skcb_ulp_mode(skb) = 0;
skb              1964 drivers/scsi/cxgbi/libcxgbi.c 	struct sk_buff *skb = tdata->skb;
skb              1971 drivers/scsi/cxgbi/libcxgbi.c 		task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
skb              1974 drivers/scsi/cxgbi/libcxgbi.c 	skb_put(skb, task->hdr_len);
skb              1975 drivers/scsi/cxgbi/libcxgbi.c 	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
skb              2005 drivers/scsi/cxgbi/libcxgbi.c 			char *dst = skb->data + task->hdr_len;
skb              2020 drivers/scsi/cxgbi/libcxgbi.c 			skb_put(skb, count + padlen);
skb              2024 drivers/scsi/cxgbi/libcxgbi.c 				__skb_fill_page_desc(skb, i,
skb              2028 drivers/scsi/cxgbi/libcxgbi.c 				skb_frag_ref(skb, i);
skb              2030 drivers/scsi/cxgbi/libcxgbi.c 			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
skb              2031 drivers/scsi/cxgbi/libcxgbi.c 			skb->len += count;
skb              2032 drivers/scsi/cxgbi/libcxgbi.c 			skb->data_len += count;
skb              2033 drivers/scsi/cxgbi/libcxgbi.c 			skb->truesize += count;
skb              2040 drivers/scsi/cxgbi/libcxgbi.c 		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
skb              2042 drivers/scsi/cxgbi/libcxgbi.c 		skb->len += count;
skb              2043 drivers/scsi/cxgbi/libcxgbi.c 		skb->data_len += count;
skb              2044 drivers/scsi/cxgbi/libcxgbi.c 		skb->truesize += count;
skb              2048 drivers/scsi/cxgbi/libcxgbi.c 		i = skb_shinfo(skb)->nr_frags;
skb              2049 drivers/scsi/cxgbi/libcxgbi.c 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
skb              2053 drivers/scsi/cxgbi/libcxgbi.c 		skb->data_len += padlen;
skb              2054 drivers/scsi/cxgbi/libcxgbi.c 		skb->truesize += padlen;
skb              2055 drivers/scsi/cxgbi/libcxgbi.c 		skb->len += padlen;
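The transmit-PDU construction above appends payload pages as skb fragments instead of copying them, bumping skb->len, data_len and truesize by hand. A minimal sketch of that accounting, assuming the caller manages the page reference (the listing takes one explicitly with skb_frag_ref()):

#include <linux/skbuff.h>

/* Attach page data to the skb without copying and account for it. */
static void attach_page_payload(struct sk_buff *skb, struct page *page,
				unsigned int offset, unsigned int count)
{
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			   page, offset, count);
	skb->len += count;
	skb->data_len += count;	/* bytes living in frags, not the head */
	skb->truesize += count;
}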
skb              2068 drivers/scsi/cxgbi/libcxgbi.c 	struct sk_buff *skb = tdata->skb;
skb              2073 drivers/scsi/cxgbi/libcxgbi.c 	if (!skb) {
skb              2087 drivers/scsi/cxgbi/libcxgbi.c 	tdata->skb = NULL;
skb              2088 drivers/scsi/cxgbi/libcxgbi.c 	datalen = skb->data_len;
skb              2102 drivers/scsi/cxgbi/libcxgbi.c 		memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);
skb              2104 drivers/scsi/cxgbi/libcxgbi.c 	err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
skb              2110 drivers/scsi/cxgbi/libcxgbi.c 			task, task->sc, skb, skb->len, skb->data_len, err);
skb              2125 drivers/scsi/cxgbi/libcxgbi.c 			task, skb, skb->len, skb->data_len, err);
skb              2127 drivers/scsi/cxgbi/libcxgbi.c 		tdata->skb = skb;
skb              2133 drivers/scsi/cxgbi/libcxgbi.c 		task->itt, skb, skb->len, skb->data_len, err);
skb              2135 drivers/scsi/cxgbi/libcxgbi.c 	__kfree_skb(skb);
skb              2157 drivers/scsi/cxgbi/libcxgbi.c 		task, tdata->skb, task->hdr_itt);
skb              2166 drivers/scsi/cxgbi/libcxgbi.c 	if (tdata->skb) {
skb              2167 drivers/scsi/cxgbi/libcxgbi.c 		__kfree_skb(tdata->skb);
skb              2168 drivers/scsi/cxgbi/libcxgbi.c 		tdata->skb = NULL;
skb               233 drivers/scsi/cxgbi/libcxgbi.h #define CXGBI_SKB_CB(skb)	((struct cxgbi_skb_cb *)&((skb)->cb[0]))
skb               234 drivers/scsi/cxgbi/libcxgbi.h #define cxgbi_skcb_flags(skb)		(CXGBI_SKB_CB(skb)->flags)
skb               235 drivers/scsi/cxgbi/libcxgbi.h #define cxgbi_skcb_ulp_mode(skb)	(CXGBI_SKB_CB(skb)->ulp_mode)
skb               236 drivers/scsi/cxgbi/libcxgbi.h #define cxgbi_skcb_tcp_seq(skb)		(CXGBI_SKB_CB(skb)->seq)
skb               237 drivers/scsi/cxgbi/libcxgbi.h #define cxgbi_skcb_rx_ddigest(skb)	(CXGBI_SKB_CB(skb)->rx.ddigest)
skb               238 drivers/scsi/cxgbi/libcxgbi.h #define cxgbi_skcb_rx_pdulen(skb)	(CXGBI_SKB_CB(skb)->rx.pdulen)
skb               239 drivers/scsi/cxgbi/libcxgbi.h #define cxgbi_skcb_tx_wr_next(skb)	(CXGBI_SKB_CB(skb)->tx.wr_next)
skb               241 drivers/scsi/cxgbi/libcxgbi.h static inline void cxgbi_skcb_set_flag(struct sk_buff *skb,
skb               244 drivers/scsi/cxgbi/libcxgbi.h 	__set_bit(flag, &(cxgbi_skcb_flags(skb)));
skb               247 drivers/scsi/cxgbi/libcxgbi.h static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
skb               250 drivers/scsi/cxgbi/libcxgbi.h 	__clear_bit(flag, &(cxgbi_skcb_flags(skb)));
skb               253 drivers/scsi/cxgbi/libcxgbi.h static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb,
skb               256 drivers/scsi/cxgbi/libcxgbi.h 	return test_bit(flag, &(cxgbi_skcb_flags(skb)));
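The libcxgbi.h macros above overlay a driver-private structure on the 48-byte skb->cb[] scratch area and wrap its flags word in set/clear/test helpers. A stripped-down version of the same idiom, with a hypothetical struct (the real cxgbi_skb_cb also carries ulp_mode, seq and an rx/tx union):

#include <linux/build_bug.h>
#include <linux/skbuff.h>

struct my_skb_cb {
	unsigned long flags;
	u32 seq;
};

#define MY_SKB_CB(skb)	((struct my_skb_cb *)&((skb)->cb[0]))

static inline void my_skb_cb_init(struct sk_buff *skb, u32 seq)
{
	/* the overlay must never outgrow cb[] */
	BUILD_BUG_ON(sizeof(struct my_skb_cb) > sizeof(skb->cb));
	MY_SKB_CB(skb)->flags = 0;
	MY_SKB_CB(skb)->seq = seq;
}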
skb               336 drivers/scsi/cxgbi/libcxgbi.h 	struct sk_buff *skb;
skb               338 drivers/scsi/cxgbi/libcxgbi.h 	while ((skb = __skb_dequeue(&csk->write_queue)))
skb               339 drivers/scsi/cxgbi/libcxgbi.h 		__kfree_skb(skb);
skb               353 drivers/scsi/cxgbi/libcxgbi.h 	struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);
skb               355 drivers/scsi/cxgbi/libcxgbi.h 	if (skb) {
skb               356 drivers/scsi/cxgbi/libcxgbi.h 		__skb_put(skb, wrlen);
skb               357 drivers/scsi/cxgbi/libcxgbi.h 		memset(skb->head, 0, wrlen + dlen);
skb               360 drivers/scsi/cxgbi/libcxgbi.h 	return skb;
skb               378 drivers/scsi/cxgbi/libcxgbi.h 					  struct sk_buff *skb)
skb               380 drivers/scsi/cxgbi/libcxgbi.h 	cxgbi_skcb_tx_wr_next(skb) = NULL;
skb               385 drivers/scsi/cxgbi/libcxgbi.h 	skb_get(skb);
skb               388 drivers/scsi/cxgbi/libcxgbi.h 		csk->wr_pending_head = skb;
skb               390 drivers/scsi/cxgbi/libcxgbi.h 		cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
skb               391 drivers/scsi/cxgbi/libcxgbi.h 	csk->wr_pending_tail = skb;
skb               397 drivers/scsi/cxgbi/libcxgbi.h 	const struct sk_buff *skb = csk->wr_pending_head;
skb               399 drivers/scsi/cxgbi/libcxgbi.h 	while (skb) {
skb               400 drivers/scsi/cxgbi/libcxgbi.h 		n += skb->csum;
skb               401 drivers/scsi/cxgbi/libcxgbi.h 		skb = cxgbi_skcb_tx_wr_next(skb);
skb               413 drivers/scsi/cxgbi/libcxgbi.h 	struct sk_buff *skb = csk->wr_pending_head;
skb               415 drivers/scsi/cxgbi/libcxgbi.h 	if (likely(skb)) {
skb               416 drivers/scsi/cxgbi/libcxgbi.h 		csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb);
skb               417 drivers/scsi/cxgbi/libcxgbi.h 		cxgbi_skcb_tx_wr_next(skb) = NULL;
skb               419 drivers/scsi/cxgbi/libcxgbi.h 	return skb;
skb               530 drivers/scsi/cxgbi/libcxgbi.h 	struct sk_buff *skb;
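The enqueue/dequeue helpers above keep unacknowledged work requests on a singly linked list threaded through the skb control block, taking an extra reference with skb_get() so the skb outlives the transmit until the hardware completes it. A sketch with illustrative names:

#include <linux/skbuff.h>

struct wr_queue {
	struct sk_buff *head;
	struct sk_buff *tail;
};

/* next pointer stored in cb[], as cxgbi_skcb_tx_wr_next() does */
#define WR_NEXT(skb)	(*(struct sk_buff **)(skb)->cb)

static void wr_enqueue(struct wr_queue *q, struct sk_buff *skb)
{
	WR_NEXT(skb) = NULL;
	skb_get(skb);			/* dropped when the WR completes */
	if (!q->head)
		q->head = skb;
	else
		WR_NEXT(q->tail) = skb;
	q->tail = skb;
}

static struct sk_buff *wr_dequeue(struct wr_queue *q)
{
	struct sk_buff *skb = q->head;

	if (likely(skb)) {
		q->head = WR_NEXT(skb);
		WR_NEXT(skb) = NULL;
	}
	return skb;
}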
skb               125 drivers/scsi/fcoe/fcoe.c static void fcoe_recv_frame(struct sk_buff *skb);
skb               509 drivers/scsi/fcoe/fcoe.c static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
skb               518 drivers/scsi/fcoe/fcoe.c 	fcoe_ctlr_recv(ctlr, skb);
skb               532 drivers/scsi/fcoe/fcoe.c static int fcoe_fip_vlan_recv(struct sk_buff *skb, struct net_device *netdev,
skb               541 drivers/scsi/fcoe/fcoe.c 	fcoe_ctlr_recv(ctlr, skb);
skb               550 drivers/scsi/fcoe/fcoe.c static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
skb               553 drivers/scsi/fcoe/fcoe.c 		fcoe_check_wait_queue(port->lport, skb);
skb               554 drivers/scsi/fcoe/fcoe.c 	else if (fcoe_start_io(skb))
skb               555 drivers/scsi/fcoe/fcoe.c 		fcoe_check_wait_queue(port->lport, skb);
skb               563 drivers/scsi/fcoe/fcoe.c static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
skb               574 drivers/scsi/fcoe/fcoe.c 	frame = (struct fip_frame *)skb->data;
skb               578 drivers/scsi/fcoe/fcoe.c 		skb->dev = fcoe->realdev;
skb               580 drivers/scsi/fcoe/fcoe.c 		skb->dev = fcoe->netdev;
skb               581 drivers/scsi/fcoe/fcoe.c 	fcoe_port_send(lport_priv(fip->lp), skb);
skb              1335 drivers/scsi/fcoe/fcoe.c static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
skb              1359 drivers/scsi/fcoe/fcoe.c 			skb->len, skb->data_len, skb->head, skb->data,
skb              1360 drivers/scsi/fcoe/fcoe.c 			skb_tail_pointer(skb), skb_end_pointer(skb),
skb              1361 drivers/scsi/fcoe/fcoe.c 			skb->csum, skb->dev ? skb->dev->name : "<NULL>");
skb              1364 drivers/scsi/fcoe/fcoe.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb              1366 drivers/scsi/fcoe/fcoe.c 	if (skb == NULL)
skb              1369 drivers/scsi/fcoe/fcoe.c 	eh = eth_hdr(skb);
skb              1382 drivers/scsi/fcoe/fcoe.c 	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
skb              1383 drivers/scsi/fcoe/fcoe.c 		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
skb              1386 drivers/scsi/fcoe/fcoe.c 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
skb              1387 drivers/scsi/fcoe/fcoe.c 	fh = (struct fc_frame_header *) skb_transport_header(skb);
skb              1395 drivers/scsi/fcoe/fcoe.c 	fr = fcoe_dev_from_skb(skb);
skb              1432 drivers/scsi/fcoe/fcoe.c 	__skb_queue_tail(&fps->fcoe_rx_list, skb);
skb              1441 drivers/scsi/fcoe/fcoe.c 	kfree_skb(skb);
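fcoe_rcv() above unshares the skb, rejects runts, and forces the FCoE and FC headers into the linear area before reading them, then queues the frame to a per-CPU list. A sketch of just the validation steps; MIN_FRAME and HDRS_LEN are placeholders for FCOE_MIN_FRAME and FCOE_HEADER_LEN:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <scsi/fc/fc_fcoe.h>

#define MIN_FRAME	36	/* placeholder for FCOE_MIN_FRAME */
#define HDRS_LEN	38	/* placeholder for FCOE_HEADER_LEN */

static struct sk_buff *ingress_check(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);	/* clone if shared */
	if (!skb)
		return NULL;

	if (skb->len < MIN_FRAME || !pskb_may_pull(skb, HDRS_LEN)) {
		kfree_skb(skb);
		return NULL;
	}
	/* the FC frame header begins right after the FCoE header */
	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	return skb;
}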
skb              1452 drivers/scsi/fcoe/fcoe.c static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
skb              1458 drivers/scsi/fcoe/fcoe.c 	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
skb              1477 drivers/scsi/fcoe/fcoe.c 	struct sk_buff *skb;
skb              1492 drivers/scsi/fcoe/fcoe.c 	skb = fp_skb(fp);
skb              1493 drivers/scsi/fcoe/fcoe.c 	wlen = skb->len / FCOE_WORD_TO_BYTE;
skb              1496 drivers/scsi/fcoe/fcoe.c 		kfree_skb(skb);
skb              1501 drivers/scsi/fcoe/fcoe.c 	    fcoe_ctlr_els_send(ctlr, lport, skb))
skb              1510 drivers/scsi/fcoe/fcoe.c 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
skb              1514 drivers/scsi/fcoe/fcoe.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb              1515 drivers/scsi/fcoe/fcoe.c 		skb->csum_start = skb_headroom(skb);
skb              1516 drivers/scsi/fcoe/fcoe.c 		skb->csum_offset = skb->len;
skb              1519 drivers/scsi/fcoe/fcoe.c 		skb->ip_summed = CHECKSUM_NONE;
skb              1524 drivers/scsi/fcoe/fcoe.c 	if (skb_is_nonlinear(skb)) {
skb              1526 drivers/scsi/fcoe/fcoe.c 		if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
skb              1527 drivers/scsi/fcoe/fcoe.c 			kfree_skb(skb);
skb              1530 drivers/scsi/fcoe/fcoe.c 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
skb              1533 drivers/scsi/fcoe/fcoe.c 		cp = skb_put(skb, tlen);
skb              1540 drivers/scsi/fcoe/fcoe.c 	if (skb_is_nonlinear(skb)) {
skb              1546 drivers/scsi/fcoe/fcoe.c 	skb_push(skb, elen + hlen);
skb              1547 drivers/scsi/fcoe/fcoe.c 	skb_reset_mac_header(skb);
skb              1548 drivers/scsi/fcoe/fcoe.c 	skb_reset_network_header(skb);
skb              1549 drivers/scsi/fcoe/fcoe.c 	skb->mac_len = elen;
skb              1550 drivers/scsi/fcoe/fcoe.c 	skb->protocol = htons(ETH_P_FCOE);
skb              1551 drivers/scsi/fcoe/fcoe.c 	skb->priority = fcoe->priority;
skb              1556 drivers/scsi/fcoe/fcoe.c 		skb->dev = fcoe->realdev;
skb              1557 drivers/scsi/fcoe/fcoe.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
skb              1560 drivers/scsi/fcoe/fcoe.c 		skb->dev = fcoe->netdev;
skb              1563 drivers/scsi/fcoe/fcoe.c 	eh = eth_hdr(skb);
skb              1582 drivers/scsi/fcoe/fcoe.c 		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
skb              1583 drivers/scsi/fcoe/fcoe.c 		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
skb              1585 drivers/scsi/fcoe/fcoe.c 		skb_shinfo(skb)->gso_type = 0;
skb              1586 drivers/scsi/fcoe/fcoe.c 		skb_shinfo(skb)->gso_size = 0;
skb              1596 drivers/scsi/fcoe/fcoe.c 	fcoe_port_send(port, skb);
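fcoe_xmit() above finishes a frame by prepending the Ethernet and FCoE headers into pre-reserved headroom and re-pointing the header offsets at the new front. The framing steps in isolation, as a sketch (priority and device selection mirror what the listing shows):

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <scsi/fc/fc_fcoe.h>

static void frame_for_tx(struct sk_buff *skb, struct net_device *dev,
			 u8 priority)
{
	int elen = ETH_HLEN;
	int hlen = sizeof(struct fcoe_hdr);

	skb_push(skb, elen + hlen);	/* headroom must already exist */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_FCOE);
	skb->priority = priority;
	skb->dev = dev;
}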
skb              1613 drivers/scsi/fcoe/fcoe.c 	struct sk_buff *skb = (struct sk_buff *)fp;
skb              1621 drivers/scsi/fcoe/fcoe.c 	if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
skb              1639 drivers/scsi/fcoe/fcoe.c 	    le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
skb              1656 drivers/scsi/fcoe/fcoe.c static void fcoe_recv_frame(struct sk_buff *skb)
skb              1666 drivers/scsi/fcoe/fcoe.c 	fr = fcoe_dev_from_skb(skb);
skb              1669 drivers/scsi/fcoe/fcoe.c 		FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
skb              1670 drivers/scsi/fcoe/fcoe.c 		kfree_skb(skb);
skb              1674 drivers/scsi/fcoe/fcoe.c 	FCOE_NETDEV_DBG(skb->dev,
skb              1676 drivers/scsi/fcoe/fcoe.c 			skb->len, skb->data_len,
skb              1677 drivers/scsi/fcoe/fcoe.c 			skb->head, skb->data, skb_tail_pointer(skb),
skb              1678 drivers/scsi/fcoe/fcoe.c 			skb_end_pointer(skb), skb->csum,
skb              1679 drivers/scsi/fcoe/fcoe.c 			skb->dev ? skb->dev->name : "<NULL>");
skb              1681 drivers/scsi/fcoe/fcoe.c 	skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */
skb              1687 drivers/scsi/fcoe/fcoe.c 	hp = (struct fcoe_hdr *) skb_network_header(skb);
skb              1701 drivers/scsi/fcoe/fcoe.c 	skb_pull(skb, sizeof(struct fcoe_hdr));
skb              1702 drivers/scsi/fcoe/fcoe.c 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
skb              1707 drivers/scsi/fcoe/fcoe.c 	fp = (struct fc_frame *)skb;
skb              1713 drivers/scsi/fcoe/fcoe.c 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
skb              1717 drivers/scsi/fcoe/fcoe.c 	if (pskb_trim(skb, fr_len))
skb              1728 drivers/scsi/fcoe/fcoe.c 	kfree_skb(skb);
skb              1739 drivers/scsi/fcoe/fcoe.c 	struct sk_buff *skb;
skb              1752 drivers/scsi/fcoe/fcoe.c 	while ((skb = __skb_dequeue(&tmp)))
skb              1753 drivers/scsi/fcoe/fcoe.c 		fcoe_recv_frame(skb);
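fcoe_recv_frame() above pulls the FCoE header, copies the CRC/EOF trailer out with skb_copy_bits() (the trailer may sit in a page fragment, so it cannot simply be cast), and trims the skb back to the FC frame proper. The trailer handling as a sketch:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <scsi/fc/fc_fcoe.h>

static int strip_crc_eof(struct sk_buff *skb, struct fcoe_crc_eof *out)
{
	unsigned int fr_len;

	if (skb->len < sizeof(struct fcoe_hdr) + sizeof(*out))
		return -EINVAL;

	skb_pull(skb, sizeof(struct fcoe_hdr));
	fr_len = skb->len - sizeof(*out);

	/* the trailer may live in a frag, so copy rather than cast */
	if (skb_copy_bits(skb, fr_len, out, sizeof(*out)))
		return -EFAULT;
	return pskb_trim(skb, fr_len);	/* drop the trailer */
}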
skb               377 drivers/scsi/fcoe/fcoe_ctlr.c 	struct sk_buff *skb;
skb               389 drivers/scsi/fcoe/fcoe_ctlr.c 	skb = dev_alloc_skb(sizeof(*sol));
skb               390 drivers/scsi/fcoe/fcoe_ctlr.c 	if (!skb)
skb               393 drivers/scsi/fcoe/fcoe_ctlr.c 	sol = (struct fip_sol *)skb->data;
skb               421 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_put(skb, sizeof(*sol));
skb               422 drivers/scsi/fcoe/fcoe_ctlr.c 	skb->protocol = htons(ETH_P_FIP);
skb               423 drivers/scsi/fcoe/fcoe_ctlr.c 	skb->priority = fip->priority;
skb               424 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_reset_mac_header(skb);
skb               425 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_reset_network_header(skb);
skb               426 drivers/scsi/fcoe/fcoe_ctlr.c 	fip->send(fip, skb);
skb               532 drivers/scsi/fcoe/fcoe_ctlr.c 	struct sk_buff *skb;
skb               549 drivers/scsi/fcoe/fcoe_ctlr.c 	skb = dev_alloc_skb(len);
skb               550 drivers/scsi/fcoe/fcoe_ctlr.c 	if (!skb)
skb               553 drivers/scsi/fcoe/fcoe_ctlr.c 	kal = (struct fip_kal *)skb->data;
skb               579 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_put(skb, len);
skb               580 drivers/scsi/fcoe/fcoe_ctlr.c 	skb->protocol = htons(ETH_P_FIP);
skb               581 drivers/scsi/fcoe/fcoe_ctlr.c 	skb->priority = fip->priority;
skb               582 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_reset_mac_header(skb);
skb               583 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_reset_network_header(skb);
skb               584 drivers/scsi/fcoe/fcoe_ctlr.c 	fip->send(fip, skb);
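The solicitation and keep-alive senders above share one construction recipe: dev_alloc_skb(), fill the frame in place, skb_put() the final length, stamp protocol and priority, reset the header offsets, and hand the skb to fip->send(). A stripped-down version; the frame struct here is hypothetical, standing in for fip_sol/fip_kal:

#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/string.h>

struct my_fip_frame {
	struct ethhdr eth;
	u8 body[64];		/* stand-in for the FIP descriptors */
} __packed;

static struct sk_buff *build_fip_frame(u8 priority)
{
	struct my_fip_frame *f;
	struct sk_buff *skb = dev_alloc_skb(sizeof(*f));

	if (!skb)
		return NULL;

	f = (struct my_fip_frame *)skb->data;
	memset(f, 0, sizeof(*f));
	f->eth.h_proto = htons(ETH_P_FIP);
	/* ... fill the FIP header and descriptors here ... */

	skb_put(skb, sizeof(*f));
	skb->protocol = htons(ETH_P_FIP);
	skb->priority = priority;
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	return skb;		/* caller passes it to fip->send() */
}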
skb               603 drivers/scsi/fcoe/fcoe_ctlr.c 			    u8 dtype, struct sk_buff *skb, u32 d_id)
skb               617 drivers/scsi/fcoe/fcoe_ctlr.c 	fh = (struct fc_frame_header *)skb->data;
skb               619 drivers/scsi/fcoe/fcoe_ctlr.c 	dlen = sizeof(struct fip_encaps) + skb->len;	/* len before push */
skb               620 drivers/scsi/fcoe/fcoe_ctlr.c 	cap = skb_push(skb, sizeof(*cap));
skb               654 drivers/scsi/fcoe/fcoe_ctlr.c 		mac = skb_put_zero(skb, sizeof(*mac));
skb               672 drivers/scsi/fcoe/fcoe_ctlr.c 	skb->protocol = htons(ETH_P_FIP);
skb               673 drivers/scsi/fcoe/fcoe_ctlr.c 	skb->priority = fip->priority;
skb               674 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_reset_mac_header(skb);
skb               675 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_reset_network_header(skb);
skb               696 drivers/scsi/fcoe/fcoe_ctlr.c 		       struct sk_buff *skb)
skb               704 drivers/scsi/fcoe/fcoe_ctlr.c 	fp = container_of(skb, struct fc_frame, skb);
skb               705 drivers/scsi/fcoe/fcoe_ctlr.c 	fh = (struct fc_frame_header *)skb->data;
skb               735 drivers/scsi/fcoe/fcoe_ctlr.c 		fip->flogi_req = skb;
skb               788 drivers/scsi/fcoe/fcoe_ctlr.c 	if (fcoe_ctlr_encaps(fip, lport, op, skb, ntoh24(fh->fh_d_id)))
skb               790 drivers/scsi/fcoe/fcoe_ctlr.c 	fip->send(fip, skb);
skb               795 drivers/scsi/fcoe/fcoe_ctlr.c 	kfree_skb(skb);
skb               890 drivers/scsi/fcoe/fcoe_ctlr.c 			       struct sk_buff *skb, struct fcoe_fcf *fcf)
skb               905 drivers/scsi/fcoe/fcoe_ctlr.c 	fiph = (struct fip_header *)skb->data;
skb               915 drivers/scsi/fcoe/fcoe_ctlr.c 	if (rlen + sizeof(*fiph) > skb->len)
skb              1018 drivers/scsi/fcoe/fcoe_ctlr.c static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
skb              1028 drivers/scsi/fcoe/fcoe_ctlr.c 	if (fcoe_ctlr_parse_adv(fip, skb, &new))
skb              1134 drivers/scsi/fcoe/fcoe_ctlr.c static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
skb              1138 drivers/scsi/fcoe/fcoe_ctlr.c 	struct fc_frame *fp = (struct fc_frame *)skb;
skb              1154 drivers/scsi/fcoe/fcoe_ctlr.c 	fiph = (struct fip_header *)skb->data;
skb              1160 drivers/scsi/fcoe/fcoe_ctlr.c 	if (rlen + sizeof(*fiph) > skb->len)
skb              1278 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_pull(skb, (u8 *)fh - skb->data);
skb              1279 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_trim(skb, els_len);
skb              1280 drivers/scsi/fcoe/fcoe_ctlr.c 	fp = (struct fc_frame *)skb;
skb              1289 drivers/scsi/fcoe/fcoe_ctlr.c 	stats->RxWords += skb->len / FIP_BPW;
skb              1299 drivers/scsi/fcoe/fcoe_ctlr.c 	kfree_skb(skb);
skb              1311 drivers/scsi/fcoe/fcoe_ctlr.c 				     struct sk_buff *skb)
skb              1326 drivers/scsi/fcoe/fcoe_ctlr.c 	struct fip_header *fh = (struct fip_header *)skb->data;
skb              1327 drivers/scsi/fcoe/fcoe_ctlr.c 	struct ethhdr *eh = eth_hdr(skb);
skb              1518 drivers/scsi/fcoe/fcoe_ctlr.c void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
skb              1520 drivers/scsi/fcoe/fcoe_ctlr.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb              1521 drivers/scsi/fcoe/fcoe_ctlr.c 	if (!skb)
skb              1523 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_queue_tail(&fip->fip_recv_list, skb);
skb              1535 drivers/scsi/fcoe/fcoe_ctlr.c static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
skb              1544 drivers/scsi/fcoe/fcoe_ctlr.c 	if (skb_linearize(skb))
skb              1546 drivers/scsi/fcoe/fcoe_ctlr.c 	if (skb->len < sizeof(*fiph))
skb              1548 drivers/scsi/fcoe/fcoe_ctlr.c 	eh = eth_hdr(skb);
skb              1557 drivers/scsi/fcoe/fcoe_ctlr.c 	fiph = (struct fip_header *)skb->data;
skb              1563 drivers/scsi/fcoe/fcoe_ctlr.c 	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
skb              1578 drivers/scsi/fcoe/fcoe_ctlr.c 		return fcoe_ctlr_vn_recv(fip, skb);
skb              1582 drivers/scsi/fcoe/fcoe_ctlr.c 		return fcoe_ctlr_vlan_recv(fip, skb);
skb              1590 drivers/scsi/fcoe/fcoe_ctlr.c 		fcoe_ctlr_recv_els(fip, skb);	/* consumes skb */
skb              1598 drivers/scsi/fcoe/fcoe_ctlr.c 		fcoe_ctlr_recv_adv(fip, skb);
skb              1600 drivers/scsi/fcoe/fcoe_ctlr.c 		fcoe_ctlr_recv_clr_vlink(fip, skb);
skb              1601 drivers/scsi/fcoe/fcoe_ctlr.c 	kfree_skb(skb);
skb              1604 drivers/scsi/fcoe/fcoe_ctlr.c 	kfree_skb(skb);
skb              1675 drivers/scsi/fcoe/fcoe_ctlr.c 	struct sk_buff *skb;
skb              1687 drivers/scsi/fcoe/fcoe_ctlr.c 	skb = skb_clone(skb_orig, GFP_ATOMIC);
skb              1688 drivers/scsi/fcoe/fcoe_ctlr.c 	if (!skb) {
skb              1689 drivers/scsi/fcoe/fcoe_ctlr.c 		skb = skb_orig;
skb              1692 drivers/scsi/fcoe/fcoe_ctlr.c 	fh = (struct fc_frame_header *)skb->data;
skb              1693 drivers/scsi/fcoe/fcoe_ctlr.c 	error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb,
skb              1696 drivers/scsi/fcoe/fcoe_ctlr.c 		kfree_skb(skb);
skb              1699 drivers/scsi/fcoe/fcoe_ctlr.c 	fip->send(fip, skb);
skb              1880 drivers/scsi/fcoe/fcoe_ctlr.c 	struct sk_buff *skb;
skb              1883 drivers/scsi/fcoe/fcoe_ctlr.c 	while ((skb = skb_dequeue(&fip->fip_recv_list)))
skb              1884 drivers/scsi/fcoe/fcoe_ctlr.c 		fcoe_ctlr_recv_handler(fip, skb);
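fcoe_ctlr_recv() above is safe to call from atomic context precisely because it does no parsing: it unshares the skb, queues it, and lets the work function drain the queue later in process context. The shape of that hand-off, as a sketch with illustrative names:

#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct rx_ctx {
	struct sk_buff_head q;
	struct work_struct work;	/* runs rx_work() */
};

static void rx_enqueue(struct rx_ctx *ctx, struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return;
	skb_queue_tail(&ctx->q, skb);	/* takes the queue's own lock */
	schedule_work(&ctx->work);
}

static void rx_work(struct work_struct *work)
{
	struct rx_ctx *ctx = container_of(work, struct rx_ctx, work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ctx->q)))
		kfree_skb(skb);		/* real code parses, then frees */
}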
skb              1909 drivers/scsi/fcoe/fcoe_ctlr.c 	sa = eth_hdr(&fp->skb)->h_source;
skb              2022 drivers/scsi/fcoe/fcoe_ctlr.c 	struct sk_buff *skb;
skb              2046 drivers/scsi/fcoe/fcoe_ctlr.c 	skb = dev_alloc_skb(len);
skb              2047 drivers/scsi/fcoe/fcoe_ctlr.c 	if (!skb)
skb              2050 drivers/scsi/fcoe/fcoe_ctlr.c 	frame = (struct fip_vn2vn_probe_frame *)skb->data;
skb              2106 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_put(skb, len);
skb              2107 drivers/scsi/fcoe/fcoe_ctlr.c 	skb->protocol = htons(ETH_P_FIP);
skb              2108 drivers/scsi/fcoe/fcoe_ctlr.c 	skb->priority = fip->priority;
skb              2109 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_reset_mac_header(skb);
skb              2110 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_reset_network_header(skb);
skb              2112 drivers/scsi/fcoe/fcoe_ctlr.c 	fip->send(fip, skb);
skb              2271 drivers/scsi/fcoe/fcoe_ctlr.c 			      struct sk_buff *skb,
skb              2286 drivers/scsi/fcoe/fcoe_ctlr.c 	fiph = (struct fip_header *)skb->data;
skb              2309 drivers/scsi/fcoe/fcoe_ctlr.c 	if (rlen + sizeof(*fiph) > skb->len)
skb              2738 drivers/scsi/fcoe/fcoe_ctlr.c static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
skb              2745 drivers/scsi/fcoe/fcoe_ctlr.c 	fiph = (struct fip_header *)skb->data;
skb              2749 drivers/scsi/fcoe/fcoe_ctlr.c 		vlan_id = skb_vlan_tag_get_id(skb);
skb              2758 drivers/scsi/fcoe/fcoe_ctlr.c 	rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
skb              2788 drivers/scsi/fcoe/fcoe_ctlr.c 	kfree_skb(skb);
skb              2802 drivers/scsi/fcoe/fcoe_ctlr.c 			      struct sk_buff *skb,
skb              2815 drivers/scsi/fcoe/fcoe_ctlr.c 	fiph = (struct fip_header *)skb->data;
skb              2829 drivers/scsi/fcoe/fcoe_ctlr.c 	if (rlen + sizeof(*fiph) > skb->len)
skb              2901 drivers/scsi/fcoe/fcoe_ctlr.c 	struct sk_buff *skb;
skb              2915 drivers/scsi/fcoe/fcoe_ctlr.c 	skb = dev_alloc_skb(len);
skb              2916 drivers/scsi/fcoe/fcoe_ctlr.c 	if (!skb)
skb              2923 drivers/scsi/fcoe/fcoe_ctlr.c 	frame = (struct fip_vlan_notify_frame *)skb->data;
skb              2943 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_put(skb, len);
skb              2944 drivers/scsi/fcoe/fcoe_ctlr.c 	skb->protocol = htons(ETH_P_FIP);
skb              2945 drivers/scsi/fcoe/fcoe_ctlr.c 	skb->priority = fip->priority;
skb              2946 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_reset_mac_header(skb);
skb              2947 drivers/scsi/fcoe/fcoe_ctlr.c 	skb_reset_network_header(skb);
skb              2949 drivers/scsi/fcoe/fcoe_ctlr.c 	fip->send(fip, skb);
skb              2976 drivers/scsi/fcoe/fcoe_ctlr.c static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
skb              2983 drivers/scsi/fcoe/fcoe_ctlr.c 	fiph = (struct fip_header *)skb->data;
skb              2985 drivers/scsi/fcoe/fcoe_ctlr.c 	rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
skb              2996 drivers/scsi/fcoe/fcoe_ctlr.c 	kfree_skb(skb);
skb               310 drivers/scsi/fcoe/fcoe_transport.c 	struct sk_buff *skb = fp_skb(fp);
skb               317 drivers/scsi/fcoe/fcoe_transport.c 	crc = crc32(~0, skb->data, skb_headlen(skb));
skb               319 drivers/scsi/fcoe/fcoe_transport.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               320 drivers/scsi/fcoe/fcoe_transport.c 		frag = &skb_shinfo(skb)->frags[i];
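The fcoe_fc_crc() lines above compute the FC CRC over the linear region first and then over each page fragment through a temporary mapping. A sketch of the same walk on recent kernels; skb_frag_off()/skb_frag_size() are the current accessor spellings, while older trees read frag->page_offset directly:

#include <linux/crc32.h>
#include <linux/highmem.h>
#include <linux/skbuff.h>

static u32 fc_frame_crc(struct sk_buff *skb)
{
	u32 crc = crc32(~0, skb->data, skb_headlen(skb));
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *va = kmap_atomic(skb_frag_page(frag));

		crc = crc32(crc, va + skb_frag_off(frag),
			    skb_frag_size(frag));
		kunmap_atomic(va);
	}
	return crc;
}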
skb               346 drivers/scsi/fcoe/fcoe_transport.c int fcoe_start_io(struct sk_buff *skb)
skb               351 drivers/scsi/fcoe/fcoe_transport.c 	nskb = skb_clone(skb, GFP_ATOMIC);
skb               357 drivers/scsi/fcoe/fcoe_transport.c 	kfree_skb(skb);
skb               370 drivers/scsi/fcoe/fcoe_transport.c 	struct sk_buff *skb;
skb               373 drivers/scsi/fcoe/fcoe_transport.c 	while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
skb               375 drivers/scsi/fcoe/fcoe_transport.c 		kfree_skb(skb);
skb               395 drivers/scsi/fcoe/fcoe_transport.c void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
skb               402 drivers/scsi/fcoe/fcoe_transport.c 	if (skb)
skb               403 drivers/scsi/fcoe/fcoe_transport.c 		__skb_queue_tail(&port->fcoe_pending_queue, skb);
skb               412 drivers/scsi/fcoe/fcoe_transport.c 		skb = __skb_dequeue(&port->fcoe_pending_queue);
skb               415 drivers/scsi/fcoe/fcoe_transport.c 		rc = fcoe_start_io(skb);
skb               419 drivers/scsi/fcoe/fcoe_transport.c 			__skb_queue_head(&port->fcoe_pending_queue, skb);
skb               468 drivers/scsi/fcoe/fcoe_transport.c int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
skb               484 drivers/scsi/fcoe/fcoe_transport.c 	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
skb               486 drivers/scsi/fcoe/fcoe_transport.c 	skb->len += tlen;
skb               487 drivers/scsi/fcoe/fcoe_transport.c 	skb->data_len += tlen;
skb               488 drivers/scsi/fcoe/fcoe_transport.c 	skb->truesize += tlen;
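fcoe_check_wait_queue() above implements simple backpressure: new frames are appended to a private pending queue, and on flush each dequeued frame that fails to transmit goes back at the head so ordering is preserved. The core loop, with the transmit step abstracted to a callback and locking elided:

#include <linux/skbuff.h>

static void flush_pending(struct sk_buff_head *q,
			  int (*xmit)(struct sk_buff *))
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(q)) != NULL) {
		if (xmit(skb)) {
			/* failed: requeue at head, retry later */
			__skb_queue_head(q, skb);
			break;
		}
	}
}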
skb               340 drivers/scsi/fnic/fnic.h void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
skb                47 drivers/scsi/fnic/fnic_fcs.c static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
skb               196 drivers/scsi/fnic/fnic_fcs.c 	struct sk_buff *skb;
skb               199 drivers/scsi/fnic/fnic_fcs.c 	while ((skb = skb_dequeue(&fnic->frame_queue))) {
skb               204 drivers/scsi/fnic/fnic_fcs.c 			dev_kfree_skb(skb);
skb               207 drivers/scsi/fnic/fnic_fcs.c 		fp = (struct fc_frame *)skb;
skb               215 drivers/scsi/fnic/fnic_fcs.c 			skb_queue_head(&fnic->frame_queue, skb);
skb               305 drivers/scsi/fnic/fnic_fcs.c 					 struct sk_buff *skb)
skb               321 drivers/scsi/fnic/fnic_fcs.c 	if (skb_linearize(skb))
skb               324 drivers/scsi/fnic/fnic_fcs.c 	if (skb->len < sizeof(*fiph))
skb               327 drivers/scsi/fnic/fnic_fcs.c 	fiph = (struct fip_header *)skb->data;
skb               338 drivers/scsi/fnic/fnic_fcs.c 	if (rlen + sizeof(*fiph) > skb->len)
skb               377 drivers/scsi/fnic/fnic_fcs.c 	struct sk_buff *skb;
skb               390 drivers/scsi/fnic/fnic_fcs.c 	skb = dev_alloc_skb(sizeof(struct fip_vlan));
skb               391 drivers/scsi/fnic/fnic_fcs.c 	if (!skb)
skb               395 drivers/scsi/fnic/fnic_fcs.c 	eth_fr = (char *)skb->data;
skb               417 drivers/scsi/fnic/fnic_fcs.c 	skb_put(skb, sizeof(*vlan));
skb               418 drivers/scsi/fnic/fnic_fcs.c 	skb->protocol = htons(ETH_P_FIP);
skb               419 drivers/scsi/fnic/fnic_fcs.c 	skb_reset_mac_header(skb);
skb               420 drivers/scsi/fnic/fnic_fcs.c 	skb_reset_network_header(skb);
skb               421 drivers/scsi/fnic/fnic_fcs.c 	fip->send(fip, skb);
skb               428 drivers/scsi/fnic/fnic_fcs.c static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
skb               444 drivers/scsi/fnic/fnic_fcs.c 	fiph = (struct fip_header *) skb->data;
skb               567 drivers/scsi/fnic/fnic_fcs.c static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
skb               574 drivers/scsi/fnic/fnic_fcs.c 	if (!skb || !(skb->data))
skb               577 drivers/scsi/fnic/fnic_fcs.c 	if (skb_linearize(skb))
skb               580 drivers/scsi/fnic/fnic_fcs.c 	fiph = (struct fip_header *)skb->data;
skb               587 drivers/scsi/fnic/fnic_fcs.c 	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
skb               597 drivers/scsi/fnic/fnic_fcs.c 		fnic_fcoe_process_vlan_resp(fnic, skb);
skb               614 drivers/scsi/fnic/fnic_fcs.c 	struct sk_buff *skb;
skb               617 drivers/scsi/fnic/fnic_fcs.c 	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
skb               621 drivers/scsi/fnic/fnic_fcs.c 			dev_kfree_skb(skb);
skb               630 drivers/scsi/fnic/fnic_fcs.c 			skb_queue_head(&fnic->fip_frame_queue, skb);
skb               635 drivers/scsi/fnic/fnic_fcs.c 		eh = (struct ethhdr *)skb->data;
skb               637 drivers/scsi/fnic/fnic_fcs.c 			skb_pull(skb, sizeof(*eh));
skb               638 drivers/scsi/fnic/fnic_fcs.c 			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
skb               639 drivers/scsi/fnic/fnic_fcs.c 				dev_kfree_skb(skb);
skb               646 drivers/scsi/fnic/fnic_fcs.c 			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
skb               654 drivers/scsi/fnic/fnic_fcs.c 				dev_kfree_skb(skb);
skb               657 drivers/scsi/fnic/fnic_fcs.c 			fcoe_ctlr_recv(&fnic->ctlr, skb);
skb               668 drivers/scsi/fnic/fnic_fcs.c static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
skb               678 drivers/scsi/fnic/fnic_fcs.c 	eh = (struct ethhdr *)skb->data;
skb               681 drivers/scsi/fnic/fnic_fcs.c 		eh = skb_pull(skb, VLAN_HLEN);
skb               682 drivers/scsi/fnic/fnic_fcs.c 		skb_reset_mac_header(skb);
skb               692 drivers/scsi/fnic/fnic_fcs.c 			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
skb               695 drivers/scsi/fnic/fnic_fcs.c 		skb_queue_tail(&fnic->fip_frame_queue, skb);
skb               701 drivers/scsi/fnic/fnic_fcs.c 	skb_set_network_header(skb, sizeof(*eh));
skb               702 drivers/scsi/fnic/fnic_fcs.c 	skb_pull(skb, sizeof(*eh));
skb               704 drivers/scsi/fnic/fnic_fcs.c 	fcoe_hdr = (struct fcoe_hdr *)skb->data;
skb               708 drivers/scsi/fnic/fnic_fcs.c 	fp = (struct fc_frame *)skb;
skb               711 drivers/scsi/fnic/fnic_fcs.c 	skb_pull(skb, sizeof(struct fcoe_hdr));
skb               712 drivers/scsi/fnic/fnic_fcs.c 	skb_reset_transport_header(skb);
skb               714 drivers/scsi/fnic/fnic_fcs.c 	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
skb               716 drivers/scsi/fnic/fnic_fcs.c 	skb_trim(skb, skb->len - sizeof(*ft));
skb               719 drivers/scsi/fnic/fnic_fcs.c 	dev_kfree_skb_irq(skb);
skb               837 drivers/scsi/fnic/fnic_fcs.c 	struct sk_buff *skb;
skb               857 drivers/scsi/fnic/fnic_fcs.c 	skb = buf->os_buf;
skb               858 drivers/scsi/fnic/fnic_fcs.c 	fp = (struct fc_frame *)skb;
skb               871 drivers/scsi/fnic/fnic_fcs.c 		skb_trim(skb, fcp_bytes_written);
skb               888 drivers/scsi/fnic/fnic_fcs.c 		skb_trim(skb, bytes_written);
skb               895 drivers/scsi/fnic/fnic_fcs.c 		if (fnic_import_rq_eth_pkt(fnic, skb))
skb               924 drivers/scsi/fnic/fnic_fcs.c 					(char *)skb->data, skb->len)) != 0) {
skb               928 drivers/scsi/fnic/fnic_fcs.c 	skb_queue_tail(&fnic->frame_queue, skb);
skb               933 drivers/scsi/fnic/fnic_fcs.c 	dev_kfree_skb_irq(skb);
skb               980 drivers/scsi/fnic/fnic_fcs.c 	struct sk_buff *skb;
skb               986 drivers/scsi/fnic/fnic_fcs.c 	skb = dev_alloc_skb(len);
skb               987 drivers/scsi/fnic/fnic_fcs.c 	if (!skb) {
skb               992 drivers/scsi/fnic/fnic_fcs.c 	skb_reset_mac_header(skb);
skb               993 drivers/scsi/fnic/fnic_fcs.c 	skb_reset_transport_header(skb);
skb               994 drivers/scsi/fnic/fnic_fcs.c 	skb_reset_network_header(skb);
skb               995 drivers/scsi/fnic/fnic_fcs.c 	skb_put(skb, len);
skb               996 drivers/scsi/fnic/fnic_fcs.c 	pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
skb              1003 drivers/scsi/fnic/fnic_fcs.c 	fnic_queue_rq_desc(rq, skb, pa, len);
skb              1007 drivers/scsi/fnic/fnic_fcs.c 	kfree_skb(skb);
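The fnic receive-buffer posting above sizes an skb for the largest expected frame, maps it for device writes, and posts it to the receive queue; a mapping failure must free the skb rather than post it. A sketch with the driver's queue-post helper (fnic_queue_rq_desc() in the listing) abstracted to a callback:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int post_rx_buf(struct device *dev, unsigned int len,
		       void (*post)(struct sk_buff *, dma_addr_t,
				    unsigned int))
{
	struct sk_buff *skb = dev_alloc_skb(len);
	dma_addr_t pa;

	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);	/* hardware may fill the whole buffer */
	pa = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pa)) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	post(skb, pa, len);	/* stand-in for the descriptor post */
	return 0;
}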
skb              1028 drivers/scsi/fnic/fnic_fcs.c void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
skb              1038 drivers/scsi/fnic/fnic_fcs.c 		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
skb              1039 drivers/scsi/fnic/fnic_fcs.c 		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
skb              1045 drivers/scsi/fnic/fnic_fcs.c 			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
skb              1050 drivers/scsi/fnic/fnic_fcs.c 			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
skb              1055 drivers/scsi/fnic/fnic_fcs.c 	pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
skb              1066 drivers/scsi/fnic/fnic_fcs.c 	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
skb              1074 drivers/scsi/fnic/fnic_fcs.c 	dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
skb              1076 drivers/scsi/fnic/fnic_fcs.c 	kfree_skb(skb);
skb              1085 drivers/scsi/fnic/fnic_fcs.c 	struct sk_buff *skb;
skb              1096 drivers/scsi/fnic/fnic_fcs.c 	skb = fp_skb(fp);
skb              1099 drivers/scsi/fnic/fnic_fcs.c 	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
skb              1104 drivers/scsi/fnic/fnic_fcs.c 		vlan_hdr = skb_push(skb, eth_hdr_len);
skb              1112 drivers/scsi/fnic/fnic_fcs.c 		eth_hdr = skb_push(skb, eth_hdr_len);
skb              1123 drivers/scsi/fnic/fnic_fcs.c 	tot_len = skb->len;
skb              1151 drivers/scsi/fnic/fnic_fcs.c 	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
skb              1206 drivers/scsi/fnic/fnic_fcs.c 	struct sk_buff *skb;
skb              1209 drivers/scsi/fnic/fnic_fcs.c 	while ((skb = skb_dequeue(&fnic->tx_queue))) {
skb              1210 drivers/scsi/fnic/fnic_fcs.c 		fp = (struct fc_frame *)skb;
skb              1257 drivers/scsi/fnic/fnic_fcs.c 	struct sk_buff *skb = buf->os_buf;
skb              1258 drivers/scsi/fnic/fnic_fcs.c 	struct fc_frame *fp = (struct fc_frame *)skb;
skb                80 drivers/scsi/iscsi_tcp.c static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
skb                87 drivers/scsi/iscsi_tcp.c 	ISCSI_SW_TCP_DBG(conn, "in %d bytes\n", skb->len - offset);
skb                91 drivers/scsi/iscsi_tcp.c 		consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
skb                97 drivers/scsi/iscsi_tcp.c 			 skb->len - offset, status);
skb                46 drivers/scsi/libfc/fc_frame.c 	struct sk_buff *skb;
skb                50 drivers/scsi/libfc/fc_frame.c 	skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM +
skb                52 drivers/scsi/libfc/fc_frame.c 	if (!skb)
skb                54 drivers/scsi/libfc/fc_frame.c 	skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM);
skb                55 drivers/scsi/libfc/fc_frame.c 	fp = (struct fc_frame *) skb;
skb                57 drivers/scsi/libfc/fc_frame.c 	skb_put(skb, len);
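The fc_frame allocation above uses fclone-capable skbs with room reserved up front for the eventual Ethernet/FCoE encapsulation and the CRC/EOF trailer. A sketch; the ROOM constants are placeholders for FC_FRAME_HEADROOM and FC_FRAME_TAILROOM:

#include <linux/skbuff.h>

#define HEADROOM	32	/* placeholder for FC_FRAME_HEADROOM */
#define TAILROOM	8	/* placeholder for FC_FRAME_TAILROOM */

static struct sk_buff *frame_alloc(size_t len)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(len + HEADROOM + TAILROOM + NET_SKB_PAD,
			       GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reserve(skb, NET_SKB_PAD + HEADROOM);
	skb_put(skb, len);	/* tailroom stays free for the trailer */
	return skb;
}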
skb               885 drivers/scsi/libiscsi_tcp.c int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
skb               894 drivers/scsi/libiscsi_tcp.c 	ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
skb               913 drivers/scsi/libiscsi_tcp.c 	skb_prepare_seq_read(skb, offset, skb->len, &seq);
skb               927 drivers/scsi/libiscsi_tcp.c 		ISCSI_DBG_TCP(conn, "skb %p ptr=%p avail=%u\n", skb, ptr,
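iscsi_tcp_recv_skb() above uses the sequential-read API, which iterates linear data and page fragments uniformly without copying. The walk in isolation, as a sketch:

#include <linux/skbuff.h>

static unsigned int walk_skb(struct sk_buff *skb, unsigned int offset)
{
	struct skb_seq_state seq;
	const u8 *ptr;
	unsigned int avail, consumed = 0;

	skb_prepare_seq_read(skb, offset, skb->len, &seq);
	while ((avail = skb_seq_read(consumed, &ptr, &seq)) != 0) {
		/* 'ptr' is readable for 'avail' bytes of this chunk */
		consumed += avail;
	}
	return consumed;	/* the 0 return ends the read cleanly */
}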
skb              1399 drivers/scsi/pmcraid.c 	struct sk_buff *skb;
skb              1416 drivers/scsi/pmcraid.c 	skb = genlmsg_new(nla_genl_hdr_total_size, GFP_ATOMIC);
skb              1419 drivers/scsi/pmcraid.c 	if (!skb) {
skb              1426 drivers/scsi/pmcraid.c 	msg_header = genlmsg_put(skb, 0, 0,
skb              1431 drivers/scsi/pmcraid.c 		nlmsg_free(skb);
skb              1435 drivers/scsi/pmcraid.c 	result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
skb              1439 drivers/scsi/pmcraid.c 		nlmsg_free(skb);
skb              1444 drivers/scsi/pmcraid.c 	genlmsg_end(skb, msg_header);
skb              1446 drivers/scsi/pmcraid.c 	result = genlmsg_multicast(&pmcraid_event_family, skb,
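The pmcraid AEN path above is the canonical generic-netlink event sequence: genlmsg_new(), genlmsg_put(), one nla_put() attribute, genlmsg_end(), genlmsg_multicast(). Condensed into a sketch, with the family, command and attribute ids as placeholders:

#include <linux/errno.h>
#include <net/genetlink.h>

static int send_event(struct genl_family *family, unsigned int group,
		      int attr, const void *data, int len)
{
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(nla_total_size(len), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hdr = genlmsg_put(skb, 0, 0, family, 0, 1 /* cmd, placeholder */);
	if (!hdr || nla_put(skb, attr, len, data)) {
		nlmsg_free(skb);	/* frees the whole skb */
		return -EMSGSIZE;
	}
	genlmsg_end(skb, hdr);
	return genlmsg_multicast(family, skb, 0, group, GFP_ATOMIC);
}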
skb               236 drivers/scsi/qedf/qedf.h 	struct sk_buff *skb;
skb               479 drivers/scsi/qedf/qedf.h extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
skb               481 drivers/scsi/qedf/qedf.h extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
skb                17 drivers/scsi/qedf/qedf_fip.c 	struct sk_buff *skb;
skb                25 drivers/scsi/qedf/qedf_fip.c 	skb = dev_alloc_skb(sizeof(struct fip_vlan));
skb                26 drivers/scsi/qedf/qedf_fip.c 	if (!skb) {
skb                32 drivers/scsi/qedf/qedf_fip.c 	eth_fr = (char *)skb->data;
skb                53 drivers/scsi/qedf/qedf_fip.c 	skb_put(skb, sizeof(*vlan));
skb                54 drivers/scsi/qedf/qedf_fip.c 	skb->protocol = htons(ETH_P_FIP);
skb                55 drivers/scsi/qedf/qedf_fip.c 	skb_reset_mac_header(skb);
skb                56 drivers/scsi/qedf/qedf_fip.c 	skb_reset_network_header(skb);
skb                65 drivers/scsi/qedf/qedf_fip.c 		kfree_skb(skb);
skb                70 drivers/scsi/qedf/qedf_fip.c 	rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, flags);
skb                73 drivers/scsi/qedf/qedf_fip.c 		kfree_skb(skb);
skb                80 drivers/scsi/qedf/qedf_fip.c 	struct sk_buff *skb)
skb                88 drivers/scsi/qedf/qedf_fip.c 	fiph = (struct fip_header *)(((void *)skb->data) + 2 * ETH_ALEN + 2);
skb               121 drivers/scsi/qedf/qedf_fip.c void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
skb               132 drivers/scsi/qedf/qedf_fip.c 		kfree_skb(skb);
skb               136 drivers/scsi/qedf/qedf_fip.c 	fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
skb               137 drivers/scsi/qedf/qedf_fip.c 	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
skb               145 drivers/scsi/qedf/qedf_fip.c 	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
skb               148 drivers/scsi/qedf/qedf_fip.c 	__vlan_hwaccel_get_tag(skb, &vlan_tci);
skb               155 drivers/scsi/qedf/qedf_fip.c 		    skb->data, skb->len, false);
skb               157 drivers/scsi/qedf/qedf_fip.c 	rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
skb               160 drivers/scsi/qedf/qedf_fip.c 		kfree_skb(skb);
skb               168 drivers/scsi/qedf/qedf_fip.c void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
skb               186 drivers/scsi/qedf/qedf_fip.c 	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
skb               187 drivers/scsi/qedf/qedf_fip.c 	fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
skb               193 drivers/scsi/qedf/qedf_fip.c 		  skb, fiph, eth_hdr->h_source, eth_hdr->h_dest, op,
skb               197 drivers/scsi/qedf/qedf_fip.c 		    skb->data, skb->len, false);
skb               206 drivers/scsi/qedf/qedf_fip.c 		kfree_skb(skb);
skb               212 drivers/scsi/qedf/qedf_fip.c 		qedf_fcoe_process_vlan_resp(qedf, skb);
skb               213 drivers/scsi/qedf/qedf_fip.c 		kfree_skb(skb);
skb               223 drivers/scsi/qedf/qedf_fip.c 			kfree_skb(skb);
skb               288 drivers/scsi/qedf/qedf_fip.c 		kfree_skb(skb);
skb               291 drivers/scsi/qedf/qedf_fip.c 		__skb_pull(skb, ETH_HLEN);
skb               292 drivers/scsi/qedf/qedf_fip.c 		fcoe_ctlr_recv(&qedf->ctlr, skb);
skb               933 drivers/scsi/qedf/qedf_main.c static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
skb               938 drivers/scsi/qedf/qedf_main.c 	rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
skb               995 drivers/scsi/qedf/qedf_main.c 	struct sk_buff		*skb;
skb              1012 drivers/scsi/qedf/qedf_main.c 	skb = fp_skb(fp);
skb              1039 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              1050 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              1056 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              1062 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              1067 drivers/scsi/qedf/qedf_main.c 		if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
skb              1090 drivers/scsi/qedf/qedf_main.c 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
skb              1092 drivers/scsi/qedf/qedf_main.c 	skb->ip_summed = CHECKSUM_NONE;
skb              1096 drivers/scsi/qedf/qedf_main.c 	if (skb_is_nonlinear(skb)) {
skb              1099 drivers/scsi/qedf/qedf_main.c 		if (qedf_get_paged_crc_eof(skb, tlen)) {
skb              1100 drivers/scsi/qedf/qedf_main.c 			kfree_skb(skb);
skb              1103 drivers/scsi/qedf/qedf_main.c 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
skb              1106 drivers/scsi/qedf/qedf_main.c 		cp = skb_put(skb, tlen);
skb              1112 drivers/scsi/qedf/qedf_main.c 	if (skb_is_nonlinear(skb)) {
skb              1119 drivers/scsi/qedf/qedf_main.c 	skb_push(skb, elen + hlen);
skb              1120 drivers/scsi/qedf/qedf_main.c 	skb_reset_mac_header(skb);
skb              1121 drivers/scsi/qedf/qedf_main.c 	skb_reset_network_header(skb);
skb              1122 drivers/scsi/qedf/qedf_main.c 	skb->mac_len = elen;
skb              1123 drivers/scsi/qedf/qedf_main.c 	skb->protocol = htons(ETH_P_FCOE);
skb              1129 drivers/scsi/qedf/qedf_main.c 	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
skb              1132 drivers/scsi/qedf/qedf_main.c 	eh = eth_hdr(skb);
skb              1156 drivers/scsi/qedf/qedf_main.c 	__vlan_hwaccel_get_tag(skb, &vlan_tci);
skb              1166 drivers/scsi/qedf/qedf_main.c 		    1, skb->data, skb->len, false);
skb              1167 drivers/scsi/qedf/qedf_main.c 	rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
skb              1170 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2330 drivers/scsi/qedf/qedf_main.c 	struct sk_buff *skb)
skb              2347 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2351 drivers/scsi/qedf/qedf_main.c 	if (skb_is_nonlinear(skb))
skb              2352 drivers/scsi/qedf/qedf_main.c 		skb_linearize(skb);
skb              2353 drivers/scsi/qedf/qedf_main.c 	mac = eth_hdr(skb)->h_source;
skb              2354 drivers/scsi/qedf/qedf_main.c 	dest_mac = eth_hdr(skb)->h_dest;
skb              2357 drivers/scsi/qedf/qedf_main.c 	hp = (struct fcoe_hdr *)skb->data;
skb              2358 drivers/scsi/qedf/qedf_main.c 	fh = (struct fc_frame_header *) skb_transport_header(skb);
skb              2359 drivers/scsi/qedf/qedf_main.c 	skb_pull(skb, sizeof(struct fcoe_hdr));
skb              2360 drivers/scsi/qedf/qedf_main.c 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
skb              2362 drivers/scsi/qedf/qedf_main.c 	fp = (struct fc_frame *)skb;
skb              2366 drivers/scsi/qedf/qedf_main.c 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
skb              2368 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2373 drivers/scsi/qedf/qedf_main.c 	if (pskb_trim(skb, fr_len)) {
skb              2375 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2388 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2397 drivers/scsi/qedf/qedf_main.c 				kfree_skb(skb);
skb              2406 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2413 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2422 drivers/scsi/qedf/qedf_main.c 			kfree_skb(skb);
skb              2438 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2448 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2465 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2470 drivers/scsi/qedf/qedf_main.c 	    "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
skb              2475 drivers/scsi/qedf/qedf_main.c 		    1, skb->data, skb->len, false);
skb              2484 drivers/scsi/qedf/qedf_main.c 	struct sk_buff *skb = skb_work->skb;
skb              2492 drivers/scsi/qedf/qedf_main.c 	eh = (struct ethhdr *)skb->data;
skb              2497 drivers/scsi/qedf/qedf_main.c 		eh = skb_pull(skb, VLAN_HLEN);
skb              2498 drivers/scsi/qedf/qedf_main.c 		skb_reset_mac_header(skb);
skb              2507 drivers/scsi/qedf/qedf_main.c 		qedf_fip_recv(qedf, skb);
skb              2510 drivers/scsi/qedf/qedf_main.c 		__skb_pull(skb, ETH_HLEN);
skb              2511 drivers/scsi/qedf/qedf_main.c 		qedf_recv_frame(qedf, skb);
skb              2517 drivers/scsi/qedf/qedf_main.c 	kfree_skb(skb);
skb              2523 drivers/scsi/qedf/qedf_main.c static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
skb              2532 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2540 drivers/scsi/qedf/qedf_main.c 		kfree_skb(skb);
skb              2545 drivers/scsi/qedf/qedf_main.c 	skb_work->skb = skb;
skb               138 drivers/scsi/qedi/qedi.h 	struct sk_buff *skb;
skb              1110 drivers/scsi/qedi/qedi_iscsi.c 	struct sk_buff *skb;
skb              1132 drivers/scsi/qedi/qedi_iscsi.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb              1133 drivers/scsi/qedi/qedi_iscsi.c 	if (!skb) {
skb              1138 drivers/scsi/qedi/qedi_iscsi.c 	skb_put(skb, len);
skb              1139 drivers/scsi/qedi/qedi_iscsi.c 	memcpy(skb->data, udev->tx_pkt, len);
skb              1140 drivers/scsi/qedi/qedi_iscsi.c 	skb->ip_summed = CHECKSUM_NONE;
skb              1143 drivers/scsi/qedi/qedi_iscsi.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
skb              1145 drivers/scsi/qedi/qedi_iscsi.c 	rc = qedi_ops->ll2->start_xmit(cdev, skb, 0);
skb              1149 drivers/scsi/qedi/qedi_iscsi.c 		kfree_skb(skb);
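The qedi transmit helper above copies a prebuilt packet into a fresh skb and, when a VLAN is configured, requests hardware tag insertion via __vlan_hwaccel_put_tag() before handing the skb to the LL2 transmit hook. A sketch with that hook abstracted to a callback:

#include <linux/errno.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static int tx_copy_and_tag(const void *pkt, u32 len, u16 vlan_id,
			   int (*xmit)(struct sk_buff *))
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb_put_data(skb, pkt, len);	/* put + memcpy in one call */
	skb->ip_summed = CHECKSUM_NONE;

	if (vlan_id)
		/* tag rides in skb metadata; the NIC inserts it on the wire */
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);

	if (xmit(skb)) {
		kfree_skb(skb);
		return -EIO;
	}
	return 0;
}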
skb               657 drivers/scsi/qedi/qedi_main.c static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
skb               673 drivers/scsi/qedi/qedi_main.c 		kfree_skb(skb);
skb               677 drivers/scsi/qedi/qedi_main.c 	eh = (struct ethhdr *)skb->data;
skb               681 drivers/scsi/qedi/qedi_main.c 		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
skb               682 drivers/scsi/qedi/qedi_main.c 		skb_reset_mac_header(skb);
skb               691 drivers/scsi/qedi/qedi_main.c 			  eh->h_proto, skb->len);
skb               692 drivers/scsi/qedi/qedi_main.c 		kfree_skb(skb);
skb               698 drivers/scsi/qedi/qedi_main.c 		  eh->h_proto, skb->len);
skb               707 drivers/scsi/qedi/qedi_main.c 		kfree_skb(skb);
skb               712 drivers/scsi/qedi/qedi_main.c 	work->skb = skb;
skb               714 drivers/scsi/qedi/qedi_main.c 	if (skb_vlan_tag_present(skb))
skb               715 drivers/scsi/qedi/qedi_main.c 		work->vlan_id = skb_vlan_tag_get(skb);
skb               718 drivers/scsi/qedi/qedi_main.c 		__vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);
skb               730 drivers/scsi/qedi/qedi_main.c static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
skb               754 drivers/scsi/qedi/qedi_main.c 	len = min_t(u32, skb->len, (u32)qedi_ll2_buf_size);
skb               755 drivers/scsi/qedi/qedi_main.c 	memcpy(pkt, skb->data, len);
skb               792 drivers/scsi/qedi/qedi_main.c 		if (work->skb)
skb               793 drivers/scsi/qedi/qedi_main.c 			kfree_skb(work->skb);
skb               811 drivers/scsi/qedi/qedi_main.c 			qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
skb               812 drivers/scsi/qedi/qedi_main.c 			kfree_skb(work->skb);
skb                32 drivers/scsi/scsi_netlink.c scsi_nl_rcv_msg(struct sk_buff *skb)
skb                39 drivers/scsi/scsi_netlink.c 	while (skb->len >= NLMSG_HDRLEN) {
skb                42 drivers/scsi/scsi_netlink.c 		nlh = nlmsg_hdr(skb);
skb                44 drivers/scsi/scsi_netlink.c 		    (skb->len < nlh->nlmsg_len)) {
skb                51 drivers/scsi/scsi_netlink.c 		if (rlen > skb->len)
skb                52 drivers/scsi/scsi_netlink.c 			rlen = skb->len;
skb                66 drivers/scsi/scsi_netlink.c 		if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
skb               100 drivers/scsi/scsi_netlink.c 			netlink_ack(skb, nlh, err, NULL);
skb               102 drivers/scsi/scsi_netlink.c 		skb_pull(skb, rlen);
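scsi_nl_rcv_msg() above is the standard multi-message netlink receive walk: validate the header length against the remaining bytes, process the message, then advance by the aligned message length. Its skeleton, with the per-message handling abstracted:

#include <linux/skbuff.h>
#include <net/netlink.h>

static void nl_rcv_loop(struct sk_buff *skb,
			void (*handle)(struct nlmsghdr *))
{
	while (skb->len >= NLMSG_HDRLEN) {
		struct nlmsghdr *nlh = nlmsg_hdr(skb);
		u32 rlen;

		if (nlh->nlmsg_len < sizeof(*nlh) ||
		    skb->len < nlh->nlmsg_len)
			break;		/* malformed: stop the walk */

		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (rlen > skb->len)
			rlen = skb->len;

		handle(nlh);
		skb_pull(skb, rlen);	/* advance to the next message */
	}
}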
skb               531 drivers/scsi/scsi_transport_fc.c 	struct sk_buff *skb;
skb               548 drivers/scsi/scsi_transport_fc.c 	skb = nlmsg_new(len, GFP_KERNEL);
skb               549 drivers/scsi/scsi_transport_fc.c 	if (!skb) {
skb               554 drivers/scsi/scsi_transport_fc.c 	nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
skb               572 drivers/scsi/scsi_transport_fc.c 	nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
skb               577 drivers/scsi/scsi_transport_fc.c 	kfree_skb(skb);
skb              2328 drivers/scsi/scsi_transport_iscsi.c iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
skb              2330 drivers/scsi/scsi_transport_iscsi.c 	return nlmsg_multicast(nls, skb, 0, group, gfp);
skb              2334 drivers/scsi/scsi_transport_iscsi.c iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
skb              2336 drivers/scsi/scsi_transport_iscsi.c 	return nlmsg_unicast(nls, skb, portid);
skb              2343 drivers/scsi/scsi_transport_iscsi.c 	struct sk_buff *skb;
skb              2354 drivers/scsi/scsi_transport_iscsi.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb              2355 drivers/scsi/scsi_transport_iscsi.c 	if (!skb) {
skb              2362 drivers/scsi/scsi_transport_iscsi.c 	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
skb              2373 drivers/scsi/scsi_transport_iscsi.c 	return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
skb              2382 drivers/scsi/scsi_transport_iscsi.c 	struct sk_buff *skb;
skb              2386 drivers/scsi/scsi_transport_iscsi.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb              2387 drivers/scsi/scsi_transport_iscsi.c 	if (!skb) {
skb              2392 drivers/scsi/scsi_transport_iscsi.c 	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
skb              2408 drivers/scsi/scsi_transport_iscsi.c 	return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC);
skb              2415 drivers/scsi/scsi_transport_iscsi.c 	struct sk_buff	*skb;
skb              2424 drivers/scsi/scsi_transport_iscsi.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb              2425 drivers/scsi/scsi_transport_iscsi.c 	if (!skb) {
skb              2431 drivers/scsi/scsi_transport_iscsi.c 	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
skb              2439 drivers/scsi/scsi_transport_iscsi.c 	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
skb              2450 drivers/scsi/scsi_transport_iscsi.c 	struct sk_buff  *skb;
skb              2459 drivers/scsi/scsi_transport_iscsi.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb              2460 drivers/scsi/scsi_transport_iscsi.c 	if (!skb) {
skb              2466 drivers/scsi/scsi_transport_iscsi.c 	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
skb              2473 drivers/scsi/scsi_transport_iscsi.c 	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
skb              2485 drivers/scsi/scsi_transport_iscsi.c 	struct sk_buff *skb;
skb              2489 drivers/scsi/scsi_transport_iscsi.c 	skb = alloc_skb(len, GFP_NOIO);
skb              2490 drivers/scsi/scsi_transport_iscsi.c 	if (!skb) {
skb              2496 drivers/scsi/scsi_transport_iscsi.c 	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
skb              2507 drivers/scsi/scsi_transport_iscsi.c 	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
skb              2516 drivers/scsi/scsi_transport_iscsi.c 	struct sk_buff *skb;
skb              2520 drivers/scsi/scsi_transport_iscsi.c 	skb = alloc_skb(len, GFP_NOIO);
skb              2521 drivers/scsi/scsi_transport_iscsi.c 	if (!skb) {
skb              2526 drivers/scsi/scsi_transport_iscsi.c 	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
skb              2536 drivers/scsi/scsi_transport_iscsi.c 	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
skb              2543 drivers/scsi/scsi_transport_iscsi.c 	struct sk_buff	*skb;
skb              2547 drivers/scsi/scsi_transport_iscsi.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb              2548 drivers/scsi/scsi_transport_iscsi.c 	if (!skb) {
skb              2553 drivers/scsi/scsi_transport_iscsi.c 	nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
skb              2555 drivers/scsi/scsi_transport_iscsi.c 	return iscsi_unicast_skb(skb, portid);
skb              2634 drivers/scsi/scsi_transport_iscsi.c 	struct sk_buff  *skb;
skb              2643 drivers/scsi/scsi_transport_iscsi.c 	skb = alloc_skb(len, GFP_KERNEL);
skb              2644 drivers/scsi/scsi_transport_iscsi.c 	if (!skb) {
skb              2651 drivers/scsi/scsi_transport_iscsi.c 	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
skb              2672 drivers/scsi/scsi_transport_iscsi.c 		kfree_skb(skb);
skb              2680 drivers/scsi/scsi_transport_iscsi.c 	rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
skb              3498 drivers/scsi/scsi_transport_iscsi.c iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
skb              3522 drivers/scsi/scsi_transport_iscsi.c 	portid = NETLINK_CB(skb).portid;
skb              3698 drivers/scsi/scsi_transport_iscsi.c iscsi_if_rx(struct sk_buff *skb)
skb              3700 drivers/scsi/scsi_transport_iscsi.c 	u32 portid = NETLINK_CB(skb).portid;
skb              3703 drivers/scsi/scsi_transport_iscsi.c 	while (skb->len >= NLMSG_HDRLEN) {
skb              3711 drivers/scsi/scsi_transport_iscsi.c 		nlh = nlmsg_hdr(skb);
skb              3713 drivers/scsi/scsi_transport_iscsi.c 		    skb->len < nlh->nlmsg_len) {
skb              3719 drivers/scsi/scsi_transport_iscsi.c 		if (rlen > skb->len)
skb              3720 drivers/scsi/scsi_transport_iscsi.c 			rlen = skb->len;
skb              3722 drivers/scsi/scsi_transport_iscsi.c 		err = iscsi_if_recv_msg(skb, nlh, &group);
skb              3745 drivers/scsi/scsi_transport_iscsi.c 		skb_pull(skb, rlen);
skb               519 drivers/staging/fsl-dpaa2/ethsw/ethsw.c static netdev_tx_t port_dropframe(struct sk_buff *skb,
skb               523 drivers/staging/fsl-dpaa2/ethsw/ethsw.c 	dev_kfree_skb_any(skb);
skb               554 drivers/staging/fsl-dpaa2/ethsw/ethsw.c 	struct sk_buff *skb;
skb               563 drivers/staging/fsl-dpaa2/ethsw/ethsw.c 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
skb               571 drivers/staging/fsl-dpaa2/ethsw/ethsw.c 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
skb               585 drivers/staging/fsl-dpaa2/ethsw/ethsw.c 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
skb               588 drivers/staging/fsl-dpaa2/ethsw/ethsw.c 	nlmsg_end(dump->skb, nlh);
skb               595 drivers/staging/fsl-dpaa2/ethsw/ethsw.c 	nlmsg_cancel(dump->skb, nlh);
skb               613 drivers/staging/fsl-dpaa2/ethsw/ethsw.c static int port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb               624 drivers/staging/fsl-dpaa2/ethsw/ethsw.c 		.skb = skb,
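The ethsw FDB dump callback above emits one RTM_NEWNEIGH message per forwarding entry and cancels the partially written message when the dump skb runs out of room, so the dump can resume in the next skb. The per-entry shape as a sketch; the ndm field values here are illustrative:

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <net/netlink.h>

static int dump_one_entry(struct sk_buff *skb, u32 portid, u32 seq,
			  const unsigned char *mac)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWNEIGH, sizeof(*ndm),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_state = NUD_REACHABLE;	/* illustrative choice */

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, mac))
		goto cancel;

	nlmsg_end(skb, nlh);
	return 0;

cancel:
	nlmsg_cancel(skb, nlh);	/* rewind the partial message */
	return -EMSGSIZE;
}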
skb                77 drivers/staging/gdm724x/gdm_lte.c static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type)
skb                81 drivers/staging/gdm724x/gdm_lte.c 	ret = netif_rx_ni(skb);
skb                86 drivers/staging/gdm724x/gdm_lte.c 		nic->stats.rx_bytes += skb->len + ETH_HLEN;
skb               335 drivers/staging/gdm724x/gdm_lte.c static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
skb               350 drivers/staging/gdm724x/gdm_lte.c 	eth = (struct ethhdr *)skb->data;
skb               352 drivers/staging/gdm724x/gdm_lte.c 		vlan_eth = (struct vlan_ethhdr *)skb->data;
skb               354 drivers/staging/gdm724x/gdm_lte.c 		network_data = skb->data + VLAN_ETH_HLEN;
skb               358 drivers/staging/gdm724x/gdm_lte.c 		network_data = skb->data + ETH_HLEN;
skb               401 drivers/staging/gdm724x/gdm_lte.c static netdev_tx_t gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
skb               410 drivers/staging/gdm724x/gdm_lte.c 	nic_type = gdm_lte_tx_nic_type(dev, skb);
skb               417 drivers/staging/gdm724x/gdm_lte.c 		if (gdm_lte_emulate_arp(skb, nic_type) == 0) {
skb               418 drivers/staging/gdm724x/gdm_lte.c 			dev_kfree_skb(skb);
skb               424 drivers/staging/gdm724x/gdm_lte.c 		if (gdm_lte_emulate_ndp(skb, nic_type) == 0) {
skb               425 drivers/staging/gdm724x/gdm_lte.c 			dev_kfree_skb(skb);
skb               438 drivers/staging/gdm724x/gdm_lte.c 		struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr *)skb->data;
skb               441 drivers/staging/gdm724x/gdm_lte.c 		data_buf = skb->data + (VLAN_ETH_HLEN - ETH_HLEN);
skb               442 drivers/staging/gdm724x/gdm_lte.c 		data_len = skb->len - (VLAN_ETH_HLEN - ETH_HLEN);
skb               445 drivers/staging/gdm724x/gdm_lte.c 		data_buf = skb->data;
skb               446 drivers/staging/gdm724x/gdm_lte.c 		data_len = skb->len;
skb               463 drivers/staging/gdm724x/gdm_lte.c 		dev_kfree_skb(skb);
skb               490 drivers/staging/gdm724x/gdm_lte.c 	dev_kfree_skb(skb);
skb               566 drivers/staging/gdm724x/gdm_lte.c 	struct sk_buff *skb;
skb               657 drivers/staging/gdm724x/gdm_lte.c 	skb = dev_alloc_skb(len + mac_header_len + NET_IP_ALIGN);
skb               658 drivers/staging/gdm724x/gdm_lte.c 	if (!skb)
skb               660 drivers/staging/gdm724x/gdm_lte.c 	skb_reserve(skb, NET_IP_ALIGN);
skb               662 drivers/staging/gdm724x/gdm_lte.c 	skb_put_data(skb, mac_header_data, mac_header_len);
skb               663 drivers/staging/gdm724x/gdm_lte.c 	skb_put_data(skb, buf, len);
skb               665 drivers/staging/gdm724x/gdm_lte.c 	skb->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
skb               666 drivers/staging/gdm724x/gdm_lte.c 	skb->dev = dev;
skb               667 drivers/staging/gdm724x/gdm_lte.c 	skb_reset_mac_header(skb);
skb               668 drivers/staging/gdm724x/gdm_lte.c 	skb_pull(skb, ETH_HLEN);
skb               670 drivers/staging/gdm724x/gdm_lte.c 	gdm_lte_rx(skb, nic, nic_type);
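The gdm_lte receive path above rebuilds a proper ethernet skb from a raw buffer. A condensed sketch of that allocation sequence, assuming mac_hdr points at a valid 14-byte ethernet header; the NET_IP_ALIGN reserve is what keeps the IP header aligned once the MAC header has been pulled:

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/if_ether.h>

    static void example_rx(struct net_device *dev, const void *mac_hdr,
                           int hdr_len, const void *payload, int len)
    {
        struct sk_buff *skb;

        skb = dev_alloc_skb(len + hdr_len + NET_IP_ALIGN);
        if (!skb)
            return;
        skb_reserve(skb, NET_IP_ALIGN);         /* align the IP header */
        skb_put_data(skb, mac_hdr, hdr_len);
        skb_put_data(skb, payload, len);

        skb->protocol = ((struct ethhdr *)mac_hdr)->h_proto;
        skb->dev = dev;
        skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);        /* data now points at the payload */
        netif_rx(skb);
    }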
skb                29 drivers/staging/gdm724x/netlink_k.c static void netlink_rcv_cb(struct sk_buff *skb)
skb                42 drivers/staging/gdm724x/netlink_k.c 	if (skb->len < NLMSG_HDRLEN) {
skb                47 drivers/staging/gdm724x/netlink_k.c 	nlh = (struct nlmsghdr *)skb->data;
skb                49 drivers/staging/gdm724x/netlink_k.c 	if (skb->len < nlh->nlmsg_len || nlh->nlmsg_len > ND_MAX_MSG_LEN) {
skb                51 drivers/staging/gdm724x/netlink_k.c 		       skb->len, nlh->nlmsg_len);
skb                68 drivers/staging/gdm724x/netlink_k.c static void netlink_rcv(struct sk_buff *skb)
skb                71 drivers/staging/gdm724x/netlink_k.c 	netlink_rcv_cb(skb);
skb                95 drivers/staging/gdm724x/netlink_k.c 	struct sk_buff *skb = NULL;
skb               105 drivers/staging/gdm724x/netlink_k.c 	skb = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC);
skb               106 drivers/staging/gdm724x/netlink_k.c 	if (!skb)
skb               111 drivers/staging/gdm724x/netlink_k.c 	nlh = nlmsg_put(skb, 0, seq, type, len, 0);
skb               113 drivers/staging/gdm724x/netlink_k.c 	NETLINK_CB(skb).portid = 0;
skb               114 drivers/staging/gdm724x/netlink_k.c 	NETLINK_CB(skb).dst_group = 0;
skb               116 drivers/staging/gdm724x/netlink_k.c 	ret = netlink_broadcast(sock, skb, 0, group + 1, GFP_ATOMIC);
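netlink_k.c above builds and multicasts a message from scratch. A minimal sketch of that transmit side, assuming a kernel netlink socket already created with netlink_kernel_create() and the driver's 0-based group number (hence the `group + 1` shift to netlink's 1-based numbering seen in the listing):

    #include <net/netlink.h>
    #include <net/sock.h>

    static int example_nl_bcast(struct sock *sock, u16 type, u32 group,
                                const void *data, u16 len)
    {
        struct sk_buff *skb;
        struct nlmsghdr *nlh;

        skb = nlmsg_new(len, GFP_ATOMIC);
        if (!skb)
            return -ENOMEM;

        nlh = nlmsg_put(skb, 0, 0, type, len, 0);
        if (!nlh) {
            kfree_skb(skb);
            return -EMSGSIZE;
        }
        memcpy(nlmsg_data(nlh), data, len);

        NETLINK_CB(skb).portid = 0;     /* message originates in the kernel */
        NETLINK_CB(skb).dst_group = 0;

        return netlink_broadcast(sock, skb, 0, group + 1, GFP_ATOMIC);
    }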
skb               555 drivers/staging/isdn/avm/avmcard.h u16  b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
skb               578 drivers/staging/isdn/avm/avmcard.h u16  b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
skb               377 drivers/staging/isdn/avm/b1.c u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
skb               383 drivers/staging/isdn/avm/b1.c 	u16 len = CAPIMSG_LEN(skb->data);
skb               384 drivers/staging/isdn/avm/b1.c 	u8 cmd = CAPIMSG_COMMAND(skb->data);
skb               385 drivers/staging/isdn/avm/b1.c 	u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
skb               391 drivers/staging/isdn/avm/b1.c 					     CAPIMSG_APPID(skb->data),
skb               392 drivers/staging/isdn/avm/b1.c 					     CAPIMSG_NCCI(skb->data),
skb               393 drivers/staging/isdn/avm/b1.c 					     CAPIMSG_MSGID(skb->data));
skb               399 drivers/staging/isdn/avm/b1.c 		dlen = CAPIMSG_DATALEN(skb->data);
skb               402 drivers/staging/isdn/avm/b1.c 		b1_put_slice(port, skb->data, len);
skb               403 drivers/staging/isdn/avm/b1.c 		b1_put_slice(port, skb->data + len, dlen);
skb               406 drivers/staging/isdn/avm/b1.c 		b1_put_slice(port, skb->data, len);
skb               410 drivers/staging/isdn/avm/b1.c 	dev_kfree_skb_any(skb);
skb               496 drivers/staging/isdn/avm/b1.c 	struct sk_buff *skb;
skb               528 drivers/staging/isdn/avm/b1.c 		if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
skb               532 drivers/staging/isdn/avm/b1.c 			skb_put_data(skb, card->msgbuf, MsgLen);
skb               533 drivers/staging/isdn/avm/b1.c 			skb_put_data(skb, card->databuf, DataB3Len);
skb               534 drivers/staging/isdn/avm/b1.c 			capi_ctr_handle_message(ctrl, ApplId, skb);
skb               542 drivers/staging/isdn/avm/b1.c 		if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
skb               547 drivers/staging/isdn/avm/b1.c 			skb_put_data(skb, card->msgbuf, MsgLen);
skb               548 drivers/staging/isdn/avm/b1.c 			if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
skb               550 drivers/staging/isdn/avm/b1.c 						     CAPIMSG_NCCI(skb->data),
skb               551 drivers/staging/isdn/avm/b1.c 						     CAPIMSG_MSGID(skb->data));
skb               553 drivers/staging/isdn/avm/b1.c 			capi_ctr_handle_message(ctrl, ApplId, skb);
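b1_send_message() above treats a CAPI DATA_B3_REQ as two slices: a CAPI header of CAPIMSG_LEN() bytes, then CAPIMSG_DATALEN() bytes of B3 payload immediately behind it in the same linear buffer. A sketch of that split, assuming a hypothetical put_slice() standing in for b1_put_slice():

    #include <linux/isdn/capiutil.h>
    #include <linux/isdn/capicmd.h>
    #include <linux/skbuff.h>

    static void example_capi_tx(struct sk_buff *skb)
    {
        u16 len = CAPIMSG_LEN(skb->data);

        if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
            u16 dlen = CAPIMSG_DATALEN(skb->data);

            put_slice(skb->data, len);          /* CAPI header */
            put_slice(skb->data + len, dlen);   /* B3 payload behind it */
        } else {
            put_slice(skb->data, len);          /* whole message */
        }
        dev_kfree_skb_any(skb);         /* card owns a copy now */
    }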
skb               350 drivers/staging/isdn/avm/b1dma.c static void b1dma_queue_tx(avmcard *card, struct sk_buff *skb)
skb               356 drivers/staging/isdn/avm/b1dma.c 	skb_queue_tail(&card->dma->send_queue, skb);
skb               371 drivers/staging/isdn/avm/b1dma.c 	struct sk_buff *skb;
skb               377 drivers/staging/isdn/avm/b1dma.c 	skb = skb_dequeue(&dma->send_queue);
skb               379 drivers/staging/isdn/avm/b1dma.c 	len = CAPIMSG_LEN(skb->data);
skb               382 drivers/staging/isdn/avm/b1dma.c 		cmd = CAPIMSG_COMMAND(skb->data);
skb               383 drivers/staging/isdn/avm/b1dma.c 		subcmd = CAPIMSG_SUBCOMMAND(skb->data);
skb               388 drivers/staging/isdn/avm/b1dma.c 			u16 dlen = CAPIMSG_DATALEN(skb->data);
skb               390 drivers/staging/isdn/avm/b1dma.c 			_put_slice(&p, skb->data, len);
skb               391 drivers/staging/isdn/avm/b1dma.c 			_put_slice(&p, skb->data + len, dlen);
skb               394 drivers/staging/isdn/avm/b1dma.c 			_put_slice(&p, skb->data, len);
skb               401 drivers/staging/isdn/avm/b1dma.c 		txlen = skb->len - 2;
skb               403 drivers/staging/isdn/avm/b1dma.c 		if (skb->data[2] == SEND_POLLACK)
skb               408 drivers/staging/isdn/avm/b1dma.c 		       skb->data[2], txlen);
skb               410 drivers/staging/isdn/avm/b1dma.c 		skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
skb               411 drivers/staging/isdn/avm/b1dma.c 						 skb->len - 2);
skb               420 drivers/staging/isdn/avm/b1dma.c 	dev_kfree_skb_any(skb);
skb               427 drivers/staging/isdn/avm/b1dma.c 	struct sk_buff *skb;
skb               430 drivers/staging/isdn/avm/b1dma.c 	skb = alloc_skb(3, GFP_ATOMIC);
skb               431 drivers/staging/isdn/avm/b1dma.c 	if (!skb) {
skb               436 drivers/staging/isdn/avm/b1dma.c 	p = skb->data;
skb               440 drivers/staging/isdn/avm/b1dma.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
skb               442 drivers/staging/isdn/avm/b1dma.c 	b1dma_queue_tx(card, skb);
skb               452 drivers/staging/isdn/avm/b1dma.c 	struct sk_buff *skb;
skb               473 drivers/staging/isdn/avm/b1dma.c 		if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
skb               477 drivers/staging/isdn/avm/b1dma.c 			skb_put_data(skb, card->msgbuf, MsgLen);
skb               478 drivers/staging/isdn/avm/b1dma.c 			skb_put_data(skb, card->databuf, DataB3Len);
skb               479 drivers/staging/isdn/avm/b1dma.c 			capi_ctr_handle_message(ctrl, ApplId, skb);
skb               487 drivers/staging/isdn/avm/b1dma.c 		if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
skb               491 drivers/staging/isdn/avm/b1dma.c 			skb_put_data(skb, card->msgbuf, MsgLen);
skb               492 drivers/staging/isdn/avm/b1dma.c 			if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF) {
skb               495 drivers/staging/isdn/avm/b1dma.c 						     CAPIMSG_NCCI(skb->data),
skb               496 drivers/staging/isdn/avm/b1dma.c 						     CAPIMSG_MSGID(skb->data));
skb               499 drivers/staging/isdn/avm/b1dma.c 			capi_ctr_handle_message(ctrl, ApplId, skb);
skb               683 drivers/staging/isdn/avm/b1dma.c 	struct sk_buff *skb;
skb               686 drivers/staging/isdn/avm/b1dma.c 	skb = alloc_skb(15, GFP_ATOMIC);
skb               687 drivers/staging/isdn/avm/b1dma.c 	if (!skb) {
skb               692 drivers/staging/isdn/avm/b1dma.c 	p = skb->data;
skb               699 drivers/staging/isdn/avm/b1dma.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
skb               701 drivers/staging/isdn/avm/b1dma.c 	b1dma_queue_tx(card, skb);
skb               776 drivers/staging/isdn/avm/b1dma.c 	struct sk_buff *skb;
skb               785 drivers/staging/isdn/avm/b1dma.c 	skb = alloc_skb(23, GFP_ATOMIC);
skb               786 drivers/staging/isdn/avm/b1dma.c 	if (!skb) {
skb               791 drivers/staging/isdn/avm/b1dma.c 	p = skb->data;
skb               800 drivers/staging/isdn/avm/b1dma.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
skb               802 drivers/staging/isdn/avm/b1dma.c 	b1dma_queue_tx(card, skb);
skb               811 drivers/staging/isdn/avm/b1dma.c 	struct sk_buff *skb;
skb               819 drivers/staging/isdn/avm/b1dma.c 	skb = alloc_skb(7, GFP_ATOMIC);
skb               820 drivers/staging/isdn/avm/b1dma.c 	if (!skb) {
skb               825 drivers/staging/isdn/avm/b1dma.c 	p = skb->data;
skb               831 drivers/staging/isdn/avm/b1dma.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
skb               833 drivers/staging/isdn/avm/b1dma.c 	b1dma_queue_tx(card, skb);
skb               838 drivers/staging/isdn/avm/b1dma.c u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
skb               844 drivers/staging/isdn/avm/b1dma.c 	if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
skb               848 drivers/staging/isdn/avm/b1dma.c 					     CAPIMSG_APPID(skb->data),
skb               849 drivers/staging/isdn/avm/b1dma.c 					     CAPIMSG_NCCI(skb->data),
skb               850 drivers/staging/isdn/avm/b1dma.c 					     CAPIMSG_MSGID(skb->data));
skb               854 drivers/staging/isdn/avm/b1dma.c 		b1dma_queue_tx(card, skb);
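The small command skbs in b1dma.c are built by walking a cursor over skb->data and only committing the final length with skb_put() once everything is written. A sketch of that idiom, assuming plain byte writes where the real driver uses its _put_byte()/_put_word() helpers:

    static struct sk_buff *example_cmd_skb(u8 cmd)
    {
        struct sk_buff *skb;
        u8 *p;

        skb = alloc_skb(3, GFP_ATOMIC);     /* worst-case command size */
        if (!skb)
            return NULL;

        p = skb->data;
        *p++ = 0;       /* framing bytes, as in the driver's init command */
        *p++ = 0;
        *p++ = cmd;
        skb_put(skb, (u8 *)p - (u8 *)skb->data);    /* commit exactly what was written */
        return skb;
    }

Committing the length last means a partially built command can simply be freed without ever looking like valid data to the dequeue side.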
skb               413 drivers/staging/isdn/avm/c4.c 	struct sk_buff *skb;
skb               424 drivers/staging/isdn/avm/c4.c 	skb = skb_dequeue(&dma->send_queue);
skb               425 drivers/staging/isdn/avm/c4.c 	if (!skb) {
skb               432 drivers/staging/isdn/avm/c4.c 	len = CAPIMSG_LEN(skb->data);
skb               435 drivers/staging/isdn/avm/c4.c 		cmd = CAPIMSG_COMMAND(skb->data);
skb               436 drivers/staging/isdn/avm/c4.c 		subcmd = CAPIMSG_SUBCOMMAND(skb->data);
skb               441 drivers/staging/isdn/avm/c4.c 			u16 dlen = CAPIMSG_DATALEN(skb->data);
skb               443 drivers/staging/isdn/avm/c4.c 			_put_slice(&p, skb->data, len);
skb               444 drivers/staging/isdn/avm/c4.c 			_put_slice(&p, skb->data + len, dlen);
skb               447 drivers/staging/isdn/avm/c4.c 			_put_slice(&p, skb->data, len);
skb               454 drivers/staging/isdn/avm/c4.c 		txlen = skb->len - 2;
skb               456 drivers/staging/isdn/avm/c4.c 		if (skb->data[2] == SEND_POLLACK)
skb               461 drivers/staging/isdn/avm/c4.c 		       card->name, skb->data[2], txlen);
skb               463 drivers/staging/isdn/avm/c4.c 		skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
skb               464 drivers/staging/isdn/avm/c4.c 						 skb->len - 2);
skb               475 drivers/staging/isdn/avm/c4.c 	dev_kfree_skb_any(skb);
skb               482 drivers/staging/isdn/avm/c4.c 	struct sk_buff *skb;
skb               485 drivers/staging/isdn/avm/c4.c 	skb = alloc_skb(3, GFP_ATOMIC);
skb               486 drivers/staging/isdn/avm/c4.c 	if (!skb) {
skb               491 drivers/staging/isdn/avm/c4.c 	p = skb->data;
skb               495 drivers/staging/isdn/avm/c4.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
skb               497 drivers/staging/isdn/avm/c4.c 	skb_queue_tail(&card->dma->send_queue, skb);
skb               508 drivers/staging/isdn/avm/c4.c 	struct sk_buff *skb;
skb               535 drivers/staging/isdn/avm/c4.c 		if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
skb               539 drivers/staging/isdn/avm/c4.c 			skb_put_data(skb, card->msgbuf, MsgLen);
skb               540 drivers/staging/isdn/avm/c4.c 			skb_put_data(skb, card->databuf, DataB3Len);
skb               541 drivers/staging/isdn/avm/c4.c 			capi_ctr_handle_message(ctrl, ApplId, skb);
skb               554 drivers/staging/isdn/avm/c4.c 		if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
skb               558 drivers/staging/isdn/avm/c4.c 			skb_put_data(skb, card->msgbuf, MsgLen);
skb               559 drivers/staging/isdn/avm/c4.c 			if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
skb               561 drivers/staging/isdn/avm/c4.c 						     CAPIMSG_NCCI(skb->data),
skb               562 drivers/staging/isdn/avm/c4.c 						     CAPIMSG_MSGID(skb->data));
skb               564 drivers/staging/isdn/avm/c4.c 			capi_ctr_handle_message(ctrl, ApplId, skb);
skb               733 drivers/staging/isdn/avm/c4.c 	struct sk_buff *skb;
skb               737 drivers/staging/isdn/avm/c4.c 	skb = alloc_skb(15, GFP_ATOMIC);
skb               738 drivers/staging/isdn/avm/c4.c 	if (!skb) {
skb               743 drivers/staging/isdn/avm/c4.c 	p = skb->data;
skb               750 drivers/staging/isdn/avm/c4.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
skb               752 drivers/staging/isdn/avm/c4.c 	skb_queue_tail(&card->dma->send_queue, skb);
skb               760 drivers/staging/isdn/avm/c4.c 	struct sk_buff *skb;
skb               764 drivers/staging/isdn/avm/c4.c 	skb = alloc_skb(3 + 4, GFP_ATOMIC);
skb               765 drivers/staging/isdn/avm/c4.c 	if (!skb) {
skb               770 drivers/staging/isdn/avm/c4.c 	p = skb->data;
skb               775 drivers/staging/isdn/avm/c4.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
skb               777 drivers/staging/isdn/avm/c4.c 	skb_queue_tail(&card->dma->send_queue, skb);
skb               786 drivers/staging/isdn/avm/c4.c 	struct sk_buff *skb;
skb               790 drivers/staging/isdn/avm/c4.c 	skb = alloc_skb(3 + 4, GFP_ATOMIC);
skb               791 drivers/staging/isdn/avm/c4.c 	if (!skb) {
skb               796 drivers/staging/isdn/avm/c4.c 	p = skb->data;
skb               804 drivers/staging/isdn/avm/c4.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
skb               806 drivers/staging/isdn/avm/c4.c 	skb_queue_tail(&card->dma->send_queue, skb);
skb               953 drivers/staging/isdn/avm/c4.c 	struct sk_buff *skb;
skb               965 drivers/staging/isdn/avm/c4.c 		skb = alloc_skb(23, GFP_ATOMIC);
skb               966 drivers/staging/isdn/avm/c4.c 		if (!skb) {
skb               971 drivers/staging/isdn/avm/c4.c 		p = skb->data;
skb               980 drivers/staging/isdn/avm/c4.c 		skb_put(skb, (u8 *)p - (u8 *)skb->data);
skb               982 drivers/staging/isdn/avm/c4.c 		skb_queue_tail(&card->dma->send_queue, skb);
skb               997 drivers/staging/isdn/avm/c4.c 	struct sk_buff *skb;
skb              1005 drivers/staging/isdn/avm/c4.c 		skb = alloc_skb(7, GFP_ATOMIC);
skb              1006 drivers/staging/isdn/avm/c4.c 		if (!skb) {
skb              1011 drivers/staging/isdn/avm/c4.c 		p = skb->data;
skb              1017 drivers/staging/isdn/avm/c4.c 		skb_put(skb, (u8 *)p - (u8 *)skb->data);
skb              1018 drivers/staging/isdn/avm/c4.c 		skb_queue_tail(&card->dma->send_queue, skb);
skb              1028 drivers/staging/isdn/avm/c4.c static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
skb              1036 drivers/staging/isdn/avm/c4.c 	if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
skb              1038 drivers/staging/isdn/avm/c4.c 					     CAPIMSG_APPID(skb->data),
skb              1039 drivers/staging/isdn/avm/c4.c 					     CAPIMSG_NCCI(skb->data),
skb              1040 drivers/staging/isdn/avm/c4.c 					     CAPIMSG_MSGID(skb->data));
skb              1043 drivers/staging/isdn/avm/c4.c 		skb_queue_tail(&card->dma->send_queue, skb);
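The c4.c transmit dispatch above (and its b1dma twin) drains the software queue into a single DMA bounce buffer, skipping the two in-band type bytes at the front of each queued skb. A condensed sketch, assuming the bounce buffer holds at least skb->len - 2 bytes:

    #include <linux/skbuff.h>

    static void example_dispatch_tx(struct sk_buff_head *q, void *dmabuf)
    {
        struct sk_buff *skb;
        unsigned int txlen;

        skb = skb_dequeue(q);
        if (!skb)
            return;

        txlen = skb->len - 2;       /* strip the 2-byte type prefix */
        skb_copy_from_linear_data_offset(skb, 2, dmabuf, txlen);
        /* ... start the DMA transfer of txlen bytes here ... */
        dev_kfree_skb_any(skb);
    }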
skb               141 drivers/staging/isdn/avm/t1isa.c 	struct sk_buff *skb;
skb               170 drivers/staging/isdn/avm/t1isa.c 			if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
skb               174 drivers/staging/isdn/avm/t1isa.c 				skb_put_data(skb, card->msgbuf, MsgLen);
skb               175 drivers/staging/isdn/avm/t1isa.c 				skb_put_data(skb, card->databuf, DataB3Len);
skb               176 drivers/staging/isdn/avm/t1isa.c 				capi_ctr_handle_message(ctrl, ApplId, skb);
skb               184 drivers/staging/isdn/avm/t1isa.c 			if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
skb               189 drivers/staging/isdn/avm/t1isa.c 				skb_put_data(skb, card->msgbuf, MsgLen);
skb               190 drivers/staging/isdn/avm/t1isa.c 				if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3)
skb               192 drivers/staging/isdn/avm/t1isa.c 							     CAPIMSG_NCCI(skb->data),
skb               193 drivers/staging/isdn/avm/t1isa.c 							     CAPIMSG_MSGID(skb->data));
skb               195 drivers/staging/isdn/avm/t1isa.c 				capi_ctr_handle_message(ctrl, ApplId, skb);
skb               369 drivers/staging/isdn/avm/t1isa.c static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
skb               458 drivers/staging/isdn/avm/t1isa.c static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
skb               464 drivers/staging/isdn/avm/t1isa.c 	u16 len = CAPIMSG_LEN(skb->data);
skb               465 drivers/staging/isdn/avm/t1isa.c 	u8 cmd = CAPIMSG_COMMAND(skb->data);
skb               466 drivers/staging/isdn/avm/t1isa.c 	u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
skb               472 drivers/staging/isdn/avm/t1isa.c 					     CAPIMSG_APPID(skb->data),
skb               473 drivers/staging/isdn/avm/t1isa.c 					     CAPIMSG_NCCI(skb->data),
skb               474 drivers/staging/isdn/avm/t1isa.c 					     CAPIMSG_MSGID(skb->data));
skb               479 drivers/staging/isdn/avm/t1isa.c 		dlen = CAPIMSG_DATALEN(skb->data);
skb               482 drivers/staging/isdn/avm/t1isa.c 		t1_put_slice(port, skb->data, len);
skb               483 drivers/staging/isdn/avm/t1isa.c 		t1_put_slice(port, skb->data + len, dlen);
skb               486 drivers/staging/isdn/avm/t1isa.c 		t1_put_slice(port, skb->data, len);
skb               489 drivers/staging/isdn/avm/t1isa.c 	dev_kfree_skb_any(skb);
skb               141 drivers/staging/isdn/gigaset/asyncdata.c 	struct sk_buff *skb = bcs->rx_skb;
skb               201 drivers/staging/isdn/gigaset/asyncdata.c 				if (!skb) {
skb               204 drivers/staging/isdn/gigaset/asyncdata.c 				} else if (skb->len < 2) {
skb               208 drivers/staging/isdn/gigaset/asyncdata.c 						 skb->len);
skb               210 drivers/staging/isdn/gigaset/asyncdata.c 					dev_kfree_skb_any(skb);
skb               215 drivers/staging/isdn/gigaset/asyncdata.c 						skb->len);
skb               217 drivers/staging/isdn/gigaset/asyncdata.c 					dev_kfree_skb_any(skb);
skb               220 drivers/staging/isdn/gigaset/asyncdata.c 					__skb_trim(skb, skb->len - 2);
skb               221 drivers/staging/isdn/gigaset/asyncdata.c 					gigaset_skb_rcvd(bcs, skb);
skb               226 drivers/staging/isdn/gigaset/asyncdata.c 				skb = gigaset_new_rx_skb(bcs);
skb               232 drivers/staging/isdn/gigaset/asyncdata.c 				if (!skb) {
skb               235 drivers/staging/isdn/gigaset/asyncdata.c 					skb = gigaset_new_rx_skb(bcs);
skb               257 drivers/staging/isdn/gigaset/asyncdata.c 		if (skb) {
skb               258 drivers/staging/isdn/gigaset/asyncdata.c 			if (skb->len >= bcs->rx_bufsize) {
skb               260 drivers/staging/isdn/gigaset/asyncdata.c 				dev_kfree_skb_any(skb);
skb               262 drivers/staging/isdn/gigaset/asyncdata.c 				bcs->rx_skb = skb = NULL;
skb               264 drivers/staging/isdn/gigaset/asyncdata.c 				__skb_put_u8(skb, c);
skb               287 drivers/staging/isdn/gigaset/asyncdata.c 	struct sk_buff *skb = bcs->rx_skb;
skb               292 drivers/staging/isdn/gigaset/asyncdata.c 	if (!skb) {
skb               298 drivers/staging/isdn/gigaset/asyncdata.c 	while (procbytes < numbytes && skb->len < bcs->rx_bufsize) {
skb               315 drivers/staging/isdn/gigaset/asyncdata.c 		__skb_put_u8(skb, bitrev8(c));
skb               320 drivers/staging/isdn/gigaset/asyncdata.c 		gigaset_skb_rcvd(bcs, skb);
skb               455 drivers/staging/isdn/gigaset/asyncdata.c static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
skb               466 drivers/staging/isdn/gigaset/asyncdata.c 	cp = skb->data;
skb               467 drivers/staging/isdn/gigaset/asyncdata.c 	len = skb->len;
skb               479 drivers/staging/isdn/gigaset/asyncdata.c 	hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
skb               481 drivers/staging/isdn/gigaset/asyncdata.c 		dev_kfree_skb_any(skb);
skb               487 drivers/staging/isdn/gigaset/asyncdata.c 	skb_reserve(hdlc_skb, skb->mac_len);
skb               488 drivers/staging/isdn/gigaset/asyncdata.c 	memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
skb               489 drivers/staging/isdn/gigaset/asyncdata.c 	hdlc_skb->mac_len = skb->mac_len;
skb               495 drivers/staging/isdn/gigaset/asyncdata.c 	while (skb->len--) {
skb               496 drivers/staging/isdn/gigaset/asyncdata.c 		if (muststuff(*skb->data)) {
skb               498 drivers/staging/isdn/gigaset/asyncdata.c 			skb_put_u8(hdlc_skb, (*skb->data++) ^ PPP_TRANS);
skb               500 drivers/staging/isdn/gigaset/asyncdata.c 			skb_put_u8(hdlc_skb, *skb->data++);
skb               520 drivers/staging/isdn/gigaset/asyncdata.c 	dev_kfree_skb_any(skb);
skb               533 drivers/staging/isdn/gigaset/asyncdata.c static struct sk_buff *iraw_encode(struct sk_buff *skb)
skb               543 drivers/staging/isdn/gigaset/asyncdata.c 	iraw_skb = dev_alloc_skb(2 * skb->len + skb->mac_len);
skb               545 drivers/staging/isdn/gigaset/asyncdata.c 		dev_kfree_skb_any(skb);
skb               551 drivers/staging/isdn/gigaset/asyncdata.c 	skb_reserve(iraw_skb, skb->mac_len);
skb               552 drivers/staging/isdn/gigaset/asyncdata.c 	memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
skb               553 drivers/staging/isdn/gigaset/asyncdata.c 	iraw_skb->mac_len = skb->mac_len;
skb               556 drivers/staging/isdn/gigaset/asyncdata.c 	cp = skb->data;
skb               557 drivers/staging/isdn/gigaset/asyncdata.c 	len = skb->len;
skb               564 drivers/staging/isdn/gigaset/asyncdata.c 	dev_kfree_skb_any(skb);
skb               582 drivers/staging/isdn/gigaset/asyncdata.c int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
skb               585 drivers/staging/isdn/gigaset/asyncdata.c 	unsigned len = skb->len;
skb               589 drivers/staging/isdn/gigaset/asyncdata.c 		skb = HDLC_Encode(skb);
skb               591 drivers/staging/isdn/gigaset/asyncdata.c 		skb = iraw_encode(skb);
skb               592 drivers/staging/isdn/gigaset/asyncdata.c 	if (!skb) {
skb               598 drivers/staging/isdn/gigaset/asyncdata.c 	skb_queue_tail(&bcs->squeue, skb);
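HDLC_Encode() above byte-stuffs an outgoing frame: every byte that collides with the flag or escape value is preceded by PPP_ESCAPE and XORed with PPP_TRANS (0x20). A sketch of the inner transform, assuming the driver-local muststuff() flags PPP_FLAG, PPP_ESCAPE and escaped control characters:

    #include <linux/ppp_defs.h>
    #include <linux/skbuff.h>

    /* Sketch of the stuffing loop; muststuff() is the driver-local test. */
    static void example_stuff(struct sk_buff *skb, struct sk_buff *hdlc_skb)
    {
        while (skb->len--) {
            if (muststuff(*skb->data)) {
                skb_put_u8(hdlc_skb, PPP_ESCAPE);
                skb_put_u8(hdlc_skb, (*skb->data++) ^ PPP_TRANS);
            } else {
                skb_put_u8(hdlc_skb, *skb->data++);
            }
        }
        skb_put_u8(hdlc_skb, PPP_FLAG);     /* closing flag byte */
    }

This is why the listing's dev_alloc_skb() sizes the output as skb->len plus a pre-counted stuf_cnt plus room for flags and FCS: every stuffed byte doubles.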
skb              1168 drivers/staging/isdn/gigaset/bas-gigaset.c 	struct sk_buff *skb;
skb              1275 drivers/staging/isdn/gigaset/bas-gigaset.c 	while ((skb = skb_dequeue(&bcs->squeue))) {
skb              1277 drivers/staging/isdn/gigaset/bas-gigaset.c 		len = skb->len;
skb              1278 drivers/staging/isdn/gigaset/bas-gigaset.c 		if (gigaset_isoc_buildframe(bcs, skb->data, len) == -EAGAIN) {
skb              1280 drivers/staging/isdn/gigaset/bas-gigaset.c 			skb_queue_head(&bcs->squeue, skb);
skb              1285 drivers/staging/isdn/gigaset/bas-gigaset.c 		skb_pull(skb, len);
skb              1286 drivers/staging/isdn/gigaset/bas-gigaset.c 		gigaset_skb_sent(bcs, skb);
skb              1287 drivers/staging/isdn/gigaset/bas-gigaset.c 		dev_kfree_skb_any(skb);
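The bas-gigaset isochronous sender above shows the requeue-on-backpressure pattern: if the frame builder reports -EAGAIN, the skb goes back to the head of the queue untouched and is retried on the next pass. A sketch under the same assumptions (the driver's struct bc_state with its squeue, and gigaset_isoc_buildframe() from the listing):

    static void example_drain(struct bc_state *bcs)
    {
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&bcs->squeue))) {
            if (gigaset_isoc_buildframe(bcs, skb->data, skb->len) == -EAGAIN) {
                skb_queue_head(&bcs->squeue, skb);  /* output full: retry later */
                break;
            }
            skb_pull(skb, skb->len);    /* mark it consumed */
            gigaset_skb_sent(bcs, skb); /* notify the link layer */
            dev_kfree_skb_any(skb);
        }
    }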
skb               412 drivers/staging/isdn/gigaset/capi.c void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
skb               417 drivers/staging/isdn/gigaset/capi.c 	int len = skb->len;
skb               424 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb               431 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb               440 drivers/staging/isdn/gigaset/capi.c 	skb_push(skb, CAPI_DATA_B3_REQ_LEN);
skb               441 drivers/staging/isdn/gigaset/capi.c 	CAPIMSG_SETLEN(skb->data, CAPI_DATA_B3_REQ_LEN);
skb               442 drivers/staging/isdn/gigaset/capi.c 	CAPIMSG_SETAPPID(skb->data, ap->id);
skb               443 drivers/staging/isdn/gigaset/capi.c 	CAPIMSG_SETCOMMAND(skb->data, CAPI_DATA_B3);
skb               444 drivers/staging/isdn/gigaset/capi.c 	CAPIMSG_SETSUBCOMMAND(skb->data,  CAPI_IND);
skb               445 drivers/staging/isdn/gigaset/capi.c 	CAPIMSG_SETMSGID(skb->data, ap->nextMessageNumber++);
skb               446 drivers/staging/isdn/gigaset/capi.c 	CAPIMSG_SETCONTROLLER(skb->data, iif->ctr.cnr);
skb               447 drivers/staging/isdn/gigaset/capi.c 	CAPIMSG_SETPLCI_PART(skb->data, bcs->channel + 1);
skb               448 drivers/staging/isdn/gigaset/capi.c 	CAPIMSG_SETNCCI_PART(skb->data, 1);
skb               450 drivers/staging/isdn/gigaset/capi.c 	CAPIMSG_SETDATALEN(skb->data, len);
skb               452 drivers/staging/isdn/gigaset/capi.c 	CAPIMSG_SETFLAGS(skb->data, 0);
skb               456 drivers/staging/isdn/gigaset/capi.c 	dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
skb               457 drivers/staging/isdn/gigaset/capi.c 	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
skb               499 drivers/staging/isdn/gigaset/capi.c 	struct sk_buff *skb;
skb               641 drivers/staging/isdn/gigaset/capi.c 			skb = alloc_skb(msgsize, GFP_ATOMIC);
skb               642 drivers/staging/isdn/gigaset/capi.c 			if (!skb) {
skb               648 drivers/staging/isdn/gigaset/capi.c 					      __skb_put(skb, msgsize))) {
skb               651 drivers/staging/isdn/gigaset/capi.c 				dev_kfree_skb_any(skb);
skb               665 drivers/staging/isdn/gigaset/capi.c 			capi_ctr_handle_message(&iif->ctr, ap->id, skb);
skb               685 drivers/staging/isdn/gigaset/capi.c 	struct sk_buff *skb;
skb               694 drivers/staging/isdn/gigaset/capi.c 	skb = alloc_skb(CAPI_DISCONNECT_IND_LEN, GFP_ATOMIC);
skb               695 drivers/staging/isdn/gigaset/capi.c 	if (!skb) {
skb               700 drivers/staging/isdn/gigaset/capi.c 			      __skb_put(skb, CAPI_DISCONNECT_IND_LEN))) {
skb               702 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb               706 drivers/staging/isdn/gigaset/capi.c 	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
skb               719 drivers/staging/isdn/gigaset/capi.c 	struct sk_buff *skb;
skb               729 drivers/staging/isdn/gigaset/capi.c 	skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_ATOMIC);
skb               730 drivers/staging/isdn/gigaset/capi.c 	if (!skb) {
skb               735 drivers/staging/isdn/gigaset/capi.c 			  __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
skb               737 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb               741 drivers/staging/isdn/gigaset/capi.c 	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
skb               756 drivers/staging/isdn/gigaset/capi.c 	struct sk_buff *skb;
skb               799 drivers/staging/isdn/gigaset/capi.c 	skb = alloc_skb(msgsize, GFP_ATOMIC);
skb               800 drivers/staging/isdn/gigaset/capi.c 	if (!skb) {
skb               804 drivers/staging/isdn/gigaset/capi.c 	if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
skb               806 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb               810 drivers/staging/isdn/gigaset/capi.c 	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
skb               855 drivers/staging/isdn/gigaset/capi.c 	struct sk_buff *skb;
skb               903 drivers/staging/isdn/gigaset/capi.c 	skb = alloc_skb(msgsize, GFP_ATOMIC);
skb               904 drivers/staging/isdn/gigaset/capi.c 	if (!skb) {
skb               908 drivers/staging/isdn/gigaset/capi.c 	if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
skb               910 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb               914 drivers/staging/isdn/gigaset/capi.c 	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
skb              1116 drivers/staging/isdn/gigaset/capi.c 		      struct sk_buff *skb,
skb              1127 drivers/staging/isdn/gigaset/capi.c 	if (capi_cmsg2message(&iif->acmsg, skb->data)) {
skb              1129 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1132 drivers/staging/isdn/gigaset/capi.c 	__skb_trim(skb, CAPI_STDCONF_LEN);
skb              1134 drivers/staging/isdn/gigaset/capi.c 	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
skb              1142 drivers/staging/isdn/gigaset/capi.c 			    struct sk_buff *skb)
skb              1153 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(cmsg, skb->data)) {
skb              1155 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1185 drivers/staging/isdn/gigaset/capi.c 			send_conf(iif, ap, skb, CapiIllMessageParmCoding);
skb              1203 drivers/staging/isdn/gigaset/capi.c 				send_conf(iif, ap, skb,
skb              1256 drivers/staging/isdn/gigaset/capi.c 	dev_kfree_skb_any(skb);
skb              1282 drivers/staging/isdn/gigaset/capi.c 			  struct sk_buff *skb)
skb              1287 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(&iif->acmsg, skb->data)) {
skb              1289 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1297 drivers/staging/isdn/gigaset/capi.c 	send_conf(iif, ap, skb, CapiSuccess);
skb              1306 drivers/staging/isdn/gigaset/capi.c 			 struct sk_buff *skb)
skb              1311 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(&iif->acmsg, skb->data)) {
skb              1313 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1317 drivers/staging/isdn/gigaset/capi.c 	send_conf(iif, ap, skb, CapiAlertAlreadySent);
skb              1327 drivers/staging/isdn/gigaset/capi.c 			   struct sk_buff *skb)
skb              1340 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(cmsg, skb->data)) {
skb              1342 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1352 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CapiNoPlciAvailable);
skb              1600 drivers/staging/isdn/gigaset/capi.c 	send_conf(iif, ap, skb, CapiSuccess);
skb              1612 drivers/staging/isdn/gigaset/capi.c 	send_conf(iif, ap, skb, info);
skb              1621 drivers/staging/isdn/gigaset/capi.c 			    struct sk_buff *skb)
skb              1631 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(cmsg, skb->data)) {
skb              1633 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1637 drivers/staging/isdn/gigaset/capi.c 	dev_kfree_skb_any(skb);
skb              1793 drivers/staging/isdn/gigaset/capi.c 			      struct sk_buff *skb)
skb              1801 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(cmsg, skb->data)) {
skb              1803 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1813 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
skb              1826 drivers/staging/isdn/gigaset/capi.c 	send_conf(iif, ap, skb,
skb              1840 drivers/staging/isdn/gigaset/capi.c 			       struct sk_buff *skb)
skb              1850 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(cmsg, skb->data)) {
skb              1852 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1863 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1875 drivers/staging/isdn/gigaset/capi.c 			dev_kfree_skb_any(skb);
skb              1893 drivers/staging/isdn/gigaset/capi.c 	__skb_trim(skb, msgsize);
skb              1894 drivers/staging/isdn/gigaset/capi.c 	if (capi_cmsg2message(cmsg, skb->data)) {
skb              1896 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1900 drivers/staging/isdn/gigaset/capi.c 	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
skb              1910 drivers/staging/isdn/gigaset/capi.c 			      struct sk_buff *skb)
skb              1920 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(cmsg, skb->data)) {
skb              1922 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              1932 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
skb              1966 drivers/staging/isdn/gigaset/capi.c 			send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
skb              1976 drivers/staging/isdn/gigaset/capi.c 			send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
skb              1995 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
skb              2001 drivers/staging/isdn/gigaset/capi.c 	send_conf(iif, ap, skb, CapiSuccess);
skb              2010 drivers/staging/isdn/gigaset/capi.c 				 struct sk_buff *skb)
skb              2018 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(cmsg, skb->data)) {
skb              2020 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              2031 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
skb              2038 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb,
skb              2045 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
skb              2053 drivers/staging/isdn/gigaset/capi.c 	send_conf(iif, ap, skb,
skb              2063 drivers/staging/isdn/gigaset/capi.c 			   struct sk_buff *skb)
skb              2067 drivers/staging/isdn/gigaset/capi.c 	int channel = CAPIMSG_PLCI_PART(skb->data);
skb              2068 drivers/staging/isdn/gigaset/capi.c 	u16 ncci = CAPIMSG_NCCI_PART(skb->data);
skb              2069 drivers/staging/isdn/gigaset/capi.c 	u16 msglen = CAPIMSG_LEN(skb->data);
skb              2070 drivers/staging/isdn/gigaset/capi.c 	u16 datalen = CAPIMSG_DATALEN(skb->data);
skb              2071 drivers/staging/isdn/gigaset/capi.c 	u16 flags = CAPIMSG_FLAGS(skb->data);
skb              2072 drivers/staging/isdn/gigaset/capi.c 	u16 msgid = CAPIMSG_MSGID(skb->data);
skb              2073 drivers/staging/isdn/gigaset/capi.c 	u16 handle = CAPIMSG_HANDLE_REQ(skb->data);
skb              2076 drivers/staging/isdn/gigaset/capi.c 	dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
skb              2081 drivers/staging/isdn/gigaset/capi.c 			   "DATA_B3_REQ", "NCCI", CAPIMSG_NCCI(skb->data));
skb              2082 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
skb              2089 drivers/staging/isdn/gigaset/capi.c 	if (msglen + datalen != skb->len)
skb              2091 drivers/staging/isdn/gigaset/capi.c 			   "DATA_B3_REQ", msglen, datalen, skb->len);
skb              2092 drivers/staging/isdn/gigaset/capi.c 	if (msglen + datalen > skb->len) {
skb              2094 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CapiIllMessageParmCoding); /* ? */
skb              2100 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CapiIllMessageParmCoding);
skb              2106 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
skb              2111 drivers/staging/isdn/gigaset/capi.c 	skb_reset_mac_header(skb);
skb              2112 drivers/staging/isdn/gigaset/capi.c 	skb->mac_len = msglen;
skb              2113 drivers/staging/isdn/gigaset/capi.c 	skb_pull(skb, msglen);
skb              2116 drivers/staging/isdn/gigaset/capi.c 	if (cs->ops->send_skb(bcs, skb) < 0) {
skb              2117 drivers/staging/isdn/gigaset/capi.c 		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
skb              2137 drivers/staging/isdn/gigaset/capi.c 			    struct sk_buff *skb)
skb              2142 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(&iif->acmsg, skb->data)) {
skb              2144 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              2148 drivers/staging/isdn/gigaset/capi.c 	send_conf(iif, ap, skb,
skb              2157 drivers/staging/isdn/gigaset/capi.c 			   struct sk_buff *skb)
skb              2162 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(&iif->acmsg, skb->data)) {
skb              2164 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              2168 drivers/staging/isdn/gigaset/capi.c 	send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
skb              2176 drivers/staging/isdn/gigaset/capi.c 		       struct sk_buff *skb)
skb              2181 drivers/staging/isdn/gigaset/capi.c 	if (capi_message2cmsg(&iif->acmsg, skb->data)) {
skb              2183 drivers/staging/isdn/gigaset/capi.c 		dev_kfree_skb_any(skb);
skb              2187 drivers/staging/isdn/gigaset/capi.c 	dev_kfree_skb_any(skb);
skb              2192 drivers/staging/isdn/gigaset/capi.c 			    struct sk_buff *skb)
skb              2194 drivers/staging/isdn/gigaset/capi.c 	dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
skb              2195 drivers/staging/isdn/gigaset/capi.c 	dev_kfree_skb_any(skb);
skb              2265 drivers/staging/isdn/gigaset/capi.c static u16 gigaset_send_message(struct capi_ctr *ctr, struct sk_buff *skb)
skb              2274 drivers/staging/isdn/gigaset/capi.c 	if (skb_linearize(skb) < 0) {
skb              2280 drivers/staging/isdn/gigaset/capi.c 	ap = get_appl(iif, CAPIMSG_APPID(skb->data));
skb              2283 drivers/staging/isdn/gigaset/capi.c 			   __func__, CAPIMSG_APPID(skb->data));
skb              2288 drivers/staging/isdn/gigaset/capi.c 	handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
skb              2293 drivers/staging/isdn/gigaset/capi.c 				   __func__, CAPIMSG_CMD(skb->data));
skb              2300 drivers/staging/isdn/gigaset/capi.c 		skb_queue_tail(&iif->sendqueue, skb);
skb              2305 drivers/staging/isdn/gigaset/capi.c 	handler(iif, ap, skb);
skb              2309 drivers/staging/isdn/gigaset/capi.c 		skb = skb_dequeue(&iif->sendqueue);
skb              2310 drivers/staging/isdn/gigaset/capi.c 		if (!skb) {
skb              2315 drivers/staging/isdn/gigaset/capi.c 		ap = get_appl(iif, CAPIMSG_APPID(skb->data));
skb              2319 drivers/staging/isdn/gigaset/capi.c 				 __func__, CAPIMSG_APPID(skb->data));
skb              2322 drivers/staging/isdn/gigaset/capi.c 		handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
skb              2326 drivers/staging/isdn/gigaset/capi.c 				__func__, CAPIMSG_CMD(skb->data));
skb              2329 drivers/staging/isdn/gigaset/capi.c 		handler(iif, ap, skb);
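gigaset_skb_rcvd() above turns a received payload into a CAPI DATA_B3 indication by growing the skb backwards with skb_push() and stamping the header in place, which only works because the rx allocator reserved that much headroom up front. A minimal sketch of the headroom trick, assuming the driver-local CAPI_DATA_B3_REQ_LEN header size and at least that much reserved headroom:

    #include <linux/isdn/capiutil.h>
    #include <linux/isdn/capicmd.h>

    static void example_prepend_hdr(struct sk_buff *skb, u16 appl_id)
    {
        int len = skb->len;         /* payload length, before the push */

        skb_push(skb, CAPI_DATA_B3_REQ_LEN);    /* reuse reserved headroom */
        CAPIMSG_SETLEN(skb->data, CAPI_DATA_B3_REQ_LEN);
        CAPIMSG_SETAPPID(skb->data, appl_id);
        CAPIMSG_SETCOMMAND(skb->data, CAPI_DATA_B3);
        CAPIMSG_SETSUBCOMMAND(skb->data, CAPI_IND);
        CAPIMSG_SETDATALEN(skb->data, len);
        /* remaining fields (PLCI, NCCI, flags, msgid) are set the same way */
    }

Prepending in place avoids a second allocation and a payload copy on every received B-channel packet.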
skb               784 drivers/staging/isdn/gigaset/common.c 	struct sk_buff *skb;
skb               788 drivers/staging/isdn/gigaset/common.c 	while ((skb = skb_dequeue(&bcs->squeue)) != NULL)
skb               789 drivers/staging/isdn/gigaset/common.c 		dev_kfree_skb(skb);
skb                14 drivers/staging/isdn/gigaset/dummyll.c void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
skb                19 drivers/staging/isdn/gigaset/dummyll.c void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
skb               607 drivers/staging/isdn/gigaset/gigaset.h 	int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
skb               630 drivers/staging/isdn/gigaset/gigaset.h int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
skb               641 drivers/staging/isdn/gigaset/gigaset.h int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
skb               673 drivers/staging/isdn/gigaset/gigaset.h void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
skb               674 drivers/staging/isdn/gigaset/gigaset.h void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb);
skb               836 drivers/staging/isdn/gigaset/isocdata.c 	struct sk_buff *skb;
skb               844 drivers/staging/isdn/gigaset/isocdata.c 	skb = bcs->rx_skb;
skb               845 drivers/staging/isdn/gigaset/isocdata.c 	if (skb == NULL) {
skb               846 drivers/staging/isdn/gigaset/isocdata.c 		skb = gigaset_new_rx_skb(bcs);
skb               847 drivers/staging/isdn/gigaset/isocdata.c 		if (skb == NULL)
skb               850 drivers/staging/isdn/gigaset/isocdata.c 	dobytes = bcs->rx_bufsize - skb->len;
skb               852 drivers/staging/isdn/gigaset/isocdata.c 		dst = skb_put(skb, count < dobytes ? count : dobytes);
skb               860 drivers/staging/isdn/gigaset/isocdata.c 				   "rcv data", skb->data, skb->len);
skb               861 drivers/staging/isdn/gigaset/isocdata.c 			bcs->hw.bas->goodbytes += skb->len;
skb               862 drivers/staging/isdn/gigaset/isocdata.c 			gigaset_skb_rcvd(bcs, skb);
skb               863 drivers/staging/isdn/gigaset/isocdata.c 			skb = gigaset_new_rx_skb(bcs);
skb               864 drivers/staging/isdn/gigaset/isocdata.c 			if (skb == NULL)
skb               986 drivers/staging/isdn/gigaset/isocdata.c int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
skb               988 drivers/staging/isdn/gigaset/isocdata.c 	int len = skb->len;
skb               997 drivers/staging/isdn/gigaset/isocdata.c 	skb_queue_tail(&bcs->squeue, skb);
skb                64 drivers/staging/isdn/gigaset/ser-gigaset.c 	struct sk_buff *skb = bcs->tx_skb;
skb                67 drivers/staging/isdn/gigaset/ser-gigaset.c 	WARN_ON(!tty || !tty->ops || !skb);
skb                69 drivers/staging/isdn/gigaset/ser-gigaset.c 	if (!skb->len) {
skb                70 drivers/staging/isdn/gigaset/ser-gigaset.c 		dev_kfree_skb_any(skb);
skb                77 drivers/staging/isdn/gigaset/ser-gigaset.c 		sent = tty->ops->write(tty, skb->data, skb->len);
skb                84 drivers/staging/isdn/gigaset/ser-gigaset.c 	skb_pull(skb, sent);
skb                85 drivers/staging/isdn/gigaset/ser-gigaset.c 	if (!skb->len) {
skb                87 drivers/staging/isdn/gigaset/ser-gigaset.c 		gigaset_skb_sent(bcs, skb);
skb                90 drivers/staging/isdn/gigaset/ser-gigaset.c 			(unsigned long) skb);
skb                91 drivers/staging/isdn/gigaset/ser-gigaset.c 		dev_kfree_skb_any(skb);
skb               202 drivers/staging/isdn/gigaset/ser-gigaset.c 	struct sk_buff *skb;
skb               221 drivers/staging/isdn/gigaset/ser-gigaset.c 	while ((skb = skb_dequeue(&cs->bcs->squeue)) != NULL)
skb               222 drivers/staging/isdn/gigaset/ser-gigaset.c 		dev_kfree_skb_any(skb);
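The ser-gigaset write path above handles short writes from the tty layer by consuming only what was accepted. A sketch of that pattern, assuming tty->ops->write() may return fewer bytes than requested:

    #include <linux/tty.h>
    #include <linux/skbuff.h>

    static void example_tty_send(struct tty_struct *tty, struct sk_buff *skb)
    {
        int sent = tty->ops->write(tty, skb->data, skb->len);

        if (sent < 0) {
            dev_kfree_skb_any(skb);     /* give up on a hard error */
            return;
        }
        skb_pull(skb, sent);    /* drop only what the tty accepted */
        if (!skb->len)
            dev_kfree_skb_any(skb);     /* fully sent; the driver also
                                         * notifies via gigaset_skb_sent() */
        /* else: keep skb as the pending tx_skb and retry when woken */
    }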
skb                47 drivers/staging/isdn/hysdn/hycapi.c static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
skb               117 drivers/staging/isdn/hysdn/hycapi.c hycapi_sendmsg_internal(struct capi_ctr *ctrl, struct sk_buff *skb)
skb               126 drivers/staging/isdn/hysdn/hycapi.c 	cinfo->skbs[cinfo->in_idx++] = skb;	/* add to buffer list */
skb               136 drivers/staging/isdn/hysdn/hycapi.c 	cinfo->tx_skb = skb;
skb               157 drivers/staging/isdn/hysdn/hycapi.c 	struct sk_buff *skb;
skb               169 drivers/staging/isdn/hysdn/hycapi.c 	if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
skb               174 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &len, sizeof(__u16));
skb               175 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &appl, sizeof(__u16));
skb               176 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &_command, sizeof(__u8));
skb               177 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &_subcommand, sizeof(__u8));
skb               178 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &MessageNumber, sizeof(__u16));
skb               179 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &MessageBufferSize, sizeof(__u16));
skb               180 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &(rp->level3cnt), sizeof(__u16));
skb               181 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &(rp->datablkcnt), sizeof(__u16));
skb               182 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &(rp->datablklen), sizeof(__u16));
skb               183 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, ExtFeatureDefaults, slen);
skb               185 drivers/staging/isdn/hysdn/hycapi.c 	hycapi_send_message(ctrl, skb);
skb               199 drivers/staging/isdn/hysdn/hycapi.c 	struct sk_buff *skb;
skb               208 drivers/staging/isdn/hysdn/hycapi.c 				skb = skb_copy(hycapi_applications[i].listen_req[ctrl->cnr - 1], GFP_ATOMIC);
skb               209 drivers/staging/isdn/hysdn/hycapi.c 				hycapi_sendmsg_internal(ctrl, skb);
skb               266 drivers/staging/isdn/hysdn/hycapi.c 	struct sk_buff *skb;
skb               277 drivers/staging/isdn/hysdn/hycapi.c 	if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
skb               282 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &len, sizeof(__u16));
skb               283 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &appl, sizeof(__u16));
skb               284 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &_command, sizeof(__u8));
skb               285 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &_subcommand, sizeof(__u8));
skb               286 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &MessageNumber, sizeof(__u16));
skb               287 drivers/staging/isdn/hysdn/hycapi.c 	hycapi_send_message(ctrl, skb);
skb               371 drivers/staging/isdn/hysdn/hycapi.c static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
skb               379 drivers/staging/isdn/hysdn/hycapi.c 	appl_id = CAPIMSG_APPID(skb->data);
skb               395 drivers/staging/isdn/hysdn/hycapi.c 	switch (CAPIMSG_CMD(skb->data)) {
skb               398 drivers/staging/isdn/hysdn/hycapi.c 				  CAPIMSG_NCCI(skb->data));
skb               401 drivers/staging/isdn/hysdn/hycapi.c 		_len = CAPIMSG_LEN(skb->data);
skb               404 drivers/staging/isdn/hysdn/hycapi.c 			skb_copy_from_linear_data(skb, msghead, 22);
skb               405 drivers/staging/isdn/hysdn/hycapi.c 			skb_copy_to_linear_data_offset(skb, _len2,
skb               407 drivers/staging/isdn/hysdn/hycapi.c 			skb_pull(skb, _len2);
skb               408 drivers/staging/isdn/hysdn/hycapi.c 			CAPIMSG_SETLEN(skb->data, 22);
skb               410 drivers/staging/isdn/hysdn/hycapi.c 						     CAPIMSG_APPID(skb->data),
skb               411 drivers/staging/isdn/hysdn/hycapi.c 						     CAPIMSG_NCCI(skb->data),
skb               412 drivers/staging/isdn/hysdn/hycapi.c 						     CAPIMSG_MSGID(skb->data));
skb               421 drivers/staging/isdn/hysdn/hycapi.c 		if (!(hycapi_applications[appl_id  -1].listen_req[ctrl->cnr - 1] = skb_copy(skb, GFP_ATOMIC)))
skb               431 drivers/staging/isdn/hysdn/hycapi.c 		hycapi_sendmsg_internal(ctrl, skb);
skb               433 drivers/staging/isdn/hysdn/hycapi.c 		dev_kfree_skb_any(skb);
skb               517 drivers/staging/isdn/hysdn/hycapi.c 	struct sk_buff *skb;
skb               542 drivers/staging/isdn/hysdn/hycapi.c 		if (!(skb = alloc_skb(len2, GFP_ATOMIC))) {
skb               547 drivers/staging/isdn/hysdn/hycapi.c 		skb_put_data(skb, buf, MsgLen);
skb               548 drivers/staging/isdn/hysdn/hycapi.c 		skb_put_data(skb, CP64, 2 * sizeof(__u32));
skb               549 drivers/staging/isdn/hysdn/hycapi.c 		skb_put_data(skb, buf + MsgLen, len - MsgLen);
skb               550 drivers/staging/isdn/hysdn/hycapi.c 		CAPIMSG_SETLEN(skb->data, 30);
skb               552 drivers/staging/isdn/hysdn/hycapi.c 		if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
skb               557 drivers/staging/isdn/hysdn/hycapi.c 		skb_put_data(skb, buf, len);
skb               559 drivers/staging/isdn/hysdn/hycapi.c 	switch (CAPIMSG_CMD(skb->data))
skb               563 drivers/staging/isdn/hysdn/hycapi.c 		info = CAPIMSG_U16(skb->data, 12);
skb               567 drivers/staging/isdn/hysdn/hycapi.c 			capilib_new_ncci(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data),
skb               597 drivers/staging/isdn/hysdn/hycapi.c 				 CAPIMSG_NCCI(skb->data),
skb               602 drivers/staging/isdn/hysdn/hycapi.c 				     CAPIMSG_NCCI(skb->data),
skb               603 drivers/staging/isdn/hysdn/hycapi.c 				     CAPIMSG_MSGID(skb->data));
skb               608 drivers/staging/isdn/hysdn/hycapi.c 	capi_ctr_handle_message(ctrl, ApplId, skb);
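hycapi builds its control messages field by field, appending each one with skb_put_data() in the wire order shown above: total length, ApplId, command, subcommand, message number, then any parameter words. A condensed sketch of that serialization, with the caveat that, like the driver, it writes fields in host byte order and relies on the board expecting little-endian CAPI:

    #include <linux/skbuff.h>

    static struct sk_buff *example_build_msg(u16 len, u16 appl, u8 cmd,
                                             u8 subcmd, u16 msgnr)
    {
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
            return NULL;

        skb_put_data(skb, &len, sizeof(__u16));     /* total length first */
        skb_put_data(skb, &appl, sizeof(__u16));
        skb_put_data(skb, &cmd, sizeof(__u8));
        skb_put_data(skb, &subcmd, sizeof(__u8));
        skb_put_data(skb, &msgnr, sizeof(__u16));
        /* parameter words (level3cnt, datablkcnt, ...) follow the same way */
        return skb;
    }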
skb               124 drivers/staging/isdn/hysdn/hysdn_net.c net_send_packet(struct sk_buff *skb, struct net_device *dev)
skb               130 drivers/staging/isdn/hysdn/hysdn_net.c 	lp->skbs[lp->in_idx++] = skb;	/* add to buffer list */
skb               192 drivers/staging/isdn/hysdn/hysdn_net.c 	struct sk_buff *skb;
skb               200 drivers/staging/isdn/hysdn/hysdn_net.c 	skb = dev_alloc_skb(len);
skb               201 drivers/staging/isdn/hysdn/hysdn_net.c 	if (skb == NULL) {
skb               208 drivers/staging/isdn/hysdn/hysdn_net.c 	skb_put_data(skb, buf, len);
skb               211 drivers/staging/isdn/hysdn/hysdn_net.c 	skb->protocol = eth_type_trans(skb, dev);
skb               215 drivers/staging/isdn/hysdn/hysdn_net.c 	netif_rx(skb);
skb                78 drivers/staging/isdn/hysdn/hysdn_sched.c 	struct sk_buff *skb;
skb               113 drivers/staging/isdn/hysdn/hysdn_sched.c 	    (skb = hysdn_tx_netget(card)) != NULL)
skb               115 drivers/staging/isdn/hysdn/hysdn_sched.c 		if (skb->len <= maxlen) {
skb               117 drivers/staging/isdn/hysdn/hysdn_sched.c 			skb_copy_from_linear_data(skb, buf, skb->len);
skb               118 drivers/staging/isdn/hysdn/hysdn_sched.c 			*len = skb->len;
skb               127 drivers/staging/isdn/hysdn/hysdn_sched.c 	    ((skb = hycapi_tx_capiget(card)) != NULL))
skb               129 drivers/staging/isdn/hysdn/hysdn_sched.c 		if (skb->len <= maxlen) {
skb               130 drivers/staging/isdn/hysdn/hysdn_sched.c 			skb_copy_from_linear_data(skb, buf, skb->len);
skb               131 drivers/staging/isdn/hysdn/hysdn_sched.c 			*len = skb->len;
skb               286 drivers/staging/ks7010/ks7010_sdio.c 						  struct sk_buff *skb),
skb               287 drivers/staging/ks7010/ks7010_sdio.c 			 struct sk_buff *skb)
skb               307 drivers/staging/ks7010/ks7010_sdio.c 	sp->skb = skb;
skb               315 drivers/staging/ks7010/ks7010_sdio.c 		(*complete_handler)(priv, skb);
skb               371 drivers/staging/ks7010/ks7010_sdio.c 		(*sp->complete_handler)(priv, sp->skb);
skb               380 drivers/staging/ks7010/ks7010_sdio.c 					   struct sk_buff *skb),
skb               381 drivers/staging/ks7010/ks7010_sdio.c 		  struct sk_buff *skb)
skb               399 drivers/staging/ks7010/ks7010_sdio.c 	result = enqueue_txdev(priv, p, size, complete_handler, skb);
skb               634 drivers/staging/ks7010/ks7010_sdio.c 			(*sp->complete_handler)(priv, sp->skb);
skb               420 drivers/staging/ks7010/ks_hostif.c 	struct sk_buff *skb;
skb               467 drivers/staging/ks7010/ks_hostif.c 		skb = dev_alloc_skb(rx_ind_size);
skb               468 drivers/staging/ks7010/ks_hostif.c 		if (!skb) {
skb               476 drivers/staging/ks7010/ks_hostif.c 		skb_put_data(skb, priv->rxp, size);
skb               481 drivers/staging/ks7010/ks_hostif.c 		skb_put_data(skb, &eth_hdr->h_proto, size);
skb               487 drivers/staging/ks7010/ks_hostif.c 		skb = dev_alloc_skb(rx_ind_size);
skb               488 drivers/staging/ks7010/ks_hostif.c 		if (!skb) {
skb               496 drivers/staging/ks7010/ks_hostif.c 		skb_put_data(skb, priv->rxp, 12);
skb               501 drivers/staging/ks7010/ks_hostif.c 		skb_put_data(skb, temp, 2);
skb               504 drivers/staging/ks7010/ks_hostif.c 		skb_put_data(skb, priv->rxp + 12, rx_ind_size - 14);
skb               519 drivers/staging/ks7010/ks_hostif.c 	skb->dev = priv->net_dev;
skb               520 drivers/staging/ks7010/ks_hostif.c 	skb->protocol = eth_type_trans(skb, skb->dev);
skb               523 drivers/staging/ks7010/ks_hostif.c 	netif_rx(skb);
skb              1063 drivers/staging/ks7010/ks_hostif.c int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
skb              1079 drivers/staging/ks7010/ks_hostif.c 	skb_len = skb->len;
skb              1092 drivers/staging/ks7010/ks_hostif.c 		dev_kfree_skb(skb);
skb              1112 drivers/staging/ks7010/ks_hostif.c 	buffer = skb->data;
skb              1113 drivers/staging/ks7010/ks_hostif.c 	length = skb->len;
skb              1116 drivers/staging/ks7010/ks_hostif.c 	eth = (struct ethhdr *)skb->data;
skb              1212 drivers/staging/ks7010/ks_hostif.c 			    send_packet_complete, skb);
skb              1232 drivers/staging/ks7010/ks_hostif.c 	dev_kfree_skb(skb);
skb               593 drivers/staging/ks7010/ks_hostif.h int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb);
skb               601 drivers/staging/ks7010/ks_hostif.h 					   struct sk_buff *skb),
skb               602 drivers/staging/ks7010/ks_hostif.h 		  struct sk_buff *skb);
skb               603 drivers/staging/ks7010/ks_hostif.h void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb);
skb               387 drivers/staging/ks7010/ks_wlan.h 				 struct sk_buff *skb);
skb               388 drivers/staging/ks7010/ks_wlan.h 	struct sk_buff *skb;
skb               494 drivers/staging/ks7010/ks_wlan.h 	struct sk_buff *skb;
skb                49 drivers/staging/ks7010/ks_wlan_net.c static int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb              2514 drivers/staging/ks7010/ks_wlan_net.c int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              2521 drivers/staging/ks7010/ks_wlan_net.c 	if (!skb) {
skb              2526 drivers/staging/ks7010/ks_wlan_net.c 		dev_kfree_skb(skb);
skb              2533 drivers/staging/ks7010/ks_wlan_net.c 	ret = hostif_data_request(priv, skb);
skb              2542 drivers/staging/ks7010/ks_wlan_net.c void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb)
skb              2549 drivers/staging/ks7010/ks_wlan_net.c 	if (skb) {
skb              2550 drivers/staging/ks7010/ks_wlan_net.c 		priv->nstats.tx_bytes += skb->len;
skb              2551 drivers/staging/ks7010/ks_wlan_net.c 		dev_kfree_skb(skb);
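The ks7010 transmit path above queues each tx unit together with a completion handler and the originating skb; send_packet_complete() then runs after the SDIO write finishes, doing the byte accounting and the free. A sketch of that pairing, assuming a driver-local queue element like the tx buffer struct visible in ks_wlan.h:

    struct ks_wlan_private;     /* driver-private, as in the listing */

    /* Sketch: a tx unit carries its skb and the callback that retires it. */
    struct example_tx_unit {
        void (*complete_handler)(struct ks_wlan_private *priv,
                                 struct sk_buff *skb);
        struct sk_buff *skb;
    };

    static void example_tx_done(struct ks_wlan_private *priv,
                                struct example_tx_unit *sp)
    {
        if (sp->complete_handler)
            (*sp->complete_handler)(priv, sp->skb);     /* accounts + frees */
    }

Keeping the skb alive until the hardware is done lets the completion read skb->len for tx_bytes before dev_kfree_skb(), as the ks_wlan_net.c excerpt shows.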
skb                75 drivers/staging/most/net/net.c static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
skb                79 drivers/staging/most/net/net.c 	const u8 *dest_addr = skb->data + 4;
skb                80 drivers/staging/most/net/net.c 	const u8 *eth_type = skb->data + 12;
skb                81 drivers/staging/most/net/net.c 	unsigned int payload_len = skb->len - ETH_HLEN;
skb                84 drivers/staging/most/net/net.c 	if (mdp_len < skb->len) {
skb                85 drivers/staging/most/net/net.c 		pr_err("drop: too large packet! (%u)\n", skb->len);
skb                95 drivers/staging/most/net/net.c 	if (skb->len < ETH_HLEN) {
skb                96 drivers/staging/most/net/net.c 		pr_err("drop: too small packet! (%d)\n", skb->len);
skb               126 drivers/staging/most/net/net.c 	memcpy(buff, skb->data + ETH_HLEN, payload_len);
skb               131 drivers/staging/most/net/net.c static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
skb               134 drivers/staging/most/net/net.c 	unsigned int mep_len = skb->len + MEP_HDR_LEN;
skb               136 drivers/staging/most/net/net.c 	if (mep_len < skb->len) {
skb               137 drivers/staging/most/net/net.c 		pr_err("drop: too large packet! (%u)\n", skb->len);
skb               157 drivers/staging/most/net/net.c 	memcpy(buff, skb->data, skb->len);
skb               233 drivers/staging/most/net/net.c static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
skb               249 drivers/staging/most/net/net.c 		ret = skb_to_mamac(skb, mbo);
skb               251 drivers/staging/most/net/net.c 		ret = skb_to_mep(skb, mbo);
skb               256 drivers/staging/most/net/net.c 		kfree_skb(skb);
skb               262 drivers/staging/most/net/net.c 	dev->stats.tx_bytes += skb->len;
skb               263 drivers/staging/most/net/net.c 	kfree_skb(skb);
skb               434 drivers/staging/most/net/net.c 	struct sk_buff *skb;
skb               456 drivers/staging/most/net/net.c 		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
skb               463 drivers/staging/most/net/net.c 		skb = dev_alloc_skb(len - MEP_HDR_LEN);
skb               466 drivers/staging/most/net/net.c 	if (!skb) {
skb               472 drivers/staging/most/net/net.c 	skb->dev = dev;
skb               476 drivers/staging/most/net/net.c 		ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);
skb               479 drivers/staging/most/net/net.c 		skb_put_data(skb, &zero, 4);
skb               480 drivers/staging/most/net/net.c 		skb_put_data(skb, buf + 5, 2);
skb               483 drivers/staging/most/net/net.c 		skb_put_data(skb, buf + 10, 2);
skb               492 drivers/staging/most/net/net.c 	skb_put_data(skb, buf, len);
skb               493 drivers/staging/most/net/net.c 	skb->protocol = eth_type_trans(skb, dev);
skb               494 drivers/staging/most/net/net.c 	skb_len = skb->len;
skb               495 drivers/staging/most/net/net.c 	if (netif_rx(skb) == NET_RX_SUCCESS) {
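
The most/net.c receive path above caches skb->len before netif_rx() because the stack owns the buffer once it is queued. A self-contained sketch of that hand-off; my_deliver_rx() is a hypothetical helper name:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Copy a received payload into a fresh skb and push it up the stack.
 * skb->len is read into a local before netif_rx(), which may free the
 * skb; touching it afterwards would be a use-after-free. */
static void my_deliver_rx(struct net_device *dev, const void *buf,
			  unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);
	unsigned int skb_len;

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb->dev = dev;
	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	skb_len = skb->len;
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb_len;
	}
}
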
skb                81 drivers/staging/netlogic/xlr_net.c 	struct sk_buff *skb;
skb                86 drivers/staging/netlogic/xlr_net.c 	skb = alloc_skb(XLR_RX_BUF_SIZE, GFP_ATOMIC);
skb                87 drivers/staging/netlogic/xlr_net.c 	if (!skb)
skb                89 drivers/staging/netlogic/xlr_net.c 	skb_data = skb->data;
skb                90 drivers/staging/netlogic/xlr_net.c 	skb_reserve(skb, MAC_SKB_BACK_PTR_SIZE);
skb                91 drivers/staging/netlogic/xlr_net.c 	memcpy(skb_data, &skb, buf_len);
skb                93 drivers/staging/netlogic/xlr_net.c 	return skb->data;
skb                99 drivers/staging/netlogic/xlr_net.c 	struct sk_buff *skb;
skb               111 drivers/staging/netlogic/xlr_net.c 		skb = (struct sk_buff *)(*(unsigned long *)addr);
skb               119 drivers/staging/netlogic/xlr_net.c 		skb = (struct sk_buff *)(*(unsigned long *)addr);
skb               120 drivers/staging/netlogic/xlr_net.c 		skb->dev = adapter->netdev[port];
skb               121 drivers/staging/netlogic/xlr_net.c 		if (!skb->dev)
skb               123 drivers/staging/netlogic/xlr_net.c 		ndev = skb->dev;
skb               127 drivers/staging/netlogic/xlr_net.c 		skb_reserve(skb, BYTE_OFFSET);
skb               128 drivers/staging/netlogic/xlr_net.c 		skb_put(skb, length);
skb               129 drivers/staging/netlogic/xlr_net.c 		skb->protocol = eth_type_trans(skb, skb->dev);
skb               130 drivers/staging/netlogic/xlr_net.c 		netif_rx(skb);
skb               228 drivers/staging/netlogic/xlr_net.c 			     struct sk_buff *skb)
skb               230 drivers/staging/netlogic/xlr_net.c 	unsigned long physkb = virt_to_phys(skb);
skb               236 drivers/staging/netlogic/xlr_net.c 		(u64)skb->len << 40	|	/* Length of data */
skb               246 drivers/staging/netlogic/xlr_net.c static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
skb               254 drivers/staging/netlogic/xlr_net.c 	xlr_make_tx_desc(&msg, virt_to_phys(skb->data), skb);
skb               259 drivers/staging/netlogic/xlr_net.c 		dev_kfree_skb_any(skb);
skb                29 drivers/staging/octeon/ethernet-mem.c 		struct sk_buff *skb = dev_alloc_skb(size + 256);
skb                31 drivers/staging/octeon/ethernet-mem.c 		if (unlikely(!skb))
skb                33 drivers/staging/octeon/ethernet-mem.c 		skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
skb                34 drivers/staging/octeon/ethernet-mem.c 		*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
skb                35 drivers/staging/octeon/ethernet-mem.c 		cvmx_fpa_free(skb->data, pool, size / 128);
skb                54 drivers/staging/octeon/ethernet-mem.c 			struct sk_buff *skb =
skb                57 drivers/staging/octeon/ethernet-mem.c 			dev_kfree_skb(skb);
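
ethernet-mem.c above seeds a hardware buffer pool from skbs and stores each sk_buff pointer in the word just below skb->data, so a raw buffer address coming back from the hardware can be mapped to its skb (the same trick the netlogic and ethernet-tx entries rely on). A sketch of both halves, assuming the extra 256 bytes cover the alignment slack and the back pointer:

#include <linux/skbuff.h>

/* Allocate an skb whose data pointer is 128-byte aligned for the DMA
 * engine, with the owning skb pointer stashed immediately below data. */
static struct sk_buff *my_alloc_hw_buf(unsigned int size, void **hw_buf)
{
	struct sk_buff *skb = dev_alloc_skb(size + 256);

	if (!skb)
		return NULL;
	skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
	*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
	*hw_buf = skb->data;
	return skb;
}

/* Recover the owning skb from a buffer address the hardware returned. */
static struct sk_buff *my_buf_to_skb(void *hw_buf)
{
	return *(struct sk_buff **)((u8 *)hw_buf - sizeof(void *));
}
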
skb               138 drivers/staging/octeon/ethernet-rx.c static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb)
skb               171 drivers/staging/octeon/ethernet-rx.c 		skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
skb               215 drivers/staging/octeon/ethernet-rx.c 		struct sk_buff *skb = NULL;
skb               258 drivers/staging/octeon/ethernet-rx.c 			skb = *pskb;
skb               259 drivers/staging/octeon/ethernet-rx.c 			prefetch(&skb->head);
skb               260 drivers/staging/octeon/ethernet-rx.c 			prefetch(&skb->len);
skb               282 drivers/staging/octeon/ethernet-rx.c 			skb->data = skb->head + work->packet_ptr.s.addr -
skb               283 drivers/staging/octeon/ethernet-rx.c 				cvmx_ptr_to_phys(skb->head);
skb               284 drivers/staging/octeon/ethernet-rx.c 			prefetch(skb->data);
skb               285 drivers/staging/octeon/ethernet-rx.c 			skb->len = work->word1.len;
skb               286 drivers/staging/octeon/ethernet-rx.c 			skb_set_tail_pointer(skb, skb->len);
skb               293 drivers/staging/octeon/ethernet-rx.c 			skb = dev_alloc_skb(work->word1.len);
skb               294 drivers/staging/octeon/ethernet-rx.c 			if (!skb) {
skb               316 drivers/staging/octeon/ethernet-rx.c 				skb_put_data(skb, ptr, work->word1.len);
skb               319 drivers/staging/octeon/ethernet-rx.c 				copy_segments_to_skb(work, skb);
skb               332 drivers/staging/octeon/ethernet-rx.c 				skb->protocol = eth_type_trans(skb, dev);
skb               333 drivers/staging/octeon/ethernet-rx.c 				skb->dev = dev;
skb               339 drivers/staging/octeon/ethernet-rx.c 					skb->ip_summed = CHECKSUM_NONE;
skb               341 drivers/staging/octeon/ethernet-rx.c 					skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               346 drivers/staging/octeon/ethernet-rx.c 					dev->stats.rx_bytes += skb->len;
skb               348 drivers/staging/octeon/ethernet-rx.c 				netif_receive_skb(skb);
skb               355 drivers/staging/octeon/ethernet-rx.c 				dev_kfree_skb_irq(skb);
skb               364 drivers/staging/octeon/ethernet-rx.c 			dev_kfree_skb_irq(skb);
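
The ethernet-rx.c entries above finish a receive by resolving the protocol, choosing between CHECKSUM_NONE and CHECKSUM_UNNECESSARY from the hardware's verdict, and delivering with netif_receive_skb() (the NAPI path, unlike the netif_rx() drivers above). A sketch; hw_csum_ok is a stand-in for the octeon work-entry checksum bits:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_rx_finish(struct net_device *dev, struct sk_buff *skb,
			 bool hw_csum_ok)
{
	skb->protocol = eth_type_trans(skb, dev);
	/* Only claim a verified checksum when the hardware says so. */
	skb->ip_summed = hw_csum_ok ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;	/* before the stack takes it */
	netif_receive_skb(skb);
}
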
skb                30 drivers/staging/octeon/ethernet-tx.c #define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))
skb                40 drivers/staging/octeon/ethernet-tx.c #define GET_SKBUFF_QOS(skb) 0
skb               128 drivers/staging/octeon/ethernet-tx.c int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
skb               160 drivers/staging/octeon/ethernet-tx.c 		qos = GET_SKBUFF_QOS(skb);
skb               191 drivers/staging/octeon/ethernet-tx.c 	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
skb               192 drivers/staging/octeon/ethernet-tx.c 		if (unlikely(__skb_linearize(skb))) {
skb               229 drivers/staging/octeon/ethernet-tx.c 	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
skb               239 drivers/staging/octeon/ethernet-tx.c 				int add_bytes = 64 - skb->len;
skb               241 drivers/staging/octeon/ethernet-tx.c 				if ((skb_tail_pointer(skb) + add_bytes) <=
skb               242 drivers/staging/octeon/ethernet-tx.c 				    skb_end_pointer(skb))
skb               243 drivers/staging/octeon/ethernet-tx.c 					__skb_put_zero(skb, add_bytes);
skb               255 drivers/staging/octeon/ethernet-tx.c 	pko_command.s.total_bytes = skb->len;
skb               263 drivers/staging/octeon/ethernet-tx.c 	if (skb_shinfo(skb)->nr_frags == 0) {
skb               264 drivers/staging/octeon/ethernet-tx.c 		hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
skb               266 drivers/staging/octeon/ethernet-tx.c 		hw_buffer.s.size = skb->len;
skb               268 drivers/staging/octeon/ethernet-tx.c 		hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
skb               270 drivers/staging/octeon/ethernet-tx.c 		hw_buffer.s.size = skb_headlen(skb);
skb               271 drivers/staging/octeon/ethernet-tx.c 		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
skb               272 drivers/staging/octeon/ethernet-tx.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               273 drivers/staging/octeon/ethernet-tx.c 			skb_frag_t *fs = skb_shinfo(skb)->frags + i;
skb               278 drivers/staging/octeon/ethernet-tx.c 			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
skb               281 drivers/staging/octeon/ethernet-tx.c 			XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
skb               282 drivers/staging/octeon/ethernet-tx.c 		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
skb               283 drivers/staging/octeon/ethernet-tx.c 		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
skb               299 drivers/staging/octeon/ethernet-tx.c 	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
skb               300 drivers/staging/octeon/ethernet-tx.c 	if (unlikely(skb->data < fpa_head)) {
skb               305 drivers/staging/octeon/ethernet-tx.c 	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
skb               309 drivers/staging/octeon/ethernet-tx.c 	if (unlikely(skb_shared(skb))) {
skb               313 drivers/staging/octeon/ethernet-tx.c 	if (unlikely(skb_cloned(skb))) {
skb               317 drivers/staging/octeon/ethernet-tx.c 	if (unlikely(skb_header_cloned(skb))) {
skb               321 drivers/staging/octeon/ethernet-tx.c 	if (unlikely(skb->destructor)) {
skb               325 drivers/staging/octeon/ethernet-tx.c 	if (unlikely(skb_shinfo(skb)->nr_frags)) {
skb               330 drivers/staging/octeon/ethernet-tx.c 	    (skb->truesize !=
skb               331 drivers/staging/octeon/ethernet-tx.c 	     sizeof(*skb) + skb_end_offset(skb))) {
skb               342 drivers/staging/octeon/ethernet-tx.c 	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
skb               345 drivers/staging/octeon/ethernet-tx.c 	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
skb               351 drivers/staging/octeon/ethernet-tx.c 	dst_release(skb_dst(skb));
skb               352 drivers/staging/octeon/ethernet-tx.c 	skb_dst_set(skb, NULL);
skb               353 drivers/staging/octeon/ethernet-tx.c 	skb_ext_reset(skb);
skb               354 drivers/staging/octeon/ethernet-tx.c 	nf_reset_ct(skb);
skb               357 drivers/staging/octeon/ethernet-tx.c 	skb->tc_index = 0;
skb               358 drivers/staging/octeon/ethernet-tx.c 	skb_reset_tc(skb);
skb               365 drivers/staging/octeon/ethernet-tx.c 	if ((skb->protocol == htons(ETH_P_IP)) &&
skb               366 drivers/staging/octeon/ethernet-tx.c 	    (ip_hdr(skb)->version == 4) &&
skb               367 drivers/staging/octeon/ethernet-tx.c 	    (ip_hdr(skb)->ihl == 5) &&
skb               368 drivers/staging/octeon/ethernet-tx.c 	    ((ip_hdr(skb)->frag_off == 0) ||
skb               369 drivers/staging/octeon/ethernet-tx.c 	     (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
skb               370 drivers/staging/octeon/ethernet-tx.c 	    ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
skb               371 drivers/staging/octeon/ethernet-tx.c 	     (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
skb               373 drivers/staging/octeon/ethernet-tx.c 		pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
skb               445 drivers/staging/octeon/ethernet-tx.c 		skb->next = to_free_list;
skb               446 drivers/staging/octeon/ethernet-tx.c 		to_free_list = skb;
skb               453 drivers/staging/octeon/ethernet-tx.c 		__skb_queue_tail(&priv->tx_free_list[qos], skb);
skb               510 drivers/staging/octeon/ethernet-tx.c int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
skb               523 drivers/staging/octeon/ethernet-tx.c 		dev_kfree_skb_any(skb);
skb               534 drivers/staging/octeon/ethernet-tx.c 		dev_kfree_skb_any(skb);
skb               555 drivers/staging/octeon/ethernet-tx.c 	memcpy(copy_location, skb->data, skb->len);
skb               562 drivers/staging/octeon/ethernet-tx.c 		work->word0.pip.cn38xx.hw_chksum = skb->csum;
skb               563 drivers/staging/octeon/ethernet-tx.c 	work->word1.len = skb->len;
skb               578 drivers/staging/octeon/ethernet-tx.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               587 drivers/staging/octeon/ethernet-tx.c 		    (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
skb               588 drivers/staging/octeon/ethernet-tx.c 		    (ip_hdr(skb)->protocol == IPPROTO_UDP);
skb               599 drivers/staging/octeon/ethernet-tx.c 		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
skb               600 drivers/staging/octeon/ethernet-tx.c 					  (ip_hdr(skb)->frag_off ==
skb               606 drivers/staging/octeon/ethernet-tx.c 		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
skb               607 drivers/staging/octeon/ethernet-tx.c 		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
skb               622 drivers/staging/octeon/ethernet-tx.c 		memcpy(work->packet_data, skb->data + 10,
skb               631 drivers/staging/octeon/ethernet-tx.c 		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
skb               632 drivers/staging/octeon/ethernet-tx.c 		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
skb               634 drivers/staging/octeon/ethernet-tx.c 		    (skb->pkt_type == PACKET_BROADCAST);
skb               636 drivers/staging/octeon/ethernet-tx.c 		    (skb->pkt_type == PACKET_MULTICAST);
skb               644 drivers/staging/octeon/ethernet-tx.c 		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
skb               651 drivers/staging/octeon/ethernet-tx.c 	dev->stats.tx_bytes += skb->len;
skb               652 drivers/staging/octeon/ethernet-tx.c 	dev_consume_skb_any(skb);
skb                 8 drivers/staging/octeon/ethernet-tx.h int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
skb                 9 drivers/staging/octeon/ethernet-tx.h int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
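
ethernet-tx.c above builds a gather list for fragmented skbs: one hardware buffer for the linear head (skb_headlen()) plus one per page fragment, linearizing first when nr_frags exceeds what the descriptor holds. A sketch of that walk; struct my_seg is a stand-in for the real hw_buffer descriptor, and DMA mapping is left to the caller:

#include <linux/io.h>
#include <linux/skbuff.h>

struct my_seg {
	phys_addr_t addr;
	unsigned int len;
};

/* Fill segs[] (room for 1 + MAX_SKB_FRAGS entries); return the count. */
static int my_build_gather(struct sk_buff *skb, struct my_seg *segs)
{
	int i, n = 0;

	segs[n].addr = virt_to_phys(skb->data);
	segs[n].len = skb_headlen(skb);
	n++;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		segs[n].addr = virt_to_phys(skb_frag_address(frag));
		segs[n].len = skb_frag_size(frag);
		n++;
	}
	return n;
}
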
skb              1352 drivers/staging/qlge/qlge.h 	struct sk_buff *skb;
skb              1372 drivers/staging/qlge/qlge.h 		struct sk_buff *skb;
skb              1380 drivers/staging/qlge/qlge.h #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
skb              2266 drivers/staging/qlge/qlge.h netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
skb               508 drivers/staging/qlge/qlge_ethtool.c static void ql_create_lb_frame(struct sk_buff *skb,
skb               511 drivers/staging/qlge/qlge_ethtool.c 	memset(skb->data, 0xFF, frame_size);
skb               513 drivers/staging/qlge/qlge_ethtool.c 	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
skb               514 drivers/staging/qlge/qlge_ethtool.c 	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
skb               515 drivers/staging/qlge/qlge_ethtool.c 	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
skb               519 drivers/staging/qlge/qlge_ethtool.c 					struct sk_buff *skb)
skb               521 drivers/staging/qlge/qlge_ethtool.c 	unsigned int frame_size = skb->len;
skb               523 drivers/staging/qlge/qlge_ethtool.c 	if ((*(skb->data + 3) == 0xFF) &&
skb               524 drivers/staging/qlge/qlge_ethtool.c 		(*(skb->data + frame_size / 2 + 10) == 0xBE) &&
skb               525 drivers/staging/qlge/qlge_ethtool.c 		(*(skb->data + frame_size / 2 + 12) == 0xAF)) {
skb               535 drivers/staging/qlge/qlge_ethtool.c 	struct sk_buff *skb;
skb               539 drivers/staging/qlge/qlge_ethtool.c 		skb = netdev_alloc_skb(qdev->ndev, size);
skb               540 drivers/staging/qlge/qlge_ethtool.c 		if (!skb)
skb               543 drivers/staging/qlge/qlge_ethtool.c 		skb->queue_mapping = 0;
skb               544 drivers/staging/qlge/qlge_ethtool.c 		skb_put(skb, size);
skb               545 drivers/staging/qlge/qlge_ethtool.c 		ql_create_lb_frame(skb, size);
skb               546 drivers/staging/qlge/qlge_ethtool.c 		rc = ql_lb_send(skb, qdev->ndev);
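
The qlge_ethtool.c entries above implement a loopback self-test: stamp marker bytes into a frame, send it to the device with ql_lb_send(), and verify the markers when it comes back. A sketch of the stamp/check pair mirroring the indexed offsets (the frame_size &= ~1 rounding is an assumption carried over from the surrounding driver code):

#include <linux/skbuff.h>
#include <linux/string.h>

static void my_create_lb_frame(struct sk_buff *skb, unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size &= ~1;
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	skb->data[frame_size / 2 + 10] = 0xBE;
	skb->data[frame_size / 2 + 12] = 0xAF;
}

static bool my_check_lb_frame(const struct sk_buff *skb)
{
	unsigned int frame_size = skb->len & ~1U;

	return skb->data[3] == 0xFF &&
	       skb->data[frame_size / 2 + 10] == 0xBE &&
	       skb->data[frame_size / 2 + 12] == 0xAF;
}
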
skb              1206 drivers/staging/qlge/qlge_main.c 			if (sbq_desc->p.skb == NULL) {
skb              1211 drivers/staging/qlge/qlge_main.c 				sbq_desc->p.skb =
skb              1214 drivers/staging/qlge/qlge_main.c 				if (sbq_desc->p.skb == NULL) {
skb              1218 drivers/staging/qlge/qlge_main.c 				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
skb              1220 drivers/staging/qlge/qlge_main.c 						     sbq_desc->p.skb->data,
skb              1227 drivers/staging/qlge/qlge_main.c 					dev_kfree_skb_any(sbq_desc->p.skb);
skb              1228 drivers/staging/qlge/qlge_main.c 					sbq_desc->p.skb = NULL;
skb              1311 drivers/staging/qlge/qlge_main.c 		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
skb              1313 drivers/staging/qlge/qlge_main.c 	int len = skb_headlen(skb);
skb              1317 drivers/staging/qlge/qlge_main.c 	int frag_cnt = skb_shinfo(skb)->nr_frags;
skb              1326 drivers/staging/qlge/qlge_main.c 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
skb              1350 drivers/staging/qlge/qlge_main.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
skb              1497 drivers/staging/qlge/qlge_main.c 	struct sk_buff *skb;
skb              1509 drivers/staging/qlge/qlge_main.c 	skb = napi_get_frags(napi);
skb              1510 drivers/staging/qlge/qlge_main.c 	if (!skb) {
skb              1518 drivers/staging/qlge/qlge_main.c 	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
skb              1523 drivers/staging/qlge/qlge_main.c 	skb->len += length;
skb              1524 drivers/staging/qlge/qlge_main.c 	skb->data_len += length;
skb              1525 drivers/staging/qlge/qlge_main.c 	skb->truesize += length;
skb              1526 drivers/staging/qlge/qlge_main.c 	skb_shinfo(skb)->nr_frags++;
skb              1530 drivers/staging/qlge/qlge_main.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1531 drivers/staging/qlge/qlge_main.c 	skb_record_rx_queue(skb, rx_ring->cq_id);
skb              1533 drivers/staging/qlge/qlge_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
skb              1545 drivers/staging/qlge/qlge_main.c 	struct sk_buff *skb = NULL;
skb              1551 drivers/staging/qlge/qlge_main.c 	skb = netdev_alloc_skb(ndev, length);
skb              1552 drivers/staging/qlge/qlge_main.c 	if (!skb) {
skb              1573 drivers/staging/qlge/qlge_main.c 	if (skb->len > ndev->mtu + hlen) {
skb              1579 drivers/staging/qlge/qlge_main.c 	skb_put_data(skb, addr, hlen);
skb              1583 drivers/staging/qlge/qlge_main.c 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
skb              1586 drivers/staging/qlge/qlge_main.c 	skb->len += length - hlen;
skb              1587 drivers/staging/qlge/qlge_main.c 	skb->data_len += length - hlen;
skb              1588 drivers/staging/qlge/qlge_main.c 	skb->truesize += length - hlen;
skb              1591 drivers/staging/qlge/qlge_main.c 	rx_ring->rx_bytes += skb->len;
skb              1592 drivers/staging/qlge/qlge_main.c 	skb->protocol = eth_type_trans(skb, ndev);
skb              1593 drivers/staging/qlge/qlge_main.c 	skb_checksum_none_assert(skb);
skb              1601 drivers/staging/qlge/qlge_main.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1609 drivers/staging/qlge/qlge_main.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1617 drivers/staging/qlge/qlge_main.c 	skb_record_rx_queue(skb, rx_ring->cq_id);
skb              1619 drivers/staging/qlge/qlge_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
skb              1620 drivers/staging/qlge/qlge_main.c 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
skb              1621 drivers/staging/qlge/qlge_main.c 		napi_gro_receive(napi, skb);
skb              1623 drivers/staging/qlge/qlge_main.c 		netif_receive_skb(skb);
skb              1626 drivers/staging/qlge/qlge_main.c 	dev_kfree_skb_any(skb);
skb              1638 drivers/staging/qlge/qlge_main.c 	struct sk_buff *skb = NULL;
skb              1642 drivers/staging/qlge/qlge_main.c 	skb = sbq_desc->p.skb;
skb              1656 drivers/staging/qlge/qlge_main.c 	skb_put_data(new_skb, skb->data, length);
skb              1662 drivers/staging/qlge/qlge_main.c 	skb = new_skb;
skb              1667 drivers/staging/qlge/qlge_main.c 		dev_kfree_skb_any(skb);
skb              1673 drivers/staging/qlge/qlge_main.c 		ql_check_lb_frame(qdev, skb);
skb              1674 drivers/staging/qlge/qlge_main.c 		dev_kfree_skb_any(skb);
skb              1681 drivers/staging/qlge/qlge_main.c 	if (skb->len > ndev->mtu + ETH_HLEN) {
skb              1682 drivers/staging/qlge/qlge_main.c 		dev_kfree_skb_any(skb);
skb              1687 drivers/staging/qlge/qlge_main.c 	prefetch(skb->data);
skb              1703 drivers/staging/qlge/qlge_main.c 	rx_ring->rx_bytes += skb->len;
skb              1704 drivers/staging/qlge/qlge_main.c 	skb->protocol = eth_type_trans(skb, ndev);
skb              1705 drivers/staging/qlge/qlge_main.c 	skb_checksum_none_assert(skb);
skb              1716 drivers/staging/qlge/qlge_main.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1720 drivers/staging/qlge/qlge_main.c 			struct iphdr *iph = (struct iphdr *) skb->data;
skb              1723 drivers/staging/qlge/qlge_main.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1731 drivers/staging/qlge/qlge_main.c 	skb_record_rx_queue(skb, rx_ring->cq_id);
skb              1733 drivers/staging/qlge/qlge_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
skb              1734 drivers/staging/qlge/qlge_main.c 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
skb              1735 drivers/staging/qlge/qlge_main.c 		napi_gro_receive(&rx_ring->napi, skb);
skb              1737 drivers/staging/qlge/qlge_main.c 		netif_receive_skb(skb);
skb              1740 drivers/staging/qlge/qlge_main.c static void ql_realign_skb(struct sk_buff *skb, int len)
skb              1742 drivers/staging/qlge/qlge_main.c 	void *temp_addr = skb->data;
skb              1748 drivers/staging/qlge/qlge_main.c 	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
skb              1749 drivers/staging/qlge/qlge_main.c 	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
skb              1750 drivers/staging/qlge/qlge_main.c 	memmove(skb->data, temp_addr, len);
skb              1764 drivers/staging/qlge/qlge_main.c 	struct sk_buff *skb = NULL;
skb              1784 drivers/staging/qlge/qlge_main.c 		skb = sbq_desc->p.skb;
skb              1785 drivers/staging/qlge/qlge_main.c 		ql_realign_skb(skb, hdr_len);
skb              1786 drivers/staging/qlge/qlge_main.c 		skb_put(skb, hdr_len);
skb              1787 drivers/staging/qlge/qlge_main.c 		sbq_desc->p.skb = NULL;
skb              1796 drivers/staging/qlge/qlge_main.c 		return skb;
skb              1818 drivers/staging/qlge/qlge_main.c 			skb_put_data(skb, sbq_desc->p.skb->data, length);
skb              1832 drivers/staging/qlge/qlge_main.c 			skb = sbq_desc->p.skb;
skb              1833 drivers/staging/qlge/qlge_main.c 			ql_realign_skb(skb, length);
skb              1834 drivers/staging/qlge/qlge_main.c 			skb_put(skb, length);
skb              1841 drivers/staging/qlge/qlge_main.c 			sbq_desc->p.skb = NULL;
skb              1857 drivers/staging/qlge/qlge_main.c 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
skb              1860 drivers/staging/qlge/qlge_main.c 			skb->len += length;
skb              1861 drivers/staging/qlge/qlge_main.c 			skb->data_len += length;
skb              1862 drivers/staging/qlge/qlge_main.c 			skb->truesize += length;
skb              1870 drivers/staging/qlge/qlge_main.c 			skb = netdev_alloc_skb(qdev->ndev, length);
skb              1871 drivers/staging/qlge/qlge_main.c 			if (skb == NULL) {
skb              1881 drivers/staging/qlge/qlge_main.c 			skb_reserve(skb, NET_IP_ALIGN);
skb              1885 drivers/staging/qlge/qlge_main.c 			skb_fill_page_desc(skb, 0,
skb              1889 drivers/staging/qlge/qlge_main.c 			skb->len += length;
skb              1890 drivers/staging/qlge/qlge_main.c 			skb->data_len += length;
skb              1891 drivers/staging/qlge/qlge_main.c 			skb->truesize += length;
skb              1895 drivers/staging/qlge/qlge_main.c 			__pskb_pull_tail(skb, hlen);
skb              1928 drivers/staging/qlge/qlge_main.c 			skb = sbq_desc->p.skb;
skb              1929 drivers/staging/qlge/qlge_main.c 			sbq_desc->p.skb = NULL;
skb              1930 drivers/staging/qlge/qlge_main.c 			skb_reserve(skb, NET_IP_ALIGN);
skb              1940 drivers/staging/qlge/qlge_main.c 			skb_fill_page_desc(skb, i,
skb              1944 drivers/staging/qlge/qlge_main.c 			skb->len += size;
skb              1945 drivers/staging/qlge/qlge_main.c 			skb->data_len += size;
skb              1946 drivers/staging/qlge/qlge_main.c 			skb->truesize += size;
skb              1952 drivers/staging/qlge/qlge_main.c 		__pskb_pull_tail(skb, hlen);
skb              1954 drivers/staging/qlge/qlge_main.c 	return skb;
skb              1964 drivers/staging/qlge/qlge_main.c 	struct sk_buff *skb = NULL;
skb              1968 drivers/staging/qlge/qlge_main.c 	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
skb              1969 drivers/staging/qlge/qlge_main.c 	if (unlikely(!skb)) {
skb              1979 drivers/staging/qlge/qlge_main.c 		dev_kfree_skb_any(skb);
skb              1986 drivers/staging/qlge/qlge_main.c 	if (skb->len > ndev->mtu + ETH_HLEN) {
skb              1987 drivers/staging/qlge/qlge_main.c 		dev_kfree_skb_any(skb);
skb              1994 drivers/staging/qlge/qlge_main.c 		ql_check_lb_frame(qdev, skb);
skb              1995 drivers/staging/qlge/qlge_main.c 		dev_kfree_skb_any(skb);
skb              1999 drivers/staging/qlge/qlge_main.c 	prefetch(skb->data);
skb              2015 drivers/staging/qlge/qlge_main.c 	skb->protocol = eth_type_trans(skb, ndev);
skb              2016 drivers/staging/qlge/qlge_main.c 	skb_checksum_none_assert(skb);
skb              2027 drivers/staging/qlge/qlge_main.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2031 drivers/staging/qlge/qlge_main.c 			struct iphdr *iph = (struct iphdr *) skb->data;
skb              2034 drivers/staging/qlge/qlge_main.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              2042 drivers/staging/qlge/qlge_main.c 	rx_ring->rx_bytes += skb->len;
skb              2043 drivers/staging/qlge/qlge_main.c 	skb_record_rx_queue(skb, rx_ring->cq_id);
skb              2045 drivers/staging/qlge/qlge_main.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
skb              2046 drivers/staging/qlge/qlge_main.c 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
skb              2047 drivers/staging/qlge/qlge_main.c 		napi_gro_receive(&rx_ring->napi, skb);
skb              2049 drivers/staging/qlge/qlge_main.c 		netif_receive_skb(skb);
skb              2114 drivers/staging/qlge/qlge_main.c 	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
skb              2116 drivers/staging/qlge/qlge_main.c 	dev_kfree_skb(tx_ring_desc->skb);
skb              2117 drivers/staging/qlge/qlge_main.c 	tx_ring_desc->skb = NULL;
skb              2561 drivers/staging/qlge/qlge_main.c static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
skb              2564 drivers/staging/qlge/qlge_main.c 	if (skb_is_gso(skb)) {
skb              2566 drivers/staging/qlge/qlge_main.c 		__be16 l3_proto = vlan_get_protocol(skb);
skb              2568 drivers/staging/qlge/qlge_main.c 		err = skb_cow_head(skb, 0);
skb              2574 drivers/staging/qlge/qlge_main.c 		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
skb              2576 drivers/staging/qlge/qlge_main.c 		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
skb              2578 drivers/staging/qlge/qlge_main.c 		    cpu_to_le16(skb_network_offset(skb) |
skb              2579 drivers/staging/qlge/qlge_main.c 				skb_transport_offset(skb)
skb              2581 drivers/staging/qlge/qlge_main.c 		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
skb              2584 drivers/staging/qlge/qlge_main.c 			struct iphdr *iph = ip_hdr(skb);
skb              2587 drivers/staging/qlge/qlge_main.c 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
skb              2593 drivers/staging/qlge/qlge_main.c 			tcp_hdr(skb)->check =
skb              2594 drivers/staging/qlge/qlge_main.c 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb              2595 drivers/staging/qlge/qlge_main.c 					     &ipv6_hdr(skb)->daddr,
skb              2603 drivers/staging/qlge/qlge_main.c static void ql_hw_csum_setup(struct sk_buff *skb,
skb              2607 drivers/staging/qlge/qlge_main.c 	struct iphdr *iph = ip_hdr(skb);
skb              2610 drivers/staging/qlge/qlge_main.c 	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
skb              2612 drivers/staging/qlge/qlge_main.c 		cpu_to_le16(skb_network_offset(skb) |
skb              2613 drivers/staging/qlge/qlge_main.c 		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
skb              2618 drivers/staging/qlge/qlge_main.c 		check = &(tcp_hdr(skb)->check);
skb              2621 drivers/staging/qlge/qlge_main.c 		    cpu_to_le16(skb_transport_offset(skb) +
skb              2622 drivers/staging/qlge/qlge_main.c 				(tcp_hdr(skb)->doff << 2));
skb              2624 drivers/staging/qlge/qlge_main.c 		check = &(udp_hdr(skb)->check);
skb              2627 drivers/staging/qlge/qlge_main.c 		    cpu_to_le16(skb_transport_offset(skb) +
skb              2634 drivers/staging/qlge/qlge_main.c static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
skb              2641 drivers/staging/qlge/qlge_main.c 	u32 tx_ring_idx = (u32) skb->queue_mapping;
skb              2645 drivers/staging/qlge/qlge_main.c 	if (skb_padto(skb, ETH_ZLEN))
skb              2666 drivers/staging/qlge/qlge_main.c 	tx_ring_desc->skb = skb;
skb              2668 drivers/staging/qlge/qlge_main.c 	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
skb              2670 drivers/staging/qlge/qlge_main.c 	if (skb_vlan_tag_present(skb)) {
skb              2672 drivers/staging/qlge/qlge_main.c 			     "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
skb              2674 drivers/staging/qlge/qlge_main.c 		mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
skb              2676 drivers/staging/qlge/qlge_main.c 	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
skb              2678 drivers/staging/qlge/qlge_main.c 		dev_kfree_skb_any(skb);
skb              2680 drivers/staging/qlge/qlge_main.c 	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
skb              2681 drivers/staging/qlge/qlge_main.c 		ql_hw_csum_setup(skb,
skb              2684 drivers/staging/qlge/qlge_main.c 	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
skb              2700 drivers/staging/qlge/qlge_main.c 		     tx_ring->prod_idx, skb->len);
skb              2774 drivers/staging/qlge/qlge_main.c 		tx_ring_desc->skb = NULL;
skb              2867 drivers/staging/qlge/qlge_main.c 		if (sbq_desc->p.skb) {
skb              2872 drivers/staging/qlge/qlge_main.c 			dev_kfree_skb(sbq_desc->p.skb);
skb              2873 drivers/staging/qlge/qlge_main.c 			sbq_desc->p.skb = NULL;
skb              3067 drivers/staging/qlge/qlge_main.c 			if (tx_ring_desc && tx_ring_desc->skb) {
skb              3070 drivers/staging/qlge/qlge_main.c 					  tx_ring_desc->skb, j,
skb              3074 drivers/staging/qlge/qlge_main.c 				dev_kfree_skb(tx_ring_desc->skb);
skb              3075 drivers/staging/qlge/qlge_main.c 				tx_ring_desc->skb = NULL;
skb              4815 drivers/staging/qlge/qlge_main.c netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
skb              4817 drivers/staging/qlge/qlge_main.c 	return qlge_send(skb, ndev);
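
ql_tso() above shows the standard TSO prep: make the headers writable with skb_cow_head(), then seed the TCP checksum with a pseudo-header sum computed over a zero length so the hardware can finish it per segment. A condensed sketch of just that fixup, assuming TCP over IPv4 or IPv6:

#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

/* Returns 1 if TSO applies, 0 if not, negative error on failure. */
static int my_tso_prep(struct sk_buff *skb)
{
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);	/* headers must be writable */
	if (err < 0)
		return err;

	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;	/* hardware recomputes per segment */
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					   IPPROTO_TCP, 0);
	} else {
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr, 0,
					 IPPROTO_TCP, 0);
	}
	return 1;
}
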
skb               142 drivers/staging/rtl8188eu/core/rtw_security.c 	struct sk_buff *skb;
skb               174 drivers/staging/rtl8188eu/core/rtw_security.c 		skb = dev_alloc_skb(length);
skb               175 drivers/staging/rtl8188eu/core/rtw_security.c 		if (!skb)
skb               178 drivers/staging/rtl8188eu/core/rtw_security.c 		skb_put_data(skb, pframe, length);
skb               180 drivers/staging/rtl8188eu/core/rtw_security.c 		memmove(skb->data + 4, skb->data, pattrib->hdrlen);
skb               181 drivers/staging/rtl8188eu/core/rtw_security.c 		skb_pull(skb, 4);
skb               182 drivers/staging/rtl8188eu/core/rtw_security.c 		skb_trim(skb, skb->len - 4);
skb               184 drivers/staging/rtl8188eu/core/rtw_security.c 		if (crypto_ops->encrypt_mpdu(skb, pattrib->hdrlen, crypto_private)) {
skb               185 drivers/staging/rtl8188eu/core/rtw_security.c 			kfree_skb(skb);
skb               189 drivers/staging/rtl8188eu/core/rtw_security.c 		memcpy(pframe, skb->data, skb->len);
skb               191 drivers/staging/rtl8188eu/core/rtw_security.c 		pframe += skb->len;
skb               194 drivers/staging/rtl8188eu/core/rtw_security.c 		kfree_skb(skb);
skb               207 drivers/staging/rtl8188eu/core/rtw_security.c 		struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
skb               208 drivers/staging/rtl8188eu/core/rtw_security.c 		u8 *pframe = skb->data;
skb               221 drivers/staging/rtl8188eu/core/rtw_security.c 		memcpy(icv, pframe + skb->len - 4, 4);
skb               233 drivers/staging/rtl8188eu/core/rtw_security.c 		if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
skb               239 drivers/staging/rtl8188eu/core/rtw_security.c 		skb_push(skb, 4);
skb               240 drivers/staging/rtl8188eu/core/rtw_security.c 		skb_put(skb, 4);
skb               243 drivers/staging/rtl8188eu/core/rtw_security.c 		memcpy(pframe + skb->len - 4, icv, 4);
skb              1290 drivers/staging/rtl8188eu/core/rtw_security.c 			struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
skb              1292 drivers/staging/rtl8188eu/core/rtw_security.c 			u8 *key, *pframe = skb->data;
skb              1317 drivers/staging/rtl8188eu/core/rtw_security.c 			memcpy(icv, pframe + skb->len - icv_len, icv_len);
skb              1328 drivers/staging/rtl8188eu/core/rtw_security.c 			if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
skb              1334 drivers/staging/rtl8188eu/core/rtw_security.c 			skb_push(skb, iv_len);
skb              1335 drivers/staging/rtl8188eu/core/rtw_security.c 			skb_put(skb, icv_len);
skb              1338 drivers/staging/rtl8188eu/core/rtw_security.c 			memcpy(pframe + skb->len - icv_len, icv, icv_len);
skb               372 drivers/staging/rtl8188eu/core/rtw_xmit.c static void set_qos(struct sk_buff *skb, struct pkt_attrib *pattrib)
skb               377 drivers/staging/rtl8188eu/core/rtw_xmit.c 		skb_copy_bits(skb, ETH_HLEN, &ip_hdr, sizeof(ip_hdr));
skb                28 drivers/staging/rtl8188eu/include/osdep_intf.h u16 rtw_recv_select_queue(struct sk_buff *skb);
skb                20 drivers/staging/rtl8188eu/os_dep/mon.c static void unprotect_frame(struct sk_buff *skb, int iv_len, int icv_len)
skb                25 drivers/staging/rtl8188eu/os_dep/mon.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb                28 drivers/staging/rtl8188eu/os_dep/mon.c 	if (skb->len < hdr_len + iv_len + icv_len)
skb                35 drivers/staging/rtl8188eu/os_dep/mon.c 	memmove(skb->data + iv_len, skb->data, hdr_len);
skb                36 drivers/staging/rtl8188eu/os_dep/mon.c 	skb_pull(skb, iv_len);
skb                37 drivers/staging/rtl8188eu/os_dep/mon.c 	skb_trim(skb, skb->len - icv_len);
skb                43 drivers/staging/rtl8188eu/os_dep/mon.c 	struct sk_buff *skb;
skb                45 drivers/staging/rtl8188eu/os_dep/mon.c 	skb = netdev_alloc_skb(dev, data_len);
skb                46 drivers/staging/rtl8188eu/os_dep/mon.c 	if (!skb)
skb                48 drivers/staging/rtl8188eu/os_dep/mon.c 	skb_put_data(skb, data, data_len);
skb                54 drivers/staging/rtl8188eu/os_dep/mon.c 	unprotect_frame(skb, iv_len, icv_len);
skb                56 drivers/staging/rtl8188eu/os_dep/mon.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb                57 drivers/staging/rtl8188eu/os_dep/mon.c 	skb->protocol = eth_type_trans(skb, dev);
skb                58 drivers/staging/rtl8188eu/os_dep/mon.c 	netif_rx(skb);
skb               132 drivers/staging/rtl8188eu/os_dep/mon.c static netdev_tx_t mon_xmit(struct sk_buff *skb, struct net_device *dev)
skb               134 drivers/staging/rtl8188eu/os_dep/mon.c 	dev_kfree_skb(skb);
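
mon.c above converts a protected 802.11 frame into something the monitor interface can show: slide the header forward over the IV, pull the IV off the front, trim the ICV off the tail. A standalone sketch of that step:

#include <linux/ieee80211.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void my_strip_protection(struct sk_buff *skb, int iv_len, int icv_len)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	unsigned int hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (skb->len < hdr_len + iv_len + icv_len)
		return;	/* malformed frame, leave it untouched */

	memmove(skb->data + iv_len, skb->data, hdr_len);	/* cover the IV */
	skb_pull(skb, iv_len);
	skb_trim(skb, skb->len - icv_len);
}
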
skb               225 drivers/staging/rtl8188eu/os_dep/os_intfs.c static unsigned int rtw_classify8021d(struct sk_buff *skb)
skb               234 drivers/staging/rtl8188eu/os_dep/os_intfs.c 	if (skb->priority >= 256 && skb->priority <= 263)
skb               235 drivers/staging/rtl8188eu/os_dep/os_intfs.c 		return skb->priority - 256;
skb               237 drivers/staging/rtl8188eu/os_dep/os_intfs.c 	switch (skb->protocol) {
skb               239 drivers/staging/rtl8188eu/os_dep/os_intfs.c 		dscp = ip_hdr(skb)->tos & 0xfc;
skb               248 drivers/staging/rtl8188eu/os_dep/os_intfs.c static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               254 drivers/staging/rtl8188eu/os_dep/os_intfs.c 	skb->priority = rtw_classify8021d(skb);
skb               257 drivers/staging/rtl8188eu/os_dep/os_intfs.c 		skb->priority = qos_acm(pmlmepriv->acm_mask, skb->priority);
skb               259 drivers/staging/rtl8188eu/os_dep/os_intfs.c 	return rtw_1d_to_queue[skb->priority];
skb               262 drivers/staging/rtl8188eu/os_dep/os_intfs.c u16 rtw_recv_select_queue(struct sk_buff *skb)
skb               268 drivers/staging/rtl8188eu/os_dep/os_intfs.c 	u8 *pdata = skb->data;
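
rtw_classify8021d() above derives the WMM user priority: an explicit skb->priority in the 256..263 window is honored directly; otherwise the IPv4 DSCP bits pick the class. Sketch:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static unsigned int my_classify8021d(struct sk_buff *skb)
{
	unsigned int dscp = 0;

	/* Values 256..263 encode an explicit 802.1d priority 0..7. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	if (skb->protocol == htons(ETH_P_IP))
		dscp = ip_hdr(skb)->tos & 0xfc;

	return dscp >> 5;	/* top three DSCP bits -> priority */
}
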
skb                69 drivers/staging/rtl8188eu/os_dep/recv_linux.c 	struct sk_buff *skb;
skb                75 drivers/staging/rtl8188eu/os_dep/recv_linux.c 	skb = precv_frame->pkt;
skb                76 drivers/staging/rtl8188eu/os_dep/recv_linux.c 	if (!skb) {
skb                93 drivers/staging/rtl8188eu/os_dep/recv_linux.c 				pskb2 = skb_clone(skb, GFP_ATOMIC);
skb               102 drivers/staging/rtl8188eu/os_dep/recv_linux.c 				skb->dev = pnetdev;
skb               103 drivers/staging/rtl8188eu/os_dep/recv_linux.c 				skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
skb               105 drivers/staging/rtl8188eu/os_dep/recv_linux.c 				rtw_xmit_entry(skb, pnetdev);
skb               108 drivers/staging/rtl8188eu/os_dep/recv_linux.c 					skb = pskb2;
skb               115 drivers/staging/rtl8188eu/os_dep/recv_linux.c 	skb->ip_summed = CHECKSUM_NONE;
skb               116 drivers/staging/rtl8188eu/os_dep/recv_linux.c 	skb->dev = padapter->pnetdev;
skb               117 drivers/staging/rtl8188eu/os_dep/recv_linux.c 	skb->protocol = eth_type_trans(skb, padapter->pnetdev);
skb               119 drivers/staging/rtl8188eu/os_dep/recv_linux.c 	netif_rx(skb);
skb               112 drivers/staging/rtl8188eu/os_dep/xmit_linux.c static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
skb               132 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 		if (!memcmp(psta->hwaddr, &skb->data[6], 6))
skb               135 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 		newskb = skb_copy(skb, GFP_ATOMIC);
skb               163 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	dev_kfree_skb_any(skb);
skb                17 drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c 	struct sk_buff		*skb;
skb                37 drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c 			skb = dev_alloc_skb(frag_length +
skb                40 drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c 			skb = dev_alloc_skb(frag_length + 4);
skb                42 drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c 		if (!skb) {
skb                47 drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c 		memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb                48 drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c 		tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb                56 drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c 			seg_ptr = skb_put(skb, priv->rtllib->tx_headroom);
skb                64 drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c 		seg_ptr = skb_put(skb, frag_length);
skb                72 drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c 				       skb);
skb                74 drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c 			priv->rtllib->softmac_hard_start_xmit(skb, dev);
skb              1169 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 			  struct cb_desc *cb_desc, struct sk_buff *skb)
skb              1175 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 	pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
skb              1218 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
skb              1228 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 	pdesc->PktSize = (u16)skb->len-sizeof(struct tx_fwinfo_8190pci);
skb              1276 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 	pdesc->TxBufferSize = skb->len;
skb              1282 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 			      struct cb_desc *cb_desc, struct sk_buff *skb)
skb              1285 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 	dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
skb              1307 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 	entry->TxBufferSize = skb->len;
skb              1847 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 					      struct sk_buff *skb,
skb              1862 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 	tmp_buf = skb->data + pstats->RxDrvInfoSize + pstats->RxBufShift;
skb              2005 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 			 struct rx_desc *pdesc, struct sk_buff *skb)
skb              2036 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 	pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->RxBufShift);
skb              2058 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 	_rtl92e_translate_rx_signal_stats(dev, skb, stats, pdesc, pDrvInfo);
skb              2064 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c 	skb_trim(skb, skb->len - 4/*sCrcLng*/);
skb                31 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h 			  struct cb_desc *cb_desc, struct sk_buff *skb);
skb                33 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h 			      struct cb_desc *cb_desc, struct sk_buff *skb);
skb                35 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h 			 struct rx_desc *pdesc, struct sk_buff *skb);
skb                78 drivers/staging/rtl8192e/rtl8192e/rtl_core.c static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
skb                80 drivers/staging/rtl8192e/rtl8192e/rtl_core.c static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb                81 drivers/staging/rtl8192e/rtl8192e/rtl_core.c static void _rtl92e_tx_cmd(struct net_device *dev, struct sk_buff *skb);
skb                82 drivers/staging/rtl8192e/rtl8192e/rtl_core.c static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb);
skb              1116 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct sk_buff *skb = NULL;
skb              1141 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			skb = __skb_peek(&ring->queue);
skb              1142 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			tcb_desc = (struct cb_desc *)(skb->cb +
skb              1557 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			struct sk_buff *skb = priv->rx_buf[rx_queue_idx][i];
skb              1559 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			if (!skb)
skb              1563 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 				*((dma_addr_t *)skb->cb),
skb              1565 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			kfree_skb(skb);
skb              1584 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
skb              1587 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			skb->len, PCI_DMA_TODEVICE);
skb              1588 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		kfree_skb(skb);
skb              1597 drivers/staging/rtl8192e/rtl8192e/rtl_core.c static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
skb              1602 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
skb              1608 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		kfree_skb(skb);
skb              1616 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb              1617 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	skb_push(skb, priv->rtllib->tx_headroom);
skb              1618 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ret = _rtl92e_tx(dev, skb);
skb              1621 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		priv->rtllib->stats.tx_bytes += (skb->len -
skb              1627 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		kfree_skb(skb);
skb              1630 drivers/staging/rtl8192e/rtl8192e/rtl_core.c static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1634 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
skb              1641 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			kfree_skb(skb);
skb              1646 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb              1648 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		_rtl92e_tx_cmd(dev, skb);
skb              1656 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	skb_push(skb, priv->rtllib->tx_headroom);
skb              1657 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ret = _rtl92e_tx(dev, skb);
skb              1659 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		kfree_skb(skb);
skb              1671 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		struct sk_buff *skb;
skb              1679 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		skb = __skb_dequeue(&ring->queue);
skb              1681 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		skb->len, PCI_DMA_TODEVICE);
skb              1683 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		kfree_skb(skb);
skb              1689 drivers/staging/rtl8192e/rtl8192e/rtl_core.c static void _rtl92e_tx_cmd(struct net_device *dev, struct sk_buff *skb)
skb              1704 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb              1706 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	priv->ops->tx_fill_cmd_descriptor(dev, entry, tcb_desc, skb);
skb              1708 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	__skb_queue_tail(&ring->queue, skb);
skb              1712 drivers/staging/rtl8192e/rtl8192e/rtl_core.c static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
skb              1717 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
skb              1729 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		return skb->len;
skb              1736 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	header = (struct rtllib_hdr_1addr *)(((u8 *)skb->data) + fwinfo_size);
skb              1742 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		priv->stats.txbytesbroadcast += skb->len - fwinfo_size;
skb              1744 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		priv->stats.txbytesmulticast += skb->len - fwinfo_size;
skb              1746 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		priv->stats.txbytesunicast += skb->len - fwinfo_size;
skb              1759 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			    tcb_desc->queue_index, ring->idx, idx, skb->len,
skb              1762 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		return skb->len;
skb              1769 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	priv->ops->tx_fill_descriptor(dev, pdesc, tcb_desc, skb);
skb              1770 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	__skb_queue_tail(&ring->queue, skb);
skb              1798 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			struct sk_buff *skb = dev_alloc_skb(priv->rxbuffersize);
skb              1802 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			if (!skb)
skb              1804 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			skb->dev = dev;
skb              1805 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			priv->rx_buf[rx_queue_idx][i] = skb;
skb              1806 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			mapping = (dma_addr_t *)skb->cb;
skb              1808 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 						  skb_tail_pointer_rsl(skb),
skb              1812 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 				dev_kfree_skb_any(skb);
skb              1906 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 				struct sk_buff *skb =
skb              1911 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 						 skb->len, PCI_DMA_TODEVICE);
skb              1912 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 				kfree_skb(skb);
skb              2023 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		struct sk_buff *skb = priv->rx_buf[rx_queue_idx]
skb              2030 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		pdesc, skb))
skb              2040 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 				*((dma_addr_t *)skb->cb),
skb              2044 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		skb_put(skb, pdesc->Length);
skb              2045 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		skb_reserve(skb, stats.RxDrvInfoSize +
skb              2047 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		skb_trim(skb, skb->len - 4/*sCrcLng*/);
skb              2048 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		rtllib_hdr = (struct rtllib_hdr_1addr *)skb->data;
skb              2070 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		skb_len = skb->len;
skb              2072 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		if (!rtllib_rx(priv->rtllib, skb, &stats)) {
skb              2073 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			dev_kfree_skb_any(skb);
skb              2080 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		skb = new_skb;
skb              2081 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		skb->dev = dev;
skb              2084 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 								 skb;
skb              2085 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		*((dma_addr_t *)skb->cb) = pci_map_single(priv->pdev,
skb              2086 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 					    skb_tail_pointer_rsl(skb),
skb              2090 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 					  *((dma_addr_t *)skb->cb))) {
skb              2091 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			dev_kfree_skb_any(skb);
skb              2095 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		pdesc->BufferAddress = *((dma_addr_t *)skb->cb);
skb              2110 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct sk_buff *skb;
skb              2117 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			skb = skb_dequeue(&ieee->skb_waitQ[queue_index]);
skb              2118 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			ieee->softmac_data_hard_start_xmit(skb, dev, 0);
skb               281 drivers/staging/rtl8192e/rtl8192e/rtl_core.h 				   struct sk_buff *skb);
skb               285 drivers/staging/rtl8192e/rtl8192e/rtl_core.h 				       struct sk_buff *skb);
skb               289 drivers/staging/rtl8192e/rtl8192e/rtl_core.h 					   struct sk_buff *skb);
skb               291 drivers/staging/rtl8192e/rtl8192e/rtl_core.h 					  struct sk_buff *skb,
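
The rtl_core.c teardown entries above drain each tx ring by dequeuing the pending skbs, unmapping the DMA buffer, and freeing. A sketch under the assumption (borrowed from the rx entries above, not the real tx path, which reads the descriptor) that the DMA handle was stashed in skb->cb:

#include <linux/pci.h>
#include <linux/skbuff.h>

static void my_free_tx_queue(struct pci_dev *pdev, struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(q)) != NULL) {
		pci_unmap_single(pdev, *(dma_addr_t *)skb->cb, skb->len,
				 PCI_DMA_TODEVICE);
		kfree_skb(skb);
	}
}
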
skb                70 drivers/staging/rtl8192e/rtl819x_BAProc.c 	struct sk_buff *skb = NULL;
skb                82 drivers/staging/rtl8192e/rtl819x_BAProc.c 	skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
skb                83 drivers/staging/rtl8192e/rtl819x_BAProc.c 	if (!skb)
skb                86 drivers/staging/rtl8192e/rtl819x_BAProc.c 	memset(skb->data, 0, sizeof(struct rtllib_hdr_3addr));
skb                88 drivers/staging/rtl8192e/rtl819x_BAProc.c 	skb_reserve(skb, ieee->tx_headroom);
skb                90 drivers/staging/rtl8192e/rtl819x_BAProc.c 	BAReq = skb_put(skb, sizeof(struct rtllib_hdr_3addr));
skb                98 drivers/staging/rtl8192e/rtl819x_BAProc.c 	tag = skb_put(skb, 9);
skb               122 drivers/staging/rtl8192e/rtl819x_BAProc.c 	print_hex_dump_bytes("rtllib_ADDBA(): ", DUMP_PREFIX_NONE, skb->data,
skb               123 drivers/staging/rtl8192e/rtl819x_BAProc.c 			     skb->len);
skb               125 drivers/staging/rtl8192e/rtl819x_BAProc.c 	return skb;
skb               133 drivers/staging/rtl8192e/rtl819x_BAProc.c 	struct sk_buff *skb = NULL;
skb               147 drivers/staging/rtl8192e/rtl819x_BAProc.c 	skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
skb               148 drivers/staging/rtl8192e/rtl819x_BAProc.c 	if (!skb)
skb               151 drivers/staging/rtl8192e/rtl819x_BAProc.c 	skb_reserve(skb, ieee->tx_headroom);
skb               153 drivers/staging/rtl8192e/rtl819x_BAProc.c 	Delba = skb_put(skb, sizeof(struct rtllib_hdr_3addr));
skb               160 drivers/staging/rtl8192e/rtl819x_BAProc.c 	tag = skb_put(skb, 6);
skb               173 drivers/staging/rtl8192e/rtl819x_BAProc.c 	print_hex_dump_bytes("rtllib_DELBA(): ", DUMP_PREFIX_NONE, skb->data,
skb               174 drivers/staging/rtl8192e/rtl819x_BAProc.c 			     skb->len);
skb               176 drivers/staging/rtl8192e/rtl819x_BAProc.c 	return skb;
skb               182 drivers/staging/rtl8192e/rtl819x_BAProc.c 	struct sk_buff *skb;
skb               184 drivers/staging/rtl8192e/rtl819x_BAProc.c 	skb = rtllib_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ);
skb               186 drivers/staging/rtl8192e/rtl819x_BAProc.c 	if (skb) {
skb               188 drivers/staging/rtl8192e/rtl819x_BAProc.c 		softmac_mgmt_xmit(skb, ieee);
skb               197 drivers/staging/rtl8192e/rtl819x_BAProc.c 	struct sk_buff *skb;
skb               199 drivers/staging/rtl8192e/rtl819x_BAProc.c 	skb = rtllib_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP);
skb               200 drivers/staging/rtl8192e/rtl819x_BAProc.c 	if (skb)
skb               201 drivers/staging/rtl8192e/rtl819x_BAProc.c 		softmac_mgmt_xmit(skb, ieee);
skb               210 drivers/staging/rtl8192e/rtl819x_BAProc.c 	struct sk_buff *skb;
skb               212 drivers/staging/rtl8192e/rtl819x_BAProc.c 	skb = rtllib_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode);
skb               213 drivers/staging/rtl8192e/rtl819x_BAProc.c 	if (skb)
skb               214 drivers/staging/rtl8192e/rtl819x_BAProc.c 		softmac_mgmt_xmit(skb, ieee);
skb               219 drivers/staging/rtl8192e/rtl819x_BAProc.c int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
skb               230 drivers/staging/rtl8192e/rtl819x_BAProc.c 	if (skb->len < sizeof(struct rtllib_hdr_3addr) + 9) {
skb               232 drivers/staging/rtl8192e/rtl819x_BAProc.c 			    (int)skb->len,
skb               239 drivers/staging/rtl8192e/rtl819x_BAProc.c 			     skb->data, skb->len);
skb               242 drivers/staging/rtl8192e/rtl819x_BAProc.c 	req = (struct rtllib_hdr_3addr *) skb->data;
skb               309 drivers/staging/rtl8192e/rtl819x_BAProc.c int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
skb               319 drivers/staging/rtl8192e/rtl819x_BAProc.c 	if (skb->len < sizeof(struct rtllib_hdr_3addr) + 9) {
skb               321 drivers/staging/rtl8192e/rtl819x_BAProc.c 			    (int)skb->len,
skb               325 drivers/staging/rtl8192e/rtl819x_BAProc.c 	rsp = (struct rtllib_hdr_3addr *)skb->data;
skb               413 drivers/staging/rtl8192e/rtl819x_BAProc.c int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
skb               419 drivers/staging/rtl8192e/rtl819x_BAProc.c 	if (skb->len < sizeof(struct rtllib_hdr_3addr) + 6) {
skb               421 drivers/staging/rtl8192e/rtl819x_BAProc.c 			    (int)skb->len,
skb               436 drivers/staging/rtl8192e/rtl819x_BAProc.c 	print_hex_dump_bytes("rtllib_rx_DELBA(): ", DUMP_PREFIX_NONE, skb->data,
skb               437 drivers/staging/rtl8192e/rtl819x_BAProc.c 			     skb->len);
skb               439 drivers/staging/rtl8192e/rtl819x_BAProc.c 	delba = (struct rtllib_hdr_3addr *)skb->data;
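Each of the rtllib_rx_ADDBAReq/ADDBARsp/DELBA entries above rejects a frame before casting skb->data: the ADDBA handlers require 9 bytes of action body past the 3-address header, DELBA requires 6. A hedged sketch of that guard, with my_hdr_3addr as a hypothetical stand-in for rtllib_hdr_3addr:

        #include <linux/skbuff.h>
        #include <linux/errno.h>

        struct my_hdr_3addr {                   /* hypothetical stand-in */
                __le16 frame_ctl;
                __le16 duration_id;
                u8 addr1[6], addr2[6], addr3[6];
                __le16 seq_ctl;
        } __packed;

        static int rx_action_len_ok(struct sk_buff *skb, size_t body_len)
        {
                /* validate before the (struct my_hdr_3addr *)skb->data cast;
                 * body_len is 9 for ADDBA, 6 for DELBA in the lines above */
                if (skb->len < sizeof(struct my_hdr_3addr) + body_len)
                        return -EINVAL;
                return 0;
        }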
skb                65 drivers/staging/rtl8192e/rtllib.h #define skb_tail_pointer_rsl(skb) skb_tail_pointer(skb)
skb               632 drivers/staging/rtl8192e/rtllib.h 	struct sk_buff *skb;
skb              1707 drivers/staging/rtl8192e/rtllib.h 	int (*softmac_hard_start_xmit)(struct sk_buff *skb,
skb              1716 drivers/staging/rtl8192e/rtllib.h 	void (*softmac_data_hard_start_xmit)(struct sk_buff *skb,
skb              1941 drivers/staging/rtl8192e/rtllib.h int rtllib_xmit(struct sk_buff *skb,  struct net_device *dev);
skb              1945 drivers/staging/rtl8192e/rtllib.h int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
skb              1948 drivers/staging/rtl8192e/rtllib.h 			struct sk_buff *skb);
skb              1973 drivers/staging/rtl8192e/rtllib.h int rtllib_rx_frame_softmac(struct rtllib_device *ieee, struct sk_buff *skb,
skb              2016 drivers/staging/rtl8192e/rtllib.h void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee);
skb              2103 drivers/staging/rtl8192e/rtllib.h int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
skb              2104 drivers/staging/rtl8192e/rtllib.h int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
skb              2105 drivers/staging/rtl8192e/rtllib.h int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
skb               153 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               159 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
skb               161 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	if (skb_headroom(skb) < CCMP_HDR_LEN ||
skb               162 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	    skb_tailroom(skb) < CCMP_MIC_LEN ||
skb               163 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	    skb->len < hdr_len)
skb               166 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	pos = skb_push(skb, CCMP_HDR_LEN);
skb               187 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb               194 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		int data_len = skb->len - hdr_len - CCMP_HDR_LEN;
skb               202 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		skb_put(skb, CCMP_MIC_LEN);
skb               205 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN,
skb               222 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               227 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
skb               231 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
skb               236 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb               237 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	pos = skb->data + hdr_len;
skb               273 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 		size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
skb               309 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
skb               310 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	skb_pull(skb, CCMP_HDR_LEN);
skb               311 drivers/staging/rtl8192e/rtllib_crypt_ccmp.c 	skb_trim(skb, skb->len - CCMP_MIC_LEN);
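The tail of the CCMP decrypt entries is the in-place decapsulation idiom: slide the 802.11 header forward over the 8-byte CCMP header with memmove(), then let skb_pull()/skb_trim() fix the data pointer and length so the now-plaintext MSDU starts at skb->data. A minimal sketch, assuming the same 8-byte CCMP_HDR_LEN/CCMP_MIC_LEN framing:

        #include <linux/skbuff.h>
        #include <linux/string.h>

        #define CCMP_HDR_LEN 8
        #define CCMP_MIC_LEN 8

        static void ccmp_strip(struct sk_buff *skb, int hdr_len)
        {
                /* slide the 802.11 header forward over the CCMP header... */
                memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
                /* ...then drop the dead front bytes and the trailing MIC */
                skb_pull(skb, CCMP_HDR_LEN);
                skb_trim(skb, skb->len - CCMP_MIC_LEN);
        }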
skb               281 drivers/staging/rtl8192e/rtllib_crypt_tkip.c static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               287 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
skb               294 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
skb               295 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	    skb->len < hdr_len)
skb               298 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb               312 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	len = skb->len - hdr_len;
skb               313 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	pos = skb_push(skb, 8);
skb               336 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		icv = skb_put(skb, 4);
skb               367 drivers/staging/rtl8192e/rtllib_crypt_tkip.c static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               374 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
skb               383 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	if (skb->len < hdr_len + 8 + 4)
skb               386 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb               387 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	pos = skb->data + hdr_len;
skb               391 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 			netdev_dbg(skb->dev,
skb               399 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		netdev_dbg(skb->dev,
skb               406 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 			netdev_dbg(skb->dev,
skb               416 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
skb               423 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 				netdev_dbg(skb->dev,
skb               440 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		plen = skb->len - hdr_len - 12;
skb               452 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 				netdev_dbg(skb->dev,
skb               474 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 				netdev_dbg(skb->dev,
skb               491 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	memmove(skb->data + 8, skb->data, hdr_len);
skb               492 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	skb_pull(skb, 8);
skb               493 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	skb_trim(skb, skb->len - 4);
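The TKIP entries frame each MPDU with an 8-byte IV/extended IV in front and a 4-byte ICV at the tail: encrypt checks headroom/tailroom first, skb_push()es 8 bytes and slides the header forward, and decrypt undoes it with the memmove/skb_pull/skb_trim sequence just above. A sketch of the encapsulation side under those assumptions (the ICV is left zeroed here where the real code writes a CRC-32 over the encrypted payload):

        #include <linux/skbuff.h>
        #include <linux/string.h>

        static int tkip_room_ok(struct sk_buff *skb, int hdr_len)
        {
                /* same refusal as the encrypt entry above */
                return skb_headroom(skb) >= 8 && skb_tailroom(skb) >= 4 &&
                       skb->len >= hdr_len;
        }

        static void tkip_encap(struct sk_buff *skb, int hdr_len)
        {
                u8 *pos = skb_push(skb, 8);     /* room for IV + ext IV */
                u8 *icv;

                memmove(pos, pos + 8, hdr_len); /* header back to the front */
                /* the 8 IV bytes would be written at pos + hdr_len here */
                icv = skb_put(skb, 4);          /* trailing ICV */
                memset(icv, 0, 4);              /* real code: CRC-32 */
        }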
skb               526 drivers/staging/rtl8192e/rtllib_crypt_tkip.c static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
skb               530 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	hdr11 = (struct rtllib_hdr_4addr *) skb->data;
skb               557 drivers/staging/rtl8192e/rtllib_crypt_tkip.c static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
skb               563 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb               565 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
skb               566 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		netdev_dbg(skb->dev,
skb               568 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 			   skb_tailroom(skb), hdr_len, skb->len);
skb               572 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	michael_mic_hdr(skb, tkey->tx_hdr);
skb               575 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
skb               576 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	pos = skb_put(skb, 8);
skb               578 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	    skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
skb               606 drivers/staging/rtl8192e/rtllib_crypt_tkip.c static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
skb               613 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb               618 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	michael_mic_hdr(skb, tkey->rx_hdr);
skb               620 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
skb               623 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 			skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
skb               626 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
skb               629 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		hdr = (struct rtllib_hdr_4addr *) skb->data;
skb               630 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		netdev_dbg(skb->dev,
skb               633 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		netdev_dbg(skb->dev, "%d\n",
skb               634 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 			   memcmp(mic, skb->data + skb->len - 8, 8) != 0);
skb               635 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 		if (skb->dev) {
skb               637 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 			rtllib_michael_mic_failure(skb->dev, hdr, keyidx);
skb               649 drivers/staging/rtl8192e/rtllib_crypt_tkip.c 	skb_trim(skb, skb->len - 8);
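The Michael MIC entries compute the MIC over a pseudo-header built by michael_mic_hdr() (DA, SA, priority) plus the payload, compare it against the last 8 bytes of the frame, and trim those 8 bytes on success; a mismatch reports a MIC failure via skb->dev. A sketch of the verify-and-trim tail, where michael_mic() is a hypothetical digest helper standing in for the crypto_shash plumbing:

        #include <linux/skbuff.h>
        #include <linux/string.h>

        /* hypothetical helper: writes an 8-byte MIC over hdr + data */
        int michael_mic(const u8 *key, const u8 *pseudo_hdr,
                        const u8 *data, size_t len, u8 *mic);

        static int mic_verify_and_trim(struct sk_buff *skb, int hdr_len,
                                       const u8 *key, const u8 *pseudo_hdr)
        {
                u8 mic[8];

                if (michael_mic(key, pseudo_hdr, skb->data + hdr_len,
                                skb->len - 8 - hdr_len, mic))
                        return -1;
                if (memcmp(mic, skb->data + skb->len - 8, 8) != 0)
                        return -1;              /* MIC failure path */
                skb_trim(skb, skb->len - 8);    /* drop the MIC on success */
                return 0;
        }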
skb                85 drivers/staging/rtl8192e/rtllib_crypt_wep.c static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb                91 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
skb                98 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
skb                99 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	    skb->len < hdr_len){
skb               101 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		       skb_headroom(skb), skb_tailroom(skb), skb->len, hdr_len);
skb               104 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	len = skb->len - hdr_len;
skb               105 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	pos = skb_push(skb, 4);
skb               138 drivers/staging/rtl8192e/rtllib_crypt_wep.c 		icv = skb_put(skb, 4);
skb               165 drivers/staging/rtl8192e/rtllib_crypt_wep.c static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               171 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
skb               178 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	if (skb->len < hdr_len + 8)
skb               181 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	pos = skb->data + hdr_len;
skb               195 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	plen = skb->len - hdr_len - 8;
skb               220 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	memmove(skb->data + 4, skb->data, hdr_len);
skb               221 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	skb_pull(skb, 4);
skb               222 drivers/staging/rtl8192e/rtllib_crypt_wep.c 	skb_trim(skb, skb->len - 4);
skb                39 drivers/staging/rtl8192e/rtllib_rx.c static void rtllib_rx_mgt(struct rtllib_device *ieee, struct sk_buff *skb,
skb                43 drivers/staging/rtl8192e/rtllib_rx.c 				     struct sk_buff *skb,
skb                47 drivers/staging/rtl8192e/rtllib_rx.c 	skb->dev = ieee->dev;
skb                48 drivers/staging/rtl8192e/rtllib_rx.c 	skb_reset_mac_header(skb);
skb                49 drivers/staging/rtl8192e/rtllib_rx.c 	skb_pull(skb, hdr_length);
skb                50 drivers/staging/rtl8192e/rtllib_rx.c 	skb->pkt_type = PACKET_OTHERHOST;
skb                51 drivers/staging/rtl8192e/rtllib_rx.c 	skb->protocol = htons(ETH_P_80211_RAW);
skb                52 drivers/staging/rtl8192e/rtllib_rx.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb                53 drivers/staging/rtl8192e/rtllib_rx.c 	netif_rx(skb);
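The rtllib_rx.c monitor-mode entries above deliver raw 802.11 frames to the stack: strip the radio header, mark the packet as not-for-this-host, tag it with the raw-802.11 ethertype, clear the control block, and hand it to netif_rx(). A sketch that mirrors those lines (rtllib.h provides the real ETH_P_80211_RAW; the fallback value below is a hypothetical stand-in):

        #include <linux/skbuff.h>
        #include <linux/netdevice.h>
        #include <linux/if_ether.h>
        #include <linux/string.h>

        #ifndef ETH_P_80211_RAW
        #define ETH_P_80211_RAW 0x0019  /* hypothetical stand-in */
        #endif

        static void monitor_deliver(struct net_device *dev,
                                    struct sk_buff *skb, size_t hdr_length)
        {
                skb->dev = dev;
                skb_reset_mac_header(skb);
                skb_pull(skb, hdr_length);        /* drop the 802.11 header */
                skb->pkt_type = PACKET_OTHERHOST; /* not addressed to us */
                skb->protocol = htons(ETH_P_80211_RAW);
                memset(skb->cb, 0, sizeof(skb->cb));
                netif_rx(skb);
        }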
skb                66 drivers/staging/rtl8192e/rtllib_rx.c 		if (entry->skb != NULL &&
skb                71 drivers/staging/rtl8192e/rtllib_rx.c 			dev_kfree_skb_any(entry->skb);
skb                72 drivers/staging/rtl8192e/rtllib_rx.c 			entry->skb = NULL;
skb                75 drivers/staging/rtl8192e/rtllib_rx.c 		if (entry->skb != NULL && entry->seq == seq &&
skb                90 drivers/staging/rtl8192e/rtllib_rx.c 	struct sk_buff *skb = NULL;
skb               117 drivers/staging/rtl8192e/rtllib_rx.c 		skb = dev_alloc_skb(ieee->dev->mtu +
skb               125 drivers/staging/rtl8192e/rtllib_rx.c 		if (!skb)
skb               133 drivers/staging/rtl8192e/rtllib_rx.c 		if (entry->skb != NULL)
skb               134 drivers/staging/rtl8192e/rtllib_rx.c 			dev_kfree_skb_any(entry->skb);
skb               139 drivers/staging/rtl8192e/rtllib_rx.c 		entry->skb = skb;
skb               150 drivers/staging/rtl8192e/rtllib_rx.c 			skb = entry->skb;
skb               154 drivers/staging/rtl8192e/rtllib_rx.c 	return skb;
skb               195 drivers/staging/rtl8192e/rtllib_rx.c 	entry->skb = NULL;
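The fragment-cache entries above allocate one reassembly skb sized for the MTU plus worst-case 802.11 overhead, evict and free any stale skb already parked in the cache slot, and keep the buffer there until the final fragment arrives. A sketch with a hypothetical frag_entry mirroring the cache fields visible above:

        #include <linux/skbuff.h>

        struct frag_entry {             /* hypothetical cache slot */
                struct sk_buff *skb;
                u16 seq;
                u8 last_frag;
        };

        static struct sk_buff *frag_cache_get(struct frag_entry *entry,
                                              unsigned int mtu,
                                              unsigned int overhead, u16 seq)
        {
                struct sk_buff *skb = dev_alloc_skb(mtu + overhead);

                if (!skb)
                        return NULL;
                if (entry->skb)                 /* evict a stale reassembly */
                        dev_kfree_skb_any(entry->skb);
                entry->skb = skb;
                entry->seq = seq;
                entry->last_frag = 0;
                return skb;
        }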
skb               206 drivers/staging/rtl8192e/rtllib_rx.c rtllib_rx_frame_mgmt(struct rtllib_device *ieee, struct sk_buff *skb,
skb               214 drivers/staging/rtl8192e/rtllib_rx.c 	struct rtllib_hdr_3addr *hdr = (struct rtllib_hdr_3addr *)skb->data;
skb               216 drivers/staging/rtl8192e/rtllib_rx.c 	rx_stats->len = skb->len;
skb               217 drivers/staging/rtl8192e/rtllib_rx.c 	rtllib_rx_mgt(ieee, skb, rx_stats);
skb               219 drivers/staging/rtl8192e/rtllib_rx.c 		dev_kfree_skb_any(skb);
skb               222 drivers/staging/rtl8192e/rtllib_rx.c 	rtllib_rx_frame_softmac(ieee, skb, rx_stats, type, stype);
skb               224 drivers/staging/rtl8192e/rtllib_rx.c 	dev_kfree_skb_any(skb);
skb               243 drivers/staging/rtl8192e/rtllib_rx.c 				    struct sk_buff *skb, size_t hdrlen)
skb               250 drivers/staging/rtl8192e/rtllib_rx.c 	if (skb->len < 24)
skb               253 drivers/staging/rtl8192e/rtllib_rx.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb               269 drivers/staging/rtl8192e/rtllib_rx.c 	if (skb->len < 24 + 8)
skb               273 drivers/staging/rtl8192e/rtllib_rx.c 	pos = skb->data + hdrlen;
skb               283 drivers/staging/rtl8192e/rtllib_rx.c rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
skb               294 drivers/staging/rtl8192e/rtllib_rx.c 						(skb->cb + MAX_DEV_ADDR_SIZE);
skb               302 drivers/staging/rtl8192e/rtllib_rx.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb               306 drivers/staging/rtl8192e/rtllib_rx.c 	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
skb               314 drivers/staging/rtl8192e/rtllib_rx.c 				   skb->data[hdrlen + 3] >> 6);
skb               324 drivers/staging/rtl8192e/rtllib_rx.c rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
skb               334 drivers/staging/rtl8192e/rtllib_rx.c 						(skb->cb + MAX_DEV_ADDR_SIZE);
skb               342 drivers/staging/rtl8192e/rtllib_rx.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb               346 drivers/staging/rtl8192e/rtllib_rx.c 	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
skb               763 drivers/staging/rtl8192e/rtllib_rx.c static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
skb               767 drivers/staging/rtl8192e/rtllib_rx.c 	struct rtllib_hdr_3addr  *hdr = (struct rtllib_hdr_3addr *)skb->data;
skb               780 drivers/staging/rtl8192e/rtllib_rx.c 	   (((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved))
skb               790 drivers/staging/rtl8192e/rtllib_rx.c 	if (skb->len <= ChkLength)
skb               793 drivers/staging/rtl8192e/rtllib_rx.c 	skb_pull(skb, LLCOffset);
skb               811 drivers/staging/rtl8192e/rtllib_rx.c 		skb_put_data(sub_skb, skb->data, skb->len);
skb               825 drivers/staging/rtl8192e/rtllib_rx.c 	while (skb->len > ETHERNET_HEADER_SIZE) {
skb               827 drivers/staging/rtl8192e/rtllib_rx.c 		nSubframe_Length = *((u16 *)(skb->data + 12));
skb               831 drivers/staging/rtl8192e/rtllib_rx.c 		if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
skb               840 drivers/staging/rtl8192e/rtllib_rx.c 				    skb->len, nSubframe_Length);
skb               848 drivers/staging/rtl8192e/rtllib_rx.c 		skb_pull(skb, ETHERNET_HEADER_SIZE);
skb               863 drivers/staging/rtl8192e/rtllib_rx.c 		skb_put_data(sub_skb, skb->data, nSubframe_Length);
skb               872 drivers/staging/rtl8192e/rtllib_rx.c 		skb_pull(skb, nSubframe_Length);
skb               874 drivers/staging/rtl8192e/rtllib_rx.c 		if (skb->len != 0) {
skb               880 drivers/staging/rtl8192e/rtllib_rx.c 			if (skb->len < nPadding_Length)
skb               883 drivers/staging/rtl8192e/rtllib_rx.c 			skb_pull(skb, nPadding_Length);
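The parse_subframe() entries walk an A-MSDU aggregate: each pass reads the 16-bit subframe length at offset 12, copies that many payload bytes into a fresh sub_skb, advances the parent skb with skb_pull(), and skips the pad that rounds every subframe to 4 bytes. A hedged sketch of one iteration; the on-air length field is big-endian, so get_unaligned_be16() stands in for the driver's own byte swap:

        #include <linux/skbuff.h>
        #include <linux/errno.h>
        #include <asm/unaligned.h>

        #define AMSDU_HDR 14    /* DA + SA + length, as ETHERNET_HEADER_SIZE */

        /* returns 1 with *out set, 0 when the aggregate is exhausted */
        static int amsdu_next(struct sk_buff *skb, struct sk_buff **out)
        {
                unsigned int len, pad;
                struct sk_buff *sub;

                if (skb->len <= AMSDU_HDR)
                        return 0;
                len = get_unaligned_be16(skb->data + 12);
                if (skb->len < AMSDU_HDR + len)
                        return -EINVAL;         /* truncated aggregate */

                skb_pull(skb, AMSDU_HDR);
                sub = dev_alloc_skb(len + 12);  /* small slack, as above */
                if (!sub)
                        return -ENOMEM;
                skb_reserve(sub, 12);
                skb_put_data(sub, skb->data, len); /* copy one MSDU out */
                skb_pull(skb, len);

                if (skb->len) {                 /* inter-subframe padding */
                        pad = (4 - (len + AMSDU_HDR) % 4) % 4;
                        if (skb->len < pad)
                                return -EINVAL;
                        skb_pull(skb, pad);
                }
                *out = sub;
                return 1;
        }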
skb               892 drivers/staging/rtl8192e/rtllib_rx.c 				   struct sk_buff *skb,
skb               895 drivers/staging/rtl8192e/rtllib_rx.c 	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
skb               900 drivers/staging/rtl8192e/rtllib_rx.c 	if (HTCCheck(ieee, skb->data)) {
skb               915 drivers/staging/rtl8192e/rtllib_rx.c 				     struct sk_buff *skb, u8 multicast)
skb               917 drivers/staging/rtl8192e/rtllib_rx.c 	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
skb               929 drivers/staging/rtl8192e/rtllib_rx.c 		!IsDataFrame(skb->data) ||
skb               930 drivers/staging/rtl8192e/rtllib_rx.c 		IsLegacyDataFrame(skb->data)) {
skb               940 drivers/staging/rtl8192e/rtllib_rx.c 			(u8)Frame_QoSTID((u8 *)(skb->data)), RX_DIR, true)) {
skb              1043 drivers/staging/rtl8192e/rtllib_rx.c static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
skb              1046 drivers/staging/rtl8192e/rtllib_rx.c 	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
skb              1051 drivers/staging/rtl8192e/rtllib_rx.c 		if (skb->len >= hdrlen + 3)
skb              1052 drivers/staging/rtl8192e/rtllib_rx.c 			idx = skb->data[hdrlen + 3] >> 6;
skb              1078 drivers/staging/rtl8192e/rtllib_rx.c static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
skb              1087 drivers/staging/rtl8192e/rtllib_rx.c 	hdr = (struct rtllib_hdr_4addr *)skb->data;
skb              1097 drivers/staging/rtl8192e/rtllib_rx.c 	keyidx = rtllib_rx_frame_decrypt(ieee, skb, crypt);
skb              1103 drivers/staging/rtl8192e/rtllib_rx.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb              1117 drivers/staging/rtl8192e/rtllib_rx.c 		flen = skb->len;
skb              1133 drivers/staging/rtl8192e/rtllib_rx.c 			skb_put_data(frag_skb, skb->data, flen);
skb              1138 drivers/staging/rtl8192e/rtllib_rx.c 			skb_put_data(frag_skb, skb->data + hdrlen, flen);
skb              1140 drivers/staging/rtl8192e/rtllib_rx.c 		dev_kfree_skb_any(skb);
skb              1141 drivers/staging/rtl8192e/rtllib_rx.c 		skb = NULL;
skb              1154 drivers/staging/rtl8192e/rtllib_rx.c 		skb = frag_skb;
skb              1155 drivers/staging/rtl8192e/rtllib_rx.c 		hdr = (struct rtllib_hdr_4addr *) skb->data;
skb              1163 drivers/staging/rtl8192e/rtllib_rx.c 		rtllib_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
skb              1168 drivers/staging/rtl8192e/rtllib_rx.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb              1171 drivers/staging/rtl8192e/rtllib_rx.c 		    rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
skb              1176 drivers/staging/rtl8192e/rtllib_rx.c 			struct eapol *eap = (struct eapol *)(skb->data +
skb              1190 drivers/staging/rtl8192e/rtllib_rx.c 	    rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
skb              1191 drivers/staging/rtl8192e/rtllib_rx.c 		struct eapol *eap = (struct eapol *)(skb->data + 24);
skb              1198 drivers/staging/rtl8192e/rtllib_rx.c 	    !rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
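Inside rtllib_rx_decrypt() the fragmented-frame path, visible above, copies the first fragment into the cached frag_skb whole (keeping the 802.11 header once), copies later fragments payload-only, and frees each per-fragment skb; once the last fragment lands, frag_skb replaces skb for the MSDU-level decrypt. A minimal sketch of that hand-off:

        #include <linux/skbuff.h>

        static struct sk_buff *reassemble(struct sk_buff *frag_skb,
                                          struct sk_buff *skb,
                                          unsigned int hdrlen, bool first)
        {
                if (first)      /* keep the 802.11 header exactly once */
                        skb_put_data(frag_skb, skb->data, skb->len);
                else            /* later fragments contribute payload only */
                        skb_put_data(frag_skb, skb->data + hdrlen,
                                     skb->len - hdrlen);
                dev_kfree_skb_any(skb); /* per-fragment buffer is consumed */
                return frag_skb;        /* caller continues with this skb */
        }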
skb              1293 drivers/staging/rtl8192e/rtllib_rx.c static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
skb              1297 drivers/staging/rtl8192e/rtllib_rx.c 	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
skb              1327 drivers/staging/rtl8192e/rtllib_rx.c 	hdrlen = rtllib_rx_get_hdrlen(ieee, skb, rx_stats);
skb              1328 drivers/staging/rtl8192e/rtllib_rx.c 	if (skb->len < hdrlen) {
skb              1336 drivers/staging/rtl8192e/rtllib_rx.c 	ret = rtllib_rx_check_duplicate(ieee, skb, multicast);
skb              1348 drivers/staging/rtl8192e/rtllib_rx.c 		if (rtllib_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
skb              1370 drivers/staging/rtl8192e/rtllib_rx.c 	if (skb->len == hdrlen)
skb              1388 drivers/staging/rtl8192e/rtllib_rx.c 	ret = rtllib_rx_get_crypt(ieee, skb, &crypt, hdrlen);
skb              1393 drivers/staging/rtl8192e/rtllib_rx.c 	ret = rtllib_rx_decrypt(ieee, skb, rx_stats, crypt, hdrlen);
skb              1400 drivers/staging/rtl8192e/rtllib_rx.c 	hdr = (struct rtllib_hdr_4addr *) skb->data;
skb              1401 drivers/staging/rtl8192e/rtllib_rx.c 	if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
skb              1404 drivers/staging/rtl8192e/rtllib_rx.c 		TID = Frame_QoSTID(skb->data);
skb              1420 drivers/staging/rtl8192e/rtllib_rx.c 	if (parse_subframe(ieee, skb, rx_stats, rxb, src, dst) == 0) {
skb              1451 drivers/staging/rtl8192e/rtllib_rx.c 	dev_kfree_skb(skb);
skb              1466 drivers/staging/rtl8192e/rtllib_rx.c static int rtllib_rx_Master(struct rtllib_device *ieee, struct sk_buff *skb,
skb              1472 drivers/staging/rtl8192e/rtllib_rx.c static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
skb              1475 drivers/staging/rtl8192e/rtllib_rx.c 	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
skb              1479 drivers/staging/rtl8192e/rtllib_rx.c 	if (skb->len < hdrlen) {
skb              1486 drivers/staging/rtl8192e/rtllib_rx.c 	if (HTCCheck(ieee, skb->data)) {
skb              1493 drivers/staging/rtl8192e/rtllib_rx.c 	rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
skb              1495 drivers/staging/rtl8192e/rtllib_rx.c 	ieee->stats.rx_bytes += skb->len;
skb              1500 drivers/staging/rtl8192e/rtllib_rx.c static int rtllib_rx_Mesh(struct rtllib_device *ieee, struct sk_buff *skb,
skb              1510 drivers/staging/rtl8192e/rtllib_rx.c int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
skb              1515 drivers/staging/rtl8192e/rtllib_rx.c 	if (!ieee || !skb || !rx_stats) {
skb              1519 drivers/staging/rtl8192e/rtllib_rx.c 	if (skb->len < 10) {
skb              1527 drivers/staging/rtl8192e/rtllib_rx.c 		ret = rtllib_rx_InfraAdhoc(ieee, skb, rx_stats);
skb              1531 drivers/staging/rtl8192e/rtllib_rx.c 		ret = rtllib_rx_Master(ieee, skb, rx_stats);
skb              1534 drivers/staging/rtl8192e/rtllib_rx.c 		ret = rtllib_rx_Monitor(ieee, skb, rx_stats);
skb              1537 drivers/staging/rtl8192e/rtllib_rx.c 		ret = rtllib_rx_Mesh(ieee, skb, rx_stats);
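rtllib_rx() itself, per the entries above, only sanity-checks its arguments and a 10-byte minimum length, then routes on ieee->iw_mode: infrastructure and ad-hoc share one handler, while master, monitor, and mesh each get their own. A routing sketch using the standard wireless-extensions mode constants:

        #include <linux/skbuff.h>
        #include <linux/wireless.h>

        static int rx_dispatch(int iw_mode, struct sk_buff *skb)
        {
                if (!skb || skb->len < 10)      /* too short for any header */
                        return 0;

                switch (iw_mode) {
                case IW_MODE_INFRA:
                case IW_MODE_ADHOC:
                        return 1;       /* -> rtllib_rx_InfraAdhoc() */
                case IW_MODE_MASTER:
                        return 2;       /* -> rtllib_rx_Master() */
                case IW_MODE_MONITOR:
                        return 3;       /* -> rtllib_rx_Monitor() */
                case IW_MODE_MESH:
                        return 4;       /* -> rtllib_rx_Mesh() */
                }
                return 0;
        }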
skb              2724 drivers/staging/rtl8192e/rtllib_rx.c 			  struct sk_buff *skb,
skb              2727 drivers/staging/rtl8192e/rtllib_rx.c 	struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data;
skb              2764 drivers/staging/rtl8192e/rtllib_rx.c 			rtllib_rx_probe_rq(ieee, skb);
skb               128 drivers/staging/rtl8192e/rtllib_softmac.c static void enqueue_mgmt(struct rtllib_device *ieee, struct sk_buff *skb)
skb               141 drivers/staging/rtl8192e/rtllib_softmac.c 	ieee->mgmt_queue_ring[nh] = skb;
skb               200 drivers/staging/rtl8192e/rtllib_softmac.c inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
skb               205 drivers/staging/rtl8192e/rtllib_softmac.c 		(struct rtllib_hdr_3addr  *) skb->data;
skb               207 drivers/staging/rtl8192e/rtllib_softmac.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
skb               228 drivers/staging/rtl8192e/rtllib_softmac.c 			enqueue_mgmt(ieee, skb);
skb               238 drivers/staging/rtl8192e/rtllib_softmac.c 			ieee->softmac_data_hard_start_xmit(skb, ieee->dev,
skb               268 drivers/staging/rtl8192e/rtllib_softmac.c 				       skb);
skb               270 drivers/staging/rtl8192e/rtllib_softmac.c 			ieee->softmac_hard_start_xmit(skb, ieee->dev);
skb               277 drivers/staging/rtl8192e/rtllib_softmac.c softmac_ps_mgmt_xmit(struct sk_buff *skb,
skb               282 drivers/staging/rtl8192e/rtllib_softmac.c 		(struct rtllib_hdr_3addr  *) skb->data;
skb               284 drivers/staging/rtl8192e/rtllib_softmac.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
skb               315 drivers/staging/rtl8192e/rtllib_softmac.c 		ieee->softmac_data_hard_start_xmit(skb, ieee->dev,
skb               327 drivers/staging/rtl8192e/rtllib_softmac.c 		ieee->softmac_hard_start_xmit(skb, ieee->dev);
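Both softmac transmit paths above read a struct cb_desc parked inside skb->cb, at offset 8 here and at MAX_DEV_ADDR_SIZE in the crypt callbacks; skb->cb is the 48-byte scratch area a layer owns while the skb is in its queue, so the descriptor plus offset must fit inside it. A sketch with a hypothetical subset of the descriptor:

        #include <linux/skbuff.h>
        #include <linux/bug.h>

        #define CB_OFFSET 8     /* softmac.c uses 8; crypt code uses
                                 * MAX_DEV_ADDR_SIZE */

        struct my_cb_desc {     /* hypothetical subset of cb_desc */
                u8 queue_index;
                u8 ratr_index;
                bool tx_use_drv_assinged_rate;
        };

        static struct my_cb_desc *get_cb_desc(struct sk_buff *skb)
        {
                BUILD_BUG_ON(CB_OFFSET + sizeof(struct my_cb_desc) >
                             sizeof(skb->cb));
                return (struct my_cb_desc *)(skb->cb + CB_OFFSET);
        }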
skb               336 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb               343 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = dev_alloc_skb(sizeof(struct rtllib_probe_request) +
skb               346 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb               349 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb               351 drivers/staging/rtl8192e/rtllib_softmac.c 	req = skb_put(skb, sizeof(struct rtllib_probe_request));
skb               359 drivers/staging/rtl8192e/rtllib_softmac.c 	tag = skb_put(skb, len + 2 + rate_len);
skb               369 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb               376 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb               380 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = rtllib_get_beacon_(ieee);
skb               382 drivers/staging/rtl8192e/rtllib_softmac.c 	if (skb) {
skb               383 drivers/staging/rtl8192e/rtllib_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb               476 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb               478 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = rtllib_probe_req(ieee);
skb               479 drivers/staging/rtl8192e/rtllib_softmac.c 	if (skb) {
skb               480 drivers/staging/rtl8192e/rtllib_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb               775 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb               781 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = dev_alloc_skb(len);
skb               783 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb               786 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb               788 drivers/staging/rtl8192e/rtllib_softmac.c 	auth = skb_put(skb, sizeof(struct rtllib_authentication));
skb               809 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb               818 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb = NULL;
skb               881 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = dev_alloc_skb(beacon_size);
skb               882 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb               885 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb               887 drivers/staging/rtl8192e/rtllib_softmac.c 	beacon_buf = skb_put(skb, (beacon_size - ieee->tx_headroom));
skb               958 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb               963 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb               974 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = dev_alloc_skb(len);
skb               976 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb               979 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb               981 drivers/staging/rtl8192e/rtllib_softmac.c 	assoc = skb_put(skb, sizeof(struct rtllib_assoc_response_frame));
skb              1012 drivers/staging/rtl8192e/rtllib_softmac.c 	tag = skb_put(skb, rate_len);
skb              1016 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb              1022 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb = NULL;
skb              1026 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = dev_alloc_skb(len);
skb              1027 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              1030 drivers/staging/rtl8192e/rtllib_softmac.c 	skb->len = sizeof(struct rtllib_authentication);
skb              1032 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb              1034 drivers/staging/rtl8192e/rtllib_softmac.c 	auth = skb_put(skb, sizeof(struct rtllib_authentication));
skb              1044 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb              1051 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              1054 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = dev_alloc_skb(sizeof(struct rtllib_hdr_3addr)+ieee->tx_headroom);
skb              1055 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              1058 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb              1060 drivers/staging/rtl8192e/rtllib_softmac.c 	hdr = skb_put(skb, sizeof(struct rtllib_hdr_3addr));
skb              1070 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb              1077 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              1080 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = dev_alloc_skb(sizeof(struct rtllib_pspoll_hdr)+ieee->tx_headroom);
skb              1081 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              1084 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb              1086 drivers/staging/rtl8192e/rtllib_softmac.c 	hdr = skb_put(skb, sizeof(struct rtllib_pspoll_hdr));
skb              1095 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb              1146 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              1229 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = dev_alloc_skb(len);
skb              1231 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              1234 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb              1236 drivers/staging/rtl8192e/rtllib_softmac.c 	hdr = skb_put(skb, sizeof(struct rtllib_assoc_request_frame) + 2);
skb              1264 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_put_data(skb, beacon->ssid, beacon->ssid_len);
skb              1266 drivers/staging/rtl8192e/rtllib_softmac.c 	tag = skb_put(skb, rate_len);
skb              1295 drivers/staging/rtl8192e/rtllib_softmac.c 		tag = skb_put(skb, ckip_ie_len);
skb              1309 drivers/staging/rtl8192e/rtllib_softmac.c 		tag = skb_put(skb, ccxrm_ie_len);
skb              1323 drivers/staging/rtl8192e/rtllib_softmac.c 		tag = skb_put(skb, cxvernum_ie_len);
skb              1331 drivers/staging/rtl8192e/rtllib_softmac.c 			tag = skb_put(skb, ht_cap_len);
skb              1340 drivers/staging/rtl8192e/rtllib_softmac.c 		skb_put_data(skb, ieee->wpa_ie, ieee->wpa_ie_len);
skb              1343 drivers/staging/rtl8192e/rtllib_softmac.c 			tag = skb_put(skb, 18);
skb              1351 drivers/staging/rtl8192e/rtllib_softmac.c 		tag = skb_put(skb, wmm_info_len);
skb              1356 drivers/staging/rtl8192e/rtllib_softmac.c 		skb_put_data(skb, ieee->wps_ie, wps_ie_len);
skb              1360 drivers/staging/rtl8192e/rtllib_softmac.c 		tag = skb_put(skb, turbo_info_len);
skb              1366 drivers/staging/rtl8192e/rtllib_softmac.c 			tag = skb_put(skb, ht_cap_len);
skb              1374 drivers/staging/rtl8192e/rtllib_softmac.c 			tag = skb_put(skb, realtek_ie_len);
skb              1384 drivers/staging/rtl8192e/rtllib_softmac.c 	ieee->assocreq_ies_len = (skb->data + skb->len) - ies;
skb              1389 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
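The association-request builder above assembles the frame tag by tag: each information element is appended with skb_put() and filled as ID, length, payload, while variable-length blobs (SSID, WPA IE, WPS IE) go in via skb_put_data(). A sketch of appending one element:

        #include <linux/skbuff.h>
        #include <linux/string.h>

        static void put_ie(struct sk_buff *skb, u8 eid,
                           const u8 *data, u8 len)
        {
                u8 *tag = skb_put(skb, len + 2); /* ID + length + payload */

                *tag++ = eid;
                *tag++ = len;
                memcpy(tag, data, len);
        }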
skb              1432 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              1438 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = rtllib_authentication_req(beacon, ieee, 0, daddr);
skb              1440 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              1445 drivers/staging/rtl8192e/rtllib_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb              1457 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              1463 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = rtllib_authentication_req(beacon, ieee, chlen + 2, beacon->bssid);
skb              1465 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              1468 drivers/staging/rtl8192e/rtllib_softmac.c 		c = skb_put(skb, chlen+2);
skb              1476 drivers/staging/rtl8192e/rtllib_softmac.c 		rtllib_encrypt_fragment(ieee, skb,
skb              1479 drivers/staging/rtl8192e/rtllib_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb              1487 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              1495 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = rtllib_association_req(beacon, ieee);
skb              1496 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              1499 drivers/staging/rtl8192e/rtllib_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb              1769 drivers/staging/rtl8192e/rtllib_softmac.c static inline u16 auth_parse(struct net_device *dev, struct sk_buff *skb,
skb              1775 drivers/staging/rtl8192e/rtllib_softmac.c 	if (skb->len <  (sizeof(struct rtllib_authentication) -
skb              1777 drivers/staging/rtl8192e/rtllib_softmac.c 		netdev_dbg(dev, "invalid len in auth resp: %d\n", skb->len);
skb              1781 drivers/staging/rtl8192e/rtllib_softmac.c 	a = (struct rtllib_authentication *) skb->data;
skb              1782 drivers/staging/rtl8192e/rtllib_softmac.c 	if (skb->len > (sizeof(struct rtllib_authentication) + 3)) {
skb              1783 drivers/staging/rtl8192e/rtllib_softmac.c 		t = skb->data + sizeof(struct rtllib_authentication);
skb              1795 drivers/staging/rtl8192e/rtllib_softmac.c static int auth_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
skb              1799 drivers/staging/rtl8192e/rtllib_softmac.c 	if (skb->len <  (sizeof(struct rtllib_authentication) -
skb              1801 drivers/staging/rtl8192e/rtllib_softmac.c 		netdev_dbg(dev, "invalid len in auth request: %d\n", skb->len);
skb              1804 drivers/staging/rtl8192e/rtllib_softmac.c 	a = (struct rtllib_authentication *) skb->data;
skb              1814 drivers/staging/rtl8192e/rtllib_softmac.c static short probe_rq_parse(struct rtllib_device *ieee, struct sk_buff *skb,
skb              1822 drivers/staging/rtl8192e/rtllib_softmac.c 		(struct rtllib_hdr_3addr   *) skb->data;
skb              1825 drivers/staging/rtl8192e/rtllib_softmac.c 	if (skb->len < sizeof(struct rtllib_hdr_3addr))
skb              1836 drivers/staging/rtl8192e/rtllib_softmac.c 	skbend = (u8 *)skb->data + skb->len;
skb              1838 drivers/staging/rtl8192e/rtllib_softmac.c 	tag = skb->data + sizeof(struct rtllib_hdr_3addr);
skb              1860 drivers/staging/rtl8192e/rtllib_softmac.c static int assoc_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
skb              1864 drivers/staging/rtl8192e/rtllib_softmac.c 	if (skb->len < (sizeof(struct rtllib_assoc_request_frame) -
skb              1866 drivers/staging/rtl8192e/rtllib_softmac.c 		netdev_dbg(dev, "invalid len in auth request:%d\n", skb->len);
skb              1870 drivers/staging/rtl8192e/rtllib_softmac.c 	a = (struct rtllib_assoc_request_frame *) skb->data;
skb              1877 drivers/staging/rtl8192e/rtllib_softmac.c static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
skb              1883 drivers/staging/rtl8192e/rtllib_softmac.c 	if (skb->len <  sizeof(struct rtllib_assoc_response_frame)) {
skb              1885 drivers/staging/rtl8192e/rtllib_softmac.c 			   skb->len);
skb              1889 drivers/staging/rtl8192e/rtllib_softmac.c 	response_head = (struct rtllib_assoc_response_frame *) skb->data;
skb              1906 drivers/staging/rtl8192e/rtllib_softmac.c void rtllib_rx_probe_rq(struct rtllib_device *ieee, struct sk_buff *skb)
skb              1911 drivers/staging/rtl8192e/rtllib_softmac.c 	if (probe_rq_parse(ieee, skb, dest) > 0) {
skb              1918 drivers/staging/rtl8192e/rtllib_softmac.c 				     struct sk_buff *skb)
skb              1925 drivers/staging/rtl8192e/rtllib_softmac.c 	status = auth_rq_parse(ieee->dev, skb, dest);
skb              1931 drivers/staging/rtl8192e/rtllib_softmac.c 				      struct sk_buff *skb)
skb              1937 drivers/staging/rtl8192e/rtllib_softmac.c 	if (assoc_rq_parse(ieee->dev, skb, dest) != -1)
skb              2169 drivers/staging/rtl8192e/rtllib_softmac.c 				  struct sk_buff *skb)
skb              2171 drivers/staging/rtl8192e/rtllib_softmac.c 	struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
skb              2187 drivers/staging/rtl8192e/rtllib_softmac.c 			rtllib_rx_ADDBAReq(ieee, skb);
skb              2190 drivers/staging/rtl8192e/rtllib_softmac.c 			rtllib_rx_ADDBARsp(ieee, skb);
skb              2193 drivers/staging/rtl8192e/rtllib_softmac.c 			rtllib_rx_DELBA(ieee, skb);
skb              2203 drivers/staging/rtl8192e/rtllib_softmac.c rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
skb              2210 drivers/staging/rtl8192e/rtllib_softmac.c 	struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
skb              2219 drivers/staging/rtl8192e/rtllib_softmac.c 		errcode = assoc_parse(ieee, skb, &aid);
skb              2232 drivers/staging/rtl8192e/rtllib_softmac.c 			assoc_resp = (struct rtllib_assoc_response_frame *)skb->data;
skb              2256 drivers/staging/rtl8192e/rtllib_softmac.c 			ieee->assocresp_ies_len = (skb->data + skb->len) - ies;
skb              2280 drivers/staging/rtl8192e/rtllib_softmac.c static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
skb              2287 drivers/staging/rtl8192e/rtllib_softmac.c 	errcode = auth_parse(ieee->dev, skb, &challenge, &chlen);
skb              2335 drivers/staging/rtl8192e/rtllib_softmac.c rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
skb              2344 drivers/staging/rtl8192e/rtllib_softmac.c 			rtllib_rx_auth_resp(ieee, skb);
skb              2346 drivers/staging/rtl8192e/rtllib_softmac.c 			rtllib_rx_auth_rq(ieee, skb);
skb              2353 drivers/staging/rtl8192e/rtllib_softmac.c rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
skb              2355 drivers/staging/rtl8192e/rtllib_softmac.c 	struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
skb              2371 drivers/staging/rtl8192e/rtllib_softmac.c 			    ((struct rtllib_disassoc *)skb->data)->reason);
skb              2391 drivers/staging/rtl8192e/rtllib_softmac.c 				   struct sk_buff *skb,
skb              2395 drivers/staging/rtl8192e/rtllib_softmac.c 	struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
skb              2405 drivers/staging/rtl8192e/rtllib_softmac.c 		if (rtllib_rx_assoc_resp(ieee, skb, rx_stats) == 1)
skb              2412 drivers/staging/rtl8192e/rtllib_softmac.c 			rtllib_rx_assoc_rq(ieee, skb);
skb              2415 drivers/staging/rtl8192e/rtllib_softmac.c 		rtllib_rx_auth(ieee, skb, rx_stats);
skb              2419 drivers/staging/rtl8192e/rtllib_softmac.c 		rtllib_rx_deauth(ieee, skb);
skb              2422 drivers/staging/rtl8192e/rtllib_softmac.c 		rtllib_process_action(ieee, skb);
skb              2806 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              2809 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = rtllib_probe_resp(ieee, broadcast_addr);
skb              2811 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              2814 drivers/staging/rtl8192e/rtllib_softmac.c 	b = (struct rtllib_probe_response *) skb->data;
skb              2817 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb              2823 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              2826 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = rtllib_get_beacon_(ieee);
skb              2827 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              2830 drivers/staging/rtl8192e/rtllib_softmac.c 	b = (struct rtllib_probe_response *) skb->data;
skb              2838 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb              3061 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              3065 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = dev_alloc_skb(len);
skb              3066 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              3069 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb              3071 drivers/staging/rtl8192e/rtllib_softmac.c 	disauth = skb_put(skb, sizeof(struct rtllib_disauth));
skb              3080 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb              3087 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              3091 drivers/staging/rtl8192e/rtllib_softmac.c 	skb = dev_alloc_skb(len);
skb              3093 drivers/staging/rtl8192e/rtllib_softmac.c 	if (!skb)
skb              3096 drivers/staging/rtl8192e/rtllib_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb              3098 drivers/staging/rtl8192e/rtllib_softmac.c 	disass = skb_put(skb, sizeof(struct rtllib_disassoc));
skb              3107 drivers/staging/rtl8192e/rtllib_softmac.c 	return skb;
skb              3113 drivers/staging/rtl8192e/rtllib_softmac.c 	struct sk_buff *skb;
skb              3116 drivers/staging/rtl8192e/rtllib_softmac.c 		skb = rtllib_disauth_skb(beacon, ieee, asRsn);
skb              3118 drivers/staging/rtl8192e/rtllib_softmac.c 		skb = rtllib_disassociate_skb(beacon, ieee, asRsn);
skb              3120 drivers/staging/rtl8192e/rtllib_softmac.c 	if (skb)
skb              3121 drivers/staging/rtl8192e/rtllib_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb               234 drivers/staging/rtl8192e/rtllib_tx.c static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
skb               239 drivers/staging/rtl8192e/rtllib_tx.c 	eth = (struct ethhdr *)skb->data;
skb               244 drivers/staging/rtl8192e/rtllib_tx.c 	print_hex_dump_bytes("rtllib_classify(): ", DUMP_PREFIX_NONE, skb->data,
skb               245 drivers/staging/rtl8192e/rtllib_tx.c 			     skb->len);
skb               247 drivers/staging/rtl8192e/rtllib_tx.c 	ip = ip_hdr(skb);
skb               269 drivers/staging/rtl8192e/rtllib_tx.c 				    struct sk_buff *skb,
skb               274 drivers/staging/rtl8192e/rtllib_tx.c 	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;
skb               281 drivers/staging/rtl8192e/rtllib_tx.c 	if (!IsQoSDataFrame(skb->data))
skb               296 drivers/staging/rtl8192e/rtllib_tx.c 		    skb->priority, TX_DIR, true)) {
skb               396 drivers/staging/rtl8192e/rtllib_tx.c 					struct sk_buff *skb)
skb               409 drivers/staging/rtl8192e/rtllib_tx.c 	if (is_broadcast_ether_addr(skb->data+16))
skb               413 drivers/staging/rtl8192e/rtllib_tx.c 		if (skb->len > ieee->rts) {
skb               455 drivers/staging/rtl8192e/rtllib_tx.c 		if (skb->len > ieee->rts) {
skb               497 drivers/staging/rtl8192e/rtllib_tx.c static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
skb               504 drivers/staging/rtl8192e/rtllib_tx.c 	if (IsQoSDataFrame(skb->data)) {
skb               508 drivers/staging/rtl8192e/rtllib_tx.c 		    skb->priority, TX_DIR, true))
skb               517 drivers/staging/rtl8192e/rtllib_tx.c static int wme_downgrade_ac(struct sk_buff *skb)
skb               519 drivers/staging/rtl8192e/rtllib_tx.c 	switch (skb->priority) {
skb               522 drivers/staging/rtl8192e/rtllib_tx.c 		skb->priority = 5; /* VO -> VI */
skb               526 drivers/staging/rtl8192e/rtllib_tx.c 		skb->priority = 3; /* VI -> BE */
skb               530 drivers/staging/rtl8192e/rtllib_tx.c 		skb->priority = 1; /* BE -> BK */
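wme_downgrade_ac() above steps a frame's user priority down one access class per call, VO to VI to BE to BK, and the xmit loop further down keeps calling it while the wmm_acm bitmap says admission control is mandatory for the current priority. A sketch of the function and its caller loop under those assumptions:

        #include <linux/skbuff.h>

        static int downgrade_ac(struct sk_buff *skb)
        {
                switch (skb->priority) {
                case 6: case 7: skb->priority = 5; return 0; /* VO -> VI */
                case 4: case 5: skb->priority = 3; return 0; /* VI -> BE */
                case 0: case 3: skb->priority = 1; return 0; /* BE -> BK */
                default:        return -1;      /* already BK: give up */
                }
        }

        /* caller loop, as in rtllib_xmit_inter above */
        static void apply_acm(struct sk_buff *skb, u32 wmm_acm)
        {
                while (wmm_acm & (0x01 << skb->priority))
                        if (downgrade_ac(skb))
                                break;
        }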
skb               548 drivers/staging/rtl8192e/rtllib_tx.c static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
skb               589 drivers/staging/rtl8192e/rtllib_tx.c 		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
skb               591 drivers/staging/rtl8192e/rtllib_tx.c 				    skb->len);
skb               595 drivers/staging/rtl8192e/rtllib_tx.c 		ether_addr_copy(dest, skb->data);
skb               596 drivers/staging/rtl8192e/rtllib_tx.c 		ether_addr_copy(src, skb->data + ETH_ALEN);
skb               598 drivers/staging/rtl8192e/rtllib_tx.c 		memset(skb->cb, 0, sizeof(skb->cb));
skb               599 drivers/staging/rtl8192e/rtllib_tx.c 		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
skb               602 drivers/staging/rtl8192e/rtllib_tx.c 			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
skb               610 drivers/staging/rtl8192e/rtllib_tx.c 			txb->payload_size = cpu_to_le16(skb->len);
skb               611 drivers/staging/rtl8192e/rtllib_tx.c 			skb_put_data(txb->fragments[0], skb->data, skb->len);
skb               616 drivers/staging/rtl8192e/rtllib_tx.c 		if (skb->len > 282) {
skb               619 drivers/staging/rtl8192e/rtllib_tx.c 					((u8 *)skb->data+14);
skb               642 drivers/staging/rtl8192e/rtllib_tx.c 		skb->priority = rtllib_classify(skb, IsAmsdu);
skb               652 drivers/staging/rtl8192e/rtllib_tx.c 			struct eapol *eap = (struct eapol *)(skb->data +
skb               661 drivers/staging/rtl8192e/rtllib_tx.c 		skb_pull(skb, sizeof(struct ethhdr));
skb               664 drivers/staging/rtl8192e/rtllib_tx.c 		bytes = skb->len + SNAP_SIZE + sizeof(u16);
skb               718 drivers/staging/rtl8192e/rtllib_tx.c 			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
skb               720 drivers/staging/rtl8192e/rtllib_tx.c 						skb->priority);
skb               721 drivers/staging/rtl8192e/rtllib_tx.c 				if (wme_downgrade_ac(skb))
skb               724 drivers/staging/rtl8192e/rtllib_tx.c 					   skb->priority);
skb               727 drivers/staging/rtl8192e/rtllib_tx.c 			qos_ctl |= skb->priority;
skb               776 drivers/staging/rtl8192e/rtllib_tx.c 			txb->queue_index = UP2AC(skb->priority);
skb               785 drivers/staging/rtl8192e/rtllib_tx.c 				skb_frag->priority = skb->priority;
skb               786 drivers/staging/rtl8192e/rtllib_tx.c 				tcb_desc->queue_index =  UP2AC(skb->priority);
skb               836 drivers/staging/rtl8192e/rtllib_tx.c 			skb_put_data(skb_frag, skb->data, bytes);
skb               839 drivers/staging/rtl8192e/rtllib_tx.c 			skb_pull(skb, bytes);
skb               854 drivers/staging/rtl8192e/rtllib_tx.c 			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
skb               855 drivers/staging/rtl8192e/rtllib_tx.c 				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
skb               857 drivers/staging/rtl8192e/rtllib_tx.c 				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
skb               865 drivers/staging/rtl8192e/rtllib_tx.c 		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
skb               867 drivers/staging/rtl8192e/rtllib_tx.c 				    skb->len);
skb               871 drivers/staging/rtl8192e/rtllib_tx.c 		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
skb               878 drivers/staging/rtl8192e/rtllib_tx.c 		txb->payload_size = cpu_to_le16(skb->len);
skb               879 drivers/staging/rtl8192e/rtllib_tx.c 		skb_put_data(txb->fragments[0], skb->data, skb->len);
skb               887 drivers/staging/rtl8192e/rtllib_tx.c 		tcb_desc->priority = skb->priority;
skb               941 drivers/staging/rtl8192e/rtllib_tx.c 	dev_kfree_skb_any(skb);
skb               967 drivers/staging/rtl8192e/rtllib_tx.c int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
skb               969 drivers/staging/rtl8192e/rtllib_tx.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb               970 drivers/staging/rtl8192e/rtllib_tx.c 	return rtllib_xmit_inter(skb, dev);
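The rtllib_tx.c data path above strips the Ethernet header, budgets skb->len + SNAP_SIZE + sizeof(u16) for the re-encapsulated payload, splits it across txb->fragments[], and keeps per-AC sequence counters; rtllib_xmit() itself only clears skb->cb before delegating to rtllib_xmit_inter(). A sketch of the per-fragment copy loop, with the fragment array as a hypothetical stand-in for the txb structure:

        #include <linux/skbuff.h>
        #include <linux/kernel.h>

        static void fill_fragments(struct sk_buff **frags, int nr,
                                   struct sk_buff *skb,
                                   unsigned int frag_size)
        {
                int i;

                for (i = 0; i < nr; i++) {
                        unsigned int bytes = min_t(unsigned int,
                                                   frag_size, skb->len);

                        skb_put_data(frags[i], skb->data, bytes);
                        skb_pull(skb, bytes);   /* consume the payload */
                }
        }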
skb               763 drivers/staging/rtl8192u/ieee80211/ieee80211.h 	struct sk_buff *skb;
skb              1916 drivers/staging/rtl8192u/ieee80211/ieee80211.h 	int (*is_qos_active)(struct net_device *dev, struct sk_buff *skb);
skb              1925 drivers/staging/rtl8192u/ieee80211/ieee80211.h 	int (*softmac_hard_start_xmit)(struct sk_buff *skb,
skb              1934 drivers/staging/rtl8192u/ieee80211/ieee80211.h 	void (*softmac_data_hard_start_xmit)(struct sk_buff *skb,
skb              2181 drivers/staging/rtl8192u/ieee80211/ieee80211.h int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
skb              2186 drivers/staging/rtl8192u/ieee80211/ieee80211.h int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
skb              2220 drivers/staging/rtl8192u/ieee80211/ieee80211.h 			       struct sk_buff *skb,
skb              2258 drivers/staging/rtl8192u/ieee80211/ieee80211.h void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee);
skb              2380 drivers/staging/rtl8192u/ieee80211/ieee80211.h int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb);
skb              2381 drivers/staging/rtl8192u/ieee80211/ieee80211.h int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb);
skb              2382 drivers/staging/rtl8192u/ieee80211/ieee80211.h int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb);
skb              2435 drivers/staging/rtl8192u/ieee80211/ieee80211.h int ieee80211_data_xmit(struct sk_buff *skb, struct net_device *dev);
skb                42 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h 	int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
skb                43 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h 	int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
skb                48 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h 	int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
skb                49 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h 	int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
skb               159 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               165 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               167 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	if (skb_headroom(skb) < CCMP_HDR_LEN ||
skb               168 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	    skb_tailroom(skb) < CCMP_MIC_LEN ||
skb               169 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	    skb->len < hdr_len)
skb               172 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	pos = skb_push(skb, CCMP_HDR_LEN);
skb               194 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               201 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
skb               209 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		skb_put(skb, CCMP_MIC_LEN);
skb               213 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN,
skb               228 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               233 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               236 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
skb               241 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               242 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	pos = skb->data + hdr_len;
skb               246 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 			netdev_dbg(skb->dev, "CCMP: received packet without ExtIV flag from %pM\n",
skb               254 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		netdev_dbg(skb->dev, "CCMP: RX tkey->key_idx=%d frame keyidx=%d priv=%p\n",
skb               260 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 			netdev_dbg(skb->dev, "CCMP: received packet from %pM with keyid=%d that does not have a configured key\n",
skb               276 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 			netdev_dbg(skb->dev, "CCMP: replay detected: STA=%pM previous PN %pm received PN %pm\n",
skb               288 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 		size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
skb               309 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 				netdev_dbg(skb->dev, "CCMP: decrypt failed: STA=%pM\n",
skb               319 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
skb               320 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	skb_pull(skb, CCMP_HDR_LEN);
skb               321 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	skb_trim(skb, skb->len - CCMP_MIC_LEN);
skb               286 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               292 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               298 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
skb               299 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	    skb->len < hdr_len)
skb               302 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               315 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	len = skb->len - hdr_len;
skb               316 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	pos = skb_push(skb, 8);
skb               339 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		icv = skb_put(skb, 4);
skb               368 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               375 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               383 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	if (skb->len < hdr_len + 8 + 4)
skb               386 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               387 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	pos = skb->data + hdr_len;
skb               435 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		plen = skb->len - hdr_len - 12;
skb               488 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	memmove(skb->data + 8, skb->data, hdr_len);
skb               489 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	skb_pull(skb, 8);
skb               490 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	skb_trim(skb, skb->len - 4);
skb               522 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
skb               526 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	hdr11 = (struct rtl_80211_hdr_4addr *)skb->data;
skb               553 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
skb               559 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               561 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
skb               564 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		       skb_tailroom(skb), hdr_len, skb->len);
skb               568 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	michael_mic_hdr(skb, tkey->tx_hdr);
skb               573 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
skb               575 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	pos = skb_put(skb, 8);
skb               578 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 				skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
skb               605 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
skb               612 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               617 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	michael_mic_hdr(skb, tkey->rx_hdr);
skb               621 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
skb               625 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 			skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
skb               627 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
skb               629 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               633 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		       skb->dev ? skb->dev->name : "N/A", hdr->addr2,
skb               635 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 		if (skb->dev)
skb               636 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 			ieee80211_michael_mic_failure(skb->dev, hdr, keyidx);
skb               648 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	skb_trim(skb, skb->len - 8);
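
ieee80211_michael_mic_verify() above illustrates the compare-then-trim order: the MIC is recomputed over the payload minus the trailing 8 bytes, compared with memcmp() against what the sender appended, and only on success are those 8 bytes trimmed away (a mismatch instead triggers the michael_mic_failure report). A condensed sketch of the same flow, assuming a compute_mic() helper, a stand-in for the michael_mic() calls in the real file, that fills an 8-byte digest:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/errno.h>

#define MIC_LEN 8

/* provided elsewhere; stands in for michael_mic() in the listing above */
int compute_mic(const u8 *data, size_t len, u8 *mic);

static int verify_and_strip_mic(struct sk_buff *skb, int hdr_len)
{
	u8 mic[MIC_LEN];

	if (skb->len < hdr_len + MIC_LEN)
		return -EINVAL;

	/* the MIC covers the payload but not the trailing MIC itself */
	if (compute_mic(skb->data + hdr_len,
			skb->len - MIC_LEN - hdr_len, mic))
		return -EIO;

	if (memcmp(mic, skb->data + skb->len - MIC_LEN, MIC_LEN) != 0)
		return -EACCES;	/* caller reports the MIC failure event */

	skb_trim(skb, skb->len - MIC_LEN);	/* drop the verified trailer */
	return 0;
}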
skb                81 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb                87 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb                93 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
skb                94 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	    skb->len < hdr_len)
skb                97 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	len = skb->len - hdr_len;
skb                98 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	pos = skb_push(skb, 4);
skb               131 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 		icv = skb_put(skb, 4);
skb               160 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               166 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               172 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	if (skb->len < hdr_len + 8)
skb               175 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	pos = skb->data + hdr_len;
skb               189 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	plen = skb->len - hdr_len - 8;
skb               217 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	memmove(skb->data + 4, skb->data, hdr_len);
skb               218 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	skb_pull(skb, 4);
skb               219 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	skb_trim(skb, skb->len - 4);
skb                43 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 					struct sk_buff *skb,
skb                46 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	struct rtl_80211_hdr_4addr *hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb                49 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	skb->dev = ieee->dev;
skb                50 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	skb_reset_mac_header(skb);
skb                52 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	skb_pull(skb, ieee80211_get_hdrlen(fc));
skb                53 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	skb->pkt_type = PACKET_OTHERHOST;
skb                54 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	skb->protocol = htons(ETH_P_80211_RAW);
skb                55 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb                56 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	netif_rx(skb);
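
ieee80211_monitor_rx() is close to the minimum a driver must do before handing a raw 802.11 frame to netif_rx(): claim the skb for the capture device, mark where the MAC header starts, pull the header the upper layers should not parse, tag packet type and protocol, and scrub skb->cb so stale driver state cannot leak upstream. A condensed sketch of that hand-off, assuming hdrlen was already derived from the frame control field as above:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

/* Hand a raw 802.11 frame to a monitor interface. */
static void deliver_to_monitor(struct net_device *dev, struct sk_buff *skb,
			       unsigned int hdrlen)
{
	skb->dev = dev;			/* frame now belongs to this netdev */
	skb_reset_mac_header(skb);	/* data currently points at the header */
	skb_pull(skb, hdrlen);		/* stack sees only the payload */
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_80211_RAW);
	memset(skb->cb, 0, sizeof(skb->cb));	/* scrub driver scratch area */
	netif_rx(skb);
}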
skb                70 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		if (entry->skb &&
skb                76 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			dev_kfree_skb_any(entry->skb);
skb                77 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			entry->skb = NULL;
skb                80 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		if (entry->skb && entry->seq == seq &&
skb                95 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	struct sk_buff *skb = NULL;
skb               121 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		skb = dev_alloc_skb(ieee->dev->mtu +
skb               128 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		if (!skb)
skb               136 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		if (entry->skb)
skb               137 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			dev_kfree_skb_any(entry->skb);
skb               142 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		entry->skb = skb;
skb               152 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			skb = entry->skb;
skb               156 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	return skb;
skb               196 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	entry->skb = NULL;
skb               208 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
skb               216 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	struct rtl_80211_hdr_3addr *hdr = (struct rtl_80211_hdr_3addr *)skb->data;
skb               218 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	rx_stats->len = skb->len;
skb               219 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	ieee80211_rx_mgt(ieee, (struct rtl_80211_hdr_4addr *)skb->data, rx_stats);
skb               223 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		dev_kfree_skb_any(skb);
skb               227 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	ieee80211_rx_frame_softmac(ieee, skb, rx_stats, type, stype);
skb               229 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	dev_kfree_skb_any(skb);
skb               249 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
skb               257 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		ieee->apdevstats.rx_bytes += skb->len;
skb               258 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT);
skb               266 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			       skb->dev->name, type, stype);
skb               270 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		hostap_rx(skb->dev, skb, rx_stats);
skb               275 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	       "received in non-Host AP mode\n", skb->dev->name);
skb               293 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 				    struct sk_buff *skb, size_t hdrlen)
skb               300 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	if (skb->len < 24)
skb               303 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               319 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	if (skb->len < 24 + 8)
skb               324 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	pos = skb->data + hdrlen;
skb               334 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
skb               343 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               346 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               360 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
skb               369 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 					     skb->data[hdrlen + 3] >> 6);
skb               380 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *skb,
skb               389 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               393 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               397 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
skb               752 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c static u8 parse_subframe(struct sk_buff *skb,
skb               756 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	struct rtl_80211_hdr_3addr  *hdr = (struct rtl_80211_hdr_3addr *)skb->data;
skb               771 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			(((frameqos *)(skb->data + IEEE80211_3ADDR_LEN))->field.reserved)) {
skb               785 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	if (skb->len <= ChkLength)
skb               788 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	skb_pull(skb, LLCOffset);
skb               793 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		rxb->subframes[0] = skb;
skb               795 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		rxb->subframes[0] = skb_copy(skb, GFP_ATOMIC);
skb               806 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		while (skb->len > ETHERNET_HEADER_SIZE) {
skb               808 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			nSubframe_Length = *((u16 *)(skb->data + 12));
skb               812 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
skb               816 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 				printk("nRemain_Length is %d and nSubframe_Length is : %d\n", skb->len, nSubframe_Length);
skb               822 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			skb_pull(skb, ETHERNET_HEADER_SIZE);
skb               825 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			sub_skb = skb_clone(skb, GFP_ATOMIC);
skb               834 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			skb_put_data(sub_skb, skb->data, nSubframe_Length);
skb               841 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			skb_pull(skb, nSubframe_Length);
skb               843 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			if (skb->len != 0) {
skb               849 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 				if (skb->len < nPadding_Length) {
skb               853 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 				skb_pull(skb, nPadding_Length);
skb               857 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		dev_kfree_skb(skb);
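
parse_subframe() walks an A-MSDU aggregate: read the 802.3-style subframe header, detach the payload into its own skb, then advance past the payload and its 4-byte alignment padding; the length checks and the printk above guard each step. A simplified version of the loop, assuming the 16-bit length at offset 12 is big-endian as in 802.3 and leaving the rxb bookkeeping to the caller; walk_amsdu() is a hypothetical name:

#include <linux/skbuff.h>
#include <linux/errno.h>
#include <asm/unaligned.h>

#define AMSDU_HDR_LEN 14	/* DA + SA + 16-bit length, per subframe */

/* Split one A-MSDU payload into per-subframe skbs; caller owns the array. */
static int walk_amsdu(struct sk_buff *skb, struct sk_buff **frames, int max)
{
	int n = 0;

	while (skb->len > AMSDU_HDR_LEN && n < max) {
		/* length field sits after the two 6-byte addresses */
		u16 sublen = get_unaligned_be16(skb->data + 12);
		struct sk_buff *sub;
		unsigned int pad;

		if (skb->len < AMSDU_HDR_LEN + sublen)
			return -EINVAL;	/* truncated aggregate */

		skb_pull(skb, AMSDU_HDR_LEN);
		sub = dev_alloc_skb(sublen);
		if (!sub)
			return -ENOMEM;
		skb_put_data(sub, skb->data, sublen);
		frames[n++] = sub;
		skb_pull(skb, sublen);

		/* subframes are padded to a 4-byte boundary, except the last */
		pad = (4 - ((AMSDU_HDR_LEN + sublen) & 3)) & 3;
		if (skb->len < pad)
			break;
		skb_pull(skb, pad);
	}
	return n;
}

The final subframe carries no padding, which is why the loop tolerates running out of bytes at the padding step instead of treating it as an error.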
skb               869 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
skb               903 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb               906 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	if (skb->len < 10) {
skb               920 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	if (HTCCheck(ieee, skb->data)) {
skb               946 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		ieee80211_monitor_rx(ieee, skb, rx_stats);
skb               948 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		stats->rx_bytes += skb->len;
skb               954 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		if (skb->len >= hdrlen + 3)
skb               955 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			idx = skb->data[hdrlen + 3] >> 6;
skb               990 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	if (skb->len < IEEE80211_DATA_HDR3_LEN)
skb               994 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	if ((!ieee->pHTInfo->bCurRxReorderEnable) || !ieee->current_network.qos_data.active || !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) {
skb              1005 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 				Frame_QoSTID((u8 *)(skb->data)),
skb              1027 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
skb              1046 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		if (skb->len < IEEE80211_DATA_HDR4_LEN)
skb              1063 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		skb->dev = dev = wds;
skb              1072 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		skb->dev = dev = ieee->stadev;
skb              1080 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
skb              1106 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 				type, stype, skb->len);
skb              1115 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	    (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
skb              1121 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb              1139 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		flen = skb->len;
skb              1154 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			skb_put_data(frag_skb, skb->data, flen);
skb              1158 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			skb_put_data(frag_skb, skb->data + hdrlen, flen);
skb              1160 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		dev_kfree_skb_any(skb);
skb              1161 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		skb = NULL;
skb              1172 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		skb = frag_skb;
skb              1173 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb              1180 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	    ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
skb              1189 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	hdr = (struct rtl_80211_hdr_4addr *)skb->data;
skb              1192 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		    ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
skb              1197 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			struct eapol *eap = (struct eapol *)(skb->data +
skb              1213 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	    ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
skb              1214 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		struct eapol *eap = (struct eapol *)(skb->data +
skb              1222 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	    !ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
skb              1236 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
skb              1238 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		TID = Frame_QoSTID(skb->data);
skb              1253 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	if (parse_subframe(skb, rx_stats, rxb, src, dst) == 0) {
skb              1314 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	dev_kfree_skb(skb);
skb               138 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
skb               152 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	ieee->mgmt_queue_ring[nh] = skb;
skb               212 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
skb               217 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		(struct rtl_80211_hdr_3addr  *)skb->data;
skb               219 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               234 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			enqueue_mgmt(ieee, skb);
skb               245 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
skb               270 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			skb_queue_tail(&ieee->skb_waitQ[tcb_desc->queue_index], skb);
skb               272 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			ieee->softmac_hard_start_xmit(skb, ieee->dev);
skb               280 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
skb               284 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		(struct rtl_80211_hdr_3addr  *)skb->data;
skb               296 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
skb               305 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		ieee->softmac_hard_start_xmit(skb, ieee->dev);
skb               314 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb               321 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = dev_alloc_skb(sizeof(struct ieee80211_probe_request) +
skb               323 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb)
skb               326 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb               328 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	req = skb_put(skb, sizeof(struct ieee80211_probe_request));
skb               336 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	tag = skb_put(skb, len + 2 + rate_len);
skb               345 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	return skb;
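
ieee80211_probe_req() follows the allocation discipline every frame builder in this file uses: size the skb for the frame plus ieee->tx_headroom, skb_reserve() that headroom so the tx path can later prepend a hardware descriptor in place, then lay the frame down front to back with skb_put(). The skeleton of that discipline, with struct my_frame standing in for the real probe-request layout:

#include <linux/skbuff.h>

struct my_frame {		/* stand-in for ieee80211_probe_request */
	__le16 frame_ctl;
	__le16 duration;
	u8 addrs[18];
	__le16 seq_ctl;
} __packed;

static struct sk_buff *build_mgmt_frame(unsigned int tx_headroom,
					unsigned int ies_len)
{
	struct sk_buff *skb;
	struct my_frame *hdr;

	skb = dev_alloc_skb(sizeof(*hdr) + ies_len + tx_headroom);
	if (!skb)
		return NULL;

	/* leave room so the tx path can skb_push() its hardware header */
	skb_reserve(skb, tx_headroom);

	hdr = skb_put(skb, sizeof(*hdr));	/* fixed part of the frame */
	/* ... fill hdr, then skb_put(skb, ies_len) for the tagged IEs ... */

	return skb;
}

Reserving the headroom up front is what lets the later skb_push(skb, priv->ieee80211->tx_headroom) in the USB tx path (see r8192U_core.c below) be a pointer adjustment rather than a reallocation.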
skb               352 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb               357 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = ieee80211_get_beacon_(ieee);
skb               359 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (skb) {
skb               360 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb               390 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb               392 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = ieee80211_probe_req(ieee);
skb               393 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (skb) {
skb               394 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb               618 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb               622 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = dev_alloc_skb(len);
skb               623 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb)
skb               626 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb               627 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	auth = skb_put(skb, sizeof(struct ieee80211_authentication));
skb               654 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	return skb;
skb               662 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb = NULL;
skb               726 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = dev_alloc_skb(beacon_size);
skb               727 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb)
skb               729 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb               730 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	beacon_buf = skb_put(skb, (beacon_size - ieee->tx_headroom));
skb               799 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	return skb;
skb               805 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb               815 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = dev_alloc_skb(len);
skb               817 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb)
skb               820 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb               822 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	assoc = skb_put(skb, sizeof(struct ieee80211_assoc_response_frame));
skb               851 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	tag = skb_put(skb, rate_len);
skb               856 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	return skb;
skb               862 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb               866 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = dev_alloc_skb(len);
skb               868 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb)
skb               871 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb->len = sizeof(struct ieee80211_authentication);
skb               873 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	auth = (struct ieee80211_authentication *)skb->data;
skb               883 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	return skb;
skb               889 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb               892 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = dev_alloc_skb(sizeof(struct rtl_80211_hdr_3addr));
skb               894 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb)
skb               897 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	hdr = skb_put(skb, sizeof(struct rtl_80211_hdr_3addr));
skb               907 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	return skb;
skb               938 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb              1020 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = dev_alloc_skb(len);
skb              1022 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb)
skb              1025 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb_reserve(skb, ieee->tx_headroom);
skb              1027 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	hdr = skb_put(skb, sizeof(struct ieee80211_assoc_request_frame) + 2);
skb              1054 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb_put_data(skb, beacon->ssid, beacon->ssid_len);
skb              1056 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	tag = skb_put(skb, rate_len);
skb              1079 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		tag = skb_put(skb, ckip_ie_len);
skb              1092 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		tag = skb_put(skb, ccxrm_ie_len);
skb              1105 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		tag = skb_put(skb, cxvernum_ie_len);
skb              1114 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			tag = skb_put(skb, ht_cap_len);
skb              1124 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		skb_put_data(skb, ieee->wpa_ie, wpa_ie_len);
skb              1127 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		tag = skb_put(skb, wmm_info_len);
skb              1132 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		tag = skb_put(skb, turbo_info_len);
skb              1139 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			tag = skb_put(skb, ht_cap_len);
skb              1147 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			tag = skb_put(skb, realtek_ie_len);
skb              1155 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	return skb;
skb              1197 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb              1202 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = ieee80211_authentication_req(beacon, ieee, 0);
skb              1204 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb) {
skb              1209 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb              1224 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb              1231 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = ieee80211_authentication_req(beacon, ieee, chlen + 2);
skb              1232 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb) {
skb              1235 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		c = skb_put(skb, chlen + 2);
skb              1242 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		ieee80211_encrypt_fragment(ieee, skb, sizeof(struct rtl_80211_hdr_3addr));
skb              1244 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb              1253 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb              1261 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = ieee80211_association_req(beacon, ieee);
skb              1262 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb) {
skb              1265 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb              1460 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
skb              1464 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
skb              1465 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
skb              1469 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	a = (struct ieee80211_authentication *)skb->data;
skb              1470 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (skb->len > (sizeof(struct ieee80211_authentication) + 3)) {
skb              1471 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		t = skb->data + sizeof(struct ieee80211_authentication);
skb              1484 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
skb              1488 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
skb              1489 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n", skb->len);
skb              1492 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	a = (struct ieee80211_authentication *)skb->data;
skb              1502 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *src)
skb              1510 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		(struct rtl_80211_hdr_3addr   *)skb->data;
skb              1512 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (skb->len < sizeof(struct rtl_80211_hdr_3addr))
skb              1517 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skbend = (u8 *)skb->data + skb->len;
skb              1519 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	tag = skb->data + sizeof(struct rtl_80211_hdr_3addr);
skb              1542 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
skb              1546 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (skb->len < (sizeof(struct ieee80211_assoc_request_frame) -
skb              1548 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		IEEE80211_DEBUG_MGMT("invalid len in auth request:%d \n", skb->len);
skb              1552 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	a = (struct ieee80211_assoc_request_frame *)skb->data;
skb              1559 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c static inline u16 assoc_parse(struct ieee80211_device *ieee, struct sk_buff *skb, int *aid)
skb              1564 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (skb->len < sizeof(struct ieee80211_assoc_response_frame)) {
skb              1565 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
skb              1569 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	response_head = (struct ieee80211_assoc_response_frame *)skb->data;
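
auth_parse(), auth_rq_parse(), assoc_rq_parse() and assoc_parse() all apply the same defensive rule: compare skb->len against the fixed-size portion of the management frame before casting skb->data to a struct, and treat anything beyond that as optional information elements. A compact version of the pattern; struct my_mgmt_fixed is a hypothetical reduced layout:

#include <linux/skbuff.h>
#include <linux/errno.h>

struct my_mgmt_fixed {		/* fixed-size prefix of the frame */
	__le16 frame_ctl;
	__le16 status;
};

static int parse_mgmt(struct sk_buff *skb, u8 **elems, size_t *elems_len)
{
	struct my_mgmt_fixed *f;

	/* never dereference skb->data as a struct before this check */
	if (skb->len < sizeof(*f))
		return -EINVAL;

	f = (struct my_mgmt_fixed *)skb->data;
	if (le16_to_cpu(f->status) != 0)
		return -EIO;

	/* anything past the fixed part is optional information elements */
	*elems = skb->data + sizeof(*f);
	*elems_len = skb->len - sizeof(*f);
	return 0;
}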
skb              1587 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c ieee80211_rx_probe_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
skb              1594 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (probe_rq_parse(ieee, skb, dest)) {
skb              1602 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
skb              1609 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	status = auth_rq_parse(skb, dest);
skb              1616 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
skb              1622 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (assoc_rq_parse(skb, dest) != -1)
skb              1792 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 				     struct sk_buff *skb)
skb              1794 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct rtl_80211_hdr *header = (struct rtl_80211_hdr *)skb->data;
skb              1807 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			ieee80211_rx_ADDBAReq(ieee, skb);
skb              1809 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			ieee80211_rx_ADDBARsp(ieee, skb);
skb              1811 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			ieee80211_rx_DELBA(ieee, skb);
skb              1820 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 					  struct sk_buff *skb)
skb              1829 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	errcode = auth_parse(skb, &challenge, &chlen);
skb              1879 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
skb              1883 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct rtl_80211_hdr_3addr *header = (struct rtl_80211_hdr_3addr *)skb->data;
skb              1912 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			errcode = assoc_parse(ieee, skb, &aid);
skb              1920 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 					assoc_resp = (struct ieee80211_assoc_response_frame *)skb->data;
skb              1954 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			ieee80211_rx_assoc_rq(ieee, skb);
skb              1962 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 				ieee80211_check_auth_response(ieee, skb);
skb              1964 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 				ieee80211_rx_auth_rq(ieee, skb);
skb              1974 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			ieee80211_rx_probe_rq(ieee, skb);
skb              1996 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		ieee80211_process_action(ieee, skb);
skb              2117 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb              2127 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))) {
skb              2128 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			header = (struct rtl_80211_hdr_3addr  *)skb->data;
skb              2137 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 			ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
skb              2412 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb              2415 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = ieee80211_probe_resp(ieee, broadcast_addr);
skb              2417 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb)
skb              2420 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	b = (struct ieee80211_probe_response *)skb->data;
skb              2423 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	return skb;
skb              2428 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb              2431 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = ieee80211_get_beacon_(ieee);
skb              2432 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb)
skb              2435 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	b = (struct ieee80211_probe_response *)skb->data;
skb              2443 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	return skb;
skb              2941 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb              2944 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = dev_alloc_skb(sizeof(struct ieee80211_disassoc));
skb              2945 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (!skb)
skb              2948 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	disass = skb_put(skb, sizeof(struct ieee80211_disassoc));
skb              2957 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	return skb;
skb              2967 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	struct sk_buff *skb;
skb              2969 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	skb = ieee80211_disassociate_skb(beacon, ieee, asRsn);
skb              2970 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (skb) {
skb              2971 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		softmac_mgmt_xmit(skb, ieee);
skb               261 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
skb               265 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 	eth = (struct ethhdr *)skb->data;
skb               269 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 	ip = ip_hdr(skb);
skb               291 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 				       struct sk_buff *skb, struct cb_desc *tcb_desc)
skb               295 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 	struct rtl_80211_hdr_1addr *hdr = (struct rtl_80211_hdr_1addr *)skb->data;
skb               299 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 	if (!IsQoSDataFrame(skb->data))
skb               309 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true)) {
skb               405 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 					   struct sk_buff *skb)
skb               417 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 	if (is_broadcast_ether_addr(skb->data + 16))  //check addr3 as infrastructure add3 is DA.
skb               425 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		if (skb->len > ieee->rts) {
skb               457 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 			if (skb->len > ieee->rts) {
skb               517 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 				   struct sk_buff *skb, u8 *dst)
skb               521 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 	if (IsQoSDataFrame(skb->data)) /* we deal qos data only */ {
skb               523 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, skb->priority, TX_DIR, true)) {
skb               530 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
skb               567 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
skb               569 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 			ieee->dev->name, skb->len);
skb               573 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		memset(skb->cb, 0, sizeof(skb->cb));
skb               574 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
skb               588 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 			struct eapol *eap = (struct eapol *)(skb->data +
skb               596 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		memcpy(&dest, skb->data, ETH_ALEN);
skb               597 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN);
skb               600 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		skb_pull(skb, sizeof(struct ethhdr));
skb               603 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		bytes = skb->len + SNAP_SIZE + sizeof(u16);
skb               651 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 			skb->priority = ieee80211_classify(skb, &ieee->current_network);
skb               652 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 			qos_ctl |= skb->priority; //set in the ieee80211_classify
skb               697 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 			txb->queue_index = UP2AC(skb->priority);
skb               707 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 				skb_frag->priority = skb->priority;//UP2AC(skb->priority);
skb               708 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 				tcb_desc->queue_index =  UP2AC(skb->priority);
skb               741 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority) + 1] << 4 | i);
skb               754 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 			skb_put_data(skb_frag, skb->data, bytes);
skb               757 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 			skb_pull(skb, bytes);
skb               770 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
skb               771 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
skb               773 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
skb               781 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		if (unlikely(skb->len < sizeof(struct rtl_80211_hdr_3addr))) {
skb               783 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 			ieee->dev->name, skb->len);
skb               787 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
skb               795 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		txb->payload_size = __cpu_to_le16(skb->len);
skb               796 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		skb_put_data(txb->fragments[0], skb->data, skb->len);
skb               821 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 	dev_kfree_skb_any(skb);
skb               110 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	struct sk_buff *skb = NULL;
skb               120 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	skb = dev_alloc_skb(len + sizeof(struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
skb               121 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	if (!skb)
skb               124 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	memset(skb->data, 0, sizeof(struct rtl_80211_hdr_3addr));	//I wonder whether it's necessary. Apparently kernel will not do it when alloc a skb.
skb               125 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	skb_reserve(skb, ieee->tx_headroom);
skb               127 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	BAReq = skb_put(skb, sizeof(struct rtl_80211_hdr_3addr));
skb               137 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	tag = skb_put(skb, 9);
skb               165 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
skb               166 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	return skb;
skb               189 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	struct sk_buff *skb = NULL;
skb               205 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	skb = dev_alloc_skb(len + sizeof(struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
skb               206 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	if (!skb)
skb               209 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	skb_reserve(skb, ieee->tx_headroom);
skb               211 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	Delba = skb_put(skb, sizeof(struct rtl_80211_hdr_3addr));
skb               218 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	tag = skb_put(skb, 6);
skb               232 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
skb               236 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	return skb;
skb               249 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	struct sk_buff *skb;
skb               250 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	skb = ieee80211_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ); //construct ACT_ADDBAREQ frames so set statuscode zero.
skb               252 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	if (skb) {
skb               253 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 		softmac_mgmt_xmit(skb, ieee);
skb               273 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	struct sk_buff *skb;
skb               274 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	skb = ieee80211_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP); //construct ACT_ADDBARSP frames
skb               275 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	if (skb) {
skb               276 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 		softmac_mgmt_xmit(skb, ieee);
skb               299 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	struct sk_buff *skb;
skb               300 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	skb = ieee80211_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode); //construct ACT_ADDBARSP frames
skb               301 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	if (skb) {
skb               302 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 		softmac_mgmt_xmit(skb, ieee);
skb               315 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
skb               326 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	if (skb->len < sizeof(struct rtl_80211_hdr_3addr) + 9) {
skb               329 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 				skb->len,
skb               334 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
skb               336 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	req = (struct rtl_80211_hdr_3addr *)skb->data;
skb               415 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
skb               425 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	if (skb->len < sizeof(struct rtl_80211_hdr_3addr) + 9) {
skb               428 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 				skb->len,
skb               432 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	rsp = (struct rtl_80211_hdr_3addr *)skb->data;
skb               539 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
skb               545 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	if (skb->len < sizeof(struct rtl_80211_hdr_3addr) + 6) {
skb               548 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 				skb->len,
skb               559 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
skb               560 drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c 	delba = (struct rtl_80211_hdr_3addr *)skb->data;
skb              1092 drivers/staging/rtl8192u/r8192U.h short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb);
skb              1093 drivers/staging/rtl8192u/r8192U.h short rtl8192_tx(struct net_device *dev, struct sk_buff *skb);
skb               720 drivers/staging/rtl8192u/r8192U_core.c 	struct sk_buff *skb;
skb               725 drivers/staging/rtl8192u/r8192U_core.c 		skb = __dev_alloc_skb(RX_URB_SIZE, GFP_KERNEL);
skb               726 drivers/staging/rtl8192u/r8192U_core.c 		if (!skb)
skb               730 drivers/staging/rtl8192u/r8192U_core.c 			kfree_skb(skb);
skb               735 drivers/staging/rtl8192u/r8192U_core.c 				  skb_tail_pointer(skb),
skb               736 drivers/staging/rtl8192u/r8192U_core.c 				  RX_URB_SIZE, rtl8192_rx_isr, skb);
skb               737 drivers/staging/rtl8192u/r8192U_core.c 		info = (struct rtl8192_rx_info *)skb->cb;
skb               741 drivers/staging/rtl8192u/r8192U_core.c 		skb_queue_tail(&priv->rx_queue, skb);
skb               747 drivers/staging/rtl8192u/r8192U_core.c 		skb = __dev_alloc_skb(RX_URB_SIZE, GFP_KERNEL);
skb               748 drivers/staging/rtl8192u/r8192U_core.c 		if (!skb)
skb               752 drivers/staging/rtl8192u/r8192U_core.c 			kfree_skb(skb);
skb               757 drivers/staging/rtl8192u/r8192U_core.c 				  skb_tail_pointer(skb),
skb               758 drivers/staging/rtl8192u/r8192U_core.c 				  RX_URB_SIZE, rtl8192_rx_isr, skb);
skb               759 drivers/staging/rtl8192u/r8192U_core.c 		info = (struct rtl8192_rx_info *)skb->cb;
skb               763 drivers/staging/rtl8192u/r8192U_core.c 		skb_queue_tail(&priv->rx_queue, skb);
skb               815 drivers/staging/rtl8192u/r8192U_core.c 	struct sk_buff *skb;
skb               823 drivers/staging/rtl8192u/r8192U_core.c 	while ((skb = __skb_dequeue(&priv->rx_queue))) {
skb               824 drivers/staging/rtl8192u/r8192U_core.c 		info = (struct rtl8192_rx_info *)skb->cb;
skb               829 drivers/staging/rtl8192u/r8192U_core.c 		kfree_skb(skb);
skb               841 drivers/staging/rtl8192u/r8192U_core.c 	struct sk_buff *skb = (struct sk_buff *)urb->context;
skb               842 drivers/staging/rtl8192u/r8192U_core.c 	struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
skb               858 drivers/staging/rtl8192u/r8192U_core.c 	skb_unlink(skb, &priv->rx_queue);
skb               859 drivers/staging/rtl8192u/r8192U_core.c 	skb_put(skb, urb->actual_length);
skb               861 drivers/staging/rtl8192u/r8192U_core.c 	skb_queue_tail(&priv->skb_queue, skb);
skb               864 drivers/staging/rtl8192u/r8192U_core.c 	skb = dev_alloc_skb(RX_URB_SIZE);
skb               865 drivers/staging/rtl8192u/r8192U_core.c 	if (unlikely(!skb)) {
skb               874 drivers/staging/rtl8192u/r8192U_core.c 			  skb_tail_pointer(skb),
skb               875 drivers/staging/rtl8192u/r8192U_core.c 			  RX_URB_SIZE, rtl8192_rx_isr, skb);
skb               877 drivers/staging/rtl8192u/r8192U_core.c 	info = (struct rtl8192_rx_info *)skb->cb;
skb               882 drivers/staging/rtl8192u/r8192U_core.c 	urb->transfer_buffer = skb_tail_pointer(skb);
skb               883 drivers/staging/rtl8192u/r8192U_core.c 	urb->context = skb;
skb               884 drivers/staging/rtl8192u/r8192U_core.c 	skb_queue_tail(&priv->rx_queue, skb);
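
The receive setup in r8192U_core.c keeps one URB per rx skb: the skb's unused tail (skb_tail_pointer()) is the bulk-in DMA target, the skb rides along as the URB context so the completion handler can skb_put() the actual length, and the skb is also queued on rx_queue so teardown can find and free it. A sketch of priming one such buffer, with the buffer size and completion handler passed in rather than the driver's RX_URB_SIZE and rtl8192_rx_isr; error unwinding is abbreviated:

#include <linux/skbuff.h>
#include <linux/usb.h>
#include <linux/errno.h>

/* Queue one receive buffer on a bulk-in endpoint. */
static int prime_rx_urb(struct usb_device *udev, int ep, unsigned int bufsz,
			struct sk_buff_head *rx_queue,
			void (*isr)(struct urb *))
{
	struct sk_buff *skb;
	struct urb *urb;

	skb = __dev_alloc_skb(bufsz, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* hardware DMA lands in the skb's tailroom; skb_put() happens in
	 * the completion handler once urb->actual_length is known */
	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep),
			  skb_tail_pointer(skb), bufsz, isr, skb);

	skb_queue_tail(rx_queue, skb);	/* so the teardown path can find it */
	return usb_submit_urb(urb, GFP_KERNEL);
}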
skb               919 drivers/staging/rtl8192u/r8192U_core.c static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
skb               925 drivers/staging/rtl8192u/r8192U_core.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               933 drivers/staging/rtl8192u/r8192U_core.c 	*(struct net_device **)(skb->cb) = dev;
skb               935 drivers/staging/rtl8192u/r8192U_core.c 	skb_push(skb, priv->ieee80211->tx_headroom);
skb               936 drivers/staging/rtl8192u/r8192U_core.c 	ret = rtl8192_tx(dev, skb);
skb               946 drivers/staging/rtl8192u/r8192U_core.c static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               951 drivers/staging/rtl8192u/r8192U_core.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               957 drivers/staging/rtl8192u/r8192U_core.c 	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb               959 drivers/staging/rtl8192u/r8192U_core.c 		skb_push(skb, USB_HWDESC_HEADER_LEN);
skb               960 drivers/staging/rtl8192u/r8192U_core.c 		rtl819xU_tx_cmd(dev, skb);
skb               963 drivers/staging/rtl8192u/r8192U_core.c 		skb_push(skb, priv->ieee80211->tx_headroom);
skb               964 drivers/staging/rtl8192u/r8192U_core.c 		ret = rtl8192_tx(dev, skb);
skb               974 drivers/staging/rtl8192u/r8192U_core.c 	struct sk_buff *skb = (struct sk_buff *)tx_urb->context;
skb               980 drivers/staging/rtl8192u/r8192U_core.c 	if (!skb)
skb               983 drivers/staging/rtl8192u/r8192U_core.c 	dev = *(struct net_device **)(skb->cb);
skb               984 drivers/staging/rtl8192u/r8192U_core.c 	tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb               995 drivers/staging/rtl8192u/r8192U_core.c 				(skb->len - priv->ieee80211->tx_headroom);
skb              1003 drivers/staging/rtl8192u/r8192U_core.c 	dev_kfree_skb_any(skb);
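
rtl8192_hard_data_xmit() and rtl8192_tx_isr() rely on a layout convention inside the 48-byte skb->cb scratch area: the first MAX_DEV_ADDR_SIZE bytes hold the owning net_device pointer, and the struct cb_desc with queue/rate state sits immediately after it, which is why every cast in this driver reads skb->cb + MAX_DEV_ADDR_SIZE. A sketch of that convention with a deliberately reduced descriptor; the real cb_desc carries many more fields, and MAX_DEV_ADDR_SIZE is taken from the rtl8192u headers:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/bug.h>

#define MAX_DEV_ADDR_SIZE 8	/* value used by this driver family */

struct cb_desc {		/* reduced: the real struct carries more */
	u8 queue_index;
	u8 data_rate;
};

static void stash_tx_state(struct sk_buff *skb, struct net_device *dev,
			   u8 queue, u8 rate)
{
	struct cb_desc *d = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);

	/* both slices must fit in the fixed-size cb area */
	BUILD_BUG_ON(MAX_DEV_ADDR_SIZE + sizeof(struct cb_desc) >
		     sizeof(skb->cb));

	*(struct net_device **)skb->cb = dev;	/* slot 1: owner netdev */
	d->queue_index = queue;			/* slot 2: tx descriptor */
	d->data_rate = rate;
}

The completion handler then recovers both pieces from the same skb, as the rtl8192_tx_isr() lines above show.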
skb              1024 drivers/staging/rtl8192u/r8192U_core.c 			skb = skb_dequeue(&(priv->ieee80211->skb_waitQ[queue_index]));
skb              1025 drivers/staging/rtl8192u/r8192U_core.c 			if (skb)
skb              1026 drivers/staging/rtl8192u/r8192U_core.c 				priv->ieee80211->softmac_hard_start_xmit(skb,
skb              1194 drivers/staging/rtl8192u/r8192U_core.c short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
skb              1200 drivers/staging/rtl8192u/r8192U_core.c 	struct tx_desc_cmd_819x_usb *pdesc = (struct tx_desc_cmd_819x_usb *)skb->data;
skb              1201 drivers/staging/rtl8192u/r8192U_core.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb              1207 drivers/staging/rtl8192u/r8192U_core.c 		dev_kfree_skb(skb);
skb              1227 drivers/staging/rtl8192u/r8192U_core.c 			  skb->data, skb->len, rtl8192_tx_isr, skb);
skb              1416 drivers/staging/rtl8192u/r8192U_core.c short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
skb              1419 drivers/staging/rtl8192u/r8192U_core.c 	struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb              1420 drivers/staging/rtl8192u/r8192U_core.c 	struct tx_desc_819x_usb *tx_desc = (struct tx_desc_819x_usb *)skb->data;
skb              1422 drivers/staging/rtl8192u/r8192U_core.c 		(struct tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
skb              1436 drivers/staging/rtl8192u/r8192U_core.c 		dev_kfree_skb_any(skb);
skb              1442 drivers/staging/rtl8192u/r8192U_core.c 		dev_kfree_skb_any(skb);
skb              1498 drivers/staging/rtl8192u/r8192U_core.c 	tx_desc->PktSize = (skb->len - TX_PACKET_SHIFT_BYTES) & 0xffff;
skb              1542 drivers/staging/rtl8192u/r8192U_core.c 	tx_desc->TxBufferSize = (u32)(skb->len - USB_HWDESC_HEADER_LEN);
skb              1547 drivers/staging/rtl8192u/r8192U_core.c 			  usb_sndbulkpipe(udev, idx_pipe), skb->data,
skb              1548 drivers/staging/rtl8192u/r8192U_core.c 			  skb->len, rtl8192_tx_isr, skb);
skb              1561 drivers/staging/rtl8192u/r8192U_core.c 			if (skb->len > 0 && skb->len % 512 == 0)
skb              1564 drivers/staging/rtl8192u/r8192U_core.c 			if (skb->len > 0 && skb->len % 64 == 0)
skb              1595 drivers/staging/rtl8192u/r8192U_core.c 	dev_kfree_skb_any(skb);
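
The skb->len % 512 and % 64 checks near the end of rtl8192_tx() handle a USB bulk-endpoint rule: a transfer whose byte count is an exact multiple of the endpoint's max packet size (512 at high speed, 64 at full speed) is only terminated once a zero-length packet follows it. A sketch of requesting that with the standard URB_ZERO_PACKET flag; the flag is USB core API, the surrounding names are placeholders:

#include <linux/skbuff.h>
#include <linux/usb.h>

static int submit_tx_skb(struct usb_device *udev, int ep, struct urb *urb,
			 struct sk_buff *skb, void (*done)(struct urb *))
{
	unsigned int maxp = (udev->speed == USB_SPEED_HIGH) ? 512 : 64;

	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep),
			  skb->data, skb->len, done, skb);

	/* a transfer that fills whole packets exactly needs a trailing
	 * zero-length packet so the device knows it has ended */
	if (skb->len && (skb->len % maxp) == 0)
		urb->transfer_flags |= URB_ZERO_PACKET;

	return usb_submit_urb(urb, GFP_ATOMIC);
}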
skb              4308 drivers/staging/rtl8192u/r8192U_core.c static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
skb              4315 drivers/staging/rtl8192u/r8192U_core.c 	struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
skb              4330 drivers/staging/rtl8192u/r8192U_core.c 	tmp_buf = (u8 *)skb->data;
skb              4502 drivers/staging/rtl8192u/r8192U_core.c static void query_rxdesc_status(struct sk_buff *skb,
skb              4506 drivers/staging/rtl8192u/r8192U_core.c 	struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
skb              4512 drivers/staging/rtl8192u/r8192U_core.c 	struct rx_desc_819x_usb *desc = (struct rx_desc_819x_usb *)skb->data;
skb              4537 drivers/staging/rtl8192u/r8192U_core.c 				skb->data
skb              4583 drivers/staging/rtl8192u/r8192U_core.c 	skb_pull(skb, sizeof(struct rx_desc_819x_usb));
skb              4587 drivers/staging/rtl8192u/r8192U_core.c 		skb_pull(skb, stats->RxBufShift + stats->RxDrvInfoSize);
skb              4592 drivers/staging/rtl8192u/r8192U_core.c 		TranslateRxSignalStuff819xUsb(skb, stats, driver_info);
skb              4596 drivers/staging/rtl8192u/r8192U_core.c static void rtl8192_rx_nomal(struct sk_buff *skb)
skb              4598 drivers/staging/rtl8192u/r8192U_core.c 	struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
skb              4612 drivers/staging/rtl8192u/r8192U_core.c 	if ((skb->len >= (20 + sizeof(struct rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
skb              4614 drivers/staging/rtl8192u/r8192U_core.c 		query_rxdesc_status(skb, &stats, false);
skb              4618 drivers/staging/rtl8192u/r8192U_core.c 		skb_trim(skb, skb->len - 4/*sCrcLng*/);
skb              4620 drivers/staging/rtl8192u/r8192U_core.c 		rx_pkt_len = skb->len;
skb              4621 drivers/staging/rtl8192u/r8192U_core.c 		ieee80211_hdr = (struct rtl_80211_hdr_1addr *)skb->data;
skb              4632 drivers/staging/rtl8192u/r8192U_core.c 		if (!ieee80211_rx(priv->ieee80211, skb, &stats)) {
skb              4633 drivers/staging/rtl8192u/r8192U_core.c 			dev_kfree_skb_any(skb);
skb              4641 drivers/staging/rtl8192u/r8192U_core.c 		netdev_dbg(dev, "actual_length: %d\n", skb->len);
skb              4642 drivers/staging/rtl8192u/r8192U_core.c 		dev_kfree_skb_any(skb);
skb              4685 drivers/staging/rtl8192u/r8192U_core.c static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
skb              4688 drivers/staging/rtl8192u/r8192U_core.c 	struct rx_desc_819x_usb *desc = (struct rx_desc_819x_usb *)skb->data;
skb              4691 drivers/staging/rtl8192u/r8192U_core.c 	stats->virtual_address = (u8 *)skb->data;
skb              4702 drivers/staging/rtl8192u/r8192U_core.c static void rtl8192_rx_cmd(struct sk_buff *skb)
skb              4704 drivers/staging/rtl8192u/r8192U_core.c 	struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
skb              4714 drivers/staging/rtl8192u/r8192U_core.c 	if ((skb->len >= (20 + sizeof(struct rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
skb              4715 drivers/staging/rtl8192u/r8192U_core.c 		query_rx_cmdpkt_desc_status(skb, &stats);
skb              4722 drivers/staging/rtl8192u/r8192U_core.c 		dev_kfree_skb_any(skb);
skb              4728 drivers/staging/rtl8192u/r8192U_core.c 	struct sk_buff *skb;
skb              4731 drivers/staging/rtl8192u/r8192U_core.c 	while (NULL != (skb = skb_dequeue(&priv->skb_queue))) {
skb              4732 drivers/staging/rtl8192u/r8192U_core.c 		info = (struct rtl8192_rx_info *)skb->cb;
skb              4737 drivers/staging/rtl8192u/r8192U_core.c 			rtl8192_rx_nomal(skb);
skb              4745 drivers/staging/rtl8192u/r8192U_core.c 			rtl8192_rx_cmd(skb);
skb              4751 drivers/staging/rtl8192u/r8192U_core.c 			dev_kfree_skb(skb);
skb                33 drivers/staging/rtl8192u/r819xU_cmdpkt.c 	struct sk_buff	    *skb;
skb                39 drivers/staging/rtl8192u/r819xU_cmdpkt.c 	skb  = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
skb                40 drivers/staging/rtl8192u/r819xU_cmdpkt.c 	if (!skb)
skb                42 drivers/staging/rtl8192u/r819xU_cmdpkt.c 	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb                43 drivers/staging/rtl8192u/r819xU_cmdpkt.c 	tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb                47 drivers/staging/rtl8192u/r819xU_cmdpkt.c 	skb_reserve(skb, USB_HWDESC_HEADER_LEN);
skb                48 drivers/staging/rtl8192u/r819xU_cmdpkt.c 	skb_put_data(skb, pData, DataLen);
skb                55 drivers/staging/rtl8192u/r819xU_cmdpkt.c 		skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
skb                57 drivers/staging/rtl8192u/r819xU_cmdpkt.c 		priv->ieee80211->softmac_hard_start_xmit(skb, dev);
skb                44 drivers/staging/rtl8192u/r819xU_firmware.c 	struct sk_buff	    *skb;
skb                67 drivers/staging/rtl8192u/r819xU_firmware.c 		skb  = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
skb                68 drivers/staging/rtl8192u/r819xU_firmware.c 		if (!skb)
skb                70 drivers/staging/rtl8192u/r819xU_firmware.c 		memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb                71 drivers/staging/rtl8192u/r819xU_firmware.c 		tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
skb                76 drivers/staging/rtl8192u/r819xU_firmware.c 		skb_reserve(skb, USB_HWDESC_HEADER_LEN);
skb                77 drivers/staging/rtl8192u/r819xU_firmware.c 		seg_ptr = skb->data;
skb                89 drivers/staging/rtl8192u/r819xU_firmware.c 		skb_put(skb, i);
skb                96 drivers/staging/rtl8192u/r819xU_firmware.c 			skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
skb                98 drivers/staging/rtl8192u/r819xU_firmware.c 			priv->ieee80211->softmac_hard_start_xmit(skb, dev);
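The firmware download loop above slices the image into skb-sized fragments and submits each through the normal transmit path. A sketch of that chunking loop under assumed names (FRAG_MAX, send_fw_image, and the xmit callback are illustrative; xmit() is assumed to consume the skb in all cases, like the softmac xmit hook above):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#define FRAG_MAX        240     /* hypothetical hardware fragment limit */

static int send_fw_image(const u8 *image, unsigned int len,
                         int (*xmit)(struct sk_buff *skb))
{
        while (len) {
                unsigned int frag = min_t(unsigned int, len, FRAG_MAX);
                struct sk_buff *skb = dev_alloc_skb(frag);

                if (!skb)
                        return -ENOMEM;
                skb_put_data(skb, image, frag);
                if (xmit(skb))          /* callback consumes the skb */
                        return -EIO;
                image += frag;
                len -= frag;
        }
        return 0;
}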
skb               410 drivers/staging/rtl8712/ieee80211.h 	struct sk_buff *skb;
skb                97 drivers/staging/rtl8712/recv_linux.c 	_pkt *skb;
skb               102 drivers/staging/rtl8712/recv_linux.c 	skb = recvframe->u.hdr.pkt;
skb               103 drivers/staging/rtl8712/recv_linux.c 	if (!skb)
skb               105 drivers/staging/rtl8712/recv_linux.c 	skb->data = recvframe->u.hdr.rx_data;
skb               106 drivers/staging/rtl8712/recv_linux.c 	skb->len = recvframe->u.hdr.len;
skb               107 drivers/staging/rtl8712/recv_linux.c 	skb_set_tail_pointer(skb, skb->len);
skb               109 drivers/staging/rtl8712/recv_linux.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               111 drivers/staging/rtl8712/recv_linux.c 		skb->ip_summed = CHECKSUM_NONE;
skb               112 drivers/staging/rtl8712/recv_linux.c 	skb->dev = adapter->pnetdev;
skb               113 drivers/staging/rtl8712/recv_linux.c 	skb->protocol = eth_type_trans(skb, adapter->pnetdev);
skb               114 drivers/staging/rtl8712/recv_linux.c 	netif_rx(skb);
skb               563 drivers/staging/rtl8723bs/include/ieee80211.h 	struct sk_buff *skb;
skb                62 drivers/staging/rtl8723bs/include/osdep_intf.h u16 rtw_recv_select_queue(struct sk_buff *skb);
skb                96 drivers/staging/rtl8723bs/include/osdep_service.h struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb);
skb                97 drivers/staging/rtl8723bs/include/osdep_service.h struct sk_buff *_rtw_skb_clone(struct sk_buff *skb);
skb                98 drivers/staging/rtl8723bs/include/osdep_service.h int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb);
skb               105 drivers/staging/rtl8723bs/include/osdep_service.h #define rtw_skb_copy(skb)	_rtw_skb_copy((skb))
skb               106 drivers/staging/rtl8723bs/include/osdep_service.h #define rtw_skb_clone(skb)	_rtw_skb_clone((skb))
skb               107 drivers/staging/rtl8723bs/include/osdep_service.h #define rtw_skb_copy_f(skb, mstat_f)	_rtw_skb_copy((skb))
skb               108 drivers/staging/rtl8723bs/include/osdep_service.h #define rtw_skb_clone_f(skb, mstat_f)	_rtw_skb_clone((skb))
skb               109 drivers/staging/rtl8723bs/include/osdep_service.h #define rtw_netif_rx(ndev, skb) _rtw_netif_rx(ndev, skb)
skb              2434 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struct net_device *ndev)
skb              2451 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	if (!skb)
skb              2454 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, skb->truesize);
skb              2456 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
skb              2459 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
skb              2463 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	rtap_len = ieee80211_get_radiotap_len(skb->data);
skb              2464 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	if (unlikely(skb->len < rtap_len))
skb              2474 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	skb_pull(skb, rtap_len);
skb              2476 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	dot11_hdr = (struct ieee80211_hdr *)skb->data;
skb              2494 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 		skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
skb              2495 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 		pdata = (unsigned char*)skb->data;
skb              2502 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 		ret = _rtw_xmit_entry(skb, padapter->pnetdev);
skb              2520 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 		u8 *buf = skb->data;
skb              2521 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 		u32 len = skb->len;
skb              2575 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	dev_kfree_skb_any(skb);
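rtw_cfg80211_monitor_if_xmit_entry() above checks that an injected frame carries a complete radiotap header before stripping it and transmitting the bare 802.11 frame. A minimal sketch of that validation, assuming the hypothetical helper name strip_radiotap():

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/ieee80211_radiotap.h>

static int strip_radiotap(struct sk_buff *skb)
{
        struct ieee80211_radiotap_header *rtap;
        unsigned int rtap_len;

        if (skb->len < sizeof(*rtap))
                return -EINVAL;

        rtap = (struct ieee80211_radiotap_header *)skb->data;
        if (rtap->it_version)           /* only version 0 is defined */
                return -EINVAL;

        rtap_len = le16_to_cpu(rtap->it_len);
        if (skb->len < rtap_len)
                return -EINVAL;

        skb_pull(skb, rtap_len);        /* 802.11 header starts here */
        return 0;
}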
skb               374 drivers/staging/rtl8723bs/os_dep/os_intfs.c static unsigned int rtw_classify8021d(struct sk_buff *skb)
skb               383 drivers/staging/rtl8723bs/os_dep/os_intfs.c 	if (skb->priority >= 256 && skb->priority <= 263)
skb               384 drivers/staging/rtl8723bs/os_dep/os_intfs.c 		return skb->priority - 256;
skb               386 drivers/staging/rtl8723bs/os_dep/os_intfs.c 	switch (skb->protocol) {
skb               388 drivers/staging/rtl8723bs/os_dep/os_intfs.c 		dscp = ip_hdr(skb)->tos & 0xfc;
skb               398 drivers/staging/rtl8723bs/os_dep/os_intfs.c static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
skb               404 drivers/staging/rtl8723bs/os_dep/os_intfs.c 	skb->priority = rtw_classify8021d(skb);
skb               407 drivers/staging/rtl8723bs/os_dep/os_intfs.c 		skb->priority = qos_acm(pmlmepriv->acm_mask, skb->priority);
skb               409 drivers/staging/rtl8723bs/os_dep/os_intfs.c 	return rtw_1d_to_queue[skb->priority];
skb               412 drivers/staging/rtl8723bs/os_dep/os_intfs.c u16 rtw_recv_select_queue(struct sk_buff *skb)
skb               418 drivers/staging/rtl8723bs/os_dep/os_intfs.c 	u8 *pdata = skb->data;
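rtw_classify8021d() above derives an 802.1d user priority either from a caller-set skb->priority or from the IP TOS byte. A condensed sketch of the DSCP-based branch (classify8021d is a hypothetical name; the mapping follows the common dscp >> 5 convention, not necessarily the driver's exact switch):

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static unsigned int classify8021d(struct sk_buff *skb)
{
        unsigned int dscp;

        if (skb->protocol != htons(ETH_P_IP))
                return 0;                       /* best effort */

        dscp = ip_hdr(skb)->tos & 0xfc;
        return dscp >> 5;                       /* top three bits = UP 0..7 */
}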
skb                45 drivers/staging/rtl8723bs/os_dep/osdep_service.c inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
skb                47 drivers/staging/rtl8723bs/os_dep/osdep_service.c 	return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
skb                50 drivers/staging/rtl8723bs/os_dep/osdep_service.c inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
skb                52 drivers/staging/rtl8723bs/os_dep/osdep_service.c 	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
skb                55 drivers/staging/rtl8723bs/os_dep/osdep_service.c inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
skb                57 drivers/staging/rtl8723bs/os_dep/osdep_service.c 	skb->dev = ndev;
skb                58 drivers/staging/rtl8723bs/os_dep/osdep_service.c 	return netif_rx(skb);
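The _rtw_skb_copy()/_rtw_skb_clone() wrappers above pick GFP_ATOMIC or GFP_KERNEL via in_interrupt(). A usage note as a sketch (dup_for_edit is hypothetical): in_interrupt() does not detect every atomic context (e.g. spinlocks held), so letting the caller state the gfp explicitly is the safer pattern, and the clone/copy choice depends on whether the data will be written:

#include <linux/skbuff.h>
#include <linux/types.h>

static struct sk_buff *dup_for_edit(struct sk_buff *skb, bool will_modify,
                                    gfp_t gfp)
{
        /* clone shares the data buffer (treat as read-only); copy
         * duplicates header and data and is safe to modify.
         */
        return will_modify ? skb_copy(skb, gfp) : skb_clone(skb, gfp);
}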
skb               220 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	_pkt *skb = precv_frame->u.hdr.pkt;
skb               229 drivers/staging/rtl8723bs/os_dep/recv_linux.c 		rx_pid = *(u16*)(skb->data+ETH_HLEN);
skb               236 drivers/staging/rtl8723bs/os_dep/recv_linux.c 			u16 len = *(u16*)(skb->data+ETH_HLEN+2);
skb               243 drivers/staging/rtl8723bs/os_dep/recv_linux.c 				DBG_871X("0x%x\n", *(skb->data+ETH_HLEN+4+i));
skb               258 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	_pkt *skb;
skb               266 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	skb = precv_frame->u.hdr.pkt;
skb               267 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	if (skb == NULL) {
skb               276 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	skb->data = precv_frame->u.hdr.rx_data;
skb               278 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	skb_set_tail_pointer(skb, precv_frame->u.hdr.len);
skb               280 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	skb->len = precv_frame->u.hdr.len;
skb               282 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("\n skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n", skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->len));
skb               292 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	rtw_os_recv_indicate_pkt(padapter, skb, pattrib);
skb               127 drivers/staging/rtl8723bs/os_dep/xmit_linux.c static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
skb               169 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		if (!memcmp(psta->hwaddr, &skb->data[6], 6) ||
skb               178 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		newskb = rtw_skb_copy(skb);
skb               198 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	dev_kfree_skb_any(skb);
skb               237 drivers/staging/unisys/visornic/visornic_main.c static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb,
skb               245 drivers/staging/unisys/visornic/visornic_main.c 	numfrags = skb_shinfo(skb)->nr_frags;
skb               255 drivers/staging/unisys/visornic/visornic_main.c 		if (skb_linearize(skb))
skb               257 drivers/staging/unisys/visornic/visornic_main.c 		numfrags = skb_shinfo(skb)->nr_frags;
skb               266 drivers/staging/unisys/visornic/visornic_main.c 			page_to_pfn(virt_to_page(skb->data + offset));
skb               268 drivers/staging/unisys/visornic/visornic_main.c 			(unsigned long)(skb->data + offset) & PI_PAGE_MASK;
skb               286 drivers/staging/unisys/visornic/visornic_main.c 				  skb_frag_page(&skb_shinfo(skb)->frags[frag])),
skb               287 drivers/staging/unisys/visornic/visornic_main.c 				  skb_frag_off(&skb_shinfo(skb)->frags[frag]),
skb               288 drivers/staging/unisys/visornic/visornic_main.c 				  skb_frag_size(&skb_shinfo(skb)->frags[frag]),
skb               299 drivers/staging/unisys/visornic/visornic_main.c 	if (skb_shinfo(skb)->frag_list) {
skb               303 drivers/staging/unisys/visornic/visornic_main.c 		for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
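visor_copy_fragsinfo_from_skb() above walks the linear head and then each page fragment, falling back to skb_linearize() when there are too many pieces. A sketch of that walk with a hypothetical visit() callback (this simplified form assumes the linear area does not cross a page boundary; the real driver handles that with an extra per-page loop):

#include <linux/mm.h>
#include <linux/skbuff.h>

static void for_each_piece(struct sk_buff *skb,
                           void (*visit)(struct page *p, unsigned int off,
                                         unsigned int len))
{
        unsigned int head = skb_headlen(skb);   /* linear bytes only */
        int i;

        visit(virt_to_page(skb->data), offset_in_page(skb->data), head);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                visit(skb_frag_page(frag), skb_frag_off(frag),
                      skb_frag_size(frag));
        }
}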
skb               419 drivers/staging/unisys/visornic/visornic_main.c 	struct sk_buff *skb;
skb               425 drivers/staging/unisys/visornic/visornic_main.c 	skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
skb               426 drivers/staging/unisys/visornic/visornic_main.c 	if (!skb)
skb               428 drivers/staging/unisys/visornic/visornic_main.c 	skb->dev = netdev;
skb               433 drivers/staging/unisys/visornic/visornic_main.c 	skb->len = RCVPOST_BUF_SIZE;
skb               435 drivers/staging/unisys/visornic/visornic_main.c 	skb->data_len = 0;
skb               436 drivers/staging/unisys/visornic/visornic_main.c 	return skb;
skb               447 drivers/staging/unisys/visornic/visornic_main.c 		    struct sk_buff *skb)
skb               451 drivers/staging/unisys/visornic/visornic_main.c 	cmdrsp->net.buf = skb;
skb               452 drivers/staging/unisys/visornic/visornic_main.c 	cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
skb               454 drivers/staging/unisys/visornic/visornic_main.c 		(unsigned long)skb->data & PI_PAGE_MASK;
skb               455 drivers/staging/unisys/visornic/visornic_main.c 	cmdrsp->net.rcvpost.frag.pi_len = skb->len;
skb               458 drivers/staging/unisys/visornic/visornic_main.c 	if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) > PI_PAGE_SIZE)
skb               852 drivers/staging/unisys/visornic/visornic_main.c static netdev_tx_t visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
skb               869 drivers/staging/unisys/visornic/visornic_main.c 		kfree_skb(skb);
skb               876 drivers/staging/unisys/visornic/visornic_main.c 	len = skb->len;
skb               884 drivers/staging/unisys/visornic/visornic_main.c 	firstfraglen = skb->len - skb->data_len;
skb               891 drivers/staging/unisys/visornic/visornic_main.c 		kfree_skb(skb);
skb               896 drivers/staging/unisys/visornic/visornic_main.c 	    ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
skb               899 drivers/staging/unisys/visornic/visornic_main.c 		skb_put_zero(skb, padlen);
skb               911 drivers/staging/unisys/visornic/visornic_main.c 	cmdrsp->net.buf = skb;
skb               926 drivers/staging/unisys/visornic/visornic_main.c 		kfree_skb(skb);
skb               932 drivers/staging/unisys/visornic/visornic_main.c 	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
skb               934 drivers/staging/unisys/visornic/visornic_main.c 		cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
skb               935 drivers/staging/unisys/visornic/visornic_main.c 		if (skb_transport_header(skb) > skb->data) {
skb               937 drivers/staging/unisys/visornic/visornic_main.c 				skb_transport_header(skb) - skb->data;
skb               940 drivers/staging/unisys/visornic/visornic_main.c 		if (skb_network_header(skb) > skb->data) {
skb               942 drivers/staging/unisys/visornic/visornic_main.c 				skb_network_header(skb) - skb->data;
skb               945 drivers/staging/unisys/visornic/visornic_main.c 		cmdrsp->net.xmt.lincsum.csum = skb->csum;
skb               956 drivers/staging/unisys/visornic/visornic_main.c 	memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HLEN);
skb               962 drivers/staging/unisys/visornic/visornic_main.c 		visor_copy_fragsinfo_from_skb(skb, firstfraglen,
skb               970 drivers/staging/unisys/visornic/visornic_main.c 		kfree_skb(skb);
skb               982 drivers/staging/unisys/visornic/visornic_main.c 		kfree_skb(skb);
skb               987 drivers/staging/unisys/visornic/visornic_main.c 	skb_queue_head(&devdata->xmitbufhead, skb);
skb               991 drivers/staging/unisys/visornic/visornic_main.c 	devdata->net_stats.tx_bytes += skb->len;
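visornic_xmit() above pads short frames up to ETH_MIN_PACKET_SIZE in place when the buffer has room. A generic sketch of the same idiom against the standard ETH_ZLEN minimum (pad_min_eth is hypothetical; assumes a linear skb):

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static int pad_min_eth(struct sk_buff *skb)
{
        if (skb->len >= ETH_ZLEN)
                return 0;

        if (skb_tailroom(skb) >= ETH_ZLEN - skb->len) {
                skb_put_zero(skb, ETH_ZLEN - skb->len); /* zeroed pad */
                return 0;
        }
        /* not enough tailroom: may reallocate, frees skb on error */
        return skb_padto(skb, ETH_ZLEN);
}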
skb              1120 drivers/staging/unisys/visornic/visornic_main.c 			 struct sk_buff *skb, struct net_device *netdev)
skb              1147 drivers/staging/unisys/visornic/visornic_main.c 			if ((skb) && devdata->rcvbuf[i] == skb) {
skb              1173 drivers/staging/unisys/visornic/visornic_main.c 	if (skb) {
skb              1175 drivers/staging/unisys/visornic/visornic_main.c 			kfree_skb(skb);
skb              1195 drivers/staging/unisys/visornic/visornic_main.c 	struct sk_buff *skb, *prev, *curr;
skb              1206 drivers/staging/unisys/visornic/visornic_main.c 	skb = cmdrsp->net.buf;
skb              1207 drivers/staging/unisys/visornic/visornic_main.c 	netdev = skb->dev;
skb              1218 drivers/staging/unisys/visornic/visornic_main.c 	skb->len = cmdrsp->net.rcv.rcv_done_len;
skb              1222 drivers/staging/unisys/visornic/visornic_main.c 	devdata->net_stats.rx_bytes += skb->len;
skb              1230 drivers/staging/unisys/visornic/visornic_main.c 		repost_return(cmdrsp, devdata, skb, netdev);
skb              1246 drivers/staging/unisys/visornic/visornic_main.c 	if (skb->len > RCVPOST_BUF_SIZE) {
skb              1248 drivers/staging/unisys/visornic/visornic_main.c 			if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
skb              1255 drivers/staging/unisys/visornic/visornic_main.c 		skb->tail += RCVPOST_BUF_SIZE;
skb              1257 drivers/staging/unisys/visornic/visornic_main.c 		skb->data_len = skb->len - RCVPOST_BUF_SIZE;
skb              1264 drivers/staging/unisys/visornic/visornic_main.c 			if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
skb              1269 drivers/staging/unisys/visornic/visornic_main.c 		skb->tail += skb->len;
skb              1271 drivers/staging/unisys/visornic/visornic_main.c 		skb->data_len = 0;
skb              1273 drivers/staging/unisys/visornic/visornic_main.c 	off = skb_tail_pointer(skb) - skb->data;
skb              1284 drivers/staging/unisys/visornic/visornic_main.c 	if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
skb              1285 drivers/staging/unisys/visornic/visornic_main.c 		if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
skb              1299 drivers/staging/unisys/visornic/visornic_main.c 				skb_shinfo(skb)->frag_list = curr;
skb              1307 drivers/staging/unisys/visornic/visornic_main.c 			currsize = min(skb->len - off,
skb              1315 drivers/staging/unisys/visornic/visornic_main.c 		if (skb->len != off) {
skb              1318 drivers/staging/unisys/visornic/visornic_main.c 				   skb->len, off);
skb              1325 drivers/staging/unisys/visornic/visornic_main.c 	skb->protocol = eth_type_trans(skb, netdev);
skb              1326 drivers/staging/unisys/visornic/visornic_main.c 	eth = eth_hdr(skb);
skb              1327 drivers/staging/unisys/visornic/visornic_main.c 	skb->csum = 0;
skb              1328 drivers/staging/unisys/visornic/visornic_main.c 	skb->ip_summed = CHECKSUM_NONE;
skb              1334 drivers/staging/unisys/visornic/visornic_main.c 		if (skb->pkt_type == PACKET_BROADCAST) {
skb              1338 drivers/staging/unisys/visornic/visornic_main.c 		} else if (skb->pkt_type == PACKET_MULTICAST) {
skb              1359 drivers/staging/unisys/visornic/visornic_main.c 		} else if (skb->pkt_type == PACKET_HOST) {
skb              1361 drivers/staging/unisys/visornic/visornic_main.c 		} else if (skb->pkt_type == PACKET_OTHERHOST) {
skb              1369 drivers/staging/unisys/visornic/visornic_main.c 		repost_return(cmdrsp, devdata, skb, netdev);
skb              1373 drivers/staging/unisys/visornic/visornic_main.c 	netif_receive_skb(skb);
skb              1378 drivers/staging/unisys/visornic/visornic_main.c 	skb = NULL;
skb              1383 drivers/staging/unisys/visornic/visornic_main.c 	repost_return(cmdrsp, devdata, skb, netdev);
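The receive completion above chains extra RCVPOST_BUF_SIZE buffers onto skb_shinfo(skb)->frag_list to avoid copying. For contrast, a simpler copy-based alternative is sketched below (coalesce_bufs is hypothetical, not the driver's method; total must equal the sum of the buffer lengths):

#include <linux/skbuff.h>

static struct sk_buff *coalesce_bufs(struct sk_buff **bufs, int n,
                                     unsigned int total)
{
        struct sk_buff *skb = dev_alloc_skb(total);
        int i;

        if (!skb)
                return NULL;

        for (i = 0; i < n; i++)
                skb_put_data(skb, bufs[i]->data, bufs[i]->len);

        return skb;     /* caller still owns and reposts bufs[] */
}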
skb               159 drivers/staging/vt6655/desc.h 	struct sk_buff *skb;
skb               221 drivers/staging/vt6655/desc.h 	struct sk_buff *skb;
skb               729 drivers/staging/vt6655/device_main.c 		dev_kfree_skb(td_info->skb);
skb               742 drivers/staging/vt6655/device_main.c 		dev_kfree_skb(td_info->skb);
skb               760 drivers/staging/vt6655/device_main.c 		if (!rd->rd_info->skb)
skb               783 drivers/staging/vt6655/device_main.c 	rd_info->skb = dev_alloc_skb((int)priv->rx_buf_sz);
skb               784 drivers/staging/vt6655/device_main.c 	if (!rd_info->skb)
skb               789 drivers/staging/vt6655/device_main.c 			       skb_put(rd_info->skb, skb_tailroom(rd_info->skb)),
skb               792 drivers/staging/vt6655/device_main.c 		dev_kfree_skb(rd_info->skb);
skb               793 drivers/staging/vt6655/device_main.c 		rd_info->skb = NULL;
skb               814 drivers/staging/vt6655/device_main.c 	dev_kfree_skb(rd_info->skb);
skb               846 drivers/staging/vt6655/device_main.c 	if (!context->skb)
skb               853 drivers/staging/vt6655/device_main.c 	info = IEEE80211_SKB_CB(context->skb);
skb               957 drivers/staging/vt6655/device_main.c 	struct sk_buff *skb = td_info->skb;
skb               959 drivers/staging/vt6655/device_main.c 	if (skb)
skb               960 drivers/staging/vt6655/device_main.c 		ieee80211_tx_status_irqsafe(priv->hw, skb);
skb               962 drivers/staging/vt6655/device_main.c 	td_info->skb = NULL;
skb              1147 drivers/staging/vt6655/device_main.c static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
skb              1149 drivers/staging/vt6655/device_main.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1171 drivers/staging/vt6655/device_main.c 	head_td->td_info->skb = skb;
skb              1180 drivers/staging/vt6655/device_main.c 	vnt_generate_fifo_header(priv, dma_idx, head_td, skb);
skb              1211 drivers/staging/vt6655/device_main.c 			 struct sk_buff *skb)
skb              1215 drivers/staging/vt6655/device_main.c 	if (vnt_tx_packet(priv, skb))
skb              1216 drivers/staging/vt6655/device_main.c 		ieee80211_free_txskb(hw, skb);
skb                25 drivers/staging/vt6655/dpc.c static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
skb                44 drivers/staging/vt6655/dpc.c 	frame_size = le16_to_cpu(*((__le16 *)(skb->data + 2)));
skb                50 drivers/staging/vt6655/dpc.c 	skb_data = (u8 *)skb->data;
skb                87 drivers/staging/vt6655/dpc.c 	skb_pull(skb, 4);
skb                88 drivers/staging/vt6655/dpc.c 	skb_trim(skb, frame_size);
skb                99 drivers/staging/vt6655/dpc.c 	hdr = (struct ieee80211_hdr *)(skb->data);
skb               113 drivers/staging/vt6655/dpc.c 	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
skb               115 drivers/staging/vt6655/dpc.c 	ieee80211_rx_irqsafe(priv->hw, skb);
skb               123 drivers/staging/vt6655/dpc.c 	struct sk_buff *skb;
skb               126 drivers/staging/vt6655/dpc.c 	skb = rd_info->skb;
skb               137 drivers/staging/vt6655/dpc.c 		dev_kfree_skb_irq(skb);
skb               141 drivers/staging/vt6655/dpc.c 	if (vnt_rx_data(priv, skb, frame_size))
skb               144 drivers/staging/vt6655/dpc.c 	dev_kfree_skb_irq(skb);
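vnt_rx_data() above fills an ieee80211_rx_status, copies it into the skb's receive control block, and hands the frame to mac80211 from IRQ context. A minimal sketch of that hand-off with assumed values (deliver_to_mac80211, the band, frequency, and rate index are placeholders):

#include <linux/string.h>
#include <net/mac80211.h>

static void deliver_to_mac80211(struct ieee80211_hw *hw,
                                struct sk_buff *skb, s8 signal_dbm)
{
        struct ieee80211_rx_status rx_status = { 0 };

        rx_status.band = NL80211_BAND_2GHZ;     /* assumed band */
        rx_status.freq = 2412;                  /* channel 1, assumed */
        rx_status.signal = signal_dbm;
        rx_status.rate_idx = 0;                 /* lowest legacy rate */

        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
        ieee80211_rx_irqsafe(hw, skb);          /* mac80211 owns skb now */
}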
skb              1025 drivers/staging/vt6655/rxtx.c 	struct sk_buff *skb = td_info->skb;
skb              1026 drivers/staging/vt6655/rxtx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1027 drivers/staging/vt6655/rxtx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1053 drivers/staging/vt6655/rxtx.c 	cbFrameSize = skb->len + 4;
skb              1067 drivers/staging/vt6655/rxtx.c 			uPadding = 4 - (ieee80211_get_hdrlen_from_skb(skb) % 4);
skb              1184 drivers/staging/vt6655/rxtx.c 	cbReqCount = cbHeaderLength + uPadding + skb->len;
skb              1189 drivers/staging/vt6655/rxtx.c 	memcpy((pbyBuffer + uLength), skb->data, skb->len);
skb              1200 drivers/staging/vt6655/rxtx.c 			   struct sk_buff *skb,	u16 payload_len,
skb              1204 drivers/staging/vt6655/rxtx.c 	u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));
skb              1207 drivers/staging/vt6655/rxtx.c 	payload_len -= ieee80211_get_hdrlen_from_skb(skb);
skb              1224 drivers/staging/vt6655/rxtx.c 		ieee80211_get_tkip_p2k(tx_key, skb, key_buffer);
skb              1270 drivers/staging/vt6655/rxtx.c 			     struct vnt_tx_desc *head_td, struct sk_buff *skb)
skb              1273 drivers/staging/vt6655/rxtx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1280 drivers/staging/vt6655/rxtx.c 	u16 tx_body_size = skb->len, current_rate;
skb              1286 drivers/staging/vt6655/rxtx.c 	hdr = (struct ieee80211_hdr *)(skb->data);
skb              1358 drivers/staging/vt6655/rxtx.c 			cpu_to_le16(ieee80211_get_hdrlen_from_skb(skb) << 10);
skb              1399 drivers/staging/vt6655/rxtx.c 				tx_key, skb, tx_body_size, td_info->mic_hdr);
skb              1406 drivers/staging/vt6655/rxtx.c 			   struct sk_buff *skb)
skb              1413 drivers/staging/vt6655/rxtx.c 	u32 frame_size = skb->len + 4;
skb              1454 drivers/staging/vt6655/rxtx.c 	memcpy(mgmt_hdr, skb->data, skb->len);
skb              1459 drivers/staging/vt6655/rxtx.c 	info = IEEE80211_SKB_CB(skb);
skb              1471 drivers/staging/vt6655/rxtx.c 	priv->wBCNBufLen = sizeof(*short_head) + skb->len;
skb               182 drivers/staging/vt6655/rxtx.h 			     struct vnt_tx_desc *head_td, struct sk_buff *skb);
skb               213 drivers/staging/vt6656/device.h 	struct sk_buff *skb;
skb               220 drivers/staging/vt6656/device.h 	struct sk_buff *skb;
skb                31 drivers/staging/vt6656/dpc.c 	struct sk_buff *skb;
skb                48 drivers/staging/vt6656/dpc.c 	skb = ptr_rcb->skb;
skb                51 drivers/staging/vt6656/dpc.c 	wbk_status = *((u32 *)(skb->data));
skb                66 drivers/staging/vt6656/dpc.c 	skb_data = (u8 *)skb->data;
skb               136 drivers/staging/vt6656/dpc.c 	skb_pull(skb, 8);
skb               137 drivers/staging/vt6656/dpc.c 	skb_trim(skb, frame_size);
skb               148 drivers/staging/vt6656/dpc.c 	hdr = (struct ieee80211_hdr *)(skb->data);
skb               159 drivers/staging/vt6656/dpc.c 				dev_kfree_skb(skb);
skb               165 drivers/staging/vt6656/dpc.c 	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
skb               167 drivers/staging/vt6656/dpc.c 	ieee80211_rx_irqsafe(priv->hw, skb);
skb                71 drivers/staging/vt6656/int.c 	if (!context->skb)
skb                74 drivers/staging/vt6656/int.c 	info = IEEE80211_SKB_CB(context->skb);
skb               109 drivers/staging/vt6656/int.c 	ieee80211_tx_status_irqsafe(priv->hw, context->skb);
skb               425 drivers/staging/vt6656/main_usb.c 		if (rcb->skb)
skb               426 drivers/staging/vt6656/main_usb.c 			dev_kfree_skb(rcb->skb);
skb               483 drivers/staging/vt6656/main_usb.c 		rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
skb               484 drivers/staging/vt6656/main_usb.c 		if (!rcb->skb) {
skb               522 drivers/staging/vt6656/main_usb.c 			 struct sk_buff *skb)
skb               526 drivers/staging/vt6656/main_usb.c 	if (vnt_tx_packet(priv, skb))
skb               527 drivers/staging/vt6656/main_usb.c 		ieee80211_free_txskb(hw, skb);
skb               270 drivers/staging/vt6656/rxtx.c 				(struct ieee80211_hdr *)tx_context->skb->data;
skb               362 drivers/staging/vt6656/rxtx.c 				(struct ieee80211_hdr *)tx_context->skb->data;
skb               390 drivers/staging/vt6656/rxtx.c 				(struct ieee80211_hdr *)tx_context->skb->data;
skb               712 drivers/staging/vt6656/rxtx.c 			   struct sk_buff *skb, u16 payload_len,
skb               717 drivers/staging/vt6656/rxtx.c 	u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));
skb               720 drivers/staging/vt6656/rxtx.c 	payload_len -= ieee80211_get_hdrlen_from_skb(skb);
skb               737 drivers/staging/vt6656/rxtx.c 		ieee80211_get_tkip_p2k(tx_key, skb, key_buffer);
skb               782 drivers/staging/vt6656/rxtx.c int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
skb               784 drivers/staging/vt6656/rxtx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               799 drivers/staging/vt6656/rxtx.c 	hdr = (struct ieee80211_hdr *)(skb->data);
skb               836 drivers/staging/vt6656/rxtx.c 	tx_context->skb = skb;
skb               839 drivers/staging/vt6656/rxtx.c 	tx_context->frame_len = skb->len + 4;
skb               846 drivers/staging/vt6656/rxtx.c 	tx_body_size = skb->len;
skb               892 drivers/staging/vt6656/rxtx.c 			cpu_to_le16(ieee80211_get_hdrlen_from_skb(skb) << 10);
skb               955 drivers/staging/vt6656/rxtx.c 	memcpy(tx_context->hdr, skb->data, tx_body_size);
skb               963 drivers/staging/vt6656/rxtx.c 				       tx_key, skb, tx_body_size, mic_hdr);
skb               990 drivers/staging/vt6656/rxtx.c static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
skb               998 drivers/staging/vt6656/rxtx.c 	u32 frame_size = skb->len + 4;
skb              1010 drivers/staging/vt6656/rxtx.c 	context->skb = skb;
skb              1046 drivers/staging/vt6656/rxtx.c 	memcpy(mgmt_hdr, skb->data, skb->len);
skb              1051 drivers/staging/vt6656/rxtx.c 	info = IEEE80211_SKB_CB(skb);
skb              1063 drivers/staging/vt6656/rxtx.c 	count = sizeof(struct vnt_tx_short_buf_head) + skb->len;
skb              1075 drivers/staging/vt6656/rxtx.c 		ieee80211_free_txskb(priv->hw, context->skb);
skb               242 drivers/staging/vt6656/rxtx.h int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb);
skb               229 drivers/staging/vt6656/usbpipe.c 			rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
skb               230 drivers/staging/vt6656/usbpipe.c 			if (!rcb->skb) {
skb               235 drivers/staging/vt6656/usbpipe.c 			skb_push(rcb->skb, skb_headroom(rcb->skb));
skb               236 drivers/staging/vt6656/usbpipe.c 			skb_trim(rcb->skb, 0);
skb               239 drivers/staging/vt6656/usbpipe.c 		urb->transfer_buffer = skb_put(rcb->skb,
skb               240 drivers/staging/vt6656/usbpipe.c 					       skb_tailroom(rcb->skb));
skb               255 drivers/staging/vt6656/usbpipe.c 	if (!rcb->skb) {
skb               264 drivers/staging/vt6656/usbpipe.c 			  skb_put(rcb->skb, skb_tailroom(rcb->skb)),
skb               305 drivers/staging/vt6656/usbpipe.c 		if (context->skb)
skb               306 drivers/staging/vt6656/usbpipe.c 			ieee80211_free_txskb(priv->hw, context->skb);
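The usbpipe code above (skb_push of the full headroom, skb_trim to zero, then skb_put of the tailroom) recycles a receive skb so the entire buffer can back the next URB. The same trick as a standalone sketch (reset_rx_buffer is a hypothetical name):

#include <linux/skbuff.h>

static void *reset_rx_buffer(struct sk_buff *skb)
{
        skb_push(skb, skb_headroom(skb));       /* undo any skb_pull() */
        skb_trim(skb, 0);                       /* undo any skb_put() */
        return skb_put(skb, skb_tailroom(skb)); /* expose whole buffer */
}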
skb                27 drivers/staging/wilc1000/wilc_mon.c 	struct sk_buff *skb = NULL;
skb                48 drivers/staging/wilc1000/wilc_mon.c 		skb = dev_alloc_skb(size + sizeof(*cb_hdr));
skb                49 drivers/staging/wilc1000/wilc_mon.c 		if (!skb)
skb                52 drivers/staging/wilc1000/wilc_mon.c 		skb_put_data(skb, buff, size);
skb                54 drivers/staging/wilc1000/wilc_mon.c 		cb_hdr = skb_push(skb, sizeof(*cb_hdr));
skb                73 drivers/staging/wilc1000/wilc_mon.c 		skb = dev_alloc_skb(size + sizeof(*hdr));
skb                75 drivers/staging/wilc1000/wilc_mon.c 		if (!skb)
skb                78 drivers/staging/wilc1000/wilc_mon.c 		skb_put_data(skb, buff, size);
skb                79 drivers/staging/wilc1000/wilc_mon.c 		hdr = skb_push(skb, sizeof(*hdr));
skb                88 drivers/staging/wilc1000/wilc_mon.c 	skb->dev = mon_dev;
skb                89 drivers/staging/wilc1000/wilc_mon.c 	skb_reset_mac_header(skb);
skb                90 drivers/staging/wilc1000/wilc_mon.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb                91 drivers/staging/wilc1000/wilc_mon.c 	skb->pkt_type = PACKET_OTHERHOST;
skb                92 drivers/staging/wilc1000/wilc_mon.c 	skb->protocol = htons(ETH_P_802_2);
skb                93 drivers/staging/wilc1000/wilc_mon.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb                95 drivers/staging/wilc1000/wilc_mon.c 	netif_rx(skb);
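The monitor receive path above prepends a radiotap header to the raw 802.11 frame before netif_rx(). A sketch that builds the same framing with an empty (no-field) radiotap header (wrap_radiotap is hypothetical; real drivers usually advertise rate/channel fields as well):

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/ieee80211_radiotap.h>

static struct sk_buff *wrap_radiotap(const u8 *frame, unsigned int len)
{
        struct ieee80211_radiotap_header *hdr;
        struct sk_buff *skb;

        skb = dev_alloc_skb(len + sizeof(*hdr));
        if (!skb)
                return NULL;

        skb_reserve(skb, sizeof(*hdr));         /* headroom for radiotap */
        skb_put_data(skb, frame, len);          /* the raw 802.11 frame */

        hdr = skb_push(skb, sizeof(*hdr));
        memset(hdr, 0, sizeof(*hdr));           /* version 0, no fields */
        hdr->it_len = cpu_to_le16(sizeof(*hdr));
        return skb;
}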
skb               142 drivers/staging/wilc1000/wilc_mon.c static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb,
skb               156 drivers/staging/wilc1000/wilc_mon.c 	rtap_len = ieee80211_get_radiotap_len(skb->data);
skb               157 drivers/staging/wilc1000/wilc_mon.c 	if (skb->len < rtap_len)
skb               160 drivers/staging/wilc1000/wilc_mon.c 	skb_pull(skb, rtap_len);
skb               162 drivers/staging/wilc1000/wilc_mon.c 	if (skb->data[0] == 0xc0 && is_broadcast_ether_addr(&skb->data[4])) {
skb               163 drivers/staging/wilc1000/wilc_mon.c 		skb2 = dev_alloc_skb(skb->len + sizeof(*cb_hdr));
skb               167 drivers/staging/wilc1000/wilc_mon.c 		skb_put_data(skb2, skb->data, skb->len);
skb               192 drivers/staging/wilc1000/wilc_mon.c 	skb->dev = mon_priv->real_ndev;
skb               194 drivers/staging/wilc1000/wilc_mon.c 	ether_addr_copy(srcadd, &skb->data[10]);
skb               195 drivers/staging/wilc1000/wilc_mon.c 	ether_addr_copy(bssid, &skb->data[16]);
skb               201 drivers/staging/wilc1000/wilc_mon.c 		ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
skb               204 drivers/staging/wilc1000/wilc_mon.c 		dev_kfree_skb(skb);
skb               206 drivers/staging/wilc1000/wilc_mon.c 		ret = wilc_mac_xmit(skb, mon_priv->real_ndev);
skb               707 drivers/staging/wilc1000/wilc_netdev.c 	dev_kfree_skb(pv_data->skb);
skb               711 drivers/staging/wilc1000/wilc_netdev.c netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
skb               718 drivers/staging/wilc1000/wilc_netdev.c 	if (skb->dev != ndev) {
skb               725 drivers/staging/wilc1000/wilc_netdev.c 		dev_kfree_skb(skb);
skb               730 drivers/staging/wilc1000/wilc_netdev.c 	tx_data->buff = skb->data;
skb               731 drivers/staging/wilc1000/wilc_netdev.c 	tx_data->size = skb->len;
skb               732 drivers/staging/wilc1000/wilc_netdev.c 	tx_data->skb  = skb;
skb               789 drivers/staging/wilc1000/wilc_netdev.c 	struct sk_buff *skb;
skb               807 drivers/staging/wilc1000/wilc_netdev.c 		skb = dev_alloc_skb(frame_len);
skb               808 drivers/staging/wilc1000/wilc_netdev.c 		if (!skb)
skb               811 drivers/staging/wilc1000/wilc_netdev.c 		skb->dev = wilc_netdev;
skb               813 drivers/staging/wilc1000/wilc_netdev.c 		skb_put_data(skb, buff_to_send, frame_len);
skb               815 drivers/staging/wilc1000/wilc_netdev.c 		skb->protocol = eth_type_trans(skb, wilc_netdev);
skb               818 drivers/staging/wilc1000/wilc_netdev.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               819 drivers/staging/wilc1000/wilc_netdev.c 		stats = netif_rx(skb);
skb               142 drivers/staging/wilc1000/wilc_wfi_netdevice.h 	struct sk_buff *skb;
skb               252 drivers/staging/wilc1000/wilc_wlan.h 	struct sk_buff *skb;
skb               293 drivers/staging/wilc1000/wilc_wlan.h netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
skb              1421 drivers/staging/wlan-ng/hfa384x.h hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
skb               175 drivers/staging/wlan-ng/hfa384x_usb.c static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb);
skb               327 drivers/staging/wlan-ng/hfa384x_usb.c 	struct sk_buff *skb;
skb               330 drivers/staging/wlan-ng/hfa384x_usb.c 	skb = dev_alloc_skb(sizeof(union hfa384x_usbin));
skb               331 drivers/staging/wlan-ng/hfa384x_usb.c 	if (!skb) {
skb               339 drivers/staging/wlan-ng/hfa384x_usb.c 			  skb->data, sizeof(union hfa384x_usbin),
skb               342 drivers/staging/wlan-ng/hfa384x_usb.c 	hw->rx_urb_skb = skb;
skb               361 drivers/staging/wlan-ng/hfa384x_usb.c 		dev_kfree_skb(skb);
skb               598 drivers/staging/wlan-ng/hfa384x_usb.c 	struct sk_buff *skb;
skb               608 drivers/staging/wlan-ng/hfa384x_usb.c 	while ((skb = skb_dequeue(&hw->authq)))
skb               609 drivers/staging/wlan-ng/hfa384x_usb.c 		dev_kfree_skb(skb);
skb              2483 drivers/staging/wlan-ng/hfa384x_usb.c int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
skb              2533 drivers/staging/wlan-ng/hfa384x_usb.c 		hw->txbuff.txfrm.desc.data_len = cpu_to_le16(skb->len + 8);
skb              2536 drivers/staging/wlan-ng/hfa384x_usb.c 		hw->txbuff.txfrm.desc.data_len = cpu_to_le16(skb->len);
skb              2539 drivers/staging/wlan-ng/hfa384x_usb.c 	usbpktlen += skb->len;
skb              2546 drivers/staging/wlan-ng/hfa384x_usb.c 		memcpy(ptr, p80211_wep->data, skb->len);
skb              2548 drivers/staging/wlan-ng/hfa384x_usb.c 		memcpy(ptr, skb->data, skb->len);
skb              2551 drivers/staging/wlan-ng/hfa384x_usb.c 	ptr += skb->len;
skb              2918 drivers/staging/wlan-ng/hfa384x_usb.c 	struct sk_buff *skb = NULL;
skb              2936 drivers/staging/wlan-ng/hfa384x_usb.c 	skb = hw->rx_urb_skb;
skb              2937 drivers/staging/wlan-ng/hfa384x_usb.c 	if (!skb || (skb->data != urb->transfer_buffer)) {
skb              3027 drivers/staging/wlan-ng/hfa384x_usb.c 				skb_put(skb, sizeof(*usbin));
skb              3028 drivers/staging/wlan-ng/hfa384x_usb.c 				hfa384x_usbin_rx(wlandev, skb);
skb              3029 drivers/staging/wlan-ng/hfa384x_usb.c 				skb = NULL;
skb              3074 drivers/staging/wlan-ng/hfa384x_usb.c 	if (skb)
skb              3075 drivers/staging/wlan-ng/hfa384x_usb.c 		dev_kfree_skb(skb);
skb              3251 drivers/staging/wlan-ng/hfa384x_usb.c static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
skb              3253 drivers/staging/wlan-ng/hfa384x_usb.c 	union hfa384x_usbin *usbin = (union hfa384x_usbin *)skb->data;
skb              3281 drivers/staging/wlan-ng/hfa384x_usb.c 		skb_pull(skb, sizeof(struct hfa384x_rx_frame));
skb              3286 drivers/staging/wlan-ng/hfa384x_usb.c 		memmove(skb_push(skb, hdrlen),
skb              3289 drivers/staging/wlan-ng/hfa384x_usb.c 		skb->dev = wlandev->netdev;
skb              3292 drivers/staging/wlan-ng/hfa384x_usb.c 		skb_trim(skb, data_len + hdrlen);
skb              3295 drivers/staging/wlan-ng/hfa384x_usb.c 		memset(skb_put(skb, WLAN_CRC_LEN), 0xff, WLAN_CRC_LEN);
skb              3297 drivers/staging/wlan-ng/hfa384x_usb.c 		skb_reset_mac_header(skb);
skb              3300 drivers/staging/wlan-ng/hfa384x_usb.c 		p80211skb_rxmeta_attach(wlandev, skb);
skb              3301 drivers/staging/wlan-ng/hfa384x_usb.c 		rxmeta = p80211skb_rxmeta(skb);
skb              3307 drivers/staging/wlan-ng/hfa384x_usb.c 		p80211netdev_rx(wlandev, skb);
skb              3315 drivers/staging/wlan-ng/hfa384x_usb.c 			dev_kfree_skb(skb);
skb              3359 drivers/staging/wlan-ng/hfa384x_usb.c 	struct sk_buff *skb;
skb              3381 drivers/staging/wlan-ng/hfa384x_usb.c 	skb = dev_alloc_skb(skblen);
skb              3382 drivers/staging/wlan-ng/hfa384x_usb.c 	if (!skb)
skb              3390 drivers/staging/wlan-ng/hfa384x_usb.c 		datap = skb_put(skb, sizeof(struct p80211_caphdr));
skb              3412 drivers/staging/wlan-ng/hfa384x_usb.c 	skb_put_data(skb, &rxdesc->frame_control, hdrlen);
skb              3416 drivers/staging/wlan-ng/hfa384x_usb.c 		datap = skb_put_data(skb, rxfrm->data, datalen);
skb              3427 drivers/staging/wlan-ng/hfa384x_usb.c 		datap = skb_put(skb, WLAN_CRC_LEN);
skb              3432 drivers/staging/wlan-ng/hfa384x_usb.c 	p80211netdev_rx(wlandev, skb);
skb               109 drivers/staging/wlan-ng/p80211conv.c 			struct sk_buff *skb, union p80211_hdr *p80211_hdr,
skb               119 drivers/staging/wlan-ng/p80211conv.c 	memcpy(&e_hdr, skb->data, sizeof(e_hdr));
skb               121 drivers/staging/wlan-ng/p80211conv.c 	if (skb->len <= 0) {
skb               127 drivers/staging/wlan-ng/p80211conv.c 		pr_debug("ENCAP len: %d\n", skb->len);
skb               135 drivers/staging/wlan-ng/p80211conv.c 			pr_debug("802.3 len: %d\n", skb->len);
skb               140 drivers/staging/wlan-ng/p80211conv.c 			skb_pull(skb, ETH_HLEN);
skb               143 drivers/staging/wlan-ng/p80211conv.c 			skb_trim(skb, proto);
skb               145 drivers/staging/wlan-ng/p80211conv.c 			pr_debug("DIXII len: %d\n", skb->len);
skb               149 drivers/staging/wlan-ng/p80211conv.c 			skb_pull(skb, ETH_HLEN);
skb               152 drivers/staging/wlan-ng/p80211conv.c 			e_snap = skb_push(skb, sizeof(struct wlan_snap));
skb               164 drivers/staging/wlan-ng/p80211conv.c 			e_llc = skb_push(skb, sizeof(struct wlan_llc));
skb               206 drivers/staging/wlan-ng/p80211conv.c 		p80211_wep->data = kmalloc(skb->len, GFP_ATOMIC);
skb               209 drivers/staging/wlan-ng/p80211conv.c 		foo = wep_encrypt(wlandev, skb->data, p80211_wep->data,
skb               210 drivers/staging/wlan-ng/p80211conv.c 				  skb->len,
skb               276 drivers/staging/wlan-ng/p80211conv.c 			struct sk_buff *skb)
skb               291 drivers/staging/wlan-ng/p80211conv.c 	payload_length = skb->len - WLAN_HDR_A3_LEN - WLAN_CRC_LEN;
skb               294 drivers/staging/wlan-ng/p80211conv.c 	w_hdr = (union p80211_hdr *)skb->data;
skb               326 drivers/staging/wlan-ng/p80211conv.c 				   "WEP frame too short (%u).\n", skb->len);
skb               329 drivers/staging/wlan-ng/p80211conv.c 		foo = wep_decrypt(wlandev, skb->data + payload_offset + 4,
skb               331 drivers/staging/wlan-ng/p80211conv.c 				  skb->data + payload_offset,
skb               332 drivers/staging/wlan-ng/p80211conv.c 				  skb->data + payload_offset +
skb               345 drivers/staging/wlan-ng/p80211conv.c 		skb_pull(skb, 4);
skb               347 drivers/staging/wlan-ng/p80211conv.c 		skb_trim(skb, skb->len - 4);
skb               352 drivers/staging/wlan-ng/p80211conv.c 	e_hdr = (struct wlan_ethhdr *)(skb->data + payload_offset);
skb               354 drivers/staging/wlan-ng/p80211conv.c 	e_llc = (struct wlan_llc *)(skb->data + payload_offset);
skb               356 drivers/staging/wlan-ng/p80211conv.c 	    (struct wlan_snap *)(skb->data + payload_offset +
skb               376 drivers/staging/wlan-ng/p80211conv.c 		skb_pull(skb, payload_offset);
skb               378 drivers/staging/wlan-ng/p80211conv.c 		skb_trim(skb, skb->len - WLAN_CRC_LEN);
skb               405 drivers/staging/wlan-ng/p80211conv.c 		skb_pull(skb, payload_offset);
skb               408 drivers/staging/wlan-ng/p80211conv.c 		e_hdr = skb_push(skb, ETH_HLEN);
skb               414 drivers/staging/wlan-ng/p80211conv.c 		skb_trim(skb, skb->len - WLAN_CRC_LEN);
skb               440 drivers/staging/wlan-ng/p80211conv.c 		skb_pull(skb, payload_offset);
skb               443 drivers/staging/wlan-ng/p80211conv.c 		skb_pull(skb, sizeof(struct wlan_llc));
skb               446 drivers/staging/wlan-ng/p80211conv.c 		skb_pull(skb, sizeof(struct wlan_snap));
skb               449 drivers/staging/wlan-ng/p80211conv.c 		e_hdr = skb_push(skb, ETH_HLEN);
skb               455 drivers/staging/wlan-ng/p80211conv.c 		skb_trim(skb, skb->len - WLAN_CRC_LEN);
skb               473 drivers/staging/wlan-ng/p80211conv.c 		skb_pull(skb, payload_offset);
skb               476 drivers/staging/wlan-ng/p80211conv.c 		e_hdr = skb_push(skb, ETH_HLEN);
skb               482 drivers/staging/wlan-ng/p80211conv.c 		skb_trim(skb, skb->len - WLAN_CRC_LEN);
skb               494 drivers/staging/wlan-ng/p80211conv.c 	skb->protocol = eth_type_trans(skb, netdev);
skb               499 drivers/staging/wlan-ng/p80211conv.c 		orinoco_spy_gather(wlandev, eth_hdr(skb)->h_source,
skb               500 drivers/staging/wlan-ng/p80211conv.c 				   p80211skb_rxmeta(skb));
skb               503 drivers/staging/wlan-ng/p80211conv.c 	p80211skb_rxmeta_detach(skb);
skb               556 drivers/staging/wlan-ng/p80211conv.c void p80211skb_rxmeta_detach(struct sk_buff *skb)
skb               562 drivers/staging/wlan-ng/p80211conv.c 	if (!skb) {	/* bad skb */
skb               566 drivers/staging/wlan-ng/p80211conv.c 	frmmeta = p80211skb_frmmeta(skb);
skb               581 drivers/staging/wlan-ng/p80211conv.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb               601 drivers/staging/wlan-ng/p80211conv.c int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
skb               608 drivers/staging/wlan-ng/p80211conv.c 	if (p80211skb_rxmeta(skb)) {
skb               628 drivers/staging/wlan-ng/p80211conv.c 	memset(skb->cb, 0, sizeof(struct p80211_frmmeta));
skb               629 drivers/staging/wlan-ng/p80211conv.c 	frmmeta = (struct p80211_frmmeta *)(skb->cb);
skb               653 drivers/staging/wlan-ng/p80211conv.c void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
skb               657 drivers/staging/wlan-ng/p80211conv.c 	meta = p80211skb_frmmeta(skb);
skb               659 drivers/staging/wlan-ng/p80211conv.c 		p80211skb_rxmeta_detach(skb);
skb               662 drivers/staging/wlan-ng/p80211conv.c 			   "Freeing an skb (%p) w/ no frmmeta.\n", skb);
skb               663 drivers/staging/wlan-ng/p80211conv.c 	dev_kfree_skb(skb);
skb                87 drivers/staging/wlan-ng/p80211conv.h void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb);
skb                88 drivers/staging/wlan-ng/p80211conv.h int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb);
skb                89 drivers/staging/wlan-ng/p80211conv.h void p80211skb_rxmeta_detach(struct sk_buff *skb);
skb                91 drivers/staging/wlan-ng/p80211conv.h static inline struct p80211_frmmeta *p80211skb_frmmeta(struct sk_buff *skb)
skb                93 drivers/staging/wlan-ng/p80211conv.h 	struct p80211_frmmeta *frmmeta = (struct p80211_frmmeta *)skb->cb;
skb                98 drivers/staging/wlan-ng/p80211conv.h static inline struct p80211_rxmeta *p80211skb_rxmeta(struct sk_buff *skb)
skb               100 drivers/staging/wlan-ng/p80211conv.h 	struct p80211_frmmeta *frmmeta = p80211skb_frmmeta(skb);
skb               158 drivers/staging/wlan-ng/p80211conv.h 			struct sk_buff *skb);
skb               160 drivers/staging/wlan-ng/p80211conv.h 			struct sk_buff *skb, union p80211_hdr *p80211_hdr,
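skb_p80211_to_ether() above strips the 802.11 and LLC/SNAP headers and rebuilds an Ethernet header in the freed headroom. A deliberately simplified sketch of the DIX-II case (fixed three-address header, no QoS, no WEP, FCS already trimmed; w80211_to_ether and the length macros are assumptions):

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#define HDR_A3_LEN      24      /* three-address 802.11 header */
#define SNAP_LEN        8       /* LLC (3) + SNAP OUI/type (5) */

static void w80211_to_ether(struct sk_buff *skb, const u8 *daddr,
                            const u8 *saddr, __be16 proto)
{
        struct ethhdr *eth;

        skb_pull(skb, HDR_A3_LEN + SNAP_LEN);   /* payload remains */

        eth = skb_push(skb, ETH_HLEN);          /* into freed headroom */
        ether_addr_copy(eth->h_dest, daddr);
        ether_addr_copy(eth->h_source, saddr);
        eth->h_proto = proto;                   /* from the SNAP type */
}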
skb                98 drivers/staging/wlan-ng/p80211netdev.c static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb,
skb               216 drivers/staging/wlan-ng/p80211netdev.c void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
skb               219 drivers/staging/wlan-ng/p80211netdev.c 	skb_queue_tail(&wlandev->nsd_rxq, skb);
skb               236 drivers/staging/wlan-ng/p80211netdev.c 				   struct sk_buff *skb)
skb               240 drivers/staging/wlan-ng/p80211netdev.c 	hdr = (struct p80211_hdr_a3 *)skb->data;
skb               255 drivers/staging/wlan-ng/p80211netdev.c 	if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) {
skb               257 drivers/staging/wlan-ng/p80211netdev.c 		wlandev->netdev->stats.rx_bytes += skb->len;
skb               258 drivers/staging/wlan-ng/p80211netdev.c 		netif_rx_ni(skb);
skb               274 drivers/staging/wlan-ng/p80211netdev.c 	struct sk_buff *skb = NULL;
skb               278 drivers/staging/wlan-ng/p80211netdev.c 	while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
skb               285 drivers/staging/wlan-ng/p80211netdev.c 				skb->dev = dev;
skb               286 drivers/staging/wlan-ng/p80211netdev.c 				skb_reset_mac_header(skb);
skb               287 drivers/staging/wlan-ng/p80211netdev.c 				skb->ip_summed = CHECKSUM_NONE;
skb               288 drivers/staging/wlan-ng/p80211netdev.c 				skb->pkt_type = PACKET_OTHERHOST;
skb               289 drivers/staging/wlan-ng/p80211netdev.c 				skb->protocol = htons(ETH_P_80211_RAW);
skb               292 drivers/staging/wlan-ng/p80211netdev.c 				dev->stats.rx_bytes += skb->len;
skb               293 drivers/staging/wlan-ng/p80211netdev.c 				netif_rx_ni(skb);
skb               296 drivers/staging/wlan-ng/p80211netdev.c 				if (!p80211_convert_to_ether(wlandev, skb))
skb               300 drivers/staging/wlan-ng/p80211netdev.c 		dev_kfree_skb(skb);
skb               324 drivers/staging/wlan-ng/p80211netdev.c static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb,
skb               335 drivers/staging/wlan-ng/p80211netdev.c 	if (!skb)
skb               365 drivers/staging/wlan-ng/p80211netdev.c 		if (be16_to_cpu(skb->protocol) != ETH_P_80211_RAW) {
skb               376 drivers/staging/wlan-ng/p80211netdev.c 	if (be16_to_cpu(skb->protocol) == ETH_P_80211_RAW) {
skb               382 drivers/staging/wlan-ng/p80211netdev.c 		memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr));
skb               383 drivers/staging/wlan-ng/p80211netdev.c 		skb_pull(skb, sizeof(p80211_hdr));
skb               386 drivers/staging/wlan-ng/p80211netdev.c 		    (wlandev, wlandev->ethconv, skb, &p80211_hdr,
skb               404 drivers/staging/wlan-ng/p80211netdev.c 	netdev->stats.tx_bytes += skb->len;
skb               406 drivers/staging/wlan-ng/p80211netdev.c 	txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep);
skb               431 drivers/staging/wlan-ng/p80211netdev.c 	if ((p80211_wep.data) && (p80211_wep.data != skb->data))
skb               436 drivers/staging/wlan-ng/p80211netdev.c 		dev_kfree_skb(skb);
skb               849 drivers/staging/wlan-ng/p80211netdev.c 	struct sk_buff *skb;
skb               854 drivers/staging/wlan-ng/p80211netdev.c 	while ((skb = skb_dequeue(&wlandev->nsd_rxq)))
skb               855 drivers/staging/wlan-ng/p80211netdev.c 		dev_kfree_skb(skb);
skb               182 drivers/staging/wlan-ng/p80211netdev.h 	int (*txframe)(struct wlandevice *wlandev, struct sk_buff *skb,
skb               236 drivers/staging/wlan-ng/p80211netdev.h void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb);
skb               105 drivers/staging/wlan-ng/prism2sta.c static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
skb               244 drivers/staging/wlan-ng/prism2sta.c static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
skb               256 drivers/staging/wlan-ng/prism2sta.c 	return hfa384x_drvr_txframe(hw, skb, p80211_hdr, p80211_wep);
skb              1179 drivers/staging/wlan-ng/prism2sta.c 		struct sk_buff *skb;
skb              1182 drivers/staging/wlan-ng/prism2sta.c 		while ((skb = skb_dequeue(&hw->authq))) {
skb              1183 drivers/staging/wlan-ng/prism2sta.c 			inf = (struct hfa384x_inf_frame *)skb->data;
skb              1531 drivers/staging/wlan-ng/prism2sta.c 	struct sk_buff *skb;
skb              1533 drivers/staging/wlan-ng/prism2sta.c 	skb = dev_alloc_skb(sizeof(*inf));
skb              1534 drivers/staging/wlan-ng/prism2sta.c 	if (skb) {
skb              1535 drivers/staging/wlan-ng/prism2sta.c 		skb_put(skb, sizeof(*inf));
skb              1536 drivers/staging/wlan-ng/prism2sta.c 		memcpy(skb->data, inf, sizeof(*inf));
skb              1537 drivers/staging/wlan-ng/prism2sta.c 		skb_queue_tail(&hw->authq, skb);
skb                97 drivers/target/iscsi/cxgbit/cxgbit.h #define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
skb                98 drivers/target/iscsi/cxgbit/cxgbit.h #define cxgbit_skcb_flags(skb)		(CXGBIT_SKB_CB(skb)->flags)
skb                99 drivers/target/iscsi/cxgbit/cxgbit.h #define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
skb               100 drivers/target/iscsi/cxgbit/cxgbit.h #define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
skb               101 drivers/target/iscsi/cxgbit/cxgbit.h #define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
skb               102 drivers/target/iscsi/cxgbit/cxgbit.h #define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
skb               103 drivers/target/iscsi/cxgbit/cxgbit.h #define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
skb               104 drivers/target/iscsi/cxgbit/cxgbit.h #define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)
skb               106 drivers/target/iscsi/cxgbit/cxgbit.h static inline void *cplhdr(struct sk_buff *skb)
skb               108 drivers/target/iscsi/cxgbit/cxgbit.h 	return skb->data;
skb               203 drivers/target/iscsi/cxgbit/cxgbit.h 	struct sk_buff *skb;
skb               291 drivers/target/iscsi/cxgbit/cxgbit.h cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
skb               293 drivers/target/iscsi/cxgbit/cxgbit.h 	cxgbit_skcb_tx_wr_next(skb) = NULL;
skb               295 drivers/target/iscsi/cxgbit/cxgbit.h 	skb_get(skb);
skb               298 drivers/target/iscsi/cxgbit/cxgbit.h 		csk->wr_pending_head = skb;
skb               300 drivers/target/iscsi/cxgbit/cxgbit.h 		cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
skb               301 drivers/target/iscsi/cxgbit/cxgbit.h 	csk->wr_pending_tail = skb;
skb               306 drivers/target/iscsi/cxgbit/cxgbit.h 	struct sk_buff *skb = csk->wr_pending_head;
skb               308 drivers/target/iscsi/cxgbit/cxgbit.h 	if (likely(skb)) {
skb               309 drivers/target/iscsi/cxgbit/cxgbit.h 		csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
skb               310 drivers/target/iscsi/cxgbit/cxgbit.h 		cxgbit_skcb_tx_wr_next(skb) = NULL;
skb               312 drivers/target/iscsi/cxgbit/cxgbit.h 	return skb;
skb               624 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb;
skb               627 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb               628 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	if (!skb)
skb               631 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
skb               634 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
skb               635 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__skb_queue_tail(&csk->txq, skb);
skb               639 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
skb               644 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	kfree_skb(skb);
skb               648 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
skb               651 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_abort_req *req = cplhdr(skb);
skb               655 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_ofld_send(cdev, skb);
skb               660 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb;
skb               671 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	skb = __skb_dequeue(&csk->skbq);
skb               672 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
skb               675 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
skb               679 drivers/target/iscsi/cxgbit/cxgbit_cm.c __cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
skb               681 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb               700 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
skb               707 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
skb               708 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		__skb_queue_tail(&csk->backlogq, skb);
skb               710 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		__cxgbit_abort_conn(csk, skb);
skb               779 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb;
skb               787 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	while ((skb = cxgbit_sock_dequeue_wr(csk)))
skb               788 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		kfree_skb(skb);
skb               996 drivers/target/iscsi/cxgbit/cxgbit_cm.c int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
skb              1001 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		kfree_skb(skb);
skb              1006 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
skb              1008 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		kfree_skb(skb);
skb              1015 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb;
skb              1017 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb              1018 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	if (!skb)
skb              1021 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgb_mk_tid_release(skb, len, tid, 0);
skb              1022 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_ofld_send(cdev, skb);
skb              1026 drivers/target/iscsi/cxgbit/cxgbit_cm.c cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
skb              1032 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		kfree_skb(skb);
skb              1037 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
skb              1039 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		kfree_skb(skb);
skb              1043 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1046 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		__kfree_skb(skb);
skb              1050 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_ofld_send(csk->com.cdev, skb);
skb              1060 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb;
skb              1064 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	skb = alloc_skb(len, GFP_KERNEL);
skb              1065 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	if (!skb)
skb              1071 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
skb              1078 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
skb              1079 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		__skb_queue_tail(&csk->backlogq, skb);
skb              1084 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_send_rx_credits(csk, skb);
skb              1094 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb;
skb              1108 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		skb = alloc_skb(len, GFP_ATOMIC);
skb              1109 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		if (!skb)
skb              1111 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		__skb_queue_tail(&csk->skbq, skb);
skb              1114 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
skb              1115 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	if (!skb)
skb              1118 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
skb              1119 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	csk->lro_hskb = skb;
skb              1130 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb;
skb              1143 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb              1144 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	if (!skb) {
skb              1149 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	rpl5 = __skb_put_zero(skb, len);
skb              1210 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
skb              1211 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
skb              1212 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
skb              1216 drivers/target/iscsi/cxgbit/cxgbit_cm.c cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
skb              1220 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_pass_accept_req *req = cplhdr(skb);
skb              1385 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1421 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb;
skb              1430 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	skb = __skb_dequeue(&csk->skbq);
skb              1431 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	flowc = __skb_put_zero(skb, flowclen);
skb              1483 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
skb              1484 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_ofld_send(csk->com.cdev, skb);
skb              1490 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb;
skb              1497 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	skb = alloc_skb(len, GFP_KERNEL);
skb              1498 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	if (!skb)
skb              1502 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req = __skb_put_zero(skb, len);
skb              1511 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
skb              1516 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_ofld_send(csk->com.cdev, skb);
skb              1529 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct sk_buff *skb;
skb              1534 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	skb = alloc_skb(len, GFP_KERNEL);
skb              1535 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	if (!skb)
skb              1538 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	req = __skb_put_zero(skb, len);
skb              1546 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
skb              1551 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_ofld_send(csk->com.cdev, skb);
skb              1563 drivers/target/iscsi/cxgbit/cxgbit_cm.c cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
skb              1565 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
skb              1581 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1585 drivers/target/iscsi/cxgbit/cxgbit_cm.c cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
skb              1587 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
skb              1603 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1607 drivers/target/iscsi/cxgbit/cxgbit_cm.c cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
skb              1609 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_pass_establish *req = cplhdr(skb);
skb              1646 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1649 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1651 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_skcb_flags(skb) = 0;
skb              1653 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__skb_queue_tail(&csk->rxq, skb);
skb              1658 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1666 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		cxgbit_queue_rx_skb(csk, skb);
skb              1683 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1686 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1707 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1710 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1712 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_abort_req_rss *hdr = cplhdr(skb);
skb              1762 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		cxgbit_queue_rx_skb(csk, skb);
skb              1769 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1772 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1774 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
skb              1792 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1797 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	const struct sk_buff *skb = csk->wr_pending_head;
skb              1806 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	while (skb) {
skb              1807 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		credit += (__force u32)skb->csum;
skb              1808 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		skb = cxgbit_skcb_tx_wr_next(skb);
skb              1822 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1824 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
skb              1859 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		cxgbit_queue_rx_skb(csk, skb);
skb              1882 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1885 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
skb              1888 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
skb              1903 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1906 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
skb              1909 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_rx_data *cpl = cplhdr(skb);
skb              1920 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_queue_rx_skb(csk, skb);
skb              1923 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb              1927 drivers/target/iscsi/cxgbit/cxgbit_cm.c __cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1931 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		__skb_queue_tail(&csk->backlogq, skb);
skb              1936 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
skb              1940 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1943 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__cxgbit_process_rx_cpl(csk, skb);
skb              1947 drivers/target/iscsi/cxgbit/cxgbit_cm.c static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
skb              1950 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct cpl_tx_data *cpl = cplhdr(skb);
skb              1954 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	u8 opcode = cxgbit_skcb_rx_opcode(skb);
skb              1959 drivers/target/iscsi/cxgbit/cxgbit_cm.c 			cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
skb              1963 drivers/target/iscsi/cxgbit/cxgbit_cm.c 			cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
skb              1966 drivers/target/iscsi/cxgbit/cxgbit_cm.c 			cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
skb              1969 drivers/target/iscsi/cxgbit/cxgbit_cm.c 			cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
skb              1972 drivers/target/iscsi/cxgbit/cxgbit_cm.c 			cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
skb              1985 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		cxgbit_process_rx_cpl(csk, skb);
skb              1987 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		__cxgbit_process_rx_cpl(csk, skb);
skb              1991 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	__kfree_skb(skb);
skb                73 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	struct sk_buff *skb;
skb                75 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	skb  = alloc_skb(wr_len, GFP_KERNEL);
skb                76 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	if (!skb)
skb                79 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	req = __skb_put(skb, wr_len);
skb                94 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	return skb;
skb               104 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	struct sk_buff *skb;
skb               110 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
skb               111 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	if (!skb)
skb               114 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	req = (struct ulp_mem_io *)skb->data;
skb               121 drivers/target/iscsi/cxgbit/cxgbit_ddp.c 	__skb_queue_tail(&csk->ppodq, skb);
skb                63 drivers/target/iscsi/cxgbit/cxgbit_lro.h #define cxgbit_skb_lro_cb(skb)	((struct cxgbit_lro_cb *)skb->data)
skb                64 drivers/target/iscsi/cxgbit/cxgbit_lro.h #define cxgbit_skb_lro_pdu_cb(skb, i)	\
skb                65 drivers/target/iscsi/cxgbit/cxgbit_lro.h 	((struct cxgbit_lro_pdu_cb *)(skb->data + sizeof(struct cxgbit_lro_cb) \
skb                92 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct sk_buff *skb;
skb                97 drivers/target/iscsi/cxgbit/cxgbit_main.c 		skb = alloc_skb(0, GFP_ATOMIC);
skb                98 drivers/target/iscsi/cxgbit/cxgbit_main.c 		if (!skb)
skb               102 drivers/target/iscsi/cxgbit/cxgbit_main.c 		__skb_queue_tail(&csk->rxq, skb);
skb               190 drivers/target/iscsi/cxgbit/cxgbit_main.c cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
skb               192 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
skb               193 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
skb               212 drivers/target/iscsi/cxgbit/cxgbit_main.c cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
skb               215 drivers/target/iscsi/cxgbit/cxgbit_main.c 	u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
skb               219 drivers/target/iscsi/cxgbit/cxgbit_main.c 	__skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
skb               223 drivers/target/iscsi/cxgbit/cxgbit_main.c 		__skb_fill_page_desc(skb, skb_frag_idx + i,
skb               228 drivers/target/iscsi/cxgbit/cxgbit_main.c 	skb_shinfo(skb)->nr_frags += gl->nfrags;
skb               235 drivers/target/iscsi/cxgbit/cxgbit_main.c cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
skb               237 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
skb               238 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
skb               251 drivers/target/iscsi/cxgbit/cxgbit_main.c 		pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
skb               254 drivers/target/iscsi/cxgbit/cxgbit_main.c 			cxgbit_skcb_flags(skb) = 0;
skb               266 drivers/target/iscsi/cxgbit/cxgbit_main.c 		pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
skb               277 drivers/target/iscsi/cxgbit/cxgbit_main.c 		pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
skb               282 drivers/target/iscsi/cxgbit/cxgbit_main.c 			cxgbit_skcb_flags(skb) = 0;
skb               299 drivers/target/iscsi/cxgbit/cxgbit_main.c 	cxgbit_copy_frags(skb, gl, offset);
skb               303 drivers/target/iscsi/cxgbit/cxgbit_main.c 	skb->len += len;
skb               304 drivers/target/iscsi/cxgbit/cxgbit_main.c 	skb->data_len += len;
skb               305 drivers/target/iscsi/cxgbit/cxgbit_main.c 	skb->truesize += len;
skb               312 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct sk_buff *skb;
skb               315 drivers/target/iscsi/cxgbit/cxgbit_main.c 	skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);
skb               317 drivers/target/iscsi/cxgbit/cxgbit_main.c 	if (unlikely(!skb))
skb               320 drivers/target/iscsi/cxgbit/cxgbit_main.c 	memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);
skb               322 drivers/target/iscsi/cxgbit/cxgbit_main.c 	cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;
skb               324 drivers/target/iscsi/cxgbit/cxgbit_main.c 	lro_cb = cxgbit_skb_lro_cb(skb);
skb               330 drivers/target/iscsi/cxgbit/cxgbit_main.c 	return skb;
skb               333 drivers/target/iscsi/cxgbit/cxgbit_main.c static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
skb               338 drivers/target/iscsi/cxgbit/cxgbit_main.c 	__skb_queue_tail(&csk->rxq, skb);
skb               347 drivers/target/iscsi/cxgbit/cxgbit_main.c static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
skb               349 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
skb               354 drivers/target/iscsi/cxgbit/cxgbit_main.c 	__skb_unlink(skb, &lro_mgr->lroq);
skb               355 drivers/target/iscsi/cxgbit/cxgbit_main.c 	cxgbit_queue_lro_skb(csk, skb);
skb               365 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct sk_buff *skb;
skb               367 drivers/target/iscsi/cxgbit/cxgbit_main.c 	while ((skb = skb_peek(&lro_mgr->lroq)))
skb               368 drivers/target/iscsi/cxgbit/cxgbit_main.c 		cxgbit_lro_flush(lro_mgr, skb);
skb               376 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct sk_buff *skb;
skb               393 drivers/target/iscsi/cxgbit/cxgbit_main.c 	skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
skb               394 drivers/target/iscsi/cxgbit/cxgbit_main.c 	if (unlikely(!skb))
skb               397 drivers/target/iscsi/cxgbit/cxgbit_main.c 	csk->lro_skb = skb;
skb               399 drivers/target/iscsi/cxgbit/cxgbit_main.c 	__skb_queue_tail(&lro_mgr->lroq, skb);
skb               403 drivers/target/iscsi/cxgbit/cxgbit_main.c 	skb = csk->lro_skb;
skb               404 drivers/target/iscsi/cxgbit/cxgbit_main.c 	lro_cb = cxgbit_skb_lro_cb(skb);
skb               406 drivers/target/iscsi/cxgbit/cxgbit_main.c 	if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
skb               409 drivers/target/iscsi/cxgbit/cxgbit_main.c 		cxgbit_lro_flush(lro_mgr, skb);
skb               414 drivers/target/iscsi/cxgbit/cxgbit_main.c 		cxgbit_lro_add_packet_gl(skb, op, gl);
skb               416 drivers/target/iscsi/cxgbit/cxgbit_main.c 		cxgbit_lro_add_packet_rsp(skb, op, rsp);
skb               436 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct sk_buff *skb;
skb               477 drivers/target/iscsi/cxgbit/cxgbit_main.c 		skb = napi_alloc_skb(napi, len);
skb               478 drivers/target/iscsi/cxgbit/cxgbit_main.c 		if (!skb)
skb               480 drivers/target/iscsi/cxgbit/cxgbit_main.c 		__skb_put(skb, len);
skb               481 drivers/target/iscsi/cxgbit/cxgbit_main.c 		skb_copy_to_linear_data(skb, &rsp[1], len);
skb               499 drivers/target/iscsi/cxgbit/cxgbit_main.c 		skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
skb               500 drivers/target/iscsi/cxgbit/cxgbit_main.c 		if (unlikely(!skb))
skb               504 drivers/target/iscsi/cxgbit/cxgbit_main.c 	rpl = (struct cpl_tx_data *)skb->data;
skb               506 drivers/target/iscsi/cxgbit/cxgbit_main.c 	cxgbit_skcb_rx_opcode(skb) = op;
skb               510 drivers/target/iscsi/cxgbit/cxgbit_main.c 		 ntohl(rpl->ot.opcode_tid), skb);
skb               513 drivers/target/iscsi/cxgbit/cxgbit_main.c 		cxgbit_cplhandlers[op](cdev, skb);
skb               516 drivers/target/iscsi/cxgbit/cxgbit_main.c 		__kfree_skb(skb);
skb               535 drivers/target/iscsi/cxgbit/cxgbit_main.c 	struct sk_buff *skb;
skb               562 drivers/target/iscsi/cxgbit/cxgbit_main.c 		skb = alloc_skb(0, GFP_ATOMIC);
skb               563 drivers/target/iscsi/cxgbit/cxgbit_main.c 		if (!skb)
skb               567 drivers/target/iscsi/cxgbit/cxgbit_main.c 		__skb_queue_tail(&csk->rxq, skb);
skb                29 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct sk_buff *skb = NULL;
skb                35 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb = alloc_skb_with_frags(hdr_len, len,
skb                38 drivers/target/iscsi/cxgbit/cxgbit_target.c 		if (!skb)
skb                41 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_reserve(skb, TX_HDR_LEN);
skb                42 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_reset_transport_header(skb);
skb                43 drivers/target/iscsi/cxgbit/cxgbit_target.c 		__skb_put(skb, ISCSI_HDR_LEN);
skb                44 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb->data_len = len;
skb                45 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb->len += len;
skb                51 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
skb                52 drivers/target/iscsi/cxgbit/cxgbit_target.c 		if (!skb)
skb                55 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_reserve(skb, TX_HDR_LEN + iso_len);
skb                56 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_reset_transport_header(skb);
skb                57 drivers/target/iscsi/cxgbit/cxgbit_target.c 		__skb_put(skb, ISCSI_HDR_LEN);
skb                61 drivers/target/iscsi/cxgbit/cxgbit_target.c 	cxgbit_skcb_submode(skb) = submode;
skb                62 drivers/target/iscsi/cxgbit/cxgbit_target.c 	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
skb                63 drivers/target/iscsi/cxgbit/cxgbit_target.c 	cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
skb                64 drivers/target/iscsi/cxgbit/cxgbit_target.c 	return skb;
skb                79 drivers/target/iscsi/cxgbit/cxgbit_target.c static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
skb                81 drivers/target/iscsi/cxgbit/cxgbit_target.c 	int length = skb->len;
skb                83 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
skb                86 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
skb               113 drivers/target/iscsi/cxgbit/cxgbit_target.c static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
skb               117 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (cxgbit_is_ofld_imm(skb))
skb               118 drivers/target/iscsi/cxgbit/cxgbit_target.c 		return DIV_ROUND_UP(skb->len, 8);
skb               119 drivers/target/iscsi/cxgbit/cxgbit_target.c 	flits = skb_transport_offset(skb) / 8;
skb               120 drivers/target/iscsi/cxgbit/cxgbit_target.c 	cnt = skb_shinfo(skb)->nr_frags;
skb               121 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
skb               129 drivers/target/iscsi/cxgbit/cxgbit_target.c cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
skb               132 drivers/target/iscsi/cxgbit/cxgbit_target.c 	unsigned int submode = cxgbit_skcb_submode(skb);
skb               136 drivers/target/iscsi/cxgbit/cxgbit_target.c 	cpl = __skb_push(skb, sizeof(*cpl));
skb               156 drivers/target/iscsi/cxgbit/cxgbit_target.c 	__skb_pull(skb, sizeof(*cpl));
skb               160 drivers/target/iscsi/cxgbit/cxgbit_target.c cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
skb               165 drivers/target/iscsi/cxgbit/cxgbit_target.c 	u32 submode = cxgbit_skcb_submode(skb);
skb               173 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
skb               180 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (cxgbit_is_ofld_imm(skb))
skb               183 drivers/target/iscsi/cxgbit/cxgbit_target.c 	req = __skb_push(skb, hdr_size);
skb               197 drivers/target/iscsi/cxgbit/cxgbit_target.c static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
skb               199 drivers/target/iscsi/cxgbit/cxgbit_target.c 	kfree_skb(skb);
skb               204 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct sk_buff *skb;
skb               206 drivers/target/iscsi/cxgbit/cxgbit_target.c 	while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
skb               207 drivers/target/iscsi/cxgbit/cxgbit_target.c 		u32 dlen = skb->len;
skb               208 drivers/target/iscsi/cxgbit/cxgbit_target.c 		u32 len = skb->len;
skb               214 drivers/target/iscsi/cxgbit/cxgbit_target.c 		if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
skb               217 drivers/target/iscsi/cxgbit/cxgbit_target.c 		if (cxgbit_is_ofld_imm(skb))
skb               221 drivers/target/iscsi/cxgbit/cxgbit_target.c 					cxgbit_calc_tx_flits_ofld(skb)) +
skb               224 drivers/target/iscsi/cxgbit/cxgbit_target.c 		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
skb               240 drivers/target/iscsi/cxgbit/cxgbit_target.c 				 csk, skb->len, skb->data_len,
skb               244 drivers/target/iscsi/cxgbit/cxgbit_target.c 		__skb_unlink(skb, &csk->txq);
skb               245 drivers/target/iscsi/cxgbit/cxgbit_target.c 		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
skb               246 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb->csum = (__force __wsum)(credits_needed + flowclen16);
skb               251 drivers/target/iscsi/cxgbit/cxgbit_target.c 			 csk, skb->len, skb->data_len, credits_needed,
skb               254 drivers/target/iscsi/cxgbit/cxgbit_target.c 		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
skb               255 drivers/target/iscsi/cxgbit/cxgbit_target.c 			len += cxgbit_skcb_tx_extralen(skb);
skb               264 drivers/target/iscsi/cxgbit/cxgbit_target.c 			cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
skb               268 drivers/target/iscsi/cxgbit/cxgbit_target.c 		} else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
skb               271 drivers/target/iscsi/cxgbit/cxgbit_target.c 				(struct cpl_close_con_req *)skb->data;
skb               276 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_sock_enqueue_wr(csk, skb);
skb               277 drivers/target/iscsi/cxgbit/cxgbit_target.c 		t4_set_arp_err_handler(skb, csk,
skb               281 drivers/target/iscsi/cxgbit/cxgbit_target.c 			 csk, csk->tid, skb, len);
skb               283 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
skb               302 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct sk_buff *skb;
skb               312 drivers/target/iscsi/cxgbit/cxgbit_target.c 		while ((skb = __skb_dequeue(&backlogq))) {
skb               313 drivers/target/iscsi/cxgbit/cxgbit_target.c 			fn = cxgbit_skcb_rx_backlog_fn(skb);
skb               314 drivers/target/iscsi/cxgbit/cxgbit_target.c 			fn(csk, skb);
skb               324 drivers/target/iscsi/cxgbit/cxgbit_target.c static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
skb               332 drivers/target/iscsi/cxgbit/cxgbit_target.c 		__kfree_skb(skb);
skb               344 drivers/target/iscsi/cxgbit/cxgbit_target.c 	csk->write_seq += skb->len +
skb               345 drivers/target/iscsi/cxgbit/cxgbit_target.c 			  cxgbit_skcb_tx_extralen(skb);
skb               348 drivers/target/iscsi/cxgbit/cxgbit_target.c 	__skb_queue_tail(&csk->txq, skb);
skb               357 drivers/target/iscsi/cxgbit/cxgbit_target.c cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset,
skb               381 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_fill_page_desc(skb, i, page, sg->offset + page_off,
skb               383 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb->data_len += cur_len;
skb               384 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb->len += cur_len;
skb               385 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb->truesize += cur_len;
skb               400 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_fill_page_desc(skb, i, page, 0, padding);
skb               401 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb->data_len += padding;
skb               402 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb->len += padding;
skb               403 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb->truesize += padding;
skb               414 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct sk_buff *skb;
skb               434 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb = __cxgbit_alloc_skb(csk, 0, true);
skb               435 drivers/target/iscsi/cxgbit/cxgbit_target.c 		if (unlikely(!skb))
skb               438 drivers/target/iscsi/cxgbit/cxgbit_target.c 		memset(skb->data, 0, ISCSI_HDR_LEN);
skb               439 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
skb               440 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_skcb_submode(skb) |= (csk->submode &
skb               442 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_skcb_tx_extralen(skb) = (num_pdu *
skb               443 drivers/target/iscsi/cxgbit/cxgbit_target.c 				cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
skb               466 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_cpl_tx_data_iso(skb, &iso_info);
skb               472 drivers/target/iscsi/cxgbit/cxgbit_target.c 					(struct iscsi_data_rsp *)skb->data,
skb               475 drivers/target/iscsi/cxgbit/cxgbit_target.c 		ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
skb               477 drivers/target/iscsi/cxgbit/cxgbit_target.c 			__kfree_skb(skb);
skb               481 drivers/target/iscsi/cxgbit/cxgbit_target.c 		ret = cxgbit_queue_skb(csk, skb);
skb               504 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct sk_buff *skb;
skb               507 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb = cxgbit_alloc_skb(csk, 0);
skb               508 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (unlikely(!skb))
skb               511 drivers/target/iscsi/cxgbit/cxgbit_target.c 	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
skb               514 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_skcb_submode(skb) |= (csk->submode &
skb               516 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_skcb_tx_extralen(skb) =
skb               517 drivers/target/iscsi/cxgbit/cxgbit_target.c 				cxgbit_digest_len[cxgbit_skcb_submode(skb)];
skb               520 drivers/target/iscsi/cxgbit/cxgbit_target.c 	ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
skb               522 drivers/target/iscsi/cxgbit/cxgbit_target.c 		__kfree_skb(skb);
skb               526 drivers/target/iscsi/cxgbit/cxgbit_target.c 	return cxgbit_queue_skb(csk, skb);
skb               554 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct sk_buff *skb;
skb               557 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
skb               558 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (unlikely(!skb))
skb               561 drivers/target/iscsi/cxgbit/cxgbit_target.c 	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
skb               566 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);
skb               569 drivers/target/iscsi/cxgbit/cxgbit_target.c 			skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
skb               573 drivers/target/iscsi/cxgbit/cxgbit_target.c 	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
skb               574 drivers/target/iscsi/cxgbit/cxgbit_target.c 				       cxgbit_skcb_submode(skb)];
skb               576 drivers/target/iscsi/cxgbit/cxgbit_target.c 	return cxgbit_queue_skb(csk, skb);
skb               795 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct sk_buff *skb;
skb               799 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb = cxgbit_alloc_skb(csk, length + padding);
skb               800 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (!skb)
skb               802 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
skb               803 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);
skb               806 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_store_bits(skb, ISCSI_HDR_LEN + length,
skb               811 drivers/target/iscsi/cxgbit/cxgbit_target.c 			kfree_skb(skb);
skb               818 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (cxgbit_queue_skb(csk, skb))
skb               828 drivers/target/iscsi/cxgbit/cxgbit_target.c cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
skb               834 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);
skb               836 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb_prepare_seq_read(skb, pdu_cb->doffset,
skb               878 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
skb               898 drivers/target/iscsi/cxgbit/cxgbit_target.c 		struct skb_shared_info *ssi = skb_shinfo(csk->skb);
skb               914 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
skb               987 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
skb              1017 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
skb              1077 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
skb              1092 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
skb              1134 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_copy_bits(csk->skb, pdu_cb->doffset,
skb              1160 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
skb              1195 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb_copy_bits(csk->skb, pdu_cb->doffset,
skb              1212 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
skb              1290 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
skb              1328 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
skb              1361 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);
skb              1367 drivers/target/iscsi/cxgbit/cxgbit_target.c cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
skb              1369 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
skb              1372 drivers/target/iscsi/cxgbit/cxgbit_target.c 	cxgbit_rx_pdu_cb(skb) = pdu_cb;
skb              1374 drivers/target/iscsi/cxgbit/cxgbit_target.c 	csk->skb = skb;
skb              1386 drivers/target/iscsi/cxgbit/cxgbit_target.c static void cxgbit_lro_skb_dump(struct sk_buff *skb)
skb              1388 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct skb_shared_info *ssi = skb_shinfo(skb);
skb              1389 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
skb              1390 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
skb              1394 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb, skb->head, skb->data, skb->len, skb->data_len,
skb              1397 drivers/target/iscsi/cxgbit/cxgbit_target.c 		skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);
skb              1402 drivers/target/iscsi/cxgbit/cxgbit_target.c 			skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
skb              1406 drivers/target/iscsi/cxgbit/cxgbit_target.c 			skb, i, skb_frag_off(&ssi->frags[i]),
skb              1412 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct sk_buff *skb = csk->lro_hskb;
skb              1413 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct skb_shared_info *ssi = skb_shinfo(skb);
skb              1416 drivers/target/iscsi/cxgbit/cxgbit_target.c 	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
skb              1420 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb->data_len = 0;
skb              1421 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb->truesize -= skb->len;
skb              1422 drivers/target/iscsi/cxgbit/cxgbit_target.c 	skb->len = 0;
skb              1426 drivers/target/iscsi/cxgbit/cxgbit_target.c cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
skb              1430 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
skb              1432 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct skb_shared_info *ssi = skb_shinfo(skb);
skb              1496 drivers/target/iscsi/cxgbit/cxgbit_target.c static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1498 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
skb              1499 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
skb              1504 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_lro_skb_merge(csk, skb, 0);
skb              1524 drivers/target/iscsi/cxgbit/cxgbit_target.c 		ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
skb              1530 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);
skb              1536 drivers/target/iscsi/cxgbit/cxgbit_target.c static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1538 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
skb              1539 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
skb              1546 drivers/target/iscsi/cxgbit/cxgbit_target.c 		cxgbit_lro_skb_dump(skb);
skb              1552 drivers/target/iscsi/cxgbit/cxgbit_target.c 	ret = cxgbit_process_lro_skb(csk, skb);
skb              1562 drivers/target/iscsi/cxgbit/cxgbit_target.c static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
skb              1567 drivers/target/iscsi/cxgbit/cxgbit_target.c 	if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
skb              1569 drivers/target/iscsi/cxgbit/cxgbit_target.c 			ret = cxgbit_rx_lro_skb(csk, skb);
skb              1571 drivers/target/iscsi/cxgbit/cxgbit_target.c 			ret = cxgbit_process_lro_skb(csk, skb);
skb              1574 drivers/target/iscsi/cxgbit/cxgbit_target.c 	__kfree_skb(skb);
skb              1592 drivers/target/iscsi/cxgbit/cxgbit_target.c 	struct sk_buff *skb;
skb              1602 drivers/target/iscsi/cxgbit/cxgbit_target.c 	while ((skb = __skb_dequeue(&rxq))) {
skb              1603 drivers/target/iscsi/cxgbit/cxgbit_target.c 		if (cxgbit_rx_skb(csk, skb))
skb               401 drivers/target/target_core_user.c static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
skb               406 drivers/target/target_core_user.c static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
skb               411 drivers/target/target_core_user.c static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
skb               417 drivers/target/target_core_user.c static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
skb              1759 drivers/target/target_core_user.c 	struct sk_buff *skb;
skb              1763 drivers/target/target_core_user.c 	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
skb              1764 drivers/target/target_core_user.c 	if (!skb)
skb              1767 drivers/target/target_core_user.c 	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
skb              1771 drivers/target/target_core_user.c 	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
skb              1775 drivers/target/target_core_user.c 	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
skb              1779 drivers/target/target_core_user.c 	ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
skb              1783 drivers/target/target_core_user.c 	*buf = skb;
skb              1788 drivers/target/target_core_user.c 	nlmsg_free(skb);
skb              1794 drivers/target/target_core_user.c 				   struct sk_buff *skb, void *msg_header)
skb              1798 drivers/target/target_core_user.c 	genlmsg_end(skb, msg_header);
skb              1802 drivers/target/target_core_user.c 		nlmsg_free(skb);
skb              1806 drivers/target/target_core_user.c 	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
skb              1821 drivers/target/target_core_user.c 	struct sk_buff *skb = NULL;
skb              1825 drivers/target/target_core_user.c 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
skb              1829 drivers/target/target_core_user.c 	return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
skb              1835 drivers/target/target_core_user.c 	struct sk_buff *skb = NULL;
skb              1840 drivers/target/target_core_user.c 				      &skb, &msg_header);
skb              1844 drivers/target/target_core_user.c 				       skb, msg_header);
skb              2336 drivers/target/target_core_user.c 	struct sk_buff *skb = NULL;
skb              2341 drivers/target/target_core_user.c 				      &skb, &msg_header);
skb              2344 drivers/target/target_core_user.c 	ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
skb              2346 drivers/target/target_core_user.c 		nlmsg_free(skb);
skb              2350 drivers/target/target_core_user.c 				       skb, msg_header);
skb              2397 drivers/target/target_core_user.c 	struct sk_buff *skb = NULL;
skb              2402 drivers/target/target_core_user.c 				      &skb, &msg_header);
skb              2405 drivers/target/target_core_user.c 	ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
skb              2408 drivers/target/target_core_user.c 		nlmsg_free(skb);
skb              2412 drivers/target/target_core_user.c 				       skb, msg_header);
skb              2480 drivers/target/target_core_user.c 	struct sk_buff *skb = NULL;
skb              2485 drivers/target/target_core_user.c 				      &skb, &msg_header);
skb              2488 drivers/target/target_core_user.c 	ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
skb              2490 drivers/target/target_core_user.c 		nlmsg_free(skb);
skb              2494 drivers/target/target_core_user.c 				       skb, msg_header);
skb              1484 drivers/thermal/thermal_core.c 	struct sk_buff *skb;
skb              1499 drivers/thermal/thermal_core.c 	skb = genlmsg_new(size, GFP_ATOMIC);
skb              1500 drivers/thermal/thermal_core.c 	if (!skb)
skb              1504 drivers/thermal/thermal_core.c 	msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++,
skb              1508 drivers/thermal/thermal_core.c 		nlmsg_free(skb);
skb              1513 drivers/thermal/thermal_core.c 	attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT,
skb              1517 drivers/thermal/thermal_core.c 		nlmsg_free(skb);
skb              1523 drivers/thermal/thermal_core.c 		nlmsg_free(skb);
skb              1533 drivers/thermal/thermal_core.c 	genlmsg_end(skb, msg_header);
skb              1535 drivers/thermal/thermal_core.c 	result = genlmsg_multicast(&thermal_event_genl_family, skb, 0,
skb                92 drivers/tty/ipwireless/network.c 				     struct sk_buff *skb)
skb               113 drivers/tty/ipwireless/network.c 		if (skb_headroom(skb) >= 2) {
skb               114 drivers/tty/ipwireless/network.c 			memcpy(skb_push(skb, 2), header, 2);
skb               116 drivers/tty/ipwireless/network.c 					       IPW_CHANNEL_RAS, skb->data,
skb               117 drivers/tty/ipwireless/network.c 					       skb->len,
skb               121 drivers/tty/ipwireless/network.c 				skb_pull(skb, 2);
skb               126 drivers/tty/ipwireless/network.c 			buf = kmalloc(skb->len + 2, GFP_ATOMIC);
skb               129 drivers/tty/ipwireless/network.c 			memcpy(buf + 2, skb->data, skb->len);
skb               133 drivers/tty/ipwireless/network.c 					       skb->len + 2,
skb               140 drivers/tty/ipwireless/network.c 		kfree_skb(skb);
skb               348 drivers/tty/ipwireless/network.c 	struct sk_buff *skb;
skb               355 drivers/tty/ipwireless/network.c 	skb = dev_alloc_skb(length + 4);
skb               356 drivers/tty/ipwireless/network.c 	if (skb == NULL)
skb               358 drivers/tty/ipwireless/network.c 	skb_reserve(skb, 2);
skb               359 drivers/tty/ipwireless/network.c 	skb_put_data(skb, data, length);
skb               361 drivers/tty/ipwireless/network.c 	return skb;
skb               396 drivers/tty/ipwireless/network.c 				struct sk_buff *skb;
skb               402 drivers/tty/ipwireless/network.c 				skb = ipw_packet_received_skb(data, length);
skb               403 drivers/tty/ipwireless/network.c 				if (skb)
skb               404 drivers/tty/ipwireless/network.c 					ppp_input(network->ppp_channel, skb);
skb               143 drivers/tty/n_gsm.c 	struct sk_buff *skb;	/* Frame being sent */
skb               857 drivers/tty/n_gsm.c 	if (dlci->skb == NULL) {
skb               858 drivers/tty/n_gsm.c 		dlci->skb = skb_dequeue_tail(&dlci->skb_list);
skb               859 drivers/tty/n_gsm.c 		if (dlci->skb == NULL)
skb               863 drivers/tty/n_gsm.c 	len = dlci->skb->len + overhead;
skb               869 drivers/tty/n_gsm.c 			dev_kfree_skb_any(dlci->skb);
skb               870 drivers/tty/n_gsm.c 			dlci->skb = NULL;
skb               883 drivers/tty/n_gsm.c 		skb_queue_tail(&dlci->skb_list, dlci->skb);
skb               884 drivers/tty/n_gsm.c 		dlci->skb = NULL;
skb               894 drivers/tty/n_gsm.c 	memcpy(dp, dlci->skb->data, len);
skb               895 drivers/tty/n_gsm.c 	skb_pull(dlci->skb, len);
skb               898 drivers/tty/n_gsm.c 		dev_kfree_skb_any(dlci->skb);
skb               899 drivers/tty/n_gsm.c 		dlci->skb = NULL;
skb              1685 drivers/tty/n_gsm.c 	while ((dlci->skb = skb_dequeue(&dlci->skb_list)))
skb              1686 drivers/tty/n_gsm.c 		dev_kfree_skb(dlci->skb);
skb              2689 drivers/tty/n_gsm.c static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb,
skb              2696 drivers/tty/n_gsm.c 	skb_queue_head(&dlci->skb_list, skb);
skb              2698 drivers/tty/n_gsm.c 	net->stats.tx_bytes += skb->len;
skb              2720 drivers/tty/n_gsm.c 	struct sk_buff *skb;
skb              2725 drivers/tty/n_gsm.c 	skb = dev_alloc_skb(size + NET_IP_ALIGN);
skb              2726 drivers/tty/n_gsm.c 	if (!skb) {
skb              2732 drivers/tty/n_gsm.c 	skb_reserve(skb, NET_IP_ALIGN);
skb              2733 drivers/tty/n_gsm.c 	skb_put_data(skb, in_buf, size);
skb              2735 drivers/tty/n_gsm.c 	skb->dev = net;
skb              2736 drivers/tty/n_gsm.c 	skb->protocol = htons(ETH_P_IP);
skb              2739 drivers/tty/n_gsm.c 	netif_rx(skb);
skb              7607 drivers/tty/synclink.c static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
skb              7620 drivers/tty/synclink.c 	info->xmit_cnt = skb->len;
skb              7621 drivers/tty/synclink.c 	mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
skb              7625 drivers/tty/synclink.c 	dev->stats.tx_bytes += skb->len;
skb              7628 drivers/tty/synclink.c 	dev_kfree_skb(skb);
skb              7880 drivers/tty/synclink.c 	struct sk_buff *skb = dev_alloc_skb(size);
skb              7886 drivers/tty/synclink.c 	if (skb == NULL) {
skb              7893 drivers/tty/synclink.c 	skb_put_data(skb, buf, size);
skb              7895 drivers/tty/synclink.c 	skb->protocol = hdlc_type_trans(skb, dev);
skb              7900 drivers/tty/synclink.c 	netif_rx(skb);
skb              1454 drivers/tty/synclink_gt.c static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
skb              1462 drivers/tty/synclink_gt.c 	if (!skb->len)
skb              1470 drivers/tty/synclink_gt.c 	dev->stats.tx_bytes += skb->len;
skb              1476 drivers/tty/synclink_gt.c 	tx_load(info, skb->data, skb->len);
skb              1480 drivers/tty/synclink_gt.c 	dev_kfree_skb(skb);
skb              1724 drivers/tty/synclink_gt.c 	struct sk_buff *skb = dev_alloc_skb(size);
skb              1729 drivers/tty/synclink_gt.c 	if (skb == NULL) {
skb              1735 drivers/tty/synclink_gt.c 	skb_put_data(skb, buf, size);
skb              1737 drivers/tty/synclink_gt.c 	skb->protocol = hdlc_type_trans(skb, dev);
skb              1742 drivers/tty/synclink_gt.c 	netif_rx(skb);
skb              1577 drivers/tty/synclinkmp.c static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
skb              1590 drivers/tty/synclinkmp.c 	info->tx_count = skb->len;
skb              1591 drivers/tty/synclinkmp.c 	tx_load_dma_buffer(info, skb->data, skb->len);
skb              1595 drivers/tty/synclinkmp.c 	dev->stats.tx_bytes += skb->len;
skb              1598 drivers/tty/synclinkmp.c 	dev_kfree_skb(skb);
skb              1850 drivers/tty/synclinkmp.c 	struct sk_buff *skb = dev_alloc_skb(size);
skb              1856 drivers/tty/synclinkmp.c 	if (skb == NULL) {
skb              1863 drivers/tty/synclinkmp.c 	skb_put_data(skb, buf, size);
skb              1865 drivers/tty/synclinkmp.c 	skb->protocol = hdlc_type_trans(skb, dev);
skb              1870 drivers/tty/synclinkmp.c 	netif_rx(skb);
skb               159 drivers/usb/atm/usbatm.c static int usbatm_atm_send(struct atm_vcc *vcc, struct sk_buff *skb);
skb               183 drivers/usb/atm/usbatm.c static inline void usbatm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
skb               186 drivers/usb/atm/usbatm.c 		vcc->pop(vcc, skb);
skb               188 drivers/usb/atm/usbatm.c 		dev_kfree_skb_any(skb);
skb               336 drivers/usb/atm/usbatm.c 		struct sk_buff *skb;
skb               370 drivers/usb/atm/usbatm.c 		skb = dev_alloc_skb(length);
skb               371 drivers/usb/atm/usbatm.c 		if (!skb) {
skb               381 drivers/usb/atm/usbatm.c 		     __func__, skb, skb->truesize);
skb               383 drivers/usb/atm/usbatm.c 		if (!atm_charge(vcc, skb->truesize)) {
skb               385 drivers/usb/atm/usbatm.c 				  __func__, skb->truesize);
skb               386 drivers/usb/atm/usbatm.c 			dev_kfree_skb_any(skb);
skb               390 drivers/usb/atm/usbatm.c 		skb_copy_to_linear_data(skb,
skb               393 drivers/usb/atm/usbatm.c 		__skb_put(skb, length);
skb               397 drivers/usb/atm/usbatm.c 		     __func__, skb, skb->len, skb->truesize);
skb               399 drivers/usb/atm/usbatm.c 		PACKETDEBUG(instance, skb->data, skb->len);
skb               401 drivers/usb/atm/usbatm.c 		vcc->push(vcc, skb);
skb               455 drivers/usb/atm/usbatm.c 				       struct sk_buff *skb,
skb               458 drivers/usb/atm/usbatm.c 	struct usbatm_control *ctrl = UDSL_SKB(skb);
skb               465 drivers/usb/atm/usbatm.c 		unsigned int data_len = min_t(unsigned int, skb->len, ATM_CELL_PAYLOAD);
skb               476 drivers/usb/atm/usbatm.c 		skb_copy_from_linear_data(skb, ptr, data_len);
skb               478 drivers/usb/atm/usbatm.c 		__skb_pull(skb, data_len);
skb               570 drivers/usb/atm/usbatm.c 	struct sk_buff *skb = instance->current_skb;
skb               576 drivers/usb/atm/usbatm.c 	if (!skb)
skb               577 drivers/usb/atm/usbatm.c 		skb = skb_dequeue(&instance->sndqueue);
skb               579 drivers/usb/atm/usbatm.c 	while (skb) {
skb               589 drivers/usb/atm/usbatm.c 		bytes_written += usbatm_write_cells(instance, skb,
skb               595 drivers/usb/atm/usbatm.c 		     __func__, bytes_written, skb, urb);
skb               597 drivers/usb/atm/usbatm.c 		if (!UDSL_SKB(skb)->len) {
skb               598 drivers/usb/atm/usbatm.c 			struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
skb               600 drivers/usb/atm/usbatm.c 			usbatm_pop(vcc, skb);
skb               603 drivers/usb/atm/usbatm.c 			skb = skb_dequeue(&instance->sndqueue);
skb               606 drivers/usb/atm/usbatm.c 		if (bytes_written == buf_size || (!skb && bytes_written)) {
skb               615 drivers/usb/atm/usbatm.c 	instance->current_skb = skb;
skb               621 drivers/usb/atm/usbatm.c 	struct sk_buff *skb, *n;
skb               624 drivers/usb/atm/usbatm.c 	skb_queue_walk_safe(&instance->sndqueue, skb, n) {
skb               625 drivers/usb/atm/usbatm.c 		if (UDSL_SKB(skb)->atm.vcc == vcc) {
skb               626 drivers/usb/atm/usbatm.c 			atm_dbg(instance, "%s: popping skb 0x%p\n", __func__, skb);
skb               627 drivers/usb/atm/usbatm.c 			__skb_unlink(skb, &instance->sndqueue);
skb               628 drivers/usb/atm/usbatm.c 			usbatm_pop(vcc, skb);
skb               634 drivers/usb/atm/usbatm.c 	if ((skb = instance->current_skb) && (UDSL_SKB(skb)->atm.vcc == vcc)) {
skb               635 drivers/usb/atm/usbatm.c 		atm_dbg(instance, "%s: popping current skb (0x%p)\n", __func__, skb);
skb               637 drivers/usb/atm/usbatm.c 		usbatm_pop(vcc, skb);
skb               642 drivers/usb/atm/usbatm.c static int usbatm_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb               645 drivers/usb/atm/usbatm.c 	struct usbatm_control *ctrl = UDSL_SKB(skb);
skb               663 drivers/usb/atm/usbatm.c 	if (skb->len > ATM_MAX_AAL5_PDU) {
skb               665 drivers/usb/atm/usbatm.c 				__func__, skb->len, ATM_MAX_AAL5_PDU);
skb               670 drivers/usb/atm/usbatm.c 	PACKETDEBUG(instance, skb->data, skb->len);
skb               674 drivers/usb/atm/usbatm.c 	ctrl->len = skb->len;
skb               675 drivers/usb/atm/usbatm.c 	ctrl->crc = crc32_be(~0, skb->data, skb->len);
skb               677 drivers/usb/atm/usbatm.c 	skb_queue_tail(&instance->sndqueue, skb);
skb               683 drivers/usb/atm/usbatm.c 	usbatm_pop(vcc, skb);
skb               325 drivers/usb/gadget/function/f_eem.c 	struct sk_buff *skb = (struct sk_buff *)req->context;
skb               327 drivers/usb/gadget/function/f_eem.c 	dev_kfree_skb_any(skb);
skb               335 drivers/usb/gadget/function/f_eem.c static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
skb               342 drivers/usb/gadget/function/f_eem.c 	if (!skb)
skb               345 drivers/usb/gadget/function/f_eem.c 	len = skb->len;
skb               346 drivers/usb/gadget/function/f_eem.c 	headroom = skb_headroom(skb);
skb               347 drivers/usb/gadget/function/f_eem.c 	tailroom = skb_tailroom(skb);
skb               356 drivers/usb/gadget/function/f_eem.c 			(headroom >= EEM_HLEN) && !skb_cloned(skb))
skb               359 drivers/usb/gadget/function/f_eem.c 	skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC);
skb               360 drivers/usb/gadget/function/f_eem.c 	dev_kfree_skb_any(skb);
skb               361 drivers/usb/gadget/function/f_eem.c 	skb = skb2;
skb               362 drivers/usb/gadget/function/f_eem.c 	if (!skb)
skb               363 drivers/usb/gadget/function/f_eem.c 		return skb;
skb               367 drivers/usb/gadget/function/f_eem.c 	put_unaligned_be32(0xdeadbeef, skb_put(skb, 4));
skb               374 drivers/usb/gadget/function/f_eem.c 	len = skb->len;
skb               375 drivers/usb/gadget/function/f_eem.c 	put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));
skb               379 drivers/usb/gadget/function/f_eem.c 		put_unaligned_le16(0, skb_put(skb, 2));
skb               381 drivers/usb/gadget/function/f_eem.c 	return skb;
skb               389 drivers/usb/gadget/function/f_eem.c 			struct sk_buff *skb,
skb               400 drivers/usb/gadget/function/f_eem.c 		if (skb->len < EEM_HLEN) {
skb               407 drivers/usb/gadget/function/f_eem.c 		header = get_unaligned_le16(skb->data);
skb               408 drivers/usb/gadget/function/f_eem.c 		skb_pull(skb, EEM_HLEN);
skb               431 drivers/usb/gadget/function/f_eem.c 				if (skb->len < len) {
skb               436 drivers/usb/gadget/function/f_eem.c 				skb2 = skb_clone(skb, GFP_ATOMIC);
skb               475 drivers/usb/gadget/function/f_eem.c 			if ((skb->len < len)
skb               483 drivers/usb/gadget/function/f_eem.c 				crc = get_unaligned_le32(skb->data + len
skb               486 drivers/usb/gadget/function/f_eem.c 						skb->data, len - ETH_FCS_LEN);
skb               488 drivers/usb/gadget/function/f_eem.c 				crc = get_unaligned_be32(skb->data + len
skb               497 drivers/usb/gadget/function/f_eem.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
skb               516 drivers/usb/gadget/function/f_eem.c 		skb_pull(skb, len);
skb               517 drivers/usb/gadget/function/f_eem.c 	} while (skb->len);
skb               520 drivers/usb/gadget/function/f_eem.c 	dev_kfree_skb_any(skb);
skb              1014 drivers/usb/gadget/function/f_ncm.c 				    struct sk_buff *skb)
skb              1030 drivers/usb/gadget/function/f_ncm.c 	if (!skb && !ncm->skb_tx_data)
skb              1033 drivers/usb/gadget/function/f_ncm.c 	if (skb) {
skb              1040 drivers/usb/gadget/function/f_ncm.c 					skb->data,
skb              1041 drivers/usb/gadget/function/f_ncm.c 					skb->len);
skb              1042 drivers/usb/gadget/function/f_ncm.c 			crc_pos = skb_put(skb, sizeof(uint32_t));
skb              1054 drivers/usb/gadget/function/f_ncm.c 		    div + rem + skb->len +
skb              1118 drivers/usb/gadget/function/f_ncm.c 		put_ncm(&ntb_ndp, opts->dgram_item_len, skb->len);
skb              1123 drivers/usb/gadget/function/f_ncm.c 		skb_put_data(ncm->skb_tx_data, skb->data, skb->len);
skb              1124 drivers/usb/gadget/function/f_ncm.c 		dev_consume_skb_any(skb);
skb              1125 drivers/usb/gadget/function/f_ncm.c 		skb = NULL;
skb              1139 drivers/usb/gadget/function/f_ncm.c 	if (skb)
skb              1140 drivers/usb/gadget/function/f_ncm.c 		dev_kfree_skb_any(skb);
skb              1175 drivers/usb/gadget/function/f_ncm.c 			  struct sk_buff *skb,
skb              1179 drivers/usb/gadget/function/f_ncm.c 	__le16		*tmp = (void *) skb->data;
skb              1194 drivers/usb/gadget/function/f_ncm.c 			skb->len);
skb              1196 drivers/usb/gadget/function/f_ncm.c 			       skb->data, 32, false);
skb              1227 drivers/usb/gadget/function/f_ncm.c 		tmp = (void *)(skb->data + ndp_index);
skb              1270 drivers/usb/gadget/function/f_ncm.c 				crc = get_unaligned_le32(skb->data +
skb              1274 drivers/usb/gadget/function/f_ncm.c 						 skb->data + index,
skb              1294 drivers/usb/gadget/function/f_ncm.c 			skb_put_data(skb2, skb->data + index,
skb              1308 drivers/usb/gadget/function/f_ncm.c 	dev_consume_skb_any(skb);
skb              1315 drivers/usb/gadget/function/f_ncm.c 	dev_kfree_skb_any(skb);
skb                44 drivers/usb/gadget/function/f_phonet.c 		struct sk_buff		*skb;
skb               204 drivers/usb/gadget/function/f_phonet.c 	struct sk_buff *skb = req->context;
skb               209 drivers/usb/gadget/function/f_phonet.c 		dev->stats.tx_bytes += skb->len;
skb               220 drivers/usb/gadget/function/f_phonet.c 	dev_kfree_skb_any(skb);
skb               224 drivers/usb/gadget/function/f_phonet.c static netdev_tx_t pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
skb               231 drivers/usb/gadget/function/f_phonet.c 	if (skb->protocol != htons(ETH_P_PHONET))
skb               240 drivers/usb/gadget/function/f_phonet.c 	req->buf = skb->data;
skb               241 drivers/usb/gadget/function/f_phonet.c 	req->length = skb->len;
skb               244 drivers/usb/gadget/function/f_phonet.c 	req->context = skb;
skb               250 drivers/usb/gadget/function/f_phonet.c 	skb = NULL;
skb               255 drivers/usb/gadget/function/f_phonet.c 	if (unlikely(skb)) {
skb               256 drivers/usb/gadget/function/f_phonet.c 		dev_kfree_skb(skb);
skb               316 drivers/usb/gadget/function/f_phonet.c 	struct sk_buff *skb;
skb               323 drivers/usb/gadget/function/f_phonet.c 		skb = fp->rx.skb;
skb               324 drivers/usb/gadget/function/f_phonet.c 		if (!skb)
skb               325 drivers/usb/gadget/function/f_phonet.c 			skb = fp->rx.skb = netdev_alloc_skb(dev, 12);
skb               327 drivers/usb/gadget/function/f_phonet.c 			fp->rx.skb = NULL;
skb               330 drivers/usb/gadget/function/f_phonet.c 		if (unlikely(!skb))
skb               333 drivers/usb/gadget/function/f_phonet.c 		if (skb->len == 0) { /* First fragment */
skb               334 drivers/usb/gadget/function/f_phonet.c 			skb->protocol = htons(ETH_P_PHONET);
skb               335 drivers/usb/gadget/function/f_phonet.c 			skb_reset_mac_header(skb);
skb               337 drivers/usb/gadget/function/f_phonet.c 			skb_put_data(skb, page_address(page), 1);
skb               340 drivers/usb/gadget/function/f_phonet.c 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
skb               341 drivers/usb/gadget/function/f_phonet.c 				skb->len <= 1, req->actual, PAGE_SIZE);
skb               345 drivers/usb/gadget/function/f_phonet.c 			skb->dev = dev;
skb               347 drivers/usb/gadget/function/f_phonet.c 			dev->stats.rx_bytes += skb->len;
skb               349 drivers/usb/gadget/function/f_phonet.c 			netif_rx(skb);
skb               388 drivers/usb/gadget/function/f_phonet.c 	if (fp->rx.skb) {
skb               389 drivers/usb/gadget/function/f_phonet.c 		dev_kfree_skb_irq(fp->rx.skb);
skb               390 drivers/usb/gadget/function/f_phonet.c 		fp->rx.skb = NULL;
skb               369 drivers/usb/gadget/function/f_rndis.c 					struct sk_buff *skb)
skb               373 drivers/usb/gadget/function/f_rndis.c 	if (!skb)
skb               376 drivers/usb/gadget/function/f_rndis.c 	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
skb               379 drivers/usb/gadget/function/f_rndis.c 	dev_kfree_skb(skb);
skb               996 drivers/usb/gadget/function/rndis.c void rndis_add_hdr(struct sk_buff *skb)
skb              1000 drivers/usb/gadget/function/rndis.c 	if (!skb)
skb              1002 drivers/usb/gadget/function/rndis.c 	header = skb_push(skb, sizeof(*header));
skb              1005 drivers/usb/gadget/function/rndis.c 	header->MessageLength = cpu_to_le32(skb->len);
skb              1007 drivers/usb/gadget/function/rndis.c 	header->DataLength = cpu_to_le32(skb->len - sizeof(*header));
skb              1059 drivers/usb/gadget/function/rndis.c 			struct sk_buff *skb,
skb              1063 drivers/usb/gadget/function/rndis.c 	__le32 *tmp = (void *)skb->data;
skb              1068 drivers/usb/gadget/function/rndis.c 		dev_kfree_skb_any(skb);
skb              1074 drivers/usb/gadget/function/rndis.c 	if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
skb              1075 drivers/usb/gadget/function/rndis.c 		dev_kfree_skb_any(skb);
skb              1078 drivers/usb/gadget/function/rndis.c 	skb_trim(skb, get_unaligned_le32(tmp++));
skb              1080 drivers/usb/gadget/function/rndis.c 	skb_queue_tail(list, skb);
skb               189 drivers/usb/gadget/function/rndis.h void rndis_add_hdr(struct sk_buff *skb);
skb               190 drivers/usb/gadget/function/rndis.h int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
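
rndis_add_hdr() above pushes a packet header in front of the Ethernet frame; rndis_rm_hdr() pulls and trims it back off on receive. A sketch of the receive-side strip mirroring the pull-then-trim sequence at rndis.c:1074-1078, with error handling reduced to return codes (the caller, not this helper, frees the skb on failure):

    #include <linux/skbuff.h>
    #include <asm/unaligned.h>

    static int rndis_strip_hdr(struct sk_buff *skb)
    {
            __le32 *tmp = (void *)skb->data;

            /* MessageType must be REMOTE_NDIS_PACKET_MSG (0x00000001) */
            if (get_unaligned_le32(tmp++) != 0x00000001)
                    return -EINVAL;

            tmp++;                          /* skip MessageLength */

            /* DataOffset is relative to the field after MessageLength */
            if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8))
                    return -EOVERFLOW;

            /* DataLength bounds the remaining payload */
            skb_trim(skb, get_unaligned_le32(tmp++));
            return 0;
    }
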
skb                70 drivers/usb/gadget/function/u_ether.c 	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
skb                72 drivers/usb/gadget/function/u_ether.c 						struct sk_buff *skb,
skb               178 drivers/usb/gadget/function/u_ether.c 	struct sk_buff	*skb;
skb               220 drivers/usb/gadget/function/u_ether.c 	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
skb               221 drivers/usb/gadget/function/u_ether.c 	if (skb == NULL) {
skb               231 drivers/usb/gadget/function/u_ether.c 		skb_reserve(skb, NET_IP_ALIGN);
skb               233 drivers/usb/gadget/function/u_ether.c 	req->buf = skb->data;
skb               236 drivers/usb/gadget/function/u_ether.c 	req->context = skb;
skb               244 drivers/usb/gadget/function/u_ether.c 		if (skb)
skb               245 drivers/usb/gadget/function/u_ether.c 			dev_kfree_skb_any(skb);
skb               255 drivers/usb/gadget/function/u_ether.c 	struct sk_buff	*skb = req->context, *skb2;
skb               263 drivers/usb/gadget/function/u_ether.c 		skb_put(skb, req->actual);
skb               271 drivers/usb/gadget/function/u_ether.c 							skb,
skb               274 drivers/usb/gadget/function/u_ether.c 				dev_kfree_skb_any(skb);
skb               279 drivers/usb/gadget/function/u_ether.c 			skb_queue_tail(&dev->rx_frames, skb);
skb               281 drivers/usb/gadget/function/u_ether.c 		skb = NULL;
skb               318 drivers/usb/gadget/function/u_ether.c 		dev_kfree_skb_any(skb);
skb               332 drivers/usb/gadget/function/u_ether.c 	if (skb)
skb               333 drivers/usb/gadget/function/u_ether.c 		dev_kfree_skb_any(skb);
skb               440 drivers/usb/gadget/function/u_ether.c 	struct sk_buff	*skb = req->context;
skb               450 drivers/usb/gadget/function/u_ether.c 		dev_kfree_skb_any(skb);
skb               453 drivers/usb/gadget/function/u_ether.c 		dev->net->stats.tx_bytes += skb->len;
skb               454 drivers/usb/gadget/function/u_ether.c 		dev_consume_skb_any(skb);
skb               472 drivers/usb/gadget/function/u_ether.c static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
skb               493 drivers/usb/gadget/function/u_ether.c 	if (skb && !in) {
skb               494 drivers/usb/gadget/function/u_ether.c 		dev_kfree_skb_any(skb);
skb               499 drivers/usb/gadget/function/u_ether.c 	if (skb && !is_promisc(cdc_filter)) {
skb               500 drivers/usb/gadget/function/u_ether.c 		u8		*dest = skb->data;
skb               513 drivers/usb/gadget/function/u_ether.c 				dev_kfree_skb_any(skb);
skb               548 drivers/usb/gadget/function/u_ether.c 			skb = dev->wrap(dev->port_usb, skb);
skb               550 drivers/usb/gadget/function/u_ether.c 		if (!skb) {
skb               561 drivers/usb/gadget/function/u_ether.c 	length = skb->len;
skb               562 drivers/usb/gadget/function/u_ether.c 	req->buf = skb->data;
skb               563 drivers/usb/gadget/function/u_ether.c 	req->context = skb;
skb               595 drivers/usb/gadget/function/u_ether.c 		dev_kfree_skb_any(skb);
skb                74 drivers/usb/gadget/function/u_ether.h 						struct sk_buff *skb);
skb                76 drivers/usb/gadget/function/u_ether.h 						struct sk_buff *skb,
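
Each gadget function plugs its framing into u_ether through the wrap/unwrap pointers indexed above; eth_start_xmit() calls dev->wrap() and may receive back a different (or NULL) skb. For contrast with the RNDIS/NCM/EEM versions, the simplest conceivable wrap hook, assuming only the struct gether declaration from u_ether.h:

    #include <linux/skbuff.h>

    struct gether;  /* declared in u_ether.h */

    /* pass-through wrap: no framing added; returning NULL would drop */
    static struct sk_buff *noop_wrap(struct gether *port, struct sk_buff *skb)
    {
            return skb;
    }
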
skb                20 fs/dlm/netlink.c 	struct sk_buff *skb;
skb                23 fs/dlm/netlink.c 	skb = genlmsg_new(size, GFP_NOFS);
skb                24 fs/dlm/netlink.c 	if (!skb)
skb                28 fs/dlm/netlink.c 	data = genlmsg_put(skb, 0, dlm_nl_seqnum++, &family, 0, cmd);
skb                30 fs/dlm/netlink.c 		nlmsg_free(skb);
skb                34 fs/dlm/netlink.c 	*skbp = skb;
skb                38 fs/dlm/netlink.c static struct dlm_lock_data *mk_data(struct sk_buff *skb)
skb                42 fs/dlm/netlink.c 	ret = nla_reserve(skb, DLM_TYPE_LOCK, sizeof(struct dlm_lock_data));
skb                48 fs/dlm/netlink.c static int send_data(struct sk_buff *skb)
skb                50 fs/dlm/netlink.c 	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
skb                53 fs/dlm/netlink.c 	genlmsg_end(skb, data);
skb                55 fs/dlm/netlink.c 	return genlmsg_unicast(&init_net, skb, listener_nlportid);
skb                58 fs/dlm/netlink.c static int user_cmd(struct sk_buff *skb, struct genl_info *info)
skb              3028 fs/io_uring.c  		struct sk_buff *skb;
skb              3030 fs/io_uring.c  		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
skb              3031 fs/io_uring.c  			kfree_skb(skb);
skb              3083 fs/io_uring.c  static void io_destruct_skb(struct sk_buff *skb)
skb              3085 fs/io_uring.c  	struct io_ring_ctx *ctx = skb->sk->sk_user_data;
skb              3092 fs/io_uring.c  	unix_destruct_scm(skb);
skb              3104 fs/io_uring.c  	struct sk_buff *skb;
skb              3111 fs/io_uring.c  	skb = alloc_skb(0, GFP_KERNEL);
skb              3112 fs/io_uring.c  	if (!skb) {
skb              3117 fs/io_uring.c  	skb->sk = sk;
skb              3118 fs/io_uring.c  	skb->destructor = io_destruct_skb;
skb              3127 fs/io_uring.c  	UNIXCB(skb).fp = fpl;
skb              3128 fs/io_uring.c  	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
skb              3129 fs/io_uring.c  	skb_queue_head(&sk->sk_receive_queue, skb);
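
io_uring parks its registered files on a zero-length skb so the af_unix garbage collector can account for them. The three assignments indexed above (io_uring.c:3117-3128) amount to this helper; a paraphrase for illustration, not a kernel API:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static void arm_skb_destructor(struct sk_buff *skb, struct sock *sk,
                                   void (*destruct)(struct sk_buff *))
    {
            skb->sk = sk;
            skb->destructor = destruct;     /* runs when the skb is freed */
            /* charge the socket so sk_wmem_alloc tracks this skb */
            refcount_add(skb->truesize, &sk->sk_wmem_alloc);
    }
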
skb                41 fs/quota/netlink.c 	struct sk_buff *skb;
skb                50 fs/quota/netlink.c 	skb = genlmsg_new(msg_size, GFP_NOFS);
skb                51 fs/quota/netlink.c 	if (!skb) {
skb                56 fs/quota/netlink.c 	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
skb                63 fs/quota/netlink.c 	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
skb                66 fs/quota/netlink.c 	ret = nla_put_u64_64bit(skb, QUOTA_NL_A_EXCESS_ID,
skb                71 fs/quota/netlink.c 	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
skb                74 fs/quota/netlink.c 	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
skb                77 fs/quota/netlink.c 	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
skb                80 fs/quota/netlink.c 	ret = nla_put_u64_64bit(skb, QUOTA_NL_A_CAUSED_ID,
skb                85 fs/quota/netlink.c 	genlmsg_end(skb, msg_head);
skb                87 fs/quota/netlink.c 	genlmsg_multicast(&quota_genl_family, skb, 0, 0, GFP_NOFS);
skb                92 fs/quota/netlink.c 	kfree_skb(skb);
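
Both fs/dlm/netlink.c and fs/quota/netlink.c above follow the same generic-netlink send sequence: genlmsg_new(), genlmsg_put(), nla_put_*(), genlmsg_end(), then unicast or multicast, with nlmsg_free() on any error. A self-contained sketch of that sequence; MY_CMD_EVENT and MY_ATTR_STATUS are hypothetical placeholders:

    #include <net/genetlink.h>
    #include <net/net_namespace.h>

    #define MY_CMD_EVENT    1       /* hypothetical command */
    #define MY_ATTR_STATUS  1       /* hypothetical attribute */

    static int my_send_event(struct genl_family *family, u32 portid,
                             u32 status)
    {
            struct sk_buff *skb;
            void *hdr;

            skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
            if (!skb)
                    return -ENOMEM;

            hdr = genlmsg_put(skb, 0, 0, family, 0, MY_CMD_EVENT);
            if (!hdr)
                    goto fail;
            if (nla_put_u32(skb, MY_ATTR_STATUS, status))
                    goto fail;

            genlmsg_end(skb, hdr);          /* finalize message length */
            return genlmsg_unicast(&init_net, skb, portid);
    fail:
            nlmsg_free(skb);
            return -EMSGSIZE;
    }
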
skb                37 include/crypto/algapi.h 	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
skb                63 include/linux/atalk.h static __inline__ struct ddpehdr *ddp_hdr(struct sk_buff *skb)
skb                65 include/linux/atalk.h 	return (struct ddpehdr *)skb_transport_header(skb);
skb                91 include/linux/atalk.h static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
skb                93 include/linux/atalk.h 	return (struct elapaarp *)skb_transport_header(skb);
skb               126 include/linux/atalk.h 				       struct sk_buff *skb,
skb               105 include/linux/atmdev.h 	void (*push)(struct atm_vcc *vcc,struct sk_buff *skb);
skb               106 include/linux/atmdev.h 	void (*pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* optional */
skb               108 include/linux/atmdev.h 	int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
skb               197 include/linux/atmdev.h 	int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
skb               225 include/linux/atmdev.h #define ATM_SKB(skb) (((struct atm_skb_data *) (skb)->cb))
skb               245 include/linux/atmdev.h static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
skb               255 include/linux/atmdev.h 	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
skb               256 include/linux/atmdev.h 	ATM_SKB(skb)->acct_truesize = skb->truesize;
skb               257 include/linux/atmdev.h 	ATM_SKB(skb)->atm_options = vcc->atm_options;
skb               103 include/linux/bpf-cgroup.h 				struct sk_buff *skb,
skb               168 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
skb               172 include/linux/bpf-cgroup.h 		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
skb               178 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
skb               181 include/linux/bpf-cgroup.h 	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
skb               184 include/linux/bpf-cgroup.h 			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
skb               382 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
skb               383 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
skb               725 include/linux/bpf.h int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
skb               831 include/linux/bpf.h 					   struct sk_buff *skb,
skb                67 include/linux/can/core.h extern int can_send(struct sk_buff *skb, int loop);
skb                97 include/linux/can/dev.h 					  struct sk_buff *skb)
skb               100 include/linux/can/dev.h 	if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
skb               104 include/linux/can/dev.h 	if (skb->ip_summed == CHECKSUM_NONE) {
skb               106 include/linux/can/dev.h 		can_skb_prv(skb)->ifindex = dev->ifindex;
skb               107 include/linux/can/dev.h 		can_skb_prv(skb)->skbcnt = 0;
skb               109 include/linux/can/dev.h 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               113 include/linux/can/dev.h 			skb->pkt_type = PACKET_LOOPBACK;
skb               115 include/linux/can/dev.h 			skb->pkt_type = PACKET_HOST;
skb               117 include/linux/can/dev.h 		skb_reset_mac_header(skb);
skb               118 include/linux/can/dev.h 		skb_reset_network_header(skb);
skb               119 include/linux/can/dev.h 		skb_reset_transport_header(skb);
skb               127 include/linux/can/dev.h 					  struct sk_buff *skb)
skb               129 include/linux/can/dev.h 	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
skb               131 include/linux/can/dev.h 	if (skb->protocol == htons(ETH_P_CAN)) {
skb               132 include/linux/can/dev.h 		if (unlikely(skb->len != CAN_MTU ||
skb               135 include/linux/can/dev.h 	} else if (skb->protocol == htons(ETH_P_CANFD)) {
skb               136 include/linux/can/dev.h 		if (unlikely(skb->len != CANFD_MTU ||
skb               142 include/linux/can/dev.h 	if (!can_skb_headroom_valid(dev, skb))
skb               148 include/linux/can/dev.h 	kfree_skb(skb);
skb               153 include/linux/can/dev.h static inline bool can_is_canfd_skb(const struct sk_buff *skb)
skb               156 include/linux/can/dev.h 	return skb->len == CANFD_MTU;
skb               204 include/linux/can/dev.h void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
skb                42 include/linux/can/rx-offload.h 				struct sk_buff *skb, u32 timestamp);
skb                46 include/linux/can/rx-offload.h 			      struct sk_buff *skb);
skb                40 include/linux/can/skb.h static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
skb                42 include/linux/can/skb.h 	return (struct can_skb_priv *)(skb->head);
skb                45 include/linux/can/skb.h static inline void can_skb_reserve(struct sk_buff *skb)
skb                47 include/linux/can/skb.h 	skb_reserve(skb, sizeof(struct can_skb_priv));
skb                50 include/linux/can/skb.h static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
skb                54 include/linux/can/skb.h 		skb->destructor = sock_efree;
skb                55 include/linux/can/skb.h 		skb->sk = sk;
skb                62 include/linux/can/skb.h static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
skb                64 include/linux/can/skb.h 	if (skb_shared(skb)) {
skb                65 include/linux/can/skb.h 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
skb                68 include/linux/can/skb.h 			can_skb_set_owner(nskb, skb->sk);
skb                69 include/linux/can/skb.h 			consume_skb(skb);
skb                72 include/linux/can/skb.h 			kfree_skb(skb);
skb                78 include/linux/can/skb.h 	return skb;
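
The CAN headers above supply the two halves of a driver's TX path: can_dropped_invalid_skb() (can/dev.h:127-148) validates and, if needed, frees the frame, and can_put_echo_skb() stashes it for local loopback on TX completion. A sketch of how a driver combines them; my_can_priv, MY_TX_ECHO_SLOTS and hw_queue_frame() are hypothetical:

    #include <linux/can/dev.h>
    #include <linux/netdevice.h>

    #define MY_TX_ECHO_SLOTS 16     /* hypothetical echo-skb ring size */

    struct my_can_priv {
            struct can_priv can;    /* must be first in CAN drivers */
            unsigned int tx_head;
    };

    /* hypothetical hardware hook; a real driver would ring a doorbell */
    static void hw_queue_frame(struct my_can_priv *priv, unsigned int idx)
    {
    }

    static netdev_tx_t my_can_xmit(struct sk_buff *skb,
                                   struct net_device *dev)
    {
            struct my_can_priv *priv = netdev_priv(dev);
            unsigned int idx;

            /* frees and counts malformed or short-headroom skbs */
            if (can_dropped_invalid_skb(dev, skb))
                    return NETDEV_TX_OK;

            idx = priv->tx_head++ % MY_TX_ECHO_SLOTS;
            /* stash the skb so TX-completion can echo it locally */
            can_put_echo_skb(skb, dev, idx);
            hw_queue_frame(priv, idx);
            return NETDEV_TX_OK;
    }
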
skb                66 include/linux/dccp.h static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
skb                68 include/linux/dccp.h 	return (struct dccp_hdr *)skb_transport_header(skb);
skb                71 include/linux/dccp.h static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
skb                73 include/linux/dccp.h 	skb_push(skb, headlen);
skb                74 include/linux/dccp.h 	skb_reset_transport_header(skb);
skb                75 include/linux/dccp.h 	return memset(skb_transport_header(skb), 0, headlen);
skb                88 include/linux/dccp.h static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
skb                90 include/linux/dccp.h 	const struct dccp_hdr *dh = dccp_hdr(skb);
skb               106 include/linux/dccp.h static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb)
skb               108 include/linux/dccp.h 	return (struct dccp_hdr_request *)(skb_transport_header(skb) +
skb               109 include/linux/dccp.h 					   dccp_basic_hdr_len(skb));
skb               112 include/linux/dccp.h static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb)
skb               114 include/linux/dccp.h 	return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) +
skb               115 include/linux/dccp.h 					    dccp_basic_hdr_len(skb));
skb               118 include/linux/dccp.h static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
skb               120 include/linux/dccp.h 	const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb);
skb               124 include/linux/dccp.h static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
skb               126 include/linux/dccp.h 	return (struct dccp_hdr_response *)(skb_transport_header(skb) +
skb               127 include/linux/dccp.h 					    dccp_basic_hdr_len(skb));
skb               130 include/linux/dccp.h static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb)
skb               132 include/linux/dccp.h 	return (struct dccp_hdr_reset *)(skb_transport_header(skb) +
skb               133 include/linux/dccp.h 					 dccp_basic_hdr_len(skb));
skb               142 include/linux/dccp.h static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
skb               144 include/linux/dccp.h 	return __dccp_hdr_len(dccp_hdr(skb));
skb               181 include/linux/dccp.h 			      struct sk_buff *skb);
skb                20 include/linux/dsa/8021q.h struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
skb                39 include/linux/dsa/8021q.h struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
skb                54 include/linux/dsa/sja1105.h #define SJA1105_SKB_CB(skb) \
skb                55 include/linux/dsa/sja1105.h 	((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
skb                12 include/linux/errqueue.h #define SKB_EXT_ERR(skb) ((struct sock_exterr_skb *) ((skb)->cb))
skb                32 include/linux/etherdevice.h __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
skb                35 include/linux/etherdevice.h int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
skb                37 include/linux/etherdevice.h int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
skb                42 include/linux/etherdevice.h __be16 eth_header_parse_protocol(const struct sk_buff *skb);
skb                59 include/linux/etherdevice.h struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
skb                60 include/linux/etherdevice.h int eth_gro_complete(struct sk_buff *skb, int nhoff);
skb               530 include/linux/etherdevice.h static inline int eth_skb_pad(struct sk_buff *skb)
skb               532 include/linux/etherdevice.h 	return skb_put_padto(skb, ETH_ZLEN);
skb                24 include/linux/fddidevice.h __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
skb               601 include/linux/filter.h static inline void bpf_compute_data_pointers(struct sk_buff *skb)
skb               603 include/linux/filter.h 	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
skb               606 include/linux/filter.h 	cb->data_meta = skb->data - skb_metadata_len(skb);
skb               607 include/linux/filter.h 	cb->data_end  = skb->data + skb_headlen(skb);
skb               614 include/linux/filter.h 	struct sk_buff *skb, void **saved_data_end)
skb               616 include/linux/filter.h 	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
skb               619 include/linux/filter.h 	cb->data_end  = skb->data + skb_headlen(skb);
skb               624 include/linux/filter.h 	struct sk_buff *skb, void *saved_data_end)
skb               626 include/linux/filter.h 	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
skb               631 include/linux/filter.h static inline u8 *bpf_skb_cb(struct sk_buff *skb)
skb               647 include/linux/filter.h 	return qdisc_skb_cb(skb)->data;
skb               651 include/linux/filter.h 					 struct sk_buff *skb)
skb               653 include/linux/filter.h 	u8 *cb_data = bpf_skb_cb(skb);
skb               662 include/linux/filter.h 	res = BPF_PROG_RUN(prog, skb);
skb               671 include/linux/filter.h 				       struct sk_buff *skb)
skb               676 include/linux/filter.h 	res = __bpf_prog_run_save_cb(prog, skb);
skb               682 include/linux/filter.h 					struct sk_buff *skb)
skb               684 include/linux/filter.h 	u8 *cb_data = bpf_skb_cb(skb);
skb               691 include/linux/filter.h 	res = BPF_PROG_RUN(prog, skb);
skb               797 include/linux/filter.h int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
skb               798 include/linux/filter.h static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
skb               800 include/linux/filter.h 	return sk_filter_trim_cap(sk, skb, 1);
skb               912 include/linux/filter.h int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
skb               923 include/linux/filter.h 				  struct bpf_prog *prog, struct sk_buff *skb,
skb               928 include/linux/filter.h 		     struct bpf_prog *prog, struct sk_buff *skb,
skb              1163 include/linux/filter.h void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
skb              1166 include/linux/filter.h static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
skb              1170 include/linux/filter.h 		return skb_header_pointer(skb, k, size, buffer);
skb              1172 include/linux/filter.h 	return bpf_internal_load_pointer_neg_helper(skb, k, size);
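
The filter.h fragments at lines 651-691 above save and restore the qdisc cb area around BPF_PROG_RUN(), because a socket filter may scribble on skb->cb while the stack still owns it. Reconstructed as one piece, paraphrasing the indexed helper:

    #include <linux/filter.h>

    static u32 run_prog_save_cb(const struct bpf_prog *prog,
                                struct sk_buff *skb)
    {
            u8 *cb_data = bpf_skb_cb(skb);
            u8 cb_saved[BPF_SKB_CB_LEN];
            u32 res;

            /* only pay for the copy if the program touches cb[] */
            if (unlikely(prog->cb_access)) {
                    memcpy(cb_saved, cb_data, sizeof(cb_saved));
                    memset(cb_data, 0, sizeof(cb_saved));
            }

            res = BPF_PROG_RUN(prog, skb);

            if (unlikely(prog->cb_access))
                    memcpy(cb_data, cb_saved, sizeof(cb_saved));

            return res;
    }
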
skb               273 include/linux/genl_magic_func.h 	struct sk_buff *skb, gfp_t flags)				\
skb               277 include/linux/genl_magic_func.h 	return genlmsg_multicast(&ZZZ_genl_family, skb, 0,		\
skb               322 include/linux/genl_magic_func.h static int s_name ## _to_skb(struct sk_buff *skb, struct s_name *s,	\
skb               325 include/linux/genl_magic_func.h 	struct nlattr *tla = nla_nest_start(skb, tag_number);		\
skb               330 include/linux/genl_magic_func.h 	nla_nest_end(skb, tla);						\
skb               335 include/linux/genl_magic_func.h 		nla_nest_cancel(skb, tla);				\
skb               338 include/linux/genl_magic_func.h static inline int s_name ## _to_priv_skb(struct sk_buff *skb,		\
skb               341 include/linux/genl_magic_func.h 	return s_name ## _to_skb(skb, s, 0);				\
skb               343 include/linux/genl_magic_func.h static inline int s_name ## _to_unpriv_skb(struct sk_buff *skb,		\
skb               346 include/linux/genl_magic_func.h 	return s_name ## _to_skb(skb, s, 1);				\
skb               355 include/linux/genl_magic_func.h 		if (__put(skb, attr_nr, s->name))			\
skb               364 include/linux/genl_magic_func.h 		if (__put(skb, attr_nr, min_t(int, maxlen,		\
skb                66 include/linux/genl_magic_struct.h static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
skb                68 include/linux/genl_magic_struct.h 	return nla_put_64bit(skb, attrtype, sizeof(u64), &value, 0);
skb                26 include/linux/hdlc.h 	__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
skb                27 include/linux/hdlc.h 	int (*netif_rx)(struct sk_buff *skb);
skb                28 include/linux/hdlc.h 	netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
skb                41 include/linux/hdlc.h 	netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
skb                74 include/linux/hdlc.h static __inline__ void debug_frame(const struct sk_buff *skb)
skb                78 include/linux/hdlc.h 	for (i=0; i < skb->len; i++) {
skb                83 include/linux/hdlc.h 		printk(" %02X", skb->data[i]);
skb                94 include/linux/hdlc.h netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb               101 include/linux/hdlc.h static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
skb               106 include/linux/hdlc.h 	skb->dev = dev;
skb               107 include/linux/hdlc.h 	skb_reset_mac_header(skb);
skb               110 include/linux/hdlc.h 		return hdlc->proto->type_trans(skb, dev);
skb               152 include/linux/hdlcdrv.h 	struct sk_buff *skb;
skb                30 include/linux/hippidevice.h __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
skb                19 include/linux/icmp.h static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
skb                21 include/linux/icmp.h 	return (struct icmphdr *)skb_transport_header(skb);
skb                 8 include/linux/icmpv6.h static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
skb                10 include/linux/icmpv6.h 	return (struct icmp6hdr *)skb_transport_header(skb);
skb                16 include/linux/icmpv6.h extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
skb                18 include/linux/icmpv6.h typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
skb                22 include/linux/icmpv6.h int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
skb                27 include/linux/icmpv6.h static inline void icmpv6_send(struct sk_buff *skb,
skb                38 include/linux/icmpv6.h extern void				icmpv6_param_prob(struct sk_buff *skb,
skb              3204 include/linux/ieee80211.h static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
skb              3206 include/linux/ieee80211.h 	if (skb->len < IEEE80211_MIN_ACTION_SIZE)
skb              3208 include/linux/ieee80211.h 	return _ieee80211_is_robust_mgmt_frame((void *)skb->data);
skb              3250 include/linux/ieee80211.h static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
skb              3252 include/linux/ieee80211.h 	if (skb->len < IEEE80211_MIN_ACTION_SIZE)
skb              3254 include/linux/ieee80211.h 	return _ieee80211_is_group_privacy_action((void *)skb->data);
skb              3304 include/linux/ieee80211.h static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
skb              3306 include/linux/ieee80211.h 	if (!skb_is_nonlinear(skb) &&
skb              3307 include/linux/ieee80211.h 	    skb->len > (skb_network_offset(skb) + 2)) {
skb              3309 include/linux/ieee80211.h 		const u8 *tdls_data = skb_network_header(skb) - 2;
skb              3332 include/linux/ieee80211.h static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
skb              3334 include/linux/ieee80211.h 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
skb              3339 include/linux/ieee80211.h 	if (skb->len < IEEE80211_MIN_ACTION_SIZE +
skb                25 include/linux/if_arp.h static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
skb                27 include/linux/if_arp.h 	return (struct arphdr *)skb_network_header(skb);
skb                22 include/linux/if_ether.h static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
skb                24 include/linux/if_ether.h 	return (struct ethhdr *)skb_mac_header(skb);
skb                30 include/linux/if_ether.h static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
skb                32 include/linux/if_ether.h 	return (struct ethhdr *)skb->data;
skb                35 include/linux/if_ether.h static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
skb                37 include/linux/if_ether.h 	return (struct ethhdr *)skb_inner_mac_header(skb);
skb                40 include/linux/if_ether.h int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
skb                60 include/linux/if_frad.h    void              (*receive)(struct sk_buff *skb, struct net_device *);
skb                21 include/linux/if_pppox.h static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb)
skb                23 include/linux/if_pppox.h 	return (struct pppoe_hdr *)skb_network_header(skb);
skb               103 include/linux/if_team.h 					 struct sk_buff *skb)
skb               108 include/linux/if_team.h 		netpoll_send_skb(np, skb);
skb               112 include/linux/if_team.h 					 struct sk_buff *skb)
skb               122 include/linux/if_team.h 				       struct sk_buff *skb);
skb               123 include/linux/if_team.h 	bool (*transmit)(struct team *team, struct sk_buff *skb);
skb               231 include/linux/if_team.h 				      struct sk_buff *skb)
skb               233 include/linux/if_team.h 	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
skb               234 include/linux/if_team.h 		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
skb               235 include/linux/if_team.h 	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
skb               237 include/linux/if_team.h 	skb->dev = port->dev;
skb               239 include/linux/if_team.h 		team_netpoll_send_skb(port, skb);
skb               242 include/linux/if_team.h 	return dev_queue_xmit(skb);
skb                56 include/linux/if_vlan.h static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
skb                58 include/linux/if_vlan.h 	return (struct vlan_ethhdr *)skb_mac_header(skb);
skb               211 include/linux/if_vlan.h extern bool vlan_do_receive(struct sk_buff **skb);
skb               263 include/linux/if_vlan.h static inline bool vlan_do_receive(struct sk_buff **skb)
skb               333 include/linux/if_vlan.h static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
skb               339 include/linux/if_vlan.h 	if (skb_cow_head(skb, VLAN_HLEN) < 0)
skb               342 include/linux/if_vlan.h 	skb_push(skb, VLAN_HLEN);
skb               346 include/linux/if_vlan.h 		memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
skb               347 include/linux/if_vlan.h 	skb->mac_header -= VLAN_HLEN;
skb               349 include/linux/if_vlan.h 	veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
skb               361 include/linux/if_vlan.h 		veth->h_vlan_encapsulated_proto = skb->protocol;
skb               381 include/linux/if_vlan.h static inline int __vlan_insert_tag(struct sk_buff *skb,
skb               384 include/linux/if_vlan.h 	return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
skb               402 include/linux/if_vlan.h static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
skb               409 include/linux/if_vlan.h 	err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
skb               411 include/linux/if_vlan.h 		dev_kfree_skb_any(skb);
skb               414 include/linux/if_vlan.h 	return skb;
skb               431 include/linux/if_vlan.h static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
skb               434 include/linux/if_vlan.h 	return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
skb               449 include/linux/if_vlan.h static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
skb               453 include/linux/if_vlan.h 	skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
skb               454 include/linux/if_vlan.h 	if (skb)
skb               455 include/linux/if_vlan.h 		skb->protocol = vlan_proto;
skb               456 include/linux/if_vlan.h 	return skb;
skb               465 include/linux/if_vlan.h static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
skb               467 include/linux/if_vlan.h 	skb->vlan_present = 0;
skb               493 include/linux/if_vlan.h static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
skb               495 include/linux/if_vlan.h 	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
skb               496 include/linux/if_vlan.h 					skb_vlan_tag_get(skb));
skb               497 include/linux/if_vlan.h 	if (likely(skb))
skb               498 include/linux/if_vlan.h 		__vlan_hwaccel_clear_tag(skb);
skb               499 include/linux/if_vlan.h 	return skb;
skb               510 include/linux/if_vlan.h static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
skb               513 include/linux/if_vlan.h 	skb->vlan_proto = vlan_proto;
skb               514 include/linux/if_vlan.h 	skb->vlan_tci = vlan_tci;
skb               515 include/linux/if_vlan.h 	skb->vlan_present = 1;
skb               525 include/linux/if_vlan.h static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
skb               527 include/linux/if_vlan.h 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
skb               543 include/linux/if_vlan.h static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
skb               546 include/linux/if_vlan.h 	if (skb_vlan_tag_present(skb)) {
skb               547 include/linux/if_vlan.h 		*vlan_tci = skb_vlan_tag_get(skb);
skb               562 include/linux/if_vlan.h static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
skb               564 include/linux/if_vlan.h 	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
skb               565 include/linux/if_vlan.h 		return __vlan_hwaccel_get_tag(skb, vlan_tci);
skb               567 include/linux/if_vlan.h 		return __vlan_get_tag(skb, vlan_tci);
skb               580 include/linux/if_vlan.h static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
skb               583 include/linux/if_vlan.h 	unsigned int vlan_depth = skb->mac_len;
skb               600 include/linux/if_vlan.h 			if (unlikely(!pskb_may_pull(skb,
skb               604 include/linux/if_vlan.h 			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
skb               623 include/linux/if_vlan.h static inline __be16 vlan_get_protocol(struct sk_buff *skb)
skb               625 include/linux/if_vlan.h 	return __vlan_get_protocol(skb, skb->protocol, NULL);
skb               628 include/linux/if_vlan.h static inline void vlan_set_encap_proto(struct sk_buff *skb,
skb               641 include/linux/if_vlan.h 		skb->protocol = proto;
skb               654 include/linux/if_vlan.h 		skb->protocol = htons(ETH_P_802_3);
skb               659 include/linux/if_vlan.h 		skb->protocol = htons(ETH_P_802_2);
skb               669 include/linux/if_vlan.h static inline bool skb_vlan_tagged(const struct sk_buff *skb)
skb               671 include/linux/if_vlan.h 	if (!skb_vlan_tag_present(skb) &&
skb               672 include/linux/if_vlan.h 	    likely(!eth_type_vlan(skb->protocol)))
skb               685 include/linux/if_vlan.h static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
skb               687 include/linux/if_vlan.h 	__be16 protocol = skb->protocol;
skb               689 include/linux/if_vlan.h 	if (!skb_vlan_tag_present(skb)) {
skb               695 include/linux/if_vlan.h 		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
skb               698 include/linux/if_vlan.h 		veh = (struct vlan_ethhdr *)skb->data;
skb               715 include/linux/if_vlan.h static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
skb               718 include/linux/if_vlan.h 	if (skb_vlan_tagged_multi(skb)) {
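
A usage sketch combining the if_vlan.h accessors above: push a hardware-accelerated tag into the payload before handing the frame to a device without tag-insertion offload. Note __vlan_hwaccel_push_inside() consumes the skb on allocation failure, so the caller must use the returned pointer:

    #include <linux/if_vlan.h>

    static struct sk_buff *flush_hwaccel_tag(struct sk_buff *skb)
    {
            if (skb_vlan_tag_present(skb))
                    skb = __vlan_hwaccel_push_inside(skb);
            return skb;     /* NULL means the frame was dropped */
    }
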
skb                20 include/linux/igmp.h static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
skb                22 include/linux/igmp.h 	return (struct igmphdr *)skb_transport_header(skb);
skb                26 include/linux/igmp.h 			igmpv3_report_hdr(const struct sk_buff *skb)
skb                28 include/linux/igmp.h 	return (struct igmpv3_report *)skb_transport_header(skb);
skb                32 include/linux/igmp.h 			igmpv3_query_hdr(const struct sk_buff *skb)
skb                34 include/linux/igmp.h 	return (struct igmpv3_query *)skb_transport_header(skb);
skb               105 include/linux/igmp.h static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len)
skb               107 include/linux/igmp.h 	if (skb_transport_offset(skb) + ip_transport_len(skb) < len)
skb               110 include/linux/igmp.h 	return pskb_may_pull(skb, len);
skb               143 include/linux/igmp.h int ip_mc_check_igmp(struct sk_buff *skb);
skb                11 include/linux/inet_diag.h 	void		(*dump)(struct sk_buff *skb,
skb                26 include/linux/inet_diag.h 					 struct sk_buff *skb);
skb                40 include/linux/inet_diag.h 		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
skb                44 include/linux/inet_diag.h void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
skb                71 include/linux/inet_diag.h int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
skb                19 include/linux/ip.h static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
skb                21 include/linux/ip.h 	return (struct iphdr *)skb_network_header(skb);
skb                24 include/linux/ip.h static inline struct iphdr *inner_ip_hdr(const struct sk_buff *skb)
skb                26 include/linux/ip.h 	return (struct iphdr *)skb_inner_network_header(skb);
skb                29 include/linux/ip.h static inline struct iphdr *ipip_hdr(const struct sk_buff *skb)
skb                31 include/linux/ip.h 	return (struct iphdr *)skb_transport_header(skb);
skb                34 include/linux/ip.h static inline unsigned int ip_transport_len(const struct sk_buff *skb)
skb                36 include/linux/ip.h 	return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb);
skb                92 include/linux/ipv6.h static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
skb                94 include/linux/ipv6.h 	return (struct ipv6hdr *)skb_network_header(skb);
skb                97 include/linux/ipv6.h static inline struct ipv6hdr *inner_ipv6_hdr(const struct sk_buff *skb)
skb                99 include/linux/ipv6.h 	return (struct ipv6hdr *)skb_inner_network_header(skb);
skb               102 include/linux/ipv6.h static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
skb               104 include/linux/ipv6.h 	return (struct ipv6hdr *)skb_transport_header(skb);
skb               107 include/linux/ipv6.h static inline unsigned int ipv6_transport_len(const struct sk_buff *skb)
skb               109 include/linux/ipv6.h 	return ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr) -
skb               110 include/linux/ipv6.h 	       skb_network_header_len(skb);
skb               154 include/linux/ipv6.h #define IP6CB(skb)	((struct inet6_skb_parm*)((skb)->cb))
skb               155 include/linux/ipv6.h #define IP6CBMTU(skb)	((struct ip6_mtuinfo *)((skb)->cb))
skb               157 include/linux/ipv6.h static inline int inet6_iif(const struct sk_buff *skb)
skb               159 include/linux/ipv6.h 	bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);
skb               161 include/linux/ipv6.h 	return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
skb               164 include/linux/ipv6.h static inline bool inet6_is_jumbogram(const struct sk_buff *skb)
skb               166 include/linux/ipv6.h 	return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM);
skb               170 include/linux/ipv6.h static inline int inet6_sdif(const struct sk_buff *skb)
skb               173 include/linux/ipv6.h 	if (skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
skb               174 include/linux/ipv6.h 		return IP6CB(skb)->iif;
skb               180 include/linux/ipv6.h static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
skb               184 include/linux/ipv6.h 	    skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
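
All the *_hdr() accessors in atalk.h, if_ether.h, ip.h and ipv6.h above are casts over the same three header cursors an skb carries. A sketch that prints them, assuming the offsets were set on receive (e.g. by eth_type_trans() and the protocol handlers):

    #include <linux/printk.h>
    #include <linux/skbuff.h>

    static void dump_header_offsets(const struct sk_buff *skb)
    {
            pr_info("mac=%d network=%d transport=%d\n",
                    skb_mac_offset(skb),
                    skb_network_offset(skb),
                    skb_transport_offset(skb));
    }
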
skb                50 include/linux/isdn/capilli.h 	u16  (*send_message)(struct capi_ctr *, struct sk_buff *skb);
skb                85 include/linux/isdn/capilli.h void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb);
skb                23 include/linux/kernelcapi.h 	void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb);
skb                40 include/linux/kernelcapi.h u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb);
skb                32 include/linux/lapb.h 	int  (*data_indication)(struct net_device *dev, struct sk_buff *skb);
skb                33 include/linux/lapb.h 	void (*data_transmit)(struct net_device *dev, struct sk_buff *skb);
skb                55 include/linux/lapb.h extern int lapb_data_request(struct net_device *dev, struct sk_buff *skb);
skb                56 include/linux/lapb.h extern int lapb_data_received(struct net_device *dev, struct sk_buff *skb);
skb               114 include/linux/lsm_audit.h int ipv4_skb_to_auditdata(struct sk_buff *skb,
skb               117 include/linux/lsm_audit.h int ipv6_skb_to_auditdata(struct sk_buff *skb,
skb              1686 include/linux/lsm_hooks.h 	int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
skb              1726 include/linux/lsm_hooks.h 	int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb);
skb              1731 include/linux/lsm_hooks.h 					struct sk_buff *skb, u32 *secid);
skb              1737 include/linux/lsm_hooks.h 	int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
skb              1741 include/linux/lsm_hooks.h 	void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
skb              1754 include/linux/lsm_hooks.h 				  struct sk_buff *skb);
skb              1789 include/linux/lsm_hooks.h 	int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
skb               540 include/linux/mISDNif.h 	struct sk_buff	*skb;
skb               542 include/linux/mISDNif.h 	skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask);
skb               543 include/linux/mISDNif.h 	if (likely(skb))
skb               544 include/linux/mISDNif.h 		skb_reserve(skb, MISDN_HEADER_LEN);
skb               545 include/linux/mISDNif.h 	return skb;
skb               551 include/linux/mISDNif.h 	struct sk_buff	*skb = mI_alloc_skb(len, gfp_mask);
skb               554 include/linux/mISDNif.h 	if (!skb)
skb               557 include/linux/mISDNif.h 		skb_put_data(skb, dp, len);
skb               558 include/linux/mISDNif.h 	hh = mISDN_HEAD_P(skb);
skb               561 include/linux/mISDNif.h 	return skb;
skb               568 include/linux/mISDNif.h 	struct sk_buff		*skb;
skb               572 include/linux/mISDNif.h 	skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask);
skb               573 include/linux/mISDNif.h 	if (!skb)
skb               575 include/linux/mISDNif.h 	if (ch->recv(ch->peer, skb))
skb               576 include/linux/mISDNif.h 		dev_kfree_skb(skb);
skb                84 include/linux/mroute.h int ipmr_get_route(struct net *net, struct sk_buff *skb,
skb                30 include/linux/mroute6.h extern int ip6_mr_input(struct sk_buff *skb);
skb                97 include/linux/mroute6.h extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
skb               101 include/linux/mroute6.h bool mroute6_is_socket(struct net *net, struct sk_buff *skb);
skb               104 include/linux/mroute6.h static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
skb               285 include/linux/mroute_base.h int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
skb               287 include/linux/mroute_base.h int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
skb               289 include/linux/mroute_base.h 		  int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
skb               293 include/linux/mroute_base.h int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
skb               297 include/linux/mroute_base.h 				 struct sk_buff *skb,
skb               336 include/linux/mroute_base.h static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
skb               343 include/linux/mroute_base.h mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
skb               347 include/linux/mroute_base.h 			     struct sk_buff *skb,
skb               266 include/linux/netdevice.h 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
skb               269 include/linux/netdevice.h 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
skb               275 include/linux/netdevice.h 	__be16	(*parse_protocol)(const struct sk_buff *skb);
skb               336 include/linux/netdevice.h 	struct sk_buff		*skb;
skb               835 include/linux/netdevice.h 				       struct sk_buff *skb,
skb               916 include/linux/netdevice.h 	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
skb              1254 include/linux/netdevice.h 	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
skb              1256 include/linux/netdevice.h 	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
skb              1260 include/linux/netdevice.h 						    struct sk_buff *skb,
skb              1321 include/linux/netdevice.h 						   int vf, struct sk_buff *skb);
skb              1357 include/linux/netdevice.h 						     const struct sk_buff *skb,
skb              1387 include/linux/netdevice.h 	int			(*ndo_fdb_dump)(struct sk_buff *skb,
skb              1392 include/linux/netdevice.h 	int			(*ndo_fdb_get)(struct sk_buff *skb,
skb              1402 include/linux/netdevice.h 	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
skb              1434 include/linux/netdevice.h 						       struct sk_buff *skb);
skb              2127 include/linux/netdevice.h 						    const struct sk_buff *skb)
skb              2129 include/linux/netdevice.h 	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
skb              2144 include/linux/netdevice.h u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
skb              2147 include/linux/netdevice.h 					 struct sk_buff *skb,
skb              2319 include/linux/netdevice.h #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
skb              2322 include/linux/netdevice.h static inline int gro_recursion_inc_test(struct sk_buff *skb)
skb              2324 include/linux/netdevice.h 	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
skb              2330 include/linux/netdevice.h 					       struct sk_buff *skb)
skb              2332 include/linux/netdevice.h 	if (unlikely(gro_recursion_inc_test(skb))) {
skb              2333 include/linux/netdevice.h 		NAPI_GRO_CB(skb)->flush |= 1;
skb              2337 include/linux/netdevice.h 	return cb(head, skb);
skb              2345 include/linux/netdevice.h 						  struct sk_buff *skb)
skb              2347 include/linux/netdevice.h 	if (unlikely(gro_recursion_inc_test(skb))) {
skb              2348 include/linux/netdevice.h 		NAPI_GRO_CB(skb)->flush |= 1;
skb              2352 include/linux/netdevice.h 	return cb(sk, head, skb);
skb              2373 include/linux/netdevice.h 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
skb              2376 include/linux/netdevice.h 						struct sk_buff *skb);
skb              2377 include/linux/netdevice.h 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
skb              2620 include/linux/netdevice.h int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
skb              2632 include/linux/netdevice.h u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
skb              2634 include/linux/netdevice.h u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
skb              2636 include/linux/netdevice.h int dev_queue_xmit(struct sk_buff *skb);
skb              2637 include/linux/netdevice.h int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
skb              2638 include/linux/netdevice.h int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
skb              2659 include/linux/netdevice.h int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
skb              2661 include/linux/netdevice.h static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
skb              2663 include/linux/netdevice.h 	return NAPI_GRO_CB(skb)->data_offset;
skb              2666 include/linux/netdevice.h static inline unsigned int skb_gro_len(const struct sk_buff *skb)
skb              2668 include/linux/netdevice.h 	return skb->len - NAPI_GRO_CB(skb)->data_offset;
skb              2671 include/linux/netdevice.h static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
skb              2673 include/linux/netdevice.h 	NAPI_GRO_CB(skb)->data_offset += len;
skb              2676 include/linux/netdevice.h static inline void *skb_gro_header_fast(struct sk_buff *skb,
skb              2679 include/linux/netdevice.h 	return NAPI_GRO_CB(skb)->frag0 + offset;
skb              2682 include/linux/netdevice.h static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
skb              2684 include/linux/netdevice.h 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
skb              2687 include/linux/netdevice.h static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
skb              2689 include/linux/netdevice.h 	NAPI_GRO_CB(skb)->frag0 = NULL;
skb              2690 include/linux/netdevice.h 	NAPI_GRO_CB(skb)->frag0_len = 0;
skb              2693 include/linux/netdevice.h static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
skb              2696 include/linux/netdevice.h 	if (!pskb_may_pull(skb, hlen))
skb              2699 include/linux/netdevice.h 	skb_gro_frag0_invalidate(skb);
skb              2700 include/linux/netdevice.h 	return skb->data + offset;
skb              2703 include/linux/netdevice.h static inline void *skb_gro_network_header(struct sk_buff *skb)
skb              2705 include/linux/netdevice.h 	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
skb              2706 include/linux/netdevice.h 	       skb_network_offset(skb);
skb              2709 include/linux/netdevice.h static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
skb              2712 include/linux/netdevice.h 	if (NAPI_GRO_CB(skb)->csum_valid)
skb              2713 include/linux/netdevice.h 		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
skb              2722 include/linux/netdevice.h __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
skb              2724 include/linux/netdevice.h static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
skb              2726 include/linux/netdevice.h 	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
skb              2729 include/linux/netdevice.h static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
skb              2733 include/linux/netdevice.h 	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
skb              2734 include/linux/netdevice.h 		skb_checksum_start_offset(skb) <
skb              2735 include/linux/netdevice.h 		 skb_gro_offset(skb)) &&
skb              2736 include/linux/netdevice.h 		!skb_at_gro_remcsum_start(skb) &&
skb              2737 include/linux/netdevice.h 		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
skb              2741 include/linux/netdevice.h static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
skb              2744 include/linux/netdevice.h 	if (NAPI_GRO_CB(skb)->csum_valid &&
skb              2745 include/linux/netdevice.h 	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
skb              2748 include/linux/netdevice.h 	NAPI_GRO_CB(skb)->csum = psum;
skb              2750 include/linux/netdevice.h 	return __skb_gro_checksum_complete(skb);
skb              2753 include/linux/netdevice.h static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
skb              2755 include/linux/netdevice.h 	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
skb              2757 include/linux/netdevice.h 		NAPI_GRO_CB(skb)->csum_cnt--;
skb              2763 include/linux/netdevice.h 		__skb_incr_checksum_unnecessary(skb);
skb              2767 include/linux/netdevice.h #define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
skb              2771 include/linux/netdevice.h 	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
skb              2772 include/linux/netdevice.h 		__ret = __skb_gro_checksum_validate_complete(skb,	\
skb              2773 include/linux/netdevice.h 				compute_pseudo(skb, proto));		\
skb              2775 include/linux/netdevice.h 		skb_gro_incr_csum_unnecessary(skb);			\
skb              2779 include/linux/netdevice.h #define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
skb              2780 include/linux/netdevice.h 	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
skb              2782 include/linux/netdevice.h #define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
skb              2784 include/linux/netdevice.h 	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
skb              2786 include/linux/netdevice.h #define skb_gro_checksum_simple_validate(skb)				\
skb              2787 include/linux/netdevice.h 	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
skb              2789 include/linux/netdevice.h static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
skb              2791 include/linux/netdevice.h 	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
skb              2792 include/linux/netdevice.h 		!NAPI_GRO_CB(skb)->csum_valid);
skb              2795 include/linux/netdevice.h static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
skb              2798 include/linux/netdevice.h 	NAPI_GRO_CB(skb)->csum = ~pseudo;
skb              2799 include/linux/netdevice.h 	NAPI_GRO_CB(skb)->csum_valid = 1;
skb              2802 include/linux/netdevice.h #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo)	\
skb              2804 include/linux/netdevice.h 	if (__skb_gro_checksum_convert_check(skb))			\
skb              2805 include/linux/netdevice.h 		__skb_gro_checksum_convert(skb, check,			\
skb              2806 include/linux/netdevice.h 					   compute_pseudo(skb, proto));	\
skb              2820 include/linux/netdevice.h static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
skb              2829 include/linux/netdevice.h 	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
skb              2832 include/linux/netdevice.h 		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
skb              2836 include/linux/netdevice.h 	ptr = skb_gro_header_fast(skb, off);
skb              2837 include/linux/netdevice.h 	if (skb_gro_header_hard(skb, off + plen)) {
skb              2838 include/linux/netdevice.h 		ptr = skb_gro_header_slow(skb, off + plen, off);
skb              2843 include/linux/netdevice.h 	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
skb              2847 include/linux/netdevice.h 	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
skb              2855 include/linux/netdevice.h static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
skb              2864 include/linux/netdevice.h 	ptr = skb_gro_header_fast(skb, grc->offset);
skb              2865 include/linux/netdevice.h 	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
skb              2866 include/linux/netdevice.h 		ptr = skb_gro_header_slow(skb, plen, grc->offset);
skb              2875 include/linux/netdevice.h static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
skb              2878 include/linux/netdevice.h 		NAPI_GRO_CB(skb)->flush |= flush;
skb              2880 include/linux/netdevice.h static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
skb              2886 include/linux/netdevice.h 		NAPI_GRO_CB(skb)->flush |= flush;
skb              2887 include/linux/netdevice.h 		skb_gro_remcsum_cleanup(skb, grc);
skb              2888 include/linux/netdevice.h 		skb->remcsum_offload = 0;
skb              2892 include/linux/netdevice.h static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
skb              2894 include/linux/netdevice.h 	NAPI_GRO_CB(skb)->flush |= flush;
skb              2896 include/linux/netdevice.h static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
skb              2901 include/linux/netdevice.h 	NAPI_GRO_CB(skb)->flush |= flush;
skb              2902 include/linux/netdevice.h 	skb_gro_remcsum_cleanup(skb, grc);
skb              2903 include/linux/netdevice.h 	skb->remcsum_offload = 0;
skb              2907 include/linux/netdevice.h static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
skb              2915 include/linux/netdevice.h 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
skb              2918 include/linux/netdevice.h static inline int dev_parse_header(const struct sk_buff *skb,
skb              2921 include/linux/netdevice.h 	const struct net_device *dev = skb->dev;
skb              2925 include/linux/netdevice.h 	return dev->header_ops->parse(skb, haddr);
skb              2928 include/linux/netdevice.h static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
skb              2930 include/linux/netdevice.h 	const struct net_device *dev = skb->dev;
skb              2934 include/linux/netdevice.h 	return dev->header_ops->parse_protocol(skb);
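dev_hard_header() delegates to the device's header_ops->create and returns the header length on success (or 0 when the device has no header_ops). A minimal sketch, with ETH_P_IP and the destination address as illustrative inputs:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int example_fill_header(struct sk_buff *skb, struct net_device *dev,
			       const unsigned char *dest)
{
	/* Returns header length on success, negative error otherwise. */
	return dev_hard_header(skb, dev, ETH_P_IP, dest, dev->dev_addr,
			       skb->len);
}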
skb              3412 include/linux/netdevice.h 					  struct sk_buff *skb)
skb              3414 include/linux/netdevice.h 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
skb              3586 include/linux/netdevice.h void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
skb              3587 include/linux/netdevice.h void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
skb              3608 include/linux/netdevice.h static inline void dev_kfree_skb_irq(struct sk_buff *skb)
skb              3610 include/linux/netdevice.h 	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
skb              3613 include/linux/netdevice.h static inline void dev_consume_skb_irq(struct sk_buff *skb)
skb              3615 include/linux/netdevice.h 	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
skb              3618 include/linux/netdevice.h static inline void dev_kfree_skb_any(struct sk_buff *skb)
skb              3620 include/linux/netdevice.h 	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
skb              3623 include/linux/netdevice.h static inline void dev_consume_skb_any(struct sk_buff *skb)
skb              3625 include/linux/netdevice.h 	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
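The dropped/consumed split only affects accounting (drop monitor traces the kfree reason, not consume), while the _irq/_any suffix picks the context-safe free. A minimal sketch of a TX-completion handler that may run in hard-IRQ context:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_tx_complete(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		dev_consume_skb_any(skb);	/* normal completion */
	else
		dev_kfree_skb_any(skb);		/* counted as a drop */
}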
skb              3628 include/linux/netdevice.h void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
skb              3629 include/linux/netdevice.h int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
skb              3630 include/linux/netdevice.h int netif_rx(struct sk_buff *skb);
skb              3631 include/linux/netdevice.h int netif_rx_ni(struct sk_buff *skb);
skb              3632 include/linux/netdevice.h int netif_receive_skb(struct sk_buff *skb);
skb              3633 include/linux/netdevice.h int netif_receive_skb_core(struct sk_buff *skb);
skb              3635 include/linux/netdevice.h gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
skb              3644 include/linux/netdevice.h 	kfree_skb(napi->skb);
skb              3645 include/linux/netdevice.h 	napi->skb = NULL;
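napi_gro_receive() is the usual driver entry into GRO. A minimal sketch of a poll loop, where rx_fetch() is a hypothetical stand-in for the device-specific ring walk:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct sk_buff *rx_fetch(struct napi_struct *napi);	/* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = rx_fetch(napi)) != NULL) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);
		done++;
	}
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}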
skb              3692 include/linux/netdevice.h struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
skb              3693 include/linux/netdevice.h struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb              3703 include/linux/netdevice.h int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
skb              3704 include/linux/netdevice.h int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
skb              3706 include/linux/netdevice.h 			const struct sk_buff *skb);
skb              3709 include/linux/netdevice.h 					       struct sk_buff *skb)
skb              3711 include/linux/netdevice.h 	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
skb              3712 include/linux/netdevice.h 	    unlikely(!is_skb_forwardable(dev, skb))) {
skb              3714 include/linux/netdevice.h 		kfree_skb(skb);
skb              3718 include/linux/netdevice.h 	skb_scrub_packet(skb, true);
skb              3719 include/linux/netdevice.h 	skb->priority = 0;
skb              3724 include/linux/netdevice.h void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
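____dev_forward_skb() scrubs the packet so it looks freshly received on the target device; dev_forward_skb() wraps it and injects the skb via netif_rx(). A minimal sketch of mirroring to a peer device, in the style of veth:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_mirror(struct sk_buff *skb, struct net_device *peer)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return NET_RX_DROP;
	/* Scrubs state, checks MTU/up, then netif_rx() on the peer. */
	return dev_forward_skb(peer, clone);
}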
skb              4343 include/linux/netdevice.h int skb_checksum_help(struct sk_buff *skb);
skb              4344 include/linux/netdevice.h int skb_crc32c_csum_help(struct sk_buff *skb);
skb              4345 include/linux/netdevice.h int skb_csum_hwoffload_help(struct sk_buff *skb,
skb              4348 include/linux/netdevice.h struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
skb              4350 include/linux/netdevice.h struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
skb              4367 include/linux/netdevice.h struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
skb              4369 include/linux/netdevice.h 	return __skb_gso_segment(skb, features, true);
skb              4371 include/linux/netdevice.h __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
skb              4397 include/linux/netdevice.h void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
skb              4400 include/linux/netdevice.h 					struct sk_buff *skb)
skb              4415 include/linux/netdevice.h 					      struct sk_buff *skb, struct net_device *dev,
skb              4419 include/linux/netdevice.h 	return ops->ndo_start_xmit(skb, dev);
skb              4427 include/linux/netdevice.h static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb              4433 include/linux/netdevice.h 	rc = __netdev_start_xmit(ops, skb, dev, more);
skb              4499 include/linux/netdevice.h netdev_features_t passthru_features_check(struct sk_buff *skb,
skb              4502 include/linux/netdevice.h netdev_features_t netif_skb_features(struct sk_buff *skb);
skb              4531 include/linux/netdevice.h static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
skb              4533 include/linux/netdevice.h 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
skb              4534 include/linux/netdevice.h 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
skb              4537 include/linux/netdevice.h static inline bool netif_needs_gso(struct sk_buff *skb,
skb              4540 include/linux/netdevice.h 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
skb              4541 include/linux/netdevice.h 		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
skb              4542 include/linux/netdevice.h 			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
skb              4551 include/linux/netdevice.h static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
skb              4555 include/linux/netdevice.h 	skb->protocol = protocol;
skb              4556 include/linux/netdevice.h 	skb->encapsulation = 1;
skb              4557 include/linux/netdevice.h 	skb_push(skb, pulled_hlen);
skb              4558 include/linux/netdevice.h 	skb_reset_transport_header(skb);
skb              4559 include/linux/netdevice.h 	skb->mac_header = mac_offset;
skb              4560 include/linux/netdevice.h 	skb->network_header = skb->mac_header + mac_len;
skb              4561 include/linux/netdevice.h 	skb->mac_len = mac_len;
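A minimal sketch of the transmit-side pattern (cf. validate_xmit_skb()): segment in software only when netif_needs_gso() says the device cannot handle this GSO skb as-is:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_maybe_segment(struct sk_buff *skb,
					     struct net_device *dev)
{
	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = skb_gso_segment(skb, features);

		if (IS_ERR(segs))
			return NULL;		/* caller frees skb */
		if (segs) {
			consume_skb(skb);	/* replaced by segment list */
			skb = segs;
		}
	}
	return skb;
}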
skb                77 include/linux/netfilter.h 			       struct sk_buff *skb,
skb               132 include/linux/netfilter.h nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
skb               135 include/linux/netfilter.h 	return entry->hook(entry->priv, skb, state);
skb               199 include/linux/netfilter.h int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
skb               210 include/linux/netfilter.h 			  struct sock *sk, struct sk_buff *skb,
skb               260 include/linux/netfilter.h 		ret = nf_hook_slow(skb, &state, hook_head, 0);
skb               286 include/linux/netfilter.h 	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
skb               293 include/linux/netfilter.h 	    ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
skb               294 include/linux/netfilter.h 		ret = okfn(net, sk, skb);
skb               299 include/linux/netfilter.h NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
skb               303 include/linux/netfilter.h 	int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
skb               305 include/linux/netfilter.h 		ret = okfn(net, sk, skb);
skb               314 include/linux/netfilter.h 	struct sk_buff *skb, *next;
skb               318 include/linux/netfilter.h 	list_for_each_entry_safe(skb, next, head, list) {
skb               319 include/linux/netfilter.h 		list_del(&skb->list);
skb               320 include/linux/netfilter.h 		if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
skb               321 include/linux/netfilter.h 			list_add_tail(&skb->list, &sublist);
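NF_HOOK() runs the hook chain and calls okfn only on an accept verdict; otherwise nf_hook_slow() has consumed the skb (drop frees it, queue/stolen hand it elsewhere). A minimal sketch of the caller-side pattern from a receive path (cf. ip_rcv()), with example_finish as a hypothetical continuation:

#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>

static int example_finish(struct net *net, struct sock *sk,
			  struct sk_buff *skb)
{
	/* Real code would hand the packet on to routing here. */
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

static int example_pre_routing(struct net *net, struct sk_buff *skb,
			       struct net_device *dev)
{
	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		       skb, dev, NULL, example_finish);
}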
skb               342 include/linux/netfilter.h __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
skb               346 include/linux/netfilter.h __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
skb               351 include/linux/netfilter.h int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
skb               363 include/linux/netfilter.h 	void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
skb               364 include/linux/netfilter.h 	unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
skb               372 include/linux/netfilter.h nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
skb               380 include/linux/netfilter.h 		nat_hook->decode_session(skb, fl);
skb               388 include/linux/netfilter.h 	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
skb               392 include/linux/netfilter.h 	return okfn(net, sk, skb);
skb               397 include/linux/netfilter.h 	struct sk_buff *skb, struct net_device *in, struct net_device *out,
skb               400 include/linux/netfilter.h 	return okfn(net, sk, skb);
skb               412 include/linux/netfilter.h 			  struct sock *sk, struct sk_buff *skb,
skb               420 include/linux/netfilter.h nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
skb               432 include/linux/netfilter.h 			 const struct sk_buff *skb);
skb               434 include/linux/netfilter.h static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
skb               437 include/linux/netfilter.h 				       const struct sk_buff *skb)
skb               447 include/linux/netfilter.h 	int (*update)(struct net *net, struct sk_buff *skb);
skb               457 include/linux/netfilter.h 	struct nf_conn *(*get_ct)(const struct sk_buff *skb,
skb               460 include/linux/netfilter.h 	int (*build)(struct sk_buff *skb, struct nf_conn *ct,
skb               466 include/linux/netfilter.h 	void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
skb               157 include/linux/netfilter/ipset/ip_set.h 	int (*kadt)(struct ip_set *set, const struct sk_buff *skb,
skb               180 include/linux/netfilter/ipset/ip_set.h 	int (*head)(struct ip_set *set, struct sk_buff *skb);
skb               182 include/linux/netfilter/ipset/ip_set.h 	int (*list)(const struct ip_set *set, struct sk_buff *skb,
skb               287 include/linux/netfilter/ipset/ip_set.h ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
skb               292 include/linux/netfilter/ipset/ip_set.h 		if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
skb               306 include/linux/netfilter/ipset/ip_set.h 	return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
skb               329 include/linux/netfilter/ipset/ip_set.h extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
skb               332 include/linux/netfilter/ipset/ip_set.h extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
skb               335 include/linux/netfilter/ipset/ip_set.h extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
skb               348 include/linux/netfilter/ipset/ip_set.h extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
skb               410 include/linux/netfilter/ipset/ip_set.h static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
skb               412 include/linux/netfilter/ipset/ip_set.h 	struct nlattr *__nested = nla_nest_start(skb, type);
skb               417 include/linux/netfilter/ipset/ip_set.h 	ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
skb               419 include/linux/netfilter/ipset/ip_set.h 		nla_nest_end(skb, __nested);
skb               423 include/linux/netfilter/ipset/ip_set.h static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
skb               426 include/linux/netfilter/ipset/ip_set.h 	struct nlattr *__nested = nla_nest_start(skb, type);
skb               431 include/linux/netfilter/ipset/ip_set.h 	ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
skb               433 include/linux/netfilter/ipset/ip_set.h 		nla_nest_end(skb, __nested);
skb               439 include/linux/netfilter/ipset/ip_set.h ip4addr(const struct sk_buff *skb, bool src)
skb               441 include/linux/netfilter/ipset/ip_set.h 	return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
skb               445 include/linux/netfilter/ipset/ip_set.h ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
skb               447 include/linux/netfilter/ipset/ip_set.h 	*addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
skb               451 include/linux/netfilter/ipset/ip_set.h ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
skb               453 include/linux/netfilter/ipset/ip_set.h 	memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
skb               559 include/linux/netfilter/ipset/ip_set.h ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
skb               565 include/linux/netfilter/ipset/ip_set.h 	return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
skb               640 include/linux/netfilter/ipset/ip_set.h ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
skb               642 include/linux/netfilter/ipset/ip_set.h 	return nla_put_net64(skb, IPSET_ATTR_BYTES,
skb               645 include/linux/netfilter/ipset/ip_set.h 	       nla_put_net64(skb, IPSET_ATTR_PACKETS,
skb               669 include/linux/netfilter/ipset/ip_set.h ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
skb               673 include/linux/netfilter/ipset/ip_set.h 		nla_put_net64(skb, IPSET_ATTR_SKBMARK,
skb               678 include/linux/netfilter/ipset/ip_set.h 		nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
skb               681 include/linux/netfilter/ipset/ip_set.h 		nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
skb               692 include/linux/netfilter/ipset/ip_set.h #define IP_SET_INIT_KEXT(skb, opt, set)			\
skb               693 include/linux/netfilter/ipset/ip_set.h 	{ .bytes = (skb)->len, .packets = 1, .target = true,\
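A minimal sketch of how a set type's dump path might use the nested-address helper above; returning -EMSGSIZE asks the netlink dump machinery to continue with a fresh skb:

#include <linux/errno.h>
#include <linux/netfilter/ipset/ip_set.h>

static int example_put_member(struct sk_buff *skb, __be32 ip)
{
	/* Emits IPSET_ATTR_IP as a nested IPv4 address attribute. */
	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, ip))
		return -EMSGSIZE;
	return 0;
}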
skb                 9 include/linux/netfilter/ipset/ip_set_getport.h extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
skb                13 include/linux/netfilter/ipset/ip_set_getport.h extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
skb                16 include/linux/netfilter/ipset/ip_set_getport.h static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
skb                23 include/linux/netfilter/ipset/ip_set_getport.h extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
skb                10 include/linux/netfilter/nf_conntrack_amanda.h extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
skb                29 include/linux/netfilter/nf_conntrack_ftp.h extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
skb                41 include/linux/netfilter/nf_conntrack_h323.h extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
skb                46 include/linux/netfilter/nf_conntrack_h323.h extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
skb                51 include/linux/netfilter/nf_conntrack_h323.h extern int (*set_sig_addr_hook) (struct sk_buff *skb,
skb                56 include/linux/netfilter/nf_conntrack_h323.h extern int (*set_ras_addr_hook) (struct sk_buff *skb,
skb                61 include/linux/netfilter/nf_conntrack_h323.h extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
skb                70 include/linux/netfilter/nf_conntrack_h323.h extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
skb                76 include/linux/netfilter/nf_conntrack_h323.h extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
skb                82 include/linux/netfilter/nf_conntrack_h323.h extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
skb                90 include/linux/netfilter/nf_conntrack_h323.h extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
skb                11 include/linux/netfilter/nf_conntrack_irc.h extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
skb               304 include/linux/netfilter/nf_conntrack_pptp.h (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
skb               311 include/linux/netfilter/nf_conntrack_pptp.h (*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
skb                32 include/linux/netfilter/nf_conntrack_proto_gre.h bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
skb                43 include/linux/netfilter/nf_conntrack_sip.h 	int		(*request)(struct sk_buff *skb, unsigned int protoff,
skb                47 include/linux/netfilter/nf_conntrack_sip.h 	int		(*response)(struct sk_buff *skb, unsigned int protoff,
skb               111 include/linux/netfilter/nf_conntrack_sip.h 	unsigned int (*msg)(struct sk_buff *skb,
skb               117 include/linux/netfilter/nf_conntrack_sip.h 	void (*seq_adjust)(struct sk_buff *skb,
skb               120 include/linux/netfilter/nf_conntrack_sip.h 	unsigned int (*expect)(struct sk_buff *skb,
skb               129 include/linux/netfilter/nf_conntrack_sip.h 	unsigned int (*sdp_addr)(struct sk_buff *skb,
skb               139 include/linux/netfilter/nf_conntrack_sip.h 	unsigned int (*sdp_port)(struct sk_buff *skb,
skb               148 include/linux/netfilter/nf_conntrack_sip.h 	unsigned int (*sdp_session)(struct sk_buff *skb,
skb               156 include/linux/netfilter/nf_conntrack_sip.h 	unsigned int (*sdp_media)(struct sk_buff *skb,
skb                 8 include/linux/netfilter/nf_conntrack_snmp.h extern int (*nf_nat_snmp_hook)(struct sk_buff *skb,
skb                22 include/linux/netfilter/nf_conntrack_tftp.h extern unsigned int (*nf_nat_tftp_hook)(struct sk_buff *skb,
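These externs are RCU-published function pointers: the conntrack helper invokes them when set, and the matching NAT module installs and clears them at module init/exit (cf. nf_nat_amanda). A minimal sketch of that publish/unpublish pattern, assuming my_nat_amanda matches the hook's prototype; the body is a placeholder:

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_amanda.h>
#include <net/netfilter/nf_conntrack_expect.h>

static unsigned int my_nat_amanda(struct sk_buff *skb,
				  enum ip_conntrack_info ctinfo,
				  unsigned int protoff,
				  unsigned int matchoff,
				  unsigned int matchlen,
				  struct nf_conntrack_expect *exp)
{
	return NF_ACCEPT;	/* placeholder: real code rewrites the payload */
}

static int __init example_init(void)
{
	BUG_ON(nf_nat_amanda_hook != NULL);
	RCU_INIT_POINTER(nf_nat_amanda_hook, my_nat_amanda);
	return 0;
}

static void __exit example_exit(void)
{
	RCU_INIT_POINTER(nf_nat_amanda_hook, NULL);
	synchronize_rcu();	/* wait out in-flight callers */
}

module_init(example_init);
module_exit(example_exit);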
skb                11 include/linux/netfilter/nfnetlink.h 	int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb,
skb                15 include/linux/netfilter/nfnetlink.h 	int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb,
skb                19 include/linux/netfilter/nfnetlink.h 	int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
skb                33 include/linux/netfilter/nfnetlink.h 	int (*commit)(struct net *net, struct sk_buff *skb);
skb                34 include/linux/netfilter/nfnetlink.h 	int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
skb                43 include/linux/netfilter/nfnetlink.h int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
skb                46 include/linux/netfilter/nfnetlink.h int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
skb                18 include/linux/netfilter/nfnetlink_acct.h void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
skb                29 include/linux/netfilter/nfnetlink_osf.h bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
skb                34 include/linux/netfilter/nfnetlink_osf.h bool nf_osf_find(const struct sk_buff *skb,
skb               153 include/linux/netfilter/x_tables.h 	bool (*match)(const struct sk_buff *skb,
skb               191 include/linux/netfilter/x_tables.h 	unsigned int (*target)(struct sk_buff *skb,
skb                57 include/linux/netfilter_arp/arp_tables.h extern unsigned int arpt_do_table(struct sk_buff *skb,
skb                17 include/linux/netfilter_bridge.h int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                19 include/linux/netfilter_bridge.h static inline void br_drop_fake_rtable(struct sk_buff *skb)
skb                21 include/linux/netfilter_bridge.h 	struct dst_entry *dst = skb_dst(skb);
skb                24 include/linux/netfilter_bridge.h 		skb_dst_drop(skb);
skb                28 include/linux/netfilter_bridge.h nf_bridge_info_get(const struct sk_buff *skb)
skb                30 include/linux/netfilter_bridge.h 	return skb_ext_find(skb, SKB_EXT_BRIDGE_NF);
skb                33 include/linux/netfilter_bridge.h static inline bool nf_bridge_info_exists(const struct sk_buff *skb)
skb                35 include/linux/netfilter_bridge.h 	return skb_ext_exist(skb, SKB_EXT_BRIDGE_NF);
skb                38 include/linux/netfilter_bridge.h static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
skb                40 include/linux/netfilter_bridge.h 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb                48 include/linux/netfilter_bridge.h static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
skb                50 include/linux/netfilter_bridge.h 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb                59 include/linux/netfilter_bridge.h nf_bridge_get_physindev(const struct sk_buff *skb)
skb                61 include/linux/netfilter_bridge.h 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb                67 include/linux/netfilter_bridge.h nf_bridge_get_physoutdev(const struct sk_buff *skb)
skb                69 include/linux/netfilter_bridge.h 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb                74 include/linux/netfilter_bridge.h static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
skb                76 include/linux/netfilter_bridge.h 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb                81 include/linux/netfilter_bridge.h #define br_drop_fake_rtable(skb)	        do { } while (0)
skb                82 include/linux/netfilter_bridge.h static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
skb                23 include/linux/netfilter_bridge/ebtables.h 	bool (*match)(const struct sk_buff *skb, const struct net_device *in,
skb                40 include/linux/netfilter_bridge/ebtables.h 	unsigned int (*target)(struct sk_buff *skb,
skb                58 include/linux/netfilter_bridge/ebtables.h 	unsigned int (*target)(struct sk_buff *skb,
skb               115 include/linux/netfilter_bridge/ebtables.h extern unsigned int ebt_do_table(struct sk_buff *skb,
skb                 9 include/linux/netfilter_ingress.h static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
skb                15 include/linux/netfilter_ingress.h 	return rcu_access_pointer(skb->dev->nf_hooks_ingress);
skb                19 include/linux/netfilter_ingress.h static inline int nf_hook_ingress(struct sk_buff *skb)
skb                21 include/linux/netfilter_ingress.h 	struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
skb                32 include/linux/netfilter_ingress.h 			   NFPROTO_NETDEV, skb->dev, NULL, NULL,
skb                33 include/linux/netfilter_ingress.h 			   dev_net(skb->dev), NULL);
skb                34 include/linux/netfilter_ingress.h 	ret = nf_hook_slow(skb, &state, e, 0);
skb                46 include/linux/netfilter_ingress.h static inline int nf_hook_ingress_active(struct sk_buff *skb)
skb                51 include/linux/netfilter_ingress.h static inline int nf_hook_ingress(struct sk_buff *skb)
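A minimal sketch of the core-side call pattern: take the slow path only when an ingress hook is actually registered. A return of 1 means "keep processing"; anything else means the skb was stolen, queued, or already freed by the hook chain:

#include <linux/netfilter_ingress.h>

static int example_handle_ingress(struct sk_buff *skb)
{
	if (!nf_hook_ingress_active(skb))
		return 1;
	return nf_hook_ingress(skb);	/* caller holds rcu_read_lock() */
}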
skb                19 include/linux/netfilter_ipv4.h int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
skb                24 include/linux/netfilter_ipv4.h __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
skb                29 include/linux/netfilter_ipv4.h static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
skb                65 include/linux/netfilter_ipv4/ip_tables.h extern unsigned int ipt_do_table(struct sk_buff *skb,
skb                45 include/linux/netfilter_ipv6.h 	int (*route_me_harder)(struct net *net, struct sk_buff *skb);
skb                56 include/linux/netfilter_ipv6.h 	void (*route_input)(struct sk_buff *skb);
skb                57 include/linux/netfilter_ipv6.h 	int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
skb                59 include/linux/netfilter_ipv6.h 	int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
skb                61 include/linux/netfilter_ipv6.h 	int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
skb                63 include/linux/netfilter_ipv6.h 			   struct sk_buff *skb,
skb               120 include/linux/netfilter_ipv6.h static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
skb               129 include/linux/netfilter_ipv6.h 	return v6_ops->br_defrag(net, skb, user);
skb               131 include/linux/netfilter_ipv6.h 	return nf_ct_frag6_gather(net, skb, user);
skb               137 include/linux/netfilter_ipv6.h int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb               144 include/linux/netfilter_ipv6.h 				     struct sk_buff *skb,
skb               156 include/linux/netfilter_ipv6.h 	return v6_ops->br_fragment(net, sk, skb, data, output);
skb               158 include/linux/netfilter_ipv6.h 	return br_ip6_fragment(net, sk, skb, data, output);
skb               164 include/linux/netfilter_ipv6.h int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
skb               166 include/linux/netfilter_ipv6.h static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
skb               174 include/linux/netfilter_ipv6.h 	return v6_ops->route_me_harder(net, skb);
skb               176 include/linux/netfilter_ipv6.h 	return ip6_route_me_harder(net, skb);
skb               215 include/linux/netfilter_ipv6.h __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
skb                32 include/linux/netfilter_ipv6/ip6_tables.h extern unsigned int ip6t_do_table(struct sk_buff *skb,
skb                14 include/linux/netlink.h static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
skb                16 include/linux/netlink.h 	return (struct nlmsghdr *)skb->data;
skb                33 include/linux/netlink.h #define NETLINK_CB(skb)		(*(struct netlink_skb_parms*)&((skb)->cb))
skb                34 include/linux/netlink.h #define NETLINK_CREDS(skb)	(&NETLINK_CB((skb)).creds)
skb                47 include/linux/netlink.h 	void		(*input)(struct sk_buff *skb);
skb               129 include/linux/netlink.h bool netlink_strict_get_check(struct sk_buff *skb);
skb               131 include/linux/netlink.h int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
skb               132 include/linux/netlink.h int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
skb               134 include/linux/netlink.h int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
skb               136 include/linux/netlink.h 			       int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
skb               144 include/linux/netlink.h int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
skb               146 include/linux/netlink.h void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
skb               147 include/linux/netlink.h int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
skb               150 include/linux/netlink.h netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
skb               154 include/linux/netlink.h 	nskb = skb_clone(skb, gfp_mask);
skb               159 include/linux/netlink.h 	if (is_vmalloc_addr(skb->head))
skb               160 include/linux/netlink.h 		nskb->destructor = skb->destructor;
skb               181 include/linux/netlink.h 	struct sk_buff		*skb;
skb               183 include/linux/netlink.h 	int			(*dump)(struct sk_buff * skb,
skb               212 include/linux/netlink.h __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
skb               216 include/linux/netlink.h 	int (*dump)(struct sk_buff *skb, struct netlink_callback *);
skb               223 include/linux/netlink.h int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
skb               226 include/linux/netlink.h static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
skb               233 include/linux/netlink.h 	return __netlink_dump_start(ssk, skb, nlh, control);
skb               247 include/linux/netlink.h bool netlink_ns_capable(const struct sk_buff *skb,
skb               249 include/linux/netlink.h bool netlink_capable(const struct sk_buff *skb, int cap);
skb               250 include/linux/netlink.h bool netlink_net_capable(const struct sk_buff *skb, int cap);
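A minimal sketch of an in-kernel netlink receiver: the socket's ->input callback (declared above) is handed each skb sent to the kernel side, and the capability helpers check the original sender's privileges:

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <net/netlink.h>

static void example_nl_input(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);

	if (skb->len < NLMSG_HDRLEN || !nlmsg_ok(nlh, skb->len))
		return;				/* malformed message */
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return;				/* sender lacks privilege */
	pr_info("nl msg type=%u from portid=%u\n",
		nlh->nlmsg_type, NETLINK_CB(skb).portid);
}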
skb                66 include/linux/netpoll.h void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
skb                68 include/linux/netpoll.h static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
skb                72 include/linux/netpoll.h 	netpoll_send_skb_on_dev(np, skb, np->dev);
skb               558 include/linux/phy.h 	bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
skb               566 include/linux/phy.h 	void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
skb                69 include/linux/pim.h int pim_rcv_v1(struct sk_buff *skb);
skb                76 include/linux/pim.h static inline struct pimhdr *pim_hdr(const struct sk_buff *skb)
skb                78 include/linux/pim.h 	return (struct pimhdr *)skb_transport_header(skb);
skb                58 include/linux/ptp_classify.h unsigned int ptp_classify_raw(const struct sk_buff *skb);
skb                65 include/linux/ptp_classify.h static inline unsigned int ptp_classify_raw(struct sk_buff *skb)
skb               258 include/linux/qed/qed_ll2_if.h 	int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
skb                12 include/linux/rtnetlink.h extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
skb                13 include/linux/rtnetlink.h extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
skb                14 include/linux/rtnetlink.h extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
skb                17 include/linux/rtnetlink.h extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
skb                18 include/linux/rtnetlink.h extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
skb                28 include/linux/rtnetlink.h void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
skb               113 include/linux/rtnetlink.h extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
skb               130 include/linux/rtnetlink.h extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
skb               134 include/linux/rtnetlink.h 				   int (*vlan_fill)(struct sk_buff *skb,
skb                52 include/linux/sctp.h static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb)
skb                54 include/linux/sctp.h 	return (struct sctphdr *)skb_transport_header(skb);
skb               438 include/linux/security.h int security_netlink_send(struct sock *sk, struct sk_buff *skb);
skb              1229 include/linux/security.h static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
skb              1297 include/linux/security.h int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb);
skb              1300 include/linux/security.h int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid);
skb              1308 include/linux/security.h 			struct sk_buff *skb, struct request_sock *req);
skb              1312 include/linux/security.h 			struct sk_buff *skb);
skb              1322 include/linux/security.h int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb);
skb              1427 include/linux/security.h 					struct sk_buff *skb)
skb              1438 include/linux/security.h static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
skb              1469 include/linux/security.h 			struct sk_buff *skb, struct request_sock *req)
skb              1480 include/linux/security.h 			struct sk_buff *skb)
skb              1527 include/linux/security.h 					      struct sk_buff *skb)
skb              1588 include/linux/security.h int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
skb              1589 include/linux/security.h void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
skb              1646 include/linux/security.h static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
skb              1651 include/linux/security.h static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
skb                42 include/linux/skb_array.h static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
skb                44 include/linux/skb_array.h 	return ptr_ring_produce(&a->ring, skb);
skb                47 include/linux/skb_array.h static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
skb                49 include/linux/skb_array.h 	return ptr_ring_produce_irq(&a->ring, skb);
skb                52 include/linux/skb_array.h static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
skb                54 include/linux/skb_array.h 	return ptr_ring_produce_bh(&a->ring, skb);
skb                57 include/linux/skb_array.h static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
skb                59 include/linux/skb_array.h 	return ptr_ring_produce_any(&a->ring, skb);
skb               146 include/linux/skb_array.h static inline int __skb_array_len_with_tag(struct sk_buff *skb)
skb               148 include/linux/skb_array.h 	if (likely(skb)) {
skb               149 include/linux/skb_array.h 		int len = skb->len;
skb               151 include/linux/skb_array.h 		if (skb_vlan_tag_present(skb))
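skb_array wraps ptr_ring with skb-typed producers and consumers; produce returns non-zero when the ring is full, leaving ownership with the caller. A minimal sketch of an enqueue/dequeue pair, in the spirit of the tun/tap queues:

#include <linux/errno.h>
#include <linux/skb_array.h>

static int example_enqueue(struct skb_array *a, struct sk_buff *skb)
{
	if (skb_array_produce(a, skb)) {	/* full: we still own skb */
		kfree_skb(skb);
		return -ENOSPC;
	}
	return 0;
}

static struct sk_buff *example_dequeue(struct skb_array *a)
{
	return skb_array_consume(a);		/* NULL when empty */
}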
skb               502 include/linux/skbuff.h int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
skb               503 include/linux/skbuff.h int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
skb               725 include/linux/skbuff.h 			void		(*destructor)(struct sk_buff *skb);
skb               904 include/linux/skbuff.h static inline bool skb_pfmemalloc(const struct sk_buff *skb)
skb               906 include/linux/skbuff.h 	return unlikely(skb->pfmemalloc);
skb               922 include/linux/skbuff.h static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
skb               927 include/linux/skbuff.h 	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
skb               930 include/linux/skbuff.h 	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
skb               941 include/linux/skbuff.h static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
skb               943 include/linux/skbuff.h 	skb->_skb_refdst = (unsigned long)dst;
skb               956 include/linux/skbuff.h static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
skb               959 include/linux/skbuff.h 	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
skb               966 include/linux/skbuff.h static inline bool skb_dst_is_noref(const struct sk_buff *skb)
skb               968 include/linux/skbuff.h 	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
skb               975 include/linux/skbuff.h static inline struct rtable *skb_rtable(const struct sk_buff *skb)
skb               977 include/linux/skbuff.h 	return (struct rtable *)skb_dst(skb);
skb               993 include/linux/skbuff.h static inline unsigned int skb_napi_id(const struct sk_buff *skb)
skb               996 include/linux/skbuff.h 	return skb->napi_id;
skb              1008 include/linux/skbuff.h static inline bool skb_unref(struct sk_buff *skb)
skb              1010 include/linux/skbuff.h 	if (unlikely(!skb))
skb              1012 include/linux/skbuff.h 	if (likely(refcount_read(&skb->users) == 1))
skb              1014 include/linux/skbuff.h 	else if (likely(!refcount_dec_and_test(&skb->users)))
skb              1020 include/linux/skbuff.h void skb_release_head_state(struct sk_buff *skb);
skb              1021 include/linux/skbuff.h void kfree_skb(struct sk_buff *skb);
skb              1023 include/linux/skbuff.h void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
skb              1024 include/linux/skbuff.h void skb_tx_error(struct sk_buff *skb);
skb              1025 include/linux/skbuff.h void consume_skb(struct sk_buff *skb);
skb              1026 include/linux/skbuff.h void __consume_stateless_skb(struct sk_buff *skb);
skb              1027 include/linux/skbuff.h void  __kfree_skb(struct sk_buff *skb);
skb              1030 include/linux/skbuff.h void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
skb              1038 include/linux/skbuff.h struct sk_buff *build_skb_around(struct sk_buff *skb,
skb              1080 include/linux/skbuff.h 				   const struct sk_buff *skb)
skb              1084 include/linux/skbuff.h 	fclones = container_of(skb, struct sk_buff_fclones, skb1);
skb              1086 include/linux/skbuff.h 	return skb->fclone == SKB_FCLONE_ORIG &&
skb              1105 include/linux/skbuff.h void skb_headers_offset_update(struct sk_buff *skb, int off);
skb              1106 include/linux/skbuff.h int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
skb              1107 include/linux/skbuff.h struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
skb              1109 include/linux/skbuff.h struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
skb              1110 include/linux/skbuff.h struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
skb              1112 include/linux/skbuff.h static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
skb              1115 include/linux/skbuff.h 	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
skb              1118 include/linux/skbuff.h int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
skb              1119 include/linux/skbuff.h struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
skb              1121 include/linux/skbuff.h struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
skb              1123 include/linux/skbuff.h int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
skb              1125 include/linux/skbuff.h int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
skb              1127 include/linux/skbuff.h int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
skb              1128 include/linux/skbuff.h int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
skb              1141 include/linux/skbuff.h static inline int skb_pad(struct sk_buff *skb, int pad)
skb              1143 include/linux/skbuff.h 	return __skb_pad(skb, pad, true);
skb              1147 include/linux/skbuff.h int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
skb              1160 include/linux/skbuff.h void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
skb              1166 include/linux/skbuff.h unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
skb              1202 include/linux/skbuff.h static inline void skb_clear_hash(struct sk_buff *skb)
skb              1204 include/linux/skbuff.h 	skb->hash = 0;
skb              1205 include/linux/skbuff.h 	skb->sw_hash = 0;
skb              1206 include/linux/skbuff.h 	skb->l4_hash = 0;
skb              1209 include/linux/skbuff.h static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
skb              1211 include/linux/skbuff.h 	if (!skb->l4_hash)
skb              1212 include/linux/skbuff.h 		skb_clear_hash(skb);
skb              1216 include/linux/skbuff.h __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
skb              1218 include/linux/skbuff.h 	skb->l4_hash = is_l4;
skb              1219 include/linux/skbuff.h 	skb->sw_hash = is_sw;
skb              1220 include/linux/skbuff.h 	skb->hash = hash;
skb              1224 include/linux/skbuff.h skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
skb              1227 include/linux/skbuff.h 	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
skb              1231 include/linux/skbuff.h __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
skb              1233 include/linux/skbuff.h 	__skb_set_hash(skb, hash, true, is_l4);
skb              1236 include/linux/skbuff.h void __skb_get_hash(struct sk_buff *skb);
skb              1237 include/linux/skbuff.h u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
skb              1238 include/linux/skbuff.h u32 skb_get_poff(const struct sk_buff *skb);
skb              1239 include/linux/skbuff.h u32 __skb_get_poff(const struct sk_buff *skb, void *data,
skb              1241 include/linux/skbuff.h __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
skb              1244 include/linux/skbuff.h static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
skb              1247 include/linux/skbuff.h 	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
skb              1285 include/linux/skbuff.h 			const struct sk_buff *skb,
skb              1291 include/linux/skbuff.h static inline bool skb_flow_dissect(const struct sk_buff *skb,
skb              1295 include/linux/skbuff.h 	return __skb_flow_dissect(NULL, skb, flow_dissector,
skb              1299 include/linux/skbuff.h static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
skb              1304 include/linux/skbuff.h 	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
skb              1310 include/linux/skbuff.h 				 const struct sk_buff *skb,
skb              1316 include/linux/skbuff.h 	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
skb              1320 include/linux/skbuff.h void skb_flow_dissect_meta(const struct sk_buff *skb,
skb              1329 include/linux/skbuff.h skb_flow_dissect_ct(const struct sk_buff *skb,
skb              1335 include/linux/skbuff.h skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
skb              1339 include/linux/skbuff.h static inline __u32 skb_get_hash(struct sk_buff *skb)
skb              1341 include/linux/skbuff.h 	if (!skb->l4_hash && !skb->sw_hash)
skb              1342 include/linux/skbuff.h 		__skb_get_hash(skb);
skb              1344 include/linux/skbuff.h 	return skb->hash;
skb              1347 include/linux/skbuff.h static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
skb              1349 include/linux/skbuff.h 	if (!skb->l4_hash && !skb->sw_hash) {
skb              1353 include/linux/skbuff.h 		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
skb              1356 include/linux/skbuff.h 	return skb->hash;
skb              1359 include/linux/skbuff.h __u32 skb_get_hash_perturb(const struct sk_buff *skb,
skb              1362 include/linux/skbuff.h static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
skb              1364 include/linux/skbuff.h 	return skb->hash;
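skb_get_hash() computes the flow hash lazily and caches it in skb->hash; skb_get_hash_raw() reads the cache without recomputing. A minimal sketch of using the hash for queue selection, roughly what a trivial ndo_select_queue could do:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	/* Map the 32-bit flow hash onto the device's TX queue range. */
	return (u16)reciprocal_scale(skb_get_hash(skb),
				     dev->real_num_tx_queues);
}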
skb              1383 include/linux/skbuff.h static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
skb              1385 include/linux/skbuff.h 	return skb->head + skb->end;
skb              1388 include/linux/skbuff.h static inline unsigned int skb_end_offset(const struct sk_buff *skb)
skb              1390 include/linux/skbuff.h 	return skb->end;
skb              1393 include/linux/skbuff.h static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
skb              1395 include/linux/skbuff.h 	return skb->end;
skb              1398 include/linux/skbuff.h static inline unsigned int skb_end_offset(const struct sk_buff *skb)
skb              1400 include/linux/skbuff.h 	return skb->end - skb->head;
skb              1407 include/linux/skbuff.h static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
skb              1409 include/linux/skbuff.h 	return &skb_shinfo(skb)->hwtstamps;
skb              1412 include/linux/skbuff.h static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
skb              1414 include/linux/skbuff.h 	bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
skb              1416 include/linux/skbuff.h 	return is_zcopy ? skb_uarg(skb) : NULL;
skb              1419 include/linux/skbuff.h static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
skb              1422 include/linux/skbuff.h 	if (skb && uarg && !skb_zcopy(skb)) {
skb              1427 include/linux/skbuff.h 		skb_shinfo(skb)->destructor_arg = uarg;
skb              1428 include/linux/skbuff.h 		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
skb              1432 include/linux/skbuff.h static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
skb              1434 include/linux/skbuff.h 	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
skb              1435 include/linux/skbuff.h 	skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
skb              1438 include/linux/skbuff.h static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
skb              1440 include/linux/skbuff.h 	return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
skb              1443 include/linux/skbuff.h static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
skb              1445 include/linux/skbuff.h 	return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
skb              1449 include/linux/skbuff.h static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
skb              1451 include/linux/skbuff.h 	struct ubuf_info *uarg = skb_zcopy(skb);
skb              1454 include/linux/skbuff.h 		if (skb_zcopy_is_nouarg(skb)) {
skb              1463 include/linux/skbuff.h 		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
skb              1468 include/linux/skbuff.h static inline void skb_zcopy_abort(struct sk_buff *skb)
skb              1470 include/linux/skbuff.h 	struct ubuf_info *uarg = skb_zcopy(skb);
skb              1474 include/linux/skbuff.h 		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
skb              1478 include/linux/skbuff.h static inline void skb_mark_not_on_list(struct sk_buff *skb)
skb              1480 include/linux/skbuff.h 	skb->next = NULL;
skb              1483 include/linux/skbuff.h static inline void skb_list_del_init(struct sk_buff *skb)
skb              1485 include/linux/skbuff.h 	__list_del_entry(&skb->list);
skb              1486 include/linux/skbuff.h 	skb_mark_not_on_list(skb);
skb              1521 include/linux/skbuff.h 				     const struct sk_buff *skb)
skb              1523 include/linux/skbuff.h 	return skb->next == (const struct sk_buff *) list;
skb              1534 include/linux/skbuff.h 				      const struct sk_buff *skb)
skb              1536 include/linux/skbuff.h 	return skb->prev == (const struct sk_buff *) list;
skb              1548 include/linux/skbuff.h 					     const struct sk_buff *skb)
skb              1553 include/linux/skbuff.h 	BUG_ON(skb_queue_is_last(list, skb));
skb              1554 include/linux/skbuff.h 	return skb->next;
skb              1566 include/linux/skbuff.h 					     const struct sk_buff *skb)
skb              1571 include/linux/skbuff.h 	BUG_ON(skb_queue_is_first(list, skb));
skb              1572 include/linux/skbuff.h 	return skb->prev;
skb              1582 include/linux/skbuff.h static inline struct sk_buff *skb_get(struct sk_buff *skb)
skb              1584 include/linux/skbuff.h 	refcount_inc(&skb->users);
skb              1585 include/linux/skbuff.h 	return skb;
skb              1600 include/linux/skbuff.h static inline int skb_cloned(const struct sk_buff *skb)
skb              1602 include/linux/skbuff.h 	return skb->cloned &&
skb              1603 include/linux/skbuff.h 	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
skb              1606 include/linux/skbuff.h static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
skb              1610 include/linux/skbuff.h 	if (skb_cloned(skb))
skb              1611 include/linux/skbuff.h 		return pskb_expand_head(skb, 0, 0, pri);
skb              1623 include/linux/skbuff.h static inline int skb_header_cloned(const struct sk_buff *skb)
skb              1627 include/linux/skbuff.h 	if (!skb->cloned)
skb              1630 include/linux/skbuff.h 	dataref = atomic_read(&skb_shinfo(skb)->dataref);
skb              1635 include/linux/skbuff.h static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
skb              1639 include/linux/skbuff.h 	if (skb_header_cloned(skb))
skb              1640 include/linux/skbuff.h 		return pskb_expand_head(skb, 0, 0, pri);
skb              1649 include/linux/skbuff.h static inline void __skb_header_release(struct sk_buff *skb)
skb              1651 include/linux/skbuff.h 	skb->nohdr = 1;
skb              1652 include/linux/skbuff.h 	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
skb              1663 include/linux/skbuff.h static inline int skb_shared(const struct sk_buff *skb)
skb              1665 include/linux/skbuff.h 	return refcount_read(&skb->users) != 1;
skb              1681 include/linux/skbuff.h static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
skb              1684 include/linux/skbuff.h 	if (skb_shared(skb)) {
skb              1685 include/linux/skbuff.h 		struct sk_buff *nskb = skb_clone(skb, pri);
skb              1688 include/linux/skbuff.h 			consume_skb(skb);
skb              1690 include/linux/skbuff.h 			kfree_skb(skb);
skb              1691 include/linux/skbuff.h 		skb = nskb;
skb              1693 include/linux/skbuff.h 	return skb;
skb              1716 include/linux/skbuff.h static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
skb              1720 include/linux/skbuff.h 	if (skb_cloned(skb)) {
skb              1721 include/linux/skbuff.h 		struct sk_buff *nskb = skb_copy(skb, pri);
skb              1725 include/linux/skbuff.h 			consume_skb(skb);
skb              1727 include/linux/skbuff.h 			kfree_skb(skb);
skb              1728 include/linux/skbuff.h 		skb = nskb;
skb              1730 include/linux/skbuff.h 	return skb;
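skb_share_check() and skb_unclone() together implement the "make it safely writable" idiom: first unshare the skb itself, then un-clone the data if it is about to be modified. A minimal sketch:

#include <linux/skbuff.h>

static struct sk_buff *example_prepare_write(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);	/* clone if users > 1 */
	if (!skb)
		return NULL;
	if (skb_unclone(skb, GFP_ATOMIC)) {	/* private data copy */
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}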
skb              1748 include/linux/skbuff.h 	struct sk_buff *skb = list_->next;
skb              1750 include/linux/skbuff.h 	if (skb == (struct sk_buff *)list_)
skb              1751 include/linux/skbuff.h 		skb = NULL;
skb              1752 include/linux/skbuff.h 	return skb;
skb              1775 include/linux/skbuff.h static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
skb              1778 include/linux/skbuff.h 	struct sk_buff *next = skb->next;
skb              1800 include/linux/skbuff.h 	struct sk_buff *skb = READ_ONCE(list_->prev);
skb              1802 include/linux/skbuff.h 	if (skb == (struct sk_buff *)list_)
skb              1803 include/linux/skbuff.h 		skb = NULL;
skb              1804 include/linux/skbuff.h 	return skb;
skb              2019 include/linux/skbuff.h void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
skb              2020 include/linux/skbuff.h static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
skb              2025 include/linux/skbuff.h 	next	   = skb->next;
skb              2026 include/linux/skbuff.h 	prev	   = skb->prev;
skb              2027 include/linux/skbuff.h 	skb->next  = skb->prev = NULL;
skb              2042 include/linux/skbuff.h 	struct sk_buff *skb = skb_peek(list);
skb              2043 include/linux/skbuff.h 	if (skb)
skb              2044 include/linux/skbuff.h 		__skb_unlink(skb, list);
skb              2045 include/linux/skbuff.h 	return skb;
skb              2059 include/linux/skbuff.h 	struct sk_buff *skb = skb_peek_tail(list);
skb              2060 include/linux/skbuff.h 	if (skb)
skb              2061 include/linux/skbuff.h 		__skb_unlink(skb, list);
skb              2062 include/linux/skbuff.h 	return skb;
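The double-underscore dequeue variants skip the queue lock, so the caller must provide its own serialization. A minimal sketch of draining a private queue (effectively what skb_queue_purge() does):

#include <linux/skbuff.h>

static void example_drain(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	/* Caller guarantees exclusive access to q. */
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}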
skb              2067 include/linux/skbuff.h static inline bool skb_is_nonlinear(const struct sk_buff *skb)
skb              2069 include/linux/skbuff.h 	return skb->data_len;
skb              2072 include/linux/skbuff.h static inline unsigned int skb_headlen(const struct sk_buff *skb)
skb              2074 include/linux/skbuff.h 	return skb->len - skb->data_len;
skb              2077 include/linux/skbuff.h static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
skb              2081 include/linux/skbuff.h 	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
skb              2082 include/linux/skbuff.h 		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              2086 include/linux/skbuff.h static inline unsigned int skb_pagelen(const struct sk_buff *skb)
skb              2088 include/linux/skbuff.h 	return skb_headlen(skb) + __skb_pagelen(skb);
skb              2104 include/linux/skbuff.h static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
skb              2107 include/linux/skbuff.h 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2120 include/linux/skbuff.h 		skb->pfmemalloc	= true;
skb              2137 include/linux/skbuff.h static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
skb              2140 include/linux/skbuff.h 	__skb_fill_page_desc(skb, i, page, off, size);
skb              2141 include/linux/skbuff.h 	skb_shinfo(skb)->nr_frags = i + 1;
skb              2144 include/linux/skbuff.h void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
skb              2147 include/linux/skbuff.h void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
skb              2150 include/linux/skbuff.h #define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
skb              2153 include/linux/skbuff.h static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
skb              2155 include/linux/skbuff.h 	return skb->head + skb->tail;
skb              2158 include/linux/skbuff.h static inline void skb_reset_tail_pointer(struct sk_buff *skb)
skb              2160 include/linux/skbuff.h 	skb->tail = skb->data - skb->head;
skb              2163 include/linux/skbuff.h static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
skb              2165 include/linux/skbuff.h 	skb_reset_tail_pointer(skb);
skb              2166 include/linux/skbuff.h 	skb->tail += offset;
skb              2170 include/linux/skbuff.h static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
skb              2172 include/linux/skbuff.h 	return skb->tail;
skb              2175 include/linux/skbuff.h static inline void skb_reset_tail_pointer(struct sk_buff *skb)
skb              2177 include/linux/skbuff.h 	skb->tail = skb->data;
skb              2180 include/linux/skbuff.h static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
skb              2182 include/linux/skbuff.h 	skb->tail = skb->data + offset;
skb              2190 include/linux/skbuff.h void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
skb              2191 include/linux/skbuff.h void *skb_put(struct sk_buff *skb, unsigned int len);
skb              2192 include/linux/skbuff.h static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
skb              2194 include/linux/skbuff.h 	void *tmp = skb_tail_pointer(skb);
skb              2195 include/linux/skbuff.h 	SKB_LINEAR_ASSERT(skb);
skb              2196 include/linux/skbuff.h 	skb->tail += len;
skb              2197 include/linux/skbuff.h 	skb->len  += len;
skb              2201 include/linux/skbuff.h static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
skb              2203 include/linux/skbuff.h 	void *tmp = __skb_put(skb, len);
skb              2209 include/linux/skbuff.h static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
skb              2212 include/linux/skbuff.h 	void *tmp = __skb_put(skb, len);
skb              2218 include/linux/skbuff.h static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
skb              2220 include/linux/skbuff.h 	*(u8 *)__skb_put(skb, 1) = val;
skb              2223 include/linux/skbuff.h static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
skb              2225 include/linux/skbuff.h 	void *tmp = skb_put(skb, len);
skb              2232 include/linux/skbuff.h static inline void *skb_put_data(struct sk_buff *skb, const void *data,
skb              2235 include/linux/skbuff.h 	void *tmp = skb_put(skb, len);
skb              2242 include/linux/skbuff.h static inline void skb_put_u8(struct sk_buff *skb, u8 val)
skb              2244 include/linux/skbuff.h 	*(u8 *)skb_put(skb, 1) = val;
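
The skb_put() family above extends the linear area at the tail: __skb_put() advances skb->tail and skb->len with only the linearity assertion, while skb_put() (net/core/skbuff.c) additionally panics on tailroom overrun. A minimal usage sketch, assuming a freshly allocated buffer; build_report() and the 0x42 type byte are illustrative only:

	#include <linux/skbuff.h>

	/* Hypothetical helper: one made-up type byte plus a copied payload. */
	static struct sk_buff *build_report(const void *payload, unsigned int len)
	{
		struct sk_buff *skb = alloc_skb(1 + len, GFP_KERNEL);

		if (!skb)
			return NULL;
		skb_put_u8(skb, 0x42);            /* type field (illustrative) */
		skb_put_data(skb, payload, len);  /* copies, advances tail/len */
		return skb;
	}
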
skb              2247 include/linux/skbuff.h void *skb_push(struct sk_buff *skb, unsigned int len);
skb              2248 include/linux/skbuff.h static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
skb              2250 include/linux/skbuff.h 	skb->data -= len;
skb              2251 include/linux/skbuff.h 	skb->len  += len;
skb              2252 include/linux/skbuff.h 	return skb->data;
skb              2255 include/linux/skbuff.h void *skb_pull(struct sk_buff *skb, unsigned int len);
skb              2256 include/linux/skbuff.h static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
skb              2258 include/linux/skbuff.h 	skb->len -= len;
skb              2259 include/linux/skbuff.h 	BUG_ON(skb->len < skb->data_len);
skb              2260 include/linux/skbuff.h 	return skb->data += len;
skb              2263 include/linux/skbuff.h static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
skb              2265 include/linux/skbuff.h 	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
skb              2268 include/linux/skbuff.h void *__pskb_pull_tail(struct sk_buff *skb, int delta);
skb              2270 include/linux/skbuff.h static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
skb              2272 include/linux/skbuff.h 	if (len > skb_headlen(skb) &&
skb              2273 include/linux/skbuff.h 	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
skb              2275 include/linux/skbuff.h 	skb->len -= len;
skb              2276 include/linux/skbuff.h 	return skb->data += len;
skb              2279 include/linux/skbuff.h static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
skb              2281 include/linux/skbuff.h 	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
skb              2284 include/linux/skbuff.h static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
skb              2286 include/linux/skbuff.h 	if (likely(len <= skb_headlen(skb)))
skb              2288 include/linux/skbuff.h 	if (unlikely(len > skb->len))
skb              2290 include/linux/skbuff.h 	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
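
pskb_may_pull() is the standard guard before reading a header through skb->data: it succeeds immediately when the bytes are already linear and otherwise pulls them in from the frags, failing only if the packet is genuinely too short. A minimal sketch, assuming skb->data currently points at an IPv4 header:

	#include <linux/errno.h>
	#include <linux/ip.h>
	#include <linux/skbuff.h>

	static int peek_ip_ttl(struct sk_buff *skb, u8 *ttl)
	{
		const struct iphdr *iph;

		if (!pskb_may_pull(skb, sizeof(*iph)))
			return -EINVAL;	/* too short even after pulling frags */
		iph = (const struct iphdr *)skb->data;
		*ttl = iph->ttl;
		return 0;
	}
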
skb              2293 include/linux/skbuff.h void skb_condense(struct sk_buff *skb);
skb              2301 include/linux/skbuff.h static inline unsigned int skb_headroom(const struct sk_buff *skb)
skb              2303 include/linux/skbuff.h 	return skb->data - skb->head;
skb              2312 include/linux/skbuff.h static inline int skb_tailroom(const struct sk_buff *skb)
skb              2314 include/linux/skbuff.h 	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
skb              2324 include/linux/skbuff.h static inline int skb_availroom(const struct sk_buff *skb)
skb              2326 include/linux/skbuff.h 	if (skb_is_nonlinear(skb))
skb              2329 include/linux/skbuff.h 	return skb->end - skb->tail - skb->reserved_tailroom;
skb              2340 include/linux/skbuff.h static inline void skb_reserve(struct sk_buff *skb, int len)
skb              2342 include/linux/skbuff.h 	skb->data += len;
skb              2343 include/linux/skbuff.h 	skb->tail += len;
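
skb_reserve() is only legal on an empty buffer: it moves data and tail forward together, turning tailroom into headroom for headers that will be pushed later. A sketch of the common allocate/reserve/put sequence, with ETH_HLEN chosen here purely for illustration:

	#include <linux/if_ether.h>
	#include <linux/skbuff.h>

	static struct sk_buff *alloc_with_headroom(unsigned int payload_len)
	{
		struct sk_buff *skb = alloc_skb(ETH_HLEN + payload_len, GFP_ATOMIC);

		if (!skb)
			return NULL;
		skb_reserve(skb, ETH_HLEN);	/* headroom for a later skb_push() */
		skb_put(skb, payload_len);	/* payload area, not yet filled */
		return skb;
	}
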
skb              2358 include/linux/skbuff.h static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
skb              2361 include/linux/skbuff.h 	SKB_LINEAR_ASSERT(skb);
skb              2362 include/linux/skbuff.h 	if (mtu < skb_tailroom(skb) - needed_tailroom)
skb              2364 include/linux/skbuff.h 		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
skb              2367 include/linux/skbuff.h 		skb->reserved_tailroom = needed_tailroom;
skb              2373 include/linux/skbuff.h static inline void skb_set_inner_protocol(struct sk_buff *skb,
skb              2376 include/linux/skbuff.h 	skb->inner_protocol = protocol;
skb              2377 include/linux/skbuff.h 	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
skb              2380 include/linux/skbuff.h static inline void skb_set_inner_ipproto(struct sk_buff *skb,
skb              2383 include/linux/skbuff.h 	skb->inner_ipproto = ipproto;
skb              2384 include/linux/skbuff.h 	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
skb              2387 include/linux/skbuff.h static inline void skb_reset_inner_headers(struct sk_buff *skb)
skb              2389 include/linux/skbuff.h 	skb->inner_mac_header = skb->mac_header;
skb              2390 include/linux/skbuff.h 	skb->inner_network_header = skb->network_header;
skb              2391 include/linux/skbuff.h 	skb->inner_transport_header = skb->transport_header;
skb              2394 include/linux/skbuff.h static inline void skb_reset_mac_len(struct sk_buff *skb)
skb              2396 include/linux/skbuff.h 	skb->mac_len = skb->network_header - skb->mac_header;
skb              2400 include/linux/skbuff.h static inline unsigned char *skb_inner_transport_header(const struct sk_buff *skb)
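
The inner-header helpers in this block serve tunnel encapsulation: before the outer headers are pushed, the current header positions are recorded as "inner" so checksum and segmentation offloads can still locate them. A rough ordering sketch for a UDP-encapsulating xmit path; the outer-header construction itself is elided:

	#include <linux/in.h>
	#include <linux/skbuff.h>

	static void mark_inner_headers(struct sk_buff *skb)
	{
		skb_reset_inner_headers(skb);		 /* snapshot mac/network/transport */
		skb_set_inner_ipproto(skb, IPPROTO_UDP); /* inner L4 assumed to be UDP */
		/* ...skb_push() the outer IP/UDP/tunnel headers here... */
	}
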
skb              2402 include/linux/skbuff.h 	return skb->head + skb->inner_transport_header;
skb              2405 include/linux/skbuff.h static inline int skb_inner_transport_offset(const struct sk_buff *skb)
skb              2407 include/linux/skbuff.h 	return skb_inner_transport_header(skb) - skb->data;
skb              2410 include/linux/skbuff.h static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
skb              2412 include/linux/skbuff.h 	skb->inner_transport_header = skb->data - skb->head;
skb              2415 include/linux/skbuff.h static inline void skb_set_inner_transport_header(struct sk_buff *skb,
skb              2418 include/linux/skbuff.h 	skb_reset_inner_transport_header(skb);
skb              2419 include/linux/skbuff.h 	skb->inner_transport_header += offset;
skb              2422 include/linux/skbuff.h static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
skb              2424 include/linux/skbuff.h 	return skb->head + skb->inner_network_header;
skb              2427 include/linux/skbuff.h static inline void skb_reset_inner_network_header(struct sk_buff *skb)
skb              2429 include/linux/skbuff.h 	skb->inner_network_header = skb->data - skb->head;
skb              2432 include/linux/skbuff.h static inline void skb_set_inner_network_header(struct sk_buff *skb,
skb              2435 include/linux/skbuff.h 	skb_reset_inner_network_header(skb);
skb              2436 include/linux/skbuff.h 	skb->inner_network_header += offset;
skb              2439 include/linux/skbuff.h static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
skb              2441 include/linux/skbuff.h 	return skb->head + skb->inner_mac_header;
skb              2444 include/linux/skbuff.h static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
skb              2446 include/linux/skbuff.h 	skb->inner_mac_header = skb->data - skb->head;
skb              2449 include/linux/skbuff.h static inline void skb_set_inner_mac_header(struct sk_buff *skb,
skb              2452 include/linux/skbuff.h 	skb_reset_inner_mac_header(skb);
skb              2453 include/linux/skbuff.h 	skb->inner_mac_header += offset;
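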
skb              2455 include/linux/skbuff.h static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
skb              2457 include/linux/skbuff.h 	return skb->transport_header != (typeof(skb->transport_header))~0U;
skb              2460 include/linux/skbuff.h static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
skb              2462 include/linux/skbuff.h 	return skb->head + skb->transport_header;
skb              2465 include/linux/skbuff.h static inline void skb_reset_transport_header(struct sk_buff *skb)
skb              2467 include/linux/skbuff.h 	skb->transport_header = skb->data - skb->head;
skb              2470 include/linux/skbuff.h static inline void skb_set_transport_header(struct sk_buff *skb,
skb              2473 include/linux/skbuff.h 	skb_reset_transport_header(skb);
skb              2474 include/linux/skbuff.h 	skb->transport_header += offset;
skb              2477 include/linux/skbuff.h static inline unsigned char *skb_network_header(const struct sk_buff *skb)
skb              2479 include/linux/skbuff.h 	return skb->head + skb->network_header;
skb              2482 include/linux/skbuff.h static inline void skb_reset_network_header(struct sk_buff *skb)
skb              2484 include/linux/skbuff.h 	skb->network_header = skb->data - skb->head;
skb              2487 include/linux/skbuff.h static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
skb              2489 include/linux/skbuff.h 	skb_reset_network_header(skb);
skb              2490 include/linux/skbuff.h 	skb->network_header += offset;
skb              2493 include/linux/skbuff.h static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
skb              2495 include/linux/skbuff.h 	return skb->head + skb->mac_header;
skb              2498 include/linux/skbuff.h static inline int skb_mac_offset(const struct sk_buff *skb)
skb              2500 include/linux/skbuff.h 	return skb_mac_header(skb) - skb->data;
skb              2503 include/linux/skbuff.h static inline u32 skb_mac_header_len(const struct sk_buff *skb)
skb              2505 include/linux/skbuff.h 	return skb->network_header - skb->mac_header;
skb              2508 include/linux/skbuff.h static inline int skb_mac_header_was_set(const struct sk_buff *skb)
skb              2510 include/linux/skbuff.h 	return skb->mac_header != (typeof(skb->mac_header))~0U;
skb              2513 include/linux/skbuff.h static inline void skb_reset_mac_header(struct sk_buff *skb)
skb              2515 include/linux/skbuff.h 	skb->mac_header = skb->data - skb->head;
skb              2518 include/linux/skbuff.h static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
skb              2520 include/linux/skbuff.h 	skb_reset_mac_header(skb);
skb              2521 include/linux/skbuff.h 	skb->mac_header += offset;
skb              2524 include/linux/skbuff.h static inline void skb_pop_mac_header(struct sk_buff *skb)
skb              2526 include/linux/skbuff.h 	skb->mac_header = skb->network_header;
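
In the offset-based configuration shown earlier, these header marks are stored relative to skb->head, and the reset helpers snapshot wherever skb->data currently points. When composing an outgoing frame the usual order is therefore push first, mark second, as in this sketch (assumes skb->data sits at the network header and sufficient headroom exists):

	#include <linux/if_ether.h>
	#include <linux/skbuff.h>

	static void push_eth_header(struct sk_buff *skb)
	{
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		skb_reset_mac_header(skb);		/* MAC header = new skb->data */
		skb_set_network_header(skb, ETH_HLEN);	/* network follows the MAC header */
		eth->h_proto = htons(ETH_P_IP);		/* addresses elided for brevity */
	}
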
skb              2529 include/linux/skbuff.h static inline void skb_probe_transport_header(struct sk_buff *skb)
skb              2533 include/linux/skbuff.h 	if (skb_transport_header_was_set(skb))
skb              2536 include/linux/skbuff.h 	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
skb              2538 include/linux/skbuff.h 		skb_set_transport_header(skb, keys.control.thoff);
skb              2541 include/linux/skbuff.h static inline void skb_mac_header_rebuild(struct sk_buff *skb)
skb              2543 include/linux/skbuff.h 	if (skb_mac_header_was_set(skb)) {
skb              2544 include/linux/skbuff.h 		const unsigned char *old_mac = skb_mac_header(skb);
skb              2546 include/linux/skbuff.h 		skb_set_mac_header(skb, -skb->mac_len);
skb              2547 include/linux/skbuff.h 		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
skb              2551 include/linux/skbuff.h static inline int skb_checksum_start_offset(const struct sk_buff *skb)
skb              2553 include/linux/skbuff.h 	return skb->csum_start - skb_headroom(skb);
skb              2556 include/linux/skbuff.h static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
skb              2558 include/linux/skbuff.h 	return skb->head + skb->csum_start;
skb              2561 include/linux/skbuff.h static inline int skb_transport_offset(const struct sk_buff *skb)
skb              2563 include/linux/skbuff.h 	return skb_transport_header(skb) - skb->data;
skb              2566 include/linux/skbuff.h static inline u32 skb_network_header_len(const struct sk_buff *skb)
skb              2568 include/linux/skbuff.h 	return skb->transport_header - skb->network_header;
skb              2571 include/linux/skbuff.h static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
skb              2573 include/linux/skbuff.h 	return skb->inner_transport_header - skb->inner_network_header;
skb              2576 include/linux/skbuff.h static inline int skb_network_offset(const struct sk_buff *skb)
skb              2578 include/linux/skbuff.h 	return skb_network_header(skb) - skb->data;
skb              2581 include/linux/skbuff.h static inline int skb_inner_network_offset(const struct sk_buff *skb)
skb              2583 include/linux/skbuff.h 	return skb_inner_network_header(skb) - skb->data;
skb              2586 include/linux/skbuff.h static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
skb              2588 include/linux/skbuff.h 	return pskb_may_pull(skb, skb_network_offset(skb) + len);
skb              2639 include/linux/skbuff.h int ___pskb_trim(struct sk_buff *skb, unsigned int len);
skb              2641 include/linux/skbuff.h static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
skb              2643 include/linux/skbuff.h 	if (WARN_ON(skb_is_nonlinear(skb)))
skb              2645 include/linux/skbuff.h 	skb->len = len;
skb              2646 include/linux/skbuff.h 	skb_set_tail_pointer(skb, len);
skb              2649 include/linux/skbuff.h static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
skb              2651 include/linux/skbuff.h 	__skb_set_length(skb, len);
skb              2654 include/linux/skbuff.h void skb_trim(struct sk_buff *skb, unsigned int len);
skb              2656 include/linux/skbuff.h static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
skb              2658 include/linux/skbuff.h 	if (skb->data_len)
skb              2659 include/linux/skbuff.h 		return ___pskb_trim(skb, len);
skb              2660 include/linux/skbuff.h 	__skb_trim(skb, len);
skb              2664 include/linux/skbuff.h static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
skb              2666 include/linux/skbuff.h 	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
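
pskb_trim() is a no-op unless it actually shrinks the packet, and it only falls back to ___pskb_trim() when paged data has to be dropped. A sketch of the classic use, removing Ethernet padding once the real length is known; real_len is assumed to come from the network header:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	static int drop_eth_padding(struct sk_buff *skb, unsigned int real_len)
	{
		if (real_len > skb->len)
			return -EINVAL;			/* claimed length is bogus */
		return pskb_trim(skb, real_len);	/* no-op if already exact */
	}
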
skb              2678 include/linux/skbuff.h static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
skb              2680 include/linux/skbuff.h 	int err = pskb_trim(skb, len);
skb              2684 include/linux/skbuff.h static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
skb              2686 include/linux/skbuff.h 	unsigned int diff = len - skb->len;
skb              2688 include/linux/skbuff.h 	if (skb_tailroom(skb) < diff) {
skb              2689 include/linux/skbuff.h 		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
skb              2694 include/linux/skbuff.h 	__skb_set_length(skb, len);
skb              2706 include/linux/skbuff.h static inline void skb_orphan(struct sk_buff *skb)
skb              2708 include/linux/skbuff.h 	if (skb->destructor) {
skb              2709 include/linux/skbuff.h 		skb->destructor(skb);
skb              2710 include/linux/skbuff.h 		skb->destructor = NULL;
skb              2711 include/linux/skbuff.h 		skb->sk		= NULL;
skb              2713 include/linux/skbuff.h 		BUG_ON(skb->sk);
skb              2726 include/linux/skbuff.h static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
skb              2728 include/linux/skbuff.h 	if (likely(!skb_zcopy(skb)))
skb              2730 include/linux/skbuff.h 	if (!skb_zcopy_is_nouarg(skb) &&
skb              2731 include/linux/skbuff.h 	    skb_uarg(skb)->callback == sock_zerocopy_callback)
skb              2733 include/linux/skbuff.h 	return skb_copy_ubufs(skb, gfp_mask);
skb              2737 include/linux/skbuff.h static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
skb              2739 include/linux/skbuff.h 	if (likely(!skb_zcopy(skb)))
skb              2741 include/linux/skbuff.h 	return skb_copy_ubufs(skb, gfp_mask);
skb              2754 include/linux/skbuff.h 	struct sk_buff *skb;
skb              2755 include/linux/skbuff.h 	while ((skb = __skb_dequeue(list)) != NULL)
skb              2756 include/linux/skbuff.h 		kfree_skb(skb);
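
As a double-underscore helper, __skb_queue_purge() leaves locking to the caller. A sketch of purging a queue that is normally protected by its own lock, assuming the caller does not already hold it:

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	static void purge_locked(struct sk_buff_head *list)
	{
		unsigned long flags;

		spin_lock_irqsave(&list->lock, flags);
		__skb_queue_purge(list);	/* unlinks and kfree_skb()s each entry */
		spin_unlock_irqrestore(&list->lock, flags);
	}
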
skb              2803 include/linux/skbuff.h 	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
skb              2805 include/linux/skbuff.h 	if (NET_IP_ALIGN && skb)
skb              2806 include/linux/skbuff.h 		skb_reserve(skb, NET_IP_ALIGN);
skb              2807 include/linux/skbuff.h 	return skb;
skb              2829 include/linux/skbuff.h void napi_consume_skb(struct sk_buff *skb, int budget);
skb              2832 include/linux/skbuff.h void __kfree_skb_defer(struct sk_buff *skb);
skb              2888 include/linux/skbuff.h static inline void skb_propagate_pfmemalloc(struct page *page, struct sk_buff *skb)
skb              2891 include/linux/skbuff.h 		skb->pfmemalloc = true;
skb              2963 include/linux/skbuff.h static inline void skb_frag_ref(struct sk_buff *skb, int f)
skb              2965 include/linux/skbuff.h 	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
skb              2986 include/linux/skbuff.h static inline void skb_frag_unref(struct sk_buff *skb, int f)
skb              2988 include/linux/skbuff.h 	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
skb              3050 include/linux/skbuff.h static inline void skb_frag_set_page(struct sk_buff *skb, int f,
skb              3053 include/linux/skbuff.h 	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
skb              3078 include/linux/skbuff.h static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
skb              3081 include/linux/skbuff.h 	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
skb              3085 include/linux/skbuff.h static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
skb              3088 include/linux/skbuff.h 	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
skb              3100 include/linux/skbuff.h static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
skb              3102 include/linux/skbuff.h 	return !skb_header_cloned(skb) &&
skb              3103 include/linux/skbuff.h 	       skb_headroom(skb) + len <= skb->hdr_len;
skb              3106 include/linux/skbuff.h static inline int skb_try_make_writable(struct sk_buff *skb,
skb              3109 include/linux/skbuff.h 	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
skb              3110 include/linux/skbuff.h 	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
skb              3113 include/linux/skbuff.h static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
skb              3118 include/linux/skbuff.h 	if (headroom > skb_headroom(skb))
skb              3119 include/linux/skbuff.h 		delta = headroom - skb_headroom(skb);
skb              3122 include/linux/skbuff.h 		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
skb              3139 include/linux/skbuff.h static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
skb              3141 include/linux/skbuff.h 	return __skb_cow(skb, headroom, skb_cloned(skb));
skb              3154 include/linux/skbuff.h static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
skb              3156 include/linux/skbuff.h 	return __skb_cow(skb, headroom, skb_header_cloned(skb));
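
skb_cow_head() is the usual precondition for editing headers in place: it un-shares a cloned header block and guarantees the requested headroom, reallocating only when needed. A sketch in the style of VLAN insertion; the actual tag rewrite is left out:

	#include <linux/errno.h>
	#include <linux/if_vlan.h>
	#include <linux/skbuff.h>

	static int make_room_for_vlan(struct sk_buff *skb)
	{
		if (skb_cow_head(skb, VLAN_HLEN))
			return -ENOMEM;	/* no private, writable headroom available */
		/* now safe to skb_push(skb, VLAN_HLEN) and move the MAC header */
		return 0;
	}
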
skb              3169 include/linux/skbuff.h static inline int skb_padto(struct sk_buff *skb, unsigned int len)
skb              3171 include/linux/skbuff.h 	unsigned int size = skb->len;
skb              3174 include/linux/skbuff.h 	return skb_pad(skb, len - size);
skb              3188 include/linux/skbuff.h static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
skb              3191 include/linux/skbuff.h 	unsigned int size = skb->len;
skb              3195 include/linux/skbuff.h 		if (__skb_pad(skb, len, free_on_error))
skb              3197 include/linux/skbuff.h 		__skb_put(skb, len);
skb              3212 include/linux/skbuff.h static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
skb              3214 include/linux/skbuff.h 	return __skb_put_padto(skb, len, true);
skb              3217 include/linux/skbuff.h static inline int skb_add_data(struct sk_buff *skb,
skb              3220 include/linux/skbuff.h 	const int off = skb->len;
skb              3222 include/linux/skbuff.h 	if (skb->ip_summed == CHECKSUM_NONE) {
skb              3224 include/linux/skbuff.h 		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
skb              3226 include/linux/skbuff.h 			skb->csum = csum_block_add(skb->csum, csum, off);
skb              3229 include/linux/skbuff.h 	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
skb              3232 include/linux/skbuff.h 	__skb_trim(skb, off);
skb              3236 include/linux/skbuff.h static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
skb              3239 include/linux/skbuff.h 	if (skb_zcopy(skb))
skb              3242 include/linux/skbuff.h 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
skb              3250 include/linux/skbuff.h static inline int __skb_linearize(struct sk_buff *skb)
skb              3252 include/linux/skbuff.h 	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
skb              3262 include/linux/skbuff.h static inline int skb_linearize(struct sk_buff *skb)
skb              3264 include/linux/skbuff.h 	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
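
skb_linearize() pulls every paged byte into the head so that skb->data..skb->data + skb->len is contiguous, failing with -ENOMEM when the head cannot be grown. A sketch of guarding a parser that requires flat memory; scan_bytes() is hypothetical:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	int scan_bytes(const void *buf, unsigned int len);	/* hypothetical parser */

	static int parse_flat(struct sk_buff *skb)
	{
		if (skb_linearize(skb))
			return -ENOMEM;	/* could not make the data contiguous */
		return scan_bytes(skb->data, skb->len);	/* whole packet is linear now */
	}
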
skb              3274 include/linux/skbuff.h static inline bool skb_has_shared_frag(const struct sk_buff *skb)
skb              3276 include/linux/skbuff.h 	return skb_is_nonlinear(skb) &&
skb              3277 include/linux/skbuff.h 	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
skb              3287 include/linux/skbuff.h static inline int skb_linearize_cow(struct sk_buff *skb)
skb              3289 include/linux/skbuff.h 	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
skb              3290 include/linux/skbuff.h 	       __skb_linearize(skb) : 0;
skb              3294 include/linux/skbuff.h __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
skb              3297 include/linux/skbuff.h 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb              3298 include/linux/skbuff.h 		skb->csum = csum_block_sub(skb->csum,
skb              3300 include/linux/skbuff.h 	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb              3301 include/linux/skbuff.h 		 skb_checksum_start_offset(skb) < 0)
skb              3302 include/linux/skbuff.h 		skb->ip_summed = CHECKSUM_NONE;
skb              3315 include/linux/skbuff.h static inline void skb_postpull_rcsum(struct sk_buff *skb,
skb              3318 include/linux/skbuff.h 	__skb_postpull_rcsum(skb, start, len, 0);
skb              3322 include/linux/skbuff.h __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
skb              3325 include/linux/skbuff.h 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb              3326 include/linux/skbuff.h 		skb->csum = csum_block_add(skb->csum,
skb              3339 include/linux/skbuff.h static inline void skb_postpush_rcsum(struct sk_buff *skb,
skb              3342 include/linux/skbuff.h 	__skb_postpush_rcsum(skb, start, len, 0);
skb              3345 include/linux/skbuff.h void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
skb              3358 include/linux/skbuff.h static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
skb              3360 include/linux/skbuff.h 	skb_push(skb, len);
skb              3361 include/linux/skbuff.h 	skb_postpush_rcsum(skb, skb->data, len);
skb              3362 include/linux/skbuff.h 	return skb->data;
skb              3365 include/linux/skbuff.h int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
skb              3376 include/linux/skbuff.h static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
skb              3378 include/linux/skbuff.h 	if (likely(len >= skb->len))
skb              3380 include/linux/skbuff.h 	return pskb_trim_rcsum_slow(skb, len);
skb              3383 include/linux/skbuff.h static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
skb              3385 include/linux/skbuff.h 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb              3386 include/linux/skbuff.h 		skb->ip_summed = CHECKSUM_NONE;
skb              3387 include/linux/skbuff.h 	__skb_trim(skb, len);
skb              3391 include/linux/skbuff.h static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
skb              3393 include/linux/skbuff.h 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb              3394 include/linux/skbuff.h 		skb->ip_summed = CHECKSUM_NONE;
skb              3395 include/linux/skbuff.h 	return __skb_grow(skb, len);
skb              3401 include/linux/skbuff.h #define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
skb              3402 include/linux/skbuff.h #define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))
skb              3404 include/linux/skbuff.h #define skb_queue_walk(queue, skb) \
skb              3405 include/linux/skbuff.h 		for (skb = (queue)->next;					\
skb              3406 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
skb              3407 include/linux/skbuff.h 		     skb = skb->next)
skb              3409 include/linux/skbuff.h #define skb_queue_walk_safe(queue, skb, tmp)					\
skb              3410 include/linux/skbuff.h 		for (skb = (queue)->next, tmp = skb->next;			\
skb              3411 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
skb              3412 include/linux/skbuff.h 		     skb = tmp, tmp = skb->next)
skb              3414 include/linux/skbuff.h #define skb_queue_walk_from(queue, skb)						\
skb              3415 include/linux/skbuff.h 		for (; skb != (struct sk_buff *)(queue);			\
skb              3416 include/linux/skbuff.h 		     skb = skb->next)
skb              3418 include/linux/skbuff.h #define skb_rbtree_walk(skb, root)						\
skb              3419 include/linux/skbuff.h 		for (skb = skb_rb_first(root); skb != NULL;			\
skb              3420 include/linux/skbuff.h 		     skb = skb_rb_next(skb))
skb              3422 include/linux/skbuff.h #define skb_rbtree_walk_from(skb)						\
skb              3423 include/linux/skbuff.h 		for (; skb != NULL;						\
skb              3424 include/linux/skbuff.h 		     skb = skb_rb_next(skb))
skb              3426 include/linux/skbuff.h #define skb_rbtree_walk_from_safe(skb, tmp)					\
skb              3427 include/linux/skbuff.h 		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL);	\
skb              3428 include/linux/skbuff.h 		     skb = tmp)
skb              3430 include/linux/skbuff.h #define skb_queue_walk_from_safe(queue, skb, tmp)				\
skb              3431 include/linux/skbuff.h 		for (tmp = skb->next;						\
skb              3432 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
skb              3433 include/linux/skbuff.h 		     skb = tmp, tmp = skb->next)
skb              3435 include/linux/skbuff.h #define skb_queue_reverse_walk(queue, skb) \
skb              3436 include/linux/skbuff.h 		for (skb = (queue)->prev;					\
skb              3437 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
skb              3438 include/linux/skbuff.h 		     skb = skb->prev)
skb              3440 include/linux/skbuff.h #define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
skb              3441 include/linux/skbuff.h 		for (skb = (queue)->prev, tmp = skb->prev;			\
skb              3442 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
skb              3443 include/linux/skbuff.h 		     skb = tmp, tmp = skb->prev)
skb              3445 include/linux/skbuff.h #define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
skb              3446 include/linux/skbuff.h 		for (tmp = skb->prev;						\
skb              3447 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
skb              3448 include/linux/skbuff.h 		     skb = tmp, tmp = skb->prev)
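
The _safe variants cache the successor before the loop body runs, so the current skb may be unlinked or freed mid-walk. A sketch that drops selected buffers from a queue, assuming the caller already holds list->lock (required for __skb_unlink); the zero-length test is purely illustrative:

	#include <linux/skbuff.h>

	static void drop_empty(struct sk_buff_head *list)
	{
		struct sk_buff *skb, *tmp;

		skb_queue_walk_safe(list, skb, tmp) {
			if (!skb->len) {		 /* illustrative condition */
				__skb_unlink(skb, list); /* safe: tmp already cached */
				kfree_skb(skb);
			}
		}
	}
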
skb              3450 include/linux/skbuff.h static inline bool skb_has_frag_list(const struct sk_buff *skb)
skb              3452 include/linux/skbuff.h 	return skb_shinfo(skb)->frag_list != NULL;
skb              3455 include/linux/skbuff.h static inline void skb_frag_list_init(struct sk_buff *skb)
skb              3457 include/linux/skbuff.h 	skb_shinfo(skb)->frag_list = NULL;
skb              3460 include/linux/skbuff.h #define skb_walk_frags(skb, iter)	\
skb              3461 include/linux/skbuff.h 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
skb              3465 include/linux/skbuff.h 				const struct sk_buff *skb);
skb              3470 include/linux/skbuff.h 							   struct sk_buff *skb),
skb              3475 include/linux/skbuff.h 							   struct sk_buff *skb),
skb              3480 include/linux/skbuff.h 						       struct sk_buff *skb),
skb              3493 include/linux/skbuff.h int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
skb              3495 include/linux/skbuff.h int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
skb              3498 include/linux/skbuff.h int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
skb              3500 include/linux/skbuff.h int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
skb              3501 include/linux/skbuff.h void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
skb              3502 include/linux/skbuff.h void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
skb              3504 include/linux/skbuff.h static inline void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
skb              3506 include/linux/skbuff.h 	__skb_free_datagram_locked(sk, skb, 0);
skb              3508 include/linux/skbuff.h int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
skb              3509 include/linux/skbuff.h int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
skb              3510 include/linux/skbuff.h int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
skb              3511 include/linux/skbuff.h __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
skb              3513 include/linux/skbuff.h int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
skb              3516 include/linux/skbuff.h int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
skb              3518 include/linux/skbuff.h void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
skb              3522 include/linux/skbuff.h void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
skb              3523 include/linux/skbuff.h int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
skb              3524 include/linux/skbuff.h void skb_scrub_packet(struct sk_buff *skb, bool xnet);
skb              3525 include/linux/skbuff.h bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
skb              3526 include/linux/skbuff.h bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
skb              3527 include/linux/skbuff.h struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
skb              3528 include/linux/skbuff.h struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
skb              3529 include/linux/skbuff.h int skb_ensure_writable(struct sk_buff *skb, int write_len);
skb              3530 include/linux/skbuff.h int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
skb              3531 include/linux/skbuff.h int skb_vlan_pop(struct sk_buff *skb);
skb              3532 include/linux/skbuff.h int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
skb              3533 include/linux/skbuff.h int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
skb              3535 include/linux/skbuff.h int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
skb              3537 include/linux/skbuff.h int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
skb              3538 include/linux/skbuff.h int skb_mpls_dec_ttl(struct sk_buff *skb);
skb              3539 include/linux/skbuff.h struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
skb              3559 include/linux/skbuff.h __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
skb              3561 include/linux/skbuff.h __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
skb              3565 include/linux/skbuff.h __skb_header_pointer(const struct sk_buff *skb, int offset,
skb              3571 include/linux/skbuff.h 	if (!skb ||
skb              3572 include/linux/skbuff.h 	    skb_copy_bits(skb, offset, buffer, len) < 0)
skb              3579 include/linux/skbuff.h skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
skb              3581 include/linux/skbuff.h 	return __skb_header_pointer(skb, offset, len, skb->data,
skb              3582 include/linux/skbuff.h 				    skb_headlen(skb), buffer);
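
skb_header_pointer() returns a direct pointer when the requested span is linear and otherwise copies it into the caller's buffer, so the result is readable regardless of frag layout. A sketch of the canonical pattern, reading TCP ports at a caller-supplied transport offset:

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/tcp.h>

	static int read_ports(const struct sk_buff *skb, int thoff,
			      __be16 *sport, __be16 *dport)
	{
		struct tcphdr _th;
		const struct tcphdr *th;

		th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
		if (!th)
			return -EINVAL;	/* shorter than a minimal TCP header */
		*sport = th->source;
		*dport = th->dest;
		return 0;
	}
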
skb              3595 include/linux/skbuff.h static inline bool skb_needs_linearize(struct sk_buff *skb,
skb              3598 include/linux/skbuff.h 	return skb_is_nonlinear(skb) &&
skb              3599 include/linux/skbuff.h 	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
skb              3600 include/linux/skbuff.h 		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
skb              3603 include/linux/skbuff.h static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
skb              3607 include/linux/skbuff.h 	memcpy(to, skb->data, len);
skb              3610 include/linux/skbuff.h static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
skb              3614 include/linux/skbuff.h 	memcpy(to, skb->data + offset, len);
skb              3617 include/linux/skbuff.h static inline void skb_copy_to_linear_data(struct sk_buff *skb,
skb              3621 include/linux/skbuff.h 	memcpy(skb->data, from, len);
skb              3624 include/linux/skbuff.h static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
skb              3629 include/linux/skbuff.h 	memcpy(skb->data + offset, from, len);
skb              3634 include/linux/skbuff.h static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
skb              3636 include/linux/skbuff.h 	return skb->tstamp;
skb              3648 include/linux/skbuff.h static inline void skb_get_timestamp(const struct sk_buff *skb,
skb              3651 include/linux/skbuff.h 	*stamp = ns_to_kernel_old_timeval(skb->tstamp);
skb              3654 include/linux/skbuff.h static inline void skb_get_new_timestamp(const struct sk_buff *skb,
skb              3657 include/linux/skbuff.h 	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
skb              3663 include/linux/skbuff.h static inline void skb_get_timestampns(const struct sk_buff *skb,
skb              3666 include/linux/skbuff.h 	*stamp = ktime_to_timespec(skb->tstamp);
skb              3669 include/linux/skbuff.h static inline void skb_get_new_timestampns(const struct sk_buff *skb,
skb              3672 include/linux/skbuff.h 	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
skb              3678 include/linux/skbuff.h static inline void __net_timestamp(struct sk_buff *skb)
skb              3680 include/linux/skbuff.h 	skb->tstamp = ktime_get_real();
skb              3693 include/linux/skbuff.h static inline u8 skb_metadata_len(const struct sk_buff *skb)
skb              3695 include/linux/skbuff.h 	return skb_shinfo(skb)->meta_len;
skb              3698 include/linux/skbuff.h static inline void *skb_metadata_end(const struct sk_buff *skb)
skb              3700 include/linux/skbuff.h 	return skb_mac_header(skb);
skb              3752 include/linux/skbuff.h static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
skb              3754 include/linux/skbuff.h 	skb_shinfo(skb)->meta_len = meta_len;
skb              3757 include/linux/skbuff.h static inline void skb_metadata_clear(struct sk_buff *skb)
skb              3759 include/linux/skbuff.h 	skb_metadata_set(skb, 0);
skb              3762 include/linux/skbuff.h struct sk_buff *skb_clone_sk(struct sk_buff *skb);
skb              3766 include/linux/skbuff.h void skb_clone_tx_timestamp(struct sk_buff *skb);
skb              3767 include/linux/skbuff.h bool skb_defer_rx_timestamp(struct sk_buff *skb);
skb              3771 include/linux/skbuff.h static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
skb              3775 include/linux/skbuff.h static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
skb              3794 include/linux/skbuff.h void skb_complete_tx_timestamp(struct sk_buff *skb,
skb              3827 include/linux/skbuff.h static inline void skb_tx_timestamp(struct sk_buff *skb)
skb              3829 include/linux/skbuff.h 	skb_clone_tx_timestamp(skb);
skb              3830 include/linux/skbuff.h 	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
skb              3831 include/linux/skbuff.h 		skb_tstamp_tx(skb, NULL);
skb              3841 include/linux/skbuff.h void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
skb              3843 include/linux/skbuff.h __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
skb              3844 include/linux/skbuff.h __sum16 __skb_checksum_complete(struct sk_buff *skb);
skb              3846 include/linux/skbuff.h static inline int skb_csum_unnecessary(const struct sk_buff *skb)
skb              3848 include/linux/skbuff.h 	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
skb              3849 include/linux/skbuff.h 		skb->csum_valid ||
skb              3850 include/linux/skbuff.h 		(skb->ip_summed == CHECKSUM_PARTIAL &&
skb              3851 include/linux/skbuff.h 		 skb_checksum_start_offset(skb) >= 0));
skb              3870 include/linux/skbuff.h static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
skb              3872 include/linux/skbuff.h 	return skb_csum_unnecessary(skb) ?
skb              3873 include/linux/skbuff.h 	       0 : __skb_checksum_complete(skb);
skb              3876 include/linux/skbuff.h static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
skb              3878 include/linux/skbuff.h 	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
skb              3879 include/linux/skbuff.h 		if (skb->csum_level == 0)
skb              3880 include/linux/skbuff.h 			skb->ip_summed = CHECKSUM_NONE;
skb              3882 include/linux/skbuff.h 			skb->csum_level--;
skb              3886 include/linux/skbuff.h static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
skb              3888 include/linux/skbuff.h 	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
skb              3889 include/linux/skbuff.h 		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
skb              3890 include/linux/skbuff.h 			skb->csum_level++;
skb              3891 include/linux/skbuff.h 	} else if (skb->ip_summed == CHECKSUM_NONE) {
skb              3892 include/linux/skbuff.h 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3893 include/linux/skbuff.h 		skb->csum_level = 0;
skb              3902 include/linux/skbuff.h static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
skb              3906 include/linux/skbuff.h 	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
skb              3907 include/linux/skbuff.h 		skb->csum_valid = 1;
skb              3908 include/linux/skbuff.h 		__skb_decr_checksum_unnecessary(skb);
skb              3926 include/linux/skbuff.h static inline void skb_checksum_complete_unset(struct sk_buff *skb)
skb              3928 include/linux/skbuff.h 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb              3929 include/linux/skbuff.h 		skb->ip_summed = CHECKSUM_NONE;
skb              3941 include/linux/skbuff.h static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
skb              3945 include/linux/skbuff.h 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb              3946 include/linux/skbuff.h 		if (!csum_fold(csum_add(psum, skb->csum))) {
skb              3947 include/linux/skbuff.h 			skb->csum_valid = 1;
skb              3952 include/linux/skbuff.h 	skb->csum = psum;
skb              3954 include/linux/skbuff.h 	if (complete || skb->len <= CHECKSUM_BREAK) {
skb              3957 include/linux/skbuff.h 		csum = __skb_checksum_complete(skb);
skb              3958 include/linux/skbuff.h 		skb->csum_valid = !csum;
skb              3965 include/linux/skbuff.h static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
skb              3980 include/linux/skbuff.h #define __skb_checksum_validate(skb, proto, complete,			\
skb              3984 include/linux/skbuff.h 	skb->csum_valid = 0;						\
skb              3985 include/linux/skbuff.h 	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
skb              3986 include/linux/skbuff.h 		__ret = __skb_checksum_validate_complete(skb,		\
skb              3987 include/linux/skbuff.h 				complete, compute_pseudo(skb, proto));	\
skb              3991 include/linux/skbuff.h #define skb_checksum_init(skb, proto, compute_pseudo)			\
skb              3992 include/linux/skbuff.h 	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
skb              3994 include/linux/skbuff.h #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)	\
skb              3995 include/linux/skbuff.h 	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
skb              3997 include/linux/skbuff.h #define skb_checksum_validate(skb, proto, compute_pseudo)		\
skb              3998 include/linux/skbuff.h 	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
skb              4000 include/linux/skbuff.h #define skb_checksum_validate_zero_check(skb, proto, check,		\
skb              4002 include/linux/skbuff.h 	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
skb              4004 include/linux/skbuff.h #define skb_checksum_simple_validate(skb)				\
skb              4005 include/linux/skbuff.h 	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
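
These macros tie together the validate-needed and validate-complete steps above: csum_valid is cleared, the CHECKSUM_UNNECESSARY (and optional zero-checksum) fast paths are taken, and otherwise the pseudo-header sum is folded against skb->csum or a full software checksum is computed. A sketch of how a protocol with no pseudo-header might consume the simple variant; the error handling is illustrative:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	static int my_proto_rcv(struct sk_buff *skb)
	{
		if (skb_checksum_simple_validate(skb))
			goto csum_error;	/* non-zero fold: checksum is bad */
		/* ...deliver the packet... */
		return 0;
	csum_error:
		kfree_skb(skb);
		return -EINVAL;
	}
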
skb              4007 include/linux/skbuff.h static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
skb              4009 include/linux/skbuff.h 	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
skb              4012 include/linux/skbuff.h static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
skb              4014 include/linux/skbuff.h 	skb->csum = ~pseudo;
skb              4015 include/linux/skbuff.h 	skb->ip_summed = CHECKSUM_COMPLETE;
skb              4018 include/linux/skbuff.h #define skb_checksum_try_convert(skb, proto, compute_pseudo)	\
skb              4020 include/linux/skbuff.h 	if (__skb_checksum_convert_check(skb))				\
skb              4021 include/linux/skbuff.h 		__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
skb              4024 include/linux/skbuff.h static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
skb              4027 include/linux/skbuff.h 	skb->ip_summed = CHECKSUM_PARTIAL;
skb              4028 include/linux/skbuff.h 	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
skb              4029 include/linux/skbuff.h 	skb->csum_offset = offset - start;
skb              4037 include/linux/skbuff.h static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
skb              4043 include/linux/skbuff.h 		skb_remcsum_adjust_partial(skb, ptr, start, offset);
skb              4047 include/linux/skbuff.h 	 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
skb              4048 include/linux/skbuff.h 		__skb_checksum_complete(skb);
skb              4049 include/linux/skbuff.h 		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
skb              4052 include/linux/skbuff.h 	delta = remcsum_adjust(ptr, skb->csum, start, offset);
skb              4055 include/linux/skbuff.h 	skb->csum = csum_add(skb->csum, delta);
skb              4058 include/linux/skbuff.h static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
skb              4061 include/linux/skbuff.h 	return (void *)(skb->_nfct & NFCT_PTRMASK);
skb              4067 include/linux/skbuff.h static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
skb              4070 include/linux/skbuff.h 	return skb->_nfct;
skb              4076 include/linux/skbuff.h static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
skb              4079 include/linux/skbuff.h 	skb->_nfct = nfct;
skb              4114 include/linux/skbuff.h void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
skb              4115 include/linux/skbuff.h void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
skb              4118 include/linux/skbuff.h static inline void skb_ext_put(struct sk_buff *skb)
skb              4120 include/linux/skbuff.h 	if (skb->active_extensions)
skb              4121 include/linux/skbuff.h 		__skb_ext_put(skb->extensions);
skb              4148 include/linux/skbuff.h static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
skb              4150 include/linux/skbuff.h 	return skb->active_extensions & (1 << id);
skb              4153 include/linux/skbuff.h static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
skb              4155 include/linux/skbuff.h 	if (skb_ext_exist(skb, id))
skb              4156 include/linux/skbuff.h 		__skb_ext_del(skb, id);
skb              4159 include/linux/skbuff.h static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
skb              4161 include/linux/skbuff.h 	if (skb_ext_exist(skb, id)) {
skb              4162 include/linux/skbuff.h 		struct skb_ext *ext = skb->extensions;
skb              4170 include/linux/skbuff.h static inline void skb_ext_reset(struct sk_buff *skb)
skb              4172 include/linux/skbuff.h 	if (unlikely(skb->active_extensions)) {
skb              4173 include/linux/skbuff.h 		__skb_ext_put(skb->extensions);
skb              4174 include/linux/skbuff.h 		skb->active_extensions = 0;
skb              4178 include/linux/skbuff.h static inline bool skb_has_extensions(struct sk_buff *skb)
skb              4180 include/linux/skbuff.h 	return unlikely(skb->active_extensions);
skb              4183 include/linux/skbuff.h static inline void skb_ext_put(struct sk_buff *skb) {}
skb              4184 include/linux/skbuff.h static inline void skb_ext_reset(struct sk_buff *skb) {}
skb              4185 include/linux/skbuff.h static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
skb              4188 include/linux/skbuff.h static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
skb              4191 include/linux/skbuff.h static inline void nf_reset_ct(struct sk_buff *skb)
skb              4194 include/linux/skbuff.h 	nf_conntrack_put(skb_nfct(skb));
skb              4195 include/linux/skbuff.h 	skb->_nfct = 0;
skb              4199 include/linux/skbuff.h static inline void nf_reset_trace(struct sk_buff *skb)
skb              4202 include/linux/skbuff.h 	skb->nf_trace = 0;
skb              4206 include/linux/skbuff.h static inline void ipvs_reset(struct sk_buff *skb)
skb              4209 include/linux/skbuff.h 	skb->ipvs_property = 0;
skb              4241 include/linux/skbuff.h static inline void skb_init_secmark(struct sk_buff *skb)
skb              4243 include/linux/skbuff.h 	skb->secmark = 0;
skb              4249 include/linux/skbuff.h static inline void skb_init_secmark(struct sk_buff *skb)
skb              4253 include/linux/skbuff.h static inline int secpath_exists(const struct sk_buff *skb)
skb              4256 include/linux/skbuff.h 	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
skb              4262 include/linux/skbuff.h static inline bool skb_irq_freeable(const struct sk_buff *skb)
skb              4264 include/linux/skbuff.h 	return !skb->destructor &&
skb              4265 include/linux/skbuff.h 		!secpath_exists(skb) &&
skb              4266 include/linux/skbuff.h 		!skb_nfct(skb) &&
skb              4267 include/linux/skbuff.h 		!skb->_skb_refdst &&
skb              4268 include/linux/skbuff.h 		!skb_has_frag_list(skb);
skb              4271 include/linux/skbuff.h static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
skb              4273 include/linux/skbuff.h 	skb->queue_mapping = queue_mapping;
skb              4276 include/linux/skbuff.h static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
skb              4278 include/linux/skbuff.h 	return skb->queue_mapping;
skb              4286 include/linux/skbuff.h static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
skb              4288 include/linux/skbuff.h 	skb->queue_mapping = rx_queue + 1;
skb              4291 include/linux/skbuff.h static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
skb              4293 include/linux/skbuff.h 	return skb->queue_mapping - 1;
skb              4296 include/linux/skbuff.h static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
skb              4298 include/linux/skbuff.h 	return skb->queue_mapping != 0;
skb              4301 include/linux/skbuff.h static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
skb              4303 include/linux/skbuff.h 	skb->dst_pending_confirm = val;
skb              4306 include/linux/skbuff.h static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
skb              4308 include/linux/skbuff.h 	return skb->dst_pending_confirm != 0;
skb              4311 include/linux/skbuff.h static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
skb              4314 include/linux/skbuff.h 	return skb_ext_find(skb, SKB_EXT_SEC_PATH);
skb              4336 include/linux/skbuff.h #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
skb              4344 include/linux/skbuff.h static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
skb              4349 include/linux/skbuff.h 	headroom = skb_headroom(skb);
skb              4350 include/linux/skbuff.h 	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
skb              4354 include/linux/skbuff.h 	new_headroom = skb_headroom(skb);
skb              4355 include/linux/skbuff.h 	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
skb              4359 include/linux/skbuff.h static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
skb              4362 include/linux/skbuff.h 	if (skb->remcsum_offload)
skb              4365 include/linux/skbuff.h 	SKB_GSO_CB(skb)->csum = res;
skb              4366 include/linux/skbuff.h 	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
skb              4377 include/linux/skbuff.h static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
skb              4379 include/linux/skbuff.h 	unsigned char *csum_start = skb_transport_header(skb);
skb              4380 include/linux/skbuff.h 	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
skb              4381 include/linux/skbuff.h 	__wsum partial = SKB_GSO_CB(skb)->csum;
skb              4383 include/linux/skbuff.h 	SKB_GSO_CB(skb)->csum = res;
skb              4384 include/linux/skbuff.h 	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
skb              4389 include/linux/skbuff.h static inline bool skb_is_gso(const struct sk_buff *skb)
skb              4391 include/linux/skbuff.h 	return skb_shinfo(skb)->gso_size;
skb              4395 include/linux/skbuff.h static inline bool skb_is_gso_v6(const struct sk_buff *skb)
skb              4397 include/linux/skbuff.h 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
skb              4401 include/linux/skbuff.h static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
skb              4403 include/linux/skbuff.h 	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
skb              4407 include/linux/skbuff.h static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
skb              4409 include/linux/skbuff.h 	return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
skb              4412 include/linux/skbuff.h static inline void skb_gso_reset(struct sk_buff *skb)
skb              4414 include/linux/skbuff.h 	skb_shinfo(skb)->gso_size = 0;
skb              4415 include/linux/skbuff.h 	skb_shinfo(skb)->gso_segs = 0;
skb              4416 include/linux/skbuff.h 	skb_shinfo(skb)->gso_type = 0;
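
skb_is_gso() keys off gso_size alone, which is non-zero exactly when the stack still expects segmentation to happen. A sketch of a transmit-path capability check; hw_supports_tso stands in for a real feature/protocol test:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	static int my_xmit_prep(struct sk_buff *skb, bool hw_supports_tso)
	{
		if (skb_is_gso(skb) && !hw_supports_tso)
			return -EOPNOTSUPP;	/* caller must fall back to software GSO */
		return 0;
	}
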
skb              4435 include/linux/skbuff.h void __skb_warn_lro_forwarding(const struct sk_buff *skb);
skb              4437 include/linux/skbuff.h static inline bool skb_warn_if_lro(const struct sk_buff *skb)
skb              4441 include/linux/skbuff.h 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              4443 include/linux/skbuff.h 	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
skb              4445 include/linux/skbuff.h 		__skb_warn_lro_forwarding(skb);
skb              4451 include/linux/skbuff.h static inline void skb_forward_csum(struct sk_buff *skb)
skb              4454 include/linux/skbuff.h 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb              4455 include/linux/skbuff.h 		skb->ip_summed = CHECKSUM_NONE;
skb              4466 include/linux/skbuff.h static inline void skb_checksum_none_assert(const struct sk_buff *skb)
skb              4469 include/linux/skbuff.h 	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
skb              4473 include/linux/skbuff.h bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
skb              4475 include/linux/skbuff.h int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
skb              4476 include/linux/skbuff.h struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
skb              4478 include/linux/skbuff.h 				     __sum16(*skb_chkf)(struct sk_buff *skb));
skb              4489 include/linux/skbuff.h static inline bool skb_head_is_locked(const struct sk_buff *skb)
skb              4491 include/linux/skbuff.h 	return !skb->head_frag || skb_cloned(skb);
skb              4503 include/linux/skbuff.h static inline __wsum lco_csum(struct sk_buff *skb)
skb              4505 include/linux/skbuff.h 	unsigned char *csum_start = skb_checksum_start(skb);
skb              4506 include/linux/skbuff.h 	unsigned char *l4_hdr = skb_transport_header(skb);
skb              4511 include/linux/skbuff.h 						    skb->csum_offset));
skb              4519 include/linux/skbuff.h static inline bool skb_is_redirected(const struct sk_buff *skb)
skb              4522 include/linux/skbuff.h 	return skb->redirected;
skb              4528 include/linux/skbuff.h static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
skb              4531 include/linux/skbuff.h 	skb->redirected = 1;
skb              4532 include/linux/skbuff.h 	skb->from_ingress = from_ingress;
skb              4533 include/linux/skbuff.h 	if (skb->from_ingress)
skb              4534 include/linux/skbuff.h 		skb->tstamp = 0;
skb              4538 include/linux/skbuff.h static inline void skb_reset_redirect(struct sk_buff *skb)
skb              4541 include/linux/skbuff.h 	skb->redirected = 0;
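
Illustrative only: the lifecycle of the redirect flag set and cleared by the helpers above, assuming a kernel configured with CONFIG_NET_REDIRECT so they are not compiled out.

#include <linux/skbuff.h>

static void example_redirect_cycle(struct sk_buff *skb)
{
	skb_set_redirected(skb, true);		/* ingress redirect also zeroes tstamp */

	if (skb_is_redirected(skb))
		skb_reset_redirect(skb);	/* clear before normal delivery */
}
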
skb                50 include/linux/skmsg.h 	struct sk_buff			*skb;
skb                79 include/linux/skmsg.h 	struct sk_buff			*skb;
skb                17 include/linux/sock_diag.h 	int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
skb                18 include/linux/sock_diag.h 	int (*get_info)(struct sk_buff *skb, struct sock *sk);
skb                19 include/linux/sock_diag.h 	int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh);
skb                25 include/linux/sock_diag.h void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
skb                26 include/linux/sock_diag.h void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
skb                32 include/linux/sock_diag.h int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
skb                34 include/linux/sock_diag.h 			     struct sk_buff *skb, int attrtype);
skb               198 include/linux/sunrpc/xdr.h 	struct sk_buff	*skb;
skb                24 include/linux/tcp.h static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
skb                26 include/linux/tcp.h 	return (struct tcphdr *)skb_transport_header(skb);
skb                34 include/linux/tcp.h static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
skb                36 include/linux/tcp.h 	return __tcp_hdrlen(tcp_hdr(skb));
skb                39 include/linux/tcp.h static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
skb                41 include/linux/tcp.h 	return (struct tcphdr *)skb_inner_transport_header(skb);
skb                44 include/linux/tcp.h static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
skb                46 include/linux/tcp.h 	return inner_tcp_hdr(skb)->doff * 4;
skb                49 include/linux/tcp.h static inline unsigned int tcp_optlen(const struct sk_buff *skb)
skb                51 include/linux/tcp.h 	return (tcp_hdr(skb)->doff - 5) * 4;
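
A sketch of reading TCP fields through the accessors above; it assumes the transport header offset is already set and the full header is linear (e.g. after pskb_may_pull()).

#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>

static __be16 example_tcp_dport(const struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);

	/* header length in bytes (doff * 4) and option bytes beyond 20 */
	pr_debug("hdrlen=%u optlen=%u\n", tcp_hdrlen(skb), tcp_optlen(skb));
	return th->dest;
}
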
skb                62 include/linux/ti_wilink_st.h 	long (*write) (struct sk_buff *skb);
skb                21 include/linux/udp.h static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
skb                23 include/linux/udp.h 	return (struct udphdr *)skb_transport_header(skb);
skb                26 include/linux/udp.h static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb)
skb                28 include/linux/udp.h 	return (struct udphdr *)skb_inner_transport_header(skb);
skb                75 include/linux/udp.h 	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
skb                76 include/linux/udp.h 	int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
skb                82 include/linux/udp.h 					       struct sk_buff *skb);
skb                84 include/linux/udp.h 						struct sk_buff *skb,
skb               122 include/linux/udp.h 				 struct sk_buff *skb)
skb               126 include/linux/udp.h 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
skb               127 include/linux/udp.h 		gso_size = skb_shinfo(skb)->gso_size;
skb               132 include/linux/udp.h static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
skb               134 include/linux/udp.h 	return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
skb               135 include/linux/udp.h 	       skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
skb               150 include/linux/usb/cdc_ncm.h struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
skb               154 include/linux/usb/cdc_ncm.h cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
skb               207 include/linux/usb/rndis_host.h extern int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb);
skb               209 include/linux/usb/rndis_host.h rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
skb               150 include/linux/usb/usbnet.h 	int	(*rx_fixup)(struct usbnet *dev, struct sk_buff *skb);
skb               154 include/linux/usb/usbnet.h 				struct sk_buff *skb, gfp_t flags);
skb               243 include/linux/usb/usbnet.h usbnet_set_skb_tx_stats(struct sk_buff *skb,
skb               246 include/linux/usb/usbnet.h 	struct skb_data *entry = (struct skb_data *) skb->cb;
skb               254 include/linux/usb/usbnet.h extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
skb                10 include/linux/virtio_net.h static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
skb                16 include/linux/virtio_net.h 		skb->protocol = cpu_to_be16(ETH_P_IP);
skb                19 include/linux/virtio_net.h 		skb->protocol = cpu_to_be16(ETH_P_IPV6);
skb                28 include/linux/virtio_net.h static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
skb                69 include/linux/virtio_net.h 		if (!skb_partial_csum_set(skb, start, off))
skb                72 include/linux/virtio_net.h 		p_off = skb_transport_offset(skb) + thlen;
skb                73 include/linux/virtio_net.h 		if (p_off > skb_headlen(skb))
skb                79 include/linux/virtio_net.h 		if (gso_type && skb->network_header) {
skb                82 include/linux/virtio_net.h 			if (!skb->protocol)
skb                83 include/linux/virtio_net.h 				virtio_net_hdr_set_proto(skb, hdr);
skb                85 include/linux/virtio_net.h 			if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
skb                90 include/linux/virtio_net.h 				    skb->protocol == htons(ETH_P_IP)) {
skb                91 include/linux/virtio_net.h 					skb->protocol = htons(ETH_P_IPV6);
skb                98 include/linux/virtio_net.h 			if (p_off > skb_headlen(skb) ||
skb               102 include/linux/virtio_net.h 			skb_set_transport_header(skb, keys.control.thoff);
skb               105 include/linux/virtio_net.h 			if (p_off > skb_headlen(skb))
skb               112 include/linux/virtio_net.h 		struct skb_shared_info *shinfo = skb_shinfo(skb);
skb               115 include/linux/virtio_net.h 		if (skb->len - p_off > gso_size) {
skb               128 include/linux/virtio_net.h static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
skb               136 include/linux/virtio_net.h 	if (skb_is_gso(skb)) {
skb               137 include/linux/virtio_net.h 		struct skb_shared_info *sinfo = skb_shinfo(skb);
skb               141 include/linux/virtio_net.h 						 skb_headlen(skb));
skb               155 include/linux/virtio_net.h 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               158 include/linux/virtio_net.h 			skb_checksum_start_offset(skb) + vlan_hlen);
skb               160 include/linux/virtio_net.h 				skb->csum_offset);
skb               162 include/linux/virtio_net.h 		   skb->ip_summed == CHECKSUM_UNNECESSARY) {
skb               179 include/net/6lowpan.h struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
skb               181 include/net/6lowpan.h 	BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(skb->cb));
skb               182 include/net/6lowpan.h 	return (struct lowpan_802154_cb *)skb->cb;
skb               260 include/net/6lowpan.h static inline bool lowpan_fetch_skb(struct sk_buff *skb, void *data,
skb               263 include/net/6lowpan.h 	if (unlikely(!pskb_may_pull(skb, len)))
skb               266 include/net/6lowpan.h 	skb_copy_from_linear_data(skb, data, len);
skb               267 include/net/6lowpan.h 	skb_pull(skb, len);
skb               308 include/net/6lowpan.h int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
skb               327 include/net/6lowpan.h int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
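
A sketch of consuming a one-byte 6LoWPAN dispatch value with lowpan_fetch_skb() above; it returns true on failure and leaves the skb untouched, so the caller can simply bail out.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/6lowpan.h>

static int example_fetch_dispatch(struct sk_buff *skb, u8 *dispatch)
{
	if (lowpan_fetch_skb(skb, dispatch, sizeof(*dispatch)))
		return -EINVAL;		/* frame too short */
	return 0;			/* skb->data now points past the byte */
}
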
skb               151 include/net/act_api.h int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
skb               175 include/net/act_api.h int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
skb               186 include/net/act_api.h int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
skb               188 include/net/act_api.h int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
skb               189 include/net/act_api.h int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
skb               205 include/net/addrconf.h static inline int ipv6_mc_may_pull(struct sk_buff *skb,
skb               208 include/net/addrconf.h 	if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
skb               211 include/net/addrconf.h 	return pskb_may_pull(skb, len);
skb               232 include/net/addrconf.h int ipv6_mc_check_icmpv6(struct sk_buff *skb);
skb               233 include/net/addrconf.h int ipv6_mc_check_mld(struct sk_buff *skb);
skb               234 include/net/addrconf.h void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
skb               244 include/net/addrconf.h static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset)
skb               249 include/net/addrconf.h 	    !pskb_network_may_pull(skb, offset + sizeof(struct icmp6hdr)))
skb               252 include/net/addrconf.h 	hdr = (struct icmp6hdr *)(skb_network_header(skb) + offset);
skb               321 include/net/addrconf.h 						    const struct sk_buff *skb)
skb               324 include/net/addrconf.h 		dev = dev_get_by_index_rcu(dev_net(dev), inet6_iif(skb));
skb                13 include/net/af_unix.h void unix_destruct_scm(struct sk_buff *skb);
skb                44 include/net/af_unix.h #define UNIXCB(skb)	(*(struct unix_skb_parms *)&((skb)->cb))
skb                18 include/net/ah.h static inline struct ip_auth_hdr *ip_auth_hdr(const struct sk_buff *skb)
skb                20 include/net/ah.h 	return (struct ip_auth_hdr *)skb_transport_header(skb);
skb                80 include/net/arp.h void arp_xmit(struct sk_buff *skb);
skb                30 include/net/atmclip.h 	void (*old_push)(struct atm_vcc *vcc,struct sk_buff *skb);
skb                32 include/net/atmclip.h 	void (*old_pop)(struct atm_vcc *vcc,struct sk_buff *skb);
skb               293 include/net/ax25.h static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
skb               295 include/net/ax25.h 	skb->dev      = dev;
skb               296 include/net/ax25.h 	skb_reset_mac_header(skb);
skb               297 include/net/ax25.h 	skb->pkt_type = PACKET_HOST;
skb               400 include/net/ax25.h netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
skb               409 include/net/ax25.h void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev);
skb                34 include/net/ax88796.h 			struct sk_buff *skb, int ring_offset);
skb               304 include/net/bluetooth/bluetooth.h 				       u16 opcode, struct sk_buff *skb);
skb               329 include/net/bluetooth/bluetooth.h #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
skb               331 include/net/bluetooth/bluetooth.h #define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
skb               332 include/net/bluetooth/bluetooth.h #define hci_skb_expect(skb) bt_cb((skb))->expect
skb               333 include/net/bluetooth/bluetooth.h #define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
skb               337 include/net/bluetooth/bluetooth.h 	struct sk_buff *skb;
skb               339 include/net/bluetooth/bluetooth.h 	skb = alloc_skb(len + BT_SKB_RESERVE, how);
skb               340 include/net/bluetooth/bluetooth.h 	if (skb)
skb               341 include/net/bluetooth/bluetooth.h 		skb_reserve(skb, BT_SKB_RESERVE);
skb               342 include/net/bluetooth/bluetooth.h 	return skb;
skb               348 include/net/bluetooth/bluetooth.h 	struct sk_buff *skb;
skb               350 include/net/bluetooth/bluetooth.h 	skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
skb               351 include/net/bluetooth/bluetooth.h 	if (skb)
skb               352 include/net/bluetooth/bluetooth.h 		skb_reserve(skb, BT_SKB_RESERVE);
skb               354 include/net/bluetooth/bluetooth.h 	if (!skb && *err)
skb               366 include/net/bluetooth/bluetooth.h 	return skb;
skb               369 include/net/bluetooth/bluetooth.h 	kfree_skb(skb);
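
A sketch of allocating an outgoing HCI packet: bt_skb_alloc() above already reserves BT_SKB_RESERVE headroom for the transport to prepend its packet-type byte. The helper name is hypothetical.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>

static struct sk_buff *example_hci_skb(unsigned int plen)
{
	struct sk_buff *skb = bt_skb_alloc(plen, GFP_KERNEL);

	if (skb)
		hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;	/* bt_cb() field */
	return skb;
}
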
skb              2278 include/net/bluetooth/hci.h static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
skb              2280 include/net/bluetooth/hci.h 	return (struct hci_event_hdr *) skb->data;
skb              2283 include/net/bluetooth/hci.h static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb)
skb              2285 include/net/bluetooth/hci.h 	return (struct hci_acl_hdr *) skb->data;
skb              2288 include/net/bluetooth/hci.h static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
skb              2290 include/net/bluetooth/hci.h 	return (struct hci_sco_hdr *) skb->data;
skb               441 include/net/bluetooth/hci_core.h 	int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
skb               593 include/net/bluetooth/hci_core.h void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
skb               597 include/net/bluetooth/hci_core.h void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
skb               605 include/net/bluetooth/hci_core.h static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
skb              1061 include/net/bluetooth/hci_core.h int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
skb              1062 include/net/bluetooth/hci_core.h int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
skb              1147 include/net/bluetooth/hci_core.h void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
skb              1460 include/net/bluetooth/hci_core.h void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
skb              1461 include/net/bluetooth/hci_core.h void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
skb              1469 include/net/bluetooth/hci_core.h void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
skb              1470 include/net/bluetooth/hci_core.h void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
skb              1472 include/net/bluetooth/hci_core.h void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
skb               612 include/net/bluetooth/l2cap.h 					 struct sk_buff *skb);
skb               869 include/net/bluetooth/l2cap.h static inline int l2cap_chan_no_recv(struct l2cap_chan *chan, struct sk_buff *skb)
skb               198 include/net/bluetooth/rfcomm.h 	void (*data_ready)(struct rfcomm_dlc *d, struct sk_buff *skb);
skb               240 include/net/bluetooth/rfcomm.h int  rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb);
skb               241 include/net/bluetooth/rfcomm.h void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb);
skb               303 include/net/bond_3ad.h int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
skb               308 include/net/bond_3ad.h int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
skb               159 include/net/bond_alb.h int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
skb               160 include/net/bond_alb.h int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
skb               509 include/net/bonding.h 					 struct sk_buff *skb)
skb               514 include/net/bonding.h 		netpoll_send_skb(np, skb);
skb               518 include/net/bonding.h 					 struct sk_buff *skb)
skb               612 include/net/bonding.h int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
skb               613 include/net/bonding.h void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
skb               623 include/net/bonding.h u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
skb               746 include/net/bonding.h static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
skb               749 include/net/bonding.h 	dev_kfree_skb_any(skb);
skb               113 include/net/busy_poll.h static inline void skb_mark_napi_id(struct sk_buff *skb,
skb               117 include/net/busy_poll.h 	skb->napi_id = napi->napi_id;
skb               122 include/net/busy_poll.h static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
skb               125 include/net/busy_poll.h 	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
skb               127 include/net/busy_poll.h 	sk_rx_queue_set(sk, skb);
skb               132 include/net/busy_poll.h 					const struct sk_buff *skb)
skb               136 include/net/busy_poll.h 		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
skb                60 include/net/calipso.h bool calipso_validate(const struct sk_buff *skb, const unsigned char *option);
skb                70 include/net/calipso.h static inline bool calipso_validate(const struct sk_buff *skb,
skb              3773 include/net/cfg80211.h 	int	(*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb,
skb              4307 include/net/cfg80211.h 		      struct sk_buff *skb, const void *data, int data_len,
skb              5159 include/net/cfg80211.h unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
skb              5195 include/net/cfg80211.h int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
skb              5206 include/net/cfg80211.h static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
skb              5209 include/net/cfg80211.h 	return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0);
skb              5228 include/net/cfg80211.h void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
skb              5239 include/net/cfg80211.h unsigned int cfg80211_classify8021d(struct sk_buff *skb,
skb              6118 include/net/cfg80211.h void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp);
skb              6162 include/net/cfg80211.h int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
skb              6246 include/net/cfg80211.h static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp)
skb              6248 include/net/cfg80211.h 	__cfg80211_send_event_skb(skb, gfp);
skb              6305 include/net/cfg80211.h static inline int cfg80211_testmode_reply(struct sk_buff *skb)
skb              6307 include/net/cfg80211.h 	return cfg80211_vendor_cmd_reply(skb);
skb              6349 include/net/cfg80211.h static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
skb              6351 include/net/cfg80211.h 	__cfg80211_send_event_skb(skb, gfp);
skb              6805 include/net/cfg80211.h 			      struct sk_buff *skb, bool unencrypted);
skb               320 include/net/cfg802154.h 	int	(*create)(struct sk_buff *skb, struct net_device *dev,
skb               367 include/net/cfg802154.h wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
skb               374 include/net/cfg802154.h 	return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len);
skb               143 include/net/checksum.h void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
skb               145 include/net/checksum.h void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
skb               148 include/net/checksum.h void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
skb               151 include/net/checksum.h static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
skb               155 include/net/checksum.h 	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
skb               193 include/net/cipso_ipv4.h void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway);
skb               205 include/net/cipso_ipv4.h int cipso_v4_skbuff_setattr(struct sk_buff *skb,
skb               208 include/net/cipso_ipv4.h int cipso_v4_skbuff_delattr(struct sk_buff *skb);
skb               209 include/net/cipso_ipv4.h int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
skb               211 include/net/cipso_ipv4.h unsigned char *cipso_v4_optptr(const struct sk_buff *skb);
skb               212 include/net/cipso_ipv4.h int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option);
skb               214 include/net/cipso_ipv4.h static inline void cipso_v4_error(struct sk_buff *skb,
skb               256 include/net/cipso_ipv4.h static inline int cipso_v4_skbuff_setattr(struct sk_buff *skb,
skb               263 include/net/cipso_ipv4.h static inline int cipso_v4_skbuff_delattr(struct sk_buff *skb)
skb               268 include/net/cipso_ipv4.h static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
skb               274 include/net/cipso_ipv4.h static inline unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
skb               279 include/net/cipso_ipv4.h static inline int cipso_v4_validate(const struct sk_buff *skb,
skb                48 include/net/cls_cgroup.h static inline u32 task_get_classid(const struct sk_buff *skb)
skb                62 include/net/cls_cgroup.h 		struct sock *sk = skb_to_full_sk(skb);
skb                78 include/net/cls_cgroup.h static inline u32 task_get_classid(const struct sk_buff *skb)
skb               158 include/net/codel.h typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb);
skb               159 include/net/codel.h typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb);
skb               160 include/net/codel.h typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx);
skb               100 include/net/codel_impl.h static bool codel_should_drop(const struct sk_buff *skb,
skb               113 include/net/codel_impl.h 	if (!skb) {
skb               118 include/net/codel_impl.h 	skb_len = skb_len_func(skb);
skb               119 include/net/codel_impl.h 	vars->ldelay = now - skb_time_func(skb);
skb               152 include/net/codel_impl.h 	struct sk_buff *skb = dequeue_func(vars, ctx);
skb               156 include/net/codel_impl.h 	if (!skb) {
skb               158 include/net/codel_impl.h 		return skb;
skb               161 include/net/codel_impl.h 	drop = codel_should_drop(skb, ctx, vars, params, stats,
skb               182 include/net/codel_impl.h 				if (params->ecn && INET_ECN_set_ce(skb)) {
skb               190 include/net/codel_impl.h 				stats->drop_len += skb_len_func(skb);
skb               191 include/net/codel_impl.h 				drop_func(skb, ctx);
skb               193 include/net/codel_impl.h 				skb = dequeue_func(vars, ctx);
skb               194 include/net/codel_impl.h 				if (!codel_should_drop(skb, ctx,
skb               213 include/net/codel_impl.h 		if (params->ecn && INET_ECN_set_ce(skb)) {
skb               216 include/net/codel_impl.h 			stats->drop_len += skb_len_func(skb);
skb               217 include/net/codel_impl.h 			drop_func(skb, ctx);
skb               220 include/net/codel_impl.h 			skb = dequeue_func(vars, ctx);
skb               221 include/net/codel_impl.h 			drop = codel_should_drop(skb, ctx, vars, params,
skb               249 include/net/codel_impl.h 	if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
skb               250 include/net/codel_impl.h 	    INET_ECN_set_ce(skb))
skb               252 include/net/codel_impl.h 	return skb;
skb                58 include/net/codel_qdisc.h static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
skb                60 include/net/codel_qdisc.h 	qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
skb                61 include/net/codel_qdisc.h 	return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
skb                64 include/net/codel_qdisc.h static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
skb                66 include/net/codel_qdisc.h 	return get_codel_cb(skb)->enqueue_time;
skb                69 include/net/codel_qdisc.h static void codel_set_enqueue_time(struct sk_buff *skb)
skb                71 include/net/codel_qdisc.h 	get_codel_cb(skb)->enqueue_time = codel_get_time();
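
A sketch of the enqueue-side glue that sch_codel-style qdiscs provide: stamp the skb on arrival so codel_should_drop() can compute its sojourn time when codel_dequeue() later pulls it off the backing FIFO. Modelled on sch_codel, not verbatim.

#include <net/codel.h>
#include <net/codel_qdisc.h>
#include <net/sch_generic.h>

static int example_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	codel_set_enqueue_time(skb);		/* record arrival time in skb->cb */
	return qdisc_enqueue_tail(skb, sch);	/* plain FIFO behind CoDel */
}
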
skb               215 include/net/devlink.h 	struct sk_buff *skb;
skb               259 include/net/devlink.h 	int (*actions_dump)(void *priv, struct sk_buff *skb);
skb               260 include/net/devlink.h 	int (*matches_dump)(void *priv, struct sk_buff *skb);
skb               824 include/net/devlink.h int devlink_dpipe_action_put(struct sk_buff *skb,
skb               826 include/net/devlink.h int devlink_dpipe_match_put(struct sk_buff *skb,
skb               969 include/net/devlink.h 			 struct sk_buff *skb, void *trap_ctx,
skb               151 include/net/dn.h #define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb)
skb               206 include/net/dn.h struct sock *dn_find_by_skb(struct sk_buff *skb);
skb               160 include/net/dn_dev.h void dn_dev_init_pkt(struct sk_buff *skb);
skb               161 include/net/dn_dev.h void dn_dev_veri_pkt(struct sk_buff *skb);
skb               162 include/net/dn_dev.h void dn_dev_hello(struct sk_buff *skb);
skb                91 include/net/dn_fib.h 	int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
skb               129 include/net/dn_fib.h int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
skb                22 include/net/dn_neigh.h int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                23 include/net/dn_neigh.h int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                24 include/net/dn_neigh.h void dn_neigh_pointopoint_hello(struct sk_buff *skb);
skb                26 include/net/dn_neigh.h int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                16 include/net/dn_nsp.h void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
skb                22 include/net/dn_nsp.h int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
skb                24 include/net/dn_nsp.h void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
skb                30 include/net/dn_nsp.h int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
skb                13 include/net/dn_route.h int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
skb                15 include/net/dn_route.h int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
skb                97 include/net/dn_route.h static inline void dn_rt_send(struct sk_buff *skb)
skb                99 include/net/dn_route.h 	dev_queue_xmit(skb);
skb               102 include/net/dn_route.h static inline void dn_rt_finish_output(struct sk_buff *skb, char *dst, char *src)
skb               104 include/net/dn_route.h 	struct net_device *dev = skb->dev;
skb               109 include/net/dn_route.h 	if (dev_hard_header(skb, dev, ETH_P_DNA_RT, dst, src, skb->len) >= 0)
skb               110 include/net/dn_route.h 		dn_rt_send(skb);
skb               112 include/net/dn_route.h 		kfree_skb(skb);
skb                23 include/net/drop_monitor.h void net_dm_hw_report(struct sk_buff *skb,
skb                27 include/net/drop_monitor.h net_dm_hw_report(struct sk_buff *skb,
skb                68 include/net/dsa.h 	struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
skb                69 include/net/dsa.h 	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
skb                71 include/net/dsa.h 	int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
skb                77 include/net/dsa.h 	bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
skb                97 include/net/dsa.h #define __DSA_SKB_CB(skb) ((struct __dsa_skb_cb *)((skb)->cb))
skb                99 include/net/dsa.h #define DSA_SKB_CB(skb) ((struct dsa_skb_cb *)((skb)->cb))
skb               101 include/net/dsa.h #define DSA_SKB_CB_PRIV(skb)			\
skb               102 include/net/dsa.h 	((void *)(skb)->cb + offsetof(struct __dsa_skb_cb, priv))
skb               172 include/net/dsa.h 	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
skb               174 include/net/dsa.h 	bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
skb               539 include/net/dsa.h 				 struct sk_buff *skb, unsigned int type);
skb               545 include/net/dsa.h 					  struct sk_buff *skb);
skb               564 include/net/dsa.h static inline bool dsa_can_decode(const struct sk_buff *skb,
skb               568 include/net/dsa.h 	return !dev->dsa_ptr->filter || dev->dsa_ptr->filter(skb, dev);
skb               641 include/net/dsa.h netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
skb                35 include/net/dst.h 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               268 include/net/dst.h static inline void skb_dst_drop(struct sk_buff *skb)
skb               270 include/net/dst.h 	if (skb->_skb_refdst) {
skb               271 include/net/dst.h 		refdst_drop(skb->_skb_refdst);
skb               272 include/net/dst.h 		skb->_skb_refdst = 0UL;
skb               307 include/net/dst.h static inline bool skb_dst_force(struct sk_buff *skb)
skb               309 include/net/dst.h 	if (skb_dst_is_noref(skb)) {
skb               310 include/net/dst.h 		struct dst_entry *dst = skb_dst(skb);
skb               316 include/net/dst.h 		skb->_skb_refdst = (unsigned long)dst;
skb               319 include/net/dst.h 	return skb->_skb_refdst != 0UL;
skb               332 include/net/dst.h static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
skb               335 include/net/dst.h 	skb->dev = dev;
skb               342 include/net/dst.h 	skb_clear_hash_if_not_l4(skb);
skb               343 include/net/dst.h 	skb_set_queue_mapping(skb, 0);
skb               344 include/net/dst.h 	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
skb               357 include/net/dst.h static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
skb               362 include/net/dst.h 	dev->stats.rx_bytes += skb->len;
skb               363 include/net/dst.h 	__skb_tunnel_rx(skb, dev, net);
skb               366 include/net/dst.h static inline u32 dst_tclassid(const struct sk_buff *skb)
skb               371 include/net/dst.h 	dst = skb_dst(skb);
skb               378 include/net/dst.h int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               379 include/net/dst.h static inline int dst_discard(struct sk_buff *skb)
skb               381 include/net/dst.h 	return dst_discard_out(&init_net, skb->sk, skb);
skb               402 include/net/dst.h 						     struct sk_buff *skb)
skb               404 include/net/dst.h 	struct neighbour *n =  dst->ops->neigh_lookup(dst, skb, NULL);
skb               415 include/net/dst.h static inline void dst_link_failure(struct sk_buff *skb)
skb               417 include/net/dst.h 	struct dst_entry *dst = skb_dst(skb);
skb               419 include/net/dst.h 		dst->ops->link_failure(skb);
skb               434 include/net/dst.h static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               436 include/net/dst.h 	return skb_dst(skb)->output(net, sk, skb);
skb               440 include/net/dst.h static inline int dst_input(struct sk_buff *skb)
skb               442 include/net/dst.h 	return skb_dst(skb)->input(skb);
skb               514 include/net/dst.h static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
skb               516 include/net/dst.h 	struct dst_entry *dst = skb_dst(skb);
skb               519 include/net/dst.h 		dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
skb               523 include/net/dst.h static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
skb               525 include/net/dst.h 	struct dst_entry *dst = skb_dst(skb);
skb               528 include/net/dst.h 		dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
skb               531 include/net/dst.h static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
skb               537 include/net/dst.h 	if (skb->len > encap_mtu - headroom)
skb               538 include/net/dst.h 		skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
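
A sketch of the usual skb_dst_force() pattern: before parking an skb on a queue that outlives the current RCU read side, convert a noref dst into a refcounted one; a false return means the dst was already dead.

#include <net/dst.h>

static bool example_queue_with_dst(struct sk_buff *skb, struct sk_buff_head *q)
{
	if (!skb_dst_force(skb)) {
		kfree_skb(skb);		/* dst gone; drop the packet */
		return false;
	}
	skb_queue_tail(q, skb);
	return true;
}
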
skb                28 include/net/dst_metadata.h static inline struct metadata_dst *skb_metadata_dst(const struct sk_buff *skb)
skb                30 include/net/dst_metadata.h 	struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
skb                39 include/net/dst_metadata.h skb_tunnel_info(const struct sk_buff *skb)
skb                41 include/net/dst_metadata.h 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
skb                47 include/net/dst_metadata.h 	dst = skb_dst(skb);
skb                54 include/net/dst_metadata.h static inline bool skb_valid_dst(const struct sk_buff *skb)
skb                56 include/net/dst_metadata.h 	struct dst_entry *dst = skb_dst(skb);
skb               108 include/net/dst_metadata.h static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
skb               110 include/net/dst_metadata.h 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
skb               124 include/net/dst_metadata.h 	skb_dst_drop(skb);
skb               126 include/net/dst_metadata.h 	skb_dst_set(skb, &new_md->dst);
skb               130 include/net/dst_metadata.h static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
skb               134 include/net/dst_metadata.h 	dst = tun_dst_unclone(skb);
skb               161 include/net/dst_metadata.h static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
skb               166 include/net/dst_metadata.h 	const struct iphdr *iph = ip_hdr(skb);
skb               205 include/net/dst_metadata.h static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
skb               210 include/net/dst_metadata.h 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb                30 include/net/dst_ops.h 					       struct sk_buff *skb, u32 mtu,
skb                33 include/net/dst_ops.h 					    struct sk_buff *skb);
skb                34 include/net/dst_ops.h 	int			(*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                36 include/net/dst_ops.h 						struct sk_buff *skb,
skb               155 include/net/erspan.h static inline void erspan_build_header(struct sk_buff *skb,
skb               159 include/net/erspan.h 	struct ethhdr *eth = (struct ethhdr *)skb->data;
skb               170 include/net/erspan.h 	tos = is_ipv4 ? ip_hdr(skb)->tos :
skb               171 include/net/erspan.h 			(ipv6_hdr(skb)->priority << 4) +
skb               172 include/net/erspan.h 			(ipv6_hdr(skb)->flow_lbl[0] >> 4);
skb               180 include/net/erspan.h 		qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
skb               185 include/net/erspan.h 	skb_push(skb, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
skb               186 include/net/erspan.h 	ershdr = (struct erspan_base_hdr *)skb->data;
skb               235 include/net/erspan.h static inline u8 erspan_detect_bso(struct sk_buff *skb)
skb               240 include/net/erspan.h 	if (skb->len < ETH_ZLEN)
skb               243 include/net/erspan.h 	if (skb->len > ETH_FRAME_LEN)
skb               249 include/net/erspan.h static inline void erspan_build_header_v2(struct sk_buff *skb,
skb               253 include/net/erspan.h 	struct ethhdr *eth = (struct ethhdr *)skb->data;
skb               266 include/net/erspan.h 	tos = is_ipv4 ? ip_hdr(skb)->tos :
skb               267 include/net/erspan.h 			(ipv6_hdr(skb)->priority << 4) +
skb               268 include/net/erspan.h 			(ipv6_hdr(skb)->flow_lbl[0] >> 4);
skb               274 include/net/erspan.h 		qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
skb               278 include/net/erspan.h 	bso = erspan_detect_bso(skb);
skb               279 include/net/erspan.h 	skb_push(skb, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
skb               280 include/net/erspan.h 	ershdr = (struct erspan_base_hdr *)skb->data;
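
For reference, a sketch of the length-only classification erspan_detect_bso() above performs: frames under ETH_ZLEN (60 bytes) are "short" and frames over ETH_FRAME_LEN (1514 bytes) are "oversized", and the result ends up in the v2 header's BSO field. The helper name here is illustrative.

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static bool example_frame_needs_bso_mark(const struct sk_buff *skb)
{
	return skb->len < ETH_ZLEN || skb->len > ETH_FRAME_LEN;
}
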
skb                 9 include/net/esp.h static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
skb                11 include/net/esp.h 	return (struct ip_esp_hdr *)skb_transport_header(skb);
skb                27 include/net/esp.h int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
skb                28 include/net/esp.h int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
skb                29 include/net/esp.h int esp_input_done2(struct sk_buff *skb, int err);
skb                30 include/net/esp.h int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
skb                31 include/net/esp.h int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
skb                32 include/net/esp.h int esp6_input_done2(struct sk_buff *skb, int err);
skb               201 include/net/fib_rules.h int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               203 include/net/fib_rules.h int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               337 include/net/flow_dissector.h 	const struct sk_buff	*skb;
skb                15 include/net/fou.h int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb                17 include/net/fou.h int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb                16 include/net/fq_impl.h 			      struct sk_buff *skb)
skb                20 include/net/fq_impl.h 	tin->backlog_bytes -= skb->len;
skb                22 include/net/fq_impl.h 	flow->backlog -= skb->len;
skb                24 include/net/fq_impl.h 	fq->memory_usage -= skb->truesize;
skb                48 include/net/fq_impl.h 	struct sk_buff *skb;
skb                52 include/net/fq_impl.h 	skb = __skb_dequeue(&flow->queue);
skb                53 include/net/fq_impl.h 	if (!skb)
skb                56 include/net/fq_impl.h 	fq_adjust_removal(fq, flow, skb);
skb                59 include/net/fq_impl.h 	return skb;
skb                68 include/net/fq_impl.h 	struct sk_buff *skb;
skb                89 include/net/fq_impl.h 	skb = dequeue_func(fq, tin, flow);
skb                90 include/net/fq_impl.h 	if (!skb) {
skb               102 include/net/fq_impl.h 	flow->deficit -= skb->len;
skb               103 include/net/fq_impl.h 	tin->tx_bytes += skb->len;
skb               106 include/net/fq_impl.h 	return skb;
skb               109 include/net/fq_impl.h static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
skb               111 include/net/fq_impl.h 	u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
skb               118 include/net/fq_impl.h 					struct sk_buff *skb,
skb               127 include/net/fq_impl.h 		flow = get_default_func(fq, tin, idx, skb);
skb               158 include/net/fq_impl.h 			   struct sk_buff *skb,
skb               167 include/net/fq_impl.h 	flow = fq_flow_classify(fq, tin, idx, skb, get_default_func);
skb               170 include/net/fq_impl.h 	flow->backlog += skb->len;
skb               171 include/net/fq_impl.h 	tin->backlog_bytes += skb->len;
skb               173 include/net/fq_impl.h 	fq->memory_usage += skb->truesize;
skb               184 include/net/fq_impl.h 	__skb_queue_tail(&flow->queue, skb);
skb               193 include/net/fq_impl.h 		skb = fq_flow_dequeue(fq, flow);
skb               194 include/net/fq_impl.h 		if (!skb)
skb               197 include/net/fq_impl.h 		free_func(fq, flow->tin, flow, skb);
skb               215 include/net/fq_impl.h 	struct sk_buff *skb, *tmp;
skb               219 include/net/fq_impl.h 	skb_queue_walk_safe(&flow->queue, skb, tmp) {
skb               220 include/net/fq_impl.h 		if (!filter_func(fq, tin, flow, skb, filter_data))
skb               223 include/net/fq_impl.h 		__skb_unlink(skb, &flow->queue);
skb               224 include/net/fq_impl.h 		fq_adjust_removal(fq, flow, skb);
skb               225 include/net/fq_impl.h 		free_func(fq, tin, flow, skb);
skb               251 include/net/fq_impl.h 	struct sk_buff *skb;
skb               253 include/net/fq_impl.h 	while ((skb = fq_flow_dequeue(fq, flow)))
skb               254 include/net/fq_impl.h 		free_func(fq, flow->tin, flow, skb);
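
A sketch of the free_func callback that fq_tin_enqueue() and fq_flow_reset() above invoke for every skb they drop; mac80211's txq code uses essentially this shape.

#include <linux/skbuff.h>
#include <net/fq.h>
#include <net/fq_impl.h>

static void example_fq_free(struct fq *fq, struct fq_tin *tin,
			    struct fq_flow *flow, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);		/* safe from any context */
}
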
skb                37 include/net/garp.h static inline struct garp_skb_cb *garp_cb(struct sk_buff *skb)
skb                41 include/net/garp.h 	return (struct garp_skb_cb *)skb->cb;
skb                19 include/net/gen_stats.h 	struct sk_buff *  skb;
skb                31 include/net/gen_stats.h int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
skb                34 include/net/gen_stats.h int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
skb                62 include/net/genetlink.h 					    struct sk_buff *skb,
skb                65 include/net/genetlink.h 					     struct sk_buff *skb,
skb               141 include/net/genetlink.h 	int		       (*doit)(struct sk_buff *skb,
skb               144 include/net/genetlink.h 	int		       (*dumpit)(struct sk_buff *skb,
skb               155 include/net/genetlink.h void genl_notify(const struct genl_family *family, struct sk_buff *skb,
skb               158 include/net/genetlink.h void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
skb               236 include/net/genetlink.h static inline void *genlmsg_put_reply(struct sk_buff *skb,
skb               241 include/net/genetlink.h 	return genlmsg_put(skb, info->snd_portid, info->snd_seq, family,
skb               250 include/net/genetlink.h static inline void genlmsg_end(struct sk_buff *skb, void *hdr)
skb               252 include/net/genetlink.h 	nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
skb               260 include/net/genetlink.h static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
skb               263 include/net/genetlink.h 		nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
skb               276 include/net/genetlink.h 					  struct net *net, struct sk_buff *skb,
skb               282 include/net/genetlink.h 	return nlmsg_multicast(net->genl_sock, skb, portid, group, flags);
skb               294 include/net/genetlink.h 				    struct sk_buff *skb, u32 portid,
skb               297 include/net/genetlink.h 	return genlmsg_multicast_netns(family, &init_net, skb,
skb               312 include/net/genetlink.h 			    struct sk_buff *skb, u32 portid,
skb               320 include/net/genetlink.h static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 portid)
skb               322 include/net/genetlink.h 	return nlmsg_unicast(net->genl_sock, skb, portid);
skb               330 include/net/genetlink.h static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
skb               332 include/net/genetlink.h 	return genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
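
A sketch of the usual reply pattern built from the genlmsg_* helpers above; `example_family` and the payload layout are hypothetical.

#include <net/genetlink.h>

static struct genl_family example_family;	/* placeholder family */

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;

	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put_reply(msg, info, &example_family, 0,
				info->genlhdr->cmd);
	if (!hdr) {
		nlmsg_free(msg);
		return -EMSGSIZE;
	}
	/* ... nla_put_*() payload attributes here ... */
	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);
}
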
skb                28 include/net/gre.h 	int  (*handler)(struct sk_buff *skb);
skb                29 include/net/gre.h 	void (*err_handler)(struct sk_buff *skb, u32 info);
skb                37 include/net/gre.h int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
skb               109 include/net/gre.h static inline __sum16 gre_checksum(struct sk_buff *skb)
skb               113 include/net/gre.h 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               114 include/net/gre.h 		csum = lco_csum(skb);
skb               116 include/net/gre.h 		csum = skb_checksum(skb, 0, skb->len, 0);
skb               120 include/net/gre.h static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
skb               126 include/net/gre.h 	skb_push(skb, hdr_len);
skb               128 include/net/gre.h 	skb_set_inner_protocol(skb, proto);
skb               129 include/net/gre.h 	skb_reset_transport_header(skb);
skb               130 include/net/gre.h 	greh = (struct gre_base_hdr *)skb->data;
skb               146 include/net/gre.h 		    !(skb_shinfo(skb)->gso_type &
skb               149 include/net/gre.h 			*(__sum16 *)ptr = gre_checksum(skb);
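
A sketch, along the lines of ip_gre, of prepending a keyed GRE header before handing the frame to the tunnel xmit path; gre_build_header() above converts the TUNNEL_* flags and fills in the checksum when TUNNEL_CSUM is set. The wrapper name is illustrative.

#include <linux/if_ether.h>
#include <net/gre.h>
#include <net/ip_tunnels.h>

static void example_gre_push(struct sk_buff *skb, __be32 key)
{
	gre_build_header(skb, gre_calc_hlen(TUNNEL_KEY), TUNNEL_KEY,
			 htons(ETH_P_TEB), key, 0);
}
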
skb                15 include/net/gro_cells.h int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb);
skb                46 include/net/icmp.h int icmp_rcv(struct sk_buff *skb);
skb                47 include/net/icmp.h int icmp_err(struct sk_buff *skb, u32 info);
skb                85 include/net/ieee802154_netdev.h int ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr);
skb                90 include/net/ieee802154_netdev.h int ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr);
skb                96 include/net/ieee802154_netdev.h int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
skb               103 include/net/ieee802154_netdev.h int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
skb               127 include/net/ieee802154_netdev.h static inline int ieee802154_hdr_length(struct sk_buff *skb)
skb               130 include/net/ieee802154_netdev.h 	int len = ieee802154_hdr_pull(skb, &hdr);
skb               133 include/net/ieee802154_netdev.h 		skb_push(skb, len);
skb               216 include/net/ieee802154_netdev.h static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb)
skb               218 include/net/ieee802154_netdev.h 	return (struct ieee802154_mac_cb *)skb->cb;
skb               221 include/net/ieee802154_netdev.h static inline struct ieee802154_mac_cb *mac_cb_init(struct sk_buff *skb)
skb               223 include/net/ieee802154_netdev.h 	BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
skb               225 include/net/ieee802154_netdev.h 	memset(skb->cb, 0, sizeof(struct ieee802154_mac_cb));
skb               226 include/net/ieee802154_netdev.h 	return mac_cb(skb);
skb                11 include/net/ife.h void *ife_encode(struct sk_buff *skb, u16 metalen);
skb                12 include/net/ife.h void *ife_decode(struct sk_buff *skb, u16 *metalen);
skb                23 include/net/ife.h static inline void *ife_encode(struct sk_buff *skb, u16 metalen)
skb                28 include/net/ife.h static inline void *ife_decode(struct sk_buff *skb, u16 *metalen)
skb                11 include/net/ila.h int ila_xlat_outgoing(struct sk_buff *skb);
skb                12 include/net/ila.h int ila_xlat_incoming(struct sk_buff *skb);
skb                24 include/net/inet6_connection_sock.h int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
skb                53 include/net/inet6_hashtables.h 				   struct sk_buff *skb, int doff,
skb                62 include/net/inet6_hashtables.h 					  struct sk_buff *skb, int doff,
skb                77 include/net/inet6_hashtables.h 	return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
skb                82 include/net/inet6_hashtables.h 					      struct sk_buff *skb, int doff,
skb                88 include/net/inet6_hashtables.h 	struct sock *sk = skb_steal_sock(skb);
skb                94 include/net/inet6_hashtables.h 	return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
skb                95 include/net/inet6_hashtables.h 			      doff, &ipv6_hdr(skb)->saddr, sport,
skb                96 include/net/inet6_hashtables.h 			      &ipv6_hdr(skb)->daddr, ntohs(dport),
skb               101 include/net/inet6_hashtables.h 			  struct sk_buff *skb, int doff,
skb                49 include/net/inet_common.h struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
skb                50 include/net/inet_common.h int inet_gro_complete(struct sk_buff *skb, int nhoff);
skb                51 include/net/inet_common.h struct sk_buff *inet_gso_segment(struct sk_buff *skb,
skb                60 include/net/inet_common.h #define indirect_call_gro_receive(f2, f1, cb, head, skb)	\
skb                62 include/net/inet_common.h 	unlikely(gro_recursion_inc_test(skb)) ?			\
skb                63 include/net/inet_common.h 		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
skb                64 include/net/inet_common.h 		INDIRECT_CALL_2(cb, f2, f1, head, skb);		\
skb                34 include/net/inet_connection_sock.h 	int	    (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
skb                35 include/net/inet_connection_sock.h 	void	    (*send_check)(struct sock *sk, struct sk_buff *skb);
skb                37 include/net/inet_connection_sock.h 	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
skb                38 include/net/inet_connection_sock.h 	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
skb                39 include/net/inet_connection_sock.h 	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
skb               135 include/net/inet_ecn.h static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
skb               145 include/net/inet_ecn.h 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               146 include/net/inet_ecn.h 		skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
skb               151 include/net/inet_ecn.h static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
skb               161 include/net/inet_ecn.h 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               162 include/net/inet_ecn.h 		skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
skb               173 include/net/inet_ecn.h static inline int INET_ECN_set_ce(struct sk_buff *skb)
skb               175 include/net/inet_ecn.h 	switch (skb->protocol) {
skb               177 include/net/inet_ecn.h 		if (skb_network_header(skb) + sizeof(struct iphdr) <=
skb               178 include/net/inet_ecn.h 		    skb_tail_pointer(skb))
skb               179 include/net/inet_ecn.h 			return IP_ECN_set_ce(ip_hdr(skb));
skb               183 include/net/inet_ecn.h 		if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
skb               184 include/net/inet_ecn.h 		    skb_tail_pointer(skb))
skb               185 include/net/inet_ecn.h 			return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
skb               192 include/net/inet_ecn.h static inline int INET_ECN_set_ect1(struct sk_buff *skb)
skb               194 include/net/inet_ecn.h 	switch (skb->protocol) {
skb               196 include/net/inet_ecn.h 		if (skb_network_header(skb) + sizeof(struct iphdr) <=
skb               197 include/net/inet_ecn.h 		    skb_tail_pointer(skb))
skb               198 include/net/inet_ecn.h 			return IP_ECN_set_ect1(ip_hdr(skb));
skb               202 include/net/inet_ecn.h 		if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
skb               203 include/net/inet_ecn.h 		    skb_tail_pointer(skb))
skb               204 include/net/inet_ecn.h 			return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
skb               253 include/net/inet_ecn.h static inline int INET_ECN_decapsulate(struct sk_buff *skb,
skb               262 include/net/inet_ecn.h 			INET_ECN_set_ce(skb);
skb               264 include/net/inet_ecn.h 			INET_ECN_set_ect1(skb);
skb               271 include/net/inet_ecn.h 				     struct sk_buff *skb)
skb               275 include/net/inet_ecn.h 	if (skb->protocol == htons(ETH_P_IP))
skb               276 include/net/inet_ecn.h 		inner = ip_hdr(skb)->tos;
skb               277 include/net/inet_ecn.h 	else if (skb->protocol == htons(ETH_P_IPV6))
skb               278 include/net/inet_ecn.h 		inner = ipv6_get_dsfield(ipv6_hdr(skb));
skb               282 include/net/inet_ecn.h 	return INET_ECN_decapsulate(skb, oiph->tos, inner);
skb               286 include/net/inet_ecn.h 				      struct sk_buff *skb)
skb               290 include/net/inet_ecn.h 	if (skb->protocol == htons(ETH_P_IP))
skb               291 include/net/inet_ecn.h 		inner = ip_hdr(skb)->tos;
skb               292 include/net/inet_ecn.h 	else if (skb->protocol == htons(ETH_P_IPV6))
skb               293 include/net/inet_ecn.h 		inner = ipv6_get_dsfield(ipv6_hdr(skb));
skb               297 include/net/inet_ecn.h 	return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
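
A sketch of the tunnel-rx convention around IP_ECN_decapsulate() above: non-zero means the outer and inner ECN bits disagreed, and values above 1 mean the packet must be dropped, as the ip_tunnel receive paths do.

#include <linux/ip.h>
#include <net/inet_ecn.h>

static bool example_ecn_decap_drop(const struct iphdr *outer,
				   struct sk_buff *skb)
{
	return IP_ECN_decapsulate(outer, skb) > 1;
}
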
skb               169 include/net/inet_frag.h int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
skb               171 include/net/inet_frag.h void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
skb               258 include/net/inet_hashtables.h 				    struct sk_buff *skb, int doff,
skb               266 include/net/inet_hashtables.h 		struct sk_buff *skb, int doff,
skb               270 include/net/inet_hashtables.h 	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
skb               343 include/net/inet_hashtables.h 					 struct sk_buff *skb, int doff,
skb               358 include/net/inet_hashtables.h 	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
skb               364 include/net/inet_hashtables.h 				       struct sk_buff *skb, int doff,
skb               372 include/net/inet_hashtables.h 	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
skb               381 include/net/inet_hashtables.h 					     struct sk_buff *skb,
skb               388 include/net/inet_hashtables.h 	struct sock *sk = skb_steal_sock(skb);
skb               389 include/net/inet_hashtables.h 	const struct iphdr *iph = ip_hdr(skb);
skb               395 include/net/inet_hashtables.h 	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
skb               397 include/net/inet_hashtables.h 			     iph->daddr, dport, inet_iif(skb), sdif,
skb               108 include/net/inet_sock.h static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
skb               111 include/net/inet_sock.h 		return skb->mark;
skb               117 include/net/inet_sock.h 					    struct sk_buff *skb)
skb               123 include/net/inet_sock.h 		return l3mdev_master_ifindex_by_index(net, skb->skb_iif);
skb               281 include/net/inet_sock.h static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
skb               283 include/net/inet_sock.h 	return sk_to_full_sk(skb->sk);
skb                65 include/net/ip.h static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
skb                67 include/net/ip.h 	return ip_hdr(skb)->ihl * 4;
skb                97 include/net/ip.h #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
skb                98 include/net/ip.h #define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
skb               101 include/net/ip.h static inline int inet_sdif(struct sk_buff *skb)
skb               104 include/net/ip.h 	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
skb               105 include/net/ip.h 		return IPCB(skb)->iif;
skb               151 include/net/ip.h int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
skb               154 include/net/ip.h int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
skb               158 include/net/ip.h int ip_local_deliver(struct sk_buff *skb);
skb               159 include/net/ip.h void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
skb               160 include/net/ip.h int ip_mr_input(struct sk_buff *skb);
skb               161 include/net/ip.h int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               162 include/net/ip.h int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               163 include/net/ip.h int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb               173 include/net/ip.h void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
skb               175 include/net/ip.h void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);
skb               179 include/net/ip.h 	struct sk_buff *skb = iter->frag;
skb               181 include/net/ip.h 	iter->frag = skb->next;
skb               182 include/net/ip.h 	skb_mark_not_on_list(skb);
skb               184 include/net/ip.h 	return skb;
skb               198 include/net/ip.h void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
skb               200 include/net/ip.h struct sk_buff *ip_frag_next(struct sk_buff *skb,
skb               204 include/net/ip.h int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               205 include/net/ip.h int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               207 include/net/ip.h int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
skb               212 include/net/ip.h 			       int odd, struct sk_buff *skb),
skb               218 include/net/ip.h 		       struct sk_buff *skb);
skb               224 include/net/ip.h int ip_send_skb(struct net *net, struct sk_buff *skb);
skb               229 include/net/ip.h 					int len, int odd, struct sk_buff *skb),
skb               234 include/net/ip.h static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
skb               237 include/net/ip.h 	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
skb               279 include/net/ip.h void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
skb               452 include/net/ip.h 					  const struct sk_buff *skb)
skb               455 include/net/ip.h 		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
skb               457 include/net/ip.h 		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
skb               460 include/net/ip.h 	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
skb               497 include/net/ip.h static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
skb               500 include/net/ip.h 	struct iphdr *iph = ip_hdr(skb);
skb               502 include/net/ip.h 	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
skb               519 include/net/ip.h static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
skb               522 include/net/ip.h 	ip_select_ident_segs(net, skb, sk, 1);
skb               525 include/net/ip.h static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
skb               527 include/net/ip.h 	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
skb               528 include/net/ip.h 				  skb->len, proto, 0);
skb               545 include/net/ip.h static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
skb               547 include/net/ip.h 	const struct iphdr *iph = skb_gro_network_header(skb);
skb               550 include/net/ip.h 				  skb_gro_len(skb), proto, 0);
skb               645 include/net/ip.h bool ip_call_ra_chain(struct sk_buff *skb);
skb               677 include/net/ip.h int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
skb               679 include/net/ip.h struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
skb               681 include/net/ip.h static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
skb               683 include/net/ip.h 	return skb;
skb               691 include/net/ip.h int ip_forward(struct sk_buff *skb);
skb               697 include/net/ip.h void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
skb               701 include/net/ip.h 		      struct sk_buff *skb, const struct ip_options *sopt);
skb               703 include/net/ip.h 				  struct sk_buff *skb)
skb               705 include/net/ip.h 	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
skb               708 include/net/ip.h void ip_options_fragment(struct sk_buff *skb);
skb               710 include/net/ip.h 			 struct sk_buff *skb, __be32 *info);
skb               712 include/net/ip.h 		       struct sk_buff *skb);
skb               718 include/net/ip.h void ip_forward_options(struct sk_buff *skb);
skb               719 include/net/ip.h int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
skb               725 include/net/ip.h void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
skb               727 include/net/ip.h 			 struct sk_buff *skb, int tlen, int offset);
skb               742 include/net/ip.h void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
skb               747 include/net/ip.h static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
skb               749 include/net/ip.h 	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
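
ip_hdrlen() above converts the iphdr ihl field (counted in 32-bit words) into a byte length, which is the usual way to step from the network header to the transport header. A minimal sketch, assuming the receive path has already validated the IPv4 header and the headers are linear:

	const struct iphdr *iph = ip_hdr(skb);

	if (iph->protocol == IPPROTO_TCP) {
		const struct tcphdr *th = (const struct tcphdr *)
			(skb_network_header(skb) + ip_hdrlen(skb));
		/* th is now safe to inspect */
	}
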
skb                39 include/net/ip6_checksum.h static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
skb                41 include/net/ip6_checksum.h 	return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb                42 include/net/ip6_checksum.h 					    &ipv6_hdr(skb)->daddr,
skb                43 include/net/ip6_checksum.h 					    skb->len, proto, 0));
skb                46 include/net/ip6_checksum.h static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
skb                48 include/net/ip6_checksum.h 	const struct ipv6hdr *iph = skb_gro_network_header(skb);
skb                51 include/net/ip6_checksum.h 					    skb_gro_len(skb), proto, 0));
skb                62 include/net/ip6_checksum.h static inline void __tcp_v6_send_check(struct sk_buff *skb,
skb                66 include/net/ip6_checksum.h 	struct tcphdr *th = tcp_hdr(skb);
skb                68 include/net/ip6_checksum.h 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb                69 include/net/ip6_checksum.h 		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
skb                70 include/net/ip6_checksum.h 		skb->csum_start = skb_transport_header(skb) - skb->head;
skb                71 include/net/ip6_checksum.h 		skb->csum_offset = offsetof(struct tcphdr, check);
skb                73 include/net/ip6_checksum.h 		th->check = tcp_v6_check(skb->len, saddr, daddr,
skb                75 include/net/ip6_checksum.h 						      skb->csum));
skb                80 include/net/ip6_checksum.h static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
skb                84 include/net/ip6_checksum.h 	__tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
skb                96 include/net/ip6_checksum.h void udp6_set_csum(bool nocheck, struct sk_buff *skb,
skb               100 include/net/ip6_checksum.h int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto);
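
__tcp_v6_send_check() above shows the two transmit checksum modes: with CHECKSUM_PARTIAL the stack stores a pseudo-header sum plus csum_start/csum_offset for the hardware to finish, otherwise it folds the full sum in software. A sketch of the offload branch for UDPv6 under the same assumptions; saddr, daddr and len (the UDP length) are caller-provided placeholders:

	struct udphdr *uh = udp_hdr(skb);

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);
	uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
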
skb               395 include/net/ip6_fib.h 				   const struct sk_buff *skb,
skb               411 include/net/ip6_fib.h 		      const struct sk_buff *skb, int strict);
skb               515 include/net/ip6_fib.h 						 struct sk_buff *skb,
skb               524 include/net/ip6_fib.h 	skb_flow_dissect_flow_keys(skb, flkeys, flag);
skb               553 include/net/ip6_fib.h 						 struct sk_buff *skb,
skb                81 include/net/ip6_route.h void ip6_route_input(struct sk_buff *skb);
skb                85 include/net/ip6_route.h 					 const struct sk_buff *skb, int flags);
skb               112 include/net/ip6_route.h 				   const struct sk_buff *skb, int flags);
skb               115 include/net/ip6_route.h 			       const struct sk_buff *skb, int flags);
skb               152 include/net/ip6_route.h 			    const struct sk_buff *skb, int flags);
skb               154 include/net/ip6_route.h 		       const struct sk_buff *skb, struct flow_keys *hkeys);
skb               183 include/net/ip6_route.h void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
skb               185 include/net/ip6_route.h void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
skb               186 include/net/ip6_route.h void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
skb               188 include/net/ip6_route.h void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif);
skb               189 include/net/ip6_route.h void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
skb               194 include/net/ip6_route.h 	struct sk_buff *skb;
skb               212 include/net/ip6_route.h static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
skb               214 include/net/ip6_route.h 	const struct dst_entry *dst = skb_dst(skb);
skb               243 include/net/ip6_route.h static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
skb               245 include/net/ip6_route.h 	struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
skb               261 include/net/ip6_route.h int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb               264 include/net/ip6_route.h static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
skb               266 include/net/ip6_route.h 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
skb               267 include/net/ip6_route.h 				inet6_sk(skb->sk) : NULL;
skb               270 include/net/ip6_route.h 	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
skb               336 include/net/ip6_route.h 				   struct net_device *dev, struct sk_buff *skb,
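
ip6_skb_dst_mtu() above prefers the attached socket's path-MTU setting over the raw dst MTU. Its typical consumer is the pre-fragmentation check on output; a hedged sketch, where output is the transmit continuation matching ip6_fragment()'s signature in the excerpt:

	if (skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip6_fragment(net, sk, skb, output);

	return output(net, sk, skb);
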
skb                70 include/net/ip6_tunnel.h 	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb                72 include/net/ip6_tunnel.h 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               108 include/net/ip6_tunnel.h static inline int ip6_tnl_encap(struct sk_buff *skb, struct ip6_tnl *t,
skb               123 include/net/ip6_tunnel.h 		ret = ops->build_header(skb, &t->encap, protocol, fl6);
skb               139 include/net/ip6_tunnel.h int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
skb               144 include/net/ip6_tunnel.h int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
skb               146 include/net/ip6_tunnel.h __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
skb               153 include/net/ip6_tunnel.h static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
skb               158 include/net/ip6_tunnel.h 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
skb               159 include/net/ip6_tunnel.h 	pkt_len = skb->len - skb_inner_network_offset(skb);
skb               160 include/net/ip6_tunnel.h 	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
skb               261 include/net/ip_fib.h int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
skb               328 include/net/ip_fib.h 						 struct sk_buff *skb,
skb               383 include/net/ip_fib.h 						 struct sk_buff *skb,
skb               392 include/net/ip_fib.h 	skb_flow_dissect_flow_keys(skb, flkeys, flag);
skb               407 include/net/ip_fib.h __be32 fib_compute_spec_dst(struct sk_buff *skb);
skb               409 include/net/ip_fib.h int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
skb               445 include/net/ip_fib.h 		       const struct sk_buff *skb, struct flow_keys *flkeys);
skb               451 include/net/ip_fib.h 		     struct flowi4 *fl4, const struct sk_buff *skb);
skb               524 include/net/ip_fib.h int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
skb               526 include/net/ip_fib.h int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
skb               200 include/net/ip_tunnels.h ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
skb               203 include/net/ip_tunnels.h 	if (skb->mark)
skb               268 include/net/ip_tunnels.h void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
skb               270 include/net/ip_tunnels.h void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
skb               283 include/net/ip_tunnels.h int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
skb               294 include/net/ip_tunnels.h 	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb               296 include/net/ip_tunnels.h 	int (*err_handler)(struct sk_buff *skb, u32 info);
skb               312 include/net/ip_tunnels.h static inline bool pskb_inet_may_pull(struct sk_buff *skb)
skb               316 include/net/ip_tunnels.h 	switch (skb->protocol) {
skb               329 include/net/ip_tunnels.h 	return pskb_network_may_pull(skb, nhlen);
skb               352 include/net/ip_tunnels.h static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
skb               367 include/net/ip_tunnels.h 		ret = ops->build_header(skb, &t->encap, protocol, fl4);
skb               375 include/net/ip_tunnels.h 				       const struct sk_buff *skb)
skb               377 include/net/ip_tunnels.h 	if (skb->protocol == htons(ETH_P_IP))
skb               379 include/net/ip_tunnels.h 	else if (skb->protocol == htons(ETH_P_IPV6))
skb               386 include/net/ip_tunnels.h 				       const struct sk_buff *skb)
skb               388 include/net/ip_tunnels.h 	if (skb->protocol == htons(ETH_P_IP))
skb               390 include/net/ip_tunnels.h 	else if (skb->protocol == htons(ETH_P_IPV6))
skb               398 include/net/ip_tunnels.h 				     const struct sk_buff *skb)
skb               400 include/net/ip_tunnels.h 	u8 inner = ip_tunnel_get_dsfield(iph, skb);
skb               405 include/net/ip_tunnels.h int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
skb               408 include/net/ip_tunnels.h static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
skb               411 include/net/ip_tunnels.h 	return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
skb               414 include/net/ip_tunnels.h void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
skb               420 include/net/ip_tunnels.h int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
skb               422 include/net/ip_tunnels.h static inline int iptunnel_pull_offloads(struct sk_buff *skb)
skb               424 include/net/ip_tunnels.h 	if (skb_is_gso(skb)) {
skb               427 include/net/ip_tunnels.h 		err = skb_unclone(skb, GFP_ATOMIC);
skb               430 include/net/ip_tunnels.h 		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
skb               434 include/net/ip_tunnels.h 	skb->encapsulation = 0;
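
pskb_inet_may_pull() above makes the inner IPv4/IPv6 header readable before a tunnel encapsulates the packet, and ip_tunnel_ecn_encap() folds the inner ECN bits into the outer TOS. A sketch of a tunnel xmit prologue; tos and inner_iph are placeholders for the tunnel's own state:

	if (!pskb_inet_may_pull(skb))
		goto tx_error;	/* truncated inner header: count and drop */

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
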
skb                59 include/net/ip_vs.h static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
skb                62 include/net/ip_vs.h 	return skb_header_pointer(skb, offset, len, buffer);
skb                70 include/net/ip_vs.h ip_vs_fill_iph_skb_off(int af, const struct sk_buff *skb, int offset,
skb                80 include/net/ip_vs.h 			skb, offset, sizeof(_iph), &_iph);
skb                89 include/net/ip_vs.h 		iphdr->protocol  = ipv6_find_hdr(skb, &iphdr->len, -1,
skb                99 include/net/ip_vs.h 			skb, offset, sizeof(_iph), &_iph);
skb               114 include/net/ip_vs.h ip_vs_fill_iph_skb_icmp(int af, const struct sk_buff *skb, int offset,
skb               122 include/net/ip_vs.h 	return ip_vs_fill_iph_skb_off(af, skb, offset, hdr_flags, iphdr);
skb               126 include/net/ip_vs.h ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, bool inverse,
skb               134 include/net/ip_vs.h 	return ip_vs_fill_iph_skb_off(af, skb, skb_network_offset(skb),
skb               240 include/net/ip_vs.h #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)			\
skb               243 include/net/ip_vs.h 			pp->debug_packet(af, pp, skb, ofs, msg);	\
skb               245 include/net/ip_vs.h #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)			\
skb               249 include/net/ip_vs.h 			pp->debug_packet(af, pp, skb, ofs, msg);	\
skb               256 include/net/ip_vs.h #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
skb               257 include/net/ip_vs.h #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
skb               433 include/net/ip_vs.h 			     int af, struct sk_buff *skb,
skb               441 include/net/ip_vs.h 		       const struct sk_buff *skb,
skb               447 include/net/ip_vs.h 			const struct sk_buff *skb,
skb               450 include/net/ip_vs.h 	int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               453 include/net/ip_vs.h 	int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               459 include/net/ip_vs.h 				 const struct sk_buff *skb,
skb               469 include/net/ip_vs.h 			     const struct sk_buff *skb,
skb               545 include/net/ip_vs.h 	int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
skb               717 include/net/ip_vs.h 				       const struct sk_buff *skb,
skb               729 include/net/ip_vs.h 	int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
skb               738 include/net/ip_vs.h 				       struct sk_buff *skb,
skb               790 include/net/ip_vs.h 	int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app,
skb               794 include/net/ip_vs.h 	(*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
skb               798 include/net/ip_vs.h 	(*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
skb               802 include/net/ip_vs.h 				const struct sk_buff *skb,
skb              1163 include/net/ip_vs.h 				      struct sk_buff *skb,
skb              1203 include/net/ip_vs.h 					    const struct sk_buff *skb,
skb              1209 include/net/ip_vs.h 					     const struct sk_buff *skb,
skb              1343 include/net/ip_vs.h int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb,
skb              1345 include/net/ip_vs.h int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb,
skb              1368 include/net/ip_vs.h 			       const struct sk_buff *skb, int offset,
skb              1390 include/net/ip_vs.h ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
skb              1393 include/net/ip_vs.h int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
skb              1461 include/net/ip_vs.h int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1463 include/net/ip_vs.h int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1465 include/net/ip_vs.h int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1467 include/net/ip_vs.h int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1469 include/net/ip_vs.h int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1471 include/net/ip_vs.h int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1477 include/net/ip_vs.h int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1479 include/net/ip_vs.h int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1481 include/net/ip_vs.h int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1483 include/net/ip_vs.h int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1485 include/net/ip_vs.h int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1535 include/net/ip_vs.h void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb              1539 include/net/ip_vs.h void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb              1543 include/net/ip_vs.h __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
skb              1571 include/net/ip_vs.h static inline void ip_vs_notrack(struct sk_buff *skb)
skb              1575 include/net/ip_vs.h 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1579 include/net/ip_vs.h 		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
skb              1597 include/net/ip_vs.h void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1599 include/net/ip_vs.h int ip_vs_confirm_conntrack(struct sk_buff *skb);
skb              1600 include/net/ip_vs.h void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
skb              1612 include/net/ip_vs.h static inline void ip_vs_update_conntrack(struct sk_buff *skb,
skb              1617 include/net/ip_vs.h static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
skb              1629 include/net/ip_vs.h 					     struct sk_buff *skb)
skb              1637 include/net/ip_vs.h 	ct = nf_ct_get(skb, &ctinfo);
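
ip_vs_fill_iph_skb() above condenses per-family header parsing, including the IPv6 extension-header walk via ipv6_find_hdr(), into one struct ip_vs_iphdr. A hedged sketch of a netfilter hook bailing out when parsing fails; the return-value convention is assumed from typical callers:

	struct ip_vs_iphdr iph;

	if (!ip_vs_fill_iph_skb(af, skb, false, &iph))
		return NF_ACCEPT;	/* unparseable: leave the packet alone */
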
skb                20 include/net/ipcomp.h int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
skb                21 include/net/ipcomp.h int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb);
skb                25 include/net/ipcomp.h static inline struct ip_comp_hdr *ip_comp_hdr(const struct sk_buff *skb)
skb                27 include/net/ipcomp.h 	return (struct ip_comp_hdr *)skb_transport_header(skb);
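
ip_comp_hdr() above merely reinterprets the transport header; the field of interest is the compression parameter index, which selects the decompression state. A minimal sketch:

	const struct ip_comp_hdr *ipch = ip_comp_hdr(skb);
	u16 cpi = ntohs(ipch->cpi);
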
skb               163 include/net/ipv6.h int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr,
skb               166 include/net/ipv6.h void ip6_fraglist_prepare(struct sk_buff *skb, struct ip6_fraglist_iter *iter);
skb               170 include/net/ipv6.h 	struct sk_buff *skb = iter->frag;
skb               172 include/net/ipv6.h 	iter->frag = skb->next;
skb               173 include/net/ipv6.h 	skb_mark_not_on_list(skb);
skb               175 include/net/ipv6.h 	return skb;
skb               191 include/net/ipv6.h void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu,
skb               194 include/net/ipv6.h struct sk_buff *ip6_frag_next(struct sk_buff *skb,
skb               422 include/net/ipv6.h void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
skb               429 include/net/ipv6.h int ipv6_parse_hopopts(struct sk_buff *skb);
skb               440 include/net/ipv6.h bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
skb               809 include/net/ipv6.h __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
skb               860 include/net/ipv6.h static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
skb               877 include/net/ipv6.h 	hash = skb_get_hash_flowi6(skb, fl6);
skb               907 include/net/ipv6.h static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
skb               973 include/net/ipv6.h int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
skb               978 include/net/ipv6.h int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               983 include/net/ipv6.h int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
skb               986 include/net/ipv6.h int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
skb               990 include/net/ipv6.h 				int odd, struct sk_buff *skb),
skb               999 include/net/ipv6.h int ip6_send_skb(struct sk_buff *skb);
skb              1006 include/net/ipv6.h 					 int len, int odd, struct sk_buff *skb),
skb              1032 include/net/ipv6.h int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
skb              1033 include/net/ipv6.h int ip6_forward(struct sk_buff *skb);
skb              1034 include/net/ipv6.h int ip6_input(struct sk_buff *skb);
skb              1035 include/net/ipv6.h int ip6_mc_input(struct sk_buff *skb);
skb              1036 include/net/ipv6.h void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
skb              1039 include/net/ipv6.h int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
skb              1040 include/net/ipv6.h int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
skb              1046 include/net/ipv6.h void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
skb              1049 include/net/ipv6.h void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
skb              1064 include/net/ipv6.h int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
skb              1067 include/net/ipv6.h int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type);
skb              1098 include/net/ipv6.h void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
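
ipv6_find_hdr() above walks the extension-header chain; with target == -1 it returns the protocol of the first transport header and writes its offset back, which is how ip_vs_fill_iph_skb_off() uses it earlier in this listing. A sketch (initializing the offset from skb_network_offset() mirrors that caller):

	unsigned int thoff = skb_network_offset(skb);
	int nexthdr = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);

	if (nexthdr == IPPROTO_TCP) {
		/* the TCP header begins thoff bytes into the skb */
	}
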
skb                31 include/net/ipv6_stubs.h 	int (*ipv6_route_input)(struct sk_buff *skb);
skb                41 include/net/ipv6_stubs.h 				 const struct sk_buff *skb, int strict);
skb                71 include/net/ipv6_stubs.h 				     struct sk_buff *skb);
skb               144 include/net/ipx.h int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node);
skb               151 include/net/ipx.h int ipxrtr_route_skb(struct sk_buff *skb);
skb                83 include/net/iucv/af_iucv.h static inline struct af_iucv_trans_hdr *iucv_trans_hdr(struct sk_buff *skb)
skb                85 include/net/iucv/af_iucv.h 	return (struct af_iucv_trans_hdr *)skb_network_header(skb);
skb               135 include/net/iucv/af_iucv.h 	void                    (*sk_txnotify)(struct sk_buff *skb,
skb                28 include/net/l3mdev.h 					  struct sk_buff *skb, u16 proto);
skb                30 include/net/l3mdev.h 					  struct sock *sk, struct sk_buff *skb,
skb               146 include/net/l3mdev.h struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
skb               150 include/net/l3mdev.h 	if (netif_is_l3_slave(skb->dev))
skb               151 include/net/l3mdev.h 		master = netdev_master_upper_dev_get_rcu(skb->dev);
skb               152 include/net/l3mdev.h 	else if (netif_is_l3_master(skb->dev) ||
skb               153 include/net/l3mdev.h 		 netif_has_l3_rx_handler(skb->dev))
skb               154 include/net/l3mdev.h 		master = skb->dev;
skb               157 include/net/l3mdev.h 		skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto);
skb               159 include/net/l3mdev.h 	return skb;
skb               163 include/net/l3mdev.h struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
skb               165 include/net/l3mdev.h 	return l3mdev_l3_rcv(skb, AF_INET);
skb               169 include/net/l3mdev.h struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
skb               171 include/net/l3mdev.h 	return l3mdev_l3_rcv(skb, AF_INET6);
skb               175 include/net/l3mdev.h struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
skb               177 include/net/l3mdev.h 	struct net_device *dev = skb_dst(skb)->dev;
skb               184 include/net/l3mdev.h 			skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
skb               185 include/net/l3mdev.h 								skb, proto);
skb               188 include/net/l3mdev.h 	return skb;
skb               192 include/net/l3mdev.h struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
skb               194 include/net/l3mdev.h 	return l3mdev_l3_out(sk, skb, AF_INET);
skb               198 include/net/l3mdev.h struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
skb               200 include/net/l3mdev.h 	return l3mdev_l3_out(sk, skb, AF_INET6);
skb               260 include/net/l3mdev.h struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
skb               262 include/net/l3mdev.h 	return skb;
skb               266 include/net/l3mdev.h struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
skb               268 include/net/l3mdev.h 	return skb;
skb               272 include/net/l3mdev.h struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
skb               274 include/net/l3mdev.h 	return skb;
skb               278 include/net/l3mdev.h struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
skb               280 include/net/l3mdev.h 	return skb;
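
l3mdev_l3_rcv() above diverts packets arriving on an L3 slave through the VRF master device; callers treat a NULL return as "consumed". A minimal sketch mirroring the IPv4 receive path:

	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;	/* master device took ownership */
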
skb                62 include/net/lib80211.h 	int (*encrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
skb                63 include/net/lib80211.h 	int (*decrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
skb                67 include/net/lib80211.h 	int (*encrypt_msdu) (struct sk_buff * skb, int hdr_len, void *priv);
skb                68 include/net/lib80211.h 	int (*decrypt_msdu) (struct sk_buff * skb, int keyidx, int hdr_len,
skb                59 include/net/llc.h 	int		 (*rcv_func)(struct sk_buff *skb,
skb                98 include/net/llc.h int llc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
skb               101 include/net/llc.h int llc_mac_hdr_init(struct sk_buff *skb, const unsigned char *sa,
skb               105 include/net/llc.h 		  void (*handler)(struct llc_sap *sap, struct sk_buff *skb));
skb               108 include/net/llc.h void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
skb               111 include/net/llc.h 			     int (*rcv)(struct sk_buff *skb,
skb               135 include/net/llc.h int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
skb               138 include/net/llc.h void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
skb               139 include/net/llc.h void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
skb                90 include/net/llc_c_ac.h typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
skb                92 include/net/llc_c_ac.h int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
skb                93 include/net/llc_c_ac.h int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
skb                94 include/net/llc_c_ac.h int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
skb                95 include/net/llc_c_ac.h int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
skb                96 include/net/llc_c_ac.h int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
skb                97 include/net/llc_c_ac.h int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
skb                98 include/net/llc_c_ac.h int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
skb               100 include/net/llc_c_ac.h 					    struct sk_buff *skb);
skb               102 include/net/llc_c_ac.h 					       struct sk_buff *skb);
skb               103 include/net/llc_c_ac.h int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
skb               104 include/net/llc_c_ac.h int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
skb               105 include/net/llc_c_ac.h int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
skb               106 include/net/llc_c_ac.h int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb);
skb               107 include/net/llc_c_ac.h int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb);
skb               108 include/net/llc_c_ac.h int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
skb               109 include/net/llc_c_ac.h int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
skb               110 include/net/llc_c_ac.h int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
skb               111 include/net/llc_c_ac.h int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
skb               113 include/net/llc_c_ac.h 						struct sk_buff *skb);
skb               114 include/net/llc_c_ac.h int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
skb               115 include/net/llc_c_ac.h int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
skb               116 include/net/llc_c_ac.h int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
skb               117 include/net/llc_c_ac.h int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
skb               118 include/net/llc_c_ac.h int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
skb               119 include/net/llc_c_ac.h int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
skb               120 include/net/llc_c_ac.h int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
skb               121 include/net/llc_c_ac.h int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb);
skb               122 include/net/llc_c_ac.h int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
skb               123 include/net/llc_c_ac.h int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
skb               124 include/net/llc_c_ac.h int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
skb               125 include/net/llc_c_ac.h int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
skb               126 include/net/llc_c_ac.h int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
skb               127 include/net/llc_c_ac.h int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
skb               128 include/net/llc_c_ac.h int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
skb               129 include/net/llc_c_ac.h int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
skb               130 include/net/llc_c_ac.h int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb);
skb               131 include/net/llc_c_ac.h int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb);
skb               132 include/net/llc_c_ac.h int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb);
skb               133 include/net/llc_c_ac.h int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb);
skb               134 include/net/llc_c_ac.h int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb);
skb               136 include/net/llc_c_ac.h 					     struct sk_buff *skb);
skb               137 include/net/llc_c_ac.h int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb);
skb               138 include/net/llc_c_ac.h int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb);
skb               139 include/net/llc_c_ac.h int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb);
skb               140 include/net/llc_c_ac.h int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb);
skb               141 include/net/llc_c_ac.h int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb);
skb               142 include/net/llc_c_ac.h int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb);
skb               143 include/net/llc_c_ac.h int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb);
skb               144 include/net/llc_c_ac.h int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb);
skb               145 include/net/llc_c_ac.h int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb);
skb               146 include/net/llc_c_ac.h int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb);
skb               147 include/net/llc_c_ac.h int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb);
skb               148 include/net/llc_c_ac.h int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb);
skb               150 include/net/llc_c_ac.h 						  struct sk_buff *skb);
skb               151 include/net/llc_c_ac.h int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb);
skb               152 include/net/llc_c_ac.h int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb);
skb               153 include/net/llc_c_ac.h int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb);
skb               154 include/net/llc_c_ac.h int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb);
skb               155 include/net/llc_c_ac.h int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb);
skb               156 include/net/llc_c_ac.h int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb);
skb               157 include/net/llc_c_ac.h int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb);
skb               158 include/net/llc_c_ac.h int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb);
skb               159 include/net/llc_c_ac.h int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb);
skb               160 include/net/llc_c_ac.h int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb);
skb               161 include/net/llc_c_ac.h int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb);
skb               162 include/net/llc_c_ac.h int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb);
skb               163 include/net/llc_c_ac.h int llc_conn_disc(struct sock *sk, struct sk_buff *skb);
skb               164 include/net/llc_c_ac.h int llc_conn_reset(struct sock *sk, struct sk_buff *skb);
skb               165 include/net/llc_c_ac.h int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb);
skb               167 include/net/llc_c_ac.h int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
skb               168 include/net/llc_c_ac.h int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
skb               169 include/net/llc_c_ac.h int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
skb               170 include/net/llc_c_ac.h int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
skb               171 include/net/llc_c_ac.h int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
skb               172 include/net/llc_c_ac.h int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
skb               123 include/net/llc_c_ev.h static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
skb               125 include/net/llc_c_ev.h 	return (struct llc_conn_state_ev *)skb->cb;
skb               128 include/net/llc_c_ev.h typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
skb               129 include/net/llc_c_ev.h typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
skb               131 include/net/llc_c_ev.h int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
skb               132 include/net/llc_c_ev.h int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
skb               133 include/net/llc_c_ev.h int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
skb               134 include/net/llc_c_ev.h int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
skb               135 include/net/llc_c_ev.h int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
skb               136 include/net/llc_c_ev.h int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
skb               137 include/net/llc_c_ev.h int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
skb               138 include/net/llc_c_ev.h int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
skb               139 include/net/llc_c_ev.h int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
skb               140 include/net/llc_c_ev.h int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
skb               142 include/net/llc_c_ev.h 					     struct sk_buff *skb);
skb               143 include/net/llc_c_ev.h int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
skb               145 include/net/llc_c_ev.h 					      struct sk_buff *skb);
skb               147 include/net/llc_c_ev.h 					     struct sk_buff *skb);
skb               148 include/net/llc_c_ev.h int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
skb               149 include/net/llc_c_ev.h int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
skb               150 include/net/llc_c_ev.h int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
skb               151 include/net/llc_c_ev.h int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
skb               152 include/net/llc_c_ev.h int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
skb               154 include/net/llc_c_ev.h 					       struct sk_buff *skb);
skb               156 include/net/llc_c_ev.h 					       struct sk_buff *skb);
skb               157 include/net/llc_c_ev.h int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
skb               158 include/net/llc_c_ev.h int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
skb               159 include/net/llc_c_ev.h int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
skb               160 include/net/llc_c_ev.h int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
skb               161 include/net/llc_c_ev.h int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
skb               163 include/net/llc_c_ev.h int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
skb               164 include/net/llc_c_ev.h int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
skb               166 include/net/llc_c_ev.h 					      struct sk_buff *skb);
skb               168 include/net/llc_c_ev.h 					      struct sk_buff *skb);
skb               169 include/net/llc_c_ev.h int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
skb               170 include/net/llc_c_ev.h int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
skb               172 include/net/llc_c_ev.h 					      struct sk_buff *skb);
skb               174 include/net/llc_c_ev.h 					      struct sk_buff *skb);
skb               175 include/net/llc_c_ev.h int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
skb               176 include/net/llc_c_ev.h int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
skb               177 include/net/llc_c_ev.h int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
skb               178 include/net/llc_c_ev.h int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
skb               179 include/net/llc_c_ev.h int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
skb               180 include/net/llc_c_ev.h int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
skb               181 include/net/llc_c_ev.h int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
skb               182 include/net/llc_c_ev.h int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
skb               183 include/net/llc_c_ev.h int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
skb               184 include/net/llc_c_ev.h int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
skb               185 include/net/llc_c_ev.h int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
skb               186 include/net/llc_c_ev.h int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
skb               187 include/net/llc_c_ev.h int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
skb               188 include/net/llc_c_ev.h int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
skb               189 include/net/llc_c_ev.h int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
skb               190 include/net/llc_c_ev.h int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
skb               191 include/net/llc_c_ev.h int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
skb               194 include/net/llc_c_ev.h int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb);
skb               195 include/net/llc_c_ev.h int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb);
skb               196 include/net/llc_c_ev.h int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb);
skb               197 include/net/llc_c_ev.h int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
skb               198 include/net/llc_c_ev.h int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb);
skb               199 include/net/llc_c_ev.h int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb);
skb               200 include/net/llc_c_ev.h int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
skb               201 include/net/llc_c_ev.h int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
skb               202 include/net/llc_c_ev.h int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb);
skb               203 include/net/llc_c_ev.h int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb);
skb               204 include/net/llc_c_ev.h int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb);
skb               205 include/net/llc_c_ev.h int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb);
skb               206 include/net/llc_c_ev.h int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
skb               207 include/net/llc_c_ev.h int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
skb               208 include/net/llc_c_ev.h int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb);
skb               209 include/net/llc_c_ev.h int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb);
skb               210 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb);
skb               211 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb);
skb               212 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb);
skb               214 include/net/llc_c_ev.h 					    struct sk_buff *skb);
skb               215 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb);
skb               216 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb);
skb               217 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb);
skb               219 include/net/llc_c_ev.h static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
skb               221 include/net/llc_c_ev.h 	return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
skb                88 include/net/llc_conn.h static __inline__ void llc_set_backlog_type(struct sk_buff *skb, char type)
skb                90 include/net/llc_conn.h 	skb->cb[sizeof(skb->cb) - 1] = type;
skb                93 include/net/llc_conn.h static __inline__ char llc_backlog_type(struct sk_buff *skb)
skb                95 include/net/llc_conn.h 	return skb->cb[sizeof(skb->cb) - 1];
skb               106 include/net/llc_conn.h int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
skb               107 include/net/llc_conn.h void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
skb               108 include/net/llc_conn.h void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
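
llc_set_backlog_type()/llc_backlog_type() above stash a one-byte tag in the tail of skb->cb so the backlog processor can tell queued PDUs from queued state-machine events. A hedged sketch, assuming the LLC_EVENT tag value defined in this header:

	llc_set_backlog_type(skb, LLC_EVENT);	/* queued entry is an event, not a PDU */

	/* later, as the socket backlog drains: */
	if (llc_backlog_type(skb) == LLC_EVENT)
		llc_conn_state_process(sk, skb);
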
skb                66 include/net/llc_if.h int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
skb               203 include/net/llc_pdu.h static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
skb               205 include/net/llc_pdu.h 	return (struct llc_pdu_sn *)skb_network_header(skb);
skb               215 include/net/llc_pdu.h static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
skb               217 include/net/llc_pdu.h 	return (struct llc_pdu_un *)skb_network_header(skb);
skb               230 include/net/llc_pdu.h static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
skb               236 include/net/llc_pdu.h 	skb_push(skb, hlen);
skb               237 include/net/llc_pdu.h 	skb_reset_network_header(skb);
skb               238 include/net/llc_pdu.h 	pdu = llc_pdu_un_hdr(skb);
skb               251 include/net/llc_pdu.h static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
skb               253 include/net/llc_pdu.h 	if (skb->protocol == htons(ETH_P_802_2))
skb               254 include/net/llc_pdu.h 		memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
skb               264 include/net/llc_pdu.h static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
skb               266 include/net/llc_pdu.h 	if (skb->protocol == htons(ETH_P_802_2))
skb               267 include/net/llc_pdu.h 		memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
skb               278 include/net/llc_pdu.h static inline void llc_pdu_decode_ssap(struct sk_buff *skb, u8 *ssap)
skb               280 include/net/llc_pdu.h 	*ssap = llc_pdu_un_hdr(skb)->ssap & 0xFE;
skb               291 include/net/llc_pdu.h static inline void llc_pdu_decode_dsap(struct sk_buff *skb, u8 *dsap)
skb               293 include/net/llc_pdu.h 	*dsap = llc_pdu_un_hdr(skb)->dsap & 0xFE;
skb               302 include/net/llc_pdu.h static inline void llc_pdu_init_as_ui_cmd(struct sk_buff *skb)
skb               304 include/net/llc_pdu.h 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               316 include/net/llc_pdu.h static inline void llc_pdu_init_as_test_cmd(struct sk_buff *skb)
skb               318 include/net/llc_pdu.h 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               332 include/net/llc_pdu.h static inline void llc_pdu_init_as_test_rsp(struct sk_buff *skb,
skb               335 include/net/llc_pdu.h 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               346 include/net/llc_pdu.h 		skb_put(skb, dsize);
skb               364 include/net/llc_pdu.h static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
skb               368 include/net/llc_pdu.h 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               377 include/net/llc_pdu.h 	skb_put(skb, sizeof(struct llc_xid_info));
skb               388 include/net/llc_pdu.h static inline void llc_pdu_init_as_xid_rsp(struct sk_buff *skb,
skb               392 include/net/llc_pdu.h 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               402 include/net/llc_pdu.h 	skb_put(skb, sizeof(struct llc_xid_info));
skb               413 include/net/llc_pdu.h void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
skb               414 include/net/llc_pdu.h void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
skb               415 include/net/llc_pdu.h void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
skb               416 include/net/llc_pdu.h void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
skb               417 include/net/llc_pdu.h void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
skb               418 include/net/llc_pdu.h void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
skb               419 include/net/llc_pdu.h void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
skb               420 include/net/llc_pdu.h void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
skb               421 include/net/llc_pdu.h void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
skb               422 include/net/llc_pdu.h void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
skb               423 include/net/llc_pdu.h void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu,
skb               425 include/net/llc_pdu.h void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
skb               426 include/net/llc_pdu.h void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
skb               427 include/net/llc_pdu.h void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
skb               428 include/net/llc_pdu.h void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
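
The init helpers above fill in the 802.2 header that llc_pdu_header_init() reserves with skb_push(). The transmit sequence used by the SAP actions (see llc_s_ac.h below) is: reserve and fill the LLC header, mark the PDU subtype, then prepend the MAC header. A sketch with sap, dsap, smac and dmac as caller-provided placeholders:

	int rc;

	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, LLC_PDU_CMD);
	llc_pdu_init_as_ui_cmd(skb);
	rc = llc_mac_hdr_init(skb, smac, dmac);
	if (!rc)
		rc = dev_queue_xmit(skb);
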
skb                26 include/net/llc_s_ac.h typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
skb                28 include/net/llc_s_ac.h int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
skb                29 include/net/llc_s_ac.h int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
skb                30 include/net/llc_s_ac.h int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
skb                31 include/net/llc_s_ac.h int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
skb                32 include/net/llc_s_ac.h int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
skb                33 include/net/llc_s_ac.h int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
skb                34 include/net/llc_s_ac.h int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb);
skb                35 include/net/llc_s_ac.h int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
skb                36 include/net/llc_s_ac.h int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
skb                47 include/net/llc_s_ev.h static __inline__ struct llc_sap_state_ev *llc_sap_ev(struct sk_buff *skb)
skb                49 include/net/llc_s_ev.h 	return (struct llc_sap_state_ev *)skb->cb;
skb                54 include/net/llc_s_ev.h typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct sk_buff *skb);
skb                56 include/net/llc_s_ev.h int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
skb                57 include/net/llc_s_ev.h int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
skb                58 include/net/llc_s_ev.h int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
skb                59 include/net/llc_s_ev.h int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
skb                60 include/net/llc_s_ev.h int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
skb                61 include/net/llc_s_ev.h int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
skb                62 include/net/llc_s_ev.h int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
skb                63 include/net/llc_s_ev.h int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
skb                64 include/net/llc_s_ev.h int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
skb                65 include/net/llc_s_ev.h int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb);
skb                22 include/net/llc_sap.h void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
skb                23 include/net/llc_sap.h void llc_save_primitive(struct sock *sk, struct sk_buff *skb,
skb                28 include/net/llc_sap.h void llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb,
skb                30 include/net/llc_sap.h void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
skb                30 include/net/lwtunnel.h 	int		(*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                42 include/net/lwtunnel.h 	int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                43 include/net/lwtunnel.h 	int (*input)(struct sk_buff *skb);
skb                44 include/net/lwtunnel.h 	int (*fill_encap)(struct sk_buff *skb,
skb                48 include/net/lwtunnel.h 	int (*xmit)(struct sk_buff *skb);
skb               121 include/net/lwtunnel.h int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate,
skb               126 include/net/lwtunnel.h int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               127 include/net/lwtunnel.h int lwtunnel_input(struct sk_buff *skb);
skb               128 include/net/lwtunnel.h int lwtunnel_xmit(struct sk_buff *skb);
skb               129 include/net/lwtunnel.h int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
skb               221 include/net/lwtunnel.h static inline int lwtunnel_fill_encap(struct sk_buff *skb,
skb               244 include/net/lwtunnel.h static inline int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               249 include/net/lwtunnel.h static inline int lwtunnel_input(struct sk_buff *skb)
skb               254 include/net/lwtunnel.h static inline int lwtunnel_xmit(struct sk_buff *skb)
skb              1072 include/net/mac80211.h 	struct sk_buff *skb;
skb              1096 include/net/mac80211.h static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
skb              1098 include/net/mac80211.h 	return (struct ieee80211_tx_info *)skb->cb;
skb              1101 include/net/mac80211.h static inline struct ieee80211_rx_status *IEEE80211_SKB_RXCB(struct sk_buff *skb)
skb              1103 include/net/mac80211.h 	return (struct ieee80211_rx_status *)skb->cb;
skb              2632 include/net/mac80211.h void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
skb              3759 include/net/mac80211.h 		   struct sk_buff *skb);
skb              3912 include/net/mac80211.h 	int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb,
skb              4048 include/net/mac80211.h 				       struct sk_buff *skb);
skb              4314 include/net/mac80211.h 		       struct sk_buff *skb, struct napi_struct *napi);
skb              4336 include/net/mac80211.h static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              4338 include/net/mac80211.h 	ieee80211_rx_napi(hw, NULL, skb, NULL);
skb              4354 include/net/mac80211.h void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb);
skb              4370 include/net/mac80211.h 				   struct sk_buff *skb)
skb              4373 include/net/mac80211.h 	ieee80211_rx(hw, skb);
skb              4504 include/net/mac80211.h 			    struct sk_buff *skb,
skb              4556 include/net/mac80211.h 			 struct sk_buff *skb);
skb              4616 include/net/mac80211.h 					  struct sk_buff *skb)
skb              4619 include/net/mac80211.h 	ieee80211_tx_status(hw, skb);
skb              4636 include/net/mac80211.h 				 struct sk_buff *skb);
skb              4991 include/net/mac80211.h 					  struct sk_buff *skb, u16 *p1k)
skb              4993 include/net/mac80211.h 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              5026 include/net/mac80211.h 			    struct sk_buff *skb, u8 *p2k);
skb              5911 include/net/mac80211.h 	struct sk_buff *skb;
skb              5957 include/net/mac80211.h 			  struct sk_buff *skb);
skb              6134 include/net/mac80211.h 			      struct ieee80211_vif *vif, struct sk_buff *skb,
skb              6272 include/net/mac80211.h 	struct sk_buff *skb;
skb              6275 include/net/mac80211.h 	skb = ieee80211_tx_dequeue(hw, txq);
skb              6278 include/net/mac80211.h 	return skb;
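The fragment ending above (skb = ieee80211_tx_dequeue(hw, txq); ... return skb;) is the heart of mac80211's pull-based TX model: drivers drain per-queue frames on demand instead of having them pushed. A sketch of a driver wake_tx_queue handler consuming it; demo_hw_xmit() is a placeholder for the driver's real descriptor/DMA path.

/* Sketch of a driver wake_tx_queue handler draining a mac80211 txq;
 * demo_hw_xmit() stands in for the driver's own transmit routine. */
static void demo_wake_tx_queue(struct ieee80211_hw *hw,
			       struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL) {
		/* per-frame TX control state lives in skb->cb */
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (demo_hw_xmit(hw, skb, info) < 0) {
			ieee80211_free_txskb(hw, skb);
			break;
		}
	}
}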
skb               213 include/net/mac802154.h 				     struct sk_buff *skb);
skb               215 include/net/mac802154.h 				      struct sk_buff *skb);
skb               239 include/net/mac802154.h static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
skb               244 include/net/mac802154.h 	if (WARN_ON(!skb_mac_header_was_set(skb) ||
skb               245 include/net/mac802154.h 		    (skb_tail_pointer(skb) -
skb               246 include/net/mac802154.h 		     skb_mac_header(skb)) < IEEE802154_FC_LEN))
skb               249 include/net/mac802154.h 	memcpy(&fc, skb_mac_header(skb), IEEE802154_FC_LEN);
skb               259 include/net/mac802154.h 						    const struct sk_buff *skb)
skb               269 include/net/mac802154.h 		dst_pan = skb_mac_header(skb) +
skb               288 include/net/mac802154.h 						    const struct sk_buff *skb)
skb               302 include/net/mac802154.h 			src_pan = ieee802154_skb_dst_pan(fc, skb);
skb               308 include/net/mac802154.h 			src_pan = skb_mac_header(skb) +
skb               313 include/net/mac802154.h 			src_pan = skb_mac_header(skb) +
skb               320 include/net/mac802154.h 			src_pan = skb_mac_header(skb) +
skb               348 include/net/mac802154.h 							  const struct sk_buff *skb)
skb               350 include/net/mac802154.h 	unsigned char *dst_pan = ieee802154_skb_dst_pan(fc, skb),
skb               351 include/net/mac802154.h 		      *src_pan = ieee802154_skb_src_pan(fc, skb);
skb               461 include/net/mac802154.h void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
skb               486 include/net/mac802154.h void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
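The mac802154 helpers above parse the 802.15.4 frame control field straight out of the skb's MAC header, bounds-checked against the tail pointer, and resolve the PAN ID fields from the addressing-mode bits. A small sketch built on them, assuming the MAC header was set earlier in the RX path:

/* Sketch: fetch the frame control field of a received 802.15.4 frame
 * and resolve the destination PAN ID pointer via the helpers above. */
static const unsigned char *demo_dst_pan(const struct sk_buff *skb)
{
	__le16 fc = ieee802154_get_fc_from_skb(skb);

	return ieee802154_skb_dst_pan(fc, skb);
}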
skb                24 include/net/mpls.h static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
skb                26 include/net/mpls.h 	return (struct mpls_shim_hdr *)skb_network_header(skb);
skb                39 include/net/mrp.h static inline struct mrp_skb_cb *mrp_cb(struct sk_buff *skb)
skb                43 include/net/mrp.h 	return (struct mrp_skb_cb *)skb->cb;
skb               139 include/net/ndisc.h void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
skb               213 include/net/ndisc.h 				    struct sk_buff *skb, u8 icmp6_type,
skb               277 include/net/ndisc.h 					      struct sk_buff *skb,
skb               282 include/net/ndisc.h 		dev->ndisc_ops->fill_addr_option(dev, skb, icmp6_type, NULL);
skb               286 include/net/ndisc.h 						       struct sk_buff *skb,
skb               290 include/net/ndisc.h 		dev->ndisc_ops->fill_addr_option(dev, skb, NDISC_REDIRECT, ha);
skb               459 include/net/ndisc.h int ndisc_rcv(struct sk_buff *skb);
skb               471 include/net/ndisc.h void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target);
skb               489 include/net/ndisc.h int igmp6_event_query(struct sk_buff *skb);
skb               491 include/net/ndisc.h int igmp6_event_report(struct sk_buff *skb);
skb               206 include/net/neighbour.h 	void			(*proxy_redo)(struct sk_buff *skb);
skb               326 include/net/neighbour.h int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
skb               334 include/net/neighbour.h int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
skb               335 include/net/neighbour.h int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
skb               336 include/net/neighbour.h int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
skb               354 include/net/neighbour.h 		    struct sk_buff *skb);
skb               437 include/net/neighbour.h static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
skb               444 include/net/neighbour.h 		return __neigh_event_send(neigh, skb);
skb               449 include/net/neighbour.h static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
skb               456 include/net/neighbour.h 		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
skb               462 include/net/neighbour.h static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
skb               478 include/net/neighbour.h 			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
skb               480 include/net/neighbour.h 				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
skb               486 include/net/neighbour.h 			if (likely(skb_headroom(skb) >= hh_alen)) {
skb               487 include/net/neighbour.h 				memcpy(skb->data - hh_alen, hh->hh_data,
skb               493 include/net/neighbour.h 	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
skb               494 include/net/neighbour.h 		kfree_skb(skb);
skb               498 include/net/neighbour.h 	__skb_push(skb, hh_len);
skb               499 include/net/neighbour.h 	return dev_queue_xmit(skb);
skb               502 include/net/neighbour.h static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
skb               508 include/net/neighbour.h 		return neigh_hh_output(hh, skb);
skb               510 include/net/neighbour.h 		return n->output(n, skb);
skb               544 include/net/neighbour.h #define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
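neigh_hh_output() above is the L2 fast path: a validated cached hardware header is copied in front of skb->data and the packet goes straight to dev_queue_xmit(), while neigh_output() falls back to the neighbour's resolution output otherwise. A sketch of the call-site pattern at the end of an output path; the next hop is assumed on-link for brevity, and __ipv4_neigh_lookup_noref() comes from arp.h rather than the lines shown here.

/* Sketch of the final L2 step of an IPv4 output path: look up the
 * neighbour for the next hop, then let neigh_output() choose between
 * the cached-header fast path and full resolution. */
static int demo_finish_output(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	__be32 nexthop = ip_hdr(skb)->daddr;	/* assume on-link */
	struct neighbour *neigh;
	int ret = -EINVAL;

	rcu_read_lock_bh();
	neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)nexthop);
	if (neigh)
		ret = neigh_output(neigh, skb, false);
	else
		kfree_skb(skb);	/* a real path would create the neighbour */
	rcu_read_unlock_bh();

	return ret;
}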
skb                 9 include/net/netfilter/br_netfilter.h static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
skb                12 include/net/netfilter/br_netfilter.h 	struct nf_bridge_info *b = skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
skb                23 include/net/netfilter/br_netfilter.h void nf_bridge_update_protocol(struct sk_buff *skb);
skb                26 include/net/netfilter/br_netfilter.h 		      struct sk_buff *skb, struct net_device *indev,
skb                31 include/net/netfilter/br_netfilter.h unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb);
skb                33 include/net/netfilter/br_netfilter.h static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
skb                35 include/net/netfilter/br_netfilter.h 	unsigned int len = nf_bridge_encap_header_len(skb);
skb                37 include/net/netfilter/br_netfilter.h 	skb_push(skb, len);
skb                38 include/net/netfilter/br_netfilter.h 	skb->network_header -= len;
skb                41 include/net/netfilter/br_netfilter.h int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                55 include/net/netfilter/br_netfilter.h struct net_device *setup_pre_routing(struct sk_buff *skb,
skb                59 include/net/netfilter/br_netfilter.h int br_validate_ipv6(struct net *net, struct sk_buff *skb);
skb                61 include/net/netfilter/br_netfilter.h 				    struct sk_buff *skb,
skb                64 include/net/netfilter/br_netfilter.h static inline int br_validate_ipv6(struct net *net, struct sk_buff *skb)
skb                70 include/net/netfilter/br_netfilter.h br_nf_pre_routing_ipv6(void *priv, struct sk_buff *skb,
skb                 8 include/net/netfilter/ipv4/nf_dup_ipv4.h void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
skb                12 include/net/netfilter/ipv6/nf_defrag_ipv6.h int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
skb                 7 include/net/netfilter/ipv6/nf_dup_ipv6.h void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
skb               150 include/net/netfilter/nf_conntrack.h nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
skb               152 include/net/netfilter/nf_conntrack.h 	unsigned long nfct = skb_get_nfct(skb);
skb               182 include/net/netfilter/nf_conntrack.h bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
skb               187 include/net/netfilter/nf_conntrack.h 			  const struct sk_buff *skb,
skb               193 include/net/netfilter/nf_conntrack.h 				      const struct sk_buff *skb,
skb               196 include/net/netfilter/nf_conntrack.h 	__nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
skb               201 include/net/netfilter/nf_conntrack.h 				 const struct sk_buff *skb,
skb               204 include/net/netfilter/nf_conntrack.h 	__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
skb               209 include/net/netfilter/nf_conntrack.h 		     const struct sk_buff *skb);
skb               255 include/net/netfilter/nf_conntrack.h static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
skb               257 include/net/netfilter/nf_conntrack.h 	return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
skb               317 include/net/netfilter/nf_conntrack.h nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
skb               319 include/net/netfilter/nf_conntrack.h 	skb_set_nfct(skb, (unsigned long)ct | info);
skb                25 include/net/netfilter/nf_conntrack_core.h unsigned int nf_conntrack_in(struct sk_buff *skb,
skb                53 include/net/netfilter/nf_conntrack_core.h int __nf_conntrack_confirm(struct sk_buff *skb);
skb                56 include/net/netfilter/nf_conntrack_core.h static inline int nf_conntrack_confirm(struct sk_buff *skb)
skb                58 include/net/netfilter/nf_conntrack_core.h 	struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb);
skb                63 include/net/netfilter/nf_conntrack_core.h 			ret = __nf_conntrack_confirm(skb);
skb                70 include/net/netfilter/nf_conntrack_core.h unsigned int nf_confirm(struct sk_buff *skb, unsigned int protoff,
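nf_ct_get()/nf_ct_set() above pack the conntrack pointer and its ip_conntrack_info state into a single word on the skb, and nf_conntrack_confirm() commits a new entry once the first packet of a flow survives the hook chain. A sketch of a hook function reading that state; registration via nf_register_net_hook() is omitted.

/* Sketch of a netfilter hook inspecting the conntrack state that
 * nf_conntrack_in() attached to the skb earlier in the chain. */
static unsigned int demo_ct_hook(void *priv, struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

	if (!ct)
		return NF_ACCEPT;	/* untracked traffic */

	if (ctinfo == IP_CT_ESTABLISHED ||
	    ctinfo == IP_CT_ESTABLISHED_REPLY)
		return NF_ACCEPT;	/* packet belongs to a known flow */

	/* new/related packets could be policed here */
	return NF_ACCEPT;
}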
skb                45 include/net/netfilter/nf_conntrack_helper.h 	int (*help)(struct sk_buff *skb,
skb                53 include/net/netfilter/nf_conntrack_helper.h 	int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct);
skb                99 include/net/netfilter/nf_conntrack_helper.h 		       int (*help)(struct sk_buff *skb, unsigned int protoff,
skb               139 include/net/netfilter/nf_conntrack_helper.h int nf_conntrack_broadcast_help(struct sk_buff *skb, struct nf_conn *ct,
skb               150 include/net/netfilter/nf_conntrack_helper.h void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
skb                34 include/net/netfilter/nf_conntrack_l4proto.h 	int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
skb                40 include/net/netfilter/nf_conntrack_l4proto.h 	int (*tuple_to_nlattr)(struct sk_buff *skb,
skb                51 include/net/netfilter/nf_conntrack_l4proto.h 		int (*obj_to_nlattr)(struct sk_buff *skb, const void *data);
skb                63 include/net/netfilter/nf_conntrack_l4proto.h bool icmp_pkt_to_tuple(const struct sk_buff *skb,
skb                68 include/net/netfilter/nf_conntrack_l4proto.h bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
skb                78 include/net/netfilter/nf_conntrack_l4proto.h int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
skb                85 include/net/netfilter/nf_conntrack_l4proto.h 			      struct sk_buff *skb,
skb                90 include/net/netfilter/nf_conntrack_l4proto.h 			      struct sk_buff *skb,
skb                95 include/net/netfilter/nf_conntrack_l4proto.h 			     struct sk_buff *skb,
skb               100 include/net/netfilter/nf_conntrack_l4proto.h 			       struct sk_buff *skb,
skb               105 include/net/netfilter/nf_conntrack_l4proto.h 			    struct sk_buff *skb,
skb               110 include/net/netfilter/nf_conntrack_l4proto.h 				struct sk_buff *skb,
skb               115 include/net/netfilter/nf_conntrack_l4proto.h 			    struct sk_buff *skb,
skb               120 include/net/netfilter/nf_conntrack_l4proto.h 			     struct sk_buff *skb,
skb               125 include/net/netfilter/nf_conntrack_l4proto.h 			     struct sk_buff *skb,
skb               130 include/net/netfilter/nf_conntrack_l4proto.h 			    struct sk_buff *skb,
skb               152 include/net/netfilter/nf_conntrack_l4proto.h int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
skb               161 include/net/netfilter/nf_conntrack_l4proto.h void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
skb               165 include/net/netfilter/nf_conntrack_l4proto.h void nf_l4proto_log_invalid(const struct sk_buff *skb,
skb               171 include/net/netfilter/nf_conntrack_l4proto.h void nf_l4proto_log_invalid(const struct sk_buff *skb, struct net *net,
skb               174 include/net/netfilter/nf_conntrack_l4proto.h void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
skb                38 include/net/netfilter/nf_conntrack_seqadj.h void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct,
skb                41 include/net/netfilter/nf_conntrack_seqadj.h int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
skb                29 include/net/netfilter/nf_conntrack_zones.h nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
skb                37 include/net/netfilter/nf_conntrack_zones.h 		return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);
skb               108 include/net/netfilter/nf_flow_table.h 		      struct sk_buff *skb, unsigned int thoff,
skb               111 include/net/netfilter/nf_flow_table.h 		      struct sk_buff *skb, unsigned int thoff,
skb               118 include/net/netfilter/nf_flow_table.h unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
skb               120 include/net/netfilter/nf_flow_table.h unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
skb                42 include/net/netfilter/nf_log.h 		      const struct sk_buff *skb,
skb                81 include/net/netfilter/nf_log.h 		   const struct sk_buff *skb,
skb                91 include/net/netfilter/nf_log.h 		  const struct sk_buff *skb,
skb               104 include/net/netfilter/nf_log.h int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
skb               106 include/net/netfilter/nf_log.h int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
skb               112 include/net/netfilter/nf_log.h 			       unsigned int hooknum, const struct sk_buff *skb,
skb               120 include/net/netfilter/nf_log.h 		     const struct sk_buff *skb,
skb                77 include/net/netfilter/nf_nat.h 			   unsigned int hooknum, struct sk_buff *skb);
skb                79 include/net/netfilter/nf_nat.h unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
skb                82 include/net/netfilter/nf_nat.h void nf_nat_csum_recalc(struct sk_buff *skb,
skb                86 include/net/netfilter/nf_nat.h int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
skb                90 include/net/netfilter/nf_nat.h int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
skb               104 include/net/netfilter/nf_nat.h nf_nat_inet_fn(void *priv, struct sk_buff *skb,
skb                11 include/net/netfilter/nf_nat_helper.h bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct,
skb                17 include/net/netfilter/nf_nat_helper.h static inline bool nf_nat_mangle_tcp_packet(struct sk_buff *skb,
skb                26 include/net/netfilter/nf_nat_helper.h 	return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
skb                31 include/net/netfilter/nf_nat_helper.h bool nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
skb                 9 include/net/netfilter/nf_nat_masquerade.h nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
skb                17 include/net/netfilter/nf_nat_masquerade.h nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
skb                 9 include/net/netfilter/nf_nat_redirect.h nf_nat_redirect_ipv4(struct sk_buff *skb,
skb                13 include/net/netfilter/nf_nat_redirect.h nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
skb                14 include/net/netfilter/nf_queue.h 	struct sk_buff		*skb;
skb                79 include/net/netfilter/nf_queue.h static inline u32 hash_bridge(const struct sk_buff *skb, u32 initval)
skb                84 include/net/netfilter/nf_queue.h 	switch (eth_hdr(skb)->h_proto) {
skb                86 include/net/netfilter/nf_queue.h 		iph = skb_header_pointer(skb, skb_network_offset(skb),
skb                92 include/net/netfilter/nf_queue.h 		ip6h = skb_header_pointer(skb, skb_network_offset(skb),
skb               103 include/net/netfilter/nf_queue.h nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
skb               108 include/net/netfilter/nf_queue.h 		queue += reciprocal_scale(hash_v4(ip_hdr(skb), initval),
skb               112 include/net/netfilter/nf_queue.h 		queue += reciprocal_scale(hash_v6(ipv6_hdr(skb), initval),
skb               116 include/net/netfilter/nf_queue.h 		queue += reciprocal_scale(hash_bridge(skb, initval),
skb               124 include/net/netfilter/nf_queue.h int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
skb                 7 include/net/netfilter/nf_socket.h struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
skb                10 include/net/netfilter/nf_socket.h struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
skb                45 include/net/netfilter/nf_synproxy.h bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
skb                52 include/net/netfilter/nf_synproxy.h void synproxy_send_client_synack(struct net *net, const struct sk_buff *skb,
skb                57 include/net/netfilter/nf_synproxy.h 			      const struct sk_buff *skb,
skb                63 include/net/netfilter/nf_synproxy.h unsigned int ipv4_synproxy_hook(void *priv, struct sk_buff *skb,
skb                70 include/net/netfilter/nf_synproxy.h 				      const struct sk_buff *skb,
skb                74 include/net/netfilter/nf_synproxy.h bool synproxy_recv_client_ack_ipv6(struct net *net, const struct sk_buff *skb,
skb                78 include/net/netfilter/nf_synproxy.h unsigned int ipv6_synproxy_hook(void *priv, struct sk_buff *skb,
skb                22 include/net/netfilter/nf_tables.h 	struct sk_buff			*skb;
skb                55 include/net/netfilter/nf_tables.h 				   struct sk_buff *skb,
skb                58 include/net/netfilter/nf_tables.h 	pkt->skb = skb;
skb                63 include/net/netfilter/nf_tables.h 					  struct sk_buff *skb)
skb               192 include/net/netfilter/nf_tables.h int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
skb               207 include/net/netfilter/nf_tables.h int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
skb               796 include/net/netfilter/nf_tables.h 	int				(*dump)(struct sk_buff *skb,
skb               835 include/net/netfilter/nf_tables.h int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
skb              1042 include/net/netfilter/nf_tables.h int nft_verdict_dump(struct sk_buff *skb, int type,
skb              1139 include/net/netfilter/nf_tables.h 	int				(*dump)(struct sk_buff *skb,
skb                 9 include/net/netfilter/nf_tables_ipv4.h 					struct sk_buff *skb)
skb                13 include/net/netfilter/nf_tables_ipv4.h 	ip = ip_hdr(pkt->skb);
skb                16 include/net/netfilter/nf_tables_ipv4.h 	pkt->xt.thoff = ip_hdrlen(pkt->skb);
skb                21 include/net/netfilter/nf_tables_ipv4.h 						  struct sk_buff *skb)
skb                26 include/net/netfilter/nf_tables_ipv4.h 	iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
skb                36 include/net/netfilter/nf_tables_ipv4.h 	if (skb->len < len)
skb                50 include/net/netfilter/nf_tables_ipv4.h 						 struct sk_buff *skb)
skb                52 include/net/netfilter/nf_tables_ipv4.h 	if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0)
skb                53 include/net/netfilter/nf_tables_ipv4.h 		nft_set_pktinfo_unspec(pkt, skb);
skb                10 include/net/netfilter/nf_tables_ipv6.h 					struct sk_buff *skb)
skb                16 include/net/netfilter/nf_tables_ipv6.h 	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
skb                18 include/net/netfilter/nf_tables_ipv6.h 		nft_set_pktinfo_unspec(pkt, skb);
skb                29 include/net/netfilter/nf_tables_ipv6.h 						  struct sk_buff *skb)
skb                39 include/net/netfilter/nf_tables_ipv6.h 	ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
skb                48 include/net/netfilter/nf_tables_ipv6.h 	if (pkt_len + sizeof(*ip6h) > skb->len)
skb                51 include/net/netfilter/nf_tables_ipv6.h 	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
skb                67 include/net/netfilter/nf_tables_ipv6.h 						 struct sk_buff *skb)
skb                69 include/net/netfilter/nf_tables_ipv6.h 	if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0)
skb                70 include/net/netfilter/nf_tables_ipv6.h 		nft_set_pktinfo_unspec(pkt, skb);
skb                21 include/net/netfilter/nf_tproxy.h static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
skb                23 include/net/netfilter/nf_tproxy.h 	skb_orphan(skb);
skb                24 include/net/netfilter/nf_tproxy.h 	skb->sk = sk;
skb                25 include/net/netfilter/nf_tproxy.h 	skb->destructor = sock_edemux;
skb                28 include/net/netfilter/nf_tproxy.h __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
skb                48 include/net/netfilter/nf_tproxy.h nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
skb                75 include/net/netfilter/nf_tproxy.h nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
skb                83 include/net/netfilter/nf_tproxy.h nf_tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
skb               107 include/net/netfilter/nf_tproxy.h nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
skb               114 include/net/netfilter/nf_tproxy.h nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
skb                16 include/net/netfilter/nft_fib.h nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
skb                18 include/net/netfilter/nft_fib.h 	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
skb                21 include/net/netfilter/nft_fib.h int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
skb                25 include/net/netfilter/nft_meta.h int nft_meta_get_dump(struct sk_buff *skb,
skb                28 include/net/netfilter/nft_meta.h int nft_meta_set_dump(struct sk_buff *skb,
skb                25 include/net/netfilter/nft_reject.h int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
skb               255 include/net/netlabel.h 	unsigned char *(*skbuff_optptr)(const struct sk_buff *skb);
skb               256 include/net/netlabel.h 	int (*skbuff_setattr)(struct sk_buff *skb,
skb               259 include/net/netlabel.h 	int (*skbuff_delattr)(struct sk_buff *skb);
skb               484 include/net/netlabel.h int netlbl_skbuff_setattr(struct sk_buff *skb,
skb               487 include/net/netlabel.h int netlbl_skbuff_getattr(const struct sk_buff *skb,
skb               490 include/net/netlabel.h void netlbl_skbuff_err(struct sk_buff *skb, u16 family, int error, int gateway);
skb               496 include/net/netlabel.h int netlbl_cache_add(const struct sk_buff *skb, u16 family,
skb               645 include/net/netlabel.h static inline int netlbl_skbuff_setattr(struct sk_buff *skb,
skb               651 include/net/netlabel.h static inline int netlbl_skbuff_getattr(const struct sk_buff *skb,
skb               657 include/net/netlabel.h static inline void netlbl_skbuff_err(struct sk_buff *skb,
skb               667 include/net/netlabel.h static inline int netlbl_cache_add(const struct sk_buff *skb, u16 family,
skb               428 include/net/netlink.h int netlink_rcv_skb(struct sk_buff *skb,
skb               431 include/net/netlink.h int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
skb               447 include/net/netlink.h struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
skb               448 include/net/netlink.h struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
skb               450 include/net/netlink.h void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
skb               451 include/net/netlink.h struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
skb               452 include/net/netlink.h struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
skb               454 include/net/netlink.h void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
skb               455 include/net/netlink.h void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
skb               457 include/net/netlink.h void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
skb               459 include/net/netlink.h void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
skb               460 include/net/netlink.h int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
skb               461 include/net/netlink.h int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
skb               463 include/net/netlink.h int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
skb               464 include/net/netlink.h int nla_append(struct sk_buff *skb, int attrlen, const void *data);
skb               849 include/net/netlink.h static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
skb               852 include/net/netlink.h 	if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
skb               855 include/net/netlink.h 	return __nlmsg_put(skb, portid, seq, type, payload, flags);
skb               869 include/net/netlink.h static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
skb               874 include/net/netlink.h 	return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb               900 include/net/netlink.h static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
skb               902 include/net/netlink.h 	nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
skb               911 include/net/netlink.h static inline void *nlmsg_get_pos(struct sk_buff *skb)
skb               913 include/net/netlink.h 	return skb_tail_pointer(skb);
skb               923 include/net/netlink.h static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
skb               926 include/net/netlink.h 		WARN_ON((unsigned char *) mark < skb->data);
skb               927 include/net/netlink.h 		skb_trim(skb, (unsigned char *) mark - skb->data);
skb               939 include/net/netlink.h static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
skb               941 include/net/netlink.h 	nlmsg_trim(skb, nlh);
skb               948 include/net/netlink.h static inline void nlmsg_free(struct sk_buff *skb)
skb               950 include/net/netlink.h 	kfree_skb(skb);
skb               961 include/net/netlink.h static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
skb               966 include/net/netlink.h 	NETLINK_CB(skb).dst_group = group;
skb               968 include/net/netlink.h 	err = netlink_broadcast(sk, skb, portid, group, flags);
skb               981 include/net/netlink.h static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
skb               985 include/net/netlink.h 	err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
skb              1176 include/net/netlink.h static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
skb              1181 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(u8), &tmp);
skb              1190 include/net/netlink.h static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
skb              1194 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(u16), &tmp);
skb              1203 include/net/netlink.h static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
skb              1207 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(__be16), &tmp);
skb              1216 include/net/netlink.h static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
skb              1220 include/net/netlink.h 	return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
skb              1229 include/net/netlink.h static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
skb              1233 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(__le16), &tmp);
skb              1242 include/net/netlink.h static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
skb              1246 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(u32), &tmp);
skb              1255 include/net/netlink.h static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
skb              1259 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(__be32), &tmp);
skb              1268 include/net/netlink.h static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
skb              1272 include/net/netlink.h 	return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
skb              1281 include/net/netlink.h static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
skb              1285 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(__le32), &tmp);
skb              1295 include/net/netlink.h static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
skb              1300 include/net/netlink.h 	return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
skb              1310 include/net/netlink.h static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
skb              1315 include/net/netlink.h 	return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
skb              1325 include/net/netlink.h static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
skb              1330 include/net/netlink.h 	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
skb              1341 include/net/netlink.h static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
skb              1346 include/net/netlink.h 	return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
skb              1355 include/net/netlink.h static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
skb              1359 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(s8), &tmp);
skb              1368 include/net/netlink.h static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
skb              1372 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(s16), &tmp);
skb              1381 include/net/netlink.h static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
skb              1385 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(s32), &tmp);
skb              1395 include/net/netlink.h static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
skb              1400 include/net/netlink.h 	return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
skb              1409 include/net/netlink.h static inline int nla_put_string(struct sk_buff *skb, int attrtype,
skb              1412 include/net/netlink.h 	return nla_put(skb, attrtype, strlen(str) + 1, str);
skb              1420 include/net/netlink.h static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
skb              1422 include/net/netlink.h 	return nla_put(skb, attrtype, 0, NULL);
skb              1432 include/net/netlink.h static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
skb              1437 include/net/netlink.h 	return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
skb              1447 include/net/netlink.h static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
skb              1452 include/net/netlink.h 	return nla_put_be32(skb, attrtype, tmp);
skb              1462 include/net/netlink.h static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
skb              1465 include/net/netlink.h 	return nla_put(skb, attrtype, sizeof(*addr), addr);
skb              1682 include/net/netlink.h static inline struct nlattr *nla_nest_start_noflag(struct sk_buff *skb,
skb              1685 include/net/netlink.h 	struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
skb              1687 include/net/netlink.h 	if (nla_put(skb, attrtype, 0, NULL) < 0)
skb              1703 include/net/netlink.h static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
skb              1705 include/net/netlink.h 	return nla_nest_start_noflag(skb, attrtype | NLA_F_NESTED);
skb              1718 include/net/netlink.h static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
skb              1720 include/net/netlink.h 	start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
skb              1721 include/net/netlink.h 	return skb->len;
skb              1732 include/net/netlink.h static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
skb              1734 include/net/netlink.h 	nlmsg_trim(skb, start);
skb              1785 include/net/netlink.h static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
skb              1793 include/net/netlink.h 	if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
skb              1811 include/net/netlink.h static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
skb              1813 include/net/netlink.h 	if (nla_need_padding_for_64bit(skb) &&
skb              1814 include/net/netlink.h 	    !nla_reserve(skb, padattr, 0))
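The netlink.h block above is the message-construction toolkit: nlmsg_put() reserves the header, the nla_put_*() family appends typed attributes, nla_nest_start()/nla_nest_end() bracket nested attribute sets, and nlmsg_end() finalizes nlmsg_len from the tail pointer. A sketch of the usual build-and-fixup sequence; the DEMO_* message and attribute constants are hypothetical.

/* Sketch: fill a netlink message with one flat attribute and one
 * nested group using the helpers above; cancel/trim undoes partial
 * output when the skb runs out of tailroom. */
static int demo_fill_msg(struct sk_buff *skb, u32 portid, u32 seq)
{
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, DEMO_MSG_TYPE, 0, 0);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_u32(skb, DEMO_ATTR_IFINDEX, 1))
		goto cancel;

	nest = nla_nest_start(skb, DEMO_ATTR_STATS);
	if (!nest)
		goto cancel;
	if (nla_put_u64_64bit(skb, DEMO_ATTR_PKTS, 0, DEMO_ATTR_PAD))
		goto nest_cancel;
	nla_nest_end(skb, nest);

	nlmsg_end(skb, nlh);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}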
skb               232 include/net/netrom.h void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags);
skb               238 include/net/netrom.h #define nr_transmit_refusal(skb, mine)					\
skb               240 include/net/netrom.h 	__nr_transmit_reply((skb), (mine), NR_CONNACK | NR_CHOKE_FLAG);	\
skb               247 include/net/netrom.h #define nr_transmit_reset(skb, mine)					\
skb               249 include/net/netrom.h 	__nr_transmit_reply((skb), (mine), NR_RESET);			\
skb               164 include/net/nexthop.h int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
skb               176 include/net/nexthop.h 		if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
skb               149 include/net/nfc/digital.h 	int (*in_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
skb               155 include/net/nfc/digital.h 	int (*tg_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
skb               235 include/net/nfc/digital.h 	int (*skb_check_crc)(struct sk_buff *skb);
skb               236 include/net/nfc/digital.h 	void (*skb_add_crc)(struct sk_buff *skb);
skb                25 include/net/nfc/hci.h 	int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
skb                37 include/net/nfc/hci.h 			      struct nfc_target *target, struct sk_buff *skb,
skb                39 include/net/nfc/hci.h 	int (*tm_send)(struct nfc_hci_dev *hdev, struct sk_buff *skb);
skb                43 include/net/nfc/hci.h 			      struct sk_buff *skb);
skb                45 include/net/nfc/hci.h 			    struct sk_buff *skb);
skb                85 include/net/nfc/hci.h typedef int (*xmit) (struct sk_buff *skb, void *cb_data);
skb               249 include/net/nfc/hci.h 			   struct sk_buff *skb);
skb               251 include/net/nfc/hci.h 			  struct sk_buff *skb);
skb               253 include/net/nfc/hci.h 			    struct sk_buff *skb);
skb               254 include/net/nfc/hci.h void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb);
skb               262 include/net/nfc/hci.h 		      struct sk_buff **skb);
skb               266 include/net/nfc/hci.h 		     const u8 *param, size_t param_len, struct sk_buff **skb);
skb                17 include/net/nfc/llc.h typedef void (*rcv_to_hci_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
skb                18 include/net/nfc/llc.h typedef int (*xmit_to_drv_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
skb                31 include/net/nfc/llc.h void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb);
skb                32 include/net/nfc/llc.h int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb);
skb                60 include/net/nfc/nci_core.h 	int (*rsp)(struct nci_dev *dev, struct sk_buff *skb);
skb                61 include/net/nfc/nci_core.h 	int (*ntf)(struct nci_dev *dev, struct sk_buff *skb);
skb                68 include/net/nfc/nci_core.h 	int   (*send)(struct nci_dev *ndev, struct sk_buff *skb);
skb                81 include/net/nfc/nci_core.h 				    struct sk_buff *skb);
skb                83 include/net/nfc/nci_core.h 				  struct sk_buff *skb);
skb               286 include/net/nfc/nci_core.h int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
skb               287 include/net/nfc/nci_core.h int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb);
skb               305 include/net/nfc/nci_core.h 		     struct sk_buff **skb);
skb               312 include/net/nfc/nci_core.h 		      struct sk_buff **skb);
skb               320 include/net/nfc/nci_core.h 	struct sk_buff *skb;
skb               322 include/net/nfc/nci_core.h 	skb = alloc_skb(len + ndev->tx_headroom + ndev->tx_tailroom, how);
skb               323 include/net/nfc/nci_core.h 	if (skb)
skb               324 include/net/nfc/nci_core.h 		skb_reserve(skb, ndev->tx_headroom);
skb               326 include/net/nfc/nci_core.h 	return skb;
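nci_skb_alloc() above bakes the transport's tx_headroom (and tailroom) into every allocation, so lower layers can later push their framing without a reallocation. A sketch of building an outgoing payload with it; demo_build_payload is invented for illustration.

/* Sketch: build an outgoing NCI payload in a buffer allocated with
 * nci_skb_alloc(); the reserved tx_headroom lets the transport push
 * its own header in front later. */
static struct sk_buff *demo_build_payload(struct nci_dev *ndev,
					  const u8 *data, size_t len)
{
	struct sk_buff *skb = nci_skb_alloc(ndev, len, GFP_KERNEL);

	if (!skb)
		return NULL;

	skb_put_data(skb, data, len);	/* append payload bytes */
	return skb;
}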
skb               351 include/net/nfc/nci_core.h void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb);
skb               352 include/net/nfc/nci_core.h void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb);
skb               354 include/net/nfc/nci_core.h 			struct sk_buff *skb);
skb               356 include/net/nfc/nci_core.h 			struct sk_buff *skb);
skb               358 include/net/nfc/nci_core.h 			struct sk_buff *skb);
skb               360 include/net/nfc/nci_core.h 			struct sk_buff *skb);
skb               361 include/net/nfc/nci_core.h void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
skb               363 include/net/nfc/nci_core.h int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
skb               365 include/net/nfc/nci_core.h void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
skb               367 include/net/nfc/nci_core.h void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err);
skb               414 include/net/nfc/nci_core.h 		 struct sk_buff *skb);
skb               432 include/net/nfc/nci_core.h 	int (*recv)(struct nci_uart *nci_uart, struct sk_buff *skb);
skb               435 include/net/nfc/nci_core.h 	int (*send)(struct nci_uart *nci_uart, struct sk_buff *skb);
skb                23 include/net/nfc/nfc.h 	int (*write)(void *dev_id, struct sk_buff *skb);
skb                41 include/net/nfc/nfc.h typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb,
skb                62 include/net/nfc/nfc.h 			     struct sk_buff *skb, data_exchange_cb_t cb,
skb                64 include/net/nfc/nfc.h 	int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb);
skb               285 include/net/nfc/nfc.h int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
skb               296 include/net/nfc/nfc.h void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
skb               316 include/net/nfc/nfc.h int nfc_vendor_cmd_reply(struct sk_buff *skb);
skb               255 include/net/nsh.h static inline struct nshhdr *nsh_hdr(struct sk_buff *skb)
skb               257 include/net/nsh.h 	return (struct nshhdr *)skb_network_header(skb);
skb               307 include/net/nsh.h int nsh_push(struct sk_buff *skb, const struct nshhdr *pushed_nh);
skb               308 include/net/nsh.h int nsh_pop(struct sk_buff *skb);
skb                 6 include/net/p8022.h 		     int (*func)(struct sk_buff *skb,
skb                19 include/net/phonet/gprs.h int pep_write(struct sock *sk, struct sk_buff *skb);
skb                59 include/net/phonet/pep.h static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
skb                61 include/net/phonet/pep.h 	return (struct pnpipehdr *)skb_transport_header(skb);
skb                39 include/net/phonet/phonet.h void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb);
skb                50 include/net/phonet/phonet.h int pn_skb_send(struct sock *sk, struct sk_buff *skb,
skb                53 include/net/phonet/phonet.h static inline struct phonethdr *pn_hdr(struct sk_buff *skb)
skb                55 include/net/phonet/phonet.h 	return (struct phonethdr *)skb_network_header(skb);
skb                58 include/net/phonet/phonet.h static inline struct phonetmsg *pn_msg(struct sk_buff *skb)
skb                60 include/net/phonet/phonet.h 	return (struct phonetmsg *)skb_transport_header(skb);
skb                68 include/net/phonet/phonet.h void pn_skb_get_src_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
skb                70 include/net/phonet/phonet.h 	struct phonethdr *ph = pn_hdr(skb);
skb                80 include/net/phonet/phonet.h void pn_skb_get_dst_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
skb                82 include/net/phonet/phonet.h 	struct phonethdr *ph = pn_hdr(skb);
skb                34 include/net/ping.h 					     struct sk_buff *skb);
skb                37 include/net/ping.h 					       struct sk_buff *skb);
skb                39 include/net/ping.h 	void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err,
skb                70 include/net/ping.h void ping_err(struct sk_buff *skb, int offset, u32 info);
skb                78 include/net/ping.h int  ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
skb                79 include/net/ping.h bool ping_rcv(struct sk_buff *skb);
skb                73 include/net/pkt_cls.h int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb               131 include/net/pkt_cls.h static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb               296 include/net/pkt_cls.h tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
skb               300 include/net/pkt_cls.h 	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
skb               311 include/net/pkt_cls.h int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
skb               312 include/net/pkt_cls.h int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
skb               438 include/net/pkt_cls.h static inline int tcf_em_tree_match(struct sk_buff *skb,
skb               443 include/net/pkt_cls.h 		return __tcf_em_tree_match(skb, tree, info);
skb               457 include/net/pkt_cls.h #define tcf_em_tree_dump(skb, t, tlv) (0)
skb               458 include/net/pkt_cls.h #define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
skb               462 include/net/pkt_cls.h static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
skb               466 include/net/pkt_cls.h 			return skb_mac_header(skb);
skb               468 include/net/pkt_cls.h 			return skb_network_header(skb);
skb               470 include/net/pkt_cls.h 			return skb_transport_header(skb);
skb               476 include/net/pkt_cls.h static inline int tcf_valid_offset(const struct sk_buff *skb,
skb               479 include/net/pkt_cls.h 	return likely((ptr + len) <= skb_tail_pointer(skb) &&
skb               480 include/net/pkt_cls.h 		      ptr >= skb->head &&
skb               502 include/net/pkt_cls.h tcf_match_indev(struct sk_buff *skb, int ifindex)
skb               506 include/net/pkt_cls.h 	if  (!skb->skb_iif)
skb               508 include/net/pkt_cls.h 	return ifindex == skb->skb_iif;
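tcf_get_base_ptr() and tcf_valid_offset() above form the safe packet-access pair for classifiers and ematches: resolve a base pointer for the requested layer, then confirm the access stays inside skb->head..tail before dereferencing. A sketch of the pattern:

/* Sketch of a bounds-checked byte read at (layer, offset), the access
 * pattern classifiers and ematches use with the helpers above. */
static int demo_read_pkt_byte(struct sk_buff *skb, int layer,
			      int offset, u8 *val)
{
	unsigned char *ptr = tcf_get_base_ptr(skb, layer) + offset;

	if (!tcf_valid_offset(skb, ptr, sizeof(*val)))
		return -EINVAL;

	*val = *ptr;
	return 0;
}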
skb               112 include/net/pkt_sched.h bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
skb               131 include/net/pkt_sched.h static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
skb               137 include/net/pkt_sched.h 	if (skb_vlan_tag_present(skb))
skb               138 include/net/pkt_sched.h 		return skb->vlan_proto;
skb               139 include/net/pkt_sched.h 	return skb->protocol;
skb                38 include/net/protocol.h 	int			(*early_demux)(struct sk_buff *skb);
skb                39 include/net/protocol.h 	int			(*early_demux_handler)(struct sk_buff *skb);
skb                40 include/net/protocol.h 	int			(*handler)(struct sk_buff *skb);
skb                43 include/net/protocol.h 	int			(*err_handler)(struct sk_buff *skb, u32 info);
skb                56 include/net/protocol.h 	void	(*early_demux)(struct sk_buff *skb);
skb                57 include/net/protocol.h 	void    (*early_demux_handler)(struct sk_buff *skb);
skb                58 include/net/protocol.h 	int	(*handler)(struct sk_buff *skb);
skb                61 include/net/protocol.h 	int	(*err_handler)(struct sk_buff *skb,
skb                23 include/net/psample.h void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
skb                30 include/net/psample.h 					 struct sk_buff *skb, u32 trunc_size,
skb                18 include/net/rawv6.h int rawv6_rcv(struct sock *sk, struct sk_buff *skb);
skb                22 include/net/rawv6.h 					   struct sk_buff *skb));
skb                24 include/net/rawv6.h 					     struct sk_buff *skb));
skb                34 include/net/request_sock.h 	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
skb                37 include/net/request_sock.h 				      struct sk_buff *skb);
skb               118 include/net/route.h 					const struct sk_buff *skb);
skb               121 include/net/route.h 					    const struct sk_buff *skb);
skb               179 include/net/route.h int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
skb               182 include/net/route.h int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
skb               184 include/net/route.h int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
skb               188 include/net/route.h static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
skb               194 include/net/route.h 	err = ip_route_input_noref(skb, dst, src, tos, devin);
skb               196 include/net/route.h 		skb_dst_force(skb);
skb               197 include/net/route.h 		if (!skb_dst(skb))
skb               205 include/net/route.h void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
skb               207 include/net/route.h void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
skb               208 include/net/route.h void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u8 protocol);
skb               209 include/net/route.h void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
skb               210 include/net/route.h void ip_rt_send_redirect(struct sk_buff *skb);
skb               221 include/net/route.h void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
skb               235 include/net/route.h int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
skb               337 include/net/route.h static inline int inet_iif(const struct sk_buff *skb)
skb               339 include/net/route.h 	struct rtable *rt = skb_rtable(skb);
skb               344 include/net/route.h 	return skb->skb_iif;
skb               370 include/net/route.h 						struct sk_buff *skb,
skb               382 include/net/route.h 		neigh = ip_neigh_gw4(dev, ip_hdr(skb)->daddr);
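ip_route_input() above is the receive-side routing entry point; on success it attaches (and, in the inline wrapper shown, forces) a dst onto the skb. A sketch of calling it from an RX handler, with the caller dropping the packet on error:

/* Sketch: route a received IPv4 packet so the skb carries a dst the
 * rest of the stack can use. */
static int demo_route_rx(struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);

	return ip_route_input(skb, iph->daddr, iph->saddr,
			      iph->tos, dev);
}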
skb                44 include/net/rsi_91x.h 	int (*coex_send_pkt)(void *priv, struct sk_buff *skb, u8 hal_queue);
skb                86 include/net/rtnetlink.h 	int			(*fill_info)(struct sk_buff *skb,
skb                90 include/net/rtnetlink.h 	int			(*fill_xstats)(struct sk_buff *skb,
skb               104 include/net/rtnetlink.h 	int			(*fill_slave_info)(struct sk_buff *skb,
skb               110 include/net/rtnetlink.h 	int			(*fill_linkxstats)(struct sk_buff *skb,
skb               139 include/net/rtnetlink.h 	int			(*fill_link_af)(struct sk_buff *skb,
skb               150 include/net/rtnetlink.h 	int			(*fill_stats_af)(struct sk_buff *skb,
skb                58 include/net/sch_generic.h 	int 			(*enqueue)(struct sk_buff *skb,
skb               223 include/net/sch_generic.h 					struct sk_buff *skb, struct tcmsg*);
skb               242 include/net/sch_generic.h 	int 			(*enqueue)(struct sk_buff *skb,
skb               331 include/net/sch_generic.h 					struct sk_buff *skb, struct tcmsg*,
skb               333 include/net/sch_generic.h 	int			(*tmplt_dump)(struct sk_buff *skb,
skb               458 include/net/sch_generic.h static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
skb               462 include/net/sch_generic.h 	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
skb               491 include/net/sch_generic.h static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
skb               493 include/net/sch_generic.h 	return (struct qdisc_skb_cb *)skb->cb;
skb               675 include/net/sch_generic.h void __qdisc_calculate_pkt_len(struct sk_buff *skb,
skb               679 include/net/sch_generic.h static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
skb               682 include/net/sch_generic.h 	return skb->tc_at_ingress;
skb               688 include/net/sch_generic.h static inline bool skb_skip_tc_classify(struct sk_buff *skb)
skb               691 include/net/sch_generic.h 	if (skb->tc_skip_classify) {
skb               692 include/net/sch_generic.h 		skb->tc_skip_classify = 0;
skb               764 include/net/sch_generic.h static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
skb               766 include/net/sch_generic.h 	return qdisc_skb_cb(skb)->pkt_len;
skb               781 include/net/sch_generic.h static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
skb               788 include/net/sch_generic.h 		__qdisc_calculate_pkt_len(skb, stab);
skb               792 include/net/sch_generic.h static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               795 include/net/sch_generic.h 	qdisc_calculate_pkt_len(skb, sch);
skb               796 include/net/sch_generic.h 	return sch->enqueue(skb, sch, to_free);
skb               807 include/net/sch_generic.h 				 const struct sk_buff *skb)
skb               810 include/net/sch_generic.h 		       qdisc_pkt_len(skb),
skb               811 include/net/sch_generic.h 		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
skb               823 include/net/sch_generic.h 				     const struct sk_buff *skb)
skb               826 include/net/sch_generic.h 	bstats_update(&bstats->bstats, skb);
skb               831 include/net/sch_generic.h 					   const struct sk_buff *skb)
skb               833 include/net/sch_generic.h 	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
skb               837 include/net/sch_generic.h 				       const struct sk_buff *skb)
skb               839 include/net/sch_generic.h 	bstats_update(&sch->bstats, skb);
skb               843 include/net/sch_generic.h 					    const struct sk_buff *skb)
skb               845 include/net/sch_generic.h 	sch->qstats.backlog -= qdisc_pkt_len(skb);
skb               849 include/net/sch_generic.h 						const struct sk_buff *skb)
skb               851 include/net/sch_generic.h 	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
skb               855 include/net/sch_generic.h 					    const struct sk_buff *skb)
skb               857 include/net/sch_generic.h 	sch->qstats.backlog += qdisc_pkt_len(skb);
skb               861 include/net/sch_generic.h 						const struct sk_buff *skb)
skb               863 include/net/sch_generic.h 	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
skb               953 include/net/sch_generic.h static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
skb               959 include/net/sch_generic.h 		skb->next = NULL;
skb               960 include/net/sch_generic.h 		last->next = skb;
skb               961 include/net/sch_generic.h 		qh->tail = skb;
skb               963 include/net/sch_generic.h 		qh->tail = skb;
skb               964 include/net/sch_generic.h 		qh->head = skb;
skb               969 include/net/sch_generic.h static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
skb               971 include/net/sch_generic.h 	__qdisc_enqueue_tail(skb, &sch->q);
skb               972 include/net/sch_generic.h 	qdisc_qstats_backlog_inc(sch, skb);
skb               976 include/net/sch_generic.h static inline void __qdisc_enqueue_head(struct sk_buff *skb,
skb               979 include/net/sch_generic.h 	skb->next = qh->head;
skb               982 include/net/sch_generic.h 		qh->tail = skb;
skb               983 include/net/sch_generic.h 	qh->head = skb;
skb               989 include/net/sch_generic.h 	struct sk_buff *skb = qh->head;
skb               991 include/net/sch_generic.h 	if (likely(skb != NULL)) {
skb               992 include/net/sch_generic.h 		qh->head = skb->next;
skb               996 include/net/sch_generic.h 		skb->next = NULL;
skb               999 include/net/sch_generic.h 	return skb;
skb              1004 include/net/sch_generic.h 	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
skb              1006 include/net/sch_generic.h 	if (likely(skb != NULL)) {
skb              1007 include/net/sch_generic.h 		qdisc_qstats_backlog_dec(sch, skb);
skb              1008 include/net/sch_generic.h 		qdisc_bstats_update(sch, skb);
skb              1011 include/net/sch_generic.h 	return skb;
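The queue helpers above are sufficient to assemble the classic FIFO discipline: tail enqueue guarded by a packet limit, head dequeue with backlog and bstats accounting, and overflow routed to the deferred-free list. A sketch mirroring the pfifo pattern, with the Qdisc_ops wiring omitted:

/* Sketch of a bare-bones pfifo-style qdisc built from the helpers
 * above; sch->limit is the packet cap and to_free collects drops for
 * batched freeing outside the qdisc lock. */
static int demo_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);
}

static struct sk_buff *demo_fifo_dequeue(struct Qdisc *sch)
{
	return qdisc_dequeue_head(sch);
}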
skb              1017 include/net/sch_generic.h static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
skb              1019 include/net/sch_generic.h 	skb->next = *to_free;
skb              1020 include/net/sch_generic.h 	*to_free = skb;
skb              1023 include/net/sch_generic.h static inline void __qdisc_drop_all(struct sk_buff *skb,
skb              1026 include/net/sch_generic.h 	if (skb->prev)
skb              1027 include/net/sch_generic.h 		skb->prev->next = *to_free;
skb              1029 include/net/sch_generic.h 		skb->next = *to_free;
skb              1030 include/net/sch_generic.h 	*to_free = skb;
skb              1037 include/net/sch_generic.h 	struct sk_buff *skb = __qdisc_dequeue_head(qh);
skb              1039 include/net/sch_generic.h 	if (likely(skb != NULL)) {
skb              1040 include/net/sch_generic.h 		unsigned int len = qdisc_pkt_len(skb);
skb              1042 include/net/sch_generic.h 		qdisc_qstats_backlog_dec(sch, skb);
skb              1043 include/net/sch_generic.h 		__qdisc_drop(skb, to_free);
skb              1066 include/net/sch_generic.h 	struct sk_buff *skb = skb_peek(&sch->gso_skb);
skb              1069 include/net/sch_generic.h 	if (!skb) {
skb              1070 include/net/sch_generic.h 		skb = sch->dequeue(sch);
skb              1072 include/net/sch_generic.h 		if (skb) {
skb              1073 include/net/sch_generic.h 			__skb_queue_head(&sch->gso_skb, skb);
skb              1075 include/net/sch_generic.h 			qdisc_qstats_backlog_inc(sch, skb);
skb              1080 include/net/sch_generic.h 	return skb;
skb              1084 include/net/sch_generic.h 						 struct sk_buff *skb)
skb              1087 include/net/sch_generic.h 		qdisc_qstats_cpu_backlog_dec(sch, skb);
skb              1088 include/net/sch_generic.h 		qdisc_bstats_cpu_update(sch, skb);
skb              1091 include/net/sch_generic.h 		qdisc_qstats_backlog_dec(sch, skb);
skb              1092 include/net/sch_generic.h 		qdisc_bstats_update(sch, skb);
skb              1112 include/net/sch_generic.h 	struct sk_buff *skb = skb_peek(&sch->gso_skb);
skb              1114 include/net/sch_generic.h 	if (skb) {
skb              1115 include/net/sch_generic.h 		skb = __skb_dequeue(&sch->gso_skb);
skb              1117 include/net/sch_generic.h 			qdisc_qstats_cpu_backlog_dec(sch, skb);
skb              1120 include/net/sch_generic.h 			qdisc_qstats_backlog_dec(sch, skb);
skb              1124 include/net/sch_generic.h 		skb = sch->dequeue(sch);
skb              1127 include/net/sch_generic.h 	return skb;
skb              1167 include/net/sch_generic.h static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
skb              1169 include/net/sch_generic.h 	rtnl_kfree_skbs(skb, skb);
skb              1173 include/net/sch_generic.h static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
skb              1176 include/net/sch_generic.h 	__qdisc_drop(skb, to_free);
skb              1182 include/net/sch_generic.h static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
skb              1185 include/net/sch_generic.h 	__qdisc_drop(skb, to_free);
skb              1191 include/net/sch_generic.h static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
skb              1194 include/net/sch_generic.h 	__qdisc_drop_all(skb, to_free);
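
__qdisc_drop() and the qdisc_drop*() wrappers above never free the skb directly; they chain it onto a caller-supplied to_free list so the actual frees can be batched after the qdisc lock is released. A sketch of that deferred-free pattern:

#include <stdio.h>
#include <stdlib.h>

struct mock_pkt { struct mock_pkt *next; int id; };

/* Mirrors __qdisc_drop(): chain the victim onto *to_free instead of
 * freeing it here, keeping the expensive free out of the hot path. */
static void drop_defer(struct mock_pkt *p, struct mock_pkt **to_free)
{
	p->next = *to_free;
	*to_free = p;
}

/* Batch free, in the spirit of freeing the to_free chain after the
 * qdisc lock is dropped. */
static void free_deferred(struct mock_pkt *to_free)
{
	while (to_free) {
		struct mock_pkt *next = to_free->next;

		printf("freeing pkt %d\n", to_free->id);
		free(to_free);
		to_free = next;
	}
}

int main(void)
{
	struct mock_pkt *to_free = NULL;

	for (int i = 0; i < 3; i++) {
		struct mock_pkt *p = malloc(sizeof(*p));

		if (!p)
			return 1;
		p->next = NULL;
		p->id = i;
		drop_defer(p, &to_free);	/* "inside the lock" */
	}
	free_deferred(to_free);			/* "after unlock" */
	return 0;
}
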
skb              1263 include/net/sch_generic.h 						const struct sk_buff *skb)
skb              1265 include/net/sch_generic.h 	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
skb              1284 include/net/sch_generic.h static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
skb              1290 include/net/sch_generic.h 		ret = netif_receive_skb(skb);
skb              1292 include/net/sch_generic.h 		ret = dev_queue_xmit(skb);
skb                93 include/net/sctp/auth.h 			      struct sk_buff *skb, struct sctp_auth_chunk *auth,
skb                51 include/net/sctp/checksum.h static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
skb                54 include/net/sctp/checksum.h 	struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
skb                59 include/net/sctp/checksum.h 	new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0,
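
sctp_compute_cksum() above runs __skb_checksum() with CRC32c ops over the packet (checksum field zeroed) and stores the result little-endian. For reference, a bitwise CRC32c using the reflected Castagnoli polynomial 0x82f63b78, which is what the kernel's crypto/lib helpers compute far faster:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Bitwise CRC32c (Castagnoli), reflected polynomial 0x82f63b78. */
static uint32_t crc32c(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xffffffffu;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x82f63b78u
					: crc >> 1;
	}
	return ~crc;
}

int main(void)
{
	/* Known-answer test: CRC32c("123456789") == 0xe3069283. */
	const uint8_t msg[] = "123456789";

	printf("%08x\n", crc32c(msg, 9));
	return 0;
}
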
skb                93 include/net/sctp/sctp.h int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
skb                99 include/net/sctp/sctp.h void sctp_sock_rfree(struct sk_buff *skb);
skb               138 include/net/sctp/sctp.h int sctp_rcv(struct sk_buff *skb);
skb               139 include/net/sctp/sctp.h int sctp_v4_err(struct sk_buff *skb, u32 info);
skb               398 include/net/sctp/sctp.h static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
skb               400 include/net/sctp/sctp.h 	struct sctp_ulpevent *event = sctp_skb2event(skb);
skb               402 include/net/sctp/sctp.h 	skb_orphan(skb);
skb               403 include/net/sctp/sctp.h 	skb->sk = sk;
skb               404 include/net/sctp/sctp.h 	skb->destructor = sctp_sock_rfree;
skb               427 include/net/sctp/structs.h 	int		(*sctp_xmit)	(struct sk_buff *skb,
skb               463 include/net/sctp/structs.h 					 struct sk_buff *skb,
skb               573 include/net/sctp/structs.h 	struct sk_buff *skb;
skb              1139 include/net/sctp/structs.h static inline const struct sk_buff *sctp_gso_headskb(const struct sk_buff *skb)
skb              1141 include/net/sctp/structs.h 	const struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
skb              1143 include/net/sctp/structs.h 	return chunk->head_skb ? : skb;
skb                64 include/net/sctp/ulpevent.h static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
skb                66 include/net/sctp/ulpevent.h 	return (struct sctp_ulpevent *)skb->cb;
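
sctp_skb2event() above is an instance of a common kernel pattern: protocol code parks per-packet metadata in the skb's fixed-size cb[] control block and recovers it with a cast. A userspace sketch; _Static_assert stands in for the BUILD_BUG_ON size guard used for comparable casts (see the fc_frame.h entries further down), and all names here are hypothetical:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct sk_buff: protocols may park private
 * per-packet state in the fixed-size cb[] scratch area (48 bytes in
 * the real struct). */
struct mock_skb {
	_Alignas(8) char cb[48];
};

/* Hypothetical per-protocol metadata, in the spirit of sctp_ulpevent. */
struct mock_event {
	unsigned int flags;
	unsigned int assoc_id;
};

/* The cast is only legal if the metadata fits in cb[]. */
_Static_assert(sizeof(struct mock_event) <=
	       sizeof(((struct mock_skb *)0)->cb),
	       "event must fit in skb->cb");

static struct mock_event *skb2event(struct mock_skb *skb)
{
	return (struct mock_event *)skb->cb;
}

int main(void)
{
	struct mock_skb skb;

	memset(&skb, 0, sizeof(skb));
	skb2event(&skb)->flags = 1;
	skb2event(&skb)->assoc_id = 42;
	printf("flags=%u assoc_id=%u\n",
	       skb2event(&skb)->flags, skb2event(&skb)->assoc_id);
	return 0;
}
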
skb                17 include/net/seg6.h static inline void update_csum_diff4(struct sk_buff *skb, __be32 from,
skb                22 include/net/seg6.h 	skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
skb                25 include/net/seg6.h static inline void update_csum_diff16(struct sk_buff *skb, __be32 *from,
skb                33 include/net/seg6.h 	skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
skb                61 include/net/seg6.h extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
skb                63 include/net/seg6.h extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
skb                64 include/net/seg6.h extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
skb                51 include/net/seg6_hmac.h extern bool seg6_hmac_validate_skb(struct sk_buff *skb);
skb                17 include/net/seg6_local.h extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
skb                19 include/net/seg6_local.h extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
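
update_csum_diff4() in the seg6.h entries above folds a small difference array into skb->csum via csum_partial() instead of re-checksumming the whole packet; that is the RFC 1624 incremental update, where subtracting the old field means adding its one's complement. A self-contained demonstration over 16-bit one's-complement sums:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Fold a wide accumulator down to a 16-bit one's-complement sum. */
static uint16_t fold(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* 16-bit one's-complement sum over an even-length buffer. */
static uint16_t csum16(const uint8_t *p, size_t len)
{
	uint64_t sum = 0;

	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(p[i] << 8 | p[i + 1]);
	return fold(sum);
}

int main(void)
{
	uint8_t pkt[8] = { 0x45, 0x00, 0x00, 0x54, 0xc0, 0xa8, 0x01, 0x01 };
	uint16_t old_sum = csum16(pkt, sizeof(pkt));
	uint16_t m1 = pkt[4] << 8 | pkt[5], m2 = pkt[6] << 8 | pkt[7];

	/* Rewrite the last four bytes (think: an address field). */
	pkt[4] = 0x0a; pkt[5] = 0x00; pkt[6] = 0x00; pkt[7] = 0x02;

	/* RFC 1624: new = old + ~m + m' in one's-complement arithmetic,
	 * i.e. subtract the old words by adding their complements. */
	uint64_t upd = old_sum;
	upd += (uint16_t)~m1;
	upd += (uint16_t)~m2;
	upd += (uint16_t)(pkt[4] << 8 | pkt[5]);
	upd += (uint16_t)(pkt[6] << 8 | pkt[7]);

	printf("full=%04x incremental=%04x\n",
	       csum16(pkt, sizeof(pkt)), fold(upd));
	return 0;
}
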
skb               502 include/net/sock.h 						  struct sk_buff *skb);
skb               506 include/net/sock.h 							struct sk_buff *skb);
skb               902 include/net/sock.h static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
skb               905 include/net/sock.h 	skb_dst_force(skb);
skb               908 include/net/sock.h 		sk->sk_backlog.head = skb;
skb               910 include/net/sock.h 		sk->sk_backlog.tail->next = skb;
skb               912 include/net/sock.h 	sk->sk_backlog.tail = skb;
skb               913 include/net/sock.h 	skb->next = NULL;
skb               929 include/net/sock.h static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
skb               940 include/net/sock.h 	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
skb               943 include/net/sock.h 	__sk_add_backlog(sk, skb);
skb               944 include/net/sock.h 	sk->sk_backlog.len += skb->truesize;
skb               948 include/net/sock.h int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
skb               950 include/net/sock.h static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
skb               952 include/net/sock.h 	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
skb               953 include/net/sock.h 		return __sk_backlog_rcv(sk, skb);
skb               955 include/net/sock.h 	return sk->sk_backlog_rcv(sk, skb);
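
sk_add_backlog() above queues packets that arrive while a process owns the socket lock, but only up to a byte budget measured in truesize (the packet's real memory footprint); past that it returns -ENOBUFS and the packet is dropped. A sketch of the same bookkeeping (the kernel's exact limit test differs slightly):

#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-ins: the socket owns a byte-budgeted list of
 * packets that arrived while a process held the socket lock. */
struct mock_pkt { struct mock_pkt *next; unsigned int truesize; };

struct mock_sock {
	struct mock_pkt *head, *tail;
	unsigned int backlog_len;	/* sum of queued truesize */
};

/* In the spirit of sk_add_backlog(): refuse once the queued bytes
 * would exceed the budget, otherwise tail-append and charge the
 * packet's true memory cost. */
static int add_backlog(struct mock_sock *sk, struct mock_pkt *p,
		       unsigned int limit)
{
	if (sk->backlog_len + p->truesize > limit)
		return -ENOBUFS;

	p->next = NULL;
	if (sk->tail)
		sk->tail->next = p;
	else
		sk->head = p;
	sk->tail = p;
	sk->backlog_len += p->truesize;
	return 0;
}

int main(void)
{
	struct mock_sock sk = { NULL, NULL, 0 };
	struct mock_pkt a = { NULL, 1500 }, b = { NULL, 1500 };

	printf("first:  %d\n", add_backlog(&sk, &a, 2048));
	printf("second: %d (budget exhausted)\n", add_backlog(&sk, &b, 2048));
	return 0;
}
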
skb               999 include/net/sock.h 					const struct sk_buff *skb)
skb              1002 include/net/sock.h 	if (unlikely(sk->sk_rxhash != skb->hash))
skb              1003 include/net/sock.h 		sk->sk_rxhash = skb->hash;
skb              1048 include/net/sock.h int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
skb              1121 include/net/sock.h 						struct sk_buff *skb);
skb              1427 include/net/sock.h sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
skb              1433 include/net/sock.h 		skb_pfmemalloc(skb);
skb              1477 include/net/sock.h static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
skb              1480 include/net/sock.h 	sk_wmem_queued_add(sk, -skb->truesize);
skb              1481 include/net/sock.h 	sk_mem_uncharge(sk, skb->truesize);
skb              1483 include/net/sock.h 	    !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
skb              1484 include/net/sock.h 		skb_zcopy_clear(skb, true);
skb              1485 include/net/sock.h 		sk->sk_tx_skb_cache = skb;
skb              1488 include/net/sock.h 	__kfree_skb(skb);
skb              1611 include/net/sock.h void __sock_wfree(struct sk_buff *skb);
skb              1612 include/net/sock.h void sock_wfree(struct sk_buff *skb);
skb              1615 include/net/sock.h void skb_orphan_partial(struct sk_buff *skb);
skb              1616 include/net/sock.h void sock_rfree(struct sk_buff *skb);
skb              1617 include/net/sock.h void sock_efree(struct sk_buff *skb);
skb              1619 include/net/sock.h void sock_edemux(struct sk_buff *skb);
skb              1742 include/net/sock.h int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
skb              1744 include/net/sock.h static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
skb              1747 include/net/sock.h 	return __sk_receive_skb(sk, skb, nested, 1, true);
skb              1773 include/net/sock.h static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
skb              1776 include/net/sock.h 	if (skb_rx_queue_recorded(skb)) {
skb              1777 include/net/sock.h 		u16 rx_queue = skb_get_rx_queue(skb);
skb              1952 include/net/sock.h static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
skb              1954 include/net/sock.h 	if (skb_get_dst_pending_confirm(skb)) {
skb              1955 include/net/sock.h 		struct sock *sk = skb->sk;
skb              1981 include/net/sock.h static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
skb              1985 include/net/sock.h 	if (skb->ip_summed == CHECKSUM_NONE) {
skb              1989 include/net/sock.h 		skb->csum = csum_block_add(skb->csum, csum, offset);
skb              1999 include/net/sock.h static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
skb              2002 include/net/sock.h 	int err, offset = skb->len;
skb              2004 include/net/sock.h 	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
skb              2007 include/net/sock.h 		__skb_trim(skb, offset);
skb              2013 include/net/sock.h 					   struct sk_buff *skb,
skb              2019 include/net/sock.h 	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
skb              2020 include/net/sock.h 				       copy, skb->len);
skb              2024 include/net/sock.h 	skb->len	     += copy;
skb              2025 include/net/sock.h 	skb->data_len	     += copy;
skb              2026 include/net/sock.h 	skb->truesize	     += copy;
skb              2123 include/net/sock.h static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
skb              2126 include/net/sock.h 		skb->l4_hash = 1;
skb              2127 include/net/sock.h 		skb->hash = sk->sk_txhash;
skb              2131 include/net/sock.h void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
skb              2141 include/net/sock.h static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
skb              2143 include/net/sock.h 	skb_orphan(skb);
skb              2144 include/net/sock.h 	skb->sk = sk;
skb              2145 include/net/sock.h 	skb->destructor = sock_rfree;
skb              2146 include/net/sock.h 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
skb              2147 include/net/sock.h 	sk_mem_charge(sk, skb->truesize);
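
skb_set_owner_r() above ties a received skb to its socket: sk_rmem_alloc is charged skb->truesize up front, and the destructor (sock_rfree) refunds the charge when the skb is freed, so receive-memory accounting cannot leak. A miniature of that destructor pattern:

#include <stdio.h>

/* Hypothetical miniature of the skb ownership pattern: attaching a
 * buffer to a socket charges its true memory cost, and a destructor
 * callback gives it back when the buffer is freed. */
struct mock_sock { int rmem_alloc; };

struct mock_skb {
	struct mock_sock *sk;
	int truesize;
	void (*destructor)(struct mock_skb *skb);
};

static void mock_rfree(struct mock_skb *skb)	/* cf. sock_rfree() */
{
	skb->sk->rmem_alloc -= skb->truesize;
}

static void set_owner_r(struct mock_skb *skb, struct mock_sock *sk)
{
	skb->sk = sk;
	skb->destructor = mock_rfree;
	sk->rmem_alloc += skb->truesize;	/* charge on attach */
}

static void mock_kfree_skb(struct mock_skb *skb)
{
	if (skb->destructor)
		skb->destructor(skb);		/* uncharge on free */
}

int main(void)
{
	struct mock_sock sk = { 0 };
	struct mock_skb skb = { NULL, 2048, NULL };

	set_owner_r(&skb, &sk);
	printf("charged: %d\n", sk.rmem_alloc);
	mock_kfree_skb(&skb);
	printf("after free: %d\n", sk.rmem_alloc);
	return 0;
}
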
skb              2156 include/net/sock.h 			struct sk_buff *skb, unsigned int flags,
skb              2158 include/net/sock.h 					   struct sk_buff *skb));
skb              2159 include/net/sock.h int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
skb              2160 include/net/sock.h int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
skb              2162 include/net/sock.h int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
skb              2324 include/net/sock.h sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
skb              2326 include/net/sock.h 	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
skb              2330 include/net/sock.h static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
skb              2332 include/net/sock.h 	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
skb              2366 include/net/sock.h 			   struct sk_buff *skb);
skb              2368 include/net/sock.h 			     struct sk_buff *skb);
skb              2371 include/net/sock.h sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
skb              2373 include/net/sock.h 	ktime_t kt = skb->tstamp;
skb              2374 include/net/sock.h 	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
skb              2387 include/net/sock.h 		__sock_recv_timestamp(msg, sk, skb);
skb              2391 include/net/sock.h 	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
skb              2392 include/net/sock.h 		__sock_recv_wifi_status(msg, sk, skb);
skb              2396 include/net/sock.h 			      struct sk_buff *skb);
skb              2400 include/net/sock.h 					  struct sk_buff *skb)
skb              2408 include/net/sock.h 		__sock_recv_ts_and_drops(msg, sk, skb);
skb              2410 include/net/sock.h 		sock_write_timestamp(sk, skb->tstamp);
skb              2445 include/net/sock.h static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
skb              2447 include/net/sock.h 	_sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
skb              2448 include/net/sock.h 			   &skb_shinfo(skb)->tskey);
skb              2460 include/net/sock.h static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
skb              2462 include/net/sock.h 	__skb_unlink(skb, &sk->sk_receive_queue);
skb              2465 include/net/sock.h 		sk->sk_rx_skb_cache = skb;
skb              2466 include/net/sock.h 		skb_orphan(skb);
skb              2469 include/net/sock.h 	__kfree_skb(skb);
skb              2484 include/net/sock.h static inline struct sock *skb_steal_sock(struct sk_buff *skb)
skb              2486 include/net/sock.h 	if (skb->sk) {
skb              2487 include/net/sock.h 		struct sock *sk = skb->sk;
skb              2489 include/net/sock.h 		skb->destructor = NULL;
skb              2490 include/net/sock.h 		skb->sk = NULL;
skb              2508 include/net/sock.h static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
skb              2512 include/net/sock.h 	struct sock *sk = skb->sk;
skb              2515 include/net/sock.h 		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
skb              2517 include/net/sock.h 	} else if (unlikely(skb->decrypted)) {
skb              2519 include/net/sock.h 		kfree_skb(skb);
skb              2520 include/net/sock.h 		skb = NULL;
skb              2525 include/net/sock.h 	return skb;
skb                36 include/net/sock_reuseport.h 					  struct sk_buff *skb,
skb                44 include/net/strparser.h 	int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
skb                45 include/net/strparser.h 	void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
skb                57 include/net/strparser.h static inline struct strp_msg *strp_msg(struct sk_buff *skb)
skb                59 include/net/strparser.h 	return (struct strp_msg *)((void *)skb->cb +
skb                44 include/net/tc_act/tc_ife.h 	int	(*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
skb                53 include/net/tc_act/tc_ife.h int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
skb                54 include/net/tc_act/tc_ife.h int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
skb               315 include/net/tcp.h int tcp_v4_err(struct sk_buff *skb, u32);
skb               319 include/net/tcp.h int tcp_v4_early_demux(struct sk_buff *skb);
skb               320 include/net/tcp.h int tcp_v4_rcv(struct sk_buff *skb);
skb               332 include/net/tcp.h void tcp_wfree(struct sk_buff *skb);
skb               336 include/net/tcp.h int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
skb               337 include/net/tcp.h void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
skb               375 include/net/tcp.h 					      struct sk_buff *skb,
skb               377 include/net/tcp.h struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
skb               381 include/net/tcp.h 		      struct sk_buff *skb);
skb               412 include/net/tcp.h void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
skb               431 include/net/tcp.h void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
skb               434 include/net/tcp.h int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
skb               437 include/net/tcp.h 				      struct sk_buff *skb);
skb               439 include/net/tcp.h struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
skb               444 include/net/tcp.h int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
skb               458 include/net/tcp.h void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
skb               460 include/net/tcp.h void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
skb               463 include/net/tcp.h struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
skb               468 include/net/tcp.h struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
skb               550 include/net/tcp.h __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
skb               560 include/net/tcp.h struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
skb               564 include/net/tcp.h __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
skb               570 include/net/tcp.h int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
skb               571 include/net/tcp.h int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
skb               582 include/net/tcp.h 		 struct sk_buff *skb, u32 len,
skb               597 include/net/tcp.h void tcp_skb_collapse_tstamp(struct sk_buff *skb,
skb               604 include/net/tcp.h void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
skb               784 include/net/tcp.h static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
skb               786 include/net/tcp.h 	return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
skb               790 include/net/tcp.h static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
skb               792 include/net/tcp.h 	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
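
tcp_skb_timestamp() and tcp_skb_timestamp_us() above derive both timestamp resolutions from the single skb_mstamp_ns nanosecond clock; with TCP_TS_HZ = 1000, the 32-bit value ticks in milliseconds. A sketch of the two conversions:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_USEC	1000ULL
#define TCP_TS_HZ	1000ULL	/* timestamp ticks per second */

/* Mirrors tcp_skb_timestamp(): nanoseconds -> TCP_TS_HZ ticks. */
static uint32_t skb_ts(uint64_t mstamp_ns)
{
	return (uint32_t)(mstamp_ns / (NSEC_PER_SEC / TCP_TS_HZ));
}

/* Mirrors tcp_skb_timestamp_us(): nanoseconds -> microseconds. */
static uint64_t skb_ts_us(uint64_t mstamp_ns)
{
	return mstamp_ns / NSEC_PER_USEC;
}

int main(void)
{
	uint64_t now = 1234567890123ULL;	/* arbitrary ns value */

	printf("ts=%u (ms ticks), us=%llu\n", skb_ts(now),
	       (unsigned long long)skb_ts_us(now));
	return 0;
}
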
skb               878 include/net/tcp.h static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
skb               880 include/net/tcp.h 	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
skb               883 include/net/tcp.h static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
skb               885 include/net/tcp.h 	return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
skb               888 include/net/tcp.h static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
skb               890 include/net/tcp.h 	return TCP_SKB_CB(skb)->bpf.sk_redir;
skb               893 include/net/tcp.h static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
skb               895 include/net/tcp.h 	TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
skb               902 include/net/tcp.h static inline int tcp_v6_iif(const struct sk_buff *skb)
skb               904 include/net/tcp.h 	return TCP_SKB_CB(skb)->header.h6.iif;
skb               907 include/net/tcp.h static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
skb               909 include/net/tcp.h 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
skb               911 include/net/tcp.h 	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
skb               915 include/net/tcp.h static inline int tcp_v6_sdif(const struct sk_buff *skb)
skb               918 include/net/tcp.h 	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
skb               919 include/net/tcp.h 		return TCP_SKB_CB(skb)->header.h6.iif;
skb               925 include/net/tcp.h static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
skb               929 include/net/tcp.h 	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
skb               936 include/net/tcp.h static inline int tcp_v4_sdif(struct sk_buff *skb)
skb               939 include/net/tcp.h 	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
skb               940 include/net/tcp.h 		return TCP_SKB_CB(skb)->header.h4.iif;
skb               948 include/net/tcp.h static inline int tcp_skb_pcount(const struct sk_buff *skb)
skb               950 include/net/tcp.h 	return TCP_SKB_CB(skb)->tcp_gso_segs;
skb               953 include/net/tcp.h static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
skb               955 include/net/tcp.h 	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
skb               958 include/net/tcp.h static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
skb               960 include/net/tcp.h 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
skb               964 include/net/tcp.h static inline int tcp_skb_mss(const struct sk_buff *skb)
skb               966 include/net/tcp.h 	return TCP_SKB_CB(skb)->tcp_gso_size;
skb               969 include/net/tcp.h static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
skb               971 include/net/tcp.h 	return likely(!TCP_SKB_CB(skb)->eor);
skb              1134 include/net/tcp.h void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
skb              1135 include/net/tcp.h void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
skb              1275 include/net/tcp.h 					     const struct sk_buff *skb)
skb              1277 include/net/tcp.h 	s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;
skb              1288 include/net/tcp.h 					const struct sk_buff *skb)
skb              1290 include/net/tcp.h 	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
skb              1341 include/net/tcp.h static inline bool tcp_checksum_complete(struct sk_buff *skb)
skb              1343 include/net/tcp.h 	return !skb_csum_unnecessary(skb) &&
skb              1344 include/net/tcp.h 		__skb_checksum_complete(skb);
skb              1347 include/net/tcp.h bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
skb              1348 include/net/tcp.h int tcp_filter(struct sock *sk, struct sk_buff *skb);
skb              1506 include/net/tcp.h bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
skb              1585 include/net/tcp.h 			const struct sock *sk, const struct sk_buff *skb);
skb              1653 include/net/tcp.h void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
skb              1654 include/net/tcp.h struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
skb              1726 include/net/tcp.h static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
skb              1728 include/net/tcp.h 	skb->destructor = NULL;
skb              1729 include/net/tcp.h 	skb->_skb_refdst = 0UL;
skb              1732 include/net/tcp.h #define tcp_skb_tsorted_save(skb) {		\
skb              1733 include/net/tcp.h 	unsigned long _save = skb->_skb_refdst;	\
skb              1734 include/net/tcp.h 	skb->_skb_refdst = 0UL;
skb              1736 include/net/tcp.h #define tcp_skb_tsorted_restore(skb)		\
skb              1737 include/net/tcp.h 	skb->_skb_refdst = _save;		\
skb              1762 include/net/tcp.h #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
skb              1763 include/net/tcp.h 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
skb              1771 include/net/tcp.h 				   const struct sk_buff *skb)
skb              1773 include/net/tcp.h 	return skb_queue_is_last(&sk->sk_write_queue, skb);
skb              1791 include/net/tcp.h static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
skb              1793 include/net/tcp.h 	__skb_queue_tail(&sk->sk_write_queue, skb);
skb              1796 include/net/tcp.h 	if (sk->sk_write_queue.next == skb)
skb              1802 include/net/tcp.h 						  struct sk_buff *skb,
skb              1805 include/net/tcp.h 	__skb_queue_before(&sk->sk_write_queue, skb, new);
skb              1808 include/net/tcp.h static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
skb              1810 include/net/tcp.h 	tcp_skb_tsorted_anchor_cleanup(skb);
skb              1811 include/net/tcp.h 	__skb_unlink(skb, &sk->sk_write_queue);
skb              1814 include/net/tcp.h void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
skb              1816 include/net/tcp.h static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
skb              1818 include/net/tcp.h 	tcp_skb_tsorted_anchor_cleanup(skb);
skb              1819 include/net/tcp.h 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
skb              1822 include/net/tcp.h static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
skb              1824 include/net/tcp.h 	list_del(&skb->tcp_tsorted_anchor);
skb              1825 include/net/tcp.h 	tcp_rtx_queue_unlink(skb, sk);
skb              1826 include/net/tcp.h 	sk_wmem_free_skb(sk, skb);
skb              1853 include/net/tcp.h static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
skb              1855 include/net/tcp.h 	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
skb              1924 include/net/tcp.h struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
skb              1926 include/net/tcp.h struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
skb              1927 include/net/tcp.h int tcp_gro_complete(struct sk_buff *skb);
skb              1929 include/net/tcp.h void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
skb              1958 include/net/tcp.h 		     struct sock *sk, struct sk_buff *skb);
skb              1968 include/net/tcp.h 					 const struct sk_buff *skb);
skb              1984 include/net/tcp.h 					  const struct sk_buff *skb);
skb              1988 include/net/tcp.h 			 struct sk_buff *skb);
skb              1990 include/net/tcp.h 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
skb              1995 include/net/tcp.h 	u32 (*init_seq)(const struct sk_buff *skb);
skb              1996 include/net/tcp.h 	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
skb              2005 include/net/tcp.h 					 const struct sock *sk, struct sk_buff *skb,
skb              2010 include/net/tcp.h 	return ops->cookie_init_seq(skb, mss);
skb              2014 include/net/tcp.h 					 const struct sock *sk, struct sk_buff *skb,
skb              2027 include/net/tcp.h void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
skb              2029 include/net/tcp.h extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
skb              2040 include/net/tcp.h 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
skb              2042 include/net/tcp.h 	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
skb              2051 include/net/tcp.h 							 struct sk_buff *skb)
skb              2053 include/net/tcp.h 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
skb              2060 include/net/tcp.h 		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
skb              2073 include/net/tcp.h static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
skb              2075 include/net/tcp.h 	return skb->truesize == 2;
skb              2078 include/net/tcp.h static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
skb              2080 include/net/tcp.h 	skb->truesize = 2;
skb              2109 include/net/tcp.h static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
skb              2113 include/net/tcp.h 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
skb              2115 include/net/tcp.h 	if (skb->len > tcp_hdrlen(skb))
skb              2153 include/net/tcp.h 	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
skb              2292 include/net/tcp.h static inline void tcp_add_tx_delay(struct sk_buff *skb,
skb              2296 include/net/tcp.h 		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
skb               404 include/net/tls.h static inline struct tls_msg *tls_msg(struct sk_buff *skb)
skb               406 include/net/tls.h 	return (struct tls_msg *)strp_msg(skb);
skb               449 include/net/tls.h 		      struct sk_buff *skb);
skb               658 include/net/tls.h int decrypt_skb(struct sock *sk, struct sk_buff *skb,
skb               660 include/net/tls.h struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
skb               664 include/net/tls.h 				      struct sk_buff *skb);
skb               678 include/net/tls.h int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
skb               701 include/net/tls.h static inline int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
skb                38 include/net/transp_v6.h 			   struct sk_buff *skb);
skb                40 include/net/transp_v6.h 				  struct sk_buff *skb);
skb                42 include/net/transp_v6.h 				    struct sk_buff *skb);
skb                18 include/net/tso.h int tso_count_descs(struct sk_buff *skb);
skb                19 include/net/tso.h void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
skb                21 include/net/tso.h void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
skb                22 include/net/tso.h void tso_start(struct sk_buff *skb, struct tso_t *tso);
skb               108 include/net/udp.h static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
skb               110 include/net/udp.h 	return (UDP_SKB_CB(skb)->cscov == skb->len ?
skb               111 include/net/udp.h 		__skb_checksum_complete(skb) :
skb               112 include/net/udp.h 		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
skb               115 include/net/udp.h static inline int udp_lib_checksum_complete(struct sk_buff *skb)
skb               117 include/net/udp.h 	return !skb_csum_unnecessary(skb) &&
skb               118 include/net/udp.h 		__udp_lib_checksum_complete(skb);
skb               127 include/net/udp.h static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
skb               129 include/net/udp.h 	__wsum csum = csum_partial(skb_transport_header(skb),
skb               131 include/net/udp.h 	skb_queue_walk(&sk->sk_write_queue, skb) {
skb               132 include/net/udp.h 		csum = csum_add(csum, skb->csum);
skb               137 include/net/udp.h static inline __wsum udp_csum(struct sk_buff *skb)
skb               139 include/net/udp.h 	__wsum csum = csum_partial(skb_transport_header(skb),
skb               140 include/net/udp.h 				   sizeof(struct udphdr), skb->csum);
skb               142 include/net/udp.h 	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
skb               143 include/net/udp.h 		csum = csum_add(csum, skb->csum);
skb               154 include/net/udp.h void udp_set_csum(bool nocheck, struct sk_buff *skb,
skb               157 include/net/udp.h static inline void udp_csum_pull_header(struct sk_buff *skb)
skb               159 include/net/udp.h 	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
skb               160 include/net/udp.h 		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
skb               161 include/net/udp.h 					 skb->csum);
skb               162 include/net/udp.h 	skb_pull_rcsum(skb, sizeof(struct udphdr));
skb               163 include/net/udp.h 	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
skb               169 include/net/udp.h struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
skb               171 include/net/udp.h int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
skb               176 include/net/udp.h static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
skb               181 include/net/udp.h 	off  = skb_gro_offset(skb);
skb               183 include/net/udp.h 	uh   = skb_gro_header_fast(skb, off);
skb               184 include/net/udp.h 	if (skb_gro_header_hard(skb, hlen))
skb               185 include/net/udp.h 		uh = skb_gro_header_slow(skb, hlen, off);
skb               210 include/net/udp.h static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
skb               220 include/net/udp.h 	hash = skb_get_hash(skb);
skb               226 include/net/udp.h 			hash = jhash(skb->data, 2 * ETH_ALEN,
skb               227 include/net/udp.h 				     (__force u32) skb->protocol);
skb               264 include/net/udp.h void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
skb               265 include/net/udp.h int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
skb               266 include/net/udp.h void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
skb               277 include/net/udp.h int udp_v4_early_demux(struct sk_buff *skb);
skb               288 include/net/udp.h void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
skb               289 include/net/udp.h int udp_rcv(struct sk_buff *skb);
skb               296 include/net/udp.h struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
skb               308 include/net/udp.h 			       struct udp_table *tbl, struct sk_buff *skb);
skb               309 include/net/udp.h struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
skb               319 include/net/udp.h 			       struct sk_buff *skb);
skb               320 include/net/udp.h struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
skb               345 include/net/udp.h static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
skb               347 include/net/udp.h 	return (struct udp_dev_scratch *)&skb->dev_scratch;
skb               351 include/net/udp.h static inline unsigned int udp_skb_len(struct sk_buff *skb)
skb               353 include/net/udp.h 	return udp_skb_scratch(skb)->len;
skb               356 include/net/udp.h static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
skb               358 include/net/udp.h 	return udp_skb_scratch(skb)->csum_unnecessary;
skb               361 include/net/udp.h static inline bool udp_skb_is_linear(struct sk_buff *skb)
skb               363 include/net/udp.h 	return udp_skb_scratch(skb)->is_linear;
skb               367 include/net/udp.h static inline unsigned int udp_skb_len(struct sk_buff *skb)
skb               369 include/net/udp.h 	return skb->len;
skb               372 include/net/udp.h static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
skb               374 include/net/udp.h 	return skb_csum_unnecessary(skb);
skb               377 include/net/udp.h static inline bool udp_skb_is_linear(struct sk_buff *skb)
skb               379 include/net/udp.h 	return !skb_is_nonlinear(skb);
skb               383 include/net/udp.h static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
skb               388 include/net/udp.h 	n = copy_to_iter(skb->data + off, len, to);
skb               468 include/net/udp.h 					      struct sk_buff *skb, bool ipv4)
skb               486 include/net/udp.h 	if (skb->pkt_type == PACKET_LOOPBACK)
skb               487 include/net/udp.h 		skb->ip_summed = CHECKSUM_PARTIAL;
skb               492 include/net/udp.h 	segs = __skb_gso_segment(skb, features, false);
skb               494 include/net/udp.h 		int segs_nr = skb_shinfo(skb)->gso_segs;
skb               498 include/net/udp.h 		kfree_skb(skb);
skb               502 include/net/udp.h 	consume_skb(skb);
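
udp_flow_src_port() above hashes the encapsulated flow and maps the hash into a source-port range, so every packet of one tunnelled flow leaves with the same UDP source port and ECMP/RSS placement stays stable. The kernel scales the hash into [min, max] with a 64-bit multiply; the modulo below is a simpler stand-in, and the default port range shown is an assumption:

#include <stdio.h>
#include <stdint.h>

/* Map a 32-bit flow hash onto [min, max]. The kernel uses
 * (((u64)hash * (max - min)) >> 32) + min; modulo is the
 * simple-minded equivalent for illustration. */
static uint16_t flow_src_port(uint32_t hash, uint16_t min, uint16_t max)
{
	if (!min && !max) {
		min = 32768;	/* assumed ephemeral range */
		max = 60999;
	}
	return (uint16_t)(min + hash % (uint32_t)(max - min + 1));
}

int main(void)
{
	printf("port=%u\n", flow_src_port(0xdeadbeefu, 0, 0));
	printf("port=%u\n", flow_src_port(0xdeadbeefu, 10000, 10007));
	return 0;
}
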
skb                67 include/net/udp_tunnel.h typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
skb                69 include/net/udp_tunnel.h 					     struct sk_buff *skb);
skb                74 include/net/udp_tunnel.h typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
skb               141 include/net/udp_tunnel.h void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
skb               148 include/net/udp_tunnel.h 			 struct sk_buff *skb,
skb               157 include/net/udp_tunnel.h struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
skb               162 include/net/udp_tunnel.h static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
skb               166 include/net/udp_tunnel.h 	return iptunnel_handle_offloads(skb, type);
skb                21 include/net/udplite.h 				      int len, int odd, struct sk_buff *skb)
skb                38 include/net/udplite.h static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
skb                54 include/net/udplite.h 	else if (cscov < 8  || cscov > skb->len) {
skb                59 include/net/udplite.h 				    cscov, skb->len);
skb                62 include/net/udplite.h 	} else if (cscov < skb->len) {
skb                63 include/net/udplite.h         	UDP_SKB_CB(skb)->partial_cov = 1;
skb                64 include/net/udplite.h 		UDP_SKB_CB(skb)->cscov = cscov;
skb                65 include/net/udplite.h 		if (skb->ip_summed == CHECKSUM_COMPLETE)
skb                66 include/net/udplite.h 			skb->ip_summed = CHECKSUM_NONE;
skb                67 include/net/udplite.h 		skb->csum_valid = 0;
skb                74 include/net/udplite.h static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
skb                76 include/net/udplite.h 	const struct udp_sock *up = udp_sk(skb->sk);
skb                88 include/net/udplite.h 			udp_hdr(skb)->len = htons(up->pcslen);
skb               102 include/net/udplite.h 	skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */
skb               104 include/net/udplite.h 	skb_queue_walk(&sk->sk_write_queue, skb) {
skb               105 include/net/udplite.h 		const int off = skb_transport_offset(skb);
skb               106 include/net/udplite.h 		const int len = skb->len - off;
skb               108 include/net/udplite.h 		csum = skb_checksum(skb, off, (cscov > len)? len : cscov, csum);
skb               117 include/net/udplite.h static inline __wsum udplite_csum(struct sk_buff *skb)
skb               119 include/net/udplite.h 	const struct udp_sock *up = udp_sk(skb->sk);
skb               120 include/net/udplite.h 	const int off = skb_transport_offset(skb);
skb               121 include/net/udplite.h 	int len = skb->len - off;
skb               126 include/net/udplite.h 		udp_hdr(skb)->len = htons(up->pcslen);
skb               128 include/net/udplite.h 	skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */
skb               130 include/net/udplite.h 	return skb_checksum(skb, off, len, 0);
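
The udplite.h entries above implement UDP-Lite's defining feature: the checksum may cover only the first cscov bytes of the datagram, with 0 meaning full coverage and any nonzero value below 8 invalid because the 8-byte header must always be covered (see the cscov < 8 test at line 54). A sketch of partial-coverage checksumming:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 16-bit one's-complement sum (handles odd lengths by zero-padding). */
static uint16_t csum16(const uint8_t *p, size_t len)
{
	uint64_t sum = 0;

	for (size_t i = 0; i < len; i++)
		sum += (uint64_t)p[i] << ((i & 1) ? 0 : 8);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* UDP-Lite coverage rules, as in udplite_checksum_init(): 0 means
 * the whole datagram; nonzero below 8 or beyond the datagram is
 * invalid, since the header itself must always be covered. */
static int udplite_csum_cov(const uint8_t *dgram, size_t len,
			    size_t cscov, uint16_t *out)
{
	if (cscov == 0)
		cscov = len;
	else if (cscov < 8 || cscov > len)
		return -1;
	*out = csum16(dgram, cscov);
	return 0;
}

int main(void)
{
	uint8_t dgram[16] = { 0 };
	uint16_t sum;

	dgram[8] = 0xaa;	/* payload byte outside a cscov=8 window */
	if (!udplite_csum_cov(dgram, sizeof(dgram), 8, &sum))
		printf("cscov=8  sum=%04x\n", sum);
	if (!udplite_csum_cov(dgram, sizeof(dgram), 0, &sum))
		printf("full     sum=%04x\n", sum);
	return 0;
}
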
skb               295 include/net/vxlan.h static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
skb               300 include/net/vxlan.h 	if (!skb->encapsulation)
skb               303 include/net/vxlan.h 	switch (vlan_get_protocol(skb)) {
skb               305 include/net/vxlan.h 		l4_hdr = ip_hdr(skb)->protocol;
skb               308 include/net/vxlan.h 		l4_hdr = ipv6_hdr(skb)->nexthdr;
skb               315 include/net/vxlan.h 	    (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb               316 include/net/vxlan.h 	     skb->inner_protocol != htons(ETH_P_TEB) ||
skb               317 include/net/vxlan.h 	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
skb               319 include/net/vxlan.h 	     (skb->ip_summed != CHECKSUM_NONE &&
skb               320 include/net/vxlan.h 	      !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
skb               331 include/net/vxlan.h static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
skb               333 include/net/vxlan.h 	return (struct vxlanhdr *)(udp_hdr(skb) + 1);
skb               192 include/net/x25.h int x25_parse_address_block(struct sk_buff *skb,
skb                10 include/net/x25device.h static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev)
skb                12 include/net/x25device.h 	skb->dev = dev;
skb                13 include/net/x25device.h 	skb_reset_mac_header(skb);
skb                14 include/net/x25device.h 	skb->pkt_type = PACKET_HOST;
skb               299 include/net/xfrm.h 			 struct sk_buff *skb,
skb               302 include/net/xfrm.h 			   struct sk_buff *skb,
skb               305 include/net/xfrm.h 	int	(*overflow)(struct xfrm_state *x, struct sk_buff *skb);
skb               309 include/net/xfrm.h 	struct xfrm_if	*(*decode_session)(struct sk_buff *skb,
skb               362 include/net/xfrm.h 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               363 include/net/xfrm.h 	int			(*output_finish)(struct sock *sk, struct sk_buff *skb);
skb               365 include/net/xfrm.h 						 struct sk_buff *skb);
skb               367 include/net/xfrm.h 						  struct sk_buff *skb);
skb               368 include/net/xfrm.h 	int			(*transport_finish)(struct sk_buff *skb,
skb               370 include/net/xfrm.h 	void			(*local_error)(struct sk_buff *skb, u32 mtu);
skb               380 include/net/xfrm.h 	int			(*callback)(struct sk_buff *skb, u8 protocol,
skb               402 include/net/xfrm.h 	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
skb               417 include/net/xfrm.h 	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
skb               706 include/net/xfrm.h 				      struct sk_buff *skb);
skb               707 include/net/xfrm.h void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
skb               709 include/net/xfrm.h void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
skb               710 include/net/xfrm.h void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
skb               712 include/net/xfrm.h void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
skb               737 include/net/xfrm.h 					     struct sk_buff *skb)
skb               742 include/net/xfrm.h 					   struct sk_buff *skb, __be32 net_seq)
skb               746 include/net/xfrm.h static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
skb               751 include/net/xfrm.h static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
skb               757 include/net/xfrm.h 				     struct sk_buff *skb, u8 proto)
skb              1037 include/net/xfrm.h struct sec_path *secpath_set(struct sk_buff *skb);
skb              1040 include/net/xfrm.h secpath_reset(struct sk_buff *skb)
skb              1043 include/net/xfrm.h 	skb_ext_del(skb, SKB_EXT_SEC_PATH);
skb              1086 include/net/xfrm.h int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
skb              1090 include/net/xfrm.h 				       struct sk_buff *skb,
skb              1093 include/net/xfrm.h 	struct net *net = dev_net(skb->dev);
skb              1097 include/net/xfrm.h 		return __xfrm_policy_check(sk, ndir, skb, family);
skb              1099 include/net/xfrm.h 	return	(!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
skb              1100 include/net/xfrm.h 		(skb_dst(skb)->flags & DST_NOPOLICY) ||
skb              1101 include/net/xfrm.h 		__xfrm_policy_check(sk, ndir, skb, family);
skb              1104 include/net/xfrm.h static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
skb              1106 include/net/xfrm.h 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
skb              1109 include/net/xfrm.h static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
skb              1111 include/net/xfrm.h 	return xfrm_policy_check(sk, dir, skb, AF_INET);
skb              1114 include/net/xfrm.h static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
skb              1116 include/net/xfrm.h 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
skb              1120 include/net/xfrm.h 					     struct sk_buff *skb)
skb              1122 include/net/xfrm.h 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
skb              1126 include/net/xfrm.h 					     struct sk_buff *skb)
skb              1128 include/net/xfrm.h 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
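
__xfrm_policy_check2() above shows a classic fast-path gate: when no policies exist in the netns (policy_count is zero) and the skb carries no sec_path, or the dst is flagged DST_NOPOLICY, the packet is accepted without the expensive lookup. A sketch of that gate, with hypothetical stand-in types:

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the per-netns policy counter and per-skb markers. */
struct mock_net { int policy_count; };
struct mock_skb { bool has_secpath; bool dst_nopolicy; };

static int slow_policy_lookup(const struct mock_net *net,
			      const struct mock_skb *skb)
{
	/* Placeholder for the real __xfrm_policy_check() lookup. */
	(void)net; (void)skb;
	printf("slow path taken\n");
	return 1;
}

/* Gate in the spirit of __xfrm_policy_check2(): accept cheaply when
 * nothing could possibly match, otherwise do the full lookup. */
static int policy_check(const struct mock_net *net,
			const struct mock_skb *skb)
{
	if ((!net->policy_count && !skb->has_secpath) || skb->dst_nopolicy)
		return 1;
	return slow_policy_lookup(net, skb);
}

int main(void)
{
	struct mock_net net = { 0 };
	struct mock_skb skb = { false, false };

	printf("fast: %d\n", policy_check(&net, &skb));
	net.policy_count = 1;
	printf("slow: %d\n", policy_check(&net, &skb));
	return 0;
}
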
skb              1131 include/net/xfrm.h int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
skb              1134 include/net/xfrm.h static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
skb              1137 include/net/xfrm.h 	return __xfrm_decode_session(skb, fl, family, 0);
skb              1140 include/net/xfrm.h static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
skb              1144 include/net/xfrm.h 	return __xfrm_decode_session(skb, fl, family, 1);
skb              1147 include/net/xfrm.h int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
skb              1149 include/net/xfrm.h static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
skb              1151 include/net/xfrm.h 	struct net *net = dev_net(skb->dev);
skb              1154 include/net/xfrm.h 		(skb_dst(skb)->flags & DST_NOXFRM) ||
skb              1155 include/net/xfrm.h 		__xfrm_route_forward(skb, family);
skb              1158 include/net/xfrm.h static inline int xfrm4_route_forward(struct sk_buff *skb)
skb              1160 include/net/xfrm.h 	return xfrm_route_forward(skb, AF_INET);
skb              1163 include/net/xfrm.h static inline int xfrm6_route_forward(struct sk_buff *skb)
skb              1165 include/net/xfrm.h 	return xfrm_route_forward(skb, AF_INET6);
skb              1201 include/net/xfrm.h static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
skb              1202 include/net/xfrm.h static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
skb              1203 include/net/xfrm.h static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
skb              1207 include/net/xfrm.h static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
skb              1211 include/net/xfrm.h static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
skb              1215 include/net/xfrm.h static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
skb              1222 include/net/xfrm.h 					     struct sk_buff *skb)
skb              1227 include/net/xfrm.h 					     struct sk_buff *skb)
skb              1396 include/net/xfrm.h 	int (*handler)(struct sk_buff *skb);
skb              1397 include/net/xfrm.h 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
skb              1399 include/net/xfrm.h 	int (*cb_handler)(struct sk_buff *skb, int err);
skb              1400 include/net/xfrm.h 	int (*err_handler)(struct sk_buff *skb, u32 info);
skb              1407 include/net/xfrm.h 	int (*handler)(struct sk_buff *skb);
skb              1408 include/net/xfrm.h 	int (*cb_handler)(struct sk_buff *skb, int err);
skb              1409 include/net/xfrm.h 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb              1418 include/net/xfrm.h 	int (*handler)(struct sk_buff *skb);
skb              1419 include/net/xfrm.h 	int (*err_handler)(struct sk_buff *skb, u32 info);
skb              1426 include/net/xfrm.h 	int (*handler)(struct sk_buff *skb);
skb              1427 include/net/xfrm.h 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb              1548 include/net/xfrm.h int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
skb              1549 include/net/xfrm.h int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
skb              1550 include/net/xfrm.h int xfrm_trans_queue(struct sk_buff *skb,
skb              1553 include/net/xfrm.h int xfrm_output_resume(struct sk_buff *skb, int err);
skb              1554 include/net/xfrm.h int xfrm_output(struct sock *sk, struct sk_buff *skb);
skb              1557 include/net/xfrm.h int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
skb              1560 include/net/xfrm.h void xfrm_local_error(struct sk_buff *skb, int mtu);
skb              1561 include/net/xfrm.h int xfrm4_extract_header(struct sk_buff *skb);
skb              1562 include/net/xfrm.h int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
skb              1563 include/net/xfrm.h int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
skb              1565 include/net/xfrm.h int xfrm4_transport_finish(struct sk_buff *skb, int async);
skb              1566 include/net/xfrm.h int xfrm4_rcv(struct sk_buff *skb);
skb              1567 include/net/xfrm.h int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
skb              1569 include/net/xfrm.h static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
skb              1571 include/net/xfrm.h 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
skb              1572 include/net/xfrm.h 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
skb              1573 include/net/xfrm.h 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
skb              1574 include/net/xfrm.h 	return xfrm_input(skb, nexthdr, spi, 0);
skb              1577 include/net/xfrm.h int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
skb              1578 include/net/xfrm.h int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
skb              1579 include/net/xfrm.h int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
skb              1584 include/net/xfrm.h void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
skb              1585 include/net/xfrm.h int xfrm6_extract_header(struct sk_buff *skb);
skb              1586 include/net/xfrm.h int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
skb              1587 include/net/xfrm.h int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
skb              1589 include/net/xfrm.h int xfrm6_transport_finish(struct sk_buff *skb, int async);
skb              1590 include/net/xfrm.h int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
skb              1591 include/net/xfrm.h int xfrm6_rcv(struct sk_buff *skb);
skb              1592 include/net/xfrm.h int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
skb              1594 include/net/xfrm.h void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
skb              1601 include/net/xfrm.h int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
skb              1602 include/net/xfrm.h int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
skb              1603 include/net/xfrm.h int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
skb              1604 include/net/xfrm.h int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
skb              1608 include/net/xfrm.h int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
skb              1617 include/net/xfrm.h static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
skb              1620 include/net/xfrm.h  	kfree_skb(skb);
skb              1678 include/net/xfrm.h int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
skb              1823 include/net/xfrm.h static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
skb              1825 include/net/xfrm.h 	struct sec_path *sp = skb_sec_path(skb);
skb              1831 include/net/xfrm.h static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
skb              1834 include/net/xfrm.h 	struct sec_path *sp = skb_sec_path(skb);
skb              1848 include/net/xfrm.h void xfrm_dev_resume(struct sk_buff *skb);
skb              1850 include/net/xfrm.h struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
skb              1853 include/net/xfrm.h bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
skb              1902 include/net/xfrm.h static inline void xfrm_dev_resume(struct sk_buff *skb)
skb              1910 include/net/xfrm.h static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
skb              1912 include/net/xfrm.h 	return skb;
skb              1928 include/net/xfrm.h static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
skb              1953 include/net/xfrm.h static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
skb              1958 include/net/xfrm.h 		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
skb              1969 include/net/xfrm.h static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
skb              1974 include/net/xfrm.h 		ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
skb              1978 include/net/xfrm.h static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
skb              1985 include/net/xfrm.h 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
skb              1989 include/net/xfrm.h 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
skb              2200 include/rdma/ib_verbs.h 	int (*send)(struct net_device *dev, struct sk_buff *skb,
skb                90 include/rdma/iw_portmap.h int iwpm_hello_cb(struct sk_buff *skb, struct netlink_callback *cb);
skb                16 include/rdma/rdma_netlink.h 	int (*doit)(struct sk_buff *skb, struct nlmsghdr *nlh,
skb                18 include/rdma/rdma_netlink.h 	int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
skb                63 include/rdma/rdma_netlink.h void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
skb                74 include/rdma/rdma_netlink.h int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
skb                84 include/rdma/rdma_netlink.h int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid);
skb                93 include/rdma/rdma_netlink.h int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid);
skb               103 include/rdma/rdma_netlink.h int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
skb                50 include/scsi/fc_frame.h #define fp_skb(fp)	(&((fp)->skb))
skb                51 include/scsi/fc_frame.h #define fr_hdr(fp)	((fp)->skb.data)
skb                52 include/scsi/fc_frame.h #define fr_len(fp)	((fp)->skb.len)
skb                53 include/scsi/fc_frame.h #define fr_cb(fp)	((struct fcoe_rcv_info *)&((fp)->skb.cb[0]))
skb                65 include/scsi/fc_frame.h 	struct sk_buff skb;
skb                85 include/scsi/fc_frame.h static inline struct fcoe_rcv_info *fcoe_dev_from_skb(const struct sk_buff *skb)
skb                87 include/scsi/fc_frame.h 	BUILD_BUG_ON(sizeof(struct fcoe_rcv_info) > sizeof(skb->cb));
skb                88 include/scsi/fc_frame.h 	return (struct fcoe_rcv_info *) skb->cb;
skb               256 include/scsi/libfcoe.h int fcoe_start_io(struct sk_buff *skb);
skb               373 include/scsi/libfcoe.h void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb);
skb               375 include/scsi/libfcoe.h int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
skb                87 include/scsi/libiscsi_tcp.h extern int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
skb                16 include/trace/events/net.h 	TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),
skb                18 include/trace/events/net.h 	TP_ARGS(skb, dev),
skb                42 include/trace/events/net.h 		__entry->queue_mapping = skb->queue_mapping;
skb                43 include/trace/events/net.h 		__entry->skbaddr = skb;
skb                44 include/trace/events/net.h 		__entry->vlan_tagged = skb_vlan_tag_present(skb);
skb                45 include/trace/events/net.h 		__entry->vlan_proto = ntohs(skb->vlan_proto);
skb                46 include/trace/events/net.h 		__entry->vlan_tci = skb_vlan_tag_get(skb);
skb                47 include/trace/events/net.h 		__entry->protocol = ntohs(skb->protocol);
skb                48 include/trace/events/net.h 		__entry->ip_summed = skb->ip_summed;
skb                49 include/trace/events/net.h 		__entry->len = skb->len;
skb                50 include/trace/events/net.h 		__entry->data_len = skb->data_len;
skb                51 include/trace/events/net.h 		__entry->network_offset = skb_network_offset(skb);
skb                53 include/trace/events/net.h 			skb_transport_header_was_set(skb);
skb                54 include/trace/events/net.h 		__entry->transport_offset = skb_transport_offset(skb);
skb                55 include/trace/events/net.h 		__entry->tx_flags = skb_shinfo(skb)->tx_flags;
skb                56 include/trace/events/net.h 		__entry->gso_size = skb_shinfo(skb)->gso_size;
skb                57 include/trace/events/net.h 		__entry->gso_segs = skb_shinfo(skb)->gso_segs;
skb                58 include/trace/events/net.h 		__entry->gso_type = skb_shinfo(skb)->gso_type;
skb                73 include/trace/events/net.h 	TP_PROTO(struct sk_buff *skb,
skb                78 include/trace/events/net.h 	TP_ARGS(skb, rc, dev, skb_len),
skb                88 include/trace/events/net.h 		__entry->skbaddr = skb;
skb               123 include/trace/events/net.h 	TP_PROTO(struct sk_buff *skb),
skb               125 include/trace/events/net.h 	TP_ARGS(skb),
skb               130 include/trace/events/net.h 		__string(	name,		skb->dev->name	)
skb               134 include/trace/events/net.h 		__entry->skbaddr = skb;
skb               135 include/trace/events/net.h 		__entry->len = skb->len;
skb               136 include/trace/events/net.h 		__assign_str(name, skb->dev->name);
skb               145 include/trace/events/net.h 	TP_PROTO(struct sk_buff *skb),
skb               147 include/trace/events/net.h 	TP_ARGS(skb)
skb               152 include/trace/events/net.h 	TP_PROTO(struct sk_buff *skb),
skb               154 include/trace/events/net.h 	TP_ARGS(skb)
skb               159 include/trace/events/net.h 	TP_PROTO(struct sk_buff *skb),
skb               161 include/trace/events/net.h 	TP_ARGS(skb)
skb               166 include/trace/events/net.h 	TP_PROTO(const struct sk_buff *skb),
skb               168 include/trace/events/net.h 	TP_ARGS(skb),
skb               171 include/trace/events/net.h 		__string(	name,			skb->dev->name	)
skb               193 include/trace/events/net.h 		__assign_str(name, skb->dev->name);
skb               195 include/trace/events/net.h 		__entry->napi_id = skb->napi_id;
skb               199 include/trace/events/net.h 		__entry->queue_mapping = skb->queue_mapping;
skb               200 include/trace/events/net.h 		__entry->skbaddr = skb;
skb               201 include/trace/events/net.h 		__entry->vlan_tagged = skb_vlan_tag_present(skb);
skb               202 include/trace/events/net.h 		__entry->vlan_proto = ntohs(skb->vlan_proto);
skb               203 include/trace/events/net.h 		__entry->vlan_tci = skb_vlan_tag_get(skb);
skb               204 include/trace/events/net.h 		__entry->protocol = ntohs(skb->protocol);
skb               205 include/trace/events/net.h 		__entry->ip_summed = skb->ip_summed;
skb               206 include/trace/events/net.h 		__entry->hash = skb->hash;
skb               207 include/trace/events/net.h 		__entry->l4_hash = skb->l4_hash;
skb               208 include/trace/events/net.h 		__entry->len = skb->len;
skb               209 include/trace/events/net.h 		__entry->data_len = skb->data_len;
skb               210 include/trace/events/net.h 		__entry->truesize = skb->truesize;
skb               211 include/trace/events/net.h 		__entry->mac_header_valid = skb_mac_header_was_set(skb);
skb               212 include/trace/events/net.h 		__entry->mac_header = skb_mac_header(skb) - skb->data;
skb               213 include/trace/events/net.h 		__entry->nr_frags = skb_shinfo(skb)->nr_frags;
skb               214 include/trace/events/net.h 		__entry->gso_size = skb_shinfo(skb)->gso_size;
skb               215 include/trace/events/net.h 		__entry->gso_type = skb_shinfo(skb)->gso_type;
skb               230 include/trace/events/net.h 	TP_PROTO(const struct sk_buff *skb),
skb               232 include/trace/events/net.h 	TP_ARGS(skb)
skb               237 include/trace/events/net.h 	TP_PROTO(const struct sk_buff *skb),
skb               239 include/trace/events/net.h 	TP_ARGS(skb)
skb               244 include/trace/events/net.h 	TP_PROTO(const struct sk_buff *skb),
skb               246 include/trace/events/net.h 	TP_ARGS(skb)
skb               251 include/trace/events/net.h 	TP_PROTO(const struct sk_buff *skb),
skb               253 include/trace/events/net.h 	TP_ARGS(skb)
skb               258 include/trace/events/net.h 	TP_PROTO(const struct sk_buff *skb),
skb               260 include/trace/events/net.h 	TP_ARGS(skb)
skb               265 include/trace/events/net.h 	TP_PROTO(const struct sk_buff *skb),
skb               267 include/trace/events/net.h 	TP_ARGS(skb)
skb                15 include/trace/events/qdisc.h 		 int packets, struct sk_buff *skb),
skb                17 include/trace/events/qdisc.h 	TP_ARGS(qdisc, txq, packets, skb),
skb                34 include/trace/events/qdisc.h 		__entry->packets	= skb ? packets : 0;
skb                35 include/trace/events/qdisc.h 		__entry->skbaddr	= skb;
skb               639 include/trace/events/rxrpc.h 	    TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
skb               642 include/trace/events/rxrpc.h 	    TP_ARGS(skb, op, usage, mod_count, flags, where),
skb               645 include/trace/events/rxrpc.h 		    __field(struct sk_buff *,		skb		)
skb               654 include/trace/events/rxrpc.h 		    __entry->skb = skb;
skb               663 include/trace/events/rxrpc.h 		      __entry->skb,
skb                69 include/trace/events/sctp.h 		struct sk_buff *skb = chunk->skb;
skb                72 include/trace/events/sctp.h 		__entry->mark = skb->mark;
skb                 3 include/trace/events/skb.h #define TRACE_SYSTEM skb
skb                17 include/trace/events/skb.h 	TP_PROTO(struct sk_buff *skb, void *location),
skb                19 include/trace/events/skb.h 	TP_ARGS(skb, location),
skb                28 include/trace/events/skb.h 		__entry->skbaddr = skb;
skb                30 include/trace/events/skb.h 		__entry->protocol = ntohs(skb->protocol);
skb                39 include/trace/events/skb.h 	TP_PROTO(struct sk_buff *skb),
skb                41 include/trace/events/skb.h 	TP_ARGS(skb),
skb                48 include/trace/events/skb.h 		__entry->skbaddr = skb;
skb                56 include/trace/events/skb.h 	TP_PROTO(const struct sk_buff *skb, int len),
skb                58 include/trace/events/skb.h 	TP_ARGS(skb, len),
skb                66 include/trace/events/skb.h 		__entry->skbaddr = skb;
skb                72 include/trace/events/sock.h 	TP_PROTO(struct sock *sk, struct sk_buff *skb),
skb                74 include/trace/events/sock.h 	TP_ARGS(sk, skb),
skb                84 include/trace/events/sock.h 		__entry->truesize   = skb->truesize;
skb                52 include/trace/events/tcp.h 	TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
skb                54 include/trace/events/tcp.h 	TP_ARGS(sk, skb),
skb                72 include/trace/events/tcp.h 		__entry->skbaddr = skb;
skb                97 include/trace/events/tcp.h 	TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
skb                99 include/trace/events/tcp.h 	TP_ARGS(sk, skb)
skb               108 include/trace/events/tcp.h 	TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
skb               110 include/trace/events/tcp.h 	TP_ARGS(sk, skb)
skb               231 include/trace/events/tcp.h 	TP_PROTO(struct sock *sk, struct sk_buff *skb),
skb               233 include/trace/events/tcp.h 	TP_ARGS(sk, skb),
skb               254 include/trace/events/tcp.h 		const struct tcphdr *th = (const struct tcphdr *)skb->data;
skb               266 include/trace/events/tcp.h 		__entry->mark = skb->mark;
skb               268 include/trace/events/tcp.h 		__entry->data_len = skb->len - __tcp_hdrlen(th);
skb                47 include/uapi/linux/netfilter/nfnetlink_compat.h #define NFA_NEST(skb, type) \
skb                48 include/uapi/linux/netfilter/nfnetlink_compat.h ({	struct nfattr *__start = (struct nfattr *)skb_tail_pointer(skb); \
skb                49 include/uapi/linux/netfilter/nfnetlink_compat.h 	NFA_PUT(skb, (NFNL_NFA_NEST | type), 0, NULL); \
skb                51 include/uapi/linux/netfilter/nfnetlink_compat.h #define NFA_NEST_END(skb, start) \
skb                52 include/uapi/linux/netfilter/nfnetlink_compat.h ({      (start)->nfa_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
skb                53 include/uapi/linux/netfilter/nfnetlink_compat.h         (skb)->len; })
skb                54 include/uapi/linux/netfilter/nfnetlink_compat.h #define NFA_NEST_CANCEL(skb, start) \
skb                56 include/uapi/linux/netfilter/nfnetlink_compat.h                 skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
skb               696 ipc/mqueue.c   static inline void set_cookie(struct sk_buff *skb, char code)
skb               698 ipc/mqueue.c   	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
skb               194 kernel/audit.c 	struct sk_buff       *skb;	/* formatted skb ready to send */
skb               202 kernel/audit.c 	struct sk_buff *skb;
skb               526 kernel/audit.c static void kauditd_printk_skb(struct sk_buff *skb)
skb               528 kernel/audit.c 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
skb               543 kernel/audit.c static void kauditd_rehold_skb(struct sk_buff *skb)
skb               546 kernel/audit.c 	skb_queue_head(&audit_hold_queue, skb);
skb               561 kernel/audit.c static void kauditd_hold_skb(struct sk_buff *skb)
skb               565 kernel/audit.c 	kauditd_printk_skb(skb);
skb               569 kernel/audit.c 		kfree_skb(skb);
skb               576 kernel/audit.c 		skb_queue_tail(&audit_hold_queue, skb);
skb               582 kernel/audit.c 	kfree_skb(skb);
skb               594 kernel/audit.c static void kauditd_retry_skb(struct sk_buff *skb)
skb               599 kernel/audit.c 	skb_queue_tail(&audit_retry_queue, skb);
skb               616 kernel/audit.c 	struct sk_buff *skb;
skb               636 kernel/audit.c 	while ((skb = skb_dequeue(&audit_retry_queue)))
skb               637 kernel/audit.c 		kauditd_hold_skb(skb);
skb               651 kernel/audit.c static int auditd_send_unicast_skb(struct sk_buff *skb)
skb               670 kernel/audit.c 		kfree_skb(skb);
skb               679 kernel/audit.c 	rc = netlink_unicast(sk, skb, portid, 0);
skb               710 kernel/audit.c 			      void (*skb_hook)(struct sk_buff *skb),
skb               711 kernel/audit.c 			      void (*err_hook)(struct sk_buff *skb))
skb               714 kernel/audit.c 	struct sk_buff *skb;
skb               720 kernel/audit.c 	while ((skb = skb_dequeue(queue))) {
skb               723 kernel/audit.c 			(*skb_hook)(skb);
skb               728 kernel/audit.c 				(*err_hook)(skb);
skb               733 kernel/audit.c 		skb_get(skb);
skb               734 kernel/audit.c 		rc = netlink_unicast(sk, skb, portid, 0);
skb               742 kernel/audit.c 					(*err_hook)(skb);
skb               749 kernel/audit.c 				skb_queue_head(queue, skb);
skb               752 kernel/audit.c 			consume_skb(skb);
skb               770 kernel/audit.c static void kauditd_send_multicast_skb(struct sk_buff *skb)
skb               792 kernel/audit.c 	copy = skb_copy(skb, GFP_KERNEL);
skb               796 kernel/audit.c 	nlh->nlmsg_len = skb->len;
skb               885 kernel/audit.c 	struct sk_buff *skb;
skb               892 kernel/audit.c 	while ((skb = __skb_dequeue(&dest->q)) != NULL)
skb               893 kernel/audit.c 		netlink_unicast(sk, skb, dest->portid, 0);
skb               904 kernel/audit.c 	struct sk_buff	*skb;
skb               910 kernel/audit.c 	skb = nlmsg_new(size, GFP_KERNEL);
skb               911 kernel/audit.c 	if (!skb)
skb               914 kernel/audit.c 	nlh	= nlmsg_put(skb, 0, seq, t, size, flags);
skb               919 kernel/audit.c 	return skb;
skb               922 kernel/audit.c 	kfree_skb(skb);
skb               936 kernel/audit.c 	netlink_unicast(sk, reply->skb, reply->portid, 0);
skb               959 kernel/audit.c 	struct sk_buff *skb;
skb               967 kernel/audit.c 	skb = audit_make_reply(seq, type, done, multi, payload, size);
skb               968 kernel/audit.c 	if (!skb)
skb               973 kernel/audit.c 	reply->skb = skb;
skb               978 kernel/audit.c 	kfree_skb(skb);
skb               987 kernel/audit.c static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
skb              1027 kernel/audit.c 		if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
skb              1033 kernel/audit.c 		if (!netlink_capable(skb, CAP_AUDIT_WRITE))
skb              1074 kernel/audit.c static int audit_get_feature(struct sk_buff *skb)
skb              1078 kernel/audit.c 	seq = nlmsg_hdr(skb)->nlmsg_seq;
skb              1080 kernel/audit.c 	audit_send_reply(skb, seq, AUDIT_GET_FEATURE, 0, 0, &af, sizeof(af));
skb              1162 kernel/audit.c 	struct sk_buff *skb;
skb              1165 kernel/audit.c 	skb = audit_make_reply(0, AUDIT_REPLACE, 0, 0, &pvnr, sizeof(pvnr));
skb              1166 kernel/audit.c 	if (!skb)
skb              1168 kernel/audit.c 	return auditd_send_unicast_skb(skb);
skb              1171 kernel/audit.c static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
skb              1183 kernel/audit.c 	err = audit_netlink_ok(skb, msg_type);
skb              1206 kernel/audit.c 		audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
skb              1262 kernel/audit.c 						 NETLINK_CB(skb).portid,
skb              1263 kernel/audit.c 						 sock_net(NETLINK_CB(skb).sk));
skb              1312 kernel/audit.c 		err = audit_get_feature(skb);
skb              1375 kernel/audit.c 		err = audit_list_rules_send(skb, seq);
skb              1441 kernel/audit.c 		audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0,
skb              1453 kernel/audit.c 		audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
skb              1502 kernel/audit.c static void audit_receive(struct sk_buff  *skb)
skb              1512 kernel/audit.c 	nlh = nlmsg_hdr(skb);
skb              1513 kernel/audit.c 	len = skb->len;
skb              1517 kernel/audit.c 		err = audit_receive_msg(skb, nlh);
skb              1520 kernel/audit.c 			netlink_ack(skb, nlh, err, NULL);
skb              1672 kernel/audit.c 	kfree_skb(ab->skb);
skb              1685 kernel/audit.c 	ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask);
skb              1686 kernel/audit.c 	if (!ab->skb)
skb              1688 kernel/audit.c 	if (!nlmsg_put(ab->skb, 0, 0, type, 0, 0))
skb              1821 kernel/audit.c 	struct sk_buff *skb = ab->skb;
skb              1822 kernel/audit.c 	int oldtail = skb_tailroom(skb);
skb              1823 kernel/audit.c 	int ret = pskb_expand_head(skb, 0, extra, ab->gfp_mask);
skb              1824 kernel/audit.c 	int newtail = skb_tailroom(skb);
skb              1831 kernel/audit.c 	skb->truesize += newtail - oldtail;
skb              1845 kernel/audit.c 	struct sk_buff *skb;
skb              1851 kernel/audit.c 	BUG_ON(!ab->skb);
skb              1852 kernel/audit.c 	skb = ab->skb;
skb              1853 kernel/audit.c 	avail = skb_tailroom(skb);
skb              1860 kernel/audit.c 	len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args);
skb              1869 kernel/audit.c 		len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
skb              1872 kernel/audit.c 		skb_put(skb, len);
skb              1914 kernel/audit.c 	struct sk_buff *skb;
skb              1919 kernel/audit.c 	BUG_ON(!ab->skb);
skb              1920 kernel/audit.c 	skb = ab->skb;
skb              1921 kernel/audit.c 	avail = skb_tailroom(skb);
skb              1931 kernel/audit.c 	ptr = skb_tail_pointer(skb);
skb              1935 kernel/audit.c 	skb_put(skb, len << 1); /* new string is twice the old string */
skb              1947 kernel/audit.c 	struct sk_buff *skb;
skb              1952 kernel/audit.c 	BUG_ON(!ab->skb);
skb              1953 kernel/audit.c 	skb = ab->skb;
skb              1954 kernel/audit.c 	avail = skb_tailroom(skb);
skb              1961 kernel/audit.c 	ptr = skb_tail_pointer(skb);
skb              1967 kernel/audit.c 	skb_put(skb, slen + 2);	/* don't include null terminator */
skb              2306 kernel/audit.c 	struct sk_buff *skb;
skb              2313 kernel/audit.c 		skb = ab->skb;
skb              2314 kernel/audit.c 		ab->skb = NULL;
skb              2318 kernel/audit.c 		nlh = nlmsg_hdr(skb);
skb              2319 kernel/audit.c 		nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
skb              2322 kernel/audit.c 		skb_queue_tail(&audit_queue, skb);
skb              1071 kernel/auditfilter.c 	struct sk_buff *skb;
skb              1084 kernel/auditfilter.c 			skb = audit_make_reply(seq, AUDIT_LIST_RULES, 0, 1,
skb              1087 kernel/auditfilter.c 			if (skb)
skb              1088 kernel/auditfilter.c 				skb_queue_tail(q, skb);
skb              1092 kernel/auditfilter.c 	skb = audit_make_reply(seq, AUDIT_LIST_RULES, 1, 1, NULL, 0);
skb              1093 kernel/auditfilter.c 	if (skb)
skb              1094 kernel/auditfilter.c 		skb_queue_tail(q, skb);
skb               627 kernel/bpf/cgroup.c 				struct sk_buff *skb,
skb               630 kernel/bpf/cgroup.c 	unsigned int offset = skb->data - skb_network_header(skb);
skb               643 kernel/bpf/cgroup.c 	save_sk = skb->sk;
skb               644 kernel/bpf/cgroup.c 	skb->sk = sk;
skb               645 kernel/bpf/cgroup.c 	__skb_push(skb, offset);
skb               648 kernel/bpf/cgroup.c 	bpf_compute_and_save_data_end(skb, &saved_data_end);
skb               652 kernel/bpf/cgroup.c 			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
skb               654 kernel/bpf/cgroup.c 		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
skb               658 kernel/bpf/cgroup.c 	bpf_restore_data_end(skb, saved_data_end);
skb               659 kernel/bpf/cgroup.c 	__skb_pull(skb, offset);
skb               660 kernel/bpf/cgroup.c 	skb->sk = save_sk;
skb                62 kernel/bpf/core.c void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
skb                67 kernel/bpf/core.c 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
skb                69 kernel/bpf/core.c 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
skb                71 kernel/bpf/core.c 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
skb              2102 kernel/bpf/core.c int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
skb               165 kernel/bpf/cpumap.c 					 struct sk_buff *skb)
skb               195 kernel/bpf/cpumap.c 	skb = build_skb_around(skb, pkt_data_start, frame_size);
skb               196 kernel/bpf/cpumap.c 	if (unlikely(!skb))
skb               199 kernel/bpf/cpumap.c 	skb_reserve(skb, hard_start_headroom);
skb               200 kernel/bpf/cpumap.c 	__skb_put(skb, xdpf->len);
skb               202 kernel/bpf/cpumap.c 		skb_metadata_set(skb, xdpf->metasize);
skb               205 kernel/bpf/cpumap.c 	skb->protocol = eth_type_trans(skb, xdpf->dev_rx);
skb               219 kernel/bpf/cpumap.c 	return skb;
skb               309 kernel/bpf/cpumap.c 			struct sk_buff *skb = skbs[i];
skb               312 kernel/bpf/cpumap.c 			skb = cpu_map_build_skb(rcpu, xdpf, skb);
skb               313 kernel/bpf/cpumap.c 			if (!skb) {
skb               319 kernel/bpf/cpumap.c 			ret = netif_receive_skb_core(skb);
skb               482 kernel/bpf/devmap.c int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
skb               487 kernel/bpf/devmap.c 	err = xdp_ok_fwd_dev(dst->dev, skb->len);
skb               490 kernel/bpf/devmap.c 	skb->dev = dst->dev;
skb               491 kernel/bpf/devmap.c 	generic_xdp_tx(skb, xdp_prog);
skb                72 kernel/taskstats.c 	struct sk_buff *skb;
skb                78 kernel/taskstats.c 	skb = genlmsg_new(size, GFP_KERNEL);
skb                79 kernel/taskstats.c 	if (!skb)
skb                85 kernel/taskstats.c 		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
skb                87 kernel/taskstats.c 		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
skb                89 kernel/taskstats.c 		nlmsg_free(skb);
skb                93 kernel/taskstats.c 	*skbp = skb;
skb               100 kernel/taskstats.c static int send_reply(struct sk_buff *skb, struct genl_info *info)
skb               102 kernel/taskstats.c 	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
skb               105 kernel/taskstats.c 	genlmsg_end(skb, reply);
skb               107 kernel/taskstats.c 	return genlmsg_reply(skb, info);
skb               113 kernel/taskstats.c static void send_cpu_listeners(struct sk_buff *skb,
skb               116 kernel/taskstats.c 	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
skb               118 kernel/taskstats.c 	struct sk_buff *skb_next, *skb_cur = skb;
skb               122 kernel/taskstats.c 	genlmsg_end(skb, reply);
skb               359 kernel/taskstats.c static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
skb               368 kernel/taskstats.c 	na = nla_nest_start_noflag(skb, aggr);
skb               372 kernel/taskstats.c 	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
skb               373 kernel/taskstats.c 		nla_nest_cancel(skb, na);
skb               376 kernel/taskstats.c 	ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
skb               379 kernel/taskstats.c 		nla_nest_cancel(skb, na);
skb               382 kernel/taskstats.c 	nla_nest_end(skb, na);
skb               389 kernel/taskstats.c static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
skb               540 kernel/taskstats.c static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
skb               664 kernel/taskstats.c static int taskstats_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
skb               283 lib/kobject_uevent.c 	struct sk_buff *skb = NULL;
skb               289 lib/kobject_uevent.c 	skb = alloc_skb(len + env->buflen, GFP_KERNEL);
skb               290 lib/kobject_uevent.c 	if (!skb)
skb               294 lib/kobject_uevent.c 	scratch = skb_put(skb, len);
skb               297 lib/kobject_uevent.c 	skb_put_data(skb, env->buf, env->buflen);
skb               299 lib/kobject_uevent.c 	parms = &NETLINK_CB(skb);
skb               305 lib/kobject_uevent.c 	return skb;
skb               312 lib/kobject_uevent.c 	struct sk_buff *skb = NULL;
skb               323 lib/kobject_uevent.c 		if (!skb) {
skb               325 lib/kobject_uevent.c 			skb = alloc_uevent_skb(env, action_string, devpath);
skb               326 lib/kobject_uevent.c 			if (!skb)
skb               330 lib/kobject_uevent.c 		retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1,
skb               336 lib/kobject_uevent.c 	consume_skb(skb);
skb               347 lib/kobject_uevent.c 	struct sk_buff *skb = NULL;
skb               350 lib/kobject_uevent.c 	skb = alloc_uevent_skb(env, action_string, devpath);
skb               351 lib/kobject_uevent.c 	if (!skb)
skb               356 lib/kobject_uevent.c 		struct netlink_skb_parms *parms = &NETLINK_CB(skb);
skb               371 lib/kobject_uevent.c 	ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL);
skb               681 lib/kobject_uevent.c static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb,
skb               696 lib/kobject_uevent.c 	if ((skb->len + ret) > UEVENT_BUFFER_SIZE) {
skb               702 lib/kobject_uevent.c 	skbc = skb_copy_expand(skb, 0, ret, GFP_KERNEL);
skb               724 lib/kobject_uevent.c static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               738 lib/kobject_uevent.c 	net = sock_net(NETLINK_CB(skb).sk);
skb               739 lib/kobject_uevent.c 	if (!netlink_ns_capable(skb, net->user_ns, CAP_SYS_ADMIN)) {
skb               745 lib/kobject_uevent.c 	ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);
skb               751 lib/kobject_uevent.c static void uevent_net_rcv(struct sk_buff *skb)
skb               753 lib/kobject_uevent.c 	netlink_rcv_skb(skb, &uevent_net_rcv_skb);
skb               636 lib/nlattr.c   struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
skb               640 lib/nlattr.c   	nla = skb_put(skb, nla_total_size(attrlen));
skb               664 lib/nlattr.c   struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
skb               667 lib/nlattr.c   	if (nla_need_padding_for_64bit(skb))
skb               668 lib/nlattr.c   		nla_align_64bit(skb, padattr);
skb               670 lib/nlattr.c   	return __nla_reserve(skb, attrtype, attrlen);
skb               684 lib/nlattr.c   void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
skb               686 lib/nlattr.c   	return skb_put_zero(skb, NLA_ALIGN(attrlen));
skb               702 lib/nlattr.c   struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
skb               704 lib/nlattr.c   	if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
skb               707 lib/nlattr.c   	return __nla_reserve(skb, attrtype, attrlen);
skb               725 lib/nlattr.c   struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen,
skb               730 lib/nlattr.c   	if (nla_need_padding_for_64bit(skb))
skb               734 lib/nlattr.c   	if (unlikely(skb_tailroom(skb) < len))
skb               737 lib/nlattr.c   	return __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
skb               751 lib/nlattr.c   void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
skb               753 lib/nlattr.c   	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
skb               756 lib/nlattr.c   	return __nla_reserve_nohdr(skb, attrlen);
skb               770 lib/nlattr.c   void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
skb               775 lib/nlattr.c   	nla = __nla_reserve(skb, attrtype, attrlen);
skb               791 lib/nlattr.c   void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
skb               796 lib/nlattr.c   	nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
skb               810 lib/nlattr.c   void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
skb               814 lib/nlattr.c   	start = __nla_reserve_nohdr(skb, attrlen);
skb               829 lib/nlattr.c   int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
skb               831 lib/nlattr.c   	if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
skb               834 lib/nlattr.c   	__nla_put(skb, attrtype, attrlen, data);
skb               850 lib/nlattr.c   int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
skb               855 lib/nlattr.c   	if (nla_need_padding_for_64bit(skb))
skb               859 lib/nlattr.c   	if (unlikely(skb_tailroom(skb) < len))
skb               862 lib/nlattr.c   	__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
skb               876 lib/nlattr.c   int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
skb               878 lib/nlattr.c   	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
skb               881 lib/nlattr.c   	__nla_put_nohdr(skb, attrlen, data);
skb               895 lib/nlattr.c   int nla_append(struct sk_buff *skb, int attrlen, const void *data)
skb               897 lib/nlattr.c   	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
skb               900 lib/nlattr.c   	skb_put_data(skb, data, attrlen);
skb                31 lib/test_blackhole_dev.c 	struct sk_buff *skb;
skb                37 lib/test_blackhole_dev.c 	skb = alloc_skb(SKB_SIZE, GFP_KERNEL);
skb                38 lib/test_blackhole_dev.c 	if (!skb)
skb                42 lib/test_blackhole_dev.c 	skb_reserve(skb, HEAD_SIZE);
skb                46 lib/test_blackhole_dev.c 	memset(__skb_put(skb, data_len), 0xf, data_len);
skb                50 lib/test_blackhole_dev.c 	uh = (struct udphdr *)skb_push(skb, sizeof(struct udphdr));
skb                51 lib/test_blackhole_dev.c 	skb_set_transport_header(skb, 0);
skb                56 lib/test_blackhole_dev.c 	ip6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
skb                57 lib/test_blackhole_dev.c 	skb_set_network_header(skb, 0);
skb                64 lib/test_blackhole_dev.c 	ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
skb                65 lib/test_blackhole_dev.c 	skb_set_mac_header(skb, 0);
skb                67 lib/test_blackhole_dev.c 	skb->protocol = htons(ETH_P_IPV6);
skb                68 lib/test_blackhole_dev.c 	skb->pkt_type = PACKET_HOST;
skb                69 lib/test_blackhole_dev.c 	skb->dev = blackhole_netdev;
skb                72 lib/test_blackhole_dev.c 	ret = dev_queue_xmit(skb);
skb              6470 lib/test_bpf.c 	struct sk_buff *skb;
skb              6475 lib/test_bpf.c 	skb = alloc_skb(MAX_DATA, GFP_KERNEL);
skb              6476 lib/test_bpf.c 	if (!skb)
skb              6479 lib/test_bpf.c 	__skb_put_data(skb, buf, size);
skb              6482 lib/test_bpf.c 	skb_reset_mac_header(skb);
skb              6483 lib/test_bpf.c 	skb->protocol = htons(ETH_P_IP);
skb              6484 lib/test_bpf.c 	skb->pkt_type = SKB_TYPE;
skb              6485 lib/test_bpf.c 	skb->mark = SKB_MARK;
skb              6486 lib/test_bpf.c 	skb->hash = SKB_HASH;
skb              6487 lib/test_bpf.c 	skb->queue_mapping = SKB_QUEUE_MAP;
skb              6488 lib/test_bpf.c 	skb->vlan_tci = SKB_VLAN_TCI;
skb              6489 lib/test_bpf.c 	skb->vlan_present = SKB_VLAN_PRESENT;
skb              6490 lib/test_bpf.c 	skb->vlan_proto = htons(ETH_P_IP);
skb              6492 lib/test_bpf.c 	skb->dev = &dev;
skb              6493 lib/test_bpf.c 	skb->dev->ifindex = SKB_DEV_IFINDEX;
skb              6494 lib/test_bpf.c 	skb->dev->type = SKB_DEV_TYPE;
skb              6495 lib/test_bpf.c 	skb_set_network_header(skb, min(size, ETH_HLEN));
skb              6497 lib/test_bpf.c 	return skb;
skb              6502 lib/test_bpf.c 	struct sk_buff *skb;
skb              6512 lib/test_bpf.c 	skb = populate_skb(test->data, test->test[sub].data_size);
skb              6513 lib/test_bpf.c 	if (!skb)
skb              6534 lib/test_bpf.c 		skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
skb              6537 lib/test_bpf.c 	return skb;
skb              6542 lib/test_bpf.c 	kfree_skb(skb);
skb              6806 lib/test_bpf.c 	struct sk_buff *skb[2];
skb              6820 lib/test_bpf.c 		skb[i] = dev_alloc_skb(headroom + data_size);
skb              6821 lib/test_bpf.c 		if (!skb[i]) {
skb              6828 lib/test_bpf.c 		skb_reserve(skb[i], headroom);
skb              6829 lib/test_bpf.c 		skb_put(skb[i], data_size);
skb              6830 lib/test_bpf.c 		skb[i]->protocol = htons(ETH_P_IP);
skb              6831 lib/test_bpf.c 		skb_reset_network_header(skb[i]);
skb              6832 lib/test_bpf.c 		skb_set_mac_header(skb[i], -ETH_HLEN);
skb              6834 lib/test_bpf.c 		skb_add_rx_frag(skb[i], 0, page[i], 0, 64, 64);
skb              6839 lib/test_bpf.c 	skb_shinfo(skb[0])->gso_size = 1448;
skb              6840 lib/test_bpf.c 	skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV4;
skb              6841 lib/test_bpf.c 	skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY;
skb              6842 lib/test_bpf.c 	skb_shinfo(skb[0])->gso_segs = 0;
skb              6843 lib/test_bpf.c 	skb_shinfo(skb[0])->frag_list = skb[1];
skb              6846 lib/test_bpf.c 	skb[0]->len += skb[1]->len;
skb              6847 lib/test_bpf.c 	skb[0]->data_len += skb[1]->data_len;
skb              6848 lib/test_bpf.c 	skb[0]->truesize += skb[1]->truesize;
skb              6850 lib/test_bpf.c 	return skb[0];
skb              6855 lib/test_bpf.c 	kfree_skb(skb[0]);
skb              6865 lib/test_bpf.c 	struct sk_buff *skb, *segs;
skb              6871 lib/test_bpf.c 	skb = build_test_skb();
skb              6872 lib/test_bpf.c 	if (!skb) {
skb              6877 lib/test_bpf.c 	segs = skb_segment(skb, features);
skb              6885 lib/test_bpf.c 	kfree_skb(skb);
skb               303 net/6lowpan/iphc.c static int lowpan_iphc_uncompress_addr(struct sk_buff *skb,
skb               314 net/6lowpan/iphc.c 		fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
skb               321 net/6lowpan/iphc.c 		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
skb               330 net/6lowpan/iphc.c 		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
skb               363 net/6lowpan/iphc.c static int lowpan_iphc_uncompress_ctx_addr(struct sk_buff *skb,
skb               383 net/6lowpan/iphc.c 		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
skb               390 net/6lowpan/iphc.c 		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
skb               426 net/6lowpan/iphc.c static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
skb               437 net/6lowpan/iphc.c 		fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
skb               444 net/6lowpan/iphc.c 		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
skb               445 net/6lowpan/iphc.c 		fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
skb               452 net/6lowpan/iphc.c 		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
skb               453 net/6lowpan/iphc.c 		fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
skb               461 net/6lowpan/iphc.c 		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
skb               479 net/6lowpan/iphc.c static int lowpan_uncompress_multicast_ctx_daddr(struct sk_buff *skb,
skb               488 net/6lowpan/iphc.c 	fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 2);
skb               489 net/6lowpan/iphc.c 	fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[12], 4);
skb               545 net/6lowpan/iphc.c static int lowpan_iphc_tf_decompress(struct sk_buff *skb, struct ipv6hdr *hdr,
skb               554 net/6lowpan/iphc.c 		if (lowpan_fetch_skb(skb, tf, 4))
skb               569 net/6lowpan/iphc.c 		if (lowpan_fetch_skb(skb, tf, 3))
skb               583 net/6lowpan/iphc.c 		if (lowpan_fetch_skb(skb, tf, 1))
skb               612 net/6lowpan/iphc.c int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
skb               621 net/6lowpan/iphc.c 		       skb->data, skb->len);
skb               623 net/6lowpan/iphc.c 	if (lowpan_fetch_skb(skb, &iphc0, sizeof(iphc0)) ||
skb               624 net/6lowpan/iphc.c 	    lowpan_fetch_skb(skb, &iphc1, sizeof(iphc1)))
skb               631 net/6lowpan/iphc.c 		if (lowpan_fetch_skb(skb, &cid, sizeof(cid)))
skb               635 net/6lowpan/iphc.c 	err = lowpan_iphc_tf_decompress(skb, &hdr,
skb               643 net/6lowpan/iphc.c 		if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr)))
skb               654 net/6lowpan/iphc.c 		if (lowpan_fetch_skb(skb, &hdr.hop_limit,
skb               668 net/6lowpan/iphc.c 		err = lowpan_iphc_uncompress_ctx_addr(skb, dev, ci, &hdr.saddr,
skb               675 net/6lowpan/iphc.c 		err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.saddr,
skb               686 net/6lowpan/iphc.c 		skb->pkt_type = PACKET_BROADCAST;
skb               697 net/6lowpan/iphc.c 		err = lowpan_uncompress_multicast_ctx_daddr(skb, ci,
skb               703 net/6lowpan/iphc.c 		skb->pkt_type = PACKET_BROADCAST;
skb               706 net/6lowpan/iphc.c 		err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr,
skb               710 net/6lowpan/iphc.c 		skb->pkt_type = PACKET_HOST;
skb               721 net/6lowpan/iphc.c 		err = lowpan_iphc_uncompress_ctx_addr(skb, dev, ci, &hdr.daddr,
skb               727 net/6lowpan/iphc.c 		skb->pkt_type = PACKET_HOST;
skb               729 net/6lowpan/iphc.c 		err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.daddr,
skb               742 net/6lowpan/iphc.c 		err = lowpan_nhc_do_uncompression(skb, dev, &hdr);
skb               746 net/6lowpan/iphc.c 		err = skb_cow(skb, sizeof(hdr));
skb               753 net/6lowpan/iphc.c 		if (lowpan_802154_cb(skb)->d_size)
skb               754 net/6lowpan/iphc.c 			hdr.payload_len = htons(lowpan_802154_cb(skb)->d_size -
skb               757 net/6lowpan/iphc.c 			hdr.payload_len = htons(skb->len);
skb               760 net/6lowpan/iphc.c 		hdr.payload_len = htons(skb->len);
skb               765 net/6lowpan/iphc.c 		 skb_headroom(skb), skb->len);
skb               772 net/6lowpan/iphc.c 	skb_push(skb, sizeof(hdr));
skb               773 net/6lowpan/iphc.c 	skb_reset_mac_header(skb);
skb               774 net/6lowpan/iphc.c 	skb_reset_network_header(skb);
skb               775 net/6lowpan/iphc.c 	skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));
skb              1131 net/6lowpan/iphc.c int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
skb              1140 net/6lowpan/iphc.c 	if (skb->protocol != htons(ETH_P_IPV6))
skb              1143 net/6lowpan/iphc.c 	hdr = ipv6_hdr(skb);
skb              1152 net/6lowpan/iphc.c 		       skb_network_header(skb), sizeof(struct ipv6hdr));
skb              1163 net/6lowpan/iphc.c 		       skb->data, skb->len);
skb              1199 net/6lowpan/iphc.c 	ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr);
skb              1294 net/6lowpan/iphc.c 		ret = lowpan_nhc_do_compression(skb, hdr, &hc_ptr);
skb              1302 net/6lowpan/iphc.c 	skb_pull(skb, sizeof(struct ipv6hdr));
skb              1303 net/6lowpan/iphc.c 	skb_reset_transport_header(skb);
skb              1304 net/6lowpan/iphc.c 	memcpy(skb_push(skb, hc_ptr - head), head, hc_ptr - head);
skb              1305 net/6lowpan/iphc.c 	skb_reset_network_header(skb);
skb              1307 net/6lowpan/iphc.c 	pr_debug("header len %d skb %u\n", (int)(hc_ptr - head), skb->len);
skb              1310 net/6lowpan/iphc.c 		       skb->data, skb->len);
skb               156 net/6lowpan/ndisc.c 					  struct sk_buff *skb, u8 icmp6_type,
skb               170 net/6lowpan/ndisc.c 			__ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR,
skb               191 net/6lowpan/ndisc.c 		__ndisc_fill_addr_option(skb, opt_type, &short_addr,
skb                61 net/6lowpan/nhc.c static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
skb                64 net/6lowpan/nhc.c 	const u8 *nhcid_skb_ptr = skb->data;
skb                72 net/6lowpan/nhc.c 		if (nhcid_skb_ptr + nhc->idlen > skb->data + skb->len)
skb                92 net/6lowpan/nhc.c int lowpan_nhc_check_compression(struct sk_buff *skb,
skb               109 net/6lowpan/nhc.c int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
skb               136 net/6lowpan/nhc.c 	if (skb->transport_header == skb->network_header)
skb               137 net/6lowpan/nhc.c 		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
skb               139 net/6lowpan/nhc.c 	ret = nhc->compress(skb, hc_ptr);
skb               144 net/6lowpan/nhc.c 	skb_pull(skb, nhc->nexthdrlen);
skb               152 net/6lowpan/nhc.c int lowpan_nhc_do_uncompression(struct sk_buff *skb,
skb               161 net/6lowpan/nhc.c 	nhc = lowpan_nhc_by_nhcid(skb);
skb               164 net/6lowpan/nhc.c 			ret = nhc->uncompress(skb, sizeof(struct ipv6hdr) +
skb               183 net/6lowpan/nhc.c 	skb_reset_transport_header(skb);
skb               185 net/6lowpan/nhc.c 		       skb_transport_header(skb), nhc->nexthdrlen);
skb                77 net/6lowpan/nhc.h 	int		(*uncompress)(struct sk_buff *skb, size_t needed);
skb                78 net/6lowpan/nhc.h 	int		(*compress)(struct sk_buff *skb, u8 **hc_ptr);
skb                98 net/6lowpan/nhc.h int lowpan_nhc_check_compression(struct sk_buff *skb,
skb               109 net/6lowpan/nhc.h int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
skb               120 net/6lowpan/nhc.h int lowpan_nhc_do_uncompression(struct sk_buff *skb,
skb                37 net/6lowpan/nhc_udp.c static int udp_uncompress(struct sk_buff *skb, size_t needed)
skb                44 net/6lowpan/nhc_udp.c 	fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));
skb                49 net/6lowpan/nhc_udp.c 		fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
skb                50 net/6lowpan/nhc_udp.c 		fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
skb                53 net/6lowpan/nhc_udp.c 		fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
skb                54 net/6lowpan/nhc_udp.c 		fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
skb                58 net/6lowpan/nhc_udp.c 		fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
skb                60 net/6lowpan/nhc_udp.c 		fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
skb                63 net/6lowpan/nhc_udp.c 		fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
skb                79 net/6lowpan/nhc_udp.c 		fail |= lowpan_fetch_skb(skb, &uh.check, sizeof(uh.check));
skb                89 net/6lowpan/nhc_udp.c 	switch (lowpan_dev(skb->dev)->lltype) {
skb                91 net/6lowpan/nhc_udp.c 		if (lowpan_802154_cb(skb)->d_size)
skb                92 net/6lowpan/nhc_udp.c 			uh.len = htons(lowpan_802154_cb(skb)->d_size -
skb                95 net/6lowpan/nhc_udp.c 			uh.len = htons(skb->len + sizeof(struct udphdr));
skb                98 net/6lowpan/nhc_udp.c 		uh.len = htons(skb->len + sizeof(struct udphdr));
skb               106 net/6lowpan/nhc_udp.c 	err = skb_cow(skb, needed);
skb               110 net/6lowpan/nhc_udp.c 	skb_push(skb, sizeof(struct udphdr));
skb               111 net/6lowpan/nhc_udp.c 	skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
skb               116 net/6lowpan/nhc_udp.c static int udp_compress(struct sk_buff *skb, u8 **hc_ptr)
skb               118 net/6lowpan/nhc_udp.c 	const struct udphdr *uh = udp_hdr(skb);
skb                32 net/802/fc.c   static int fc_header(struct sk_buff *skb, struct net_device *dev,
skb                48 net/802/fc.c   		fch = skb_push(skb, hdr_len);
skb                58 net/802/fc.c   		fch = skb_push(skb, hdr_len);
skb                48 net/802/fddi.c static int fddi_header(struct sk_buff *skb, struct net_device *dev,
skb                57 net/802/fddi.c 	fddi = skb_push(skb, hl);
skb                93 net/802/fddi.c __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
skb                95 net/802/fddi.c 	struct fddihdr *fddi = (struct fddihdr *)skb->data;
skb               103 net/802/fddi.c 	skb->dev = dev;
skb               104 net/802/fddi.c 	skb_reset_mac_header(skb);	/* point to frame control (FC) */
skb               108 net/802/fddi.c 		skb_pull(skb, FDDI_K_8022_HLEN-3);
skb               113 net/802/fddi.c 		skb_pull(skb, FDDI_K_SNAP_HLEN);		/* adjust for 21 byte header */
skb               122 net/802/fddi.c 			skb->pkt_type = PACKET_BROADCAST;
skb               124 net/802/fddi.c 			skb->pkt_type = PACKET_MULTICAST;
skb               130 net/802/fddi.c 			skb->pkt_type = PACKET_OTHERHOST;
skb               208 net/802/garp.c 	struct sk_buff *skb;
skb               212 net/802/garp.c 	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
skb               214 net/802/garp.c 	if (!skb)
skb               217 net/802/garp.c 	skb->dev = app->dev;
skb               218 net/802/garp.c 	skb->protocol = htons(ETH_P_802_2);
skb               219 net/802/garp.c 	skb_reserve(skb, LL_RESERVED_SPACE(app->dev) + LLC_RESERVE);
skb               221 net/802/garp.c 	gp = __skb_put(skb, sizeof(*gp));
skb               224 net/802/garp.c 	app->pdu = skb;
skb               256 net/802/garp.c 	struct sk_buff *skb;
skb               258 net/802/garp.c 	while ((skb = skb_dequeue(&app->queue)))
skb               259 net/802/garp.c 		dev_queue_xmit(skb);
skb               414 net/802/garp.c static int garp_pdu_parse_end_mark(struct sk_buff *skb)
skb               416 net/802/garp.c 	if (!pskb_may_pull(skb, sizeof(u8)))
skb               418 net/802/garp.c 	if (*skb->data == GARP_END_MARK) {
skb               419 net/802/garp.c 		skb_pull(skb, sizeof(u8));
skb               425 net/802/garp.c static int garp_pdu_parse_attr(struct garp_applicant *app, struct sk_buff *skb,
skb               433 net/802/garp.c 	if (!pskb_may_pull(skb, sizeof(*ga)))
skb               435 net/802/garp.c 	ga = (struct garp_attr_hdr *)skb->data;
skb               439 net/802/garp.c 	if (!pskb_may_pull(skb, ga->len))
skb               441 net/802/garp.c 	skb_pull(skb, ga->len);
skb               478 net/802/garp.c static int garp_pdu_parse_msg(struct garp_applicant *app, struct sk_buff *skb)
skb               482 net/802/garp.c 	if (!pskb_may_pull(skb, sizeof(*gm)))
skb               484 net/802/garp.c 	gm = (struct garp_msg_hdr *)skb->data;
skb               487 net/802/garp.c 	skb_pull(skb, sizeof(*gm));
skb               489 net/802/garp.c 	while (skb->len > 0) {
skb               490 net/802/garp.c 		if (garp_pdu_parse_attr(app, skb, gm->attrtype) < 0)
skb               492 net/802/garp.c 		if (garp_pdu_parse_end_mark(skb) < 0)
skb               498 net/802/garp.c static void garp_pdu_rcv(const struct stp_proto *proto, struct sk_buff *skb,
skb               513 net/802/garp.c 	if (!pskb_may_pull(skb, sizeof(*gp)))
skb               515 net/802/garp.c 	gp = (struct garp_pdu_hdr *)skb->data;
skb               518 net/802/garp.c 	skb_pull(skb, sizeof(*gp));
skb               521 net/802/garp.c 	while (skb->len > 0) {
skb               522 net/802/garp.c 		if (garp_pdu_parse_msg(app, skb) < 0)
skb               524 net/802/garp.c 		if (garp_pdu_parse_end_mark(skb) < 0)
skb               529 net/802/garp.c 	kfree_skb(skb);
skb                42 net/802/hippi.c static int hippi_header(struct sk_buff *skb, struct net_device *dev,
skb                46 net/802/hippi.c 	struct hippi_hdr *hip = skb_push(skb, HIPPI_HLEN);
skb                47 net/802/hippi.c 	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
skb                50 net/802/hippi.c 		len = skb->len - HIPPI_HLEN;
skb                93 net/802/hippi.c __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev)
skb               101 net/802/hippi.c 	skb->dev = dev;
skb               102 net/802/hippi.c 	skb_reset_mac_header(skb);
skb               103 net/802/hippi.c 	hip = (struct hippi_hdr *)skb_mac_header(skb);
skb               104 net/802/hippi.c 	skb_pull(skb, HIPPI_HLEN);
skb               297 net/802/mrp.c  	struct sk_buff *skb;
skb               300 net/802/mrp.c  	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
skb               302 net/802/mrp.c  	if (!skb)
skb               305 net/802/mrp.c  	skb->dev = app->dev;
skb               306 net/802/mrp.c  	skb->protocol = app->app->pkttype.type;
skb               307 net/802/mrp.c  	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
skb               308 net/802/mrp.c  	skb_reset_network_header(skb);
skb               309 net/802/mrp.c  	skb_reset_transport_header(skb);
skb               311 net/802/mrp.c  	ph = __skb_put(skb, sizeof(*ph));
skb               314 net/802/mrp.c  	app->pdu = skb;
skb               348 net/802/mrp.c  	struct sk_buff *skb;
skb               350 net/802/mrp.c  	while ((skb = skb_dequeue(&app->queue)))
skb               351 net/802/mrp.c  		dev_queue_xmit(skb);
skb               617 net/802/mrp.c  static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
skb               621 net/802/mrp.c  	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
skb               631 net/802/mrp.c  					struct sk_buff *skb,
skb               637 net/802/mrp.c  	attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
skb               638 net/802/mrp.c  			       mrp_cb(skb)->mh->attrlen,
skb               639 net/802/mrp.c  			       mrp_cb(skb)->mh->attrtype);
skb               670 net/802/mrp.c  				 struct sk_buff *skb, int *offset)
skb               676 net/802/mrp.c  	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
skb               678 net/802/mrp.c  	if (!mrp_cb(skb)->vah)
skb               682 net/802/mrp.c  	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
skb               685 net/802/mrp.c  	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
skb               694 net/802/mrp.c  	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
skb               697 net/802/mrp.c  	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
skb               698 net/802/mrp.c  			  mrp_cb(skb)->mh->attrlen) < 0)
skb               700 net/802/mrp.c  	*offset += mrp_cb(skb)->mh->attrlen;
skb               706 net/802/mrp.c  		if (skb_copy_bits(skb, *offset, &vaevents,
skb               718 net/802/mrp.c  		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
skb               723 net/802/mrp.c  		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
skb               724 net/802/mrp.c  				  mrp_cb(skb)->mh->attrlen);
skb               728 net/802/mrp.c  		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
skb               733 net/802/mrp.c  		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
skb               734 net/802/mrp.c  				  mrp_cb(skb)->mh->attrlen);
skb               737 net/802/mrp.c  		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
skb               742 net/802/mrp.c  static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
skb               747 net/802/mrp.c  	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
skb               748 net/802/mrp.c  	if (!mrp_cb(skb)->mh)
skb               752 net/802/mrp.c  	if (mrp_cb(skb)->mh->attrtype == 0 ||
skb               753 net/802/mrp.c  	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
skb               754 net/802/mrp.c  	    mrp_cb(skb)->mh->attrlen == 0)
skb               757 net/802/mrp.c  	while (skb->len > *offset) {
skb               758 net/802/mrp.c  		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
skb               760 net/802/mrp.c  		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
skb               766 net/802/mrp.c  static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
skb               775 net/802/mrp.c  	int offset = skb_network_offset(skb);
skb               780 net/802/mrp.c  	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
skb               782 net/802/mrp.c  	skb = skb_share_check(skb, GFP_ATOMIC);
skb               783 net/802/mrp.c  	if (unlikely(!skb))
skb               792 net/802/mrp.c  	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
skb               801 net/802/mrp.c  	while (skb->len > offset) {
skb               802 net/802/mrp.c  		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
skb               804 net/802/mrp.c  		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
skb               809 net/802/mrp.c  	kfree_skb(skb);
skb                25 net/802/p8022.c static int p8022_request(struct datalink_proto *dl, struct sk_buff *skb,
skb                28 net/802/p8022.c 	llc_build_and_send_ui_pkt(dl->sap, skb, dest, dl->sap->laddr.lsap);
skb                33 net/802/p8022.c 					    int (*func)(struct sk_buff *skb,
skb                27 net/802/p8023.c 			 struct sk_buff *skb, unsigned char *dest_node)
skb                29 net/802/p8023.c 	struct net_device *dev = skb->dev;
skb                31 net/802/p8023.c 	dev_hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len);
skb                32 net/802/p8023.c 	return dev_queue_xmit(skb);
skb                45 net/802/psnap.c static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
skb                54 net/802/psnap.c 	if (unlikely(!pskb_may_pull(skb, 5)))
skb                58 net/802/psnap.c 	proto = find_snap_client(skb_transport_header(skb));
skb                61 net/802/psnap.c 		skb->transport_header += 5;
skb                62 net/802/psnap.c 		skb_pull_rcsum(skb, 5);
skb                63 net/802/psnap.c 		rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
skb                74 net/802/psnap.c 	kfree_skb(skb);
skb                82 net/802/psnap.c 			struct sk_buff *skb, u8 *dest)
skb                84 net/802/psnap.c 	memcpy(skb_push(skb, 5), dl->type, 5);
skb                85 net/802/psnap.c 	llc_build_and_send_ui_pkt(snap_sap, skb, dest, snap_sap->laddr.lsap);
skb                30 net/802/stp.c  static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
skb                33 net/802/stp.c  	const struct ethhdr *eh = eth_hdr(skb);
skb                34 net/802/stp.c  	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb                54 net/802/stp.c  	proto->rcv(proto, skb, dev);
skb                58 net/802/stp.c  	kfree_skb(skb);
skb                11 net/8021q/vlan_core.c 	struct sk_buff *skb = *skbp;
skb                12 net/8021q/vlan_core.c 	__be16 vlan_proto = skb->vlan_proto;
skb                13 net/8021q/vlan_core.c 	u16 vlan_id = skb_vlan_tag_get_id(skb);
skb                17 net/8021q/vlan_core.c 	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
skb                21 net/8021q/vlan_core.c 	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
skb                22 net/8021q/vlan_core.c 	if (unlikely(!skb))
skb                26 net/8021q/vlan_core.c 		kfree_skb(skb);
skb                31 net/8021q/vlan_core.c 	skb->dev = vlan_dev;
skb                32 net/8021q/vlan_core.c 	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
skb                36 net/8021q/vlan_core.c 		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
skb                37 net/8021q/vlan_core.c 			skb->pkt_type = PACKET_HOST;
skb                43 net/8021q/vlan_core.c 		unsigned int offset = skb->data - skb_mac_header(skb);
skb                50 net/8021q/vlan_core.c 		skb_push(skb, offset);
skb                51 net/8021q/vlan_core.c 		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
skb                52 net/8021q/vlan_core.c 						    skb->vlan_tci, skb->mac_len);
skb                53 net/8021q/vlan_core.c 		if (!skb)
skb                55 net/8021q/vlan_core.c 		skb_pull(skb, offset + VLAN_HLEN);
skb                56 net/8021q/vlan_core.c 		skb_reset_mac_len(skb);
skb                59 net/8021q/vlan_core.c 	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
skb                60 net/8021q/vlan_core.c 	__vlan_hwaccel_clear_tag(skb);
skb                66 net/8021q/vlan_core.c 	rx_stats->rx_bytes += skb->len;
skb                67 net/8021q/vlan_core.c 	if (skb->pkt_type == PACKET_MULTICAST)
skb               458 net/8021q/vlan_core.c 					struct sk_buff *skb)
skb               468 net/8021q/vlan_core.c 	off_vlan = skb_gro_offset(skb);
skb               470 net/8021q/vlan_core.c 	vhdr = skb_gro_header_fast(skb, off_vlan);
skb               471 net/8021q/vlan_core.c 	if (skb_gro_header_hard(skb, hlen)) {
skb               472 net/8021q/vlan_core.c 		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
skb               497 net/8021q/vlan_core.c 	skb_gro_pull(skb, sizeof(*vhdr));
skb               498 net/8021q/vlan_core.c 	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
skb               499 net/8021q/vlan_core.c 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
skb               504 net/8021q/vlan_core.c 	skb_gro_flush_final(skb, pp, flush);
skb               509 net/8021q/vlan_core.c static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
skb               511 net/8021q/vlan_core.c 	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
skb               519 net/8021q/vlan_core.c 		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
skb                44 net/8021q/vlan_dev.c static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
skb                56 net/8021q/vlan_dev.c 		vhdr = skb_push(skb, VLAN_HLEN);
skb                59 net/8021q/vlan_dev.c 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
skb                71 net/8021q/vlan_dev.c 		skb->protocol = vlan->vlan_proto;
skb                82 net/8021q/vlan_dev.c 	rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
skb                88 net/8021q/vlan_dev.c static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
skb                92 net/8021q/vlan_dev.c 		netpoll_send_skb(vlan->netpoll, skb);
skb                99 net/8021q/vlan_dev.c static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
skb               103 net/8021q/vlan_dev.c 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
skb               116 net/8021q/vlan_dev.c 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
skb               117 net/8021q/vlan_dev.c 		__vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
skb               120 net/8021q/vlan_dev.c 	skb->dev = vlan->real_dev;
skb               121 net/8021q/vlan_dev.c 	len = skb->len;
skb               123 net/8021q/vlan_dev.c 		return vlan_netpoll_send_skb(vlan, skb);
skb               125 net/8021q/vlan_dev.c 	ret = dev_queue_xmit(skb);
skb               497 net/8021q/vlan_dev.c static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
skb               508 net/8021q/vlan_dev.c 	return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
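
For the transmit direction shown in vlan_dev.c, the common hardware-offload shape is: record the tag in skb metadata rather than pushing a VLAN header into the frame, retarget the real device, and hand off. A sketch under those assumptions (vlan_egress_sketch is illustrative, not a kernel function):

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/if_vlan.h>

    static int vlan_egress_sketch(struct sk_buff *skb,
                                  struct net_device *real_dev,
                                  __be16 proto, u16 tci)
    {
            /* Tag lives in skb metadata; core or driver inserts it on the wire. */
            __vlan_hwaccel_put_tag(skb, proto, tci);
            skb->dev = real_dev;
            return dev_queue_xmit(skb);        /* NET_XMIT_* result codes */
    }
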
skb               213 net/8021q/vlan_netlink.c static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb               222 net/8021q/vlan_netlink.c 	if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) ||
skb               223 net/8021q/vlan_netlink.c 	    nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id))
skb               228 net/8021q/vlan_netlink.c 		if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
skb               232 net/8021q/vlan_netlink.c 		nest = nla_nest_start_noflag(skb, IFLA_VLAN_INGRESS_QOS);
skb               242 net/8021q/vlan_netlink.c 			if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
skb               246 net/8021q/vlan_netlink.c 		nla_nest_end(skb, nest);
skb               250 net/8021q/vlan_netlink.c 		nest = nla_nest_start_noflag(skb, IFLA_VLAN_EGRESS_QOS);
skb               262 net/8021q/vlan_netlink.c 				if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
skb               267 net/8021q/vlan_netlink.c 		nla_nest_end(skb, nest);
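
The vlan_netlink.c entries show the standard nested-attribute fill pattern. A compact sketch with made-up attribute numbers (MY_ATTR_*); one common way to handle a nla_put failure inside a nest is to cancel the partial nest before returning:

    #include <net/netlink.h>

    enum { MY_ATTR_UNSPEC, MY_ATTR_ID, MY_ATTR_QOS, MY_ATTR_QOS_MAP };

    static int fill_info_sketch(struct sk_buff *skb, u16 id,
                                const u32 *map, int n)
    {
            struct nlattr *nest;
            int i;

            if (nla_put_u16(skb, MY_ATTR_ID, id))
                    return -EMSGSIZE;

            nest = nla_nest_start_noflag(skb, MY_ATTR_QOS);
            if (!nest)
                    return -EMSGSIZE;
            for (i = 0; i < n; i++) {
                    if (nla_put_u32(skb, MY_ATTR_QOS_MAP, map[i])) {
                            nla_nest_cancel(skb, nest);  /* roll back partial nest */
                            return -EMSGSIZE;
                    }
            }
            nla_nest_end(skb, nest);
            return 0;
    }
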
skb               105 net/appletalk/aarp.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
skb               108 net/appletalk/aarp.c 	if (!skb)
skb               112 net/appletalk/aarp.c 		kfree_skb(skb);
skb               117 net/appletalk/aarp.c 	skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length);
skb               118 net/appletalk/aarp.c 	skb_reset_network_header(skb);
skb               119 net/appletalk/aarp.c 	skb_reset_transport_header(skb);
skb               120 net/appletalk/aarp.c 	skb_put(skb, sizeof(*eah));
skb               121 net/appletalk/aarp.c 	skb->protocol    = htons(ETH_P_ATALK);
skb               122 net/appletalk/aarp.c 	skb->dev	 = dev;
skb               123 net/appletalk/aarp.c 	eah		 = aarp_hdr(skb);
skb               145 net/appletalk/aarp.c 	aarp_dl->request(aarp_dl, skb, aarp_eth_multicast);
skb               158 net/appletalk/aarp.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
skb               160 net/appletalk/aarp.c 	if (!skb)
skb               164 net/appletalk/aarp.c 	skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length);
skb               165 net/appletalk/aarp.c 	skb_reset_network_header(skb);
skb               166 net/appletalk/aarp.c 	skb_reset_transport_header(skb);
skb               167 net/appletalk/aarp.c 	skb_put(skb, sizeof(*eah));
skb               168 net/appletalk/aarp.c 	skb->protocol    = htons(ETH_P_ATALK);
skb               169 net/appletalk/aarp.c 	skb->dev	 = dev;
skb               170 net/appletalk/aarp.c 	eah		 = aarp_hdr(skb);
skb               195 net/appletalk/aarp.c 	aarp_dl->request(aarp_dl, skb, sha);
skb               207 net/appletalk/aarp.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
skb               211 net/appletalk/aarp.c 	if (!skb)
skb               215 net/appletalk/aarp.c 	skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length);
skb               216 net/appletalk/aarp.c 	skb_reset_network_header(skb);
skb               217 net/appletalk/aarp.c 	skb_reset_transport_header(skb);
skb               218 net/appletalk/aarp.c 	skb_put(skb, sizeof(*eah));
skb               219 net/appletalk/aarp.c 	skb->protocol    = htons(ETH_P_ATALK);
skb               220 net/appletalk/aarp.c 	skb->dev	 = dev;
skb               221 net/appletalk/aarp.c 	eah		 = aarp_hdr(skb);
skb               243 net/appletalk/aarp.c 	aarp_dl->request(aarp_dl, skb, aarp_eth_multicast);
skb               542 net/appletalk/aarp.c int aarp_send_ddp(struct net_device *dev, struct sk_buff *skb,
skb               550 net/appletalk/aarp.c 	skb_reset_network_header(skb);
skb               555 net/appletalk/aarp.c 		struct ddpehdr *ddp = (struct ddpehdr *)skb->data;
skb               567 net/appletalk/aarp.c 			skb_pull(skb, sizeof(*ddp) - 4);
skb               574 net/appletalk/aarp.c 			*((__be16 *)skb->data) = htons(skb->len);
skb               582 net/appletalk/aarp.c 		skb_push(skb, 3);
skb               583 net/appletalk/aarp.c 		skb->data[0] = sa->s_node;
skb               584 net/appletalk/aarp.c 		skb->data[1] = at->s_node;
skb               585 net/appletalk/aarp.c 		skb->data[2] = ft;
skb               586 net/appletalk/aarp.c 		skb->dev     = dev;
skb               592 net/appletalk/aarp.c 		skb->protocol = htons(ETH_P_PPPTALK);
skb               593 net/appletalk/aarp.c 		skb->dev = dev;
skb               601 net/appletalk/aarp.c 	skb->dev = dev;
skb               602 net/appletalk/aarp.c 	skb->protocol = htons(ETH_P_ATALK);
skb               608 net/appletalk/aarp.c 		ddp_dl->request(ddp_dl, skb, ddp_eth_multicast);
skb               617 net/appletalk/aarp.c 		ddp_dl->request(ddp_dl, skb, a->hwaddr);
skb               625 net/appletalk/aarp.c 		skb_queue_tail(&a->packet_queue, skb);
skb               638 net/appletalk/aarp.c 	skb_queue_tail(&a->packet_queue, skb);
skb               666 net/appletalk/aarp.c 	if (skb->sk)
skb               667 net/appletalk/aarp.c 		skb->priority = skb->sk->sk_priority;
skb               668 net/appletalk/aarp.c 	if (dev_queue_xmit(skb))
skb               673 net/appletalk/aarp.c 	kfree_skb(skb);
skb               688 net/appletalk/aarp.c 	struct sk_buff *skb;
skb               700 net/appletalk/aarp.c 			while ((skb = skb_dequeue(&a->packet_queue)) != NULL) {
skb               703 net/appletalk/aarp.c 				ddp_dl->request(ddp_dl, skb, a->hwaddr);
skb               713 net/appletalk/aarp.c static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
skb               716 net/appletalk/aarp.c 	struct elapaarp *ea = aarp_hdr(skb);
skb               731 net/appletalk/aarp.c 	if (!skb_pull(skb, sizeof(*ea)))
skb               831 net/appletalk/aarp.c 					      skb->dev, &sa);
skb               866 net/appletalk/aarp.c 	kfree_skb(skb);
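
The three AARP transmit helpers above repeat one allocation shape: alloc_skb() sized for headers plus payload, skb_reserve() for the link-level headers the datalink layer will push later, header-offset resets, then skb_put() for the payload. A generic sketch of that shape (names are placeholders, not aarp.c functions):

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>

    static struct sk_buff *build_frame_sketch(struct net_device *dev,
                                              unsigned int hdr_room,
                                              unsigned int payload_len,
                                              __be16 proto)
    {
            struct sk_buff *skb = alloc_skb(hdr_room + payload_len, GFP_ATOMIC);

            if (!skb)
                    return NULL;

            skb_reserve(skb, hdr_room);        /* room for MAC + DL headers */
            skb_reset_network_header(skb);     /* payload starts here */
            skb_put(skb, payload_len);         /* extend the data area */
            skb->protocol = proto;
            skb->dev = dev;
            return skb;                        /* caller fills the payload */
    }
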
skb               925 net/appletalk/ddp.c static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
skb               928 net/appletalk/ddp.c 	int start = skb_headlen(skb);
skb               936 net/appletalk/ddp.c 		sum = atalk_sum_partial(skb->data + offset, copy, sum);
skb               944 net/appletalk/ddp.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               946 net/appletalk/ddp.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               967 net/appletalk/ddp.c 	skb_walk_frags(skb, frag_iter) {
skb               990 net/appletalk/ddp.c static __be16 atalk_checksum(const struct sk_buff *skb, int len)
skb               995 net/appletalk/ddp.c 	sum = atalk_sum_skb(skb, 4, len-4, 0);
skb              1280 net/appletalk/ddp.c static __inline__ int is_ip_over_ddp(struct sk_buff *skb)
skb              1282 net/appletalk/ddp.c 	return skb->data[12] == 22;
skb              1285 net/appletalk/ddp.c static int handle_ip_over_ddp(struct sk_buff *skb)
skb              1292 net/appletalk/ddp.c 		kfree_skb(skb);
skb              1296 net/appletalk/ddp.c 	skb->protocol = htons(ETH_P_IP);
skb              1297 net/appletalk/ddp.c 	skb_pull(skb, 13);
skb              1298 net/appletalk/ddp.c 	skb->dev   = dev;
skb              1299 net/appletalk/ddp.c 	skb_reset_transport_header(skb);
skb              1303 net/appletalk/ddp.c 	stats->rx_bytes += skb->len + 13;
skb              1304 net/appletalk/ddp.c 	return netif_rx(skb);  /* Send the SKB up to a higher place. */
skb              1308 net/appletalk/ddp.c #define is_ip_over_ddp(skb) 0
skb              1309 net/appletalk/ddp.c #define handle_ip_over_ddp(skb) 0
skb              1312 net/appletalk/ddp.c static int atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
skb              1322 net/appletalk/ddp.c 	if (skb->pkt_type != PACKET_HOST || !ddp->deh_dnet) {
skb              1358 net/appletalk/ddp.c 	skb_trim(skb, min_t(unsigned int, origlen,
skb              1376 net/appletalk/ddp.c 	if (skb_headroom(skb) < 22) {
skb              1378 net/appletalk/ddp.c 		struct sk_buff *nskb = skb_realloc_headroom(skb, 32);
skb              1379 net/appletalk/ddp.c 		kfree_skb(skb);
skb              1380 net/appletalk/ddp.c 		skb = nskb;
skb              1382 net/appletalk/ddp.c 		skb = skb_unshare(skb, GFP_ATOMIC);
skb              1388 net/appletalk/ddp.c 	if (skb == NULL)
skb              1391 net/appletalk/ddp.c 	if (aarp_send_ddp(rt->dev, skb, &ta, NULL) == NET_XMIT_DROP)
skb              1395 net/appletalk/ddp.c 	kfree_skb(skb);
skb              1412 net/appletalk/ddp.c static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
skb              1426 net/appletalk/ddp.c 	if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
skb              1430 net/appletalk/ddp.c 	if (!pskb_may_pull(skb, sizeof(*ddp)))
skb              1433 net/appletalk/ddp.c 	ddp = ddp_hdr(skb);
skb              1438 net/appletalk/ddp.c 	origlen = skb->len;
skb              1439 net/appletalk/ddp.c 	skb_trim(skb, min_t(unsigned int, skb->len, len_hops & 1023));
skb              1446 net/appletalk/ddp.c 	if (skb->len < sizeof(*ddp) || skb->len < (len_hops & 1023)) {
skb              1448 net/appletalk/ddp.c 			 "skb->len=%u)\n", len_hops & 1023, skb->len);
skb              1457 net/appletalk/ddp.c 	    atalk_checksum(skb, len_hops & 1023) != ddp->deh_sum)
skb              1471 net/appletalk/ddp.c 		return atalk_route_packet(skb, dev, ddp, len_hops, origlen);
skb              1475 net/appletalk/ddp.c 	if (is_ip_over_ddp(skb))
skb              1476 net/appletalk/ddp.c 		return handle_ip_over_ddp(skb);
skb              1490 net/appletalk/ddp.c 	if (sock_queue_rcv_skb(sock, skb) < 0)
skb              1496 net/appletalk/ddp.c 	kfree_skb(skb);
skb              1507 net/appletalk/ddp.c static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
skb              1514 net/appletalk/ddp.c 	if (skb_mac_header(skb)[2] == 1) {
skb              1519 net/appletalk/ddp.c 		if (!ap || skb->len < sizeof(__be16) || skb->len > 1023)
skb              1523 net/appletalk/ddp.c 		if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
skb              1530 net/appletalk/ddp.c 		ddp = skb_push(skb, sizeof(*ddp) - 4);
skb              1540 net/appletalk/ddp.c 		ddp->deh_dnode = skb_mac_header(skb)[0];     /* From physical header */
skb              1541 net/appletalk/ddp.c 		ddp->deh_snode = skb_mac_header(skb)[1];     /* From physical header */
skb              1550 net/appletalk/ddp.c 		ddp->deh_len_hops = htons(skb->len + (DDP_MAXHOPS << 10));
skb              1552 net/appletalk/ddp.c 	skb_reset_transport_header(skb);
skb              1554 net/appletalk/ddp.c 	return atalk_rcv(skb, dev, pt, orig_dev);
skb              1556 net/appletalk/ddp.c 	kfree_skb(skb);
skb              1568 net/appletalk/ddp.c 	struct sk_buff *skb;
skb              1637 net/appletalk/ddp.c 	skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
skb              1639 net/appletalk/ddp.c 	if (!skb)
skb              1642 net/appletalk/ddp.c 	skb_reserve(skb, ddp_dl->header_length);
skb              1643 net/appletalk/ddp.c 	skb_reserve(skb, dev->hard_header_len);
skb              1644 net/appletalk/ddp.c 	skb->dev = dev;
skb              1648 net/appletalk/ddp.c 	ddp = skb_put(skb, sizeof(struct ddpehdr));
skb              1659 net/appletalk/ddp.c 	err = memcpy_from_msg(skb_put(skb, len), msg, len);
skb              1661 net/appletalk/ddp.c 		kfree_skb(skb);
skb              1669 net/appletalk/ddp.c 		ddp->deh_sum = atalk_checksum(skb, len + sizeof(*ddp));
skb              1677 net/appletalk/ddp.c 		struct sk_buff *skb2 = skb_copy(skb, GFP_KERNEL);
skb              1692 net/appletalk/ddp.c 		skb_orphan(skb);
skb              1701 net/appletalk/ddp.c 				kfree_skb(skb);
skb              1706 net/appletalk/ddp.c 			skb->dev = dev;
skb              1708 net/appletalk/ddp.c 		ddp_dl->request(ddp_dl, skb, dev->dev_addr);
skb              1719 net/appletalk/ddp.c 		aarp_send_ddp(dev, skb, &usat->sat_addr, NULL);
skb              1736 net/appletalk/ddp.c 	struct sk_buff *skb;
skb              1738 net/appletalk/ddp.c 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
skb              1742 net/appletalk/ddp.c 	if (!skb)
skb              1746 net/appletalk/ddp.c 	ddp = ddp_hdr(skb);
skb              1758 net/appletalk/ddp.c 	err = skb_copy_datagram_msg(skb, offset, msg, copied);
skb              1769 net/appletalk/ddp.c 	skb_free_datagram(sk, skb);	/* Free the datagram. */
skb              1801 net/appletalk/ddp.c 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
skb              1804 net/appletalk/ddp.c 		if (skb)
skb              1805 net/appletalk/ddp.c 			amount = skb->len - sizeof(struct ddpehdr);
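
On the DDP send side, the excerpts show the usual datagram sendmsg shape: allocate against the socket's send budget, reserve room for headers pushed later, and copy the payload from the user iterator. A sketch, with hdr_len standing in for the combined device and DDP header space:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    static struct sk_buff *dgram_alloc_sketch(struct sock *sk,
                                              struct msghdr *msg, size_t len,
                                              unsigned int hdr_len, int *err)
    {
            struct sk_buff *skb;

            skb = sock_alloc_send_skb(sk, hdr_len + len,
                                      msg->msg_flags & MSG_DONTWAIT, err);
            if (!skb)
                    return NULL;

            skb_reserve(skb, hdr_len);         /* headers are pushed later */
            if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                    kfree_skb(skb);
                    *err = -EFAULT;
                    return NULL;
            }
            return skb;
    }
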
skb                34 net/atm/atm_misc.c 		struct sk_buff *skb = alloc_skb(pdu_size, gfp_flags);
skb                36 net/atm/atm_misc.c 		if (skb) {
skb                37 net/atm/atm_misc.c 			atomic_add(skb->truesize-guess,
skb                39 net/atm/atm_misc.c 			return skb;
skb                33 net/atm/br2684.c static void skb_debug(const struct sk_buff *skb)
skb                38 net/atm/br2684.c 		       16, 1, skb->data, min(NUM2PRINT, skb->len), true);
skb                70 net/atm/br2684.c 	void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
skb                71 net/atm/br2684.c 	void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
skb               185 net/atm/br2684.c static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
skb               190 net/atm/br2684.c 	brvcc->old_pop(vcc, skb);
skb               202 net/atm/br2684.c static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
skb               212 net/atm/br2684.c 	if (skb_headroom(skb) < minheadroom) {
skb               213 net/atm/br2684.c 		struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom);
skb               215 net/atm/br2684.c 		dev_kfree_skb(skb);
skb               220 net/atm/br2684.c 		skb = skb2;
skb               225 net/atm/br2684.c 			skb_push(skb, sizeof(llc_oui_pid_pad));
skb               226 net/atm/br2684.c 			skb_copy_to_linear_data(skb, llc_oui_pid_pad,
skb               229 net/atm/br2684.c 			unsigned short prot = ntohs(skb->protocol);
skb               231 net/atm/br2684.c 			skb_push(skb, sizeof(llc_oui_ipv4));
skb               234 net/atm/br2684.c 				skb_copy_to_linear_data(skb, llc_oui_ipv4,
skb               238 net/atm/br2684.c 				skb_copy_to_linear_data(skb, llc_oui_ipv6,
skb               242 net/atm/br2684.c 				dev_kfree_skb(skb);
skb               248 net/atm/br2684.c 			skb_push(skb, 2);
skb               249 net/atm/br2684.c 			memset(skb->data, 0, 2);
skb               252 net/atm/br2684.c 	skb_debug(skb);
skb               254 net/atm/br2684.c 	ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
skb               255 net/atm/br2684.c 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
skb               256 net/atm/br2684.c 	atm_account_tx(atmvcc, skb);
skb               258 net/atm/br2684.c 	dev->stats.tx_bytes += skb->len;
skb               271 net/atm/br2684.c 	return !atmvcc->send(atmvcc, skb);
skb               285 net/atm/br2684.c static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
skb               291 net/atm/br2684.c static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
skb               299 net/atm/br2684.c 	pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
skb               301 net/atm/br2684.c 	brvcc = pick_outgoing_vcc(skb, brdev);
skb               307 net/atm/br2684.c 		dev_kfree_skb(skb);
skb               318 net/atm/br2684.c 		dev_kfree_skb(skb);
skb               328 net/atm/br2684.c 	if (!br2684_xmit_vcc(skb, dev, brvcc)) {
skb               391 net/atm/br2684.c packet_fails_filter(__be16 type, struct br2684_vcc *brvcc, struct sk_buff *skb)
skb               396 net/atm/br2684.c 	    (((struct iphdr *)(skb->data))->daddr & brvcc->filter.
skb               423 net/atm/br2684.c static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
skb               431 net/atm/br2684.c 	if (unlikely(skb == NULL)) {
skb               444 net/atm/br2684.c 	skb_debug(skb);
skb               445 net/atm/br2684.c 	atm_return(atmvcc, skb->truesize);
skb               449 net/atm/br2684.c 		if (skb->len > 7 && skb->data[7] == 0x01)
skb               450 net/atm/br2684.c 			__skb_trim(skb, skb->len - 4);
skb               453 net/atm/br2684.c 		if ((skb->len >= (sizeof(llc_oui_ipv4))) &&
skb               454 net/atm/br2684.c 		    (memcmp(skb->data, llc_oui_ipv4,
skb               456 net/atm/br2684.c 			if (memcmp(skb->data + 6, ethertype_ipv6,
skb               458 net/atm/br2684.c 				skb->protocol = htons(ETH_P_IPV6);
skb               459 net/atm/br2684.c 			else if (memcmp(skb->data + 6, ethertype_ipv4,
skb               461 net/atm/br2684.c 				skb->protocol = htons(ETH_P_IP);
skb               464 net/atm/br2684.c 			skb_pull(skb, sizeof(llc_oui_ipv4));
skb               465 net/atm/br2684.c 			skb_reset_network_header(skb);
skb               466 net/atm/br2684.c 			skb->pkt_type = PACKET_HOST;
skb               472 net/atm/br2684.c 		} else if ((skb->len >= sizeof(llc_oui_pid_pad)) &&
skb               473 net/atm/br2684.c 			   (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) {
skb               474 net/atm/br2684.c 			skb_pull(skb, sizeof(llc_oui_pid_pad));
skb               475 net/atm/br2684.c 			skb->protocol = eth_type_trans(skb, net_dev);
skb               483 net/atm/br2684.c 			skb_reset_network_header(skb);
skb               484 net/atm/br2684.c 			iph = ip_hdr(skb);
skb               486 net/atm/br2684.c 				skb->protocol = htons(ETH_P_IP);
skb               488 net/atm/br2684.c 				skb->protocol = htons(ETH_P_IPV6);
skb               491 net/atm/br2684.c 			skb->pkt_type = PACKET_HOST;
skb               494 net/atm/br2684.c 			if (memcmp(skb->data, pad, BR2684_PAD_LEN) != 0)
skb               496 net/atm/br2684.c 			skb_pull(skb, BR2684_PAD_LEN);
skb               497 net/atm/br2684.c 			skb->protocol = eth_type_trans(skb, net_dev);
skb               502 net/atm/br2684.c 	if (unlikely(packet_fails_filter(skb->protocol, brvcc, skb)))
skb               505 net/atm/br2684.c 	skb->dev = net_dev;
skb               506 net/atm/br2684.c 	ATM_SKB(skb)->vcc = atmvcc;	/* needed ? */
skb               507 net/atm/br2684.c 	pr_debug("received packet's protocol: %x\n", ntohs(skb->protocol));
skb               508 net/atm/br2684.c 	skb_debug(skb);
skb               513 net/atm/br2684.c 	net_dev->stats.rx_bytes += skb->len;
skb               514 net/atm/br2684.c 	memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
skb               515 net/atm/br2684.c 	netif_rx(skb);
skb               524 net/atm/br2684.c 	dev_kfree_skb(skb);
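
br2684_xmit_vcc above illustrates the headroom-ensure idiom used before pushing an encapsulation header: reallocate only when the existing headroom is too small. Sketched in isolation (needed is the header length about to be pushed):

    #include <linux/skbuff.h>

    static struct sk_buff *ensure_headroom_sketch(struct sk_buff *skb,
                                                  unsigned int needed)
    {
            struct sk_buff *skb2;

            if (skb_headroom(skb) >= needed)
                    return skb;                /* fast path: room already there */

            skb2 = skb_realloc_headroom(skb, needed);  /* copies, does not free */
            dev_kfree_skb(skb);                /* old skb is done either way */
            return skb2;                       /* NULL on allocation failure */
    }
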
skb                56 net/atm/clip.c 	struct sk_buff *skb;
skb                61 net/atm/clip.c 	skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
skb                62 net/atm/clip.c 	if (!skb)
skb                64 net/atm/clip.c 	ctrl = skb_put(skb, sizeof(struct atmarp_ctrl));
skb                68 net/atm/clip.c 	atm_force_charge(atmarpd, skb->truesize);
skb                71 net/atm/clip.c 	skb_queue_tail(&sk->sk_receive_queue, skb);
skb               142 net/atm/clip.c 		struct sk_buff *skb;
skb               147 net/atm/clip.c 		while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
skb               148 net/atm/clip.c 			dev_kfree_skb(skb);
skb               165 net/atm/clip.c static int clip_arp_rcv(struct sk_buff *skb)
skb               170 net/atm/clip.c 	vcc = ATM_SKB(skb)->vcc;
skb               171 net/atm/clip.c 	if (!vcc || !atm_charge(vcc, skb->truesize)) {
skb               172 net/atm/clip.c 		dev_kfree_skb_any(skb);
skb               177 net/atm/clip.c 	CLIP_VCC(vcc)->old_push(vcc, skb);
skb               190 net/atm/clip.c static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
skb               197 net/atm/clip.c 		atm_return(vcc, skb->truesize);
skb               198 net/atm/clip.c 		kfree_skb(skb);
skb               202 net/atm/clip.c 	if (!skb) {
skb               210 net/atm/clip.c 	atm_return(vcc, skb->truesize);
skb               211 net/atm/clip.c 	skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs;
skb               213 net/atm/clip.c 	if (!skb->dev) {
skb               214 net/atm/clip.c 		dev_kfree_skb_any(skb);
skb               217 net/atm/clip.c 	ATM_SKB(skb)->vcc = vcc;
skb               218 net/atm/clip.c 	skb_reset_mac_header(skb);
skb               220 net/atm/clip.c 	    skb->len < RFC1483LLC_LEN ||
skb               221 net/atm/clip.c 	    memcmp(skb->data, llc_oui, sizeof(llc_oui)))
skb               222 net/atm/clip.c 		skb->protocol = htons(ETH_P_IP);
skb               224 net/atm/clip.c 		skb->protocol = ((__be16 *)skb->data)[3];
skb               225 net/atm/clip.c 		skb_pull(skb, RFC1483LLC_LEN);
skb               226 net/atm/clip.c 		if (skb->protocol == htons(ETH_P_ARP)) {
skb               227 net/atm/clip.c 			skb->dev->stats.rx_packets++;
skb               228 net/atm/clip.c 			skb->dev->stats.rx_bytes += skb->len;
skb               229 net/atm/clip.c 			clip_arp_rcv(skb);
skb               234 net/atm/clip.c 	skb->dev->stats.rx_packets++;
skb               235 net/atm/clip.c 	skb->dev->stats.rx_bytes += skb->len;
skb               236 net/atm/clip.c 	memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
skb               237 net/atm/clip.c 	netif_rx(skb);
skb               245 net/atm/clip.c static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
skb               248 net/atm/clip.c 	struct net_device *dev = skb->dev;
skb               253 net/atm/clip.c 	clip_vcc->old_pop(vcc, skb);
skb               266 net/atm/clip.c static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
skb               270 net/atm/clip.c 	pr_debug("(neigh %p, skb %p)\n", neigh, skb);
skb               274 net/atm/clip.c static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
skb               277 net/atm/clip.c 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
skb               279 net/atm/clip.c 	kfree_skb(skb);
skb               328 net/atm/clip.c static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
skb               332 net/atm/clip.c 	struct dst_entry *dst = skb_dst(skb);
skb               341 net/atm/clip.c 	pr_debug("(skb %p)\n", skb);
skb               344 net/atm/clip.c 		dev_kfree_skb(skb);
skb               352 net/atm/clip.c 		daddr = &ip_hdr(skb)->daddr;
skb               356 net/atm/clip.c 		dev_kfree_skb(skb);
skb               368 net/atm/clip.c 			skb_queue_tail(&entry->neigh->arp_queue, skb);
skb               370 net/atm/clip.c 			dev_kfree_skb(skb);
skb               376 net/atm/clip.c 	ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
skb               381 net/atm/clip.c 		here = skb_push(skb, RFC1483LLC_LEN);
skb               383 net/atm/clip.c 		((__be16 *) here)[3] = skb->protocol;
skb               385 net/atm/clip.c 	atm_account_tx(vcc, skb);
skb               387 net/atm/clip.c 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
skb               394 net/atm/clip.c 	dev->stats.tx_bytes += skb->len;
skb               395 net/atm/clip.c 	vcc->send(vcc, skb);
skb               176 net/atm/common.c 	struct sk_buff *skb;
skb               187 net/atm/common.c 		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
skb               188 net/atm/common.c 			atm_return(vcc, skb->truesize);
skb               189 net/atm/common.c 			kfree_skb(skb);
skb               228 net/atm/common.c 	struct sk_buff *skb, *tmp;
skb               238 net/atm/common.c 	skb_queue_walk_safe(&queue, skb, tmp) {
skb               239 net/atm/common.c 		__skb_unlink(skb, &queue);
skb               240 net/atm/common.c 		vcc->push(vcc, skb);
skb               527 net/atm/common.c 	struct sk_buff *skb;
skb               543 net/atm/common.c 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
skb               544 net/atm/common.c 	if (!skb)
skb               547 net/atm/common.c 	copied = skb->len;
skb               553 net/atm/common.c 	error = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               556 net/atm/common.c 	sock_recv_ts_and_drops(msg, sk, skb);
skb               560 net/atm/common.c 			 skb->truesize);
skb               561 net/atm/common.c 		atm_return(vcc, skb->truesize);
skb               564 net/atm/common.c 	skb_free_datagram(sk, skb);
skb               573 net/atm/common.c 	struct sk_buff *skb;
skb               628 net/atm/common.c 	skb = alloc_skb(eff, GFP_KERNEL);
skb               629 net/atm/common.c 	if (!skb) {
skb               633 net/atm/common.c 	pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
skb               634 net/atm/common.c 	atm_account_tx(vcc, skb);
skb               636 net/atm/common.c 	skb->dev = NULL; /* for paths shared with net_device interfaces */
skb               637 net/atm/common.c 	if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
skb               638 net/atm/common.c 		kfree_skb(skb);
skb               643 net/atm/common.c 		memset(skb->data + size, 0, eff-size);
skb               644 net/atm/common.c 	error = vcc->dev->ops->send(vcc, skb);
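
vcc_recvmsg above is an instance of the generic datagram receive shape: dequeue one skb, copy it into the user's msghdr, release it. A sketch of just that shape, matching the four-argument skb_recv_datagram() used throughout this tree:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    static int dgram_recv_sketch(struct sock *sk, struct msghdr *msg,
                                 size_t size, int flags)
    {
            struct sk_buff *skb;
            int copied, err;

            skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
            if (!skb)
                    return err;

            copied = skb->len;
            if (copied > size) {
                    copied = size;             /* short read: truncate */
                    msg->msg_flags |= MSG_TRUNC;
            }
            err = skb_copy_datagram_msg(skb, 0, msg, copied);
            skb_free_datagram(sk, skb);
            return err ? err : copied;
    }
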
skb                73 net/atm/ioctl.c 		struct sk_buff *skb;
skb                79 net/atm/ioctl.c 		skb = skb_peek(&sk->sk_receive_queue);
skb                80 net/atm/ioctl.c 		error = put_user(skb ? skb->len : 0,
skb                64 net/atm/lec.c  static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
skb                82 net/atm/lec.c  				  struct atm_vcc *vcc, struct sk_buff *skb);
skb               102 net/atm/lec.c  					   struct sk_buff *skb));
skb               129 net/atm/lec.c  static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
skb               139 net/atm/lec.c  	buff = skb->data + skb->dev->hard_header_len;
skb               181 net/atm/lec.c  lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb               183 net/atm/lec.c  	struct net_device *dev = skb->dev;
skb               185 net/atm/lec.c  	ATM_SKB(skb)->vcc = vcc;
skb               186 net/atm/lec.c  	atm_account_tx(vcc, skb);
skb               188 net/atm/lec.c  	if (vcc->send(vcc, skb) < 0) {
skb               194 net/atm/lec.c  	dev->stats.tx_bytes += skb->len;
skb               204 net/atm/lec.c  static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
skb               221 net/atm/lec.c  		kfree_skb(skb);
skb               226 net/atm/lec.c  		 (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
skb               227 net/atm/lec.c  		 (long)skb_end_pointer(skb));
skb               229 net/atm/lec.c  	if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
skb               230 net/atm/lec.c  		lec_handle_bridge(skb, dev);
skb               234 net/atm/lec.c  	if (skb_headroom(skb) < 2) {
skb               236 net/atm/lec.c  		skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
skb               238 net/atm/lec.c  			kfree_skb(skb);
skb               241 net/atm/lec.c  		consume_skb(skb);
skb               242 net/atm/lec.c  		skb = skb2;
skb               244 net/atm/lec.c  	skb_push(skb, 2);
skb               247 net/atm/lec.c  	lec_h = (struct lecdatahdr_8023 *)skb->data;
skb               257 net/atm/lec.c  	       dev->name, skb->len, priv->lecid);
skb               259 net/atm/lec.c  		       skb->data, min(skb->len, MAX_DUMP_SKB), true);
skb               264 net/atm/lec.c  	if (skb->len < min_frame_size) {
skb               265 net/atm/lec.c  		if ((skb->len + skb_tailroom(skb)) < min_frame_size) {
skb               266 net/atm/lec.c  			skb2 = skb_copy_expand(skb, 0,
skb               267 net/atm/lec.c  					       min_frame_size - skb->truesize,
skb               269 net/atm/lec.c  			dev_kfree_skb(skb);
skb               274 net/atm/lec.c  			skb = skb2;
skb               276 net/atm/lec.c  		skb_put(skb, min_frame_size - skb->len);
skb               290 net/atm/lec.c  			skb_queue_tail(&entry->tx_wait, skb);
skb               295 net/atm/lec.c  			dev_kfree_skb(skb);
skb               309 net/atm/lec.c  	lec_send(vcc, skb);
skb               341 net/atm/lec.c  static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb               351 net/atm/lec.c  	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
skb               352 net/atm/lec.c  	mesg = (struct atmlec_msg *)skb->data;
skb               353 net/atm/lec.c  	tmp = skb->data;
skb               462 net/atm/lec.c  		dev_kfree_skb(skb);
skb               465 net/atm/lec.c  	dev_kfree_skb(skb);
skb               471 net/atm/lec.c  	struct sk_buff *skb;
skb               483 net/atm/lec.c  	while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
skb               484 net/atm/lec.c  		atm_return(vcc, skb->truesize);
skb               485 net/atm/lec.c  		dev_kfree_skb(skb);
skb               514 net/atm/lec.c  	struct sk_buff *skb;
skb               519 net/atm/lec.c  	skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
skb               520 net/atm/lec.c  	if (!skb)
skb               522 net/atm/lec.c  	skb->len = sizeof(struct atmlec_msg);
skb               523 net/atm/lec.c  	mesg = (struct atmlec_msg *)skb->data;
skb               535 net/atm/lec.c  	atm_force_charge(priv->lecd, skb->truesize);
skb               537 net/atm/lec.c  	skb_queue_tail(&sk->sk_receive_queue, skb);
skb               582 net/atm/lec.c  static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
skb               592 net/atm/lec.c  	if (!skb) {
skb               604 net/atm/lec.c  	       dev->name, skb->len, priv->lecid);
skb               606 net/atm/lec.c  		       skb->data, min(MAX_SKB_DUMP, skb->len), true);
skb               608 net/atm/lec.c  	if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) {
skb               613 net/atm/lec.c  		skb_queue_tail(&sk->sk_receive_queue, skb);
skb               619 net/atm/lec.c  		atm_return(vcc, skb->truesize);
skb               620 net/atm/lec.c  		if (*(__be16 *) skb->data == htons(priv->lecid) ||
skb               627 net/atm/lec.c  			dev_kfree_skb(skb);
skb               630 net/atm/lec.c  		dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
skb               638 net/atm/lec.c  			src = ((struct lecdatahdr_8023 *)skb->data)->h_source;
skb               650 net/atm/lec.c  			dev_kfree_skb(skb);
skb               654 net/atm/lec.c  			lec_arp_check_empties(priv, vcc, skb);
skb               655 net/atm/lec.c  		skb_pull(skb, 2);	/* skip lec_id */
skb               656 net/atm/lec.c  		skb->protocol = eth_type_trans(skb, dev);
skb               658 net/atm/lec.c  		dev->stats.rx_bytes += skb->len;
skb               659 net/atm/lec.c  		memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
skb               660 net/atm/lec.c  		netif_rx(skb);
skb               664 net/atm/lec.c  static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
skb               667 net/atm/lec.c  	struct net_device *dev = skb->dev;
skb               674 net/atm/lec.c  	vpriv->old_pop(vcc, skb);
skb              1088 net/atm/lec.c  	struct sk_buff *skb;
skb              1111 net/atm/lec.c  		skb = alloc_skb(*sizeoftlvs, GFP_ATOMIC);
skb              1112 net/atm/lec.c  		if (skb == NULL)
skb              1114 net/atm/lec.c  		skb->len = *sizeoftlvs;
skb              1115 net/atm/lec.c  		skb_copy_to_linear_data(skb, *tlvs, *sizeoftlvs);
skb              1116 net/atm/lec.c  		retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, skb);
skb              1132 net/atm/lec.c  	struct sk_buff *skb;
skb              1145 net/atm/lec.c  	skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
skb              1146 net/atm/lec.c  	if (skb == NULL)
skb              1148 net/atm/lec.c  	skb->len = sizeoftlvs;
skb              1149 net/atm/lec.c  	skb_copy_to_linear_data(skb, tlvs, sizeoftlvs);
skb              1150 net/atm/lec.c  	retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
skb              1687 net/atm/lec.c  				struct sk_buff *skb;
skb              1692 net/atm/lec.c  				while ((skb = skb_dequeue(&entry->tx_wait)))
skb              1693 net/atm/lec.c  					lec_send(vcc, skb);
skb              1942 net/atm/lec.c  	      void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
skb              2102 net/atm/lec.c  				struct sk_buff *skb;
skb              2108 net/atm/lec.c  				while ((skb = skb_dequeue(&entry->tx_wait)))
skb              2109 net/atm/lec.c  					lec_send(vcc, skb);
skb              2240 net/atm/lec.c  		      struct atm_vcc *vcc, struct sk_buff *skb)
skb              2245 net/atm/lec.c  	struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
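
lec.c queues frames on entry->tx_wait while an ATM circuit is still being resolved and drains the queue once the VCC comes up (the skb_dequeue loops above). The hold-then-flush core, with send_one() as a hypothetical transmit callback:

    #include <linux/skbuff.h>

    static void flush_tx_wait_sketch(struct sk_buff_head *tx_wait,
                                     void (*send_one)(struct sk_buff *skb))
    {
            struct sk_buff *skb;

            /* Producer side, while unresolved: skb_queue_tail(tx_wait, skb); */
            while ((skb = skb_dequeue(tx_wait)) != NULL)
                    send_one(skb);             /* arrival order is preserved */
    }
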
skb               149 net/atm/lec.h  	void (*old_pop) (struct atm_vcc *vcc, struct sk_buff *skb);
skb                22 net/atm/lec_arpc.h 	void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb);
skb                25 net/atm/lec_arpc.h 	void (*old_recv_push) (struct atm_vcc *vcc, struct sk_buff *skb);
skb                91 net/atm/mpc.c  static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
skb                93 net/atm/mpc.c  static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
skb                94 net/atm/mpc.c  static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
skb               492 net/atm/mpc.c  static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
skb               507 net/atm/mpc.c  	buff = skb->data + mpc->dev->hard_header_len;
skb               546 net/atm/mpc.c  		skb_pull(skb, ETH_HLEN);	/* get rid of Eth header */
skb               547 net/atm/mpc.c  		skb_push(skb, sizeof(tagged_llc_snap_hdr));
skb               549 net/atm/mpc.c  		skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
skb               552 net/atm/mpc.c  		skb_pull(skb, ETH_HLEN);	/* get rid of Eth header */
skb               553 net/atm/mpc.c  		skb_push(skb, sizeof(struct llc_snap_hdr));
skb               555 net/atm/mpc.c  		skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
skb               559 net/atm/mpc.c  	atm_account_tx(entry->shortcut, skb);
skb               560 net/atm/mpc.c  	entry->shortcut->send(entry->shortcut, skb);
skb               570 net/atm/mpc.c  static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
skb               583 net/atm/mpc.c  	eth = (struct ethhdr *)skb->data;
skb               588 net/atm/mpc.c  	if (skb->len < ETH_HLEN + sizeof(struct iphdr))
skb               590 net/atm/mpc.c  	skb_set_network_header(skb, ETH_HLEN);
skb               591 net/atm/mpc.c  	if (skb->len < ETH_HLEN + ip_hdr(skb)->ihl * 4 || ip_hdr(skb)->ihl < 5)
skb               596 net/atm/mpc.c  			if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
skb               602 net/atm/mpc.c  	return __netdev_start_xmit(mpc->old_ops, skb, dev, false);
skb               685 net/atm/mpc.c  static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
skb               695 net/atm/mpc.c  	if (skb == NULL) {
skb               701 net/atm/mpc.c  	skb->dev = dev;
skb               702 net/atm/mpc.c  	if (memcmp(skb->data, &llc_snap_mpoa_ctrl,
skb               708 net/atm/mpc.c  		skb_queue_tail(&sk->sk_receive_queue, skb);
skb               714 net/atm/mpc.c  	atm_return(vcc, skb->truesize);
skb               722 net/atm/mpc.c  	if (memcmp(skb->data, &llc_snap_mpoa_data_tagged,
skb               726 net/atm/mpc.c  	} else if (memcmp(skb->data, &llc_snap_mpoa_data,
skb               730 net/atm/mpc.c  		dev_kfree_skb_any(skb);
skb               734 net/atm/mpc.c  		dev_kfree_skb_any(skb);
skb               738 net/atm/mpc.c  	tmp = skb->data + sizeof(struct llc_snap_hdr);
skb               746 net/atm/mpc.c  		dev_kfree_skb_any(skb);
skb               759 net/atm/mpc.c  	skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag));
skb               761 net/atm/mpc.c  	new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length);
skb               763 net/atm/mpc.c  	dev_kfree_skb_any(skb);
skb               872 net/atm/mpc.c  	struct sk_buff *skb;
skb               895 net/atm/mpc.c  	while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
skb               896 net/atm/mpc.c  		atm_return(vcc, skb->truesize);
skb               897 net/atm/mpc.c  		kfree_skb(skb);
skb               908 net/atm/mpc.c  static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
skb               912 net/atm/mpc.c  	struct k_message *mesg = (struct k_message *)skb->data;
skb               913 net/atm/mpc.c  	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
skb               969 net/atm/mpc.c  	kfree_skb(skb);
skb               977 net/atm/mpc.c  	struct sk_buff *skb;
skb               985 net/atm/mpc.c  	skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
skb               986 net/atm/mpc.c  	if (skb == NULL)
skb               988 net/atm/mpc.c  	skb_put(skb, sizeof(struct k_message));
skb               989 net/atm/mpc.c  	skb_copy_to_linear_data(skb, mesg, sizeof(*mesg));
skb               990 net/atm/mpc.c  	atm_force_charge(mpc->mpoad_vcc, skb->truesize);
skb               993 net/atm/mpc.c  	skb_queue_tail(&sk->sk_receive_queue, skb);
skb              1251 net/atm/mpc.c  	struct sk_buff *skb;
skb              1259 net/atm/mpc.c  	skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
skb              1260 net/atm/mpc.c  	if (skb == NULL) {
skb              1265 net/atm/mpc.c  	skb_put(skb, sizeof(struct k_message));
skb              1266 net/atm/mpc.c  	memset(skb->data, 0, sizeof(struct k_message));
skb              1267 net/atm/mpc.c  	purge_msg = (struct k_message *)skb->data;
skb              1272 net/atm/mpc.c  	atm_force_charge(vcc, skb->truesize);
skb              1275 net/atm/mpc.c  	skb_queue_tail(&sk->sk_receive_queue, skb);
skb               132 net/atm/pppoatm.c static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
skb               136 net/atm/pppoatm.c 	pvcc->old_pop(atmvcc, skb);
skb               179 net/atm/pppoatm.c static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
skb               183 net/atm/pppoatm.c 	if (skb == NULL) {			/* VCC was closed */
skb               193 net/atm/pppoatm.c 	atm_return(atmvcc, skb->truesize);
skb               196 net/atm/pppoatm.c 		if (skb->len < LLC_LEN ||
skb               197 net/atm/pppoatm.c 		    memcmp(skb->data, pppllc, LLC_LEN))
skb               199 net/atm/pppoatm.c 		skb_pull(skb, LLC_LEN);
skb               203 net/atm/pppoatm.c 			kfree_skb(skb);
skb               206 net/atm/pppoatm.c 		if (skb->len >= sizeof(pppllc) &&
skb               207 net/atm/pppoatm.c 		    !memcmp(skb->data, pppllc, sizeof(pppllc))) {
skb               209 net/atm/pppoatm.c 			skb_pull(skb, LLC_LEN);
skb               212 net/atm/pppoatm.c 		if (skb->len >= (sizeof(pppllc) - LLC_LEN) &&
skb               213 net/atm/pppoatm.c 		    !memcmp(skb->data, &pppllc[LLC_LEN],
skb               219 net/atm/pppoatm.c 		pr_debug("Couldn't autodetect yet (skb: %6ph)\n", skb->data);
skb               224 net/atm/pppoatm.c 	ppp_input(&pvcc->chan, skb);
skb               228 net/atm/pppoatm.c 	kfree_skb(skb);
skb               287 net/atm/pppoatm.c static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
skb               293 net/atm/pppoatm.c 	ATM_SKB(skb)->vcc = pvcc->atmvcc;
skb               294 net/atm/pppoatm.c 	pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
skb               295 net/atm/pppoatm.c 	if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
skb               296 net/atm/pppoatm.c 		(void) skb_pull(skb, 1);
skb               298 net/atm/pppoatm.c 	vcc = ATM_SKB(skb)->vcc;
skb               313 net/atm/pppoatm.c 		kfree_skb(skb);
skb               319 net/atm/pppoatm.c 		if (skb_headroom(skb) < LLC_LEN) {
skb               321 net/atm/pppoatm.c 			n = skb_realloc_headroom(skb, LLC_LEN);
skb               327 net/atm/pppoatm.c 			consume_skb(skb);
skb               328 net/atm/pppoatm.c 			skb = n;
skb               329 net/atm/pppoatm.c 			if (skb == NULL) {
skb               333 net/atm/pppoatm.c 		} else if (!pppoatm_may_send(pvcc, skb->truesize))
skb               335 net/atm/pppoatm.c 		memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
skb               338 net/atm/pppoatm.c 		if (!pppoatm_may_send(pvcc, skb->truesize))
skb               344 net/atm/pppoatm.c 		kfree_skb(skb);
skb               348 net/atm/pppoatm.c 	atm_account_tx(vcc, skb);
skb               350 net/atm/pppoatm.c 		 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
skb               351 net/atm/pppoatm.c 	ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
skb               361 net/atm/pppoatm.c 	if ((pvcc->flags & SC_COMP_PROT) && skb_headroom(skb) > 0 &&
skb               362 net/atm/pppoatm.c 	    skb->data[-1] == '\0')
skb               363 net/atm/pppoatm.c 		(void) skb_push(skb, 1);
skb                23 net/atm/raw.c  static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
skb                25 net/atm/raw.c  	if (skb) {
skb                28 net/atm/raw.c  		skb_queue_tail(&sk->sk_receive_queue, skb);
skb                33 net/atm/raw.c  static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
skb                38 net/atm/raw.c  		 vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
skb                39 net/atm/raw.c  	WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
skb                40 net/atm/raw.c  	dev_kfree_skb_any(skb);
skb                44 net/atm/raw.c  static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb)
skb                51 net/atm/raw.c  	    (((u32 *)skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) !=
skb                54 net/atm/raw.c  		kfree_skb(skb);
skb                57 net/atm/raw.c  	return vcc->dev->ops->send(vcc, skb);
skb                25 net/atm/signaling.c static void sigd_put_skb(struct sk_buff *skb)
skb                29 net/atm/signaling.c 		kfree_skb(skb);
skb                32 net/atm/signaling.c 	atm_force_charge(sigd, skb->truesize);
skb                33 net/atm/signaling.c 	skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
skb                39 net/atm/signaling.c 	struct sk_buff *skb;
skb                58 net/atm/signaling.c 	while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
skb                60 net/atm/signaling.c 	*(struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg)) = *msg;
skb                61 net/atm/signaling.c 	sigd_put_skb(skb);
skb                64 net/atm/signaling.c static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb                70 net/atm/signaling.c 	msg = (struct atmsvc_msg *) skb->data;
skb                71 net/atm/signaling.c 	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
skb               109 net/atm/signaling.c 			dev_kfree_skb(skb);
skb               113 net/atm/signaling.c 		skb_queue_tail(&sk->sk_receive_queue, skb);
skb               138 net/atm/signaling.c 	dev_kfree_skb(skb);
skb               147 net/atm/signaling.c 	struct sk_buff *skb;
skb               152 net/atm/signaling.c 	while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
skb               154 net/atm/signaling.c 	msg = skb_put_zero(skb, sizeof(struct atmsvc_msg));
skb               174 net/atm/signaling.c 	sigd_put_skb(skb);
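
signaling.c delivers fixed-size control messages to the daemon by building them in a fresh skb and appending to the listener's receive queue. A sketch of that delivery step; struct my_msg stands in for struct atmsvc_msg, and waking the reader via sk_data_ready() is an assumption here, since the excerpts do not show the wakeup:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    struct my_msg { int type; };               /* placeholder message layout */

    static int deliver_ctrl_sketch(struct sock *sk, int type)
    {
            struct sk_buff *skb = alloc_skb(sizeof(struct my_msg), GFP_KERNEL);
            struct my_msg *msg;

            if (!skb)
                    return -ENOMEM;
            msg = skb_put_zero(skb, sizeof(*msg));  /* reserve and zero the body */
            msg->type = type;
            skb_queue_tail(&sk->sk_receive_queue, skb);
            sk->sk_data_ready(sk);             /* assumed wakeup of the reader */
            return 0;
    }
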
skb                49 net/atm/svc.c  	struct sk_buff *skb;
skb                65 net/atm/svc.c  	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
skb                66 net/atm/svc.c  		atm_return(vcc, skb->truesize);
skb                69 net/atm/svc.c  		dev_kfree_skb(skb);
skb               326 net/atm/svc.c  	struct sk_buff *skb;
skb               345 net/atm/svc.c  		while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
skb               370 net/atm/svc.c  		if (!skb) {
skb               374 net/atm/svc.c  		msg = (struct atmsvc_msg *)skb->data;
skb               383 net/atm/svc.c  		dev_kfree_skb(skb);
skb               239 net/ax25/af_ax25.c void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
skb               249 net/ax25/af_ax25.c 		    s->ax25_dev->dev == skb->dev &&
skb               251 net/ax25/af_ax25.c 			if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL)
skb               290 net/ax25/af_ax25.c 	struct sk_buff *skb;
skb               303 net/ax25/af_ax25.c 		while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) {
skb               304 net/ax25/af_ax25.c 			if (skb->sk != ax25->sk) {
skb               306 net/ax25/af_ax25.c 				ax25_cb *sax25 = sk_to_ax25(skb->sk);
skb               309 net/ax25/af_ax25.c 				sock_orphan(skb->sk);
skb               312 net/ax25/af_ax25.c 				skb->sk->sk_state = TCP_LISTEN;
skb               318 net/ax25/af_ax25.c 			kfree_skb(skb);
skb              1333 net/ax25/af_ax25.c 	struct sk_buff *skb;
skb              1362 net/ax25/af_ax25.c 		skb = skb_dequeue(&sk->sk_receive_queue);
skb              1363 net/ax25/af_ax25.c 		if (skb)
skb              1384 net/ax25/af_ax25.c 	newsk		 = skb->sk;
skb              1388 net/ax25/af_ax25.c 	kfree_skb(skb);
skb              1451 net/ax25/af_ax25.c 	struct sk_buff *skb;
skb              1555 net/ax25/af_ax25.c 	skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, &err);
skb              1556 net/ax25/af_ax25.c 	if (skb == NULL)
skb              1559 net/ax25/af_ax25.c 	skb_reserve(skb, size - len);
skb              1562 net/ax25/af_ax25.c 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
skb              1564 net/ax25/af_ax25.c 		kfree_skb(skb);
skb              1568 net/ax25/af_ax25.c 	skb_reset_network_header(skb);
skb              1572 net/ax25/af_ax25.c 		*(u8 *)skb_push(skb, 1) = sk->sk_protocol;
skb              1577 net/ax25/af_ax25.c 			kfree_skb(skb);
skb              1583 net/ax25/af_ax25.c 		ax25_output(ax25, ax25->paclen, skb);
skb              1589 net/ax25/af_ax25.c 	skb_push(skb, 1 + ax25_addr_size(dp));
skb              1594 net/ax25/af_ax25.c 	lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call,
skb              1597 net/ax25/af_ax25.c 	skb_set_transport_header(skb, lv);
skb              1599 net/ax25/af_ax25.c 	*skb_transport_header(skb) = AX25_UI;
skb              1602 net/ax25/af_ax25.c 	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
skb              1616 net/ax25/af_ax25.c 	struct sk_buff *skb;
skb              1631 net/ax25/af_ax25.c 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
skb              1633 net/ax25/af_ax25.c 	if (skb == NULL)
skb              1637 net/ax25/af_ax25.c 		skb_pull(skb, 1);		/* Remove PID */
skb              1639 net/ax25/af_ax25.c 	skb_reset_transport_header(skb);
skb              1640 net/ax25/af_ax25.c 	copied = skb->len;
skb              1647 net/ax25/af_ax25.c 	skb_copy_datagram_msg(skb, 0, msg, copied);
skb              1652 net/ax25/af_ax25.c 		const unsigned char *mac = skb_mac_header(skb);
skb              1656 net/ax25/af_ax25.c 		ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
skb              1675 net/ax25/af_ax25.c 	skb_free_datagram(sk, skb);
skb              1709 net/ax25/af_ax25.c 		struct sk_buff *skb;
skb              1712 net/ax25/af_ax25.c 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
skb              1713 net/ax25/af_ax25.c 			amount = skb->len;
skb                32 net/ax25/ax25_ds_in.c static int ax25_ds_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
skb               100 net/ax25/ax25_ds_in.c static int ax25_ds_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
skb               145 net/ax25/ax25_ds_in.c static int ax25_ds_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type)
skb               240 net/ax25/ax25_ds_in.c 			queued = ax25_rx_iframe(ax25, skb);
skb               279 net/ax25/ax25_ds_in.c int ax25_ds_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type)
skb               283 net/ax25/ax25_ds_in.c 	frametype = ax25_decode(ax25, skb, &ns, &nr, &pf);
skb               287 net/ax25/ax25_ds_in.c 		queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);
skb               290 net/ax25/ax25_ds_in.c 		queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);
skb               293 net/ax25/ax25_ds_in.c 		queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);
skb               126 net/ax25/ax25_ds_subr.c 	struct sk_buff *skb;
skb               132 net/ax25/ax25_ds_subr.c 	if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL)
skb               135 net/ax25/ax25_ds_subr.c 	skb_reset_network_header(skb);
skb               136 net/ax25/ax25_ds_subr.c 	p = skb_put(skb, 2);
skb               141 net/ax25/ax25_ds_subr.c 	skb->protocol = ax25_type_trans(skb, ax25_dev->dev);
skb               143 net/ax25/ax25_ds_subr.c 	dev_queue_xmit(skb);
skb                34 net/ax25/ax25_in.c static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
skb                39 net/ax25/ax25_in.c 		if (!(*skb->data & AX25_SEG_FIRST)) {
skb                40 net/ax25/ax25_in.c 			if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
skb                42 net/ax25/ax25_in.c 				ax25->fragno = *skb->data & AX25_SEG_REM;
skb                43 net/ax25/ax25_in.c 				skb_pull(skb, 1);	/* skip fragno */
skb                44 net/ax25/ax25_in.c 				ax25->fraglen += skb->len;
skb                45 net/ax25/ax25_in.c 				skb_queue_tail(&ax25->frag_queue, skb);
skb                82 net/ax25/ax25_in.c 		if (*skb->data & AX25_SEG_FIRST) {
skb                84 net/ax25/ax25_in.c 			ax25->fragno = *skb->data & AX25_SEG_REM;
skb                85 net/ax25/ax25_in.c 			skb_pull(skb, 1);		/* skip fragno */
skb                86 net/ax25/ax25_in.c 			ax25->fraglen = skb->len;
skb                87 net/ax25/ax25_in.c 			skb_queue_tail(&ax25->frag_queue, skb);
skb                99 net/ax25/ax25_in.c int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
skb               105 net/ax25/ax25_in.c 	if (skb == NULL) return 0;
skb               109 net/ax25/ax25_in.c 	pid = *skb->data;
skb               116 net/ax25/ax25_in.c 		struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
skb               118 net/ax25/ax25_in.c 			kfree_skb(skb);
skb               119 net/ax25/ax25_in.c 			skb = skbn;
skb               122 net/ax25/ax25_in.c 		skb_pull(skb, 1);	/* Remove PID */
skb               123 net/ax25/ax25_in.c 		skb->mac_header = skb->network_header;
skb               124 net/ax25/ax25_in.c 		skb_reset_network_header(skb);
skb               125 net/ax25/ax25_in.c 		skb->dev      = ax25->ax25_dev->dev;
skb               126 net/ax25/ax25_in.c 		skb->pkt_type = PACKET_HOST;
skb               127 net/ax25/ax25_in.c 		skb->protocol = htons(ETH_P_IP);
skb               128 net/ax25/ax25_in.c 		netif_rx(skb);
skb               132 net/ax25/ax25_in.c 		skb_pull(skb, 1);	/* Remove PID */
skb               133 net/ax25/ax25_in.c 		return ax25_rx_fragment(ax25, skb);
skb               137 net/ax25/ax25_in.c 		skb_pull(skb, 1);	/* Remove PID */
skb               138 net/ax25/ax25_in.c 		return (*func)(skb, ax25);
skb               144 net/ax25/ax25_in.c 			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
skb               157 net/ax25/ax25_in.c static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
skb               167 net/ax25/ax25_in.c 		queued = ax25_std_frame_in(ax25, skb, type);
skb               173 net/ax25/ax25_in.c 			queued = ax25_ds_frame_in(ax25, skb, type);
skb               175 net/ax25/ax25_in.c 			queued = ax25_std_frame_in(ax25, skb, type);
skb               183 net/ax25/ax25_in.c static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
skb               197 net/ax25/ax25_in.c 	skb_reset_transport_header(skb);
skb               206 net/ax25/ax25_in.c 	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
skb               218 net/ax25/ax25_in.c 	skb_pull(skb, ax25_addr_size(&dp));
skb               229 net/ax25/ax25_in.c 	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
skb               230 net/ax25/ax25_in.c 		skb_set_transport_header(skb, 2); /* skip control and pid */
skb               232 net/ax25/ax25_in.c 		ax25_send_to_raw(&dest, skb, skb->data[1]);
skb               238 net/ax25/ax25_in.c 		switch (skb->data[1]) {
skb               240 net/ax25/ax25_in.c 			skb_pull(skb,2);		/* drop PID/CTRL */
skb               241 net/ax25/ax25_in.c 			skb_reset_transport_header(skb);
skb               242 net/ax25/ax25_in.c 			skb_reset_network_header(skb);
skb               243 net/ax25/ax25_in.c 			skb->dev      = dev;
skb               244 net/ax25/ax25_in.c 			skb->pkt_type = PACKET_HOST;
skb               245 net/ax25/ax25_in.c 			skb->protocol = htons(ETH_P_IP);
skb               246 net/ax25/ax25_in.c 			netif_rx(skb);
skb               250 net/ax25/ax25_in.c 			skb_pull(skb,2);
skb               251 net/ax25/ax25_in.c 			skb_reset_transport_header(skb);
skb               252 net/ax25/ax25_in.c 			skb_reset_network_header(skb);
skb               253 net/ax25/ax25_in.c 			skb->dev      = dev;
skb               254 net/ax25/ax25_in.c 			skb->pkt_type = PACKET_HOST;
skb               255 net/ax25/ax25_in.c 			skb->protocol = htons(ETH_P_ARP);
skb               256 net/ax25/ax25_in.c 			netif_rx(skb);
skb               265 net/ax25/ax25_in.c 					kfree_skb(skb);
skb               270 net/ax25/ax25_in.c 					skb_pull(skb, 2);
skb               271 net/ax25/ax25_in.c 					if (sock_queue_rcv_skb(sk, skb) != 0)
skb               272 net/ax25/ax25_in.c 						kfree_skb(skb);
skb               277 net/ax25/ax25_in.c 				kfree_skb(skb);
skb               282 net/ax25/ax25_in.c 			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
skb               310 net/ax25/ax25_in.c 		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
skb               311 net/ax25/ax25_in.c 			kfree_skb(skb);
skb               321 net/ax25/ax25_in.c 	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
skb               322 net/ax25/ax25_in.c 	    (*skb->data & ~AX25_PF) != AX25_SABME) {
skb               327 net/ax25/ax25_in.c 		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
skb               346 net/ax25/ax25_in.c 			kfree_skb(skb);
skb               354 net/ax25/ax25_in.c 		skb_set_owner_r(skb, make);
skb               355 net/ax25/ax25_in.c 		skb_queue_head(&sk->sk_receive_queue, skb);
skb               381 net/ax25/ax25_in.c 		kfree_skb(skb);
skb               396 net/ax25/ax25_in.c 	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
skb               425 net/ax25/ax25_in.c 		kfree_skb(skb);
skb               433 net/ax25/ax25_in.c int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
skb               436 net/ax25/ax25_in.c 	skb_orphan(skb);
skb               439 net/ax25/ax25_in.c 		kfree_skb(skb);
skb               443 net/ax25/ax25_in.c 	if ((*skb->data & 0x0F) != 0) {
skb               444 net/ax25/ax25_in.c 		kfree_skb(skb);	/* Not a KISS data frame */
skb               448 net/ax25/ax25_in.c 	skb_pull(skb, AX25_KISS_HEADER_LEN);	/* Remove the KISS byte */
skb               450 net/ax25/ax25_in.c 	return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype);
skb                45 net/ax25/ax25_ip.c static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
skb                56 net/ax25/ax25_ip.c 	buff = skb_push(skb, AX25_HEADER_LEN);
skb                99 net/ax25/ax25_ip.c netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
skb               102 net/ax25/ax25_ip.c 	unsigned char *bp  = skb->data;
skb               123 net/ax25/ax25_ip.c 		dev = skb->dev;
skb               126 net/ax25/ax25_ip.c 		kfree_skb(skb);
skb               151 net/ax25/ax25_ip.c 			if ((ourskb = skb_copy(skb, GFP_ATOMIC)) == NULL) {
skb               152 net/ax25/ax25_ip.c 				kfree_skb(skb);
skb               156 net/ax25/ax25_ip.c 			if (skb->sk != NULL)
skb               157 net/ax25/ax25_ip.c 				skb_set_owner_w(ourskb, skb->sk);
skb               159 net/ax25/ax25_ip.c 			kfree_skb(skb);
skb               193 net/ax25/ax25_ip.c 	skb_pull(skb, AX25_KISS_HEADER_LEN);
skb               196 net/ax25/ax25_ip.c 		if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL) {
skb               197 net/ax25/ax25_ip.c 			kfree_skb(skb);
skb               201 net/ax25/ax25_ip.c 		skb = ourskb;
skb               204 net/ax25/ax25_ip.c 	ax25_queue_xmit(skb, dev);
skb               214 net/ax25/ax25_ip.c static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
skb               221 net/ax25/ax25_ip.c netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
skb               223 net/ax25/ax25_ip.c 	kfree_skb(skb);
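The ax25_ip.c path must rewrite routed frames, so when the incoming skb may be shared it takes a private copy first, carries the sending socket's accounting over, and frees the original. A minimal sketch of that copy-then-own step, matching the skb_copy()/skb_set_owner_w() entries above (the helper name is illustrative):

static struct sk_buff *ax25_skb_cow_sketch(struct sk_buff *skb)
{
	struct sk_buff *ourskb;

	ourskb = skb_copy(skb, GFP_ATOMIC);	/* private, writable copy */
	if (!ourskb) {
		kfree_skb(skb);
		return NULL;
	}

	if (skb->sk)				/* keep socket memory accounting */
		skb_set_owner_w(ourskb, skb->sk);

	kfree_skb(skb);				/* original no longer needed */
	return ourskb;
}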
skb                32 net/ax25/ax25_out.c ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
skb                52 net/ax25/ax25_out.c 		ax25_output(ax25, paclen, skb);
skb               103 net/ax25/ax25_out.c 	ax25_output(ax25, paclen, skb);
skb               116 net/ax25/ax25_out.c void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
skb               124 net/ax25/ax25_out.c 		kfree_skb(skb);
skb               128 net/ax25/ax25_out.c 	if ((skb->len - 1) > paclen) {
skb               129 net/ax25/ax25_out.c 		if (*skb->data == AX25_P_TEXT) {
skb               130 net/ax25/ax25_out.c 			skb_pull(skb, 1); /* skip PID */
skb               137 net/ax25/ax25_out.c 		fragno = skb->len / paclen;
skb               138 net/ax25/ax25_out.c 		if (skb->len % paclen == 0) fragno--;
skb               140 net/ax25/ax25_out.c 		frontlen = skb_headroom(skb);	/* Address space + CTRL */
skb               142 net/ax25/ax25_out.c 		while (skb->len > 0) {
skb               150 net/ax25/ax25_out.c 			if (skb->sk != NULL)
skb               151 net/ax25/ax25_out.c 				skb_set_owner_w(skbn, skb->sk);
skb               155 net/ax25/ax25_out.c 			len = (paclen > skb->len) ? skb->len : paclen;
skb               160 net/ax25/ax25_out.c 						      skb_network_offset(skb));
skb               161 net/ax25/ax25_out.c 				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
skb               174 net/ax25/ax25_out.c 						      skb_network_offset(skb));
skb               175 net/ax25/ax25_out.c 				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
skb               180 net/ax25/ax25_out.c 			skb_pull(skb, len);
skb               184 net/ax25/ax25_out.c 		kfree_skb(skb);
skb               186 net/ax25/ax25_out.c 		skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
skb               211 net/ax25/ax25_out.c static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
skb               215 net/ax25/ax25_out.c 	if (skb == NULL)
skb               218 net/ax25/ax25_out.c 	skb_reset_network_header(skb);
skb               221 net/ax25/ax25_out.c 		frame = skb_push(skb, 1);
skb               228 net/ax25/ax25_out.c 		frame = skb_push(skb, 2);
skb               238 net/ax25/ax25_out.c 	ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
skb               243 net/ax25/ax25_out.c 	struct sk_buff *skb, *skbn;
skb               272 net/ax25/ax25_out.c 	skb  = skb_dequeue(&ax25->write_queue);
skb               273 net/ax25/ax25_out.c 	if (!skb)
skb               279 net/ax25/ax25_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb               280 net/ax25/ax25_out.c 			skb_queue_head(&ax25->write_queue, skb);
skb               284 net/ax25/ax25_out.c 		if (skb->sk != NULL)
skb               285 net/ax25/ax25_out.c 			skb_set_owner_w(skbn, skb->sk);
skb               313 net/ax25/ax25_out.c 		skb_queue_tail(&ax25->ack_queue, skb);
skb               315 net/ax25/ax25_out.c 	} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);
skb               326 net/ax25/ax25_out.c void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
skb               339 net/ax25/ax25_out.c 	if (skb_headroom(skb) < headroom) {
skb               340 net/ax25/ax25_out.c 		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
skb               342 net/ax25/ax25_out.c 			kfree_skb(skb);
skb               346 net/ax25/ax25_out.c 		if (skb->sk != NULL)
skb               347 net/ax25/ax25_out.c 			skb_set_owner_w(skbn, skb->sk);
skb               349 net/ax25/ax25_out.c 		consume_skb(skb);
skb               350 net/ax25/ax25_out.c 		skb = skbn;
skb               353 net/ax25/ax25_out.c 	ptr = skb_push(skb, headroom);
skb               357 net/ax25/ax25_out.c 	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
skb               364 net/ax25/ax25_out.c void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
skb               368 net/ax25/ax25_out.c 	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));
skb               370 net/ax25/ax25_out.c 	ptr  = skb_push(skb, 1);
skb               373 net/ax25/ax25_out.c 	dev_queue_xmit(skb);
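The ax25_output() entries show the AX.25 fragmentation loop: the payload is peeled off in paclen-sized pieces, each copied into a fresh skb that inherits the sender's socket accounting before being queued. A condensed sketch under those assumptions (the fragment control byte and error unwinding are elided):

static void ax25_fragment_sketch(ax25_cb *ax25, struct sk_buff *skb,
				 int paclen, int frontlen)
{
	struct sk_buff *skbn;
	int len;

	while (skb->len > 0) {
		len = min_t(int, paclen, skb->len);

		skbn = alloc_skb(frontlen + 2 + len, GFP_ATOMIC);
		if (!skbn)
			break;				/* unwinding elided */

		if (skb->sk)
			skb_set_owner_w(skbn, skb->sk);

		skb_reserve(skbn, frontlen + 2);	/* headers + fragment byte */
		skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
		skb_pull(skb, len);			/* consume what was copied */

		skb_queue_tail(&ax25->write_queue, skbn);
	}

	kfree_skb(skb);					/* source fully consumed */
}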
skb               441 net/ax25/ax25_route.c struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
skb               450 net/ax25/ax25_route.c 	if (skb_headroom(skb) < len) {
skb               451 net/ax25/ax25_route.c 		if ((skbn = skb_realloc_headroom(skb, len)) == NULL) {
skb               456 net/ax25/ax25_route.c 		if (skb->sk != NULL)
skb               457 net/ax25/ax25_route.c 			skb_set_owner_w(skbn, skb->sk);
skb               459 net/ax25/ax25_route.c 		consume_skb(skb);
skb               461 net/ax25/ax25_route.c 		skb = skbn;
skb               464 net/ax25/ax25_route.c 	bp = skb_push(skb, len);
skb               468 net/ax25/ax25_route.c 	return skb;
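ax25_rt_build_path() and ax25_transmit_buffer() above share one idiom: when the existing headroom cannot hold the digipeater path, reallocate it, migrate socket ownership, and release the original with consume_skb() rather than kfree_skb(), since the data was handed on, not dropped. A minimal sketch of that pattern:

static struct sk_buff *ax25_grow_head_sketch(struct sk_buff *skb, int len)
{
	struct sk_buff *skbn;

	if (skb_headroom(skb) >= len)
		return skb;			/* enough room already */

	skbn = skb_realloc_headroom(skb, len);
	if (!skbn) {
		kfree_skb(skb);
		return NULL;
	}

	if (skb->sk)				/* keep socket memory accounting */
		skb_set_owner_w(skbn, skb->sk);

	consume_skb(skb);			/* delivered, not an error drop */
	return skbn;
}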
skb                39 net/ax25/ax25_std_in.c static int ax25_std_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
skb               103 net/ax25/ax25_std_in.c static int ax25_std_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
skb               141 net/ax25/ax25_std_in.c static int ax25_std_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type)
skb               225 net/ax25/ax25_std_in.c 			queued = ax25_rx_iframe(ax25, skb);
skb               266 net/ax25/ax25_std_in.c static int ax25_std_state4_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type)
skb               380 net/ax25/ax25_std_in.c 			queued = ax25_rx_iframe(ax25, skb);
skb               419 net/ax25/ax25_std_in.c int ax25_std_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type)
skb               423 net/ax25/ax25_std_in.c 	frametype = ax25_decode(ax25, skb, &ns, &nr, &pf);
skb               427 net/ax25/ax25_std_in.c 		queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);
skb               430 net/ax25/ax25_std_in.c 		queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);
skb               433 net/ax25/ax25_std_in.c 		queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type);
skb               436 net/ax25/ax25_std_in.c 		queued = ax25_std_state4_machine(ax25, skb, frametype, ns, nr, pf, type);
skb                48 net/ax25/ax25_subr.c 	struct sk_buff *skb;
skb                55 net/ax25/ax25_subr.c 			skb = skb_dequeue(&ax25->ack_queue);
skb                56 net/ax25/ax25_subr.c 			kfree_skb(skb);
skb                64 net/ax25/ax25_subr.c 	struct sk_buff *skb;
skb                71 net/ax25/ax25_subr.c 	while ((skb = skb_dequeue_tail(&ax25->ack_queue)) != NULL)
skb                72 net/ax25/ax25_subr.c 		skb_queue_head(&ax25->write_queue, skb);
skb                97 net/ax25/ax25_subr.c int ax25_decode(ax25_cb *ax25, struct sk_buff *skb, int *ns, int *nr, int *pf)
skb               102 net/ax25/ax25_subr.c 	frame = skb->data;
skb               119 net/ax25/ax25_subr.c 		skb_pull(skb, 1);
skb               126 net/ax25/ax25_subr.c 			skb_pull(skb, 2);
skb               131 net/ax25/ax25_subr.c 			skb_pull(skb, 2);
skb               135 net/ax25/ax25_subr.c 			skb_pull(skb, 1);
skb               149 net/ax25/ax25_subr.c 	struct sk_buff *skb;
skb               152 net/ax25/ax25_subr.c 	if ((skb = alloc_skb(ax25->ax25_dev->dev->hard_header_len + 2, GFP_ATOMIC)) == NULL)
skb               155 net/ax25/ax25_subr.c 	skb_reserve(skb, ax25->ax25_dev->dev->hard_header_len);
skb               157 net/ax25/ax25_subr.c 	skb_reset_network_header(skb);
skb               161 net/ax25/ax25_subr.c 		dptr = skb_put(skb, 1);
skb               168 net/ax25/ax25_subr.c 			dptr = skb_put(skb, 1);
skb               172 net/ax25/ax25_subr.c 			dptr = skb_put(skb, 2);
skb               179 net/ax25/ax25_subr.c 	ax25_transmit_buffer(ax25, skb, type);
skb               189 net/ax25/ax25_subr.c 	struct sk_buff *skb;
skb               196 net/ax25/ax25_subr.c 	if ((skb = alloc_skb(dev->hard_header_len + 1, GFP_ATOMIC)) == NULL)
skb               199 net/ax25/ax25_subr.c 	skb_reserve(skb, dev->hard_header_len);
skb               200 net/ax25/ax25_subr.c 	skb_reset_network_header(skb);
skb               204 net/ax25/ax25_subr.c 	dptr = skb_put(skb, 1);
skb               211 net/ax25/ax25_subr.c 	dptr  = skb_push(skb, ax25_addr_size(digi));
skb               214 net/ax25/ax25_subr.c 	ax25_queue_xmit(skb, dev);
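The ax25_subr.c response builders reserve dev->hard_header_len of headroom up front, so the later skb_push() for the AX.25 address field never triggers a reallocation. A minimal sketch of that builder shape (control-field variants elided):

static struct sk_buff *ax25_ctl_skb_sketch(struct net_device *dev, u8 control)
{
	struct sk_buff *skb;
	unsigned char *dptr;

	skb = alloc_skb(dev->hard_header_len + 1, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, dev->hard_header_len);	/* room for addresses later */
	skb_reset_network_header(skb);

	dptr = skb_put(skb, 1);			/* one-byte control field */
	*dptr = control;

	return skb;
}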
skb               208 net/batman-adv/bat_algo.c 	int portid = NETLINK_CB(cb->skb).portid;
skb               341 net/batman-adv/bat_iv_ogm.c 	struct sk_buff *skb;
skb               349 net/batman-adv/bat_iv_ogm.c 	packet_pos = forw_packet->skb->data;
skb               383 net/batman-adv/bat_iv_ogm.c 		packet_pos = forw_packet->skb->data + buff_pos;
skb               388 net/batman-adv/bat_iv_ogm.c 	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
skb               389 net/batman-adv/bat_iv_ogm.c 	if (skb) {
skb               392 net/batman-adv/bat_iv_ogm.c 				   skb->len + ETH_HLEN);
skb               393 net/batman-adv/bat_iv_ogm.c 		batadv_send_broadcast_skb(skb, hard_iface);
skb               451 net/batman-adv/bat_iv_ogm.c 	batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
skb               545 net/batman-adv/bat_iv_ogm.c 	struct sk_buff *skb;
skb               558 net/batman-adv/bat_iv_ogm.c 	skb = netdev_alloc_skb_ip_align(NULL, skb_size);
skb               559 net/batman-adv/bat_iv_ogm.c 	if (!skb)
skb               563 net/batman-adv/bat_iv_ogm.c 						    queue_left, bat_priv, skb);
skb               565 net/batman-adv/bat_iv_ogm.c 		kfree_skb(skb);
skb               569 net/batman-adv/bat_iv_ogm.c 	forw_packet_aggr->skb->priority = TC_PRIO_CONTROL;
skb               570 net/batman-adv/bat_iv_ogm.c 	skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
skb               572 net/batman-adv/bat_iv_ogm.c 	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
skb               597 net/batman-adv/bat_iv_ogm.c 	skb_put_data(forw_packet_aggr->skb, packet_buff, packet_len);
skb              1312 net/batman-adv/bat_iv_ogm.c batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
skb              1338 net/batman-adv/bat_iv_ogm.c 	skb_priv = skb_copy(skb, GFP_ATOMIC);
skb              1559 net/batman-adv/bat_iv_ogm.c static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
skb              1573 net/batman-adv/bat_iv_ogm.c 	ogm_packet = (struct batadv_ogm_packet *)(skb->data + ogm_offset);
skb              1574 net/batman-adv/bat_iv_ogm.c 	ethhdr = eth_hdr(skb);
skb              1669 net/batman-adv/bat_iv_ogm.c 	batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node,
skb              1683 net/batman-adv/bat_iv_ogm.c 		batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node,
skb              1731 net/batman-adv/bat_iv_ogm.c static int batadv_iv_ogm_receive(struct sk_buff *skb,
skb              1741 net/batman-adv/bat_iv_ogm.c 	res = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
skb              1753 net/batman-adv/bat_iv_ogm.c 			   skb->len + ETH_HLEN);
skb              1756 net/batman-adv/bat_iv_ogm.c 	ogm_packet = (struct batadv_ogm_packet *)skb->data;
skb              1759 net/batman-adv/bat_iv_ogm.c 	while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
skb              1761 net/batman-adv/bat_iv_ogm.c 		batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
skb              1766 net/batman-adv/bat_iv_ogm.c 		packet_pos = skb->data + ogm_offset;
skb              1774 net/batman-adv/bat_iv_ogm.c 		consume_skb(skb);
skb              1776 net/batman-adv/bat_iv_ogm.c 		kfree_skb(skb);
skb              2091 net/batman-adv/bat_iv_ogm.c 	int portid = NETLINK_CB(cb->skb).portid;
skb              2302 net/batman-adv/bat_iv_ogm.c 	int portid = NETLINK_CB(cb->skb).portid;
skb              2710 net/batman-adv/bat_iv_ogm.c 	int portid = NETLINK_CB(cb->skb).portid;
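batadv_iv_ogm_receive() above walks several OGMs packed into one skb by advancing an offset and recomputing the packet pointer each round. A sketch of the walk; ogm_fits() is an illustrative stand-in for batadv_iv_ogm_aggr_packet(), whose exact signature is not shown in the listing:

static void ogm_walk_sketch(struct sk_buff *skb,
			    bool (*ogm_fits)(unsigned int off, int headlen))
{
	struct batadv_ogm_packet *ogm_packet;
	unsigned int ogm_offset = 0;

	while (ogm_fits(ogm_offset, skb_headlen(skb))) {
		ogm_packet = (struct batadv_ogm_packet *)(skb->data + ogm_offset);

		/* ... process one OGM ... */

		ogm_offset += BATADV_OGM_HLEN;		   /* fixed header */
		ogm_offset += ntohs(ogm_packet->tvlv_len); /* plus its TVLVs */
	}
}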
skb               306 net/batman-adv/bat_v.c 	int portid = NETLINK_CB(cb->skb).portid;
skb               586 net/batman-adv/bat_v.c 	int portid = NETLINK_CB(cb->skb).portid;
skb              1010 net/batman-adv/bat_v.c 	int portid = NETLINK_CB(cb->skb).portid;
skb               203 net/batman-adv/bat_v_elp.c 	struct sk_buff *skb;
skb               227 net/batman-adv/bat_v_elp.c 		skb = skb_copy_expand(hard_iface->bat_v.elp_skb, 0,
skb               230 net/batman-adv/bat_v_elp.c 		if (!skb)
skb               237 net/batman-adv/bat_v_elp.c 		skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
skb               243 net/batman-adv/bat_v_elp.c 		batadv_send_skb_packet(skb, hard_iface, neigh->addr);
skb               262 net/batman-adv/bat_v_elp.c 	struct sk_buff *skb;
skb               282 net/batman-adv/bat_v_elp.c 	skb = skb_copy(hard_iface->bat_v.elp_skb, GFP_ATOMIC);
skb               283 net/batman-adv/bat_v_elp.c 	if (!skb)
skb               286 net/batman-adv/bat_v_elp.c 	elp_packet = (struct batadv_elp_packet *)skb->data;
skb               296 net/batman-adv/bat_v_elp.c 	batadv_send_broadcast_skb(skb, hard_iface);
skb               412 net/batman-adv/bat_v_elp.c 	struct sk_buff *skb;
skb               417 net/batman-adv/bat_v_elp.c 	skb = hard_iface->bat_v.elp_skb;
skb               418 net/batman-adv/bat_v_elp.c 	elp_packet = (struct batadv_elp_packet *)skb->data;
skb               511 net/batman-adv/bat_v_elp.c int batadv_v_elp_packet_recv(struct sk_buff *skb,
skb               517 net/batman-adv/bat_v_elp.c 	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
skb               521 net/batman-adv/bat_v_elp.c 	res = batadv_check_management_packet(skb, if_incoming, BATADV_ELP_HLEN);
skb               534 net/batman-adv/bat_v_elp.c 	elp_packet = (struct batadv_elp_packet *)skb->data;
skb               553 net/batman-adv/bat_v_elp.c 		consume_skb(skb);
skb               555 net/batman-adv/bat_v_elp.c 		kfree_skb(skb);
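The ELP entries show how throughput probes are built: the cached ELP template is copied with extra tailroom, then zero-padded out to the probe length so sampling uses full-size frames. A minimal sketch, assuming the template skb is smaller than probe_len:

static struct sk_buff *elp_probe_sketch(struct sk_buff *tmpl,
					unsigned int probe_len)
{
	struct sk_buff *skb;

	skb = skb_copy_expand(tmpl, 0, probe_len - tmpl->len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_put_zero(skb, probe_len - tmpl->len);	/* pad to probe size */
	return skb;
}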
skb                20 net/batman-adv/bat_v_elp.h int batadv_v_elp_packet_recv(struct sk_buff *skb,
skb               120 net/batman-adv/bat_v_ogm.c static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
skb               130 net/batman-adv/bat_v_ogm.c 			   skb->len + ETH_HLEN);
skb               132 net/batman-adv/bat_v_ogm.c 	batadv_send_broadcast_skb(skb, hard_iface);
skb               142 net/batman-adv/bat_v_ogm.c static unsigned int batadv_v_ogm_len(struct sk_buff *skb)
skb               146 net/batman-adv/bat_v_ogm.c 	ogm_packet = (struct batadv_ogm2_packet *)skb->data;
skb               159 net/batman-adv/bat_v_ogm.c static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
skb               164 net/batman-adv/bat_v_ogm.c 	unsigned int ogm_len = batadv_v_ogm_len(skb);
skb               181 net/batman-adv/bat_v_ogm.c 	struct sk_buff *skb;
skb               185 net/batman-adv/bat_v_ogm.c 	while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list)))
skb               186 net/batman-adv/bat_v_ogm.c 		kfree_skb(skb);
skb               207 net/batman-adv/bat_v_ogm.c 	struct sk_buff *skb;
skb               223 net/batman-adv/bat_v_ogm.c 	while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list))) {
skb               224 net/batman-adv/bat_v_ogm.c 		hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb);
skb               226 net/batman-adv/bat_v_ogm.c 		ogm_len = batadv_v_ogm_len(skb);
skb               227 net/batman-adv/bat_v_ogm.c 		skb_put_data(skb_aggr, skb->data, ogm_len);
skb               229 net/batman-adv/bat_v_ogm.c 		consume_skb(skb);
skb               240 net/batman-adv/bat_v_ogm.c static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
skb               246 net/batman-adv/bat_v_ogm.c 		batadv_v_ogm_send_to_if(skb, hard_iface);
skb               251 net/batman-adv/bat_v_ogm.c 	if (!batadv_v_ogm_queue_left(skb, hard_iface))
skb               254 net/batman-adv/bat_v_ogm.c 	hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb);
skb               255 net/batman-adv/bat_v_ogm.c 	skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
skb               267 net/batman-adv/bat_v_ogm.c 	struct sk_buff *skb, *skb_tmp;
skb               291 net/batman-adv/bat_v_ogm.c 	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + ogm_buff_len);
skb               292 net/batman-adv/bat_v_ogm.c 	if (!skb)
skb               295 net/batman-adv/bat_v_ogm.c 	skb_reserve(skb, ETH_HLEN);
skb               296 net/batman-adv/bat_v_ogm.c 	skb_put_data(skb, ogm_buff, ogm_buff_len);
skb               298 net/batman-adv/bat_v_ogm.c 	ogm_packet = (struct batadv_ogm2_packet *)skb->data;
skb               345 net/batman-adv/bat_v_ogm.c 		skb_tmp = skb_clone(skb, GFP_ATOMIC);
skb               356 net/batman-adv/bat_v_ogm.c 	consume_skb(skb);
skb               524 net/batman-adv/bat_v_ogm.c 	struct sk_buff *skb;
skb               561 net/batman-adv/bat_v_ogm.c 	skb = netdev_alloc_skb_ip_align(if_outgoing->net_dev,
skb               563 net/batman-adv/bat_v_ogm.c 	if (!skb)
skb               566 net/batman-adv/bat_v_ogm.c 	skb_reserve(skb, ETH_HLEN);
skb               567 net/batman-adv/bat_v_ogm.c 	skb_buff = skb_put_data(skb, ogm_received, packet_len);
skb               579 net/batman-adv/bat_v_ogm.c 	batadv_v_ogm_queue_on_if(skb, if_outgoing);
skb               856 net/batman-adv/bat_v_ogm.c static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
skb               869 net/batman-adv/bat_v_ogm.c 	ethhdr = eth_hdr(skb);
skb               870 net/batman-adv/bat_v_ogm.c 	ogm_packet = (struct batadv_ogm2_packet *)(skb->data + ogm_offset);
skb               986 net/batman-adv/bat_v_ogm.c int batadv_v_ogm_packet_recv(struct sk_buff *skb,
skb               991 net/batman-adv/bat_v_ogm.c 	struct ethhdr *ethhdr = eth_hdr(skb);
skb              1002 net/batman-adv/bat_v_ogm.c 	if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
skb              1008 net/batman-adv/bat_v_ogm.c 	ogm_packet = (struct batadv_ogm2_packet *)skb->data;
skb              1015 net/batman-adv/bat_v_ogm.c 			   skb->len + ETH_HLEN);
skb              1018 net/batman-adv/bat_v_ogm.c 	ogm_packet = (struct batadv_ogm2_packet *)skb->data;
skb              1020 net/batman-adv/bat_v_ogm.c 	while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
skb              1022 net/batman-adv/bat_v_ogm.c 		batadv_v_ogm_process(skb, ogm_offset, if_incoming);
skb              1027 net/batman-adv/bat_v_ogm.c 		packet_pos = skb->data + ogm_offset;
skb              1035 net/batman-adv/bat_v_ogm.c 		consume_skb(skb);
skb              1037 net/batman-adv/bat_v_ogm.c 		kfree_skb(skb);
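The OGM2 path queues per-interface OGMs on aggr_list and later drains them into a single transmit buffer; note the consume_skb() on the drained entries, marking normal completion rather than a drop. A minimal sketch of the drain, assuming skb_aggr was allocated large enough for the queued total:

static void ogm_aggr_drain_sketch(struct sk_buff_head *aggr_list,
				  struct sk_buff *skb_aggr)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(aggr_list))) {
		skb_put_data(skb_aggr, skb->data, skb->len);	/* append */
		consume_skb(skb);				/* not a drop */
	}
}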
skb                24 net/batman-adv/bat_v_ogm.h int batadv_v_ogm_packet_recv(struct sk_buff *skb,
skb               336 net/batman-adv/bridge_loop_avoidance.c 	struct sk_buff *skb;
skb               354 net/batman-adv/bridge_loop_avoidance.c 	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
skb               370 net/batman-adv/bridge_loop_avoidance.c 	if (!skb)
skb               373 net/batman-adv/bridge_loop_avoidance.c 	ethhdr = (struct ethhdr *)skb->data;
skb               428 net/batman-adv/bridge_loop_avoidance.c 		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
skb               430 net/batman-adv/bridge_loop_avoidance.c 		if (!skb)
skb               434 net/batman-adv/bridge_loop_avoidance.c 	skb_reset_mac_header(skb);
skb               435 net/batman-adv/bridge_loop_avoidance.c 	skb->protocol = eth_type_trans(skb, soft_iface);
skb               438 net/batman-adv/bridge_loop_avoidance.c 			   skb->len + ETH_HLEN);
skb               440 net/batman-adv/bridge_loop_avoidance.c 	netif_rx(skb);
skb              1073 net/batman-adv/bridge_loop_avoidance.c 				     struct sk_buff *skb)
skb              1086 net/batman-adv/bridge_loop_avoidance.c 	vid = batadv_get_vid(skb, 0);
skb              1087 net/batman-adv/bridge_loop_avoidance.c 	ethhdr = eth_hdr(skb);
skb              1101 net/batman-adv/bridge_loop_avoidance.c 			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
skb              1117 net/batman-adv/bridge_loop_avoidance.c 	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
skb              1121 net/batman-adv/bridge_loop_avoidance.c 	ethhdr = eth_hdr(skb);
skb              1595 net/batman-adv/bridge_loop_avoidance.c 				    struct sk_buff *skb)
skb              1603 net/batman-adv/bridge_loop_avoidance.c 	bcast_packet = (struct batadv_bcast_packet *)skb->data;
skb              1606 net/batman-adv/bridge_loop_avoidance.c 	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
skb              1700 net/batman-adv/bridge_loop_avoidance.c bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
skb              1710 net/batman-adv/bridge_loop_avoidance.c 	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
skb              1713 net/batman-adv/bridge_loop_avoidance.c 	vid = batadv_get_vid(skb, hdr_size);
skb              1766 net/batman-adv/bridge_loop_avoidance.c batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb              1774 net/batman-adv/bridge_loop_avoidance.c 	ethhdr = eth_hdr(skb);
skb              1823 net/batman-adv/bridge_loop_avoidance.c bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb              1833 net/batman-adv/bridge_loop_avoidance.c 	ethhdr = eth_hdr(skb);
skb              1842 net/batman-adv/bridge_loop_avoidance.c 	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
skb              1908 net/batman-adv/bridge_loop_avoidance.c 	kfree_skb(skb);
skb              1936 net/batman-adv/bridge_loop_avoidance.c bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb              1953 net/batman-adv/bridge_loop_avoidance.c 	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
skb              1956 net/batman-adv/bridge_loop_avoidance.c 	ethhdr = eth_hdr(skb);
skb              2207 net/batman-adv/bridge_loop_avoidance.c 	int portid = NETLINK_CB(cb->skb).portid;
skb              2208 net/batman-adv/bridge_loop_avoidance.c 	struct net *net = sock_net(cb->skb->sk);
skb              2445 net/batman-adv/bridge_loop_avoidance.c 	int portid = NETLINK_CB(cb->skb).portid;
skb              2446 net/batman-adv/bridge_loop_avoidance.c 	struct net *net = sock_net(cb->skb->sk);
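The bridge loop avoidance code answers some ARP requests itself: it crafts a reply with arp_create(), optionally VLAN-tags it, then injects it into the stack as if it had arrived on the soft interface. A minimal sketch of the injection tail (vlan_insert_tag() frees the skb on failure, so no extra cleanup is needed):

static void bla_inject_sketch(struct sk_buff *skb,
			      struct net_device *soft_iface,
			      unsigned short vid, bool tagged)
{
	if (tagged) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid);
		if (!skb)
			return;
	}

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);	/* sets skb->dev */
	netif_rx(skb);
}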
skb                37 net/batman-adv/bridge_loop_avoidance.h bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb                39 net/batman-adv/bridge_loop_avoidance.h bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb                41 net/batman-adv/bridge_loop_avoidance.h bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
skb                52 net/batman-adv/bridge_loop_avoidance.h 				    struct sk_buff *skb);
skb                68 net/batman-adv/bridge_loop_avoidance.h 				 struct sk_buff *skb, unsigned short vid,
skb                75 net/batman-adv/bridge_loop_avoidance.h 				 struct sk_buff *skb, unsigned short vid)
skb                80 net/batman-adv/bridge_loop_avoidance.h static inline bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
skb               107 net/batman-adv/bridge_loop_avoidance.h 			       struct sk_buff *skb)
skb               230 net/batman-adv/distributed-arp-table.c static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
skb               234 net/batman-adv/distributed-arp-table.c 	addr = (u8 *)(skb->data + hdr_size);
skb               247 net/batman-adv/distributed-arp-table.c static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
skb               249 net/batman-adv/distributed-arp-table.c 	return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN);
skb               259 net/batman-adv/distributed-arp-table.c static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
skb               261 net/batman-adv/distributed-arp-table.c 	return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4;
skb               271 net/batman-adv/distributed-arp-table.c static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
skb               273 net/batman-adv/distributed-arp-table.c 	return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4);
skb               421 net/batman-adv/distributed-arp-table.c static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb               432 net/batman-adv/distributed-arp-table.c 	ip_src = batadv_arp_ip_src(skb, hdr_size);
skb               433 net/batman-adv/distributed-arp-table.c 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
skb               436 net/batman-adv/distributed-arp-table.c 		   batadv_arp_hw_src(skb, hdr_size), &ip_src,
skb               437 net/batman-adv/distributed-arp-table.c 		   batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
skb               442 net/batman-adv/distributed-arp-table.c 	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
skb               488 net/batman-adv/distributed-arp-table.c static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb               674 net/batman-adv/distributed-arp-table.c 				    struct sk_buff *skb, __be32 ip,
skb               699 net/batman-adv/distributed-arp-table.c 		tmp_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
skb               992 net/batman-adv/distributed-arp-table.c 	int portid = NETLINK_CB(cb->skb).portid;
skb               993 net/batman-adv/distributed-arp-table.c 	struct net *net = sock_net(cb->skb->sk);
skb              1055 net/batman-adv/distributed-arp-table.c 			       struct sk_buff *skb, int hdr_size)
skb              1064 net/batman-adv/distributed-arp-table.c 	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
skb              1067 net/batman-adv/distributed-arp-table.c 	ethhdr = (struct ethhdr *)(skb->data + hdr_size);
skb              1073 net/batman-adv/distributed-arp-table.c 	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN +
skb              1074 net/batman-adv/distributed-arp-table.c 				    arp_hdr_len(skb->dev))))
skb              1077 net/batman-adv/distributed-arp-table.c 	arphdr = (struct arphdr *)(skb->data + hdr_size + ETH_HLEN);
skb              1095 net/batman-adv/distributed-arp-table.c 	ip_src = batadv_arp_ip_src(skb, hdr_size);
skb              1096 net/batman-adv/distributed-arp-table.c 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
skb              1103 net/batman-adv/distributed-arp-table.c 	hw_src = batadv_arp_hw_src(skb, hdr_size);
skb              1109 net/batman-adv/distributed-arp-table.c 		hw_dst = batadv_arp_hw_dst(skb, hdr_size);
skb              1129 net/batman-adv/distributed-arp-table.c static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
skb              1133 net/batman-adv/distributed-arp-table.c 	vid = batadv_get_vid(skb, *hdr_size);
skb              1165 net/batman-adv/distributed-arp-table.c 	struct sk_buff *skb;
skb              1167 net/batman-adv/distributed-arp-table.c 	skb = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_dst, bat_priv->soft_iface,
skb              1169 net/batman-adv/distributed-arp-table.c 	if (!skb)
skb              1172 net/batman-adv/distributed-arp-table.c 	skb_reset_mac_header(skb);
skb              1175 net/batman-adv/distributed-arp-table.c 		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
skb              1178 net/batman-adv/distributed-arp-table.c 	return skb;
skb              1192 net/batman-adv/distributed-arp-table.c 					   struct sk_buff *skb)
skb              1207 net/batman-adv/distributed-arp-table.c 	vid = batadv_dat_get_vid(skb, &hdr_size);
skb              1209 net/batman-adv/distributed-arp-table.c 	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
skb              1216 net/batman-adv/distributed-arp-table.c 	batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing outgoing ARP REQUEST");
skb              1218 net/batman-adv/distributed-arp-table.c 	ip_src = batadv_arp_ip_src(skb, hdr_size);
skb              1219 net/batman-adv/distributed-arp-table.c 	hw_src = batadv_arp_hw_src(skb, hdr_size);
skb              1220 net/batman-adv/distributed-arp-table.c 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
skb              1263 net/batman-adv/distributed-arp-table.c 				   skb->len + ETH_HLEN + hdr_size);
skb              1270 net/batman-adv/distributed-arp-table.c 		ret = batadv_dat_forward_data(bat_priv, skb, ip_dst, vid,
skb              1289 net/batman-adv/distributed-arp-table.c 					   struct sk_buff *skb, int hdr_size)
skb              1303 net/batman-adv/distributed-arp-table.c 	vid = batadv_dat_get_vid(skb, &hdr_size);
skb              1305 net/batman-adv/distributed-arp-table.c 	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
skb              1309 net/batman-adv/distributed-arp-table.c 	hw_src = batadv_arp_hw_src(skb, hdr_size);
skb              1310 net/batman-adv/distributed-arp-table.c 	ip_src = batadv_arp_ip_src(skb, hdr_size);
skb              1311 net/batman-adv/distributed-arp-table.c 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
skb              1313 net/batman-adv/distributed-arp-table.c 	batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing incoming ARP REQUEST");
skb              1345 net/batman-adv/distributed-arp-table.c 		kfree_skb(skb);
skb              1355 net/batman-adv/distributed-arp-table.c 					 struct sk_buff *skb)
skb              1366 net/batman-adv/distributed-arp-table.c 	vid = batadv_dat_get_vid(skb, &hdr_size);
skb              1368 net/batman-adv/distributed-arp-table.c 	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
skb              1372 net/batman-adv/distributed-arp-table.c 	batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing outgoing ARP REPLY");
skb              1374 net/batman-adv/distributed-arp-table.c 	hw_src = batadv_arp_hw_src(skb, hdr_size);
skb              1375 net/batman-adv/distributed-arp-table.c 	ip_src = batadv_arp_ip_src(skb, hdr_size);
skb              1376 net/batman-adv/distributed-arp-table.c 	hw_dst = batadv_arp_hw_dst(skb, hdr_size);
skb              1377 net/batman-adv/distributed-arp-table.c 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
skb              1385 net/batman-adv/distributed-arp-table.c 	batadv_dat_forward_data(bat_priv, skb, ip_src, vid,
skb              1387 net/batman-adv/distributed-arp-table.c 	batadv_dat_forward_data(bat_priv, skb, ip_dst, vid,
skb              1402 net/batman-adv/distributed-arp-table.c 					 struct sk_buff *skb, int hdr_size)
skb              1414 net/batman-adv/distributed-arp-table.c 	vid = batadv_dat_get_vid(skb, &hdr_size);
skb              1416 net/batman-adv/distributed-arp-table.c 	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
skb              1420 net/batman-adv/distributed-arp-table.c 	batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing incoming ARP REPLY");
skb              1422 net/batman-adv/distributed-arp-table.c 	hw_src = batadv_arp_hw_src(skb, hdr_size);
skb              1423 net/batman-adv/distributed-arp-table.c 	ip_src = batadv_arp_ip_src(skb, hdr_size);
skb              1424 net/batman-adv/distributed-arp-table.c 	hw_dst = batadv_arp_hw_dst(skb, hdr_size);
skb              1425 net/batman-adv/distributed-arp-table.c 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
skb              1475 net/batman-adv/distributed-arp-table.c 		kfree_skb(skb);
skb              1494 net/batman-adv/distributed-arp-table.c batadv_dat_check_dhcp_ipudp(struct sk_buff *skb, __be32 *ip_src)
skb              1496 net/batman-adv/distributed-arp-table.c 	unsigned int offset = skb_network_offset(skb);
skb              1500 net/batman-adv/distributed-arp-table.c 	iphdr = skb_header_pointer(skb, offset, sizeof(_iphdr), &_iphdr);
skb              1508 net/batman-adv/distributed-arp-table.c 	skb_set_transport_header(skb, offset);
skb              1510 net/batman-adv/distributed-arp-table.c 	udphdr = skb_header_pointer(skb, offset, sizeof(_udphdr), &_udphdr);
skb              1534 net/batman-adv/distributed-arp-table.c batadv_dat_check_dhcp(struct sk_buff *skb, __be16 proto, __be32 *ip_src)
skb              1548 net/batman-adv/distributed-arp-table.c 	if (!batadv_dat_check_dhcp_ipudp(skb, ip_src))
skb              1551 net/batman-adv/distributed-arp-table.c 	offset = skb_transport_offset(skb) + sizeof(struct udphdr);
skb              1552 net/batman-adv/distributed-arp-table.c 	if (skb->len < offset + sizeof(struct batadv_dhcp_packet))
skb              1555 net/batman-adv/distributed-arp-table.c 	dhcp_h = skb_header_pointer(skb, offset, sizeof(_dhcp_h), &_dhcp_h);
skb              1562 net/batman-adv/distributed-arp-table.c 	magic = skb_header_pointer(skb, offset, sizeof(_magic), &_magic);
skb              1581 net/batman-adv/distributed-arp-table.c static int batadv_dat_get_dhcp_message_type(struct sk_buff *skb)
skb              1583 net/batman-adv/distributed-arp-table.c 	unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
skb              1592 net/batman-adv/distributed-arp-table.c 	while ((tl = skb_header_pointer(skb, offset, sizeof(_tl), &_tl))) {
skb              1612 net/batman-adv/distributed-arp-table.c 	type = skb_header_pointer(skb, offset, sizeof(_type), &_type);
skb              1629 net/batman-adv/distributed-arp-table.c static bool batadv_dat_dhcp_get_yiaddr(struct sk_buff *skb, __be32 *buf)
skb              1631 net/batman-adv/distributed-arp-table.c 	unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
skb              1635 net/batman-adv/distributed-arp-table.c 	yiaddr = skb_header_pointer(skb, offset, BATADV_DHCP_YIADDR_LEN, buf);
skb              1656 net/batman-adv/distributed-arp-table.c static bool batadv_dat_get_dhcp_chaddr(struct sk_buff *skb, u8 *buf)
skb              1658 net/batman-adv/distributed-arp-table.c 	unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
skb              1662 net/batman-adv/distributed-arp-table.c 	chaddr = skb_header_pointer(skb, offset, BATADV_DHCP_CHADDR_LEN, buf);
skb              1693 net/batman-adv/distributed-arp-table.c 	struct sk_buff *skb;
skb              1695 net/batman-adv/distributed-arp-table.c 	skb = batadv_dat_arp_create_reply(bat_priv, yiaddr, ip_dst, chaddr,
skb              1697 net/batman-adv/distributed-arp-table.c 	if (!skb)
skb              1700 net/batman-adv/distributed-arp-table.c 	skb_set_network_header(skb, ETH_HLEN);
skb              1705 net/batman-adv/distributed-arp-table.c 	batadv_dat_forward_data(bat_priv, skb, yiaddr, vid,
skb              1707 net/batman-adv/distributed-arp-table.c 	batadv_dat_forward_data(bat_priv, skb, ip_dst, vid,
skb              1710 net/batman-adv/distributed-arp-table.c 	consume_skb(skb);
skb              1737 net/batman-adv/distributed-arp-table.c batadv_dat_check_dhcp_ack(struct sk_buff *skb, __be16 proto, __be32 *ip_src,
skb              1742 net/batman-adv/distributed-arp-table.c 	type = batadv_dat_check_dhcp(skb, proto, ip_src);
skb              1746 net/batman-adv/distributed-arp-table.c 	type = batadv_dat_get_dhcp_message_type(skb);
skb              1750 net/batman-adv/distributed-arp-table.c 	if (!batadv_dat_dhcp_get_yiaddr(skb, yiaddr))
skb              1753 net/batman-adv/distributed-arp-table.c 	if (!batadv_dat_get_dhcp_chaddr(skb, chaddr))
skb              1775 net/batman-adv/distributed-arp-table.c 					struct sk_buff *skb,
skb              1785 net/batman-adv/distributed-arp-table.c 	if (!batadv_dat_check_dhcp_ack(skb, proto, &ip_src, chaddr, &yiaddr))
skb              1788 net/batman-adv/distributed-arp-table.c 	batadv_dat_put_dhcp(bat_priv, chaddr, yiaddr, eth_hdr(skb)->h_source,
skb              1803 net/batman-adv/distributed-arp-table.c 					struct sk_buff *skb, int hdr_size)
skb              1815 net/batman-adv/distributed-arp-table.c 	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
skb              1818 net/batman-adv/distributed-arp-table.c 	ethhdr = (struct ethhdr *)(skb->data + hdr_size);
skb              1819 net/batman-adv/distributed-arp-table.c 	skb_set_network_header(skb, hdr_size + ETH_HLEN);
skb              1822 net/batman-adv/distributed-arp-table.c 	if (!batadv_dat_check_dhcp_ack(skb, proto, &ip_src, chaddr, &yiaddr))
skb              1826 net/batman-adv/distributed-arp-table.c 	vid = batadv_dat_get_vid(skb, &hdr_size);
skb              1866 net/batman-adv/distributed-arp-table.c 	vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size);
skb              1868 net/batman-adv/distributed-arp-table.c 	type = batadv_arp_get_type(bat_priv, forw_packet->skb, hdr_size);
skb              1872 net/batman-adv/distributed-arp-table.c 	ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size);
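The DAT DHCP helpers never touch skb->data directly: every field is fetched through skb_header_pointer(), which copies into a stack buffer when the skb is non-linear. A sketch of the option scan behind batadv_dat_get_dhcp_message_type(), using the standard DHCP option codes (53 = message type, 0 = pad, 255 = end; these codes come from RFC 2132, not from the listing):

static int dhcp_msg_type_sketch(struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb) +
			      sizeof(struct udphdr) + 240; /* BOOTP + cookie */
	u8 tl_buf[2], type_buf;
	u8 *tl, *type;

	while ((tl = skb_header_pointer(skb, offset, sizeof(tl_buf), tl_buf))) {
		if (tl[0] == 53) {			/* message type option */
			type = skb_header_pointer(skb, offset + 2, 1,
						  &type_buf);
			return type ? *type : -EINVAL;
		}
		if (tl[0] == 255)			/* end option */
			break;
		offset += (tl[0] == 0) ? 1 : 2 + tl[1];	/* pad has no length */
	}

	return -EINVAL;
}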
skb                29 net/batman-adv/distributed-arp-table.h 					   struct sk_buff *skb);
skb                31 net/batman-adv/distributed-arp-table.h 					   struct sk_buff *skb, int hdr_size);
skb                33 net/batman-adv/distributed-arp-table.h 					 struct sk_buff *skb);
skb                35 net/batman-adv/distributed-arp-table.h 					 struct sk_buff *skb, int hdr_size);
skb                37 net/batman-adv/distributed-arp-table.h 					struct sk_buff *skb,
skb                41 net/batman-adv/distributed-arp-table.h 					struct sk_buff *skb, int hdr_size);
skb               110 net/batman-adv/distributed-arp-table.h 				      struct sk_buff *skb)
skb               117 net/batman-adv/distributed-arp-table.h 				      struct sk_buff *skb, int hdr_size)
skb               124 net/batman-adv/distributed-arp-table.h 				    struct sk_buff *skb)
skb               131 net/batman-adv/distributed-arp-table.h 				    struct sk_buff *skb, int hdr_size)
skb               138 net/batman-adv/distributed-arp-table.h 				   struct sk_buff *skb, __be16 proto,
skb               145 net/batman-adv/distributed-arp-table.h 				   struct sk_buff *skb, int hdr_size)
skb                48 net/batman-adv/fragmentation.c 			kfree_skb(entry->skb);
skb                50 net/batman-adv/fragmentation.c 			consume_skb(entry->skb);
skb               139 net/batman-adv/fragmentation.c 				      struct sk_buff *skb,
skb               154 net/batman-adv/fragmentation.c 	if (skb_linearize(skb) < 0)
skb               157 net/batman-adv/fragmentation.c 	frag_packet = (struct batadv_frag_packet *)skb->data;
skb               165 net/batman-adv/fragmentation.c 	frag_entry_new->skb = skb;
skb               176 net/batman-adv/fragmentation.c 		chain->size = skb->len - hdr_size;
skb               193 net/batman-adv/fragmentation.c 			chain->size += skb->len - hdr_size;
skb               206 net/batman-adv/fragmentation.c 		chain->size += skb->len - hdr_size;
skb               233 net/batman-adv/fragmentation.c 		kfree_skb(skb);
skb               262 net/batman-adv/fragmentation.c 	skb_out = entry->skb;
skb               288 net/batman-adv/fragmentation.c 		size = entry->skb->len - hdr_size;
skb               289 net/batman-adv/fragmentation.c 		skb_put_data(skb_out, entry->skb->data + hdr_size, size);
skb               312 net/batman-adv/fragmentation.c bool batadv_frag_skb_buffer(struct sk_buff **skb,
skb               320 net/batman-adv/fragmentation.c 	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
skb               334 net/batman-adv/fragmentation.c 	*skb = skb_out;
skb               350 net/batman-adv/fragmentation.c bool batadv_frag_skb_fwd(struct sk_buff *skb,
skb               361 net/batman-adv/fragmentation.c 	packet = (struct batadv_frag_packet *)skb->data;
skb               377 net/batman-adv/fragmentation.c 				   skb->len + ETH_HLEN);
skb               380 net/batman-adv/fragmentation.c 		batadv_send_unicast_skb(skb, neigh_node);
skb               404 net/batman-adv/fragmentation.c static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
skb               416 net/batman-adv/fragmentation.c 	skb_fragment->priority = skb->priority;
skb               420 net/batman-adv/fragmentation.c 	skb_split(skb, skb_fragment, skb->len - fragment_size);
skb               438 net/batman-adv/fragmentation.c int batadv_frag_send_packet(struct sk_buff *skb,
skb               457 net/batman-adv/fragmentation.c 	if (skb->len == 0 || max_fragment_size == 0)
skb               460 net/batman-adv/fragmentation.c 	num_fragments = (skb->len - 1) / max_fragment_size + 1;
skb               461 net/batman-adv/fragmentation.c 	max_fragment_size = (skb->len - 1) / num_fragments + 1;
skb               483 net/batman-adv/fragmentation.c 	frag_header.total_size = htons(skb->len);
skb               490 net/batman-adv/fragmentation.c 	if (skb->priority >= 256 && skb->priority <= 263)
skb               491 net/batman-adv/fragmentation.c 		frag_header.priority = skb->priority - 256;
skb               499 net/batman-adv/fragmentation.c 	while (skb->len > max_fragment_size) {
skb               506 net/batman-adv/fragmentation.c 		skb_fragment = batadv_frag_create(skb, &frag_header,
skb               526 net/batman-adv/fragmentation.c 	if (batadv_skb_head_push(skb, header_size) < 0 ||
skb               527 net/batman-adv/fragmentation.c 	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
skb               532 net/batman-adv/fragmentation.c 	memcpy(skb->data, &frag_header, header_size);
skb               537 net/batman-adv/fragmentation.c 			   skb->len + ETH_HLEN);
skb               538 net/batman-adv/fragmentation.c 	ret = batadv_send_unicast_skb(skb, neigh_node);
skb               540 net/batman-adv/fragmentation.c 	skb = NULL;
skb               545 net/batman-adv/fragmentation.c 	kfree_skb(skb);
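batadv_frag_send_packet() balances fragment sizes instead of filling all but the last fragment: first a ceiling division gives the fragment count, then the size is recomputed for a near-even split. For example, a 3000-byte skb with a 1400-byte limit becomes three fragments of at most 1000 bytes instead of 1400 + 1400 + 200. A one-function sketch of that arithmetic:

static void frag_balance_sketch(unsigned int len,
				unsigned int *max_fragment_size,
				unsigned int *num_fragments)
{
	*num_fragments = (len - 1) / *max_fragment_size + 1;	/* ceil */
	*max_fragment_size = (len - 1) / *num_fragments + 1;	/* even split */
}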
skb                20 net/batman-adv/fragmentation.h bool batadv_frag_skb_fwd(struct sk_buff *skb,
skb                23 net/batman-adv/fragmentation.h bool batadv_frag_skb_buffer(struct sk_buff **skb,
skb                25 net/batman-adv/fragmentation.h int batadv_frag_send_packet(struct sk_buff *skb,
skb               562 net/batman-adv/gateway_client.c 	struct net *net = sock_net(cb->skb->sk);
skb               621 net/batman-adv/gateway_client.c batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
skb               635 net/batman-adv/gateway_client.c 	if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
skb               638 net/batman-adv/gateway_client.c 	ethhdr = eth_hdr(skb);
skb               644 net/batman-adv/gateway_client.c 		if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
skb               647 net/batman-adv/gateway_client.c 		vhdr = vlan_eth_hdr(skb);
skb               655 net/batman-adv/gateway_client.c 		if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
skb               658 net/batman-adv/gateway_client.c 		iphdr = (struct iphdr *)(skb->data + *header_len);
skb               667 net/batman-adv/gateway_client.c 		if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
skb               670 net/batman-adv/gateway_client.c 		ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
skb               682 net/batman-adv/gateway_client.c 	if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
skb               685 net/batman-adv/gateway_client.c 	udphdr = (struct udphdr *)(skb->data + *header_len);
skb               707 net/batman-adv/gateway_client.c 	    pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
skb               709 net/batman-adv/gateway_client.c 		p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
skb               714 net/batman-adv/gateway_client.c 		p = skb->data + *header_len + BATADV_DHCP_HLEN_OFFSET;
skb               718 net/batman-adv/gateway_client.c 		ether_addr_copy(chaddr, skb->data + chaddr_offset);
skb               741 net/batman-adv/gateway_client.c 			    struct sk_buff *skb)
skb               749 net/batman-adv/gateway_client.c 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
skb               754 net/batman-adv/gateway_client.c 	vid = batadv_get_vid(skb, 0);
skb                36 net/batman-adv/gateway_client.h bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
skb                38 net/batman-adv/gateway_client.h batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
skb               173 net/batman-adv/icmp_socket.c 	struct sk_buff *skb;
skb               199 net/batman-adv/icmp_socket.c 	skb = netdev_alloc_skb_ip_align(NULL, packet_len + ETH_HLEN);
skb               200 net/batman-adv/icmp_socket.c 	if (!skb) {
skb               205 net/batman-adv/icmp_socket.c 	skb->priority = TC_PRIO_CONTROL;
skb               206 net/batman-adv/icmp_socket.c 	skb_reserve(skb, ETH_HLEN);
skb               207 net/batman-adv/icmp_socket.c 	icmp_header = skb_put(skb, packet_len);
skb               274 net/batman-adv/icmp_socket.c 	batadv_send_unicast_skb(skb, neigh_node);
skb               281 net/batman-adv/icmp_socket.c 	kfree_skb(skb);
skb                69 net/batman-adv/main.c static int (*batadv_rx_handler[256])(struct sk_buff *skb,
skb               380 net/batman-adv/main.c void batadv_skb_set_priority(struct sk_buff *skb, int offset)
skb               389 net/batman-adv/main.c 	if (skb->priority >= 256 && skb->priority <= 263)
skb               392 net/batman-adv/main.c 	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
skb               398 net/batman-adv/main.c 		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
skb               406 net/batman-adv/main.c 		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
skb               413 net/batman-adv/main.c 		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
skb               423 net/batman-adv/main.c 	skb->priority = prio + 256;
skb               426 net/batman-adv/main.c static int batadv_recv_unhandled_packet(struct sk_buff *skb,
skb               429 net/batman-adv/main.c 	kfree_skb(skb);
skb               447 net/batman-adv/main.c int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
skb               466 net/batman-adv/main.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               469 net/batman-adv/main.c 	if (!skb)
skb               473 net/batman-adv/main.c 	if (unlikely(!pskb_may_pull(skb, 2)))
skb               477 net/batman-adv/main.c 	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
skb               492 net/batman-adv/main.c 	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
skb               502 net/batman-adv/main.c 	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
skb               505 net/batman-adv/main.c 	(*batadv_rx_handler[idx])(skb, hard_iface);
skb               516 net/batman-adv/main.c 	kfree_skb(skb);
skb               582 net/batman-adv/main.c 	int (*curr)(struct sk_buff *skb,
skb               615 net/batman-adv/main.c __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
skb               619 net/batman-adv/main.c 	unsigned int to = skb->len;
skb               625 net/batman-adv/main.c 	from = (unsigned int)(payload_ptr - skb->data);
skb               627 net/batman-adv/main.c 	skb_prepare_seq_read(skb, from, to, &st);
skb               644 net/batman-adv/main.c unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
skb               646 net/batman-adv/main.c 	struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
skb               653 net/batman-adv/main.c 	if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
skb               656 net/batman-adv/main.c 	vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
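main.c dispatches received frames through a 256-slot table indexed by the packet-type byte, with every slot pre-filled by an unhandled-packet handler so the lookup needs no bounds or NULL checks. A minimal sketch of that table, using the GCC range-initializer idiom common in the kernel:

typedef int (*rx_handler_sketch_t)(struct sk_buff *skb,
				   struct batadv_hard_iface *recv_if);

static int rx_unhandled_sketch(struct sk_buff *skb,
			       struct batadv_hard_iface *recv_if)
{
	kfree_skb(skb);			/* unknown type: drop */
	return NET_RX_DROP;
}

static rx_handler_sketch_t rx_handler_sketch[256] = {
	[0 ... 255] = rx_unhandled_sketch,
};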
skb               249 net/batman-adv/main.h void batadv_skb_set_priority(struct sk_buff *skb, int offset);
skb               250 net/batman-adv/main.h int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
skb               258 net/batman-adv/main.h __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
skb               383 net/batman-adv/main.h unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len);
skb               962 net/batman-adv/multicast.c static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
skb               964 net/batman-adv/multicast.c 	if (ip_mc_check_igmp(skb) < 0)
skb               967 net/batman-adv/multicast.c 	switch (igmp_hdr(skb)->type) {
skb               992 net/batman-adv/multicast.c 					     struct sk_buff *skb,
skb               999 net/batman-adv/multicast.c 	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
skb              1002 net/batman-adv/multicast.c 	if (batadv_mcast_is_report_ipv4(skb))
skb              1005 net/batman-adv/multicast.c 	iphdr = ip_hdr(skb);
skb              1028 net/batman-adv/multicast.c static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
skb              1030 net/batman-adv/multicast.c 	if (ipv6_mc_check_mld(skb) < 0)
skb              1033 net/batman-adv/multicast.c 	switch (icmp6_hdr(skb)->icmp6_type) {
skb              1056 net/batman-adv/multicast.c 					     struct sk_buff *skb,
skb              1063 net/batman-adv/multicast.c 	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
skb              1066 net/batman-adv/multicast.c 	if (batadv_mcast_is_report_ipv6(skb))
skb              1069 net/batman-adv/multicast.c 	ip6hdr = ipv6_hdr(skb);
skb              1098 net/batman-adv/multicast.c 					struct sk_buff *skb,
skb              1102 net/batman-adv/multicast.c 	struct ethhdr *ethhdr = eth_hdr(skb);
skb              1109 net/batman-adv/multicast.c 		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
skb              1116 net/batman-adv/multicast.c 		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
skb              1381 net/batman-adv/multicast.c batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb              1391 net/batman-adv/multicast.c 	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
skb              1398 net/batman-adv/multicast.c 	ethhdr = eth_hdr(skb);
skb              1451 net/batman-adv/multicast.c batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb              1460 net/batman-adv/multicast.c 	const u8 *addr = eth_hdr(skb)->h_dest;
skb              1468 net/batman-adv/multicast.c 		newskb = skb_copy(skb, GFP_ATOMIC);
skb              1500 net/batman-adv/multicast.c 				struct sk_buff *skb, unsigned short vid)
skb              1510 net/batman-adv/multicast.c 		newskb = skb_copy(skb, GFP_ATOMIC);
skb              1538 net/batman-adv/multicast.c 				struct sk_buff *skb, unsigned short vid)
skb              1548 net/batman-adv/multicast.c 		newskb = skb_copy(skb, GFP_ATOMIC);
skb              1577 net/batman-adv/multicast.c 			   struct sk_buff *skb, unsigned short vid)
skb              1579 net/batman-adv/multicast.c 	switch (ntohs(eth_hdr(skb)->h_proto)) {
skb              1581 net/batman-adv/multicast.c 		return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
skb              1583 net/batman-adv/multicast.c 		return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
skb              1605 net/batman-adv/multicast.c 				struct sk_buff *skb, unsigned short vid)
skb              1615 net/batman-adv/multicast.c 		newskb = skb_copy(skb, GFP_ATOMIC);
skb              1643 net/batman-adv/multicast.c 				struct sk_buff *skb, unsigned short vid)
skb              1653 net/batman-adv/multicast.c 		newskb = skb_copy(skb, GFP_ATOMIC);
skb              1682 net/batman-adv/multicast.c 			   struct sk_buff *skb, unsigned short vid)
skb              1684 net/batman-adv/multicast.c 	switch (ntohs(eth_hdr(skb)->h_proto)) {
skb              1686 net/batman-adv/multicast.c 		return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
skb              1688 net/batman-adv/multicast.c 		return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
skb              1711 net/batman-adv/multicast.c int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb              1716 net/batman-adv/multicast.c 	ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
skb              1718 net/batman-adv/multicast.c 		kfree_skb(skb);
skb              1722 net/batman-adv/multicast.c 	ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
skb              1724 net/batman-adv/multicast.c 		kfree_skb(skb);
skb              1728 net/batman-adv/multicast.c 	ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
skb              1730 net/batman-adv/multicast.c 		kfree_skb(skb);
skb              1734 net/batman-adv/multicast.c 	consume_skb(skb);
skb              2332 net/batman-adv/multicast.c 	struct net *net = sock_net(cb->skb->sk);
skb              2378 net/batman-adv/multicast.c 	int portid = NETLINK_CB(cb->skb).portid;
skb                46 net/batman-adv/multicast.h batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb                49 net/batman-adv/multicast.h int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb                68 net/batman-adv/multicast.h batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb                75 net/batman-adv/multicast.h batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb                78 net/batman-adv/multicast.h 	kfree_skb(skb);
skb               411 net/batman-adv/netlink.c static int batadv_netlink_get_mesh(struct sk_buff *skb, struct genl_info *info)
skb               440 net/batman-adv/netlink.c static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info)
skb               709 net/batman-adv/netlink.c batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
skb               766 net/batman-adv/netlink.c batadv_netlink_tp_meter_cancel(struct sk_buff *skb, struct genl_info *info)
skb               885 net/batman-adv/netlink.c static int batadv_netlink_get_hardif(struct sk_buff *skb,
skb               918 net/batman-adv/netlink.c static int batadv_netlink_set_hardif(struct sk_buff *skb,
skb               956 net/batman-adv/netlink.c 	struct net *net = sock_net(cb->skb->sk);
skb               961 net/batman-adv/netlink.c 	int portid = NETLINK_CB(cb->skb).portid;
skb              1090 net/batman-adv/netlink.c static int batadv_netlink_get_vlan(struct sk_buff *skb, struct genl_info *info)
skb              1120 net/batman-adv/netlink.c static int batadv_netlink_set_vlan(struct sk_buff *skb, struct genl_info *info)
skb              1253 net/batman-adv/netlink.c static int batadv_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
skb              1318 net/batman-adv/netlink.c static void batadv_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
skb                54 net/batman-adv/network-coding.c static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
skb               259 net/batman-adv/network-coding.c 		kfree_skb(nc_packet->skb);
skb               261 net/batman-adv/network-coding.c 		consume_skb(nc_packet->skb);
skb               574 net/batman-adv/network-coding.c 	batadv_send_unicast_skb(nc_packet->skb, nc_packet->neigh_node);
skb               575 net/batman-adv/network-coding.c 	nc_packet->skb = NULL;
skb               649 net/batman-adv/network-coding.c 			   nc_packet->skb->len + ETH_HLEN);
skb              1045 net/batman-adv/network-coding.c 				   struct sk_buff *skb,
skb              1106 net/batman-adv/network-coding.c 		packet1 = (struct batadv_unicast_packet *)nc_packet->skb->data;
skb              1107 net/batman-adv/network-coding.c 		packet2 = (struct batadv_unicast_packet *)skb->data;
skb              1109 net/batman-adv/network-coding.c 		packet_id2 = batadv_skb_crc32(skb,
skb              1110 net/batman-adv/network-coding.c 					      skb->data + sizeof(*packet2));
skb              1117 net/batman-adv/network-coding.c 		packet1 = (struct batadv_unicast_packet *)skb->data;
skb              1118 net/batman-adv/network-coding.c 		packet2 = (struct batadv_unicast_packet *)nc_packet->skb->data;
skb              1119 net/batman-adv/network-coding.c 		packet_id1 = batadv_skb_crc32(skb,
skb              1120 net/batman-adv/network-coding.c 					      skb->data + sizeof(*packet1));
skb              1127 net/batman-adv/network-coding.c 	if (skb->len <= nc_packet->skb->len) {
skb              1128 net/batman-adv/network-coding.c 		skb_dest = nc_packet->skb;
skb              1129 net/batman-adv/network-coding.c 		skb_src = skb;
skb              1131 net/batman-adv/network-coding.c 		skb_dest = skb;
skb              1132 net/batman-adv/network-coding.c 		skb_src = nc_packet->skb;
skb              1208 net/batman-adv/network-coding.c 	nc_packet->skb = NULL;
skb              1243 net/batman-adv/network-coding.c static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
skb              1245 net/batman-adv/network-coding.c 	if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
skb              1265 net/batman-adv/network-coding.c 		      struct sk_buff *skb,
skb              1299 net/batman-adv/network-coding.c 			if (!batadv_nc_skb_coding_possible(nc_packet->skb,
skb              1331 net/batman-adv/network-coding.c 			 struct sk_buff *skb,
skb              1348 net/batman-adv/network-coding.c 		if (!batadv_nc_skb_coding_possible(skb,
skb              1354 net/batman-adv/network-coding.c 						  out_nc_node, skb, eth_dst);
skb              1372 net/batman-adv/network-coding.c 					      struct sk_buff *skb,
skb              1378 net/batman-adv/network-coding.c 	skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
skb              1379 net/batman-adv/network-coding.c 	if (!skb)
skb              1383 net/batman-adv/network-coding.c 	ethhdr = eth_hdr(skb);
skb              1388 net/batman-adv/network-coding.c 	skb_push(skb, ETH_HLEN);
skb              1391 net/batman-adv/network-coding.c 	batadv_nc_skb_store_for_decoding(bat_priv, skb);
skb              1396 net/batman-adv/network-coding.c 	consume_skb(skb);
skb              1412 net/batman-adv/network-coding.c static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
skb              1425 net/batman-adv/network-coding.c 		nc_packet = batadv_nc_skb_src_search(bat_priv, skb,
skb              1439 net/batman-adv/network-coding.c 	batadv_nc_skb_store_before_coding(bat_priv, skb,
skb              1441 net/batman-adv/network-coding.c 	batadv_nc_skb_store_before_coding(bat_priv, nc_packet->skb,
skb              1445 net/batman-adv/network-coding.c 	if (batadv_nc_code_packets(bat_priv, skb, ethhdr, nc_packet,
skb              1466 net/batman-adv/network-coding.c static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
skb              1480 net/batman-adv/network-coding.c 	nc_packet->skb = skb;
skb              1500 net/batman-adv/network-coding.c bool batadv_nc_skb_forward(struct sk_buff *skb,
skb              1507 net/batman-adv/network-coding.c 	struct ethhdr *ethhdr = eth_hdr(skb);
skb              1516 net/batman-adv/network-coding.c 	payload = skb_network_header(skb);
skb              1522 net/batman-adv/network-coding.c 	if (batadv_nc_skb_dst_search(skb, neigh_node, ethhdr))
skb              1535 net/batman-adv/network-coding.c 	packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
skb              1536 net/batman-adv/network-coding.c 	if (!batadv_nc_skb_add_to_path(skb, nc_path, neigh_node, packet_id))
skb              1556 net/batman-adv/network-coding.c 				      struct sk_buff *skb)
skb              1560 net/batman-adv/network-coding.c 	struct ethhdr *ethhdr = eth_hdr(skb);
skb              1569 net/batman-adv/network-coding.c 	payload = skb_network_header(skb);
skb              1584 net/batman-adv/network-coding.c 	skb = skb_clone(skb, GFP_ATOMIC);
skb              1585 net/batman-adv/network-coding.c 	if (unlikely(!skb))
skb              1588 net/batman-adv/network-coding.c 	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
skb              1591 net/batman-adv/network-coding.c 	if (unlikely(!skb_pull_rcsum(skb, ETH_HLEN)))
skb              1595 net/batman-adv/network-coding.c 	packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
skb              1596 net/batman-adv/network-coding.c 	if (!batadv_nc_skb_add_to_path(skb, nc_path, NULL, packet_id))
skb              1603 net/batman-adv/network-coding.c 	kfree_skb(skb);
skb              1617 net/batman-adv/network-coding.c 					 struct sk_buff *skb)
skb              1619 net/batman-adv/network-coding.c 	struct ethhdr *ethhdr = eth_hdr(skb);
skb              1625 net/batman-adv/network-coding.c 	skb_push(skb, ETH_HLEN);
skb              1627 net/batman-adv/network-coding.c 	batadv_nc_skb_store_for_decoding(bat_priv, skb);
skb              1641 net/batman-adv/network-coding.c batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb              1654 net/batman-adv/network-coding.c 	memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
skb              1655 net/batman-adv/network-coding.c 	memcpy(&ethhdr_tmp, skb_mac_header(skb), sizeof(ethhdr_tmp));
skb              1657 net/batman-adv/network-coding.c 	if (skb_cow(skb, 0) < 0)
skb              1660 net/batman-adv/network-coding.c 	if (unlikely(!skb_pull_rcsum(skb, h_diff)))
skb              1666 net/batman-adv/network-coding.c 	skb_set_mac_header(skb, -ETH_HLEN);
skb              1667 net/batman-adv/network-coding.c 	skb_reset_network_header(skb);
skb              1670 net/batman-adv/network-coding.c 	ethhdr = eth_hdr(skb);
skb              1682 net/batman-adv/network-coding.c 		skb->pkt_type = PACKET_HOST;
skb              1695 net/batman-adv/network-coding.c 	if (coding_len > skb->len)
skb              1701 net/batman-adv/network-coding.c 	batadv_nc_memxor(skb->data + h_size,
skb              1702 net/batman-adv/network-coding.c 			 nc_packet->skb->data + h_size,
skb              1706 net/batman-adv/network-coding.c 	if (nc_packet->skb->len > coding_len + h_size) {
skb              1707 net/batman-adv/network-coding.c 		err = pskb_trim_rcsum(skb, coding_len + h_size);
skb              1713 net/batman-adv/network-coding.c 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
skb              1798 net/batman-adv/network-coding.c static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
skb              1813 net/batman-adv/network-coding.c 	if (unlikely(!pskb_may_pull(skb, hdr_size)))
skb              1816 net/batman-adv/network-coding.c 	coded_packet = (struct batadv_coded_packet *)skb->data;
skb              1817 net/batman-adv/network-coding.c 	ethhdr = eth_hdr(skb);
skb              1836 net/batman-adv/network-coding.c 	if (skb_linearize(skb) < 0)
skb              1839 net/batman-adv/network-coding.c 	if (skb_linearize(nc_packet->skb) < 0)
skb              1843 net/batman-adv/network-coding.c 	unicast_packet = batadv_nc_skb_decode_packet(bat_priv, skb, nc_packet);
skb              1850 net/batman-adv/network-coding.c 	BATADV_SKB_CB(skb)->decoded = true;
skb              1853 net/batman-adv/network-coding.c 			   skb->len + ETH_HLEN);
skb              1854 net/batman-adv/network-coding.c 	return batadv_recv_unicast_packet(skb, recv_if);
skb              1859 net/batman-adv/network-coding.c 	kfree_skb(skb);
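
The coding and decoding machinery in network-coding.c rests on one primitive: batadv_nc_memxor() XORs one payload into another, and because (a ^ b) ^ b == a, applying the same operation with the known packet recovers the coded one. A self-contained model of that primitive; nc_memxor is a hypothetical stand-in name for the real helper:

    #include <stddef.h>

    /* XOR src into dst byte by byte. Encoding and decoding are the
     * same operation: XORing the known payload back in undoes it. */
    static void nc_memxor(unsigned char *dst, const unsigned char *src,
                          size_t len)
    {
        while (len--)
            *dst++ ^= *src++;
    }

This is also why the length comparison above picks the longer of the two skbs as skb_dest: the shorter payload is XORed over a prefix of the longer one, and the trailing bytes pass through unmodified.
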
skb                35 net/batman-adv/network-coding.h bool batadv_nc_skb_forward(struct sk_buff *skb,
skb                38 net/batman-adv/network-coding.h 				      struct sk_buff *skb);
skb                40 net/batman-adv/network-coding.h 					 struct sk_buff *skb);
skb                89 net/batman-adv/network-coding.h static inline bool batadv_nc_skb_forward(struct sk_buff *skb,
skb                97 net/batman-adv/network-coding.h 				 struct sk_buff *skb)
skb               103 net/batman-adv/network-coding.h 				    struct sk_buff *skb)
skb               782 net/batman-adv/originator.c 	struct net *net = sock_net(cb->skb->sk);
skb              1479 net/batman-adv/originator.c 	struct net *net = sock_net(cb->skb->sk);
skb                42 net/batman-adv/routing.c static int batadv_route_unicast_packet(struct sk_buff *skb,
skb               180 net/batman-adv/routing.c bool batadv_check_management_packet(struct sk_buff *skb,
skb               187 net/batman-adv/routing.c 	if (unlikely(!pskb_may_pull(skb, header_len)))
skb               190 net/batman-adv/routing.c 	ethhdr = eth_hdr(skb);
skb               201 net/batman-adv/routing.c 	if (skb_cow(skb, 0) < 0)
skb               205 net/batman-adv/routing.c 	if (skb_linearize(skb) < 0)
skb               220 net/batman-adv/routing.c 				      struct sk_buff *skb)
skb               227 net/batman-adv/routing.c 	icmph = (struct batadv_icmp_header *)skb->data;
skb               234 net/batman-adv/routing.c 		if (skb_linearize(skb) < 0)
skb               237 net/batman-adv/routing.c 		batadv_socket_receive_packet(icmph, skb->len);
skb               251 net/batman-adv/routing.c 		if (skb_cow(skb, ETH_HLEN) < 0)
skb               254 net/batman-adv/routing.c 		icmph = (struct batadv_icmp_header *)skb->data;
skb               261 net/batman-adv/routing.c 		res = batadv_send_skb_to_orig(skb, orig_node, NULL);
skb               266 net/batman-adv/routing.c 		skb = NULL;
skb               269 net/batman-adv/routing.c 		if (!pskb_may_pull(skb, sizeof(struct batadv_icmp_tp_packet)))
skb               272 net/batman-adv/routing.c 		batadv_tp_meter_recv(bat_priv, skb);
skb               275 net/batman-adv/routing.c 		skb = NULL;
skb               287 net/batman-adv/routing.c 	kfree_skb(skb);
skb               293 net/batman-adv/routing.c 					 struct sk_buff *skb)
skb               300 net/batman-adv/routing.c 	icmp_packet = (struct batadv_icmp_packet *)skb->data;
skb               319 net/batman-adv/routing.c 	if (skb_cow(skb, ETH_HLEN) < 0)
skb               322 net/batman-adv/routing.c 	icmp_packet = (struct batadv_icmp_packet *)skb->data;
skb               329 net/batman-adv/routing.c 	res = batadv_send_skb_to_orig(skb, orig_node, NULL);
skb               334 net/batman-adv/routing.c 	skb = NULL;
skb               342 net/batman-adv/routing.c 	kfree_skb(skb);
skb               354 net/batman-adv/routing.c int batadv_recv_icmp_packet(struct sk_buff *skb,
skb               366 net/batman-adv/routing.c 	if (unlikely(!pskb_may_pull(skb, hdr_size)))
skb               369 net/batman-adv/routing.c 	ethhdr = eth_hdr(skb);
skb               383 net/batman-adv/routing.c 	icmph = (struct batadv_icmp_header *)skb->data;
skb               388 net/batman-adv/routing.c 	    skb->len >= sizeof(struct batadv_icmp_packet_rr)) {
skb               389 net/batman-adv/routing.c 		if (skb_linearize(skb) < 0)
skb               393 net/batman-adv/routing.c 		if (skb_cow(skb, ETH_HLEN) < 0)
skb               396 net/batman-adv/routing.c 		ethhdr = eth_hdr(skb);
skb               397 net/batman-adv/routing.c 		icmph = (struct batadv_icmp_header *)skb->data;
skb               409 net/batman-adv/routing.c 		return batadv_recv_my_icmp_packet(bat_priv, skb);
skb               413 net/batman-adv/routing.c 		return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
skb               421 net/batman-adv/routing.c 	if (skb_cow(skb, ETH_HLEN) < 0)
skb               424 net/batman-adv/routing.c 	icmph = (struct batadv_icmp_header *)skb->data;
skb               430 net/batman-adv/routing.c 	res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
skb               435 net/batman-adv/routing.c 	skb = NULL;
skb               441 net/batman-adv/routing.c 	kfree_skb(skb);
skb               459 net/batman-adv/routing.c 				       struct sk_buff *skb, int hdr_size)
skb               464 net/batman-adv/routing.c 	if (unlikely(!pskb_may_pull(skb, hdr_size)))
skb               467 net/batman-adv/routing.c 	ethhdr = eth_hdr(skb);
skb               675 net/batman-adv/routing.c static int batadv_route_unicast_packet(struct sk_buff *skb,
skb               681 net/batman-adv/routing.c 	struct ethhdr *ethhdr = eth_hdr(skb);
skb               685 net/batman-adv/routing.c 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
skb               701 net/batman-adv/routing.c 	if (skb_cow(skb, ETH_HLEN) < 0)
skb               705 net/batman-adv/routing.c 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
skb               722 net/batman-adv/routing.c 		batadv_skb_set_priority(skb, hdr_len);
skb               724 net/batman-adv/routing.c 	len = skb->len;
skb               725 net/batman-adv/routing.c 	res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
skb               737 net/batman-adv/routing.c 	skb = NULL;
skb               742 net/batman-adv/routing.c 	kfree_skb(skb);
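
A pattern repeated throughout the routing.c entries (see the skb = NULL assignments above): batadv_send_skb_to_orig() consumes the buffer even on failure, so the caller immediately clears its local pointer and funnels every exit through a single kfree_skb(), which is a safe no-op on NULL. A condensed sketch of the idiom; toy_forward and the simplified return handling are illustrative:

    static int toy_forward(struct sk_buff *skb,
                           struct batadv_orig_node *orig_node)
    {
        int ret = NET_RX_DROP;

        if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_SUCCESS)
            ret = NET_RX_SUCCESS;
        skb = NULL;            /* ownership transferred above */

        /* error paths in the real functions jump to a common exit: */
        kfree_skb(skb);        /* no-op once skb == NULL */
        return ret;
    }
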
skb               762 net/batman-adv/routing.c batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb               791 net/batman-adv/routing.c 	skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
skb               794 net/batman-adv/routing.c 	skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
skb               807 net/batman-adv/routing.c 				      struct sk_buff *skb, int hdr_len)
skb               818 net/batman-adv/routing.c 	if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
skb               822 net/batman-adv/routing.c 	if (skb_cow(skb, sizeof(*unicast_packet)) < 0)
skb               825 net/batman-adv/routing.c 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
skb               826 net/batman-adv/routing.c 	vid = batadv_get_vid(skb, hdr_len);
skb               827 net/batman-adv/routing.c 	ethhdr = (struct ethhdr *)(skb->data + hdr_len);
skb               835 net/batman-adv/routing.c 		if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
skb               881 net/batman-adv/routing.c 	if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
skb               905 net/batman-adv/routing.c 	skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
skb               908 net/batman-adv/routing.c 	skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
skb               924 net/batman-adv/routing.c int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
skb               931 net/batman-adv/routing.c 	check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
skb               936 net/batman-adv/routing.c 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
skb               940 net/batman-adv/routing.c 	return batadv_route_unicast_packet(skb, recv_if);
skb               943 net/batman-adv/routing.c 	kfree_skb(skb);
skb               954 net/batman-adv/routing.c int batadv_recv_unicast_packet(struct sk_buff *skb,
skb               967 net/batman-adv/routing.c 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
skb               974 net/batman-adv/routing.c 	check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
skb               980 net/batman-adv/routing.c 		batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
skb               984 net/batman-adv/routing.c 	if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
skb               987 net/batman-adv/routing.c 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
skb               994 net/batman-adv/routing.c 		orig_addr_gw = eth_hdr(skb)->h_source;
skb               997 net/batman-adv/routing.c 			is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
skb              1010 net/batman-adv/routing.c 				(struct batadv_unicast_4addr_packet *)skb->data;
skb              1027 net/batman-adv/routing.c 		if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
skb              1030 net/batman-adv/routing.c 		if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb,
skb              1034 net/batman-adv/routing.c 		batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);
skb              1036 net/batman-adv/routing.c 		batadv_interface_rx(recv_if->soft_iface, skb, hdr_size,
skb              1046 net/batman-adv/routing.c 	ret = batadv_route_unicast_packet(skb, recv_if);
skb              1048 net/batman-adv/routing.c 	skb = NULL;
skb              1051 net/batman-adv/routing.c 	kfree_skb(skb);
skb              1064 net/batman-adv/routing.c int batadv_recv_unicast_tvlv(struct sk_buff *skb,
skb              1074 net/batman-adv/routing.c 	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
skb              1078 net/batman-adv/routing.c 	if (skb_cow(skb, hdr_size) < 0)
skb              1082 net/batman-adv/routing.c 	if (skb_linearize(skb) < 0)
skb              1085 net/batman-adv/routing.c 	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)skb->data;
skb              1087 net/batman-adv/routing.c 	tvlv_buff = (unsigned char *)(skb->data + hdr_size);
skb              1090 net/batman-adv/routing.c 	if (tvlv_buff_len > skb->len - hdr_size)
skb              1099 net/batman-adv/routing.c 		ret = batadv_route_unicast_packet(skb, recv_if);
skb              1101 net/batman-adv/routing.c 		skb = NULL;
skb              1105 net/batman-adv/routing.c 	kfree_skb(skb);
skb              1121 net/batman-adv/routing.c int batadv_recv_frag_packet(struct sk_buff *skb,
skb              1129 net/batman-adv/routing.c 	if (batadv_check_unicast_packet(bat_priv, skb,
skb              1133 net/batman-adv/routing.c 	frag_packet = (struct batadv_frag_packet *)skb->data;
skb              1138 net/batman-adv/routing.c 	skb->priority = frag_packet->priority + 256;
skb              1142 net/batman-adv/routing.c 	    batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
skb              1144 net/batman-adv/routing.c 		skb = NULL;
skb              1150 net/batman-adv/routing.c 	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
skb              1153 net/batman-adv/routing.c 	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
skb              1159 net/batman-adv/routing.c 	if (skb) {
skb              1160 net/batman-adv/routing.c 		batadv_batman_skb_recv(skb, recv_if->net_dev,
skb              1163 net/batman-adv/routing.c 		skb = NULL;
skb              1171 net/batman-adv/routing.c 	kfree_skb(skb);
skb              1183 net/batman-adv/routing.c int batadv_recv_bcast_packet(struct sk_buff *skb,
skb              1196 net/batman-adv/routing.c 	if (unlikely(!pskb_may_pull(skb, hdr_size)))
skb              1199 net/batman-adv/routing.c 	ethhdr = eth_hdr(skb);
skb              1213 net/batman-adv/routing.c 	bcast_packet = (struct batadv_bcast_packet *)skb->data;
skb              1252 net/batman-adv/routing.c 	if (batadv_bla_check_bcast_duplist(bat_priv, skb))
skb              1255 net/batman-adv/routing.c 	batadv_skb_set_priority(skb, sizeof(struct batadv_bcast_packet));
skb              1258 net/batman-adv/routing.c 	batadv_add_bcast_packet_to_list(bat_priv, skb, 1, false);
skb              1263 net/batman-adv/routing.c 	if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
skb              1266 net/batman-adv/routing.c 	if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, hdr_size))
skb              1268 net/batman-adv/routing.c 	if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size))
skb              1271 net/batman-adv/routing.c 	batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);
skb              1274 net/batman-adv/routing.c 	batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node);
skb              1283 net/batman-adv/routing.c 	kfree_skb(skb);
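
Every receive handler indexed above opens the same way: confirm the advertised header actually sits in linear skb data before casting skb->data, and re-derive any header pointers afterwards, because pskb_may_pull() may reallocate the head. A minimal sketch of the check; toy_check and its return codes are hypothetical:

    static int toy_check(struct sk_buff *skb, int hdr_size)
    {
        struct ethhdr *ethhdr;

        /* drop packets whose claimed header is truncated or paged */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
            return -ENODATA;

        /* only dereference header pointers after the pull check */
        ethhdr = eth_hdr(skb);
        if (is_multicast_ether_addr(ethhdr->h_dest))
            return -EBADMSG;    /* unicast handler, multicast frame */

        return 0;
    }
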
skb                15 net/batman-adv/routing.h bool batadv_check_management_packet(struct sk_buff *skb,
skb                22 net/batman-adv/routing.h int batadv_recv_icmp_packet(struct sk_buff *skb,
skb                24 net/batman-adv/routing.h int batadv_recv_unicast_packet(struct sk_buff *skb,
skb                26 net/batman-adv/routing.h int batadv_recv_frag_packet(struct sk_buff *skb,
skb                28 net/batman-adv/routing.h int batadv_recv_bcast_packet(struct sk_buff *skb,
skb                30 net/batman-adv/routing.h int batadv_recv_tt_query(struct sk_buff *skb,
skb                32 net/batman-adv/routing.h int batadv_recv_roam_adv(struct sk_buff *skb,
skb                34 net/batman-adv/routing.h int batadv_recv_unicast_tvlv(struct sk_buff *skb,
skb                36 net/batman-adv/routing.h int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
skb                63 net/batman-adv/send.c int batadv_send_skb_packet(struct sk_buff *skb,
skb                86 net/batman-adv/send.c 	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
skb                89 net/batman-adv/send.c 	skb_reset_mac_header(skb);
skb                91 net/batman-adv/send.c 	ethhdr = eth_hdr(skb);
skb                96 net/batman-adv/send.c 	skb_set_network_header(skb, ETH_HLEN);
skb                97 net/batman-adv/send.c 	skb->protocol = htons(ETH_P_BATMAN);
skb                99 net/batman-adv/send.c 	skb->dev = hard_iface->net_dev;
skb               102 net/batman-adv/send.c 	batadv_nc_skb_store_for_decoding(bat_priv, skb);
skb               108 net/batman-adv/send.c 	ret = dev_queue_xmit(skb);
skb               111 net/batman-adv/send.c 	kfree_skb(skb);
skb               124 net/batman-adv/send.c int batadv_send_broadcast_skb(struct sk_buff *skb,
skb               127 net/batman-adv/send.c 	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
skb               139 net/batman-adv/send.c int batadv_send_unicast_skb(struct sk_buff *skb,
skb               147 net/batman-adv/send.c 	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);
skb               177 net/batman-adv/send.c int batadv_send_skb_to_orig(struct sk_buff *skb,
skb               196 net/batman-adv/send.c 	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
skb               198 net/batman-adv/send.c 		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
skb               200 net/batman-adv/send.c 		skb = NULL;
skb               209 net/batman-adv/send.c 	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
skb               212 net/batman-adv/send.c 		ret = batadv_send_unicast_skb(skb, neigh_node);
skb               215 net/batman-adv/send.c 	skb = NULL;
skb               220 net/batman-adv/send.c 	kfree_skb(skb);
skb               235 net/batman-adv/send.c batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
skb               241 net/batman-adv/send.c 	if (batadv_skb_head_push(skb, hdr_size) < 0)
skb               244 net/batman-adv/send.c 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
skb               265 net/batman-adv/send.c static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
skb               270 net/batman-adv/send.c 	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
skb               284 net/batman-adv/send.c 					   struct sk_buff *skb,
skb               300 net/batman-adv/send.c 	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
skb               304 net/batman-adv/send.c 	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
skb               334 net/batman-adv/send.c 			    struct sk_buff *skb, int packet_type,
skb               348 net/batman-adv/send.c 		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
skb               352 net/batman-adv/send.c 		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
skb               367 net/batman-adv/send.c 	ethhdr = eth_hdr(skb);
skb               368 net/batman-adv/send.c 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
skb               378 net/batman-adv/send.c 	ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
skb               380 net/batman-adv/send.c 	skb = NULL;
skb               383 net/batman-adv/send.c 	kfree_skb(skb);
skb               406 net/batman-adv/send.c 				   struct sk_buff *skb, int packet_type,
skb               410 net/batman-adv/send.c 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
skb               425 net/batman-adv/send.c 	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
skb               445 net/batman-adv/send.c int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb               452 net/batman-adv/send.c 	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
skb               473 net/batman-adv/send.c 		kfree_skb(forw_packet->skb);
skb               475 net/batman-adv/send.c 		consume_skb(forw_packet->skb);
skb               505 net/batman-adv/send.c 			 struct sk_buff *skb)
skb               537 net/batman-adv/send.c 	forw_packet->skb = skb;
skb               755 net/batman-adv/send.c 				    const struct sk_buff *skb,
skb               768 net/batman-adv/send.c 	newskb = skb_copy(skb, GFP_ATOMIC);
skb               824 net/batman-adv/send.c 	return BATADV_SKB_CB(forw_packet->skb)->num_bcasts < max;
skb               835 net/batman-adv/send.c 	BATADV_SKB_CB(forw_packet->skb)->num_bcasts++;
skb               846 net/batman-adv/send.c 	return BATADV_SKB_CB(forw_packet->skb)->num_bcasts > 0;
skb               881 net/batman-adv/send.c 	bcast_packet = (struct batadv_bcast_packet *)forw_packet->skb->data;
skb               895 net/batman-adv/send.c 			neigh_addr = eth_hdr(forw_packet->skb)->h_source;
skb               939 net/batman-adv/send.c 		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
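
The broadcast bookkeeping in send.c stores its retransmission counter inside the packet itself: BATADV_SKB_CB casts the skb's 48-byte cb[] scratch area to a module-private struct, the same area network-coding.c uses for its decoded flag. A sketch of the mechanism with hypothetical toy_ names; the real layout is struct batadv_skb_cb:

    struct toy_skb_cb {
        bool decoded;              /* set once a packet was nc-decoded */
        unsigned int num_bcasts;   /* (re)broadcasts performed so far */
    };

    #define TOY_SKB_CB(__skb) ((struct toy_skb_cb *)&((__skb)->cb[0]))

    static bool toy_bcast_quota_left(struct sk_buff *skb, unsigned int max)
    {
        return TOY_SKB_CB(skb)->num_bcasts < max;
    }

The cb[] area belongs to whichever layer currently holds the skb, which is why batadv_interface_tx() memsets it before use (see the soft-interface.c entries below).
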
skb                25 net/batman-adv/send.h 			 struct sk_buff *skb);
skb                32 net/batman-adv/send.h int batadv_send_skb_to_orig(struct sk_buff *skb,
skb                35 net/batman-adv/send.h int batadv_send_skb_packet(struct sk_buff *skb,
skb                38 net/batman-adv/send.h int batadv_send_broadcast_skb(struct sk_buff *skb,
skb                40 net/batman-adv/send.h int batadv_send_unicast_skb(struct sk_buff *skb,
skb                43 net/batman-adv/send.h 				    const struct sk_buff *skb,
skb                50 net/batman-adv/send.h 					   struct sk_buff *skb,
skb                54 net/batman-adv/send.h 			    struct sk_buff *skb, int packet_type,
skb                59 net/batman-adv/send.h 				   struct sk_buff *skb, int packet_type,
skb                62 net/batman-adv/send.h int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
skb                79 net/batman-adv/send.h 					 struct sk_buff *skb, u8 *dst_hint,
skb                82 net/batman-adv/send.h 	return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0,
skb               102 net/batman-adv/send.h 					       struct sk_buff *skb,
skb               107 net/batman-adv/send.h 	return batadv_send_skb_via_tt_generic(bat_priv, skb,
skb                64 net/batman-adv/soft-interface.c int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
skb                75 net/batman-adv/soft-interface.c 	result = skb_cow_head(skb, len);
skb                79 net/batman-adv/soft-interface.c 	skb_push(skb, len);
skb               180 net/batman-adv/soft-interface.c static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
skb               195 net/batman-adv/soft-interface.c 	int data_len = skb->len, ret;
skb               210 net/batman-adv/soft-interface.c 	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
skb               213 net/batman-adv/soft-interface.c 	vid = batadv_get_vid(skb, 0);
skb               215 net/batman-adv/soft-interface.c 	skb_reset_mac_header(skb);
skb               216 net/batman-adv/soft-interface.c 	ethhdr = eth_hdr(skb);
skb               222 net/batman-adv/soft-interface.c 		if (!pskb_may_pull(skb, sizeof(*vhdr)))
skb               224 net/batman-adv/soft-interface.c 		vhdr = vlan_eth_hdr(skb);
skb               238 net/batman-adv/soft-interface.c 	skb_set_network_header(skb, network_offset);
skb               240 net/batman-adv/soft-interface.c 	if (batadv_bla_tx(bat_priv, skb, vid))
skb               244 net/batman-adv/soft-interface.c 	ethhdr = eth_hdr(skb);
skb               250 net/batman-adv/soft-interface.c 						   vid, skb->skb_iif,
skb               251 net/batman-adv/soft-interface.c 						   skb->mark);
skb               257 net/batman-adv/soft-interface.c 	batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);
skb               279 net/batman-adv/soft-interface.c 		dhcp_rcp = batadv_gw_dhcp_recipient_get(skb, &header_len,
skb               284 net/batman-adv/soft-interface.c 		ethhdr = eth_hdr(skb);
skb               304 net/batman-adv/soft-interface.c 			forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
skb               315 net/batman-adv/soft-interface.c 	batadv_skb_set_priority(skb, 0);
skb               327 net/batman-adv/soft-interface.c 		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
skb               330 net/batman-adv/soft-interface.c 		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
skb               333 net/batman-adv/soft-interface.c 		bcast_packet = (struct batadv_bcast_packet *)skb->data;
skb               351 net/batman-adv/soft-interface.c 		batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay, true);
skb               356 net/batman-adv/soft-interface.c 		consume_skb(skb);
skb               362 net/batman-adv/soft-interface.c 			ret = batadv_gw_out_of_range(bat_priv, skb);
skb               365 net/batman-adv/soft-interface.c 			ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
skb               367 net/batman-adv/soft-interface.c 			ret = batadv_send_skb_unicast(bat_priv, skb,
skb               371 net/batman-adv/soft-interface.c 			ret = batadv_mcast_forw_send(bat_priv, skb, vid);
skb               374 net/batman-adv/soft-interface.c 								  skb))
skb               377 net/batman-adv/soft-interface.c 			batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
skb               379 net/batman-adv/soft-interface.c 			ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
skb               391 net/batman-adv/soft-interface.c 	kfree_skb(skb);
skb               420 net/batman-adv/soft-interface.c 			 struct sk_buff *skb, int hdr_size,
skb               430 net/batman-adv/soft-interface.c 	batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
skb               433 net/batman-adv/soft-interface.c 	skb_pull_rcsum(skb, hdr_size);
skb               434 net/batman-adv/soft-interface.c 	skb_reset_mac_header(skb);
skb               439 net/batman-adv/soft-interface.c 	nf_reset_ct(skb);
skb               441 net/batman-adv/soft-interface.c 	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
skb               444 net/batman-adv/soft-interface.c 	vid = batadv_get_vid(skb, 0);
skb               445 net/batman-adv/soft-interface.c 	ethhdr = eth_hdr(skb);
skb               449 net/batman-adv/soft-interface.c 		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
skb               452 net/batman-adv/soft-interface.c 		vhdr = (struct vlan_ethhdr *)skb->data;
skb               464 net/batman-adv/soft-interface.c 	skb->protocol = eth_type_trans(skb, soft_iface);
skb               465 net/batman-adv/soft-interface.c 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
skb               469 net/batman-adv/soft-interface.c 			   skb->len + ETH_HLEN);
skb               474 net/batman-adv/soft-interface.c 	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
skb               491 net/batman-adv/soft-interface.c 			skb->mark &= ~bat_priv->isolation_mark_mask;
skb               492 net/batman-adv/soft-interface.c 			skb->mark |= bat_priv->isolation_mark;
skb               499 net/batman-adv/soft-interface.c 	netif_rx(skb);
skb               503 net/batman-adv/soft-interface.c 	kfree_skb(skb);
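
The tail of batadv_interface_rx() above condenses to a fixed decapsulation sequence: strip the batman-adv header, rebuild the Ethernet view of the frame, then hand it to the stack, after which the skb belongs to the network core. A sketch under that assumption, with the stats, VLAN and bridge-loop-avoidance handling of the original omitted:

    static void toy_deliver(struct net_device *soft_iface,
                            struct sk_buff *skb, int hdr_size)
    {
        skb_pull_rcsum(skb, hdr_size);    /* drop mesh encapsulation */
        skb_reset_mac_header(skb);

        /* eth_type_trans() also pulls ETH_HLEN and sets skb->dev */
        skb->protocol = eth_type_trans(skb, soft_iface);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

        netif_rx(skb);                    /* stack owns the skb now */
    }
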
skb                18 net/batman-adv/soft-interface.h int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
skb                20 net/batman-adv/soft-interface.h 			 struct sk_buff *skb, int hdr_size,
skb               579 net/batman-adv/tp_meter.c 	struct sk_buff *skb;
skb               584 net/batman-adv/tp_meter.c 	skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
skb               585 net/batman-adv/tp_meter.c 	if (unlikely(!skb))
skb               588 net/batman-adv/tp_meter.c 	skb_reserve(skb, ETH_HLEN);
skb               589 net/batman-adv/tp_meter.c 	icmp = skb_put(skb, sizeof(*icmp));
skb               606 net/batman-adv/tp_meter.c 	data = skb_put(skb, data_len);
skb               609 net/batman-adv/tp_meter.c 	r = batadv_send_skb_to_orig(skb, orig_node, NULL);
skb               624 net/batman-adv/tp_meter.c 			       const struct sk_buff *skb)
skb               638 net/batman-adv/tp_meter.c 	icmp = (struct batadv_icmp_tp_packet *)skb->data;
skb              1162 net/batman-adv/tp_meter.c 	struct sk_buff *skb;
skb              1177 net/batman-adv/tp_meter.c 	skb = netdev_alloc_skb_ip_align(NULL, sizeof(*icmp) + ETH_HLEN);
skb              1178 net/batman-adv/tp_meter.c 	if (unlikely(!skb)) {
skb              1183 net/batman-adv/tp_meter.c 	skb_reserve(skb, ETH_HLEN);
skb              1184 net/batman-adv/tp_meter.c 	icmp = skb_put(skb, sizeof(*icmp));
skb              1199 net/batman-adv/tp_meter.c 	r = batadv_send_skb_to_orig(skb, orig_node, NULL);
skb              1227 net/batman-adv/tp_meter.c 					  const struct sk_buff *skb)
skb              1238 net/batman-adv/tp_meter.c 	icmp = (struct batadv_icmp_tp_packet *)skb->data;
skb              1241 net/batman-adv/tp_meter.c 	payload_len = skb->len - sizeof(struct batadv_unicast_packet);
skb              1385 net/batman-adv/tp_meter.c 			       const struct sk_buff *skb)
skb              1392 net/batman-adv/tp_meter.c 	icmp = (struct batadv_icmp_tp_packet *)skb->data;
skb              1436 net/batman-adv/tp_meter.c 		if (!batadv_tp_handle_out_of_order(tp_vars, skb))
skb              1444 net/batman-adv/tp_meter.c 	packet_size = skb->len - sizeof(struct batadv_unicast_packet);
skb              1467 net/batman-adv/tp_meter.c void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
skb              1471 net/batman-adv/tp_meter.c 	icmp = (struct batadv_icmp_tp_packet *)skb->data;
skb              1475 net/batman-adv/tp_meter.c 		batadv_tp_recv_msg(bat_priv, skb);
skb              1478 net/batman-adv/tp_meter.c 		batadv_tp_recv_ack(bat_priv, skb);
skb              1485 net/batman-adv/tp_meter.c 	consume_skb(skb);
skb                20 net/batman-adv/tp_meter.h void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb);
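
One detail worth flagging across the entries above: the code consistently distinguishes kfree_skb() from consume_skb() (see the network-coding.c pair at lines 259-261 of that file, and the tp_meter.c exit). Both release the buffer, but kfree_skb() flags the event as a drop for tracepoints and drop monitors, while consume_skb() records a normal end of life. A trivial illustration; toy_release is hypothetical:

    static void toy_release(struct sk_buff *skb, bool delivered)
    {
        if (delivered)
            consume_skb(skb);    /* normal, successful consumption */
        else
            kfree_skb(skb);      /* accounted as a packet drop */
    }
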
skb              1253 net/batman-adv/translation-table.c 	struct net *net = sock_net(cb->skb->sk);
skb              1262 net/batman-adv/translation-table.c 	int portid = NETLINK_CB(cb->skb).portid;
skb              2169 net/batman-adv/translation-table.c 	struct net *net = sock_net(cb->skb->sk);
skb              2180 net/batman-adv/translation-table.c 	int portid = NETLINK_CB(cb->skb).portid;
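
The netlink.c, originator.c and translation-table.c dump handlers above all recover their caller context the same way: a netlink dump stashes the original request skb in struct netlink_callback, from which the handler derives the network namespace and the requester's portid. A minimal sketch; toy_dump and the debug print are illustrative:

    static int toy_dump(struct sk_buff *msg, struct netlink_callback *cb)
    {
        struct net *net = sock_net(cb->skb->sk);     /* requester's netns */
        int portid = NETLINK_CB(cb->skb).portid;     /* unicast reply target */

        pr_debug("dump for portid %d in netns %p\n", portid, net);
        return 0;    /* returning 0 terminates the dump */
    }
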
skb               592 net/batman-adv/tvlv.c 	struct sk_buff *skb;
skb               603 net/batman-adv/tvlv.c 	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
skb               604 net/batman-adv/tvlv.c 	if (!skb)
skb               607 net/batman-adv/tvlv.c 	skb->priority = TC_PRIO_CONTROL;
skb               608 net/batman-adv/tvlv.c 	skb_reserve(skb, ETH_HLEN);
skb               609 net/batman-adv/tvlv.c 	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
skb               628 net/batman-adv/tvlv.c 	batadv_send_skb_to_orig(skb, orig_node, NULL);
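
tp_meter.c and tvlv.c share one allocation pattern for self-generated control packets: allocate with IP alignment and no device attached, reserve headroom for the Ethernet header that batadv_send_skb_packet() pushes later (see the send.c entries above), then skb_put() the protocol header. A sketch under those assumptions; toy_build is hypothetical:

    static struct sk_buff *toy_build(unsigned int payload_len)
    {
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + payload_len);
        if (unlikely(!skb))
            return NULL;

        skb_reserve(skb, ETH_HLEN);    /* headroom for the later L2 push */
        skb_put(skb, payload_len);     /* extend data area; caller fills it */

        return skb;
    }
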
skb               318 net/batman-adv/types.h 	struct sk_buff *skb;
skb              2105 net/batman-adv/types.h 	struct sk_buff *skb;
skb              2149 net/batman-adv/types.h 	struct sk_buff *skb;
skb                36 net/bluetooth/6lowpan.c #define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
skb               161 net/bluetooth/6lowpan.c 						  struct sk_buff *skb)
skb               163 net/bluetooth/6lowpan.c 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
skb               172 net/bluetooth/6lowpan.c 		if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
skb               180 net/bluetooth/6lowpan.c 			nexthop = &lowpan_cb(skb)->gw;
skb               189 net/bluetooth/6lowpan.c 		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
skb               263 net/bluetooth/6lowpan.c static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
skb               267 net/bluetooth/6lowpan.c 	skb_cp = skb_copy(skb, GFP_ATOMIC);
skb               274 net/bluetooth/6lowpan.c static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
skb               281 net/bluetooth/6lowpan.c 	return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr);
skb               284 net/bluetooth/6lowpan.c static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
skb               293 net/bluetooth/6lowpan.c 	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
skb               296 net/bluetooth/6lowpan.c 	skb_reset_network_header(skb);
skb               298 net/bluetooth/6lowpan.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               299 net/bluetooth/6lowpan.c 	if (!skb)
skb               303 net/bluetooth/6lowpan.c 	if (lowpan_is_ipv6(*skb_network_header(skb))) {
skb               305 net/bluetooth/6lowpan.c 		skb_pull(skb, 1);
skb               310 net/bluetooth/6lowpan.c 		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
skb               311 net/bluetooth/6lowpan.c 					    skb_tailroom(skb), GFP_ATOMIC);
skb               326 net/bluetooth/6lowpan.c 		dev->stats.rx_bytes += skb->len;
skb               330 net/bluetooth/6lowpan.c 		consume_skb(skb);
skb               331 net/bluetooth/6lowpan.c 	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
skb               332 net/bluetooth/6lowpan.c 		local_skb = skb_clone(skb, GFP_ATOMIC);
skb               354 net/bluetooth/6lowpan.c 		dev->stats.rx_bytes += skb->len;
skb               358 net/bluetooth/6lowpan.c 		consume_skb(skb);
skb               372 net/bluetooth/6lowpan.c static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
skb               386 net/bluetooth/6lowpan.c 	err = recv_pkt(skb, dev->netdev, peer);
skb               395 net/bluetooth/6lowpan.c static int setup_header(struct sk_buff *skb, struct net_device *netdev,
skb               405 net/bluetooth/6lowpan.c 	hdr = ipv6_hdr(skb);
skb               412 net/bluetooth/6lowpan.c 		lowpan_cb(skb)->chan = NULL;
skb               422 net/bluetooth/6lowpan.c 		peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
skb               431 net/bluetooth/6lowpan.c 		lowpan_cb(skb)->chan = peer->chan;
skb               436 net/bluetooth/6lowpan.c 	lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);
skb               438 net/bluetooth/6lowpan.c 	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
skb               445 net/bluetooth/6lowpan.c static int header_create(struct sk_buff *skb, struct net_device *netdev,
skb               456 net/bluetooth/6lowpan.c static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
skb               466 net/bluetooth/6lowpan.c 	chan->data = skb;
skb               468 net/bluetooth/6lowpan.c 	iv.iov_base = skb->data;
skb               469 net/bluetooth/6lowpan.c 	iv.iov_len = skb->len;
skb               472 net/bluetooth/6lowpan.c 	iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);
skb               474 net/bluetooth/6lowpan.c 	err = l2cap_chan_send(chan, &msg, skb->len);
skb               487 net/bluetooth/6lowpan.c static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
skb               507 net/bluetooth/6lowpan.c 			local_skb = skb_clone(skb, GFP_ATOMIC);
skb               526 net/bluetooth/6lowpan.c static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
skb               535 net/bluetooth/6lowpan.c 	skb = skb_unshare(skb, GFP_ATOMIC);
skb               536 net/bluetooth/6lowpan.c 	if (!skb)
skb               544 net/bluetooth/6lowpan.c 	err = setup_header(skb, netdev, &addr, &addr_type);
skb               546 net/bluetooth/6lowpan.c 		kfree_skb(skb);
skb               551 net/bluetooth/6lowpan.c 		if (lowpan_cb(skb)->chan) {
skb               554 net/bluetooth/6lowpan.c 			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
skb               555 net/bluetooth/6lowpan.c 			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
skb               563 net/bluetooth/6lowpan.c 		err = send_mcast_pkt(skb, netdev);
skb               566 net/bluetooth/6lowpan.c 	dev_kfree_skb(skb);
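
bt_xmit() above starts with skb_unshare() because the 6LoWPAN transmit path compresses headers in place: a buffer that might be shared elsewhere must be privatised first, and skb_unshare() copies only when needed, consuming the original if it does. A condensed sketch of that opening; everything after the unshare is illustrative:

    static netdev_tx_t toy_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        /* copy-on-write: after this, the skb is exclusively ours */
        skb = skb_unshare(skb, GFP_ATOMIC);
        if (!skb)
            return NET_XMIT_DROP;    /* original was already freed */

        /* ... headers may now be rewritten and the frame sent ... */

        dev_kfree_skb(skb);          /* this sketch only frees it */
        return NETDEV_TX_OK;
    }
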
skb               115 net/bluetooth/a2mp.c static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
skb               118 net/bluetooth/a2mp.c 	struct a2mp_cmd_rej *rej = (void *) skb->data;
skb               125 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*rej));
skb               130 net/bluetooth/a2mp.c static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
skb               133 net/bluetooth/a2mp.c 	struct a2mp_discov_req *req = (void *) skb->data;
skb               143 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*req));
skb               154 net/bluetooth/a2mp.c 		ext_feat = get_unaligned_le16(skb->data);
skb               157 net/bluetooth/a2mp.c 		skb_pull(skb, sizeof(ext_feat));
skb               190 net/bluetooth/a2mp.c static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
skb               193 net/bluetooth/a2mp.c 	struct a2mp_discov_rsp *rsp = (void *) skb->data;
skb               203 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*rsp));
skb               214 net/bluetooth/a2mp.c 		ext_feat = get_unaligned_le16(skb->data);
skb               217 net/bluetooth/a2mp.c 		skb_pull(skb, sizeof(ext_feat));
skb               220 net/bluetooth/a2mp.c 	cl = (void *) skb->data;
skb               235 net/bluetooth/a2mp.c 		cl = skb_pull(skb, sizeof(*cl));
skb               267 net/bluetooth/a2mp.c static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
skb               270 net/bluetooth/a2mp.c 	struct a2mp_cl *cl = (void *) skb->data;
skb               272 net/bluetooth/a2mp.c 	while (skb->len >= sizeof(*cl)) {
skb               275 net/bluetooth/a2mp.c 		cl = skb_pull(skb, sizeof(*cl));
skb               291 net/bluetooth/a2mp.c static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
skb               294 net/bluetooth/a2mp.c 	struct a2mp_info_req *req  = (void *) skb->data;
skb               328 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*req));
skb               332 net/bluetooth/a2mp.c static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
skb               335 net/bluetooth/a2mp.c 	struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data;
skb               355 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*rsp));
skb               359 net/bluetooth/a2mp.c static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
skb               362 net/bluetooth/a2mp.c 	struct a2mp_amp_assoc_req *req = (void *) skb->data;
skb               398 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*req));
skb               402 net/bluetooth/a2mp.c static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
skb               405 net/bluetooth/a2mp.c 	struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data;
skb               459 net/bluetooth/a2mp.c 	skb_pull(skb, len);
skb               463 net/bluetooth/a2mp.c static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
skb               466 net/bluetooth/a2mp.c 	struct a2mp_physlink_req *req = (void *) skb->data;
skb               539 net/bluetooth/a2mp.c 	skb_pull(skb, le16_to_cpu(hdr->len));
skb               543 net/bluetooth/a2mp.c static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
skb               546 net/bluetooth/a2mp.c 	struct a2mp_physlink_req *req = (void *) skb->data;
skb               582 net/bluetooth/a2mp.c 	skb_pull(skb, sizeof(*req));
skb               586 net/bluetooth/a2mp.c static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
skb               591 net/bluetooth/a2mp.c 	skb_pull(skb, le16_to_cpu(hdr->len));
skb               596 net/bluetooth/a2mp.c static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
skb               604 net/bluetooth/a2mp.c 	while (skb->len >= sizeof(*hdr)) {
skb               607 net/bluetooth/a2mp.c 		hdr = (void *) skb->data;
skb               612 net/bluetooth/a2mp.c 		skb_pull(skb, sizeof(*hdr));
skb               614 net/bluetooth/a2mp.c 		if (len > skb->len || !hdr->ident) {
skb               623 net/bluetooth/a2mp.c 			a2mp_command_rej(mgr, skb, hdr);
skb               627 net/bluetooth/a2mp.c 			err = a2mp_discover_req(mgr, skb, hdr);
skb               631 net/bluetooth/a2mp.c 			err = a2mp_change_notify(mgr, skb, hdr);
skb               635 net/bluetooth/a2mp.c 			err = a2mp_getinfo_req(mgr, skb, hdr);
skb               639 net/bluetooth/a2mp.c 			err = a2mp_getampassoc_req(mgr, skb, hdr);
skb               643 net/bluetooth/a2mp.c 			err = a2mp_createphyslink_req(mgr, skb, hdr);
skb               647 net/bluetooth/a2mp.c 			err = a2mp_discphyslink_req(mgr, skb, hdr);
skb               651 net/bluetooth/a2mp.c 			err = a2mp_discover_rsp(mgr, skb, hdr);
skb               655 net/bluetooth/a2mp.c 			err = a2mp_getinfo_rsp(mgr, skb, hdr);
skb               659 net/bluetooth/a2mp.c 			err = a2mp_getampassoc_rsp(mgr, skb, hdr);
skb               665 net/bluetooth/a2mp.c 			err = a2mp_cmd_rsp(mgr, skb, hdr);
skb               679 net/bluetooth/a2mp.c 		hdr = (void *) skb->data;
skb               689 net/bluetooth/a2mp.c 	kfree_skb(skb);
skb               725 net/bluetooth/a2mp.c 	struct sk_buff *skb;
skb               727 net/bluetooth/a2mp.c 	skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
skb               728 net/bluetooth/a2mp.c 	if (!skb)
skb               731 net/bluetooth/a2mp.c 	return skb;
skb               872 net/bluetooth/a2mp.c 				       struct sk_buff *skb)
skb               130 net/bluetooth/a2mp.h 				       struct sk_buff *skb);
skb               139 net/bluetooth/a2mp.h 						     struct sk_buff *skb)
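
a2mp_chan_recv_cb() above is a textbook length-prefixed parse loop: peek a fixed header, pull it, validate the advertised length against what is actually queued, dispatch, and repeat until the skb is drained. A condensed sketch with hypothetical toy_ names; dispatch and response generation are elided:

    struct toy_hdr {
        u8 code;
        u8 ident;
        __le16 len;
    } __packed;

    static int toy_recv(struct sk_buff *skb)
    {
        int err = 0;

        while (skb->len >= sizeof(struct toy_hdr)) {
            struct toy_hdr *hdr = (void *)skb->data;
            u16 len = le16_to_cpu(hdr->len);

            skb_pull(skb, sizeof(*hdr));
            if (len > skb->len || !hdr->ident) {
                err = -EINVAL;       /* truncated or malformed command */
                break;
            }
            skb_pull(skb, len);      /* consume this command's payload */
        }

        kfree_skb(skb);              /* the recv callback owns the skb */
        return err;
    }
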
skb               256 net/bluetooth/af_bluetooth.c 	struct sk_buff *skb;
skb               266 net/bluetooth/af_bluetooth.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb               267 net/bluetooth/af_bluetooth.c 	if (!skb) {
skb               274 net/bluetooth/af_bluetooth.c 	skblen = skb->len;
skb               275 net/bluetooth/af_bluetooth.c 	copied = skb->len;
skb               281 net/bluetooth/af_bluetooth.c 	skb_reset_transport_header(skb);
skb               282 net/bluetooth/af_bluetooth.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               284 net/bluetooth/af_bluetooth.c 		sock_recv_ts_and_drops(msg, sk, skb);
skb               287 net/bluetooth/af_bluetooth.c 			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
skb               291 net/bluetooth/af_bluetooth.c 	skb_free_datagram(sk, skb);
skb               348 net/bluetooth/af_bluetooth.c 		struct sk_buff *skb;
skb               351 net/bluetooth/af_bluetooth.c 		skb = skb_dequeue(&sk->sk_receive_queue);
skb               352 net/bluetooth/af_bluetooth.c 		if (!skb) {
skb               375 net/bluetooth/af_bluetooth.c 		chunk = min_t(unsigned int, skb->len, size);
skb               376 net/bluetooth/af_bluetooth.c 		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
skb               377 net/bluetooth/af_bluetooth.c 			skb_queue_head(&sk->sk_receive_queue, skb);
skb               385 net/bluetooth/af_bluetooth.c 		sock_recv_ts_and_drops(msg, sk, skb);
skb               388 net/bluetooth/af_bluetooth.c 			int skb_len = skb_headlen(skb);
skb               391 net/bluetooth/af_bluetooth.c 				__skb_pull(skb, chunk);
skb               395 net/bluetooth/af_bluetooth.c 				__skb_pull(skb, skb_len);
skb               398 net/bluetooth/af_bluetooth.c 				skb_walk_frags(skb, frag) {
skb               401 net/bluetooth/af_bluetooth.c 						skb->len -= chunk;
skb               402 net/bluetooth/af_bluetooth.c 						skb->data_len -= chunk;
skb               408 net/bluetooth/af_bluetooth.c 						skb->len -= frag->len;
skb               409 net/bluetooth/af_bluetooth.c 						skb->data_len -= frag->len;
skb               415 net/bluetooth/af_bluetooth.c 			if (skb->len) {
skb               416 net/bluetooth/af_bluetooth.c 				skb_queue_head(&sk->sk_receive_queue, skb);
skb               419 net/bluetooth/af_bluetooth.c 			kfree_skb(skb);
skb               423 net/bluetooth/af_bluetooth.c 			skb_queue_head(&sk->sk_receive_queue, skb);
skb               496 net/bluetooth/af_bluetooth.c 	struct sk_buff *skb;
skb               518 net/bluetooth/af_bluetooth.c 		skb = skb_peek(&sk->sk_receive_queue);
skb               519 net/bluetooth/af_bluetooth.c 		amount = skb ? skb->len : 0;
skb               217 net/bluetooth/amp.c 					  u16 opcode, struct sk_buff *skb)
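
The stream recvmsg path above shows the requeue idiom for partial reads: an skb that could not be copied, or that still holds unread bytes, goes back to the head of sk_receive_queue instead of being freed. A simplified single-chunk sketch, assuming a linear skb (the original also walks paged fragments):

    static int toy_stream_chunk(struct sock *sk, struct msghdr *msg,
                                size_t size)
    {
        struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
        int chunk;

        if (!skb)
            return 0;

        chunk = min_t(unsigned int, skb->len, size);
        if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
            skb_queue_head(&sk->sk_receive_queue, skb);  /* retry later */
            return -EFAULT;
        }

        __skb_pull(skb, chunk);
        if (skb->len)
            skb_queue_head(&sk->sk_receive_queue, skb);  /* keep leftovers */
        else
            kfree_skb(skb);

        return chunk;
    }
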
skb               219 net/bluetooth/amp.c 	struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
skb               228 net/bluetooth/amp.c 	frag_len = skb->len - sizeof(*rp);
skb               315 net/bluetooth/amp.c 					    u16 opcode, struct sk_buff *skb)
skb               317 net/bluetooth/amp.c 	struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
skb               257 net/bluetooth/bnep/core.c static int bnep_rx_extension(struct bnep_session *s, struct sk_buff *skb)
skb               263 net/bluetooth/bnep/core.c 		h = (void *) skb->data;
skb               264 net/bluetooth/bnep/core.c 		if (!skb_pull(skb, sizeof(*h))) {
skb               273 net/bluetooth/bnep/core.c 			bnep_rx_control(s, skb->data, skb->len);
skb               281 net/bluetooth/bnep/core.c 		if (!skb_pull(skb, h->len)) {
skb               298 net/bluetooth/bnep/core.c static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
skb               304 net/bluetooth/bnep/core.c 	dev->stats.rx_bytes += skb->len;
skb               306 net/bluetooth/bnep/core.c 	type = *(u8 *) skb->data;
skb               307 net/bluetooth/bnep/core.c 	skb_pull(skb, 1);
skb               308 net/bluetooth/bnep/core.c 	ctrl_type = *(u8 *)skb->data;
skb               314 net/bluetooth/bnep/core.c 		if (bnep_rx_control(s, skb->data, skb->len) < 0) {
skb               316 net/bluetooth/bnep/core.c 			kfree_skb(skb);
skb               321 net/bluetooth/bnep/core.c 			kfree_skb(skb);
skb               329 net/bluetooth/bnep/core.c 			if (!skb_pull(skb, 2 + *(u8 *)(skb->data + 1) * 2))
skb               335 net/bluetooth/bnep/core.c 			if (!skb_pull(skb, 3 + *(u16 *)(skb->data + 1) * 2))
skb               339 net/bluetooth/bnep/core.c 			kfree_skb(skb);
skb               343 net/bluetooth/bnep/core.c 		skb_reset_mac_header(skb);
skb               346 net/bluetooth/bnep/core.c 		if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
skb               349 net/bluetooth/bnep/core.c 		s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
skb               353 net/bluetooth/bnep/core.c 		if (bnep_rx_extension(s, skb) < 0)
skb               359 net/bluetooth/bnep/core.c 		if (!skb_pull(skb, 4))
skb               361 net/bluetooth/bnep/core.c 		s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
skb               366 net/bluetooth/bnep/core.c 	nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
skb               369 net/bluetooth/bnep/core.c 		kfree_skb(skb);
skb               382 net/bluetooth/bnep/core.c 		__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
skb               387 net/bluetooth/bnep/core.c 		__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
skb               392 net/bluetooth/bnep/core.c 		__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN * 2);
skb               397 net/bluetooth/bnep/core.c 	skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
skb               398 net/bluetooth/bnep/core.c 	kfree_skb(skb);
skb               408 net/bluetooth/bnep/core.c 	kfree_skb(skb);
skb               419 net/bluetooth/bnep/core.c static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
skb               421 net/bluetooth/bnep/core.c 	struct ethhdr *eh = (void *) skb->data;
skb               427 net/bluetooth/bnep/core.c 	BT_DBG("skb %p dev %p type %d", skb, skb->dev, skb->pkt_type);
skb               429 net/bluetooth/bnep/core.c 	if (!skb->dev) {
skb               444 net/bluetooth/bnep/core.c 		skb_pull(skb, ETH_ALEN * 2);
skb               460 net/bluetooth/bnep/core.c 	iv[il++] = (struct kvec) { skb->data, skb->len };
skb               461 net/bluetooth/bnep/core.c 	len += skb->len;
skb               467 net/bluetooth/bnep/core.c 	kfree_skb(skb);
skb               483 net/bluetooth/bnep/core.c 	struct sk_buff *skb;
skb               495 net/bluetooth/bnep/core.c 		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb               496 net/bluetooth/bnep/core.c 			skb_orphan(skb);
skb               497 net/bluetooth/bnep/core.c 			if (!skb_linearize(skb))
skb               498 net/bluetooth/bnep/core.c 				bnep_rx_frame(s, skb);
skb               500 net/bluetooth/bnep/core.c 				kfree_skb(skb);
skb               507 net/bluetooth/bnep/core.c 		while ((skb = skb_dequeue(&sk->sk_write_queue)))
skb               508 net/bluetooth/bnep/core.c 			if (bnep_tx_frame(s, skb))
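
bnep_rx_frame() above rebuilds what compression removed: frames may arrive without a full Ethernet header, so a fresh skb is allocated with two bytes of alignment headroom and the destination, source and protocol fields are re-synthesised before the frame climbs the stack. A sketch of that reconstruction; toy_rebuild is hypothetical and the per-type field selection is collapsed into one case:

    static struct sk_buff *toy_rebuild(struct sk_buff *skb,
                                       const struct ethhdr *eh)
    {
        struct sk_buff *nskb;

        nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
        if (!nskb) {
            kfree_skb(skb);
            return NULL;
        }
        skb_reserve(nskb, 2);    /* align the IP header on 4 bytes */

        __skb_put_data(nskb, eh->h_dest, ETH_ALEN);
        __skb_put_data(nskb, eh->h_source, ETH_ALEN);
        __skb_put_data(nskb, &eh->h_proto, 2);

        skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
        kfree_skb(skb);          /* compressed original no longer needed */
        return nskb;
    }
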
skb                56 net/bluetooth/bnep/netdev.c 	struct sk_buff *skb;
skb                62 net/bluetooth/bnep/netdev.c 	skb  = alloc_skb(size, GFP_ATOMIC);
skb                63 net/bluetooth/bnep/netdev.c 	if (!skb) {
skb                68 net/bluetooth/bnep/netdev.c 	r = (void *) skb->data;
skb                69 net/bluetooth/bnep/netdev.c 	__skb_put(skb, sizeof(*r));
skb                78 net/bluetooth/bnep/netdev.c 		__skb_put_data(skb, start, ETH_ALEN);
skb                79 net/bluetooth/bnep/netdev.c 		__skb_put_data(skb, dev->broadcast, ETH_ALEN);
skb                83 net/bluetooth/bnep/netdev.c 		int i, len = skb->len;
skb                86 net/bluetooth/bnep/netdev.c 			__skb_put_data(skb, dev->broadcast, ETH_ALEN);
skb                87 net/bluetooth/bnep/netdev.c 			__skb_put_data(skb, dev->broadcast, ETH_ALEN);
skb                96 net/bluetooth/bnep/netdev.c 			__skb_put_data(skb, ha->addr, ETH_ALEN);
skb                97 net/bluetooth/bnep/netdev.c 			__skb_put_data(skb, ha->addr, ETH_ALEN);
skb               101 net/bluetooth/bnep/netdev.c 		r->len = htons(skb->len - len);
skb               104 net/bluetooth/bnep/netdev.c 	skb_queue_tail(&sk->sk_write_queue, skb);
skb               122 net/bluetooth/bnep/netdev.c static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
skb               124 net/bluetooth/bnep/netdev.c 	struct ethhdr *eh = (void *) skb->data;
skb               134 net/bluetooth/bnep/netdev.c static u16 bnep_net_eth_proto(struct sk_buff *skb)
skb               136 net/bluetooth/bnep/netdev.c 	struct ethhdr *eh = (void *) skb->data;
skb               142 net/bluetooth/bnep/netdev.c 	if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF))
skb               148 net/bluetooth/bnep/netdev.c static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
skb               150 net/bluetooth/bnep/netdev.c 	u16 proto = bnep_net_eth_proto(skb);
skb               159 net/bluetooth/bnep/netdev.c 	BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto);
skb               164 net/bluetooth/bnep/netdev.c static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
skb               170 net/bluetooth/bnep/netdev.c 	BT_DBG("skb %p, dev %p", skb, dev);
skb               173 net/bluetooth/bnep/netdev.c 	if (bnep_net_mc_filter(skb, s)) {
skb               174 net/bluetooth/bnep/netdev.c 		kfree_skb(skb);
skb               180 net/bluetooth/bnep/netdev.c 	if (bnep_net_proto_filter(skb, s)) {
skb               181 net/bluetooth/bnep/netdev.c 		kfree_skb(skb);
skb               192 net/bluetooth/bnep/netdev.c 	skb_queue_tail(&sk->sk_write_queue, skb);
skb               134 net/bluetooth/cmtp/capi.c static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb)
skb               136 net/bluetooth/cmtp/capi.c 	struct cmtp_scb *scb = (void *) skb->cb;
skb               138 net/bluetooth/cmtp/capi.c 	BT_DBG("session %p skb %p len %d", session, skb, skb->len);
skb               141 net/bluetooth/cmtp/capi.c 	scb->data = (CAPIMSG_COMMAND(skb->data) == CAPI_DATA_B3);
skb               143 net/bluetooth/cmtp/capi.c 	skb_queue_tail(&session->transmit, skb);
skb               152 net/bluetooth/cmtp/capi.c 	struct sk_buff *skb;
skb               157 net/bluetooth/cmtp/capi.c 	skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
skb               158 net/bluetooth/cmtp/capi.c 	if (!skb) {
skb               163 net/bluetooth/cmtp/capi.c 	s = skb_put(skb, CAPI_MSG_BASELEN + 6 + len);
skb               181 net/bluetooth/cmtp/capi.c 	cmtp_send_capimsg(session, skb);
skb               184 net/bluetooth/cmtp/capi.c static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *skb)
skb               191 net/bluetooth/cmtp/capi.c 	BT_DBG("session %p skb %p len %d", session, skb, skb->len);
skb               193 net/bluetooth/cmtp/capi.c 	switch (CAPIMSG_SUBCOMMAND(skb->data)) {
skb               195 net/bluetooth/cmtp/capi.c 		if (skb->len < CAPI_MSG_BASELEN + 10)
skb               198 net/bluetooth/cmtp/capi.c 		func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 5);
skb               199 net/bluetooth/cmtp/capi.c 		info = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 8);
skb               203 net/bluetooth/cmtp/capi.c 			msgnum = CAPIMSG_MSGID(skb->data);
skb               209 net/bluetooth/cmtp/capi.c 				application->mapping = CAPIMSG_APPID(skb->data);
skb               216 net/bluetooth/cmtp/capi.c 			appl = CAPIMSG_APPID(skb->data);
skb               228 net/bluetooth/cmtp/capi.c 			if (skb->len < CAPI_MSG_BASELEN + 11 + sizeof(capi_profile))
skb               231 net/bluetooth/cmtp/capi.c 			controller = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 11);
skb               232 net/bluetooth/cmtp/capi.c 			msgnum = CAPIMSG_MSGID(skb->data);
skb               242 net/bluetooth/cmtp/capi.c 					skb->data + CAPI_MSG_BASELEN + 11,
skb               251 net/bluetooth/cmtp/capi.c 			if (skb->len < CAPI_MSG_BASELEN + 15)
skb               256 net/bluetooth/cmtp/capi.c 						skb->data[CAPI_MSG_BASELEN + 14]);
skb               260 net/bluetooth/cmtp/capi.c 					skb->data + CAPI_MSG_BASELEN + 15, len);
skb               266 net/bluetooth/cmtp/capi.c 			if (skb->len < CAPI_MSG_BASELEN + 32)
skb               270 net/bluetooth/cmtp/capi.c 				ctrl->version.majorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 16);
skb               271 net/bluetooth/cmtp/capi.c 				ctrl->version.minorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 20);
skb               272 net/bluetooth/cmtp/capi.c 				ctrl->version.majormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 24);
skb               273 net/bluetooth/cmtp/capi.c 				ctrl->version.minormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 28);
skb               279 net/bluetooth/cmtp/capi.c 			if (skb->len < CAPI_MSG_BASELEN + 17)
skb               284 net/bluetooth/cmtp/capi.c 						skb->data[CAPI_MSG_BASELEN + 16]);
skb               288 net/bluetooth/cmtp/capi.c 					skb->data + CAPI_MSG_BASELEN + 17, len);
skb               297 net/bluetooth/cmtp/capi.c 		if (skb->len < CAPI_MSG_BASELEN + 6)
skb               300 net/bluetooth/cmtp/capi.c 		func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 3);
skb               303 net/bluetooth/cmtp/capi.c 			int len = min_t(uint, skb->len - CAPI_MSG_BASELEN - 6,
skb               304 net/bluetooth/cmtp/capi.c 						skb->data[CAPI_MSG_BASELEN + 5]);
skb               305 net/bluetooth/cmtp/capi.c 			appl = CAPIMSG_APPID(skb->data);
skb               306 net/bluetooth/cmtp/capi.c 			msgnum = CAPIMSG_MSGID(skb->data);
skb               308 net/bluetooth/cmtp/capi.c 						skb->data + CAPI_MSG_BASELEN + 6, len);
skb               314 net/bluetooth/cmtp/capi.c 	kfree_skb(skb);
skb               317 net/bluetooth/cmtp/capi.c void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
skb               324 net/bluetooth/cmtp/capi.c 	BT_DBG("session %p skb %p len %d", session, skb, skb->len);
skb               326 net/bluetooth/cmtp/capi.c 	if (skb->len < CAPI_MSG_BASELEN)
skb               329 net/bluetooth/cmtp/capi.c 	if (CAPIMSG_COMMAND(skb->data) == CAPI_INTEROPERABILITY) {
skb               330 net/bluetooth/cmtp/capi.c 		cmtp_recv_interopmsg(session, skb);
skb               335 net/bluetooth/cmtp/capi.c 		kfree_skb(skb);
skb               339 net/bluetooth/cmtp/capi.c 	appl = CAPIMSG_APPID(skb->data);
skb               340 net/bluetooth/cmtp/capi.c 	contr = CAPIMSG_CONTROL(skb->data);
skb               345 net/bluetooth/cmtp/capi.c 		CAPIMSG_SETAPPID(skb->data, appl);
skb               348 net/bluetooth/cmtp/capi.c 		kfree_skb(skb);
skb               354 net/bluetooth/cmtp/capi.c 		CAPIMSG_SETCONTROL(skb->data, contr);
skb               357 net/bluetooth/cmtp/capi.c 	capi_ctr_handle_message(ctrl, appl, skb);
skb               472 net/bluetooth/cmtp/capi.c static u16 cmtp_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
skb               479 net/bluetooth/cmtp/capi.c 	BT_DBG("ctrl %p skb %p", ctrl, skb);
skb               481 net/bluetooth/cmtp/capi.c 	appl = CAPIMSG_APPID(skb->data);
skb               482 net/bluetooth/cmtp/capi.c 	contr = CAPIMSG_CONTROL(skb->data);
skb               490 net/bluetooth/cmtp/capi.c 	CAPIMSG_SETAPPID(skb->data, application->mapping);
skb               494 net/bluetooth/cmtp/capi.c 		CAPIMSG_SETCONTROL(skb->data, contr);
skb               497 net/bluetooth/cmtp/capi.c 	cmtp_send_capimsg(session, skb);
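The capi.c fragments above read and patch message fields at fixed offsets through the CAPIMSG_* accessors (CAPIMSG_APPID, CAPIMSG_MSGID, CAPIMSG_U16 and friends). A small sketch of what such an accessor does, assuming the usual CAPI 2.0 little-endian base header (total length, application id, command, subcommand, message number in the first 8 bytes); treat the exact offsets here as illustrative rather than authoritative.

/* Sketch of fixed-offset little-endian CAPI header access. */
#include <stdint.h>
#include <stdio.h>

static uint16_t capimsg_u16(const uint8_t *m, int off)
{
	return (uint16_t)(m[off] | (m[off + 1] << 8));
}

int main(void)
{
	/* 8-byte base header: len=8, appl=1, cmd=0x86 (DATA_B3),
	 * subcmd=0x80 (REQ), msgid=42 */
	const uint8_t msg[8] = { 0x08, 0x00, 0x01, 0x00, 0x86, 0x80, 0x2a, 0x00 };

	printf("len=%u appl=%u cmd=0x%02x subcmd=0x%02x msgid=%u\n",
	       (unsigned)capimsg_u16(msg, 0), (unsigned)capimsg_u16(msg, 2),
	       (unsigned)msg[4], (unsigned)msg[5],
	       (unsigned)capimsg_u16(msg, 6));
	return 0;
}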
skb               123 net/bluetooth/cmtp/cmtp.h void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb);
skb               109 net/bluetooth/cmtp/core.c 	struct sk_buff *skb = session->reassembly[id], *nskb;
skb               114 net/bluetooth/cmtp/core.c 	size = (skb) ? skb->len + count : count;
skb               122 net/bluetooth/cmtp/core.c 	if (skb && (skb->len > 0))
skb               123 net/bluetooth/cmtp/core.c 		skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len);
skb               129 net/bluetooth/cmtp/core.c 	kfree_skb(skb);
skb               132 net/bluetooth/cmtp/core.c static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *skb)
skb               137 net/bluetooth/cmtp/core.c 	BT_DBG("session %p skb %p len %d", session, skb, skb->len);
skb               139 net/bluetooth/cmtp/core.c 	while (skb->len > 0) {
skb               140 net/bluetooth/cmtp/core.c 		hdr = skb->data[0];
skb               145 net/bluetooth/cmtp/core.c 			len = skb->data[1];
skb               149 net/bluetooth/cmtp/core.c 			len = skb->data[1] | (skb->data[2] << 8);
skb               161 net/bluetooth/cmtp/core.c 		if (hdrlen + len > skb->len) {
skb               167 net/bluetooth/cmtp/core.c 			skb_pull(skb, hdrlen);
skb               173 net/bluetooth/cmtp/core.c 			cmtp_add_msgpart(session, id, skb->data + hdrlen, len);
skb               178 net/bluetooth/cmtp/core.c 			cmtp_add_msgpart(session, id, skb->data + hdrlen, len);
skb               186 net/bluetooth/cmtp/core.c 		skb_pull(skb, hdrlen + len);
skb               189 net/bluetooth/cmtp/core.c 	kfree_skb(skb);
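cmtp_recv_frame() above walks one socket buffer that may carry several concatenated CMTP chunks: read the one-byte header, derive the header length and a 1- or 2-byte little-endian payload length from its top bits, bounds-check against what is left, and skb_pull() past the chunk. A standalone sketch of that walk; the 0x40/0x80 type encoding is taken from the fragment above but should be read as illustrative, not as the CMTP specification.

/* Self-contained model of the reassembly walk; pointers stand in for skb. */
#include <stdint.h>
#include <stdio.h>

static void walk(const uint8_t *p, int remaining)
{
	while (remaining > 0) {
		uint8_t hdr = p[0];
		int hdrlen, len;

		switch (hdr & 0xc0) {
		case 0x40: hdrlen = 2; break;	/* 1-byte length follows */
		case 0x80: hdrlen = 3; break;	/* 2-byte LE length follows */
		default:   hdrlen = 1; break;	/* no payload length */
		}
		if (remaining < hdrlen) {
			puts("truncated header, stop");
			return;
		}
		len = (hdrlen == 2) ? p[1] :
		      (hdrlen == 3) ? (p[1] | (p[2] << 8)) : 0;
		if (hdrlen + len > remaining) {
			puts("truncated payload, stop");
			return;
		}
		printf("chunk: hdr=0x%02x payload=%d bytes\n", (unsigned)hdr, len);
		p += hdrlen + len;	/* the skb_pull(skb, hdrlen + len) step */
		remaining -= hdrlen + len;
	}
}

int main(void)
{
	const uint8_t buf[] = { 0x40, 0x02, 0xaa, 0xbb, 0x80, 0x01, 0x00, 0xcc };
	walk(buf, (int)sizeof(buf));
	return 0;
}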
skb               211 net/bluetooth/cmtp/core.c 	struct sk_buff *skb, *nskb;
skb               223 net/bluetooth/cmtp/core.c 	while ((skb = skb_dequeue(&session->transmit))) {
skb               224 net/bluetooth/cmtp/core.c 		struct cmtp_scb *scb = (void *) skb->cb;
skb               233 net/bluetooth/cmtp/core.c 		size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len);
skb               238 net/bluetooth/cmtp/core.c 				skb_queue_head(&session->transmit, skb);
skb               247 net/bluetooth/cmtp/core.c 				| ((skb->len == size) ? 0x00 : 0x01);
skb               253 net/bluetooth/cmtp/core.c 				| ((skb->len == size) ? 0x00 : 0x01);
skb               258 net/bluetooth/cmtp/core.c 		skb_copy_from_linear_data(skb, skb_put(nskb, size), size);
skb               259 net/bluetooth/cmtp/core.c 		skb_pull(skb, size);
skb               261 net/bluetooth/cmtp/core.c 		if (skb->len > 0) {
skb               262 net/bluetooth/cmtp/core.c 			skb_queue_head(&session->transmit, skb);
skb               269 net/bluetooth/cmtp/core.c 			kfree_skb(skb);
skb               282 net/bluetooth/cmtp/core.c 	struct sk_buff *skb;
skb               296 net/bluetooth/cmtp/core.c 		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb               297 net/bluetooth/cmtp/core.c 			skb_orphan(skb);
skb               298 net/bluetooth/cmtp/core.c 			if (!skb_linearize(skb))
skb               299 net/bluetooth/cmtp/core.c 				cmtp_recv_frame(session, skb);
skb               301 net/bluetooth/cmtp/core.c 				kfree_skb(skb);
skb                79 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb                95 net/bluetooth/hci_core.c 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
skb                98 net/bluetooth/hci_core.c 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
skb               102 net/bluetooth/hci_core.c 	if (IS_ERR(skb))
skb               103 net/bluetooth/hci_core.c 		return PTR_ERR(skb);
skb               105 net/bluetooth/hci_core.c 	kfree_skb(skb);
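The fragment above relies on the kernel's ERR_PTR convention: __hci_cmd_sync() returns either a valid skb or a negative errno encoded in the pointer value itself, which IS_ERR() and PTR_ERR() decode. A simplified, self-contained restatement of that convention (MAX_ERRNO and the threshold test match the definitions in linux/err.h):

/* Error-pointer encoding: the top MAX_ERRNO addresses are error codes. */
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *skb = ERR_PTR(-110 /* -ETIMEDOUT */);

	if (IS_ERR(skb))
		printf("command failed: %ld\n", PTR_ERR(skb));
	/* otherwise: use skb, then kfree_skb(skb) as in the fragment above */
	return 0;
}

Encoding the error in the pointer lets one return value carry both outcomes without an out-parameter.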
skb              3474 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              3476 net/bluetooth/hci_core.c 	skb = bt_skb_alloc(3, GFP_ATOMIC);
skb              3477 net/bluetooth/hci_core.c 	if (!skb)
skb              3480 net/bluetooth/hci_core.c 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb              3481 net/bluetooth/hci_core.c 	skb_put_data(skb, hw_err, 3);
skb              3484 net/bluetooth/hci_core.c 	return hci_recv_frame(hdev, skb);
skb              3489 net/bluetooth/hci_core.c int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb              3493 net/bluetooth/hci_core.c 		kfree_skb(skb);
skb              3497 net/bluetooth/hci_core.c 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
skb              3498 net/bluetooth/hci_core.c 	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
skb              3499 net/bluetooth/hci_core.c 	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
skb              3500 net/bluetooth/hci_core.c 		kfree_skb(skb);
skb              3505 net/bluetooth/hci_core.c 	bt_cb(skb)->incoming = 1;
skb              3508 net/bluetooth/hci_core.c 	__net_timestamp(skb);
skb              3510 net/bluetooth/hci_core.c 	skb_queue_tail(&hdev->rx_q, skb);
skb              3518 net/bluetooth/hci_core.c int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
skb              3521 net/bluetooth/hci_core.c 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
skb              3524 net/bluetooth/hci_core.c 	__net_timestamp(skb);
skb              3526 net/bluetooth/hci_core.c 	skb_queue_tail(&hdev->rx_q, skb);
skb              3581 net/bluetooth/hci_core.c static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb              3585 net/bluetooth/hci_core.c 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
skb              3586 net/bluetooth/hci_core.c 	       skb->len);
skb              3589 net/bluetooth/hci_core.c 	__net_timestamp(skb);
skb              3592 net/bluetooth/hci_core.c 	hci_send_to_monitor(hdev, skb);
skb              3596 net/bluetooth/hci_core.c 		hci_send_to_sock(hdev, skb);
skb              3600 net/bluetooth/hci_core.c 	skb_orphan(skb);
skb              3603 net/bluetooth/hci_core.c 		kfree_skb(skb);
skb              3607 net/bluetooth/hci_core.c 	err = hdev->send(hdev, skb);
skb              3610 net/bluetooth/hci_core.c 		kfree_skb(skb);
skb              3618 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              3622 net/bluetooth/hci_core.c 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
skb              3623 net/bluetooth/hci_core.c 	if (!skb) {
skb              3631 net/bluetooth/hci_core.c 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
skb              3633 net/bluetooth/hci_core.c 	skb_queue_tail(&hdev->cmd_q, skb);
skb              3642 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              3657 net/bluetooth/hci_core.c 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
skb              3658 net/bluetooth/hci_core.c 	if (!skb) {
skb              3664 net/bluetooth/hci_core.c 	hci_send_frame(hdev, skb);
skb              3692 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              3700 net/bluetooth/hci_core.c 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
skb              3703 net/bluetooth/hci_core.c 	return skb;
skb              3708 net/bluetooth/hci_core.c static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
skb              3711 net/bluetooth/hci_core.c 	int len = skb->len;
skb              3713 net/bluetooth/hci_core.c 	skb_push(skb, HCI_ACL_HDR_SIZE);
skb              3714 net/bluetooth/hci_core.c 	skb_reset_transport_header(skb);
skb              3715 net/bluetooth/hci_core.c 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
skb              3721 net/bluetooth/hci_core.c 			  struct sk_buff *skb, __u16 flags)
skb              3727 net/bluetooth/hci_core.c 	skb->len = skb_headlen(skb);
skb              3728 net/bluetooth/hci_core.c 	skb->data_len = 0;
skb              3730 net/bluetooth/hci_core.c 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
skb              3734 net/bluetooth/hci_core.c 		hci_add_acl_hdr(skb, conn->handle, flags);
skb              3737 net/bluetooth/hci_core.c 		hci_add_acl_hdr(skb, chan->handle, flags);
skb              3744 net/bluetooth/hci_core.c 	list = skb_shinfo(skb)->frag_list;
skb              3747 net/bluetooth/hci_core.c 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
skb              3749 net/bluetooth/hci_core.c 		skb_queue_tail(queue, skb);
skb              3752 net/bluetooth/hci_core.c 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
skb              3754 net/bluetooth/hci_core.c 		skb_shinfo(skb)->frag_list = NULL;
skb              3763 net/bluetooth/hci_core.c 		__skb_queue_tail(queue, skb);
skb              3768 net/bluetooth/hci_core.c 			skb = list; list = list->next;
skb              3770 net/bluetooth/hci_core.c 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
skb              3771 net/bluetooth/hci_core.c 			hci_add_acl_hdr(skb, conn->handle, flags);
skb              3773 net/bluetooth/hci_core.c 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
skb              3775 net/bluetooth/hci_core.c 			__skb_queue_tail(queue, skb);
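hci_queue_acl() above queues the head buffer with a start-of-packet ACL header, then detaches skb_shinfo(skb)->frag_list and appends each chained fragment with its own continuation header. A plain linked-list stand-in for that walk; struct buf and the flag names are hypothetical.

/* Model of head-then-fragments queueing over a singly linked list. */
#include <stdio.h>

struct buf { int len; struct buf *next; };

static void queue_fragments(struct buf *head)
{
	/* head buffer carries the "start of packet" flags */
	printf("queue head len=%d flags=START\n", head->len);
	for (struct buf *b = head->next; b; b = b->next)
		printf("queue frag len=%d flags=CONT\n", b->len);
}

int main(void)
{
	struct buf c = { 300, NULL }, b = { 1021, &c }, a = { 1021, &b };
	queue_fragments(&a);
	return 0;
}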
skb              3782 net/bluetooth/hci_core.c void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
skb              3788 net/bluetooth/hci_core.c 	hci_queue_acl(chan, &chan->data_q, skb, flags);
skb              3794 net/bluetooth/hci_core.c void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
skb              3799 net/bluetooth/hci_core.c 	BT_DBG("%s len %d", hdev->name, skb->len);
skb              3802 net/bluetooth/hci_core.c 	hdr.dlen   = skb->len;
skb              3804 net/bluetooth/hci_core.c 	skb_push(skb, HCI_SCO_HDR_SIZE);
skb              3805 net/bluetooth/hci_core.c 	skb_reset_transport_header(skb);
skb              3806 net/bluetooth/hci_core.c 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
skb              3808 net/bluetooth/hci_core.c 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
skb              3810 net/bluetooth/hci_core.c 	skb_queue_tail(&conn->data_q, skb);
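hci_send_sco() above prepends the 3-byte SCO header (16-bit handle, 8-bit payload length) with skb_push(), which simply moves the data pointer back into headroom reserved when the buffer was allocated. A sketch of that pointer arithmetic over a flat array; the handle/flag byte layout shown is illustrative.

/* Headroom + push: prepend a header without copying the payload. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HEADROOM 3

int main(void)
{
	uint8_t buf[HEADROOM + 4];
	uint8_t *data = buf + HEADROOM;	/* payload starts after headroom */
	size_t len = 4;

	memcpy(data, "\xde\xad\xbe\xef", len);

	data -= 3;			/* skb_push(skb, HCI_SCO_HDR_SIZE) */
	len += 3;
	data[0] = 0x2a;			/* handle, low byte (little endian) */
	data[1] = 0x00;			/* handle high bits + flags */
	data[2] = 4;			/* dlen: payload length */

	printf("frame of %zu bytes, dlen=%u\n", len, (unsigned)data[2]);
	return 0;
}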
skb              3923 net/bluetooth/hci_core.c 			struct sk_buff *skb;
skb              3928 net/bluetooth/hci_core.c 			skb = skb_peek(&tmp->data_q);
skb              3929 net/bluetooth/hci_core.c 			if (skb->priority < cur_prio)
skb              3932 net/bluetooth/hci_core.c 			if (skb->priority > cur_prio) {
skb              3935 net/bluetooth/hci_core.c 				cur_prio = skb->priority;
skb              4002 net/bluetooth/hci_core.c 			struct sk_buff *skb;
skb              4012 net/bluetooth/hci_core.c 			skb = skb_peek(&chan->data_q);
skb              4013 net/bluetooth/hci_core.c 			if (skb->priority >= HCI_PRIO_MAX - 1)
skb              4016 net/bluetooth/hci_core.c 			skb->priority = HCI_PRIO_MAX - 1;
skb              4018 net/bluetooth/hci_core.c 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
skb              4019 net/bluetooth/hci_core.c 			       skb->priority);
skb              4030 net/bluetooth/hci_core.c static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
skb              4033 net/bluetooth/hci_core.c 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
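__get_blocks() above converts an ACL payload length into the number of controller buffer blocks it occupies, using the kernel's ceiling-division macro: a packet that spills into a block consumes the whole block. DIV_ROUND_UP expands as shown here, and HCI_ACL_HDR_SIZE is 4 bytes.

/* Kernel-style ceiling division, as used by __get_blocks(). */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int acl_hdr = 4, block_len = 384;
	int pkt_len = acl_hdr + 1000;

	/* 1000 payload bytes over 384-byte blocks -> 3 blocks */
	printf("blocks=%d\n", DIV_ROUND_UP(pkt_len - acl_hdr, block_len));
	return 0;
}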
skb              4051 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4059 net/bluetooth/hci_core.c 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
skb              4060 net/bluetooth/hci_core.c 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
skb              4061 net/bluetooth/hci_core.c 			       skb->len, skb->priority);
skb              4064 net/bluetooth/hci_core.c 			if (skb->priority < priority)
skb              4067 net/bluetooth/hci_core.c 			skb = skb_dequeue(&chan->data_q);
skb              4070 net/bluetooth/hci_core.c 						   bt_cb(skb)->force_active);
skb              4072 net/bluetooth/hci_core.c 			hci_send_frame(hdev, skb);
skb              4089 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4105 net/bluetooth/hci_core.c 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
skb              4108 net/bluetooth/hci_core.c 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
skb              4109 net/bluetooth/hci_core.c 			       skb->len, skb->priority);
skb              4112 net/bluetooth/hci_core.c 			if (skb->priority < priority)
skb              4115 net/bluetooth/hci_core.c 			skb = skb_dequeue(&chan->data_q);
skb              4117 net/bluetooth/hci_core.c 			blocks = __get_blocks(hdev, skb);
skb              4122 net/bluetooth/hci_core.c 						   bt_cb(skb)->force_active);
skb              4124 net/bluetooth/hci_core.c 			hci_send_frame(hdev, skb);
skb              4166 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4175 net/bluetooth/hci_core.c 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
skb              4176 net/bluetooth/hci_core.c 			BT_DBG("skb %p len %d", skb, skb->len);
skb              4177 net/bluetooth/hci_core.c 			hci_send_frame(hdev, skb);
skb              4189 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4199 net/bluetooth/hci_core.c 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
skb              4200 net/bluetooth/hci_core.c 			BT_DBG("skb %p len %d", skb, skb->len);
skb              4201 net/bluetooth/hci_core.c 			hci_send_frame(hdev, skb);
skb              4213 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4233 net/bluetooth/hci_core.c 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
skb              4234 net/bluetooth/hci_core.c 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
skb              4235 net/bluetooth/hci_core.c 			       skb->len, skb->priority);
skb              4238 net/bluetooth/hci_core.c 			if (skb->priority < priority)
skb              4241 net/bluetooth/hci_core.c 			skb = skb_dequeue(&chan->data_q);
skb              4243 net/bluetooth/hci_core.c 			hci_send_frame(hdev, skb);
skb              4264 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4278 net/bluetooth/hci_core.c 	while ((skb = skb_dequeue(&hdev->raw_q)))
skb              4279 net/bluetooth/hci_core.c 		hci_send_frame(hdev, skb);
skb              4285 net/bluetooth/hci_core.c static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
skb              4287 net/bluetooth/hci_core.c 	struct hci_acl_hdr *hdr = (void *) skb->data;
skb              4291 net/bluetooth/hci_core.c 	skb_pull(skb, HCI_ACL_HDR_SIZE);
skb              4297 net/bluetooth/hci_core.c 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
skb              4310 net/bluetooth/hci_core.c 		l2cap_recv_acldata(conn, skb, flags);
skb              4317 net/bluetooth/hci_core.c 	kfree_skb(skb);
skb              4321 net/bluetooth/hci_core.c static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
skb              4323 net/bluetooth/hci_core.c 	struct hci_sco_hdr *hdr = (void *) skb->data;
skb              4327 net/bluetooth/hci_core.c 	skb_pull(skb, HCI_SCO_HDR_SIZE);
skb              4331 net/bluetooth/hci_core.c 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
skb              4341 net/bluetooth/hci_core.c 		sco_recv_scodata(conn, skb);
skb              4348 net/bluetooth/hci_core.c 	kfree_skb(skb);
skb              4353 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4355 net/bluetooth/hci_core.c 	skb = skb_peek(&hdev->cmd_q);
skb              4356 net/bluetooth/hci_core.c 	if (!skb)
skb              4359 net/bluetooth/hci_core.c 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
skb              4365 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4376 net/bluetooth/hci_core.c 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
skb              4377 net/bluetooth/hci_core.c 	if (!skb)
skb              4380 net/bluetooth/hci_core.c 	skb_queue_head(&hdev->cmd_q, skb);
skb              4388 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4434 net/bluetooth/hci_core.c 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
skb              4435 net/bluetooth/hci_core.c 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
skb              4436 net/bluetooth/hci_core.c 			__skb_queue_head(&hdev->cmd_q, skb);
skb              4440 net/bluetooth/hci_core.c 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
skb              4441 net/bluetooth/hci_core.c 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
skb              4443 net/bluetooth/hci_core.c 			*req_complete = bt_cb(skb)->hci.req_complete;
skb              4444 net/bluetooth/hci_core.c 		kfree_skb(skb);
skb              4452 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4456 net/bluetooth/hci_core.c 	while ((skb = skb_dequeue(&hdev->rx_q))) {
skb              4458 net/bluetooth/hci_core.c 		hci_send_to_monitor(hdev, skb);
skb              4462 net/bluetooth/hci_core.c 			hci_send_to_sock(hdev, skb);
skb              4473 net/bluetooth/hci_core.c 			kfree_skb(skb);
skb              4479 net/bluetooth/hci_core.c 			switch (hci_skb_pkt_type(skb)) {
skb              4482 net/bluetooth/hci_core.c 				kfree_skb(skb);
skb              4488 net/bluetooth/hci_core.c 		switch (hci_skb_pkt_type(skb)) {
skb              4491 net/bluetooth/hci_core.c 			hci_event_packet(hdev, skb);
skb              4496 net/bluetooth/hci_core.c 			hci_acldata_packet(hdev, skb);
skb              4501 net/bluetooth/hci_core.c 			hci_scodata_packet(hdev, skb);
skb              4505 net/bluetooth/hci_core.c 			kfree_skb(skb);
skb              4514 net/bluetooth/hci_core.c 	struct sk_buff *skb;
skb              4521 net/bluetooth/hci_core.c 		skb = skb_dequeue(&hdev->cmd_q);
skb              4522 net/bluetooth/hci_core.c 		if (!skb)
skb              4527 net/bluetooth/hci_core.c 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
skb              4532 net/bluetooth/hci_core.c 			hci_send_frame(hdev, skb);
skb              4539 net/bluetooth/hci_core.c 			skb_queue_head(&hdev->cmd_q, skb);
skb                44 net/bluetooth/hci_event.c static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
skb                46 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb                69 net/bluetooth/hci_event.c static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
skb                71 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb                81 net/bluetooth/hci_event.c static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
skb                83 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb                96 net/bluetooth/hci_event.c 					  struct sk_buff *skb)
skb               101 net/bluetooth/hci_event.c static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
skb               103 net/bluetooth/hci_event.c 	struct hci_rp_role_discovery *rp = (void *) skb->data;
skb               120 net/bluetooth/hci_event.c static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
skb               122 net/bluetooth/hci_event.c 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
skb               139 net/bluetooth/hci_event.c static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
skb               141 net/bluetooth/hci_event.c 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
skb               164 net/bluetooth/hci_event.c 					struct sk_buff *skb)
skb               166 net/bluetooth/hci_event.c 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
skb               177 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb               179 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb               194 net/bluetooth/hci_event.c static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
skb               196 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb               228 net/bluetooth/hci_event.c 					struct sk_buff *skb)
skb               230 net/bluetooth/hci_event.c 	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
skb               246 net/bluetooth/hci_event.c 					  struct sk_buff *skb)
skb               248 net/bluetooth/hci_event.c 	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
skb               261 net/bluetooth/hci_event.c static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
skb               263 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb               282 net/bluetooth/hci_event.c static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
skb               284 net/bluetooth/hci_event.c 	struct hci_rp_read_local_name *rp = (void *) skb->data;
skb               296 net/bluetooth/hci_event.c static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
skb               298 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb               324 net/bluetooth/hci_event.c static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
skb               326 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb               347 net/bluetooth/hci_event.c static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
skb               349 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb               382 net/bluetooth/hci_event.c static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
skb               384 net/bluetooth/hci_event.c 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
skb               397 net/bluetooth/hci_event.c static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
skb               399 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb               419 net/bluetooth/hci_event.c static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
skb               421 net/bluetooth/hci_event.c 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
skb               443 net/bluetooth/hci_event.c 				       struct sk_buff *skb)
skb               445 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb               472 net/bluetooth/hci_event.c 					  struct sk_buff *skb)
skb               474 net/bluetooth/hci_event.c 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
skb               486 net/bluetooth/hci_event.c static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
skb               488 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb               518 net/bluetooth/hci_event.c static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
skb               520 net/bluetooth/hci_event.c 	u8 status = *((u8 *) skb->data);
skb               548 net/bluetooth/hci_event.c static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
skb               550 net/bluetooth/hci_event.c 	struct hci_rp_read_local_version *rp = (void *) skb->data;
skb               568 net/bluetooth/hci_event.c 				       struct sk_buff *skb)
skb               570 net/bluetooth/hci_event.c 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
skb               583 net/bluetooth/hci_event.c 					     struct sk_buff *skb)
skb               585 net/bluetooth/hci_event.c 	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
skb               603 net/bluetooth/hci_event.c 					      struct sk_buff *skb)
skb               605 net/bluetooth/hci_event.c 	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
skb               628 net/bluetooth/hci_event.c 				       struct sk_buff *skb)
skb               630 net/bluetooth/hci_event.c 	struct hci_rp_read_local_features *rp = (void *) skb->data;
skb               678 net/bluetooth/hci_event.c 					   struct sk_buff *skb)
skb               680 net/bluetooth/hci_event.c 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
skb               695 net/bluetooth/hci_event.c 					  struct sk_buff *skb)
skb               697 net/bluetooth/hci_event.c 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
skb               707 net/bluetooth/hci_event.c static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
skb               709 net/bluetooth/hci_event.c 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
skb               733 net/bluetooth/hci_event.c static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
skb               735 net/bluetooth/hci_event.c 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
skb               750 net/bluetooth/hci_event.c 					   struct sk_buff *skb)
skb               752 net/bluetooth/hci_event.c 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
skb               766 net/bluetooth/hci_event.c 					    struct sk_buff *skb)
skb               768 net/bluetooth/hci_event.c 	u8 status = *((u8 *) skb->data);
skb               785 net/bluetooth/hci_event.c 					   struct sk_buff *skb)
skb               787 net/bluetooth/hci_event.c 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
skb               799 net/bluetooth/hci_event.c 					struct sk_buff *skb)
skb               801 net/bluetooth/hci_event.c 	u8 status = *((u8 *) skb->data);
skb               815 net/bluetooth/hci_event.c 					struct sk_buff *skb)
skb               817 net/bluetooth/hci_event.c 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
skb               834 net/bluetooth/hci_event.c static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
skb               836 net/bluetooth/hci_event.c 	struct hci_rp_read_clock *rp = (void *) skb->data;
skb               842 net/bluetooth/hci_event.c 	if (skb->len < sizeof(*rp))
skb               870 net/bluetooth/hci_event.c 				       struct sk_buff *skb)
skb               872 net/bluetooth/hci_event.c 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
skb               892 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb               894 net/bluetooth/hci_event.c 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
skb               904 net/bluetooth/hci_event.c static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
skb               906 net/bluetooth/hci_event.c 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
skb               932 net/bluetooth/hci_event.c static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
skb               934 net/bluetooth/hci_event.c 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
skb               948 net/bluetooth/hci_event.c 				       struct sk_buff *skb)
skb               950 net/bluetooth/hci_event.c 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
skb               966 net/bluetooth/hci_event.c 					  struct sk_buff *skb)
skb               968 net/bluetooth/hci_event.c 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
skb               979 net/bluetooth/hci_event.c 					struct sk_buff *skb)
skb               981 net/bluetooth/hci_event.c 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
skb               991 net/bluetooth/hci_event.c static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
skb               993 net/bluetooth/hci_event.c 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
skb              1007 net/bluetooth/hci_event.c 					  struct sk_buff *skb)
skb              1009 net/bluetooth/hci_event.c 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
skb              1022 net/bluetooth/hci_event.c static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
skb              1024 net/bluetooth/hci_event.c 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
skb              1038 net/bluetooth/hci_event.c 					  struct sk_buff *skb)
skb              1040 net/bluetooth/hci_event.c 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
skb              1054 net/bluetooth/hci_event.c 				       struct sk_buff *skb)
skb              1056 net/bluetooth/hci_event.c 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
skb              1062 net/bluetooth/hci_event.c 					   struct sk_buff *skb)
skb              1064 net/bluetooth/hci_event.c 	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
skb              1069 net/bluetooth/hci_event.c static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
skb              1071 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1090 net/bluetooth/hci_event.c static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
skb              1092 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1113 net/bluetooth/hci_event.c 					      struct sk_buff *skb)
skb              1115 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1141 net/bluetooth/hci_event.c static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
skb              1143 net/bluetooth/hci_event.c 	__u8 *sent, status = *((__u8 *) skb->data);
skb              1177 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              1180 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1210 net/bluetooth/hci_event.c static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
skb              1213 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1232 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              1235 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1343 net/bluetooth/hci_event.c 				      struct sk_buff *skb)
skb              1346 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1361 net/bluetooth/hci_event.c 				      struct sk_buff *skb)
skb              1364 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1379 net/bluetooth/hci_event.c 				      struct sk_buff *skb)
skb              1381 net/bluetooth/hci_event.c 	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
skb              1393 net/bluetooth/hci_event.c 					   struct sk_buff *skb)
skb              1395 net/bluetooth/hci_event.c 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
skb              1406 net/bluetooth/hci_event.c 				       struct sk_buff *skb)
skb              1408 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1419 net/bluetooth/hci_event.c 					struct sk_buff *skb)
skb              1422 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1438 net/bluetooth/hci_event.c 					  struct sk_buff *skb)
skb              1441 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1457 net/bluetooth/hci_event.c 					    struct sk_buff *skb)
skb              1459 net/bluetooth/hci_event.c 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
skb              1470 net/bluetooth/hci_event.c 					struct sk_buff *skb)
skb              1472 net/bluetooth/hci_event.c 	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
skb              1484 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              1487 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1503 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              1506 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1523 net/bluetooth/hci_event.c 					  struct sk_buff *skb)
skb              1526 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1542 net/bluetooth/hci_event.c 				       struct sk_buff *skb)
skb              1544 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1555 net/bluetooth/hci_event.c 					   struct sk_buff *skb)
skb              1557 net/bluetooth/hci_event.c 	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
skb              1568 net/bluetooth/hci_event.c 						struct sk_buff *skb)
skb              1570 net/bluetooth/hci_event.c 	__u8 *sent, status = *((__u8 *) skb->data);
skb              1592 net/bluetooth/hci_event.c 					struct sk_buff *skb)
skb              1594 net/bluetooth/hci_event.c 	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
skb              1608 net/bluetooth/hci_event.c 					   struct sk_buff *skb)
skb              1611 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              1641 net/bluetooth/hci_event.c static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
skb              1644 net/bluetooth/hci_event.c 	u8 status = *((u8 *) skb->data);
skb              1660 net/bluetooth/hci_event.c static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
skb              1662 net/bluetooth/hci_event.c 	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
skb              1691 net/bluetooth/hci_event.c static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
skb              1693 net/bluetooth/hci_event.c 	struct hci_rp_read_rssi *rp = (void *) skb->data;
skb              1710 net/bluetooth/hci_event.c static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
skb              1713 net/bluetooth/hci_event.c 	struct hci_rp_read_tx_power *rp = (void *) skb->data;
skb              1744 net/bluetooth/hci_event.c static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
skb              1746 net/bluetooth/hci_event.c 	u8 status = *((u8 *) skb->data);
skb              2377 net/bluetooth/hci_event.c static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              2379 net/bluetooth/hci_event.c 	__u8 status = *((__u8 *) skb->data);
skb              2436 net/bluetooth/hci_event.c static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              2439 net/bluetooth/hci_event.c 	struct inquiry_info *info = (void *) (skb->data + 1);
skb              2440 net/bluetooth/hci_event.c 	int num_rsp = *((__u8 *) skb->data);
skb              2474 net/bluetooth/hci_event.c static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              2476 net/bluetooth/hci_event.c 	struct hci_ev_conn_complete *ev = (void *) skb->data;
skb              2568 net/bluetooth/hci_event.c static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              2570 net/bluetooth/hci_event.c 	struct hci_ev_conn_request *ev = (void *) skb->data;
skb              2679 net/bluetooth/hci_event.c static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              2681 net/bluetooth/hci_event.c 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
skb              2763 net/bluetooth/hci_event.c static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              2765 net/bluetooth/hci_event.c 	struct hci_ev_auth_complete *ev = (void *) skb->data;
skb              2833 net/bluetooth/hci_event.c static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              2835 net/bluetooth/hci_event.c 	struct hci_ev_remote_name *ev = (void *) skb->data;
skb              2876 net/bluetooth/hci_event.c 				       u16 opcode, struct sk_buff *skb)
skb              2884 net/bluetooth/hci_event.c 	if (!skb || skb->len < sizeof(*rp)) {
skb              2889 net/bluetooth/hci_event.c 	rp = (void *)skb->data;
skb              2931 net/bluetooth/hci_event.c static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              2933 net/bluetooth/hci_event.c 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
skb              3057 net/bluetooth/hci_event.c 					     struct sk_buff *skb)
skb              3059 net/bluetooth/hci_event.c 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
skb              3080 net/bluetooth/hci_event.c 				    struct sk_buff *skb)
skb              3082 net/bluetooth/hci_event.c 	struct hci_ev_remote_features *ev = (void *) skb->data;
skb              3128 net/bluetooth/hci_event.c static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
skb              3133 net/bluetooth/hci_event.c 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
skb              3136 net/bluetooth/hci_event.c 	*status = skb->data[sizeof(*ev)];
skb              3138 net/bluetooth/hci_event.c 	skb_pull(skb, sizeof(*ev));
skb              3142 net/bluetooth/hci_event.c 		hci_cc_inquiry_cancel(hdev, skb);
skb              3146 net/bluetooth/hci_event.c 		hci_cc_periodic_inq(hdev, skb);
skb              3150 net/bluetooth/hci_event.c 		hci_cc_exit_periodic_inq(hdev, skb);
skb              3154 net/bluetooth/hci_event.c 		hci_cc_remote_name_req_cancel(hdev, skb);
skb              3158 net/bluetooth/hci_event.c 		hci_cc_role_discovery(hdev, skb);
skb              3162 net/bluetooth/hci_event.c 		hci_cc_read_link_policy(hdev, skb);
skb              3166 net/bluetooth/hci_event.c 		hci_cc_write_link_policy(hdev, skb);
skb              3170 net/bluetooth/hci_event.c 		hci_cc_read_def_link_policy(hdev, skb);
skb              3174 net/bluetooth/hci_event.c 		hci_cc_write_def_link_policy(hdev, skb);
skb              3178 net/bluetooth/hci_event.c 		hci_cc_reset(hdev, skb);
skb              3182 net/bluetooth/hci_event.c 		hci_cc_read_stored_link_key(hdev, skb);
skb              3186 net/bluetooth/hci_event.c 		hci_cc_delete_stored_link_key(hdev, skb);
skb              3190 net/bluetooth/hci_event.c 		hci_cc_write_local_name(hdev, skb);
skb              3194 net/bluetooth/hci_event.c 		hci_cc_read_local_name(hdev, skb);
skb              3198 net/bluetooth/hci_event.c 		hci_cc_write_auth_enable(hdev, skb);
skb              3202 net/bluetooth/hci_event.c 		hci_cc_write_encrypt_mode(hdev, skb);
skb              3206 net/bluetooth/hci_event.c 		hci_cc_write_scan_enable(hdev, skb);
skb              3210 net/bluetooth/hci_event.c 		hci_cc_read_class_of_dev(hdev, skb);
skb              3214 net/bluetooth/hci_event.c 		hci_cc_write_class_of_dev(hdev, skb);
skb              3218 net/bluetooth/hci_event.c 		hci_cc_read_voice_setting(hdev, skb);
skb              3222 net/bluetooth/hci_event.c 		hci_cc_write_voice_setting(hdev, skb);
skb              3226 net/bluetooth/hci_event.c 		hci_cc_read_num_supported_iac(hdev, skb);
skb              3230 net/bluetooth/hci_event.c 		hci_cc_write_ssp_mode(hdev, skb);
skb              3234 net/bluetooth/hci_event.c 		hci_cc_write_sc_support(hdev, skb);
skb              3238 net/bluetooth/hci_event.c 		hci_cc_read_auth_payload_timeout(hdev, skb);
skb              3242 net/bluetooth/hci_event.c 		hci_cc_write_auth_payload_timeout(hdev, skb);
skb              3246 net/bluetooth/hci_event.c 		hci_cc_read_local_version(hdev, skb);
skb              3250 net/bluetooth/hci_event.c 		hci_cc_read_local_commands(hdev, skb);
skb              3254 net/bluetooth/hci_event.c 		hci_cc_read_local_features(hdev, skb);
skb              3258 net/bluetooth/hci_event.c 		hci_cc_read_local_ext_features(hdev, skb);
skb              3262 net/bluetooth/hci_event.c 		hci_cc_read_buffer_size(hdev, skb);
skb              3266 net/bluetooth/hci_event.c 		hci_cc_read_bd_addr(hdev, skb);
skb              3270 net/bluetooth/hci_event.c 		hci_cc_read_page_scan_activity(hdev, skb);
skb              3274 net/bluetooth/hci_event.c 		hci_cc_write_page_scan_activity(hdev, skb);
skb              3278 net/bluetooth/hci_event.c 		hci_cc_read_page_scan_type(hdev, skb);
skb              3282 net/bluetooth/hci_event.c 		hci_cc_write_page_scan_type(hdev, skb);
skb              3286 net/bluetooth/hci_event.c 		hci_cc_read_data_block_size(hdev, skb);
skb              3290 net/bluetooth/hci_event.c 		hci_cc_read_flow_control_mode(hdev, skb);
skb              3294 net/bluetooth/hci_event.c 		hci_cc_read_local_amp_info(hdev, skb);
skb              3298 net/bluetooth/hci_event.c 		hci_cc_read_clock(hdev, skb);
skb              3302 net/bluetooth/hci_event.c 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
skb              3306 net/bluetooth/hci_event.c 		hci_cc_pin_code_reply(hdev, skb);
skb              3310 net/bluetooth/hci_event.c 		hci_cc_pin_code_neg_reply(hdev, skb);
skb              3314 net/bluetooth/hci_event.c 		hci_cc_read_local_oob_data(hdev, skb);
skb              3318 net/bluetooth/hci_event.c 		hci_cc_read_local_oob_ext_data(hdev, skb);
skb              3322 net/bluetooth/hci_event.c 		hci_cc_le_read_buffer_size(hdev, skb);
skb              3326 net/bluetooth/hci_event.c 		hci_cc_le_read_local_features(hdev, skb);
skb              3330 net/bluetooth/hci_event.c 		hci_cc_le_read_adv_tx_power(hdev, skb);
skb              3334 net/bluetooth/hci_event.c 		hci_cc_user_confirm_reply(hdev, skb);
skb              3338 net/bluetooth/hci_event.c 		hci_cc_user_confirm_neg_reply(hdev, skb);
skb              3342 net/bluetooth/hci_event.c 		hci_cc_user_passkey_reply(hdev, skb);
skb              3346 net/bluetooth/hci_event.c 		hci_cc_user_passkey_neg_reply(hdev, skb);
skb              3350 net/bluetooth/hci_event.c 		hci_cc_le_set_random_addr(hdev, skb);
skb              3354 net/bluetooth/hci_event.c 		hci_cc_le_set_adv_enable(hdev, skb);
skb              3358 net/bluetooth/hci_event.c 		hci_cc_le_set_scan_param(hdev, skb);
skb              3362 net/bluetooth/hci_event.c 		hci_cc_le_set_scan_enable(hdev, skb);
skb              3366 net/bluetooth/hci_event.c 		hci_cc_le_read_white_list_size(hdev, skb);
skb              3370 net/bluetooth/hci_event.c 		hci_cc_le_clear_white_list(hdev, skb);
skb              3374 net/bluetooth/hci_event.c 		hci_cc_le_add_to_white_list(hdev, skb);
skb              3378 net/bluetooth/hci_event.c 		hci_cc_le_del_from_white_list(hdev, skb);
skb              3382 net/bluetooth/hci_event.c 		hci_cc_le_read_supported_states(hdev, skb);
skb              3386 net/bluetooth/hci_event.c 		hci_cc_le_read_def_data_len(hdev, skb);
skb              3390 net/bluetooth/hci_event.c 		hci_cc_le_write_def_data_len(hdev, skb);
skb              3394 net/bluetooth/hci_event.c 		hci_cc_le_add_to_resolv_list(hdev, skb);
skb              3398 net/bluetooth/hci_event.c 		hci_cc_le_del_from_resolv_list(hdev, skb);
skb              3402 net/bluetooth/hci_event.c 		hci_cc_le_clear_resolv_list(hdev, skb);
skb              3406 net/bluetooth/hci_event.c 		hci_cc_le_read_resolv_list_size(hdev, skb);
skb              3410 net/bluetooth/hci_event.c 		hci_cc_le_set_addr_resolution_enable(hdev, skb);
skb              3414 net/bluetooth/hci_event.c 		hci_cc_le_read_max_data_len(hdev, skb);
skb              3418 net/bluetooth/hci_event.c 		hci_cc_write_le_host_supported(hdev, skb);
skb              3422 net/bluetooth/hci_event.c 		hci_cc_set_adv_param(hdev, skb);
skb              3426 net/bluetooth/hci_event.c 		hci_cc_read_rssi(hdev, skb);
skb              3430 net/bluetooth/hci_event.c 		hci_cc_read_tx_power(hdev, skb);
skb              3434 net/bluetooth/hci_event.c 		hci_cc_write_ssp_debug_mode(hdev, skb);
skb              3438 net/bluetooth/hci_event.c 		hci_cc_le_set_ext_scan_param(hdev, skb);
skb              3442 net/bluetooth/hci_event.c 		hci_cc_le_set_ext_scan_enable(hdev, skb);
skb              3446 net/bluetooth/hci_event.c 		hci_cc_le_set_default_phy(hdev, skb);
skb              3450 net/bluetooth/hci_event.c 		hci_cc_le_read_num_adv_sets(hdev, skb);
skb              3454 net/bluetooth/hci_event.c 		hci_cc_set_ext_adv_param(hdev, skb);
skb              3458 net/bluetooth/hci_event.c 		hci_cc_le_set_ext_adv_enable(hdev, skb);
skb              3462 net/bluetooth/hci_event.c 		hci_cc_le_set_adv_set_random_addr(hdev, skb);
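hci_cmd_complete_evt() above is one long switch mapping each completed opcode to a handler with the uniform signature (hdev, skb). The same dispatch can be expressed as a table of opcode/handler pairs; this is not how the file above is written, just a sketch of the equivalent structure, with struct hdev, struct skb, and cc_reset() as stand-ins.

/* Table-driven equivalent of the opcode switch. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hdev;
struct skb;

typedef void (*cc_handler)(struct hdev *, struct skb *);

static void cc_reset(struct hdev *h, struct skb *s)
{
	(void)h; (void)s;
	puts("reset complete");
}

static const struct { uint16_t opcode; cc_handler fn; } cc_table[] = {
	{ 0x0c03 /* HCI_OP_RESET */, cc_reset },
};

static void dispatch(uint16_t opcode, struct hdev *h, struct skb *s)
{
	for (size_t i = 0; i < sizeof(cc_table) / sizeof(cc_table[0]); i++)
		if (cc_table[i].opcode == opcode) {
			cc_table[i].fn(h, s);
			return;
		}
	printf("unhandled opcode 0x%4.4x\n", (unsigned)opcode);
}

int main(void)
{
	dispatch(0x0c03, NULL, NULL);
	return 0;
}

A table keeps each mapping to one line and turns the dispatch into data rather than control flow, at the cost of a lookup.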
skb              3489 net/bluetooth/hci_event.c static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
skb              3494 net/bluetooth/hci_event.c 	struct hci_ev_cmd_status *ev = (void *) skb->data;
skb              3496 net/bluetooth/hci_event.c 	skb_pull(skb, sizeof(*ev));
skb              3602 net/bluetooth/hci_event.c static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              3604 net/bluetooth/hci_event.c 	struct hci_ev_hardware_error *ev = (void *) skb->data;
skb              3611 net/bluetooth/hci_event.c static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              3613 net/bluetooth/hci_event.c 	struct hci_ev_role_change *ev = (void *) skb->data;
skb              3633 net/bluetooth/hci_event.c static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              3635 net/bluetooth/hci_event.c 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
skb              3643 net/bluetooth/hci_event.c 	if (skb->len < sizeof(*ev) ||
skb              3644 net/bluetooth/hci_event.c 	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
skb              3721 net/bluetooth/hci_event.c static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              3723 net/bluetooth/hci_event.c 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
skb              3731 net/bluetooth/hci_event.c 	if (skb->len < sizeof(*ev) ||
skb              3732 net/bluetooth/hci_event.c 	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
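Both hci_num_comp_pkts_evt() and hci_num_comp_blocks_evt() above guard a flexible-array event with two checks: the buffer must cover the fixed header, and it must cover the header plus num_hndl array entries, which is what struct_size(ev, handles, ev->num_hndl) computes. A simplified stand-in follows; note the kernel's struct_size() additionally saturates on multiplication overflow, which this macro does not.

/* Length-validate a counted flexible array before walking it. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct comp_ev {
	uint8_t num_hndl;
	struct { uint16_t handle; uint16_t count; } handles[];
};

#define STRUCT_SIZE(type, member, n) \
	(sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

static int parse(const uint8_t *data, size_t len)
{
	const struct comp_ev *ev = (const void *)data;

	if (len < sizeof(*ev) ||
	    len < STRUCT_SIZE(struct comp_ev, handles, ev->num_hndl))
		return -1;	/* malformed: reject before touching the array */
	return ev->num_hndl;
}

int main(void)
{
	uint8_t short_pkt[2] = { 3, 0 };	/* claims 3 entries, has none */
	printf("%d\n", parse(short_pkt, sizeof(short_pkt)));
	return 0;
}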
skb              3772 net/bluetooth/hci_event.c static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              3774 net/bluetooth/hci_event.c 	struct hci_ev_mode_change *ev = (void *) skb->data;
skb              3800 net/bluetooth/hci_event.c static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              3802 net/bluetooth/hci_event.c 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
skb              3870 net/bluetooth/hci_event.c static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              3872 net/bluetooth/hci_event.c 	struct hci_ev_link_key_req *ev = (void *) skb->data;
skb              3930 net/bluetooth/hci_event.c static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              3932 net/bluetooth/hci_event.c 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
skb              3990 net/bluetooth/hci_event.c static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              3992 net/bluetooth/hci_event.c 	struct hci_ev_clock_offset *ev = (void *) skb->data;
skb              4013 net/bluetooth/hci_event.c static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              4015 net/bluetooth/hci_event.c 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
skb              4029 net/bluetooth/hci_event.c static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              4031 net/bluetooth/hci_event.c 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
skb              4048 net/bluetooth/hci_event.c 					     struct sk_buff *skb)
skb              4051 net/bluetooth/hci_event.c 	int num_rsp = *((__u8 *) skb->data);
skb              4063 net/bluetooth/hci_event.c 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
skb              4065 net/bluetooth/hci_event.c 		info = (void *) (skb->data + 1);
skb              4086 net/bluetooth/hci_event.c 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
skb              4112 net/bluetooth/hci_event.c 					struct sk_buff *skb)
skb              4114 net/bluetooth/hci_event.c 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
skb              4176 net/bluetooth/hci_event.c 				       struct sk_buff *skb)
skb              4178 net/bluetooth/hci_event.c 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
skb              4260 net/bluetooth/hci_event.c 					    struct sk_buff *skb)
skb              4263 net/bluetooth/hci_event.c 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
skb              4264 net/bluetooth/hci_event.c 	int num_rsp = *((__u8 *) skb->data);
skb              4310 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              4312 net/bluetooth/hci_event.c 	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
skb              4419 net/bluetooth/hci_event.c static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              4421 net/bluetooth/hci_event.c 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
skb              4488 net/bluetooth/hci_event.c static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              4490 net/bluetooth/hci_event.c 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
skb              4509 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              4511 net/bluetooth/hci_event.c 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
skb              4584 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              4586 net/bluetooth/hci_event.c 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
skb              4595 net/bluetooth/hci_event.c 					struct sk_buff *skb)
skb              4597 net/bluetooth/hci_event.c 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
skb              4615 net/bluetooth/hci_event.c static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              4617 net/bluetooth/hci_event.c 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
skb              4654 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              4656 net/bluetooth/hci_event.c 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
skb              4685 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              4687 net/bluetooth/hci_event.c 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
skb              4707 net/bluetooth/hci_event.c 					    struct sk_buff *skb)
skb              4709 net/bluetooth/hci_event.c 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
skb              4761 net/bluetooth/hci_event.c static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              4763 net/bluetooth/hci_event.c 	struct hci_ev_channel_selected *ev = (void *)skb->data;
skb              4768 net/bluetooth/hci_event.c 	skb_pull(skb, sizeof(*ev));
skb              4778 net/bluetooth/hci_event.c 				      struct sk_buff *skb)
skb              4780 net/bluetooth/hci_event.c 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
skb              4817 net/bluetooth/hci_event.c static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              4819 net/bluetooth/hci_event.c 	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
skb              4856 net/bluetooth/hci_event.c 					     struct sk_buff *skb)
skb              4858 net/bluetooth/hci_event.c 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
skb              4880 net/bluetooth/hci_event.c 					     struct sk_buff *skb)
skb              4882 net/bluetooth/hci_event.c 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
skb              5064 net/bluetooth/hci_event.c static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              5066 net/bluetooth/hci_event.c 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
skb              5078 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              5080 net/bluetooth/hci_event.c 	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
skb              5091 net/bluetooth/hci_event.c static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              5093 net/bluetooth/hci_event.c 	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
skb              5120 net/bluetooth/hci_event.c 					    struct sk_buff *skb)
skb              5122 net/bluetooth/hci_event.c 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
skb              5428 net/bluetooth/hci_event.c static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              5430 net/bluetooth/hci_event.c 	u8 num_reports = skb->data[0];
skb              5431 net/bluetooth/hci_event.c 	void *ptr = &skb->data[1];
skb              5500 net/bluetooth/hci_event.c static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              5502 net/bluetooth/hci_event.c 	u8 num_reports = skb->data[0];
skb              5503 net/bluetooth/hci_event.c 	void *ptr = &skb->data[1];
skb              5527 net/bluetooth/hci_event.c 					    struct sk_buff *skb)
skb              5529 net/bluetooth/hci_event.c 	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
skb              5568 net/bluetooth/hci_event.c static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              5570 net/bluetooth/hci_event.c 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
skb              5645 net/bluetooth/hci_event.c 					     struct sk_buff *skb)
skb              5647 net/bluetooth/hci_event.c 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
skb              5703 net/bluetooth/hci_event.c 					 struct sk_buff *skb)
skb              5705 net/bluetooth/hci_event.c 	u8 num_reports = skb->data[0];
skb              5706 net/bluetooth/hci_event.c 	void *ptr = &skb->data[1];
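The three advertising-report handlers above (legacy, extended, and direct) share one parsing shape: a one-byte report count followed by variable-length report records. A minimal sketch of that walk; struct my_adv_info is hypothetical (the real handlers use hci_ev_le_advertising_info and friends), and length validation is omitted:

	/* Hedged sketch of the num_reports walk used by the
	 * *_adv_report_evt handlers indexed above. */
	struct my_adv_info {
		u8 evt_type;
		bdaddr_t bdaddr;
		u8 length;
		u8 data[];
	} __packed;

	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	while (num_reports--) {
		struct my_adv_info *ev = ptr;

		/* ... process one report; an RSSI byte trails the data ... */
		ptr += sizeof(*ev) + ev->length + 1;	/* report + rssi */
	}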
skb              5723 net/bluetooth/hci_event.c static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
skb              5725 net/bluetooth/hci_event.c 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
skb              5727 net/bluetooth/hci_event.c 	skb_pull(skb, sizeof(*le_ev));
skb              5731 net/bluetooth/hci_event.c 		hci_le_conn_complete_evt(hdev, skb);
skb              5735 net/bluetooth/hci_event.c 		hci_le_conn_update_complete_evt(hdev, skb);
skb              5739 net/bluetooth/hci_event.c 		hci_le_adv_report_evt(hdev, skb);
skb              5743 net/bluetooth/hci_event.c 		hci_le_remote_feat_complete_evt(hdev, skb);
skb              5747 net/bluetooth/hci_event.c 		hci_le_ltk_request_evt(hdev, skb);
skb              5751 net/bluetooth/hci_event.c 		hci_le_remote_conn_param_req_evt(hdev, skb);
skb              5755 net/bluetooth/hci_event.c 		hci_le_direct_adv_report_evt(hdev, skb);
skb              5759 net/bluetooth/hci_event.c 		hci_le_ext_adv_report_evt(hdev, skb);
skb              5763 net/bluetooth/hci_event.c 		hci_le_enh_conn_complete_evt(hdev, skb);
skb              5767 net/bluetooth/hci_event.c 		hci_le_ext_adv_term_evt(hdev, skb);
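hci_le_meta_evt, indexed above, is a thin demultiplexer: it strips the one-byte meta header with skb_pull and switches on the subevent code. Its shape, with most cases elided:

	/* Sketch of the LE meta-event demux above. */
	struct hci_ev_le_meta *le_ev = (void *)skb->data;

	skb_pull(skb, sizeof(*le_ev));		/* drop the subevent byte */

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;
	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;
	/* ... remaining subevents dispatch the same way ... */
	}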
skb              5776 net/bluetooth/hci_event.c 				 u8 event, struct sk_buff *skb)
skb              5781 net/bluetooth/hci_event.c 	if (!skb)
skb              5784 net/bluetooth/hci_event.c 	if (skb->len < sizeof(*hdr)) {
skb              5789 net/bluetooth/hci_event.c 	hdr = (void *) skb->data;
skb              5790 net/bluetooth/hci_event.c 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
skb              5810 net/bluetooth/hci_event.c 	if (skb->len < sizeof(*ev)) {
skb              5815 net/bluetooth/hci_event.c 	ev = (void *) skb->data;
skb              5816 net/bluetooth/hci_event.c 	skb_pull(skb, sizeof(*ev));
skb              5827 net/bluetooth/hci_event.c void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
skb              5829 net/bluetooth/hci_event.c 	struct hci_event_hdr *hdr = (void *) skb->data;
skb              5851 net/bluetooth/hci_event.c 		orig_skb = skb_clone(skb, GFP_KERNEL);
skb              5853 net/bluetooth/hci_event.c 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
skb              5857 net/bluetooth/hci_event.c 		hci_inquiry_complete_evt(hdev, skb);
skb              5861 net/bluetooth/hci_event.c 		hci_inquiry_result_evt(hdev, skb);
skb              5865 net/bluetooth/hci_event.c 		hci_conn_complete_evt(hdev, skb);
skb              5869 net/bluetooth/hci_event.c 		hci_conn_request_evt(hdev, skb);
skb              5873 net/bluetooth/hci_event.c 		hci_disconn_complete_evt(hdev, skb);
skb              5877 net/bluetooth/hci_event.c 		hci_auth_complete_evt(hdev, skb);
skb              5881 net/bluetooth/hci_event.c 		hci_remote_name_evt(hdev, skb);
skb              5885 net/bluetooth/hci_event.c 		hci_encrypt_change_evt(hdev, skb);
skb              5889 net/bluetooth/hci_event.c 		hci_change_link_key_complete_evt(hdev, skb);
skb              5893 net/bluetooth/hci_event.c 		hci_remote_features_evt(hdev, skb);
skb              5897 net/bluetooth/hci_event.c 		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
skb              5902 net/bluetooth/hci_event.c 		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
skb              5907 net/bluetooth/hci_event.c 		hci_hardware_error_evt(hdev, skb);
skb              5911 net/bluetooth/hci_event.c 		hci_role_change_evt(hdev, skb);
skb              5915 net/bluetooth/hci_event.c 		hci_num_comp_pkts_evt(hdev, skb);
skb              5919 net/bluetooth/hci_event.c 		hci_mode_change_evt(hdev, skb);
skb              5923 net/bluetooth/hci_event.c 		hci_pin_code_request_evt(hdev, skb);
skb              5927 net/bluetooth/hci_event.c 		hci_link_key_request_evt(hdev, skb);
skb              5931 net/bluetooth/hci_event.c 		hci_link_key_notify_evt(hdev, skb);
skb              5935 net/bluetooth/hci_event.c 		hci_clock_offset_evt(hdev, skb);
skb              5939 net/bluetooth/hci_event.c 		hci_pkt_type_change_evt(hdev, skb);
skb              5943 net/bluetooth/hci_event.c 		hci_pscan_rep_mode_evt(hdev, skb);
skb              5947 net/bluetooth/hci_event.c 		hci_inquiry_result_with_rssi_evt(hdev, skb);
skb              5951 net/bluetooth/hci_event.c 		hci_remote_ext_features_evt(hdev, skb);
skb              5955 net/bluetooth/hci_event.c 		hci_sync_conn_complete_evt(hdev, skb);
skb              5959 net/bluetooth/hci_event.c 		hci_extended_inquiry_result_evt(hdev, skb);
skb              5963 net/bluetooth/hci_event.c 		hci_key_refresh_complete_evt(hdev, skb);
skb              5967 net/bluetooth/hci_event.c 		hci_io_capa_request_evt(hdev, skb);
skb              5971 net/bluetooth/hci_event.c 		hci_io_capa_reply_evt(hdev, skb);
skb              5975 net/bluetooth/hci_event.c 		hci_user_confirm_request_evt(hdev, skb);
skb              5979 net/bluetooth/hci_event.c 		hci_user_passkey_request_evt(hdev, skb);
skb              5983 net/bluetooth/hci_event.c 		hci_user_passkey_notify_evt(hdev, skb);
skb              5987 net/bluetooth/hci_event.c 		hci_keypress_notify_evt(hdev, skb);
skb              5991 net/bluetooth/hci_event.c 		hci_simple_pair_complete_evt(hdev, skb);
skb              5995 net/bluetooth/hci_event.c 		hci_remote_host_features_evt(hdev, skb);
skb              5999 net/bluetooth/hci_event.c 		hci_le_meta_evt(hdev, skb);
skb              6003 net/bluetooth/hci_event.c 		hci_remote_oob_data_request_evt(hdev, skb);
skb              6008 net/bluetooth/hci_event.c 		hci_chan_selected_evt(hdev, skb);
skb              6012 net/bluetooth/hci_event.c 		hci_phy_link_complete_evt(hdev, skb);
skb              6016 net/bluetooth/hci_event.c 		hci_loglink_complete_evt(hdev, skb);
skb              6020 net/bluetooth/hci_event.c 		hci_disconn_loglink_complete_evt(hdev, skb);
skb              6024 net/bluetooth/hci_event.c 		hci_disconn_phylink_complete_evt(hdev, skb);
skb              6029 net/bluetooth/hci_event.c 		hci_num_comp_blocks_evt(hdev, skb);
skb              6048 net/bluetooth/hci_event.c 	kfree_skb(skb);
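The hci_event_packet entries above trace the top-level receive path: read the event header from skb->data, clone the skb when command-complete matching may need the original, pull the header, then switch on the event code. Compressed to its skeleton (a sketch; error paths and the opcode/status plumbing are omitted):

	/* Skeleton of hci_event_packet's flow, per the index above. */
	struct hci_event_hdr *hdr = (void *)skb->data;
	u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;
	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;
	/* ... dozens of further cases as listed above ... */
	}

	kfree_skb(skb);		/* the event skb is always consumed here */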
skb                58 net/bluetooth/hci_request.c 	struct sk_buff *skb;
skb                75 net/bluetooth/hci_request.c 	skb = skb_peek_tail(&req->cmd_q);
skb                77 net/bluetooth/hci_request.c 		bt_cb(skb)->hci.req_complete = complete;
skb                79 net/bluetooth/hci_request.c 		bt_cb(skb)->hci.req_complete_skb = complete_skb;
skb                80 net/bluetooth/hci_request.c 		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
skb               103 net/bluetooth/hci_request.c 				  struct sk_buff *skb)
skb               110 net/bluetooth/hci_request.c 		if (skb)
skb               111 net/bluetooth/hci_request.c 			hdev->req_skb = skb_get(skb);
skb               131 net/bluetooth/hci_request.c 	struct sk_buff *skb;
skb               167 net/bluetooth/hci_request.c 	skb = hdev->req_skb;
skb               173 net/bluetooth/hci_request.c 		kfree_skb(skb);
skb               177 net/bluetooth/hci_request.c 	if (!skb)
skb               180 net/bluetooth/hci_request.c 	return skb;
skb               290 net/bluetooth/hci_request.c 	struct sk_buff *skb;
skb               292 net/bluetooth/hci_request.c 	skb = bt_skb_alloc(len, GFP_ATOMIC);
skb               293 net/bluetooth/hci_request.c 	if (!skb)
skb               296 net/bluetooth/hci_request.c 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
skb               301 net/bluetooth/hci_request.c 		skb_put_data(skb, param, plen);
skb               303 net/bluetooth/hci_request.c 	BT_DBG("skb len %d", skb->len);
skb               305 net/bluetooth/hci_request.c 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
skb               306 net/bluetooth/hci_request.c 	hci_skb_opcode(skb) = opcode;
skb               308 net/bluetooth/hci_request.c 	return skb;
skb               316 net/bluetooth/hci_request.c 	struct sk_buff *skb;
skb               326 net/bluetooth/hci_request.c 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
skb               327 net/bluetooth/hci_request.c 	if (!skb) {
skb               335 net/bluetooth/hci_request.c 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
skb               337 net/bluetooth/hci_request.c 	bt_cb(skb)->hci.req_event = event;
skb               339 net/bluetooth/hci_request.c 	skb_queue_tail(&req->cmd_q, skb);
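hci_prepare_cmd and hci_req_add_ev above show the command-build path: allocate a Bluetooth skb, write the little-endian opcode header, append parameters, then tag packet type and opcode in the skb control block before queuing. Condensed, assuming len is HCI_COMMAND_HDR_SIZE + plen as in the listing:

	/* Condensed from hci_prepare_cmd above. */
	struct hci_command_hdr *hdr;
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);	/* HCI is little-endian on the wire */
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;	/* consumed by the driver */
	hci_skb_opcode(skb) = opcode;			/* matched on completion */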
skb               153 net/bluetooth/hci_sock.c static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
skb               161 net/bluetooth/hci_sock.c 	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
skb               167 net/bluetooth/hci_sock.c 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
skb               170 net/bluetooth/hci_sock.c 	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
skb               180 net/bluetooth/hci_sock.c 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
skb               184 net/bluetooth/hci_sock.c 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
skb               191 net/bluetooth/hci_sock.c void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
skb               196 net/bluetooth/hci_sock.c 	BT_DBG("hdev %p len %d", hdev, skb->len);
skb               207 net/bluetooth/hci_sock.c 		if (skb->sk == sk)
skb               211 net/bluetooth/hci_sock.c 			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
skb               212 net/bluetooth/hci_sock.c 			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
skb               213 net/bluetooth/hci_sock.c 			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
skb               214 net/bluetooth/hci_sock.c 			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
skb               216 net/bluetooth/hci_sock.c 			if (is_filtered_packet(sk, skb))
skb               219 net/bluetooth/hci_sock.c 			if (!bt_cb(skb)->incoming)
skb               221 net/bluetooth/hci_sock.c 			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
skb               222 net/bluetooth/hci_sock.c 			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
skb               223 net/bluetooth/hci_sock.c 			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
skb               232 net/bluetooth/hci_sock.c 			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
skb               237 net/bluetooth/hci_sock.c 			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
skb               254 net/bluetooth/hci_sock.c static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
skb               259 net/bluetooth/hci_sock.c 	BT_DBG("channel %u len %d", channel, skb->len);
skb               278 net/bluetooth/hci_sock.c 		nskb = skb_clone(skb, GFP_ATOMIC);
skb               288 net/bluetooth/hci_sock.c void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
skb               292 net/bluetooth/hci_sock.c 	__hci_send_to_channel(channel, skb, flag, skip_sk);
skb               297 net/bluetooth/hci_sock.c void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
skb               306 net/bluetooth/hci_sock.c 	BT_DBG("hdev %p len %d", hdev, skb->len);
skb               308 net/bluetooth/hci_sock.c 	switch (hci_skb_pkt_type(skb)) {
skb               316 net/bluetooth/hci_sock.c 		if (bt_cb(skb)->incoming)
skb               322 net/bluetooth/hci_sock.c 		if (bt_cb(skb)->incoming)
skb               335 net/bluetooth/hci_sock.c 	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
skb               343 net/bluetooth/hci_sock.c 	hdr->len = cpu_to_le16(skb->len);
skb               366 net/bluetooth/hci_sock.c 		struct sk_buff *skb;
skb               379 net/bluetooth/hci_sock.c 		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
skb               380 net/bluetooth/hci_sock.c 		if (!skb)
skb               383 net/bluetooth/hci_sock.c 		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
skb               384 net/bluetooth/hci_sock.c 		put_unaligned_le16(event, skb_put(skb, 2));
skb               387 net/bluetooth/hci_sock.c 			skb_put_data(skb, data, data_len);
skb               389 net/bluetooth/hci_sock.c 		skb->tstamp = tstamp;
skb               391 net/bluetooth/hci_sock.c 		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
skb               394 net/bluetooth/hci_sock.c 		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
skb               396 net/bluetooth/hci_sock.c 		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb               398 net/bluetooth/hci_sock.c 		kfree_skb(skb);
skb               409 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb               414 net/bluetooth/hci_sock.c 		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
skb               415 net/bluetooth/hci_sock.c 		if (!skb)
skb               418 net/bluetooth/hci_sock.c 		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
skb               428 net/bluetooth/hci_sock.c 		skb = bt_skb_alloc(0, GFP_ATOMIC);
skb               429 net/bluetooth/hci_sock.c 		if (!skb)
skb               442 net/bluetooth/hci_sock.c 		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
skb               443 net/bluetooth/hci_sock.c 		if (!skb)
skb               446 net/bluetooth/hci_sock.c 		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
skb               454 net/bluetooth/hci_sock.c 		skb = bt_skb_alloc(0, GFP_ATOMIC);
skb               455 net/bluetooth/hci_sock.c 		if (!skb)
skb               462 net/bluetooth/hci_sock.c 		skb = bt_skb_alloc(0, GFP_ATOMIC);
skb               463 net/bluetooth/hci_sock.c 		if (!skb)
skb               473 net/bluetooth/hci_sock.c 	__net_timestamp(skb);
skb               475 net/bluetooth/hci_sock.c 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
skb               478 net/bluetooth/hci_sock.c 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
skb               480 net/bluetooth/hci_sock.c 	return skb;
skb               486 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb               515 net/bluetooth/hci_sock.c 	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
skb               516 net/bluetooth/hci_sock.c 	if (!skb)
skb               521 net/bluetooth/hci_sock.c 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
skb               522 net/bluetooth/hci_sock.c 	put_unaligned_le16(format, skb_put(skb, 2));
skb               523 net/bluetooth/hci_sock.c 	skb_put_data(skb, ver, sizeof(ver));
skb               524 net/bluetooth/hci_sock.c 	put_unaligned_le32(flags, skb_put(skb, 4));
skb               525 net/bluetooth/hci_sock.c 	skb_put_u8(skb, TASK_COMM_LEN);
skb               526 net/bluetooth/hci_sock.c 	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
skb               528 net/bluetooth/hci_sock.c 	__net_timestamp(skb);
skb               530 net/bluetooth/hci_sock.c 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
skb               536 net/bluetooth/hci_sock.c 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
skb               538 net/bluetooth/hci_sock.c 	return skb;
skb               544 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb               560 net/bluetooth/hci_sock.c 	skb = bt_skb_alloc(4, GFP_ATOMIC);
skb               561 net/bluetooth/hci_sock.c 	if (!skb)
skb               564 net/bluetooth/hci_sock.c 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
skb               566 net/bluetooth/hci_sock.c 	__net_timestamp(skb);
skb               568 net/bluetooth/hci_sock.c 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
skb               574 net/bluetooth/hci_sock.c 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
skb               576 net/bluetooth/hci_sock.c 	return skb;
skb               584 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb               586 net/bluetooth/hci_sock.c 	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
skb               587 net/bluetooth/hci_sock.c 	if (!skb)
skb               590 net/bluetooth/hci_sock.c 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
skb               591 net/bluetooth/hci_sock.c 	put_unaligned_le16(opcode, skb_put(skb, 2));
skb               594 net/bluetooth/hci_sock.c 		skb_put_data(skb, buf, len);
skb               596 net/bluetooth/hci_sock.c 	__net_timestamp(skb);
skb               598 net/bluetooth/hci_sock.c 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
skb               601 net/bluetooth/hci_sock.c 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
skb               603 net/bluetooth/hci_sock.c 	return skb;
skb               611 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb               618 net/bluetooth/hci_sock.c 	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
skb               619 net/bluetooth/hci_sock.c 	if (!skb)
skb               623 net/bluetooth/hci_sock.c 	vsprintf(skb_put(skb, len), fmt, args);
skb               624 net/bluetooth/hci_sock.c 	*(u8 *)skb_put(skb, 1) = 0;
skb               627 net/bluetooth/hci_sock.c 	__net_timestamp(skb);
skb               629 net/bluetooth/hci_sock.c 	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
skb               632 net/bluetooth/hci_sock.c 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
skb               634 net/bluetooth/hci_sock.c 	if (sock_queue_rcv_skb(sk, skb))
skb               635 net/bluetooth/hci_sock.c 		kfree_skb(skb);
skb               645 net/bluetooth/hci_sock.c 		struct sk_buff *skb;
skb               647 net/bluetooth/hci_sock.c 		skb = create_monitor_event(hdev, HCI_DEV_REG);
skb               648 net/bluetooth/hci_sock.c 		if (!skb)
skb               651 net/bluetooth/hci_sock.c 		if (sock_queue_rcv_skb(sk, skb))
skb               652 net/bluetooth/hci_sock.c 			kfree_skb(skb);
skb               657 net/bluetooth/hci_sock.c 		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
skb               658 net/bluetooth/hci_sock.c 		if (!skb)
skb               661 net/bluetooth/hci_sock.c 		if (sock_queue_rcv_skb(sk, skb))
skb               662 net/bluetooth/hci_sock.c 			kfree_skb(skb);
skb               665 net/bluetooth/hci_sock.c 			skb = create_monitor_event(hdev, HCI_DEV_UP);
skb               667 net/bluetooth/hci_sock.c 			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
skb               669 net/bluetooth/hci_sock.c 			skb = NULL;
skb               671 net/bluetooth/hci_sock.c 		if (skb) {
skb               672 net/bluetooth/hci_sock.c 			if (sock_queue_rcv_skb(sk, skb))
skb               673 net/bluetooth/hci_sock.c 				kfree_skb(skb);
skb               687 net/bluetooth/hci_sock.c 		struct sk_buff *skb;
skb               689 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_open(sk);
skb               690 net/bluetooth/hci_sock.c 		if (!skb)
skb               693 net/bluetooth/hci_sock.c 		if (sock_queue_rcv_skb(mon_sk, skb))
skb               694 net/bluetooth/hci_sock.c 			kfree_skb(skb);
skb               705 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb               707 net/bluetooth/hci_sock.c 	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
skb               708 net/bluetooth/hci_sock.c 	if (!skb)
skb               711 net/bluetooth/hci_sock.c 	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
skb               715 net/bluetooth/hci_sock.c 	ev = skb_put(skb, sizeof(*ev) + dlen);
skb               719 net/bluetooth/hci_sock.c 	bt_cb(skb)->incoming = 1;
skb               720 net/bluetooth/hci_sock.c 	__net_timestamp(skb);
skb               722 net/bluetooth/hci_sock.c 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb               723 net/bluetooth/hci_sock.c 	hci_send_to_sock(hdev, skb);
skb               724 net/bluetooth/hci_sock.c 	kfree_skb(skb);
skb               732 net/bluetooth/hci_sock.c 		struct sk_buff *skb;
skb               735 net/bluetooth/hci_sock.c 		skb = create_monitor_event(hdev, event);
skb               736 net/bluetooth/hci_sock.c 		if (skb) {
skb               737 net/bluetooth/hci_sock.c 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb               739 net/bluetooth/hci_sock.c 			kfree_skb(skb);
skb               827 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb               844 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_close(sk);
skb               845 net/bluetooth/hci_sock.c 		if (skb) {
skb               846 net/bluetooth/hci_sock.c 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb               848 net/bluetooth/hci_sock.c 			kfree_skb(skb);
skb               989 net/bluetooth/hci_sock.c 		struct sk_buff *skb;
skb               995 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_open(sk);
skb               996 net/bluetooth/hci_sock.c 		if (skb) {
skb               997 net/bluetooth/hci_sock.c 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb               999 net/bluetooth/hci_sock.c 			kfree_skb(skb);
skb              1066 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb              1114 net/bluetooth/hci_sock.c 			skb = create_monitor_ctrl_close(sk);
skb              1115 net/bluetooth/hci_sock.c 			if (skb) {
skb              1116 net/bluetooth/hci_sock.c 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb              1118 net/bluetooth/hci_sock.c 				kfree_skb(skb);
skb              1128 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_open(sk);
skb              1129 net/bluetooth/hci_sock.c 		if (skb) {
skb              1130 net/bluetooth/hci_sock.c 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb              1132 net/bluetooth/hci_sock.c 			kfree_skb(skb);
skb              1203 net/bluetooth/hci_sock.c 			skb = create_monitor_ctrl_close(sk);
skb              1204 net/bluetooth/hci_sock.c 			if (skb) {
skb              1205 net/bluetooth/hci_sock.c 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb              1207 net/bluetooth/hci_sock.c 				kfree_skb(skb);
skb              1219 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_open(sk);
skb              1220 net/bluetooth/hci_sock.c 		if (skb) {
skb              1221 net/bluetooth/hci_sock.c 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb              1223 net/bluetooth/hci_sock.c 			kfree_skb(skb);
skb              1311 net/bluetooth/hci_sock.c 				skb = create_monitor_ctrl_close(sk);
skb              1312 net/bluetooth/hci_sock.c 				if (skb) {
skb              1313 net/bluetooth/hci_sock.c 					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb              1315 net/bluetooth/hci_sock.c 					kfree_skb(skb);
skb              1320 net/bluetooth/hci_sock.c 			skb = create_monitor_ctrl_open(sk);
skb              1321 net/bluetooth/hci_sock.c 			if (skb) {
skb              1322 net/bluetooth/hci_sock.c 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb              1324 net/bluetooth/hci_sock.c 				kfree_skb(skb);
skb              1376 net/bluetooth/hci_sock.c 			  struct sk_buff *skb)
skb              1381 net/bluetooth/hci_sock.c 		int incoming = bt_cb(skb)->incoming;
skb              1394 net/bluetooth/hci_sock.c 		skb_get_timestamp(skb, &tv);
skb              1417 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb              1432 net/bluetooth/hci_sock.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb              1433 net/bluetooth/hci_sock.c 	if (!skb)
skb              1436 net/bluetooth/hci_sock.c 	skblen = skb->len;
skb              1437 net/bluetooth/hci_sock.c 	copied = skb->len;
skb              1443 net/bluetooth/hci_sock.c 	skb_reset_transport_header(skb);
skb              1444 net/bluetooth/hci_sock.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb              1448 net/bluetooth/hci_sock.c 		hci_sock_cmsg(sk, msg, skb);
skb              1452 net/bluetooth/hci_sock.c 		sock_recv_timestamp(msg, sk, skb);
skb              1456 net/bluetooth/hci_sock.c 			sock_recv_timestamp(msg, sk, skb);
skb              1460 net/bluetooth/hci_sock.c 	skb_free_datagram(sk, skb);
skb              1505 net/bluetooth/hci_sock.c 		struct sk_buff *skb;
skb              1508 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_command(sk, index, opcode, len,
skb              1510 net/bluetooth/hci_sock.c 		if (skb) {
skb              1511 net/bluetooth/hci_sock.c 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
skb              1513 net/bluetooth/hci_sock.c 			kfree_skb(skb);
skb              1595 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb              1607 net/bluetooth/hci_sock.c 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
skb              1608 net/bluetooth/hci_sock.c 	if (!skb)
skb              1611 net/bluetooth/hci_sock.c 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
skb              1616 net/bluetooth/hci_sock.c 	hdr = (void *)skb->data;
skb              1624 net/bluetooth/hci_sock.c 		__u8 priority = skb->data[sizeof(*hdr)];
skb              1625 net/bluetooth/hci_sock.c 		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
skb              1640 net/bluetooth/hci_sock.c 		if (priority > 7 || skb->data[len - 1] != 0x00 ||
skb              1642 net/bluetooth/hci_sock.c 		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
skb              1665 net/bluetooth/hci_sock.c 	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
skb              1672 net/bluetooth/hci_sock.c 	kfree_skb(skb);
skb              1682 net/bluetooth/hci_sock.c 	struct sk_buff *skb;
skb              1732 net/bluetooth/hci_sock.c 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
skb              1733 net/bluetooth/hci_sock.c 	if (!skb)
skb              1736 net/bluetooth/hci_sock.c 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
skb              1741 net/bluetooth/hci_sock.c 	hci_skb_pkt_type(skb) = skb->data[0];
skb              1742 net/bluetooth/hci_sock.c 	skb_pull(skb, 1);
skb              1750 net/bluetooth/hci_sock.c 		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
skb              1751 net/bluetooth/hci_sock.c 		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
skb              1752 net/bluetooth/hci_sock.c 		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
skb              1757 net/bluetooth/hci_sock.c 		skb_queue_tail(&hdev->raw_q, skb);
skb              1759 net/bluetooth/hci_sock.c 	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
skb              1760 net/bluetooth/hci_sock.c 		u16 opcode = get_unaligned_le16(skb->data);
skb              1775 net/bluetooth/hci_sock.c 		hci_skb_opcode(skb) = opcode;
skb              1778 net/bluetooth/hci_sock.c 			skb_queue_tail(&hdev->raw_q, skb);
skb              1784 net/bluetooth/hci_sock.c 			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
skb              1786 net/bluetooth/hci_sock.c 			skb_queue_tail(&hdev->cmd_q, skb);
skb              1795 net/bluetooth/hci_sock.c 		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
skb              1796 net/bluetooth/hci_sock.c 		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
skb              1801 net/bluetooth/hci_sock.c 		skb_queue_tail(&hdev->raw_q, skb);
skb              1812 net/bluetooth/hci_sock.c 	kfree_skb(skb);
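Every create_monitor_* helper in the hci_sock.c entries above converges on the same epilogue: timestamp the skb, push a monitor header, and fill in opcode, index, and a length that excludes the header itself. Sketched below; hdr_opcode and hdr_index stand in for the per-event values:

	/* Common epilogue of the create_monitor_* helpers above. */
	struct hci_mon_hdr *hdr;

	__net_timestamp(skb);		/* monitor clients expect timestamps */

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(hdr_opcode);	/* e.g. HCI_MON_CTRL_OPEN */
	hdr->index  = cpu_to_le16(hdr_index);	/* controller index or MGMT_INDEX_NONE */
	hdr->len    = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);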
skb               102 net/bluetooth/hidp/core.c 	struct sk_buff *skb;
skb               111 net/bluetooth/hidp/core.c 	skb = alloc_skb(size + 1, GFP_ATOMIC);
skb               112 net/bluetooth/hidp/core.c 	if (!skb) {
skb               117 net/bluetooth/hidp/core.c 	skb_put_u8(skb, hdr);
skb               119 net/bluetooth/hidp/core.c 		skb_put_data(skb, data, size);
skb               125 net/bluetooth/hidp/core.c 	skb_queue_tail(transmit, skb);
skb               178 net/bluetooth/hidp/core.c static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
skb               182 net/bluetooth/hidp/core.c 	unsigned char *udata = skb->data + 1;
skb               183 net/bluetooth/hidp/core.c 	signed char *sdata = skb->data + 1;
skb               184 net/bluetooth/hidp/core.c 	int i, size = skb->len - 1;
skb               186 net/bluetooth/hidp/core.c 	switch (skb->data[0]) {
skb               239 net/bluetooth/hidp/core.c 	struct sk_buff *skb;
skb               295 net/bluetooth/hidp/core.c 	skb = session->report_return;
skb               296 net/bluetooth/hidp/core.c 	if (skb) {
skb               297 net/bluetooth/hidp/core.c 		len = skb->len < count ? skb->len : count;
skb               298 net/bluetooth/hidp/core.c 		memcpy(data, skb->data, len);
skb               300 net/bluetooth/hidp/core.c 		kfree_skb(skb);
skb               507 net/bluetooth/hidp/core.c static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
skb               511 net/bluetooth/hidp/core.c 	BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param);
skb               518 net/bluetooth/hidp/core.c 			hidp_input_report(session, skb);
skb               522 net/bluetooth/hidp/core.c 					    skb->data, skb->len, 0);
skb               538 net/bluetooth/hidp/core.c 		    session->waiting_report_number == skb->data[0]) {
skb               540 net/bluetooth/hidp/core.c 			session->report_return = skb;
skb               551 net/bluetooth/hidp/core.c 					struct sk_buff *skb)
skb               556 net/bluetooth/hidp/core.c 	BT_DBG("session %p skb %p len %d", session, skb, skb->len);
skb               558 net/bluetooth/hidp/core.c 	hdr = skb->data[0];
skb               559 net/bluetooth/hidp/core.c 	skb_pull(skb, 1);
skb               574 net/bluetooth/hidp/core.c 		free_skb = hidp_process_data(session, skb, param);
skb               584 net/bluetooth/hidp/core.c 		kfree_skb(skb);
skb               588 net/bluetooth/hidp/core.c 				struct sk_buff *skb)
skb               592 net/bluetooth/hidp/core.c 	BT_DBG("session %p skb %p len %d", session, skb, skb->len);
skb               594 net/bluetooth/hidp/core.c 	hdr = skb->data[0];
skb               595 net/bluetooth/hidp/core.c 	skb_pull(skb, 1);
skb               601 net/bluetooth/hidp/core.c 			hidp_input_report(session, skb);
skb               605 net/bluetooth/hidp/core.c 					    skb->data, skb->len, 1);
skb               606 net/bluetooth/hidp/core.c 			BT_DBG("report len %d", skb->len);
skb               612 net/bluetooth/hidp/core.c 	kfree_skb(skb);
skb               635 net/bluetooth/hidp/core.c 	struct sk_buff *skb;
skb               640 net/bluetooth/hidp/core.c 	while ((skb = skb_dequeue(transmit))) {
skb               641 net/bluetooth/hidp/core.c 		ret = hidp_send_frame(sock, skb->data, skb->len);
skb               643 net/bluetooth/hidp/core.c 			skb_queue_head(transmit, skb);
skb               647 net/bluetooth/hidp/core.c 			kfree_skb(skb);
skb               652 net/bluetooth/hidp/core.c 		kfree_skb(skb);
skb              1192 net/bluetooth/hidp/core.c 	struct sk_buff *skb;
skb              1213 net/bluetooth/hidp/core.c 		while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
skb              1214 net/bluetooth/hidp/core.c 			skb_orphan(skb);
skb              1215 net/bluetooth/hidp/core.c 			if (!skb_linearize(skb))
skb              1216 net/bluetooth/hidp/core.c 				hidp_recv_intr_frame(session, skb);
skb              1218 net/bluetooth/hidp/core.c 				kfree_skb(skb);
skb              1226 net/bluetooth/hidp/core.c 		while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
skb              1227 net/bluetooth/hidp/core.c 			skb_orphan(skb);
skb              1228 net/bluetooth/hidp/core.c 			if (!skb_linearize(skb))
skb              1229 net/bluetooth/hidp/core.c 				hidp_recv_ctrl_frame(session, skb);
skb              1231 net/bluetooth/hidp/core.c 				kfree_skb(skb);
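The hidp session-thread entries above drain both the interrupt and control sockets with the same loop: dequeue, orphan, linearize, parse. A sketch; handle_frame stands in for hidp_recv_intr_frame/hidp_recv_ctrl_frame:

	/* Receive-drain shape used for both hidp sockets above. */
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
		skb_orphan(skb);		/* detach from socket accounting */
		if (!skb_linearize(skb))	/* parsers index skb->data directly */
			handle_frame(session, skb);	/* consumes or frees the skb */
		else
			kfree_skb(skb);
	}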
skb               308 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb               310 net/bluetooth/l2cap_core.c 	skb_queue_walk(head, skb) {
skb               311 net/bluetooth/l2cap_core.c 		if (bt_cb(skb)->l2cap.txseq == seq)
skb               312 net/bluetooth/l2cap_core.c 			return skb;
skb               862 net/bluetooth/l2cap_core.c 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
skb               867 net/bluetooth/l2cap_core.c 	if (!skb)
skb               878 net/bluetooth/l2cap_core.c 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
skb               879 net/bluetooth/l2cap_core.c 	skb->priority = HCI_PRIO_MAX;
skb               881 net/bluetooth/l2cap_core.c 	hci_send_acl(conn->hchan, skb, flags);
skb               890 net/bluetooth/l2cap_core.c static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
skb               895 net/bluetooth/l2cap_core.c 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
skb               896 net/bluetooth/l2cap_core.c 	       skb->priority);
skb               900 net/bluetooth/l2cap_core.c 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
skb               902 net/bluetooth/l2cap_core.c 			kfree_skb(skb);
skb               918 net/bluetooth/l2cap_core.c 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
skb               919 net/bluetooth/l2cap_core.c 	hci_send_acl(chan->conn->hchan, skb, flags);
skb               971 net/bluetooth/l2cap_core.c 				    struct sk_buff *skb)
skb               974 net/bluetooth/l2cap_core.c 		__unpack_extended_control(get_unaligned_le32(skb->data),
skb               975 net/bluetooth/l2cap_core.c 					  &bt_cb(skb)->l2cap);
skb               976 net/bluetooth/l2cap_core.c 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
skb               978 net/bluetooth/l2cap_core.c 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
skb               979 net/bluetooth/l2cap_core.c 					  &bt_cb(skb)->l2cap);
skb               980 net/bluetooth/l2cap_core.c 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
skb              1024 net/bluetooth/l2cap_core.c 				  struct sk_buff *skb)
skb              1028 net/bluetooth/l2cap_core.c 				   skb->data + L2CAP_HDR_SIZE);
skb              1031 net/bluetooth/l2cap_core.c 				   skb->data + L2CAP_HDR_SIZE);
skb              1046 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              1053 net/bluetooth/l2cap_core.c 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
skb              1055 net/bluetooth/l2cap_core.c 	if (!skb)
skb              1058 net/bluetooth/l2cap_core.c 	lh = skb_put(skb, L2CAP_HDR_SIZE);
skb              1063 net/bluetooth/l2cap_core.c 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
skb              1065 net/bluetooth/l2cap_core.c 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
skb              1068 net/bluetooth/l2cap_core.c 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
skb              1069 net/bluetooth/l2cap_core.c 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
skb              1072 net/bluetooth/l2cap_core.c 	skb->priority = HCI_PRIO_MAX;
skb              1073 net/bluetooth/l2cap_core.c 	return skb;
skb              1079 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              1112 net/bluetooth/l2cap_core.c 	skb = l2cap_create_sframe_pdu(chan, control_field);
skb              1113 net/bluetooth/l2cap_core.c 	if (!IS_ERR(skb))
skb              1114 net/bluetooth/l2cap_core.c 		l2cap_do_send(chan, skb);
skb              1208 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              1220 net/bluetooth/l2cap_core.c 	skb_queue_walk(&chan->tx_q, skb) {
skb              1221 net/bluetooth/l2cap_core.c 		if (bt_cb(skb)->l2cap.retries)
skb              1222 net/bluetooth/l2cap_core.c 			bt_cb(skb)->l2cap.retries = 1;
skb              1874 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              1886 net/bluetooth/l2cap_core.c 		skb = skb_dequeue(&chan->tx_q);
skb              1888 net/bluetooth/l2cap_core.c 		bt_cb(skb)->l2cap.retries = 1;
skb              1889 net/bluetooth/l2cap_core.c 		control = &bt_cb(skb)->l2cap;
skb              1894 net/bluetooth/l2cap_core.c 		__pack_control(chan, control, skb);
skb              1897 net/bluetooth/l2cap_core.c 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
skb              1898 net/bluetooth/l2cap_core.c 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
skb              1901 net/bluetooth/l2cap_core.c 		l2cap_do_send(chan, skb);
skb              1912 net/bluetooth/l2cap_core.c 	struct sk_buff *skb, *tx_skb;
skb              1931 net/bluetooth/l2cap_core.c 		skb = chan->tx_send_head;
skb              1933 net/bluetooth/l2cap_core.c 		bt_cb(skb)->l2cap.retries = 1;
skb              1934 net/bluetooth/l2cap_core.c 		control = &bt_cb(skb)->l2cap;
skb              1943 net/bluetooth/l2cap_core.c 		__pack_control(chan, control, skb);
skb              1946 net/bluetooth/l2cap_core.c 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
skb              1947 net/bluetooth/l2cap_core.c 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
skb              1953 net/bluetooth/l2cap_core.c 		tx_skb = skb_clone(skb, GFP_KERNEL);
skb              1965 net/bluetooth/l2cap_core.c 		if (skb_queue_is_last(&chan->tx_q, skb))
skb              1968 net/bluetooth/l2cap_core.c 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
skb              1983 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              1998 net/bluetooth/l2cap_core.c 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
skb              1999 net/bluetooth/l2cap_core.c 		if (!skb) {
skb              2005 net/bluetooth/l2cap_core.c 		bt_cb(skb)->l2cap.retries++;
skb              2006 net/bluetooth/l2cap_core.c 		control = bt_cb(skb)->l2cap;
skb              2009 net/bluetooth/l2cap_core.c 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
skb              2022 net/bluetooth/l2cap_core.c 		if (skb_cloned(skb)) {
skb              2026 net/bluetooth/l2cap_core.c 			tx_skb = skb_copy(skb, GFP_KERNEL);
skb              2028 net/bluetooth/l2cap_core.c 			tx_skb = skb_clone(skb, GFP_KERNEL);
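The retransmission entries above capture a subtle skb rule: a cloned skb shares its data area, so when the control field is about to be rewritten the frame must be deep-copied first. The choice, isolated:

	/* Isolated from the retransmit path above: take a private copy when
	 * the data is shared, otherwise a cheap clone is enough. */
	if (skb_cloned(skb))
		tx_skb = skb_copy(skb, GFP_KERNEL);	/* private header + data */
	else
		tx_skb = skb_clone(skb, GFP_KERNEL);	/* shared data, new skb head */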
skb              2073 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              2086 net/bluetooth/l2cap_core.c 		skb_queue_walk(&chan->tx_q, skb) {
skb              2087 net/bluetooth/l2cap_core.c 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
skb              2088 net/bluetooth/l2cap_core.c 			    skb == chan->tx_send_head)
skb              2092 net/bluetooth/l2cap_core.c 		skb_queue_walk_from(&chan->tx_q, skb) {
skb              2093 net/bluetooth/l2cap_core.c 			if (skb == chan->tx_send_head)
skb              2097 net/bluetooth/l2cap_core.c 					      bt_cb(skb)->l2cap.txseq);
skb              2156 net/bluetooth/l2cap_core.c 					 int count, struct sk_buff *skb)
skb              2162 net/bluetooth/l2cap_core.c 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
skb              2169 net/bluetooth/l2cap_core.c 	frag = &skb_shinfo(skb)->frag_list;
skb              2189 net/bluetooth/l2cap_core.c 		skb->len += (*frag)->len;
skb              2190 net/bluetooth/l2cap_core.c 		skb->data_len += (*frag)->len;
skb              2202 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              2211 net/bluetooth/l2cap_core.c 	skb = chan->ops->alloc_skb(chan, hlen, count,
skb              2213 net/bluetooth/l2cap_core.c 	if (IS_ERR(skb))
skb              2214 net/bluetooth/l2cap_core.c 		return skb;
skb              2217 net/bluetooth/l2cap_core.c 	lh = skb_put(skb, L2CAP_HDR_SIZE);
skb              2220 net/bluetooth/l2cap_core.c 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
skb              2222 net/bluetooth/l2cap_core.c 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
skb              2224 net/bluetooth/l2cap_core.c 		kfree_skb(skb);
skb              2227 net/bluetooth/l2cap_core.c 	return skb;
skb              2234 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              2242 net/bluetooth/l2cap_core.c 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
skb              2244 net/bluetooth/l2cap_core.c 	if (IS_ERR(skb))
skb              2245 net/bluetooth/l2cap_core.c 		return skb;
skb              2248 net/bluetooth/l2cap_core.c 	lh = skb_put(skb, L2CAP_HDR_SIZE);
skb              2252 net/bluetooth/l2cap_core.c 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
skb              2254 net/bluetooth/l2cap_core.c 		kfree_skb(skb);
skb              2257 net/bluetooth/l2cap_core.c 	return skb;
skb              2265 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              2284 net/bluetooth/l2cap_core.c 	skb = chan->ops->alloc_skb(chan, hlen, count,
skb              2286 net/bluetooth/l2cap_core.c 	if (IS_ERR(skb))
skb              2287 net/bluetooth/l2cap_core.c 		return skb;
skb              2290 net/bluetooth/l2cap_core.c 	lh = skb_put(skb, L2CAP_HDR_SIZE);
skb              2296 net/bluetooth/l2cap_core.c 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
skb              2298 net/bluetooth/l2cap_core.c 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
skb              2301 net/bluetooth/l2cap_core.c 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
skb              2303 net/bluetooth/l2cap_core.c 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
skb              2305 net/bluetooth/l2cap_core.c 		kfree_skb(skb);
skb              2309 net/bluetooth/l2cap_core.c 	bt_cb(skb)->l2cap.fcs = chan->fcs;
skb              2310 net/bluetooth/l2cap_core.c 	bt_cb(skb)->l2cap.retries = 0;
skb              2311 net/bluetooth/l2cap_core.c 	return skb;
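l2cap_create_iframe_pdu above assembles an ERTM I-frame piece by piece; the resulting wire layout is worth seeing in one place. A sketch for the common enhanced-control case (field sizes per the L2CAP spec):

	/*
	 * ERTM I-frame as assembled above (enhanced 2-byte control):
	 *
	 *   +--------+--------+---------+------------+---------+--------+
	 *   | len(2) | cid(2) | ctrl(2) | sdulen(2)* | payload | fcs(2)+|
	 *   +--------+--------+---------+------------+---------+--------+
	 *
	 *   (*) only on the first (SAR start) fragment of a segmented SDU
	 *   (+) appended at transmit time when chan->fcs == L2CAP_FCS_CRC16
	 */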
skb              2318 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              2356 net/bluetooth/l2cap_core.c 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
skb              2358 net/bluetooth/l2cap_core.c 		if (IS_ERR(skb)) {
skb              2360 net/bluetooth/l2cap_core.c 			return PTR_ERR(skb);
skb              2363 net/bluetooth/l2cap_core.c 		bt_cb(skb)->l2cap.sar = sar;
skb              2364 net/bluetooth/l2cap_core.c 		__skb_queue_tail(seg_queue, skb);
skb              2386 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              2402 net/bluetooth/l2cap_core.c 	skb = chan->ops->alloc_skb(chan, hlen, count,
skb              2404 net/bluetooth/l2cap_core.c 	if (IS_ERR(skb))
skb              2405 net/bluetooth/l2cap_core.c 		return skb;
skb              2408 net/bluetooth/l2cap_core.c 	lh = skb_put(skb, L2CAP_HDR_SIZE);
skb              2413 net/bluetooth/l2cap_core.c 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
skb              2415 net/bluetooth/l2cap_core.c 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
skb              2417 net/bluetooth/l2cap_core.c 		kfree_skb(skb);
skb              2421 net/bluetooth/l2cap_core.c 	return skb;
skb              2428 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              2441 net/bluetooth/l2cap_core.c 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
skb              2442 net/bluetooth/l2cap_core.c 		if (IS_ERR(skb)) {
skb              2444 net/bluetooth/l2cap_core.c 			return PTR_ERR(skb);
skb              2447 net/bluetooth/l2cap_core.c 		__skb_queue_tail(seg_queue, skb);
skb              2478 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              2487 net/bluetooth/l2cap_core.c 		skb = l2cap_create_connless_pdu(chan, msg, len);
skb              2488 net/bluetooth/l2cap_core.c 		if (IS_ERR(skb))
skb              2489 net/bluetooth/l2cap_core.c 			return PTR_ERR(skb);
skb              2495 net/bluetooth/l2cap_core.c 			kfree_skb(skb);
skb              2499 net/bluetooth/l2cap_core.c 		l2cap_do_send(chan, skb);
skb              2538 net/bluetooth/l2cap_core.c 		skb = l2cap_create_basic_pdu(chan, msg, len);
skb              2539 net/bluetooth/l2cap_core.c 		if (IS_ERR(skb))
skb              2540 net/bluetooth/l2cap_core.c 			return PTR_ERR(skb);
skb              2546 net/bluetooth/l2cap_core.c 			kfree_skb(skb);
skb              2550 net/bluetooth/l2cap_core.c 		l2cap_do_send(chan, skb);
skb              2894 net/bluetooth/l2cap_core.c static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
skb              2908 net/bluetooth/l2cap_core.c 		if (bt_cb(skb)->l2cap.chan == chan)
skb              2911 net/bluetooth/l2cap_core.c 		nskb = skb_clone(skb, GFP_KERNEL);
skb              2925 net/bluetooth/l2cap_core.c 	struct sk_buff *skb, **frag;
skb              2939 net/bluetooth/l2cap_core.c 	skb = bt_skb_alloc(count, GFP_KERNEL);
skb              2940 net/bluetooth/l2cap_core.c 	if (!skb)
skb              2943 net/bluetooth/l2cap_core.c 	lh = skb_put(skb, L2CAP_HDR_SIZE);
skb              2951 net/bluetooth/l2cap_core.c 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
skb              2958 net/bluetooth/l2cap_core.c 		skb_put_data(skb, data, count);
skb              2962 net/bluetooth/l2cap_core.c 	len -= skb->len;
skb              2965 net/bluetooth/l2cap_core.c 	frag = &skb_shinfo(skb)->frag_list;
skb              2981 net/bluetooth/l2cap_core.c 	return skb;
skb              2984 net/bluetooth/l2cap_core.c 	kfree_skb(skb);
skb              5741 net/bluetooth/l2cap_core.c 					struct sk_buff *skb)
skb              5751 net/bluetooth/l2cap_core.c 	if (skb->len < L2CAP_CMD_HDR_SIZE)
skb              5754 net/bluetooth/l2cap_core.c 	cmd = (void *) skb->data;
skb              5755 net/bluetooth/l2cap_core.c 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
skb              5761 net/bluetooth/l2cap_core.c 	if (len != skb->len || !cmd->ident) {
skb              5766 net/bluetooth/l2cap_core.c 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
skb              5778 net/bluetooth/l2cap_core.c 	kfree_skb(skb);
skb              5782 net/bluetooth/l2cap_core.c 				     struct sk_buff *skb)
skb              5785 net/bluetooth/l2cap_core.c 	u8 *data = skb->data;
skb              5786 net/bluetooth/l2cap_core.c 	int len = skb->len;
skb              5790 net/bluetooth/l2cap_core.c 	l2cap_raw_recv(conn, skb);
skb              5827 net/bluetooth/l2cap_core.c 	kfree_skb(skb);
skb              5830 net/bluetooth/l2cap_core.c static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
skb              5841 net/bluetooth/l2cap_core.c 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
skb              5842 net/bluetooth/l2cap_core.c 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
skb              5843 net/bluetooth/l2cap_core.c 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
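l2cap_check_fcs above relies on an skb property that is easy to misread: skb_trim on linear data only shrinks skb->len, so the two FCS bytes remain readable at skb->data + skb->len immediately after the trim. The check, annotated:

	/* Annotated shape of the FCS verification above. */
	skb_trim(skb, skb->len - L2CAP_FCS_SIZE);	/* exclude FCS from len */
	rcv_fcs = get_unaligned_le16(skb->data + skb->len); /* bytes still present */
	our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
	if (our_fcs != rcv_fcs)
		return -EBADMSG;	/* caller drops the frame */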
skb              5885 net/bluetooth/l2cap_core.c static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
skb              5891 net/bluetooth/l2cap_core.c 	if (!skb_has_frag_list(skb))
skb              5892 net/bluetooth/l2cap_core.c 		skb_shinfo(skb)->frag_list = new_frag;
skb              5899 net/bluetooth/l2cap_core.c 	skb->len += new_frag->len;
skb              5900 net/bluetooth/l2cap_core.c 	skb->data_len += new_frag->len;
skb              5901 net/bluetooth/l2cap_core.c 	skb->truesize += new_frag->truesize;
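append_skb_frag above keeps three counters in sync whenever a fragment is chained onto the frag_list; if any one drifts, later length checks and memory accounting break. The invariant:

	/* Invariant maintained by append_skb_frag above. */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;
	else
		(*last_frag)->next = new_frag;	/* append at the cached tail */
	*last_frag = new_frag;

	skb->len      += new_frag->len;		/* total payload bytes */
	skb->data_len += new_frag->len;		/* bytes outside the linear area */
	skb->truesize += new_frag->truesize;	/* allocation accounting */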
skb              5904 net/bluetooth/l2cap_core.c static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
skb              5914 net/bluetooth/l2cap_core.c 		err = chan->ops->recv(chan, skb);
skb              5921 net/bluetooth/l2cap_core.c 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
skb              5924 net/bluetooth/l2cap_core.c 		chan->sdu_len = get_unaligned_le16(skb->data);
skb              5925 net/bluetooth/l2cap_core.c 		skb_pull(skb, L2CAP_SDULEN_SIZE);
skb              5932 net/bluetooth/l2cap_core.c 		if (skb->len >= chan->sdu_len)
skb              5935 net/bluetooth/l2cap_core.c 		chan->sdu = skb;
skb              5936 net/bluetooth/l2cap_core.c 		chan->sdu_last_frag = skb;
skb              5938 net/bluetooth/l2cap_core.c 		skb = NULL;
skb              5946 net/bluetooth/l2cap_core.c 		append_skb_frag(chan->sdu, skb,
skb              5948 net/bluetooth/l2cap_core.c 		skb = NULL;
skb              5960 net/bluetooth/l2cap_core.c 		append_skb_frag(chan->sdu, skb,
skb              5962 net/bluetooth/l2cap_core.c 		skb = NULL;
skb              5979 net/bluetooth/l2cap_core.c 		kfree_skb(skb);
skb              6016 net/bluetooth/l2cap_core.c 		struct sk_buff *skb;
skb              6020 net/bluetooth/l2cap_core.c 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
skb              6022 net/bluetooth/l2cap_core.c 		if (!skb)
skb              6025 net/bluetooth/l2cap_core.c 		skb_unlink(skb, &chan->srej_q);
skb              6027 net/bluetooth/l2cap_core.c 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
skb              6043 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              6053 net/bluetooth/l2cap_core.c 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
skb              6055 net/bluetooth/l2cap_core.c 	if (skb == NULL) {
skb              6061 net/bluetooth/l2cap_core.c 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
skb              6101 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              6111 net/bluetooth/l2cap_core.c 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
skb              6113 net/bluetooth/l2cap_core.c 	if (chan->max_tx && skb &&
skb              6114 net/bluetooth/l2cap_core.c 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
skb              6223 net/bluetooth/l2cap_core.c 			       struct sk_buff *skb, u8 event)
skb              6228 net/bluetooth/l2cap_core.c 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
skb              6249 net/bluetooth/l2cap_core.c 			err = l2cap_reassemble_sdu(chan, skb, control);
skb              6282 net/bluetooth/l2cap_core.c 			skb_queue_tail(&chan->srej_q, skb);
skb              6284 net/bluetooth/l2cap_core.c 			BT_DBG("Queued %p (queue len %d)", skb,
skb              6347 net/bluetooth/l2cap_core.c 	if (skb && !skb_in_use) {
skb              6348 net/bluetooth/l2cap_core.c 		BT_DBG("Freeing %p", skb);
skb              6349 net/bluetooth/l2cap_core.c 		kfree_skb(skb);
skb              6357 net/bluetooth/l2cap_core.c 				    struct sk_buff *skb, u8 event)
skb              6363 net/bluetooth/l2cap_core.c 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
skb              6372 net/bluetooth/l2cap_core.c 			skb_queue_tail(&chan->srej_q, skb);
skb              6374 net/bluetooth/l2cap_core.c 			BT_DBG("Queued %p (queue len %d)", skb,
skb              6383 net/bluetooth/l2cap_core.c 			skb_queue_tail(&chan->srej_q, skb);
skb              6385 net/bluetooth/l2cap_core.c 			BT_DBG("Queued %p (queue len %d)", skb,
skb              6398 net/bluetooth/l2cap_core.c 			skb_queue_tail(&chan->srej_q, skb);
skb              6400 net/bluetooth/l2cap_core.c 			BT_DBG("Queued %p (queue len %d)", skb,
skb              6412 net/bluetooth/l2cap_core.c 			skb_queue_tail(&chan->srej_q, skb);
skb              6414 net/bluetooth/l2cap_core.c 			BT_DBG("Queued %p (queue len %d)", skb,
skb              6490 net/bluetooth/l2cap_core.c 	if (skb && !skb_in_use) {
skb              6491 net/bluetooth/l2cap_core.c 		BT_DBG("Freeing %p", skb);
skb              6492 net/bluetooth/l2cap_core.c 		kfree_skb(skb);
skb              6514 net/bluetooth/l2cap_core.c 				 struct sk_buff *skb, u8 event)
skb              6518 net/bluetooth/l2cap_core.c 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
skb              6552 net/bluetooth/l2cap_core.c 				 struct sk_buff *skb, u8 event)
skb              6583 net/bluetooth/l2cap_core.c 		err = l2cap_rx_state_recv(chan, control, skb, event);
skb              6598 net/bluetooth/l2cap_core.c 		    struct sk_buff *skb, u8 event)
skb              6603 net/bluetooth/l2cap_core.c 	       control, skb, event, chan->rx_state);
skb              6608 net/bluetooth/l2cap_core.c 			err = l2cap_rx_state_recv(chan, control, skb, event);
skb              6611 net/bluetooth/l2cap_core.c 			err = l2cap_rx_state_srej_sent(chan, control, skb,
skb              6615 net/bluetooth/l2cap_core.c 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
skb              6618 net/bluetooth/l2cap_core.c 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
skb              6635 net/bluetooth/l2cap_core.c 			   struct sk_buff *skb)
skb              6637 net/bluetooth/l2cap_core.c 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
skb              6649 net/bluetooth/l2cap_core.c 		l2cap_reassemble_sdu(chan, skb, control);
skb              6658 net/bluetooth/l2cap_core.c 		if (skb) {
skb              6659 net/bluetooth/l2cap_core.c 			BT_DBG("Freeing %p", skb);
skb              6660 net/bluetooth/l2cap_core.c 			kfree_skb(skb);
skb              6670 net/bluetooth/l2cap_core.c static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
skb              6672 net/bluetooth/l2cap_core.c 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
skb              6676 net/bluetooth/l2cap_core.c 	__unpack_control(chan, skb);
skb              6678 net/bluetooth/l2cap_core.c 	len = skb->len;
skb              6685 net/bluetooth/l2cap_core.c 	if (l2cap_check_fcs(chan, skb))
skb              6700 net/bluetooth/l2cap_core.c 	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
skb              6718 net/bluetooth/l2cap_core.c 			err = l2cap_rx(chan, control, skb, event);
skb              6720 net/bluetooth/l2cap_core.c 			err = l2cap_stream_rx(chan, control, skb);
skb              6751 net/bluetooth/l2cap_core.c 		if (l2cap_rx(chan, control, skb, event))
skb              6758 net/bluetooth/l2cap_core.c 	kfree_skb(skb);
skb              6785 net/bluetooth/l2cap_core.c static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
skb              6789 net/bluetooth/l2cap_core.c 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
skb              6792 net/bluetooth/l2cap_core.c 	err = chan->ops->recv(chan, skb);
skb              6800 net/bluetooth/l2cap_core.c static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
skb              6810 net/bluetooth/l2cap_core.c 	if (chan->imtu < skb->len) {
skb              6829 net/bluetooth/l2cap_core.c 		sdu_len = get_unaligned_le16(skb->data);
skb              6830 net/bluetooth/l2cap_core.c 		skb_pull(skb, L2CAP_SDULEN_SIZE);
skb              6833 net/bluetooth/l2cap_core.c 		       sdu_len, skb->len, chan->imtu);
skb              6841 net/bluetooth/l2cap_core.c 		if (skb->len > sdu_len) {
skb              6847 net/bluetooth/l2cap_core.c 		if (skb->len == sdu_len)
skb              6848 net/bluetooth/l2cap_core.c 			return l2cap_le_recv(chan, skb);
skb              6850 net/bluetooth/l2cap_core.c 		chan->sdu = skb;
skb              6852 net/bluetooth/l2cap_core.c 		chan->sdu_last_frag = skb;
skb              6855 net/bluetooth/l2cap_core.c 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
skb              6856 net/bluetooth/l2cap_core.c 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
skb              6868 net/bluetooth/l2cap_core.c 	       chan->sdu->len, skb->len, chan->sdu_len);
skb              6870 net/bluetooth/l2cap_core.c 	if (chan->sdu->len + skb->len > chan->sdu_len) {
skb              6876 net/bluetooth/l2cap_core.c 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
skb              6877 net/bluetooth/l2cap_core.c 	skb = NULL;
skb              6890 net/bluetooth/l2cap_core.c 		kfree_skb(skb);
skb              6905 net/bluetooth/l2cap_core.c 			       struct sk_buff *skb)
skb              6912 net/bluetooth/l2cap_core.c 			chan = a2mp_channel_create(conn, skb);
skb              6914 net/bluetooth/l2cap_core.c 				kfree_skb(skb);
skb              6922 net/bluetooth/l2cap_core.c 			kfree_skb(skb);
skb              6927 net/bluetooth/l2cap_core.c 	BT_DBG("chan %p, len %d", chan, skb->len);
skb              6941 net/bluetooth/l2cap_core.c 		if (l2cap_le_data_rcv(chan, skb) < 0)
skb              6952 net/bluetooth/l2cap_core.c 		if (chan->imtu < skb->len) {
skb              6957 net/bluetooth/l2cap_core.c 		if (!chan->ops->recv(chan, skb))
skb              6963 net/bluetooth/l2cap_core.c 		l2cap_data_rcv(chan, skb);
skb              6972 net/bluetooth/l2cap_core.c 	kfree_skb(skb);
skb              6979 net/bluetooth/l2cap_core.c 				  struct sk_buff *skb)
skb              6992 net/bluetooth/l2cap_core.c 	BT_DBG("chan %p, len %d", chan, skb->len);
skb              6997 net/bluetooth/l2cap_core.c 	if (chan->imtu < skb->len)
skb              7001 net/bluetooth/l2cap_core.c 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
skb              7002 net/bluetooth/l2cap_core.c 	bt_cb(skb)->l2cap.psm = psm;
skb              7004 net/bluetooth/l2cap_core.c 	if (!chan->ops->recv(chan, skb)) {
skb              7012 net/bluetooth/l2cap_core.c 	kfree_skb(skb);
skb              7015 net/bluetooth/l2cap_core.c static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
skb              7017 net/bluetooth/l2cap_core.c 	struct l2cap_hdr *lh = (void *) skb->data;
skb              7024 net/bluetooth/l2cap_core.c 		skb_queue_tail(&conn->pending_rx, skb);
skb              7028 net/bluetooth/l2cap_core.c 	skb_pull(skb, L2CAP_HDR_SIZE);
skb              7032 net/bluetooth/l2cap_core.c 	if (len != skb->len) {
skb              7033 net/bluetooth/l2cap_core.c 		kfree_skb(skb);
skb              7043 net/bluetooth/l2cap_core.c 		kfree_skb(skb);
skb              7051 net/bluetooth/l2cap_core.c 		l2cap_sig_channel(conn, skb);
skb              7055 net/bluetooth/l2cap_core.c 		psm = get_unaligned((__le16 *) skb->data);
skb              7056 net/bluetooth/l2cap_core.c 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
skb              7057 net/bluetooth/l2cap_core.c 		l2cap_conless_channel(conn, psm, skb);
skb              7061 net/bluetooth/l2cap_core.c 		l2cap_le_sig_channel(conn, skb);
skb              7065 net/bluetooth/l2cap_core.c 		l2cap_data_channel(conn, cid, skb);
skb              7074 net/bluetooth/l2cap_core.c 	struct sk_buff *skb;
skb              7078 net/bluetooth/l2cap_core.c 	while ((skb = skb_dequeue(&conn->pending_rx)))
skb              7079 net/bluetooth/l2cap_core.c 		l2cap_recv_frame(conn, skb);
skb              7579 net/bluetooth/l2cap_core.c void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
skb              7595 net/bluetooth/l2cap_core.c 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
skb              7602 net/bluetooth/l2cap_core.c 			BT_ERR("Unexpected start frame (len %d)", skb->len);
skb              7610 net/bluetooth/l2cap_core.c 		if (skb->len < L2CAP_HDR_SIZE) {
skb              7611 net/bluetooth/l2cap_core.c 			BT_ERR("Frame is too short (len %d)", skb->len);
skb              7616 net/bluetooth/l2cap_core.c 		hdr = (struct l2cap_hdr *) skb->data;
skb              7619 net/bluetooth/l2cap_core.c 		if (len == skb->len) {
skb              7621 net/bluetooth/l2cap_core.c 			l2cap_recv_frame(conn, skb);
skb              7625 net/bluetooth/l2cap_core.c 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
skb              7627 net/bluetooth/l2cap_core.c 		if (skb->len > len) {
skb              7629 net/bluetooth/l2cap_core.c 			       skb->len, len);
skb              7639 net/bluetooth/l2cap_core.c 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
skb              7640 net/bluetooth/l2cap_core.c 					  skb->len);
skb              7641 net/bluetooth/l2cap_core.c 		conn->rx_len = len - skb->len;
skb              7645 net/bluetooth/l2cap_core.c 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
skb              7648 net/bluetooth/l2cap_core.c 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
skb              7653 net/bluetooth/l2cap_core.c 		if (skb->len > conn->rx_len) {
skb              7655 net/bluetooth/l2cap_core.c 			       skb->len, conn->rx_len);
skb              7663 net/bluetooth/l2cap_core.c 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
skb              7664 net/bluetooth/l2cap_core.c 					  skb->len);
skb              7665 net/bluetooth/l2cap_core.c 		conn->rx_len -= skb->len;
skb              7680 net/bluetooth/l2cap_core.c 	kfree_skb(skb);
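
The l2cap_recv_frame()/l2cap_recv_acldata() entries above follow the usual start/continuation reassembly pattern: the start fragment carries the 4-byte L2CAP basic header whose length field says how much payload to expect, and continuation fragments are copied into conn->rx_skb until rx_len reaches zero. A minimal sketch of just the header parse, assuming kernel context; my_l2cap_parse_hdr and MY_L2CAP_HDR_SIZE are illustrative names, not from the source:

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <asm/unaligned.h>

	#define MY_L2CAP_HDR_SIZE 4	/* __le16 len + __le16 cid, as in struct l2cap_hdr */

	/* Validate and strip an L2CAP basic header from a linear skb. */
	static int my_l2cap_parse_hdr(struct sk_buff *skb, u16 *len, u16 *cid)
	{
		if (skb->len < MY_L2CAP_HDR_SIZE)
			return -EINVAL;			/* the "Frame is too short" case */

		*len = get_unaligned_le16(skb->data);	  /* expected payload length */
		*cid = get_unaligned_le16(skb->data + 2); /* destination channel id */
		skb_pull(skb, MY_L2CAP_HDR_SIZE);

		return skb->len == *len ? 0 : -EMSGSIZE; /* anything else needs reassembly */
	}
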
skb              1265 net/bluetooth/l2cap_sock.c static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
skb              1282 net/bluetooth/l2cap_sock.c 		err = sk_filter(sk, skb);
skb              1287 net/bluetooth/l2cap_sock.c 	err = __sock_queue_rcv_skb(sk, skb);
skb              1299 net/bluetooth/l2cap_sock.c 		l2cap_pi(sk)->rx_busy_skb = skb;
skb              1383 net/bluetooth/l2cap_sock.c 	struct sk_buff *skb;
skb              1387 net/bluetooth/l2cap_sock.c 	skb = bt_skb_send_alloc(sk, hdr_len + len, nb, &err);
skb              1390 net/bluetooth/l2cap_sock.c 	if (!skb)
skb              1393 net/bluetooth/l2cap_sock.c 	skb->priority = sk->sk_priority;
skb              1395 net/bluetooth/l2cap_sock.c 	bt_cb(skb)->l2cap.chan = chan;
skb              1397 net/bluetooth/l2cap_sock.c 	return skb;
skb              1502 net/bluetooth/l2cap_sock.c static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,
skb              1509 net/bluetooth/l2cap_sock.c 	la->l2_psm = bt_cb(skb)->l2cap.psm;
skb              1510 net/bluetooth/l2cap_sock.c 	bacpy(&la->l2_bdaddr, &bt_cb(skb)->l2cap.bdaddr);
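
The l2cap_sock_recv_cb() lines show the common receive-callback shape: run the attached socket filter, then try the receive queue; on a full queue the channel keeps ownership of the skb (the listing parks it in rx_busy_skb). A rough sketch under those assumptions, with my_recv_cb purely illustrative:

	#include <linux/skbuff.h>
	#include <net/sock.h>

	static int my_recv_cb(struct sock *sk, struct sk_buff *skb)
	{
		int err;

		err = sk_filter(sk, skb);	/* attached BPF socket filter, if any */
		if (err)
			return err;		/* caller decides the skb's fate */

		return __sock_queue_rcv_skb(sk, skb);	/* < 0 when rcvbuf is full */
	}
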
skb              3349 net/bluetooth/mgmt.c 				     u16 opcode, struct sk_buff *skb)
skb              3535 net/bluetooth/mgmt.c 				         u16 opcode, struct sk_buff *skb)
skb              3547 net/bluetooth/mgmt.c 	if (status || !skb) {
skb              3556 net/bluetooth/mgmt.c 		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
skb              3558 net/bluetooth/mgmt.c 		if (skb->len < sizeof(*rp)) {
skb              3570 net/bluetooth/mgmt.c 		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
skb              3572 net/bluetooth/mgmt.c 		if (skb->len < sizeof(*rp)) {
skb              6017 net/bluetooth/mgmt.c 					     u16 opcode, struct sk_buff *skb)
skb              6045 net/bluetooth/mgmt.c 		if (skb->len != sizeof(*rp)) {
skb              6050 net/bluetooth/mgmt.c 			rp = (void *)skb->data;
skb              6061 net/bluetooth/mgmt.c 		if (skb->len != sizeof(*rp)) {
skb              6066 net/bluetooth/mgmt.c 			rp = (void *)skb->data;
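
The mgmt.c command-complete handlers above all guard the cast of skb->data with a length check, since a controller reply cannot be trusted to be well-formed. A minimal sketch of that validation; struct my_rp and my_parse_reply are hypothetical:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	struct my_rp {			/* illustrative reply layout */
		u8	status;
		__le16	handle;
	} __packed;

	static int my_parse_reply(struct sk_buff *skb)
	{
		const struct my_rp *rp;

		if (skb->len < sizeof(*rp))	/* never trust the controller */
			return -EPROTO;

		rp = (const void *)skb->data;
		return rp->status ? -EIO : 0;
	}
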
skb                37 net/bluetooth/mgmt_util.c 	struct sk_buff *skb;
skb                39 net/bluetooth/mgmt_util.c 	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
skb                40 net/bluetooth/mgmt_util.c 	if (!skb)
skb                43 net/bluetooth/mgmt_util.c 	put_unaligned_le32(cookie, skb_put(skb, 4));
skb                44 net/bluetooth/mgmt_util.c 	put_unaligned_le16(opcode, skb_put(skb, 2));
skb                47 net/bluetooth/mgmt_util.c 		skb_put_data(skb, buf, len);
skb                49 net/bluetooth/mgmt_util.c 	__net_timestamp(skb);
skb                51 net/bluetooth/mgmt_util.c 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
skb                54 net/bluetooth/mgmt_util.c 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
skb                56 net/bluetooth/mgmt_util.c 	return skb;
skb                62 net/bluetooth/mgmt_util.c 	struct sk_buff *skb;
skb                65 net/bluetooth/mgmt_util.c 	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
skb                66 net/bluetooth/mgmt_util.c 	if (!skb)
skb                69 net/bluetooth/mgmt_util.c 	hdr = skb_put(skb, sizeof(*hdr));
skb                78 net/bluetooth/mgmt_util.c 		skb_put_data(skb, data, data_len);
skb                81 net/bluetooth/mgmt_util.c 	__net_timestamp(skb);
skb                83 net/bluetooth/mgmt_util.c 	hci_send_to_channel(channel, skb, flag, skip_sk);
skb                87 net/bluetooth/mgmt_util.c 					    skb_get_ktime(skb), flag, skip_sk);
skb                89 net/bluetooth/mgmt_util.c 	kfree_skb(skb);
skb                95 net/bluetooth/mgmt_util.c 	struct sk_buff *skb, *mskb;
skb               102 net/bluetooth/mgmt_util.c 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
skb               103 net/bluetooth/mgmt_util.c 	if (!skb)
skb               106 net/bluetooth/mgmt_util.c 	hdr = skb_put(skb, sizeof(*hdr));
skb               112 net/bluetooth/mgmt_util.c 	ev = skb_put(skb, sizeof(*ev));
skb               119 net/bluetooth/mgmt_util.c 		skb->tstamp = mskb->tstamp;
skb               121 net/bluetooth/mgmt_util.c 		__net_timestamp(skb);
skb               123 net/bluetooth/mgmt_util.c 	err = sock_queue_rcv_skb(sk, skb);
skb               125 net/bluetooth/mgmt_util.c 		kfree_skb(skb);
skb               139 net/bluetooth/mgmt_util.c 	struct sk_buff *skb, *mskb;
skb               146 net/bluetooth/mgmt_util.c 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
skb               147 net/bluetooth/mgmt_util.c 	if (!skb)
skb               150 net/bluetooth/mgmt_util.c 	hdr = skb_put(skb, sizeof(*hdr));
skb               156 net/bluetooth/mgmt_util.c 	ev = skb_put(skb, sizeof(*ev) + rp_len);
skb               167 net/bluetooth/mgmt_util.c 		skb->tstamp = mskb->tstamp;
skb               169 net/bluetooth/mgmt_util.c 		__net_timestamp(skb);
skb               171 net/bluetooth/mgmt_util.c 	err = sock_queue_rcv_skb(sk, skb);
skb               173 net/bluetooth/mgmt_util.c 		kfree_skb(skb);
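
mgmt_util.c builds its event skbs the same way throughout: allocate with the full size up front, append with skb_put()/skb_put_data(), timestamp, and free the skb yourself if sock_queue_rcv_skb() rejects it. A condensed sketch (my_send_event is illustrative):

	#include <linux/skbuff.h>
	#include <net/sock.h>

	static int my_send_event(struct sock *sk, const void *data, u16 len)
	{
		struct sk_buff *skb;

		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		skb_put_data(skb, data, len);	/* copy payload into the tail room */
		__net_timestamp(skb);		/* stamp it like the mgmt code does */

		if (sock_queue_rcv_skb(sk, skb) < 0) {
			kfree_skb(skb);		/* queueing failed: we still own it */
			return -ENOBUFS;
		}
		return 0;
	}
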
skb                63 net/bluetooth/rfcomm/core.c static void rfcomm_make_uih(struct sk_buff *skb, u8 addr);
skb               556 net/bluetooth/rfcomm/core.c int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
skb               558 net/bluetooth/rfcomm/core.c 	int len = skb->len;
skb               568 net/bluetooth/rfcomm/core.c 	rfcomm_make_uih(skb, d->addr);
skb               569 net/bluetooth/rfcomm/core.c 	skb_queue_tail(&d->tx_queue, skb);
skb               576 net/bluetooth/rfcomm/core.c void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb)
skb               578 net/bluetooth/rfcomm/core.c 	int len = skb->len;
skb               582 net/bluetooth/rfcomm/core.c 	rfcomm_make_uih(skb, d->addr);
skb               583 net/bluetooth/rfcomm/core.c 	skb_queue_tail(&d->tx_queue, skb);
skb               859 net/bluetooth/rfcomm/core.c 	struct sk_buff *skb;
skb               863 net/bluetooth/rfcomm/core.c 	skb = alloc_skb(sizeof(*cmd), GFP_KERNEL);
skb               864 net/bluetooth/rfcomm/core.c 	if (!skb)
skb               867 net/bluetooth/rfcomm/core.c 	cmd = __skb_put(skb, sizeof(*cmd));
skb               873 net/bluetooth/rfcomm/core.c 	skb_queue_tail(&d->tx_queue, skb);
skb              1146 net/bluetooth/rfcomm/core.c static void rfcomm_make_uih(struct sk_buff *skb, u8 addr)
skb              1149 net/bluetooth/rfcomm/core.c 	int len = skb->len;
skb              1153 net/bluetooth/rfcomm/core.c 		hdr = skb_push(skb, 4);
skb              1156 net/bluetooth/rfcomm/core.c 		hdr = skb_push(skb, 3);
skb              1162 net/bluetooth/rfcomm/core.c 	crc = skb_put(skb, 1);
skb              1395 net/bluetooth/rfcomm/core.c static int rfcomm_recv_pn(struct rfcomm_session *s, int cr, struct sk_buff *skb)
skb              1397 net/bluetooth/rfcomm/core.c 	struct rfcomm_pn *pn = (void *) skb->data;
skb              1447 net/bluetooth/rfcomm/core.c static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_buff *skb)
skb              1449 net/bluetooth/rfcomm/core.c 	struct rfcomm_rpn *rpn = (void *) skb->data;
skb              1553 net/bluetooth/rfcomm/core.c static int rfcomm_recv_rls(struct rfcomm_session *s, int cr, struct sk_buff *skb)
skb              1555 net/bluetooth/rfcomm/core.c 	struct rfcomm_rls *rls = (void *) skb->data;
skb              1572 net/bluetooth/rfcomm/core.c static int rfcomm_recv_msc(struct rfcomm_session *s, int cr, struct sk_buff *skb)
skb              1574 net/bluetooth/rfcomm/core.c 	struct rfcomm_msc *msc = (void *) skb->data;
skb              1608 net/bluetooth/rfcomm/core.c static int rfcomm_recv_mcc(struct rfcomm_session *s, struct sk_buff *skb)
skb              1610 net/bluetooth/rfcomm/core.c 	struct rfcomm_mcc *mcc = (void *) skb->data;
skb              1619 net/bluetooth/rfcomm/core.c 	skb_pull(skb, 2);
skb              1623 net/bluetooth/rfcomm/core.c 		rfcomm_recv_pn(s, cr, skb);
skb              1627 net/bluetooth/rfcomm/core.c 		rfcomm_recv_rpn(s, cr, len, skb);
skb              1631 net/bluetooth/rfcomm/core.c 		rfcomm_recv_rls(s, cr, skb);
skb              1635 net/bluetooth/rfcomm/core.c 		rfcomm_recv_msc(s, cr, skb);
skb              1654 net/bluetooth/rfcomm/core.c 			rfcomm_send_test(s, 0, skb->data, skb->len);
skb              1668 net/bluetooth/rfcomm/core.c static int rfcomm_recv_data(struct rfcomm_session *s, u8 dlci, int pf, struct sk_buff *skb)
skb              1681 net/bluetooth/rfcomm/core.c 		u8 credits = *(u8 *) skb->data; skb_pull(skb, 1);
skb              1688 net/bluetooth/rfcomm/core.c 	if (skb->len && d->state == BT_CONNECTED) {
skb              1691 net/bluetooth/rfcomm/core.c 		d->data_ready(d, skb);
skb              1697 net/bluetooth/rfcomm/core.c 	kfree_skb(skb);
skb              1702 net/bluetooth/rfcomm/core.c 						struct sk_buff *skb)
skb              1704 net/bluetooth/rfcomm/core.c 	struct rfcomm_hdr *hdr = (void *) skb->data;
skb              1709 net/bluetooth/rfcomm/core.c 		kfree_skb(skb);
skb              1717 net/bluetooth/rfcomm/core.c 	skb->len--; skb->tail--;
skb              1718 net/bluetooth/rfcomm/core.c 	fcs = *(u8 *)skb_tail_pointer(skb);
skb              1720 net/bluetooth/rfcomm/core.c 	if (__check_fcs(skb->data, type, fcs)) {
skb              1722 net/bluetooth/rfcomm/core.c 		kfree_skb(skb);
skb              1727 net/bluetooth/rfcomm/core.c 		skb_pull(skb, 3);
skb              1729 net/bluetooth/rfcomm/core.c 		skb_pull(skb, 4);
skb              1753 net/bluetooth/rfcomm/core.c 			rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb);
skb              1756 net/bluetooth/rfcomm/core.c 		rfcomm_recv_mcc(s, skb);
skb              1763 net/bluetooth/rfcomm/core.c 	kfree_skb(skb);
skb              1793 net/bluetooth/rfcomm/core.c 	struct sk_buff *skb;
skb              1820 net/bluetooth/rfcomm/core.c 	while (d->tx_credits && (skb = skb_dequeue(&d->tx_queue))) {
skb              1821 net/bluetooth/rfcomm/core.c 		err = rfcomm_send_frame(d->session, skb->data, skb->len);
skb              1823 net/bluetooth/rfcomm/core.c 			skb_queue_head(&d->tx_queue, skb);
skb              1826 net/bluetooth/rfcomm/core.c 		kfree_skb(skb);
skb              1900 net/bluetooth/rfcomm/core.c 	struct sk_buff *skb;
skb              1905 net/bluetooth/rfcomm/core.c 	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb              1906 net/bluetooth/rfcomm/core.c 		skb_orphan(skb);
skb              1907 net/bluetooth/rfcomm/core.c 		if (!skb_linearize(skb)) {
skb              1908 net/bluetooth/rfcomm/core.c 			s = rfcomm_recv_frame(s, skb);
skb              1912 net/bluetooth/rfcomm/core.c 			kfree_skb(skb);
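
rfcomm_make_uih() is why the rfcomm tx path reserves both head- and tailroom: the UIH header is prepended with skb_push() (3 or 4 bytes depending on whether the length fits in 7 bits) and the FCS byte is appended with skb_put(). A sketch of the framing as I read it, hedged on the exact bit layout; my_make_uih is not the kernel function:

	#include <linux/skbuff.h>
	#include <asm/unaligned.h>

	#define MY_RFCOMM_UIH 0xef	/* UIH control field value used by the kernel */

	static void my_make_uih(struct sk_buff *skb, u8 addr, u8 fcs)
	{
		int len = skb->len;
		u8 *hdr;

		if (len > 127) {			/* two-byte length field */
			hdr = skb_push(skb, 4);		/* needs 4 bytes of headroom */
			put_unaligned_le16(len << 1, hdr + 2);
		} else {
			hdr = skb_push(skb, 3);
			hdr[2] = (len << 1) | 1;	/* EA bit terminates the field */
		}
		hdr[0] = addr;
		hdr[1] = MY_RFCOMM_UIH;

		*(u8 *)skb_put(skb, 1) = fcs;		/* trailing checksum byte */
	}
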
skb                50 net/bluetooth/rfcomm/sock.c static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
skb                56 net/bluetooth/rfcomm/sock.c 	atomic_add(skb->len, &sk->sk_rmem_alloc);
skb                57 net/bluetooth/rfcomm/sock.c 	skb_queue_tail(&sk->sk_receive_queue, skb);
skb               564 net/bluetooth/rfcomm/sock.c 	struct sk_buff *skb;
skb               588 net/bluetooth/rfcomm/sock.c 		skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
skb               590 net/bluetooth/rfcomm/sock.c 		if (!skb) {
skb               595 net/bluetooth/rfcomm/sock.c 		skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
skb               597 net/bluetooth/rfcomm/sock.c 		err = memcpy_from_msg(skb_put(skb, size), msg, size);
skb               599 net/bluetooth/rfcomm/sock.c 			kfree_skb(skb);
skb               605 net/bluetooth/rfcomm/sock.c 		skb->priority = sk->sk_priority;
skb               607 net/bluetooth/rfcomm/sock.c 		err = rfcomm_dlc_send(d, skb);
skb               609 net/bluetooth/rfcomm/sock.c 			kfree_skb(skb);
skb                75 net/bluetooth/rfcomm/tty.c static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
skb               280 net/bluetooth/rfcomm/tty.c 		struct sk_buff *skb;
skb               286 net/bluetooth/rfcomm/tty.c 		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb               287 net/bluetooth/rfcomm/tty.c 			skb_orphan(skb);
skb               288 net/bluetooth/rfcomm/tty.c 			skb_queue_tail(&dev->pending, skb);
skb               289 net/bluetooth/rfcomm/tty.c 			atomic_sub(skb->len, &sk->sk_rmem_alloc);
skb               361 net/bluetooth/rfcomm/tty.c static void rfcomm_wfree(struct sk_buff *skb)
skb               363 net/bluetooth/rfcomm/tty.c 	struct rfcomm_dev *dev = (void *) skb->sk;
skb               370 net/bluetooth/rfcomm/tty.c static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
skb               374 net/bluetooth/rfcomm/tty.c 	skb->sk = (void *) dev;
skb               375 net/bluetooth/rfcomm/tty.c 	skb->destructor = rfcomm_wfree;
skb               380 net/bluetooth/rfcomm/tty.c 	struct sk_buff *skb = alloc_skb(size, priority);
skb               381 net/bluetooth/rfcomm/tty.c 	if (skb)
skb               382 net/bluetooth/rfcomm/tty.c 		rfcomm_set_owner_w(skb, dev);
skb               383 net/bluetooth/rfcomm/tty.c 	return skb;
skb               602 net/bluetooth/rfcomm/tty.c static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
skb               607 net/bluetooth/rfcomm/tty.c 		kfree_skb(skb);
skb               612 net/bluetooth/rfcomm/tty.c 		skb_queue_tail(&dev->pending, skb);
skb               616 net/bluetooth/rfcomm/tty.c 	BT_DBG("dlc %p len %d", dlc, skb->len);
skb               618 net/bluetooth/rfcomm/tty.c 	tty_insert_flip_string(&dev->port, skb->data, skb->len);
skb               621 net/bluetooth/rfcomm/tty.c 	kfree_skb(skb);
skb               662 net/bluetooth/rfcomm/tty.c 	struct sk_buff *skb;
skb               669 net/bluetooth/rfcomm/tty.c 	while ((skb = skb_dequeue(&dev->pending))) {
skb               670 net/bluetooth/rfcomm/tty.c 		inserted += tty_insert_flip_string(&dev->port, skb->data,
skb               671 net/bluetooth/rfcomm/tty.c 				skb->len);
skb               672 net/bluetooth/rfcomm/tty.c 		kfree_skb(skb);
skb               785 net/bluetooth/rfcomm/tty.c 	struct sk_buff *skb;
skb               793 net/bluetooth/rfcomm/tty.c 		skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC);
skb               794 net/bluetooth/rfcomm/tty.c 		if (!skb)
skb               797 net/bluetooth/rfcomm/tty.c 		skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
skb               799 net/bluetooth/rfcomm/tty.c 		skb_put_data(skb, buf + sent, size);
skb               801 net/bluetooth/rfcomm/tty.c 		rfcomm_dlc_send_noerror(dlc, skb);
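
rfcomm_set_owner_w()/rfcomm_wfree() overload skb->sk as a device cookie and use the skb destructor as a transmit-completion hook for write-side flow control. A loose sketch of that trick; struct my_dev, the byte accounting, and the wakeup are assumptions layered on the pattern, not the kernel's exact bookkeeping:

	#include <linux/skbuff.h>
	#include <linux/wait.h>
	#include <linux/atomic.h>

	struct my_dev {
		atomic_t		wmem_alloc;	/* bytes in flight */
		wait_queue_head_t	wait;		/* writers blocked on tx room */
	};

	static void my_wfree(struct sk_buff *skb)
	{
		struct my_dev *dev = (void *)skb->sk;	/* ->sk reused as a cookie */

		atomic_sub(skb->truesize, &dev->wmem_alloc);
		wake_up_interruptible(&dev->wait);	/* tx room is back */
	}

	static void my_set_owner_w(struct sk_buff *skb, struct my_dev *dev)
	{
		atomic_add(skb->truesize, &dev->wmem_alloc);
		skb->sk = (void *)dev;
		skb->destructor = my_wfree;		/* runs on the final kfree_skb() */
	}
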
skb               278 net/bluetooth/sco.c 	struct sk_buff *skb;
skb               287 net/bluetooth/sco.c 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
skb               288 net/bluetooth/sco.c 	if (!skb)
skb               291 net/bluetooth/sco.c 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
skb               292 net/bluetooth/sco.c 		kfree_skb(skb);
skb               296 net/bluetooth/sco.c 	hci_send_sco(conn->hcon, skb);
skb               301 net/bluetooth/sco.c static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
skb               312 net/bluetooth/sco.c 	BT_DBG("sk %p len %d", sk, skb->len);
skb               317 net/bluetooth/sco.c 	if (!sock_queue_rcv_skb(sk, skb))
skb               321 net/bluetooth/sco.c 	kfree_skb(skb);
skb              1136 net/bluetooth/sco.c void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
skb              1143 net/bluetooth/sco.c 	BT_DBG("conn %p len %d", conn, skb->len);
skb              1145 net/bluetooth/sco.c 	if (skb->len) {
skb              1146 net/bluetooth/sco.c 		sco_recv_frame(conn, skb);
skb              1151 net/bluetooth/sco.c 	kfree_skb(skb);
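
sco_send_frame() shows the standard sendmsg copy step: claim the payload area with skb_put() and let memcpy_from_msg() fill it from the user iovec, freeing the skb on a failed copy. A small sketch (my_skb_from_msg is illustrative; the real code uses bt_skb_send_alloc() to get Bluetooth headroom):

	#include <linux/skbuff.h>
	#include <linux/socket.h>

	static struct sk_buff *my_skb_from_msg(struct msghdr *msg, size_t len)
	{
		struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

		if (!skb)
			return NULL;

		if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
			kfree_skb(skb);		/* user copy failed mid-way */
			return NULL;
		}
		return skb;
	}
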
skb              1727 net/bluetooth/smp.c static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
skb              1729 net/bluetooth/smp.c 	struct smp_cmd_pairing rsp, *req = (void *) skb->data;
skb              1738 net/bluetooth/smp.c 	if (skb->len < sizeof(*req))
skb              1764 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*req));
skb              1918 net/bluetooth/smp.c static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
skb              1920 net/bluetooth/smp.c 	struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
skb              1929 net/bluetooth/smp.c 	if (skb->len < sizeof(*rsp))
skb              1935 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*rsp));
skb              2077 net/bluetooth/smp.c static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
skb              2084 net/bluetooth/smp.c 	if (skb->len < sizeof(smp->pcnf))
skb              2087 net/bluetooth/smp.c 	memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
skb              2088 net/bluetooth/smp.c 	skb_pull(skb, sizeof(smp->pcnf));
skb              2119 net/bluetooth/smp.c static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
skb              2130 net/bluetooth/smp.c 	if (skb->len < sizeof(smp->rrnd))
skb              2133 net/bluetooth/smp.c 	memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd));
skb              2134 net/bluetooth/smp.c 	skb_pull(skb, sizeof(smp->rrnd));
skb              2254 net/bluetooth/smp.c static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
skb              2256 net/bluetooth/smp.c 	struct smp_cmd_security_req *rp = (void *) skb->data;
skb              2265 net/bluetooth/smp.c 	if (skb->len < sizeof(*rp))
skb              2304 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*rp));
skb              2451 net/bluetooth/smp.c static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
skb              2453 net/bluetooth/smp.c 	struct smp_cmd_encrypt_info *rp = (void *) skb->data;
skb              2459 net/bluetooth/smp.c 	if (skb->len < sizeof(*rp))
skb              2464 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*rp));
skb              2471 net/bluetooth/smp.c static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
skb              2473 net/bluetooth/smp.c 	struct smp_cmd_master_ident *rp = (void *) skb->data;
skb              2483 net/bluetooth/smp.c 	if (skb->len < sizeof(*rp))
skb              2494 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*rp));
skb              2507 net/bluetooth/smp.c static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb)
skb              2509 net/bluetooth/smp.c 	struct smp_cmd_ident_info *info = (void *) skb->data;
skb              2515 net/bluetooth/smp.c 	if (skb->len < sizeof(*info))
skb              2520 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*info));
skb              2528 net/bluetooth/smp.c 				   struct sk_buff *skb)
skb              2530 net/bluetooth/smp.c 	struct smp_cmd_ident_addr_info *info = (void *) skb->data;
skb              2538 net/bluetooth/smp.c 	if (skb->len < sizeof(*info))
skb              2547 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*info));
skb              2596 net/bluetooth/smp.c static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
skb              2598 net/bluetooth/smp.c 	struct smp_cmd_sign_info *rp = (void *) skb->data;
skb              2605 net/bluetooth/smp.c 	if (skb->len < sizeof(*rp))
skb              2611 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*rp));
skb              2672 net/bluetooth/smp.c static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
skb              2674 net/bluetooth/smp.c 	struct smp_cmd_public_key *key = (void *) skb->data;
skb              2685 net/bluetooth/smp.c 	if (skb->len < sizeof(*key))
skb              2803 net/bluetooth/smp.c static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
skb              2805 net/bluetooth/smp.c 	struct smp_cmd_dhkey_check *check = (void *) skb->data;
skb              2815 net/bluetooth/smp.c 	if (skb->len < sizeof(*check))
skb              2869 net/bluetooth/smp.c 				   struct sk_buff *skb)
skb              2871 net/bluetooth/smp.c 	struct smp_cmd_keypress_notify *kp = (void *) skb->data;
skb              2878 net/bluetooth/smp.c static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
skb              2886 net/bluetooth/smp.c 	if (skb->len < 1)
skb              2894 net/bluetooth/smp.c 	code = skb->data[0];
skb              2895 net/bluetooth/smp.c 	skb_pull(skb, sizeof(code));
skb              2913 net/bluetooth/smp.c 		reason = smp_cmd_pairing_req(conn, skb);
skb              2922 net/bluetooth/smp.c 		reason = smp_cmd_pairing_rsp(conn, skb);
skb              2926 net/bluetooth/smp.c 		reason = smp_cmd_security_req(conn, skb);
skb              2930 net/bluetooth/smp.c 		reason = smp_cmd_pairing_confirm(conn, skb);
skb              2934 net/bluetooth/smp.c 		reason = smp_cmd_pairing_random(conn, skb);
skb              2938 net/bluetooth/smp.c 		reason = smp_cmd_encrypt_info(conn, skb);
skb              2942 net/bluetooth/smp.c 		reason = smp_cmd_master_ident(conn, skb);
skb              2946 net/bluetooth/smp.c 		reason = smp_cmd_ident_info(conn, skb);
skb              2950 net/bluetooth/smp.c 		reason = smp_cmd_ident_addr_info(conn, skb);
skb              2954 net/bluetooth/smp.c 		reason = smp_cmd_sign_info(conn, skb);
skb              2958 net/bluetooth/smp.c 		reason = smp_cmd_public_key(conn, skb);
skb              2962 net/bluetooth/smp.c 		reason = smp_cmd_dhkey_check(conn, skb);
skb              2966 net/bluetooth/smp.c 		reason = smp_cmd_keypress_notify(conn, skb);
skb              2979 net/bluetooth/smp.c 		kfree_skb(skb);
skb              2987 net/bluetooth/smp.c 	kfree_skb(skb);
skb              3114 net/bluetooth/smp.c static int smp_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
skb              3120 net/bluetooth/smp.c 	err = smp_sig_channel(chan, skb);
skb              3137 net/bluetooth/smp.c 	struct sk_buff *skb;
skb              3139 net/bluetooth/smp.c 	skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
skb              3140 net/bluetooth/smp.c 	if (!skb)
skb              3143 net/bluetooth/smp.c 	skb->priority = HCI_PRIO_MAX;
skb              3144 net/bluetooth/smp.c 	bt_cb(skb)->l2cap.chan = chan;
skb              3146 net/bluetooth/smp.c 	return skb;
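
smp_sig_channel() is a one-byte opcode dispatcher: peek skb->data[0], strip it with skb_pull(), and hand the remaining payload to a per-command handler that re-validates skb->len. Sketch of the shape, with hypothetical opcode and handler names:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	static int my_handle_pairing_req(struct sk_buff *skb)
	{
		return 0;	/* stub: real handlers check skb->len < sizeof(req) first */
	}

	static int my_dispatch(struct sk_buff *skb)
	{
		u8 code;

		if (skb->len < 1)
			return -EILSEQ;			/* nothing to dispatch on */

		code = skb->data[0];
		skb_pull(skb, sizeof(code));

		switch (code) {
		case 0x01:				/* e.g. a pairing request */
			return my_handle_pairing_req(skb);
		default:
			return -EOPNOTSUPP;		/* unknown command code */
		}
	}
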
skb               199 net/bpf/test_run.c static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
skb               201 net/bpf/test_run.c 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
skb               224 net/bpf/test_run.c 	skb->priority = __skb->priority;
skb               230 net/bpf/test_run.c static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
skb               232 net/bpf/test_run.c 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
skb               237 net/bpf/test_run.c 	__skb->priority = skb->priority;
skb               250 net/bpf/test_run.c 	struct sk_buff *skb;
skb               289 net/bpf/test_run.c 	skb = build_skb(data, 0);
skb               290 net/bpf/test_run.c 	if (!skb) {
skb               296 net/bpf/test_run.c 	skb->sk = sk;
skb               298 net/bpf/test_run.c 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
skb               299 net/bpf/test_run.c 	__skb_put(skb, size);
skb               300 net/bpf/test_run.c 	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
skb               301 net/bpf/test_run.c 	skb_reset_network_header(skb);
skb               304 net/bpf/test_run.c 		__skb_push(skb, hh_len);
skb               306 net/bpf/test_run.c 		bpf_compute_data_pointers(skb);
skb               307 net/bpf/test_run.c 	ret = convert___skb_to_skb(skb, ctx);
skb               310 net/bpf/test_run.c 	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
skb               314 net/bpf/test_run.c 		if (skb_headroom(skb) < hh_len) {
skb               315 net/bpf/test_run.c 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
skb               317 net/bpf/test_run.c 			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
skb               322 net/bpf/test_run.c 		memset(__skb_push(skb, hh_len), 0, hh_len);
skb               324 net/bpf/test_run.c 	convert_skb_to___skb(skb, ctx);
skb               326 net/bpf/test_run.c 	size = skb->len;
skb               328 net/bpf/test_run.c 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
skb               329 net/bpf/test_run.c 		size = skb_headlen(skb);
skb               330 net/bpf/test_run.c 	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
skb               335 net/bpf/test_run.c 	kfree_skb(skb);
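
bpf_prog_test_run_skb() wraps a preallocated buffer in an skb with build_skb() (no copy), then reserves driver-style headroom before claiming the payload. A trimmed sketch under the same assumption the real code makes, namely that `data` was allocated with the headroom and skb_shared_info tailroom included; my_build_test_skb is illustrative:

	#include <linux/skbuff.h>
	#include <linux/etherdevice.h>

	static struct sk_buff *my_build_test_skb(void *data, u32 size,
						 struct net_device *dev)
	{
		struct sk_buff *skb = build_skb(data, 0);	/* takes over `data` */

		if (!skb)
			return NULL;

		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);	/* driver-style headroom */
		__skb_put(skb, size);				/* payload already lives in data */
		skb->protocol = eth_type_trans(skb, dev);	/* also pulls the MAC header */
		skb_reset_network_header(skb);

		return skb;
	}
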
skb                51 net/bridge/br_arp_nd_proxy.c 	struct sk_buff *skb;
skb                63 net/bridge/br_arp_nd_proxy.c 	skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dest_ip, dev, src_ip,
skb                65 net/bridge/br_arp_nd_proxy.c 	if (!skb)
skb                77 net/bridge/br_arp_nd_proxy.c 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
skb                80 net/bridge/br_arp_nd_proxy.c 		arp_xmit(skb);
skb                82 net/bridge/br_arp_nd_proxy.c 		skb_reset_mac_header(skb);
skb                83 net/bridge/br_arp_nd_proxy.c 		__skb_pull(skb, skb_network_offset(skb));
skb                84 net/bridge/br_arp_nd_proxy.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb                85 net/bridge/br_arp_nd_proxy.c 		skb->pkt_type = PACKET_HOST;
skb                87 net/bridge/br_arp_nd_proxy.c 		netif_rx_ni(skb);
skb               120 net/bridge/br_arp_nd_proxy.c void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
skb               130 net/bridge/br_arp_nd_proxy.c 	BR_INPUT_SKB_CB(skb)->proxyarp_replied = 0;
skb               133 net/bridge/br_arp_nd_proxy.c 	    !pskb_may_pull(skb, arp_hdr_len(dev)))
skb               136 net/bridge/br_arp_nd_proxy.c 	parp = arp_hdr(skb);
skb               160 net/bridge/br_arp_nd_proxy.c 			BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
skb               169 net/bridge/br_arp_nd_proxy.c 		vlandev = __vlan_find_dev_deep_rcu(br->dev, skb->vlan_proto,
skb               180 net/bridge/br_arp_nd_proxy.c 		BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
skb               201 net/bridge/br_arp_nd_proxy.c 					br_arp_send(br, p, skb->dev, sip, tip,
skb               204 net/bridge/br_arp_nd_proxy.c 					br_arp_send(br, p, skb->dev, sip, tip,
skb               206 net/bridge/br_arp_nd_proxy.c 						    skb->vlan_proto,
skb               207 net/bridge/br_arp_nd_proxy.c 						    skb_vlan_tag_get(skb));
skb               216 net/bridge/br_arp_nd_proxy.c 				BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
skb               225 net/bridge/br_arp_nd_proxy.c struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *msg)
skb               229 net/bridge/br_arp_nd_proxy.c 	m = skb_header_pointer(skb, skb_network_offset(skb) +
skb               387 net/bridge/br_arp_nd_proxy.c void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
skb               396 net/bridge/br_arp_nd_proxy.c 	BR_INPUT_SKB_CB(skb)->proxyarp_replied = 0;
skb               404 net/bridge/br_arp_nd_proxy.c 		BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
skb               411 net/bridge/br_arp_nd_proxy.c 	iphdr = ipv6_hdr(skb);
skb               417 net/bridge/br_arp_nd_proxy.c 		BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
skb               423 net/bridge/br_arp_nd_proxy.c 		vlandev = __vlan_find_dev_deep_rcu(br->dev, skb->vlan_proto,
skb               435 net/bridge/br_arp_nd_proxy.c 		BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
skb               454 net/bridge/br_arp_nd_proxy.c 					br_nd_send(br, p, skb, n,
skb               455 net/bridge/br_arp_nd_proxy.c 						   skb->vlan_proto,
skb               456 net/bridge/br_arp_nd_proxy.c 						   skb_vlan_tag_get(skb), msg);
skb               458 net/bridge/br_arp_nd_proxy.c 					br_nd_send(br, p, skb, n, 0, 0, msg);
skb               468 net/bridge/br_arp_nd_proxy.c 				BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
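
br_is_nd_neigh_msg() reads the ND message through skb_header_pointer(), which copies into a caller-supplied buffer when the requested bytes are not linear, rather than trusting skb->data. Minimal illustration of the helper (my_get_icmp6 is made up):

	#include <linux/skbuff.h>
	#include <linux/ipv6.h>
	#include <linux/icmpv6.h>

	static struct icmp6hdr *my_get_icmp6(struct sk_buff *skb,
					     struct icmp6hdr *buf)
	{
		/* Returns a pointer into the skb if the range is linear, else
		 * copies into buf; NULL means the packet is too short. */
		return skb_header_pointer(skb, skb_network_offset(skb) +
					  sizeof(struct ipv6hdr), sizeof(*buf), buf);
	}
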
skb                28 net/bridge/br_device.c netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb                41 net/bridge/br_device.c 	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
skb                48 net/bridge/br_device.c 	brstats->tx_bytes += skb->len;
skb                51 net/bridge/br_device.c 	br_switchdev_frame_unmark(skb);
skb                52 net/bridge/br_device.c 	BR_INPUT_SKB_CB(skb)->brdev = dev;
skb                53 net/bridge/br_device.c 	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;
skb                55 net/bridge/br_device.c 	skb_reset_mac_header(skb);
skb                56 net/bridge/br_device.c 	eth = eth_hdr(skb);
skb                57 net/bridge/br_device.c 	skb_pull(skb, ETH_HLEN);
skb                59 net/bridge/br_device.c 	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
skb                66 net/bridge/br_device.c 		br_do_proxy_suppress_arp(skb, br, vid, NULL);
skb                68 net/bridge/br_device.c 		   skb->protocol == htons(ETH_P_IPV6) &&
skb                70 net/bridge/br_device.c 		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
skb                72 net/bridge/br_device.c 		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
skb                75 net/bridge/br_device.c 			msg = br_is_nd_neigh_msg(skb, &_msg);
skb                77 net/bridge/br_device.c 				br_do_suppress_nd(skb, br, vid, NULL, msg);
skb                80 net/bridge/br_device.c 	dest = eth_hdr(skb)->h_dest;
skb                82 net/bridge/br_device.c 		br_flood(br, skb, BR_PKT_BROADCAST, false, true);
skb                85 net/bridge/br_device.c 			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
skb                88 net/bridge/br_device.c 		if (br_multicast_rcv(br, NULL, skb, vid)) {
skb                89 net/bridge/br_device.c 			kfree_skb(skb);
skb                93 net/bridge/br_device.c 		mdst = br_mdb_get(br, skb, vid);
skb                94 net/bridge/br_device.c 		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
skb                95 net/bridge/br_device.c 		    br_multicast_querier_exists(br, eth_hdr(skb)))
skb                96 net/bridge/br_device.c 			br_multicast_flood(mdst, skb, false, true);
skb                98 net/bridge/br_device.c 			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
skb               100 net/bridge/br_device.c 		br_forward(dst->dst, skb, false, true);
skb               102 net/bridge/br_device.c 		br_flood(br, skb, BR_PKT_UNICAST, false, true);
skb               629 net/bridge/br_fdb.c static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
skb               638 net/bridge/br_fdb.c 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
skb               658 net/bridge/br_fdb.c 	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
skb               660 net/bridge/br_fdb.c 	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
skb               666 net/bridge/br_fdb.c 	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
skb               669 net/bridge/br_fdb.c 	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
skb               673 net/bridge/br_fdb.c 	nlmsg_end(skb, nlh);
skb               677 net/bridge/br_fdb.c 	nlmsg_cancel(skb, nlh);
skb               695 net/bridge/br_fdb.c 	struct sk_buff *skb;
skb               701 net/bridge/br_fdb.c 	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
skb               702 net/bridge/br_fdb.c 	if (skb == NULL)
skb               705 net/bridge/br_fdb.c 	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
skb               709 net/bridge/br_fdb.c 		kfree_skb(skb);
skb               712 net/bridge/br_fdb.c 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
skb               719 net/bridge/br_fdb.c int br_fdb_dump(struct sk_buff *skb,
skb               733 net/bridge/br_fdb.c 		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
skb               756 net/bridge/br_fdb.c 		err = fdb_fill_info(skb, br, f,
skb               757 net/bridge/br_fdb.c 				    NETLINK_CB(cb->skb).portid,
skb               771 net/bridge/br_fdb.c int br_fdb_get(struct sk_buff *skb,
skb               790 net/bridge/br_fdb.c 	err = fdb_fill_info(skb, br, f, portid, seq,
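
fdb_fill_info() is the canonical netlink fill routine: nlmsg_put() opens the message, each nla_put*() failure unwinds through nlmsg_cancel() so a partially written entry never reaches userspace, and nlmsg_end() commits. A stripped-down sketch (my_fill and the single attribute choice are illustrative):

	#include <net/netlink.h>
	#include <linux/rtnetlink.h>
	#include <linux/neighbour.h>

	static int my_fill(struct sk_buff *skb, u32 portid, u32 seq, int ifindex)
	{
		struct nlmsghdr *nlh;

		nlh = nlmsg_put(skb, portid, seq, RTM_NEWNEIGH, 0, NLM_F_MULTI);
		if (!nlh)
			return -EMSGSIZE;

		if (nla_put_u32(skb, NDA_IFINDEX, ifindex))
			goto cancel;

		nlmsg_end(skb, nlh);		/* commit the message length */
		return 0;

	cancel:
		nlmsg_cancel(skb, nlh);		/* roll the skb tail back */
		return -EMSGSIZE;
	}
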
skb                22 net/bridge/br_forward.c 				 const struct sk_buff *skb)
skb                27 net/bridge/br_forward.c 	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
skb                28 net/bridge/br_forward.c 		br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING &&
skb                29 net/bridge/br_forward.c 		nbp_switchdev_allowed_egress(p, skb) &&
skb                30 net/bridge/br_forward.c 		!br_skb_isolated(p, skb);
skb                33 net/bridge/br_forward.c int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                35 net/bridge/br_forward.c 	skb_push(skb, ETH_HLEN);
skb                36 net/bridge/br_forward.c 	if (!is_skb_forwardable(skb->dev, skb))
skb                39 net/bridge/br_forward.c 	br_drop_fake_rtable(skb);
skb                41 net/bridge/br_forward.c 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb                42 net/bridge/br_forward.c 	    (skb->protocol == htons(ETH_P_8021Q) ||
skb                43 net/bridge/br_forward.c 	     skb->protocol == htons(ETH_P_8021AD))) {
skb                46 net/bridge/br_forward.c 		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
skb                49 net/bridge/br_forward.c 		skb_set_network_header(skb, depth);
skb                52 net/bridge/br_forward.c 	dev_queue_xmit(skb);
skb                57 net/bridge/br_forward.c 	kfree_skb(skb);
skb                62 net/bridge/br_forward.c int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                64 net/bridge/br_forward.c 	skb->tstamp = 0;
skb                66 net/bridge/br_forward.c 		       net, sk, skb, NULL, skb->dev,
skb                73 net/bridge/br_forward.c 			 struct sk_buff *skb, bool local_orig)
skb                81 net/bridge/br_forward.c 	skb = br_handle_vlan(to->br, to, vg, skb);
skb                82 net/bridge/br_forward.c 	if (!skb)
skb                85 net/bridge/br_forward.c 	indev = skb->dev;
skb                86 net/bridge/br_forward.c 	skb->dev = to->dev;
skb                88 net/bridge/br_forward.c 		if (skb_warn_if_lro(skb)) {
skb                89 net/bridge/br_forward.c 			kfree_skb(skb);
skb                93 net/bridge/br_forward.c 		skb_forward_csum(skb);
skb                97 net/bridge/br_forward.c 			skb_push(skb, ETH_HLEN);
skb                98 net/bridge/br_forward.c 			if (!is_skb_forwardable(skb->dev, skb))
skb                99 net/bridge/br_forward.c 				kfree_skb(skb);
skb               101 net/bridge/br_forward.c 				br_netpoll_send_skb(to, skb);
skb               105 net/bridge/br_forward.c 		net = dev_net(skb->dev);
skb               110 net/bridge/br_forward.c 		net, NULL, skb, indev, skb->dev,
skb               115 net/bridge/br_forward.c 			 struct sk_buff *skb, bool local_orig)
skb               117 net/bridge/br_forward.c 	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
skb               119 net/bridge/br_forward.c 	skb = skb_clone(skb, GFP_ATOMIC);
skb               120 net/bridge/br_forward.c 	if (!skb) {
skb               125 net/bridge/br_forward.c 	__br_forward(prev, skb, local_orig);
skb               139 net/bridge/br_forward.c 		struct sk_buff *skb, bool local_rcv, bool local_orig)
skb               154 net/bridge/br_forward.c 	if (should_deliver(to, skb)) {
skb               156 net/bridge/br_forward.c 			deliver_clone(to, skb, local_orig);
skb               158 net/bridge/br_forward.c 			__br_forward(to, skb, local_orig);
skb               164 net/bridge/br_forward.c 		kfree_skb(skb);
skb               170 net/bridge/br_forward.c 	struct sk_buff *skb, bool local_orig)
skb               172 net/bridge/br_forward.c 	u8 igmp_type = br_multicast_igmp_type(skb);
skb               175 net/bridge/br_forward.c 	if (!should_deliver(p, skb))
skb               181 net/bridge/br_forward.c 	err = deliver_clone(prev, skb, local_orig);
skb               185 net/bridge/br_forward.c 	br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);
skb               191 net/bridge/br_forward.c void br_flood(struct net_bridge *br, struct sk_buff *skb,
skb               207 net/bridge/br_forward.c 			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
skb               211 net/bridge/br_forward.c 			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
skb               220 net/bridge/br_forward.c 		    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
skb               223 net/bridge/br_forward.c 		prev = maybe_deliver(prev, p, skb, local_orig);
skb               232 net/bridge/br_forward.c 		deliver_clone(prev, skb, local_orig);
skb               234 net/bridge/br_forward.c 		__br_forward(prev, skb, local_orig);
skb               239 net/bridge/br_forward.c 		kfree_skb(skb);
skb               243 net/bridge/br_forward.c static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
skb               246 net/bridge/br_forward.c 	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
skb               247 net/bridge/br_forward.c 	const unsigned char *src = eth_hdr(skb)->h_source;
skb               249 net/bridge/br_forward.c 	if (!should_deliver(p, skb))
skb               253 net/bridge/br_forward.c 	if (skb->dev == p->dev && ether_addr_equal(src, addr))
skb               256 net/bridge/br_forward.c 	skb = skb_copy(skb, GFP_ATOMIC);
skb               257 net/bridge/br_forward.c 	if (!skb) {
skb               263 net/bridge/br_forward.c 		memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
skb               265 net/bridge/br_forward.c 	__br_forward(p, skb, local_orig);
skb               270 net/bridge/br_forward.c 			struct sk_buff *skb,
skb               273 net/bridge/br_forward.c 	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
skb               291 net/bridge/br_forward.c 				maybe_deliver_addr(lport, skb, p->eth_addr,
skb               299 net/bridge/br_forward.c 		prev = maybe_deliver(prev, port, skb, local_orig);
skb               313 net/bridge/br_forward.c 		deliver_clone(prev, skb, local_orig);
skb               315 net/bridge/br_forward.c 		__br_forward(prev, skb, local_orig);
skb               320 net/bridge/br_forward.c 		kfree_skb(skb);
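
br_flood() uses the clone-for-all-but-the-last trick: every intermediate port transmits an skb_clone() and the final port consumes the original, saving one copy per flood. A simplified version of the loop; struct my_port and my_xmit() stand in for the bridge types:

	#include <linux/list.h>
	#include <linux/skbuff.h>

	struct my_port {
		struct list_head list;
	};

	static void my_xmit(struct my_port *p, struct sk_buff *skb)
	{
		kfree_skb(skb);		/* stub transmit: just consume the skb */
	}

	static void my_flood(struct list_head *ports, struct sk_buff *skb)
	{
		struct my_port *p, *prev = NULL;

		list_for_each_entry(p, ports, list) {
			if (prev) {
				struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

				if (clone)
					my_xmit(prev, clone);
			}
			prev = p;
		}

		if (prev)
			my_xmit(prev, skb);	/* the last port takes the original */
		else
			kfree_skb(skb);		/* no receivers at all */
	}
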
skb                26 net/bridge/br_input.c br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                28 net/bridge/br_input.c 	br_drop_fake_rtable(skb);
skb                29 net/bridge/br_input.c 	return netif_receive_skb(skb);
skb                32 net/bridge/br_input.c static int br_pass_frame_up(struct sk_buff *skb)
skb                34 net/bridge/br_input.c 	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
skb                41 net/bridge/br_input.c 	brstats->rx_bytes += skb->len;
skb                50 net/bridge/br_input.c 	    !br_allowed_egress(vg, skb)) {
skb                51 net/bridge/br_input.c 		kfree_skb(skb);
skb                55 net/bridge/br_input.c 	indev = skb->dev;
skb                56 net/bridge/br_input.c 	skb->dev = brdev;
skb                57 net/bridge/br_input.c 	skb = br_handle_vlan(br, NULL, vg, skb);
skb                58 net/bridge/br_input.c 	if (!skb)
skb                61 net/bridge/br_input.c 	br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
skb                65 net/bridge/br_input.c 		       dev_net(indev), NULL, skb, indev, NULL,
skb                70 net/bridge/br_input.c int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                72 net/bridge/br_input.c 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
skb                83 net/bridge/br_input.c 	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
skb                86 net/bridge/br_input.c 	nbp_switchdev_frame_mark(p, skb);
skb                91 net/bridge/br_input.c 		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
skb                94 net/bridge/br_input.c 	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
skb                96 net/bridge/br_input.c 		if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
skb               101 net/bridge/br_input.c 			if (br_multicast_rcv(br, p, skb, vid))
skb               109 net/bridge/br_input.c 	BR_INPUT_SKB_CB(skb)->brdev = br->dev;
skb               110 net/bridge/br_input.c 	BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED);
skb               113 net/bridge/br_input.c 	    (skb->protocol == htons(ETH_P_ARP) ||
skb               114 net/bridge/br_input.c 	     skb->protocol == htons(ETH_P_RARP))) {
skb               115 net/bridge/br_input.c 		br_do_proxy_suppress_arp(skb, br, vid, p);
skb               117 net/bridge/br_input.c 		   skb->protocol == htons(ETH_P_IPV6) &&
skb               119 net/bridge/br_input.c 		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
skb               121 net/bridge/br_input.c 		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
skb               124 net/bridge/br_input.c 			msg = br_is_nd_neigh_msg(skb, &_msg);
skb               126 net/bridge/br_input.c 				br_do_suppress_nd(skb, br, vid, p, msg);
skb               131 net/bridge/br_input.c 		mdst = br_mdb_get(br, skb, vid);
skb               132 net/bridge/br_input.c 		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
skb               133 net/bridge/br_input.c 		    br_multicast_querier_exists(br, eth_hdr(skb))) {
skb               146 net/bridge/br_input.c 		dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
skb               155 net/bridge/br_input.c 			return br_pass_frame_up(skb);
skb               159 net/bridge/br_input.c 		br_forward(dst->dst, skb, local_rcv, false);
skb               162 net/bridge/br_input.c 			br_flood(br, skb, pkt_type, local_rcv, false);
skb               164 net/bridge/br_input.c 			br_multicast_flood(mdst, skb, local_rcv, false);
skb               168 net/bridge/br_input.c 		return br_pass_frame_up(skb);
skb               173 net/bridge/br_input.c 	kfree_skb(skb);
skb               178 net/bridge/br_input.c static void __br_handle_local_finish(struct sk_buff *skb)
skb               180 net/bridge/br_input.c 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
skb               186 net/bridge/br_input.c 	    br_should_learn(p, skb, &vid))
skb               187 net/bridge/br_input.c 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
skb               191 net/bridge/br_input.c static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               193 net/bridge/br_input.c 	__br_handle_local_finish(skb);
skb               199 net/bridge/br_input.c static int nf_hook_bridge_pre(struct sk_buff *skb, struct sk_buff **pskb)
skb               208 net/bridge/br_input.c 	net = dev_net(skb->dev);
skb               219 net/bridge/br_input.c 			   NFPROTO_BRIDGE, skb->dev, NULL, NULL,
skb               223 net/bridge/br_input.c 		verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, &state);
skb               226 net/bridge/br_input.c 			if (BR_INPUT_SKB_CB(skb)->br_netfilter_broute) {
skb               227 net/bridge/br_input.c 				*pskb = skb;
skb               232 net/bridge/br_input.c 			kfree_skb(skb);
skb               235 net/bridge/br_input.c 			ret = nf_queue(skb, &state, i, verdict);
skb               244 net/bridge/br_input.c 	net = dev_net(skb->dev);
skb               245 net/bridge/br_input.c 	br_handle_frame_finish(net, NULL, skb);
skb               247 net/bridge/br_input.c 	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
skb               259 net/bridge/br_input.c 	struct sk_buff *skb = *pskb;
skb               260 net/bridge/br_input.c 	const unsigned char *dest = eth_hdr(skb)->h_dest;
skb               262 net/bridge/br_input.c 	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
skb               265 net/bridge/br_input.c 	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
skb               268 net/bridge/br_input.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               269 net/bridge/br_input.c 	if (!skb)
skb               272 net/bridge/br_input.c 	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
skb               274 net/bridge/br_input.c 	p = br_port_get_rcu(skb->dev);
skb               276 net/bridge/br_input.c 		if (br_handle_ingress_vlan_tunnel(skb, p,
skb               305 net/bridge/br_input.c 			*pskb = skb;
skb               306 net/bridge/br_input.c 			__br_handle_local_finish(skb);
skb               316 net/bridge/br_input.c 			*pskb = skb;
skb               317 net/bridge/br_input.c 			__br_handle_local_finish(skb);
skb               333 net/bridge/br_input.c 			    dev_net(skb->dev), NULL, skb, skb->dev, NULL,
skb               346 net/bridge/br_input.c 			skb->pkt_type = PACKET_HOST;
skb               348 net/bridge/br_input.c 		return nf_hook_bridge_pre(skb, pskb);
skb               351 net/bridge/br_input.c 		kfree_skb(skb);
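
br_handle_frame() unshares the skb before zeroing skb->cb, since rx_handlers can be handed shared buffers. The guard in isolation (my_prepare is illustrative):

	#include <linux/skbuff.h>

	static struct sk_buff *my_prepare(struct sk_buff *skb)
	{
		skb = skb_share_check(skb, GFP_ATOMIC);	/* clone if users > 1 */
		if (!skb)
			return NULL;

		memset(skb->cb, 0, sizeof(skb->cb));	/* per-layer scratch space */
		return skb;
	}
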
skb                19 net/bridge/br_mdb.c static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
skb                29 net/bridge/br_mdb.c 	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
skb                36 net/bridge/br_mdb.c 		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
skb                39 net/bridge/br_mdb.c 		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
skb                40 net/bridge/br_mdb.c 		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
skb                42 net/bridge/br_mdb.c 		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
skb                44 net/bridge/br_mdb.c 			nla_nest_cancel(skb, port_nest);
skb                47 net/bridge/br_mdb.c 		nla_nest_end(skb, port_nest);
skb                50 net/bridge/br_mdb.c 	nla_nest_end(skb, nest);
skb                53 net/bridge/br_mdb.c 	nla_nest_cancel(skb, nest);
skb                80 net/bridge/br_mdb.c static int __mdb_fill_info(struct sk_buff *skb,
skb               110 net/bridge/br_mdb.c 	nest_ent = nla_nest_start_noflag(skb,
skb               115 net/bridge/br_mdb.c 	if (nla_put_nohdr(skb, sizeof(e), &e) ||
skb               116 net/bridge/br_mdb.c 	    nla_put_u32(skb,
skb               119 net/bridge/br_mdb.c 		nla_nest_cancel(skb, nest_ent);
skb               122 net/bridge/br_mdb.c 	nla_nest_end(skb, nest_ent);
skb               127 net/bridge/br_mdb.c static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
skb               138 net/bridge/br_mdb.c 	nest = nla_nest_start_noflag(skb, MDBA_MDB);
skb               149 net/bridge/br_mdb.c 		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
skb               156 net/bridge/br_mdb.c 			err = __mdb_fill_info(skb, mp, NULL);
skb               158 net/bridge/br_mdb.c 				nla_nest_cancel(skb, nest2);
skb               168 net/bridge/br_mdb.c 			err = __mdb_fill_info(skb, mp, p);
skb               170 net/bridge/br_mdb.c 				nla_nest_cancel(skb, nest2);
skb               174 net/bridge/br_mdb.c 		nla_nest_end(skb, nest2);
skb               181 net/bridge/br_mdb.c 	nla_nest_end(skb, nest);
skb               208 net/bridge/br_mdb.c static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               211 net/bridge/br_mdb.c 	struct net *net = sock_net(skb->sk);
skb               235 net/bridge/br_mdb.c 			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
skb               244 net/bridge/br_mdb.c 			if (br_mdb_fill_info(skb, cb, dev) < 0)
skb               246 net/bridge/br_mdb.c 			if (br_rports_fill_info(skb, cb, dev) < 0)
skb               250 net/bridge/br_mdb.c 			nlmsg_end(skb, nlh);
skb               258 net/bridge/br_mdb.c 		nlmsg_end(skb, nlh);
skb               261 net/bridge/br_mdb.c 	return skb->len;
skb               264 net/bridge/br_mdb.c static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
skb               273 net/bridge/br_mdb.c 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
skb               281 net/bridge/br_mdb.c 	nest = nla_nest_start_noflag(skb, MDBA_MDB);
skb               284 net/bridge/br_mdb.c 	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
skb               288 net/bridge/br_mdb.c 	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
skb               291 net/bridge/br_mdb.c 	nla_nest_end(skb, nest2);
skb               292 net/bridge/br_mdb.c 	nla_nest_end(skb, nest);
skb               293 net/bridge/br_mdb.c 	nlmsg_end(skb, nlh);
skb               297 net/bridge/br_mdb.c 	nla_nest_end(skb, nest);
skb               299 net/bridge/br_mdb.c 	nlmsg_cancel(skb, nlh);
skb               395 net/bridge/br_mdb.c 	struct sk_buff *skb;
skb               424 net/bridge/br_mdb.c 	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
skb               425 net/bridge/br_mdb.c 	if (!skb)
skb               428 net/bridge/br_mdb.c 	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
skb               430 net/bridge/br_mdb.c 		kfree_skb(skb);
skb               434 net/bridge/br_mdb.c 	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
skb               460 net/bridge/br_mdb.c static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
skb               469 net/bridge/br_mdb.c 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
skb               477 net/bridge/br_mdb.c 	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
skb               481 net/bridge/br_mdb.c 	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
skb               484 net/bridge/br_mdb.c 	nla_nest_end(skb, nest);
skb               485 net/bridge/br_mdb.c 	nlmsg_end(skb, nlh);
skb               489 net/bridge/br_mdb.c 	nla_nest_end(skb, nest);
skb               491 net/bridge/br_mdb.c 	nlmsg_cancel(skb, nlh);
skb               505 net/bridge/br_mdb.c 	struct sk_buff *skb;
skb               510 net/bridge/br_mdb.c 	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
skb               511 net/bridge/br_mdb.c 	if (!skb)
skb               514 net/bridge/br_mdb.c 	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
skb               516 net/bridge/br_mdb.c 		kfree_skb(skb);
skb               520 net/bridge/br_mdb.c 	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
skb               552 net/bridge/br_mdb.c static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               555 net/bridge/br_mdb.c 	struct net *net = sock_net(skb->sk);
skb               680 net/bridge/br_mdb.c static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               683 net/bridge/br_mdb.c 	struct net *net = sock_net(skb->sk);
skb               692 net/bridge/br_mdb.c 	err = br_mdb_parse(skb, nlh, &dev, &entry);
skb               785 net/bridge/br_mdb.c static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               788 net/bridge/br_mdb.c 	struct net *net = sock_net(skb->sk);
skb               797 net/bridge/br_mdb.c 	err = br_mdb_parse(skb, nlh, &dev, &entry);
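
The br_mdb dump code nests attributes: nla_nest_start_noflag() opens a container that is either committed with nla_nest_end() or rolled back with nla_nest_cancel(). Shape of the pattern with hypothetical attribute ids:

	#include <net/netlink.h>

	enum { MY_ATTR_UNSPEC, MY_ATTR_NEST, MY_ATTR_VALUE };	/* hypothetical ids */

	static int my_fill_nested(struct sk_buff *skb, u32 value)
	{
		struct nlattr *nest;

		nest = nla_nest_start_noflag(skb, MY_ATTR_NEST);
		if (!nest)
			return -EMSGSIZE;

		if (nla_put_u32(skb, MY_ATTR_VALUE, value)) {
			nla_nest_cancel(skb, nest);	/* drop the half-built nest */
			return -EMSGSIZE;
		}

		nla_nest_end(skb, nest);		/* patch in the nest length */
		return 0;
	}
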
skb               112 net/bridge/br_multicast.c 					struct sk_buff *skb, u16 vid)
skb               119 net/bridge/br_multicast.c 	if (BR_INPUT_SKB_CB(skb)->igmp)
skb               123 net/bridge/br_multicast.c 	ip.proto = skb->protocol;
skb               126 net/bridge/br_multicast.c 	switch (skb->protocol) {
skb               128 net/bridge/br_multicast.c 		ip.u.ip4 = ip_hdr(skb)->daddr;
skb               132 net/bridge/br_multicast.c 		ip.u.ip6 = ipv6_hdr(skb)->daddr;
skb               222 net/bridge/br_multicast.c 	struct sk_buff *skb;
skb               230 net/bridge/br_multicast.c 	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
skb               232 net/bridge/br_multicast.c 	if (!skb)
skb               235 net/bridge/br_multicast.c 	skb->protocol = htons(ETH_P_IP);
skb               237 net/bridge/br_multicast.c 	skb_reset_mac_header(skb);
skb               238 net/bridge/br_multicast.c 	eth = eth_hdr(skb);
skb               248 net/bridge/br_multicast.c 	skb_put(skb, sizeof(*eth));
skb               250 net/bridge/br_multicast.c 	skb_set_network_header(skb, skb->len);
skb               251 net/bridge/br_multicast.c 	iph = ip_hdr(skb);
skb               269 net/bridge/br_multicast.c 	skb_put(skb, 24);
skb               271 net/bridge/br_multicast.c 	skb_set_transport_header(skb, skb->len);
skb               276 net/bridge/br_multicast.c 		ih = igmp_hdr(skb);
skb               286 net/bridge/br_multicast.c 		ihv3 = igmpv3_query_hdr(skb);
skb               302 net/bridge/br_multicast.c 	skb_put(skb, igmp_hdr_size);
skb               303 net/bridge/br_multicast.c 	__skb_pull(skb, sizeof(*eth));
skb               306 net/bridge/br_multicast.c 	return skb;
skb               319 net/bridge/br_multicast.c 	struct sk_buff *skb;
skb               326 net/bridge/br_multicast.c 	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
skb               328 net/bridge/br_multicast.c 	if (!skb)
skb               331 net/bridge/br_multicast.c 	skb->protocol = htons(ETH_P_IPV6);
skb               334 net/bridge/br_multicast.c 	skb_reset_mac_header(skb);
skb               335 net/bridge/br_multicast.c 	eth = eth_hdr(skb);
skb               339 net/bridge/br_multicast.c 	skb_put(skb, sizeof(*eth));
skb               342 net/bridge/br_multicast.c 	skb_set_network_header(skb, skb->len);
skb               343 net/bridge/br_multicast.c 	ip6h = ipv6_hdr(skb);
skb               352 net/bridge/br_multicast.c 		kfree_skb(skb);
skb               370 net/bridge/br_multicast.c 	skb_put(skb, sizeof(*ip6h) + 8);
skb               373 net/bridge/br_multicast.c 	skb_set_transport_header(skb, skb->len);
skb               380 net/bridge/br_multicast.c 		mldq = (struct mld_msg *)icmp6_hdr(skb);
skb               394 net/bridge/br_multicast.c 		mld2q = (struct mld2_query *)icmp6_hdr(skb);
skb               414 net/bridge/br_multicast.c 	skb_put(skb, mld_hdr_size);
skb               416 net/bridge/br_multicast.c 	__skb_pull(skb, sizeof(*eth));
skb               419 net/bridge/br_multicast.c 	return skb;
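
br_multicast_alloc_query() lays the packet out front-to-back: netdev_alloc_skb_ip_align() gives aligned headroom, each header is claimed with skb_put() while its offset is recorded via skb_set_network_header()/skb_set_transport_header(), and the Ethernet header is finally pulled off before local delivery. A skeleton of the first two steps (my_alloc_query is illustrative; the real code derives the multicast destination MAC rather than broadcasting):

	#include <linux/skbuff.h>
	#include <linux/etherdevice.h>

	static struct sk_buff *my_alloc_query(struct net_device *dev, int payload)
	{
		struct sk_buff *skb;
		struct ethhdr *eth;

		skb = netdev_alloc_skb_ip_align(dev, ETH_HLEN + payload);
		if (!skb)
			return NULL;

		skb->protocol = htons(ETH_P_IP);
		skb_reset_mac_header(skb);
		eth = skb_put(skb, ETH_HLEN);		/* claim the Ethernet header */
		ether_addr_copy(eth->h_source, dev->dev_addr);
		eth_broadcast_addr(eth->h_dest);	/* placeholder destination */
		eth->h_proto = htons(ETH_P_IP);

		skb_set_network_header(skb, skb->len);	/* IP header starts at tail */
		skb_put(skb, payload);			/* space for IP + IGMP */

		return skb;
	}
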
skb               702 net/bridge/br_multicast.c 					    struct sk_buff *skb)
skb               705 net/bridge/br_multicast.c 		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
skb               708 net/bridge/br_multicast.c 		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
skb               716 net/bridge/br_multicast.c 	struct sk_buff *skb;
skb               719 net/bridge/br_multicast.c 	skb = br_multicast_alloc_query(br, ip, &igmp_type);
skb               720 net/bridge/br_multicast.c 	if (!skb)
skb               724 net/bridge/br_multicast.c 		skb->dev = port->dev;
skb               725 net/bridge/br_multicast.c 		br_multicast_count(br, port, skb, igmp_type,
skb               728 net/bridge/br_multicast.c 			dev_net(port->dev), NULL, skb, NULL, skb->dev,
skb               731 net/bridge/br_multicast.c 		br_multicast_select_own_querier(br, ip, skb);
skb               732 net/bridge/br_multicast.c 		br_multicast_count(br, port, skb, igmp_type,
skb               734 net/bridge/br_multicast.c 		netif_rx(skb);
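
The __br_multicast_send_query() entries above show the two exits a freshly built query can take. Below is a hedged restatement of just that dispatch, with the stats and querier-selection calls omitted: with a port, the query is an outgoing frame and traverses the bridge's NF_BR_LOCAL_OUT hook on its way to br_dev_queue_push_xmit(); with no port, the bridge device itself is the interested endpoint and the query is looped back up the stack with netif_rx(). The wrapper function name is an assumption.

	static void demo_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
	{
		if (port) {
			skb->dev = port->dev;
			NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
				dev_net(port->dev), NULL, skb, NULL, skb->dev,
				br_dev_queue_push_xmit);
		} else {
			/* the bridge device receives/records its own query */
			netif_rx(skb);
		}
	}
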
skb               918 net/bridge/br_multicast.c 					 struct sk_buff *skb,
skb               932 net/bridge/br_multicast.c 	ih = igmpv3_report_hdr(skb);
skb               934 net/bridge/br_multicast.c 	len = skb_transport_offset(skb) + sizeof(*ih);
skb               938 net/bridge/br_multicast.c 		if (!ip_mc_may_pull(skb, len))
skb               941 net/bridge/br_multicast.c 		grec = (void *)(skb->data + len - sizeof(*grec));
skb               947 net/bridge/br_multicast.c 		if (!ip_mc_may_pull(skb, len))
skb               964 net/bridge/br_multicast.c 		src = eth_hdr(skb)->h_source;
skb               983 net/bridge/br_multicast.c 					struct sk_buff *skb,
skb               996 net/bridge/br_multicast.c 	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
skb               999 net/bridge/br_multicast.c 	icmp6h = icmp6_hdr(skb);
skb              1001 net/bridge/br_multicast.c 	len = skb_transport_offset(skb) + sizeof(*icmp6h);
skb              1009 net/bridge/br_multicast.c 		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
skb              1013 net/bridge/br_multicast.c 		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
skb              1021 net/bridge/br_multicast.c 		if (!ipv6_mc_may_pull(skb, len + grec_len))
skb              1024 net/bridge/br_multicast.c 		grec = (struct mld2_grec *)(skb->data + len);
skb              1041 net/bridge/br_multicast.c 		src = eth_hdr(skb)->h_source;
skb              1217 net/bridge/br_multicast.c 				   struct sk_buff *skb,
skb              1220 net/bridge/br_multicast.c 	unsigned int transport_len = ip_transport_len(skb);
skb              1221 net/bridge/br_multicast.c 	const struct iphdr *iph = ip_hdr(skb);
skb              1222 net/bridge/br_multicast.c 	struct igmphdr *ih = igmp_hdr(skb);
skb              1247 net/bridge/br_multicast.c 		ih3 = igmpv3_query_hdr(skb);
skb              1294 net/bridge/br_multicast.c 				  struct sk_buff *skb,
skb              1297 net/bridge/br_multicast.c 	unsigned int transport_len = ipv6_transport_len(skb);
skb              1306 net/bridge/br_multicast.c 	unsigned int offset = skb_transport_offset(skb);
skb              1317 net/bridge/br_multicast.c 		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
skb              1321 net/bridge/br_multicast.c 		mld = (struct mld_msg *) icmp6_hdr(skb);
skb              1326 net/bridge/br_multicast.c 		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
skb              1330 net/bridge/br_multicast.c 		mld2q = (struct mld2_query *)icmp6_hdr(skb);
skb              1341 net/bridge/br_multicast.c 		saddr.u.ip6 = ipv6_hdr(skb)->saddr;
skb              1569 net/bridge/br_multicast.c 			     const struct sk_buff *skb)
skb              1571 net/bridge/br_multicast.c 	unsigned int offset = skb_transport_offset(skb);
skb              1574 net/bridge/br_multicast.c 	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
skb              1584 net/bridge/br_multicast.c 				    struct sk_buff *skb)
skb              1586 net/bridge/br_multicast.c 	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
skb              1587 net/bridge/br_multicast.c 	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
skb              1597 net/bridge/br_multicast.c 				 struct sk_buff *skb,
skb              1604 net/bridge/br_multicast.c 	err = ip_mc_check_igmp(skb);
skb              1607 net/bridge/br_multicast.c 		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
skb              1608 net/bridge/br_multicast.c 			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
skb              1609 net/bridge/br_multicast.c 		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
skb              1610 net/bridge/br_multicast.c 			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
skb              1611 net/bridge/br_multicast.c 				br_multicast_pim(br, port, skb);
skb              1612 net/bridge/br_multicast.c 		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
skb              1613 net/bridge/br_multicast.c 			br_ip4_multicast_mrd_rcv(br, port, skb);
skb              1618 net/bridge/br_multicast.c 		br_multicast_err_count(br, port, skb->protocol);
skb              1622 net/bridge/br_multicast.c 	ih = igmp_hdr(skb);
skb              1623 net/bridge/br_multicast.c 	src = eth_hdr(skb)->h_source;
skb              1624 net/bridge/br_multicast.c 	BR_INPUT_SKB_CB(skb)->igmp = ih->type;
skb              1629 net/bridge/br_multicast.c 		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
skb              1633 net/bridge/br_multicast.c 		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
skb              1636 net/bridge/br_multicast.c 		br_ip4_multicast_query(br, port, skb, vid);
skb              1643 net/bridge/br_multicast.c 	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
skb              1652 net/bridge/br_multicast.c 				    struct sk_buff *skb)
skb              1656 net/bridge/br_multicast.c 	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
skb              1659 net/bridge/br_multicast.c 	ret = ipv6_mc_check_icmpv6(skb);
skb              1663 net/bridge/br_multicast.c 	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
skb              1673 net/bridge/br_multicast.c 				 struct sk_buff *skb,
skb              1680 net/bridge/br_multicast.c 	err = ipv6_mc_check_mld(skb);
skb              1683 net/bridge/br_multicast.c 		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
skb              1684 net/bridge/br_multicast.c 			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
skb              1686 net/bridge/br_multicast.c 		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
skb              1687 net/bridge/br_multicast.c 			err = br_ip6_multicast_mrd_rcv(br, port, skb);
skb              1690 net/bridge/br_multicast.c 				br_multicast_err_count(br, port, skb->protocol);
skb              1697 net/bridge/br_multicast.c 		br_multicast_err_count(br, port, skb->protocol);
skb              1701 net/bridge/br_multicast.c 	mld = (struct mld_msg *)skb_transport_header(skb);
skb              1702 net/bridge/br_multicast.c 	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
skb              1706 net/bridge/br_multicast.c 		src = eth_hdr(skb)->h_source;
skb              1707 net/bridge/br_multicast.c 		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
skb              1712 net/bridge/br_multicast.c 		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
skb              1715 net/bridge/br_multicast.c 		err = br_ip6_multicast_query(br, port, skb, vid);
skb              1718 net/bridge/br_multicast.c 		src = eth_hdr(skb)->h_source;
skb              1723 net/bridge/br_multicast.c 	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
skb              1731 net/bridge/br_multicast.c 		     struct sk_buff *skb, u16 vid)
skb              1735 net/bridge/br_multicast.c 	BR_INPUT_SKB_CB(skb)->igmp = 0;
skb              1736 net/bridge/br_multicast.c 	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
skb              1741 net/bridge/br_multicast.c 	switch (skb->protocol) {
skb              1743 net/bridge/br_multicast.c 		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
skb              1747 net/bridge/br_multicast.c 		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
skb              2313 net/bridge/br_multicast.c 			       const struct sk_buff *skb, u8 type, u8 dir)
skb              2316 net/bridge/br_multicast.c 	__be16 proto = skb->protocol;
skb              2322 net/bridge/br_multicast.c 		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
skb              2337 net/bridge/br_multicast.c 				unsigned int offset = skb_transport_offset(skb);
skb              2340 net/bridge/br_multicast.c 				ih = skb_header_pointer(skb, offset,
skb              2357 net/bridge/br_multicast.c 		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
skb              2359 net/bridge/br_multicast.c 		t_len -= skb_network_header_len(skb);
skb              2384 net/bridge/br_multicast.c 			const struct sk_buff *skb, u8 type, u8 dir)
skb              2399 net/bridge/br_multicast.c 	br_mcast_stats_add(stats, skb, type, dir);
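
br_multicast_count()/br_mcast_stats_add() above first derive the transport length and message type from the skb, then bump a per-CPU counter. The update itself follows the kernel's standard lockless pattern, shown below in a deliberately minimal, hypothetical form (the real struct br_mcast_stats keeps one counter per IGMP/MLD message type and direction): writes happen on the local CPU inside a u64_stats sequence so that readers on 32-bit machines still see consistent 64-bit values.

	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>

	struct demo_pcpu_stats {
		u64 igmp_queries;		/* one of many counters in reality */
		struct u64_stats_sync syncp;
	};

	static void demo_count_query(struct demo_pcpu_stats __percpu *pstats)
	{
		struct demo_pcpu_stats *stats = this_cpu_ptr(pstats);

		u64_stats_update_begin(&stats->syncp);
		stats->igmp_queries++;
		u64_stats_update_end(&stats->syncp);
	}
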
skb                66 net/bridge/br_netfilter_hooks.c #define IS_IP(skb) \
skb                67 net/bridge/br_netfilter_hooks.c 	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
skb                69 net/bridge/br_netfilter_hooks.c #define IS_IPV6(skb) \
skb                70 net/bridge/br_netfilter_hooks.c 	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
skb                72 net/bridge/br_netfilter_hooks.c #define IS_ARP(skb) \
skb                73 net/bridge/br_netfilter_hooks.c 	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
skb                75 net/bridge/br_netfilter_hooks.c static inline __be16 vlan_proto(const struct sk_buff *skb)
skb                77 net/bridge/br_netfilter_hooks.c 	if (skb_vlan_tag_present(skb))
skb                78 net/bridge/br_netfilter_hooks.c 		return skb->protocol;
skb                79 net/bridge/br_netfilter_hooks.c 	else if (skb->protocol == htons(ETH_P_8021Q))
skb                80 net/bridge/br_netfilter_hooks.c 		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
skb                85 net/bridge/br_netfilter_hooks.c static inline bool is_vlan_ip(const struct sk_buff *skb, const struct net *net)
skb                89 net/bridge/br_netfilter_hooks.c 	return vlan_proto(skb) == htons(ETH_P_IP) && brnet->filter_vlan_tagged;
skb                92 net/bridge/br_netfilter_hooks.c static inline bool is_vlan_ipv6(const struct sk_buff *skb,
skb                97 net/bridge/br_netfilter_hooks.c 	return vlan_proto(skb) == htons(ETH_P_IPV6) &&
skb               101 net/bridge/br_netfilter_hooks.c static inline bool is_vlan_arp(const struct sk_buff *skb, const struct net *net)
skb               105 net/bridge/br_netfilter_hooks.c 	return vlan_proto(skb) == htons(ETH_P_ARP) && brnet->filter_vlan_tagged;
skb               108 net/bridge/br_netfilter_hooks.c static inline __be16 pppoe_proto(const struct sk_buff *skb)
skb               110 net/bridge/br_netfilter_hooks.c 	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
skb               114 net/bridge/br_netfilter_hooks.c static inline bool is_pppoe_ip(const struct sk_buff *skb, const struct net *net)
skb               118 net/bridge/br_netfilter_hooks.c 	return skb->protocol == htons(ETH_P_PPP_SES) &&
skb               119 net/bridge/br_netfilter_hooks.c 	       pppoe_proto(skb) == htons(PPP_IP) && brnet->filter_pppoe_tagged;
skb               122 net/bridge/br_netfilter_hooks.c static inline bool is_pppoe_ipv6(const struct sk_buff *skb,
skb               127 net/bridge/br_netfilter_hooks.c 	return skb->protocol == htons(ETH_P_PPP_SES) &&
skb               128 net/bridge/br_netfilter_hooks.c 	       pppoe_proto(skb) == htons(PPP_IPV6) &&
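
The IS_IP/IS_IPV6/IS_ARP macros, vlan_proto() and pppoe_proto() entries above together answer one question: what is the real L3 payload of this frame once VLAN and PPPoE encapsulation are peeled back? The helper below is a distilled assumption of that logic, not the kernel's exact code; note that a hardware-accelerated VLAN frame keeps the inner ethertype in skb->protocol, so only a non-accelerated 802.1Q frame needs the header peek.

	#include <linux/if_ether.h>
	#include <linux/if_pppox.h>
	#include <linux/if_vlan.h>
	#include <linux/ppp_defs.h>

	static __be16 demo_effective_l3_proto(const struct sk_buff *skb)
	{
		if (!skb_vlan_tag_present(skb) &&
		    skb->protocol == htons(ETH_P_8021Q))
			return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;

		if (skb->protocol == htons(ETH_P_PPP_SES)) {
			/* PPP protocol field sits right after the PPPoE header */
			__be16 ppp = *(__be16 *)(skb_mac_header(skb) + ETH_HLEN +
						 sizeof(struct pppoe_hdr));

			if (ppp == htons(PPP_IP))
				return htons(ETH_P_IP);
			if (ppp == htons(PPP_IPV6))
				return htons(ETH_P_IPV6);
			return 0;	/* assumption: unknown payload */
		}

		return skb->protocol;
	}
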
skb               145 net/bridge/br_netfilter_hooks.c static void nf_bridge_info_free(struct sk_buff *skb)
skb               147 net/bridge/br_netfilter_hooks.c 	skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
skb               158 net/bridge/br_netfilter_hooks.c static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
skb               160 net/bridge/br_netfilter_hooks.c 	return skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
skb               163 net/bridge/br_netfilter_hooks.c unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
skb               165 net/bridge/br_netfilter_hooks.c 	switch (skb->protocol) {
skb               175 net/bridge/br_netfilter_hooks.c static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
skb               177 net/bridge/br_netfilter_hooks.c 	unsigned int len = nf_bridge_encap_header_len(skb);
skb               179 net/bridge/br_netfilter_hooks.c 	skb_pull(skb, len);
skb               180 net/bridge/br_netfilter_hooks.c 	skb->network_header += len;
skb               183 net/bridge/br_netfilter_hooks.c static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
skb               185 net/bridge/br_netfilter_hooks.c 	unsigned int len = nf_bridge_encap_header_len(skb);
skb               187 net/bridge/br_netfilter_hooks.c 	skb_pull_rcsum(skb, len);
skb               188 net/bridge/br_netfilter_hooks.c 	skb->network_header += len;
skb               196 net/bridge/br_netfilter_hooks.c static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
skb               201 net/bridge/br_netfilter_hooks.c 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
skb               204 net/bridge/br_netfilter_hooks.c 	iph = ip_hdr(skb);
skb               210 net/bridge/br_netfilter_hooks.c 	if (!pskb_may_pull(skb, iph->ihl*4))
skb               213 net/bridge/br_netfilter_hooks.c 	iph = ip_hdr(skb);
skb               218 net/bridge/br_netfilter_hooks.c 	if (skb->len < len) {
skb               224 net/bridge/br_netfilter_hooks.c 	if (pskb_trim_rcsum(skb, len)) {
skb               229 net/bridge/br_netfilter_hooks.c 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
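
br_validate_ipv4() above performs the structural sanity checks an IP stack normally does on receive, because bridged traffic may bypass that path. The condensed sketch below keeps only those structural steps (the real function also verifies the header checksum and counts drops): pull enough linear data, re-read the header pointer after each pull since pskb_may_pull() can relocate skb->head, cross-check the lengths, and trim trailing link-layer padding while keeping the checksum state consistent.

	#include <linux/ip.h>
	#include <linux/skbuff.h>

	static bool demo_validate_ipv4(struct sk_buff *skb)
	{
		const struct iphdr *iph;
		u32 len;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;

		iph = ip_hdr(skb);
		if (iph->ihl < 5 || iph->version != 4)
			return false;

		if (!pskb_may_pull(skb, iph->ihl * 4))
			return false;

		iph = ip_hdr(skb);			/* head may have moved */
		len = ntohs(iph->tot_len);
		if (skb->len < len || len < (u32)iph->ihl * 4)
			return false;

		return pskb_trim_rcsum(skb, len) == 0;	/* drop trailing padding */
	}
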
skb               245 net/bridge/br_netfilter_hooks.c void nf_bridge_update_protocol(struct sk_buff *skb)
skb               247 net/bridge/br_netfilter_hooks.c 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               251 net/bridge/br_netfilter_hooks.c 		skb->protocol = htons(ETH_P_8021Q);
skb               254 net/bridge/br_netfilter_hooks.c 		skb->protocol = htons(ETH_P_PPP_SES);
skb               266 net/bridge/br_netfilter_hooks.c int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               271 net/bridge/br_netfilter_hooks.c 	skb->dev = bridge_parent(skb->dev);
skb               272 net/bridge/br_netfilter_hooks.c 	if (!skb->dev)
skb               274 net/bridge/br_netfilter_hooks.c 	dst = skb_dst(skb);
skb               275 net/bridge/br_netfilter_hooks.c 	neigh = dst_neigh_lookup_skb(dst, skb);
skb               277 net/bridge/br_netfilter_hooks.c 		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               281 net/bridge/br_netfilter_hooks.c 			neigh_hh_bridge(&neigh->hh, skb);
skb               282 net/bridge/br_netfilter_hooks.c 			skb->dev = nf_bridge->physindev;
skb               283 net/bridge/br_netfilter_hooks.c 			ret = br_handle_frame_finish(net, sk, skb);
skb               289 net/bridge/br_netfilter_hooks.c 			skb_copy_from_linear_data_offset(skb,
skb               296 net/bridge/br_netfilter_hooks.c 			ret = neigh->output(neigh, skb);
skb               302 net/bridge/br_netfilter_hooks.c 	kfree_skb(skb);
skb               307 net/bridge/br_netfilter_hooks.c br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
skb               310 net/bridge/br_netfilter_hooks.c 	return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
skb               352 net/bridge/br_netfilter_hooks.c static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               354 net/bridge/br_netfilter_hooks.c 	struct net_device *dev = skb->dev;
skb               355 net/bridge/br_netfilter_hooks.c 	struct iphdr *iph = ip_hdr(skb);
skb               356 net/bridge/br_netfilter_hooks.c 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               360 net/bridge/br_netfilter_hooks.c 	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
skb               363 net/bridge/br_netfilter_hooks.c 		skb->pkt_type = PACKET_OTHERHOST;
skb               367 net/bridge/br_netfilter_hooks.c 	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
skb               368 net/bridge/br_netfilter_hooks.c 		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
skb               387 net/bridge/br_netfilter_hooks.c 					skb_dst_set(skb, &rt->dst);
skb               393 net/bridge/br_netfilter_hooks.c 			kfree_skb(skb);
skb               396 net/bridge/br_netfilter_hooks.c 			if (skb_dst(skb)->dev == dev) {
skb               398 net/bridge/br_netfilter_hooks.c 				skb->dev = nf_bridge->physindev;
skb               399 net/bridge/br_netfilter_hooks.c 				nf_bridge_update_protocol(skb);
skb               400 net/bridge/br_netfilter_hooks.c 				nf_bridge_push_encap_header(skb);
skb               402 net/bridge/br_netfilter_hooks.c 						  net, sk, skb, skb->dev,
skb               407 net/bridge/br_netfilter_hooks.c 			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
skb               408 net/bridge/br_netfilter_hooks.c 			skb->pkt_type = PACKET_HOST;
skb               413 net/bridge/br_netfilter_hooks.c 			kfree_skb(skb);
skb               416 net/bridge/br_netfilter_hooks.c 		skb_dst_set_noref(skb, &rt->dst);
skb               419 net/bridge/br_netfilter_hooks.c 	skb->dev = nf_bridge->physindev;
skb               420 net/bridge/br_netfilter_hooks.c 	nf_bridge_update_protocol(skb);
skb               421 net/bridge/br_netfilter_hooks.c 	nf_bridge_push_encap_header(skb);
skb               422 net/bridge/br_netfilter_hooks.c 	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
skb               427 net/bridge/br_netfilter_hooks.c static struct net_device *brnf_get_logical_dev(struct sk_buff *skb,
skb               436 net/bridge/br_netfilter_hooks.c 	if (brnet->pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
skb               439 net/bridge/br_netfilter_hooks.c 	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
skb               440 net/bridge/br_netfilter_hooks.c 				    skb_vlan_tag_get(skb) & VLAN_VID_MASK);
skb               446 net/bridge/br_netfilter_hooks.c struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
skb               448 net/bridge/br_netfilter_hooks.c 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               450 net/bridge/br_netfilter_hooks.c 	if (skb->pkt_type == PACKET_OTHERHOST) {
skb               451 net/bridge/br_netfilter_hooks.c 		skb->pkt_type = PACKET_HOST;
skb               456 net/bridge/br_netfilter_hooks.c 	nf_bridge->physindev = skb->dev;
skb               457 net/bridge/br_netfilter_hooks.c 	skb->dev = brnf_get_logical_dev(skb, skb->dev, net);
skb               459 net/bridge/br_netfilter_hooks.c 	if (skb->protocol == htons(ETH_P_8021Q))
skb               461 net/bridge/br_netfilter_hooks.c 	else if (skb->protocol == htons(ETH_P_PPP_SES))
skb               465 net/bridge/br_netfilter_hooks.c 	skb_orphan(skb);
skb               466 net/bridge/br_netfilter_hooks.c 	return skb->dev;
skb               476 net/bridge/br_netfilter_hooks.c 				      struct sk_buff *skb,
skb               482 net/bridge/br_netfilter_hooks.c 	__u32 len = nf_bridge_encap_header_len(skb);
skb               485 net/bridge/br_netfilter_hooks.c 	if (unlikely(!pskb_may_pull(skb, len)))
skb               494 net/bridge/br_netfilter_hooks.c 	if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
skb               495 net/bridge/br_netfilter_hooks.c 	    is_pppoe_ipv6(skb, state->net)) {
skb               504 net/bridge/br_netfilter_hooks.c 		nf_bridge_pull_encap_header_rcsum(skb);
skb               505 net/bridge/br_netfilter_hooks.c 		return br_nf_pre_routing_ipv6(priv, skb, state);
skb               511 net/bridge/br_netfilter_hooks.c 	if (!IS_IP(skb) && !is_vlan_ip(skb, state->net) &&
skb               512 net/bridge/br_netfilter_hooks.c 	    !is_pppoe_ip(skb, state->net))
skb               515 net/bridge/br_netfilter_hooks.c 	nf_bridge_pull_encap_header_rcsum(skb);
skb               517 net/bridge/br_netfilter_hooks.c 	if (br_validate_ipv4(state->net, skb))
skb               520 net/bridge/br_netfilter_hooks.c 	if (!nf_bridge_alloc(skb))
skb               522 net/bridge/br_netfilter_hooks.c 	if (!setup_pre_routing(skb, state->net))
skb               525 net/bridge/br_netfilter_hooks.c 	nf_bridge = nf_bridge_info_get(skb);
skb               526 net/bridge/br_netfilter_hooks.c 	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
skb               528 net/bridge/br_netfilter_hooks.c 	skb->protocol = htons(ETH_P_IP);
skb               529 net/bridge/br_netfilter_hooks.c 	skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
skb               531 net/bridge/br_netfilter_hooks.c 	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
skb               532 net/bridge/br_netfilter_hooks.c 		skb->dev, NULL,
skb               540 net/bridge/br_netfilter_hooks.c static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               542 net/bridge/br_netfilter_hooks.c 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               545 net/bridge/br_netfilter_hooks.c 	if (!IS_ARP(skb) && !is_vlan_arp(skb, net)) {
skb               547 net/bridge/br_netfilter_hooks.c 		if (skb->protocol == htons(ETH_P_IP))
skb               548 net/bridge/br_netfilter_hooks.c 			nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
skb               550 net/bridge/br_netfilter_hooks.c 		if (skb->protocol == htons(ETH_P_IPV6))
skb               551 net/bridge/br_netfilter_hooks.c 			nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
skb               555 net/bridge/br_netfilter_hooks.c 			skb->pkt_type = PACKET_OTHERHOST;
skb               558 net/bridge/br_netfilter_hooks.c 		nf_bridge_update_protocol(skb);
skb               560 net/bridge/br_netfilter_hooks.c 		in = *((struct net_device **)(skb->cb));
skb               562 net/bridge/br_netfilter_hooks.c 	nf_bridge_push_encap_header(skb);
skb               564 net/bridge/br_netfilter_hooks.c 	br_nf_hook_thresh(NF_BR_FORWARD, net, sk, skb, in, skb->dev,
skb               576 net/bridge/br_netfilter_hooks.c 				     struct sk_buff *skb,
skb               583 net/bridge/br_netfilter_hooks.c 	nf_bridge = nf_bridge_info_get(skb);
skb               589 net/bridge/br_netfilter_hooks.c 	if (!nf_bridge_unshare(skb))
skb               592 net/bridge/br_netfilter_hooks.c 	nf_bridge = nf_bridge_info_get(skb);
skb               600 net/bridge/br_netfilter_hooks.c 	if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
skb               601 net/bridge/br_netfilter_hooks.c 	    is_pppoe_ip(skb, state->net))
skb               603 net/bridge/br_netfilter_hooks.c 	else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
skb               604 net/bridge/br_netfilter_hooks.c 		 is_pppoe_ipv6(skb, state->net))
skb               609 net/bridge/br_netfilter_hooks.c 	nf_bridge_pull_encap_header(skb);
skb               611 net/bridge/br_netfilter_hooks.c 	if (skb->pkt_type == PACKET_OTHERHOST) {
skb               612 net/bridge/br_netfilter_hooks.c 		skb->pkt_type = PACKET_HOST;
skb               617 net/bridge/br_netfilter_hooks.c 		if (br_validate_ipv4(state->net, skb))
skb               619 net/bridge/br_netfilter_hooks.c 		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
skb               623 net/bridge/br_netfilter_hooks.c 		if (br_validate_ipv6(state->net, skb))
skb               625 net/bridge/br_netfilter_hooks.c 		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
skb               628 net/bridge/br_netfilter_hooks.c 	nf_bridge->physoutdev = skb->dev;
skb               630 net/bridge/br_netfilter_hooks.c 		skb->protocol = htons(ETH_P_IP);
skb               632 net/bridge/br_netfilter_hooks.c 		skb->protocol = htons(ETH_P_IPV6);
skb               634 net/bridge/br_netfilter_hooks.c 	NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
skb               635 net/bridge/br_netfilter_hooks.c 		brnf_get_logical_dev(skb, state->in, state->net),
skb               642 net/bridge/br_netfilter_hooks.c 				      struct sk_buff *skb,
skb               647 net/bridge/br_netfilter_hooks.c 	struct net_device **d = (struct net_device **)(skb->cb);
skb               659 net/bridge/br_netfilter_hooks.c 	if (!IS_ARP(skb)) {
skb               660 net/bridge/br_netfilter_hooks.c 		if (!is_vlan_arp(skb, state->net))
skb               662 net/bridge/br_netfilter_hooks.c 		nf_bridge_pull_encap_header(skb);
skb               665 net/bridge/br_netfilter_hooks.c 	if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
skb               668 net/bridge/br_netfilter_hooks.c 	if (arp_hdr(skb)->ar_pln != 4) {
skb               669 net/bridge/br_netfilter_hooks.c 		if (is_vlan_arp(skb, state->net))
skb               670 net/bridge/br_netfilter_hooks.c 			nf_bridge_push_encap_header(skb);
skb               674 net/bridge/br_netfilter_hooks.c 	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
skb               680 net/bridge/br_netfilter_hooks.c static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               686 net/bridge/br_netfilter_hooks.c 	err = skb_cow_head(skb, data->size);
skb               689 net/bridge/br_netfilter_hooks.c 		kfree_skb(skb);
skb               694 net/bridge/br_netfilter_hooks.c 		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);
skb               696 net/bridge/br_netfilter_hooks.c 	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
skb               697 net/bridge/br_netfilter_hooks.c 	__skb_push(skb, data->encap_size);
skb               699 net/bridge/br_netfilter_hooks.c 	nf_bridge_info_free(skb);
skb               700 net/bridge/br_netfilter_hooks.c 	return br_dev_queue_push_xmit(net, sk, skb);
skb               704 net/bridge/br_netfilter_hooks.c br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb               707 net/bridge/br_netfilter_hooks.c 	unsigned int mtu = ip_skb_dst_mtu(sk, skb);
skb               708 net/bridge/br_netfilter_hooks.c 	struct iphdr *iph = ip_hdr(skb);
skb               710 net/bridge/br_netfilter_hooks.c 	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
skb               711 net/bridge/br_netfilter_hooks.c 		     (IPCB(skb)->frag_max_size &&
skb               712 net/bridge/br_netfilter_hooks.c 		      IPCB(skb)->frag_max_size > mtu))) {
skb               714 net/bridge/br_netfilter_hooks.c 		kfree_skb(skb);
skb               718 net/bridge/br_netfilter_hooks.c 	return ip_do_fragment(net, sk, skb, output);
skb               721 net/bridge/br_netfilter_hooks.c static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
skb               723 net/bridge/br_netfilter_hooks.c 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               730 net/bridge/br_netfilter_hooks.c static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               732 net/bridge/br_netfilter_hooks.c 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               735 net/bridge/br_netfilter_hooks.c 	mtu_reserved = nf_bridge_mtu_reduction(skb);
skb               736 net/bridge/br_netfilter_hooks.c 	mtu = skb->dev->mtu;
skb               741 net/bridge/br_netfilter_hooks.c 	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
skb               742 net/bridge/br_netfilter_hooks.c 		nf_bridge_info_free(skb);
skb               743 net/bridge/br_netfilter_hooks.c 		return br_dev_queue_push_xmit(net, sk, skb);
skb               750 net/bridge/br_netfilter_hooks.c 	    skb->protocol == htons(ETH_P_IP)) {
skb               753 net/bridge/br_netfilter_hooks.c 		if (br_validate_ipv4(net, skb))
skb               756 net/bridge/br_netfilter_hooks.c 		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
skb               758 net/bridge/br_netfilter_hooks.c 		nf_bridge_update_protocol(skb);
skb               762 net/bridge/br_netfilter_hooks.c 		if (skb_vlan_tag_present(skb)) {
skb               763 net/bridge/br_netfilter_hooks.c 			data->vlan_tci = skb->vlan_tci;
skb               764 net/bridge/br_netfilter_hooks.c 			data->vlan_proto = skb->vlan_proto;
skb               769 net/bridge/br_netfilter_hooks.c 		data->encap_size = nf_bridge_encap_header_len(skb);
skb               772 net/bridge/br_netfilter_hooks.c 		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
skb               775 net/bridge/br_netfilter_hooks.c 		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
skb               778 net/bridge/br_netfilter_hooks.c 	    skb->protocol == htons(ETH_P_IPV6)) {
skb               782 net/bridge/br_netfilter_hooks.c 		if (br_validate_ipv6(net, skb))
skb               785 net/bridge/br_netfilter_hooks.c 		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
skb               787 net/bridge/br_netfilter_hooks.c 		nf_bridge_update_protocol(skb);
skb               790 net/bridge/br_netfilter_hooks.c 		data->encap_size = nf_bridge_encap_header_len(skb);
skb               793 net/bridge/br_netfilter_hooks.c 		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
skb               797 net/bridge/br_netfilter_hooks.c 			return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
skb               799 net/bridge/br_netfilter_hooks.c 		kfree_skb(skb);
skb               802 net/bridge/br_netfilter_hooks.c 	nf_bridge_info_free(skb);
skb               803 net/bridge/br_netfilter_hooks.c 	return br_dev_queue_push_xmit(net, sk, skb);
skb               805 net/bridge/br_netfilter_hooks.c 	kfree_skb(skb);
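
br_nf_dev_queue_xmit() above must fragment over-MTU IPv4/IPv6 packets even though the bridge handles full L2 frames, so it strips and saves the MAC (plus any VLAN/PPPoE) header, lets ip_do_fragment() work on the bare IP packet, and re-attaches the saved header to every fragment in br_nf_push_frag_xmit(). The save area cannot live in skb->cb across fragmentation (IPCB occupies it), hence the per-CPU stash. The sketch below is a simplified assumption that saves a plain Ethernet header only.

	#include <linux/etherdevice.h>
	#include <linux/percpu.h>
	#include <net/ip.h>

	struct demo_frag_data {
		char mac[ETH_HLEN];		/* real code also saves encap bytes */
	};
	static DEFINE_PER_CPU(struct demo_frag_data, demo_frag_storage);

	static int demo_push_frag_xmit(struct net *net, struct sock *sk,
				       struct sk_buff *skb)
	{
		struct demo_frag_data *data = this_cpu_ptr(&demo_frag_storage);

		if (skb_cow_head(skb, ETH_HLEN)) {	/* headroom for the header */
			kfree_skb(skb);
			return -ENOMEM;
		}
		skb_copy_to_linear_data_offset(skb, -ETH_HLEN, data->mac, ETH_HLEN);
		__skb_push(skb, ETH_HLEN);
		return dev_queue_xmit(skb);
	}

	static int demo_fragment(struct net *net, struct sock *sk,
				 struct sk_buff *skb)
	{
		struct demo_frag_data *data = this_cpu_ptr(&demo_frag_storage);

		/* skb->data is at the IP header; the MAC header sits just behind */
		skb_copy_from_linear_data_offset(skb, -ETH_HLEN, data->mac, ETH_HLEN);
		return ip_do_fragment(net, sk, skb, demo_push_frag_xmit);
	}
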
skb               811 net/bridge/br_netfilter_hooks.c 				       struct sk_buff *skb,
skb               814 net/bridge/br_netfilter_hooks.c 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               815 net/bridge/br_netfilter_hooks.c 	struct net_device *realoutdev = bridge_parent(skb->dev);
skb               829 net/bridge/br_netfilter_hooks.c 	if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
skb               830 net/bridge/br_netfilter_hooks.c 	    is_pppoe_ip(skb, state->net))
skb               832 net/bridge/br_netfilter_hooks.c 	else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
skb               833 net/bridge/br_netfilter_hooks.c 		 is_pppoe_ipv6(skb, state->net))
skb               840 net/bridge/br_netfilter_hooks.c 	if (skb->pkt_type == PACKET_OTHERHOST) {
skb               841 net/bridge/br_netfilter_hooks.c 		skb->pkt_type = PACKET_HOST;
skb               845 net/bridge/br_netfilter_hooks.c 	nf_bridge_pull_encap_header(skb);
skb               847 net/bridge/br_netfilter_hooks.c 		skb->protocol = htons(ETH_P_IP);
skb               849 net/bridge/br_netfilter_hooks.c 		skb->protocol = htons(ETH_P_IPV6);
skb               851 net/bridge/br_netfilter_hooks.c 	NF_HOOK(pf, NF_INET_POST_ROUTING, state->net, state->sk, skb,
skb               862 net/bridge/br_netfilter_hooks.c 				   struct sk_buff *skb,
skb               865 net/bridge/br_netfilter_hooks.c 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               868 net/bridge/br_netfilter_hooks.c 	    !netif_is_l3_master(skb->dev) &&
skb               869 net/bridge/br_netfilter_hooks.c 	    !netif_is_l3_slave(skb->dev)) {
skb               870 net/bridge/br_netfilter_hooks.c 		state->okfn(state->net, state->sk, skb);
skb               886 net/bridge/br_netfilter_hooks.c static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
skb               888 net/bridge/br_netfilter_hooks.c 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               890 net/bridge/br_netfilter_hooks.c 	skb_pull(skb, ETH_HLEN);
skb               895 net/bridge/br_netfilter_hooks.c 	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
skb               898 net/bridge/br_netfilter_hooks.c 	skb->dev = nf_bridge->physindev;
skb               901 net/bridge/br_netfilter_hooks.c 	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
skb               904 net/bridge/br_netfilter_hooks.c static int br_nf_dev_xmit(struct sk_buff *skb)
skb               906 net/bridge/br_netfilter_hooks.c 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               909 net/bridge/br_netfilter_hooks.c 		br_nf_pre_routing_finish_bridge_slow(skb);
skb               996 net/bridge/br_netfilter_hooks.c 		      struct sock *sk, struct sk_buff *skb,
skb              1010 net/bridge/br_netfilter_hooks.c 		return okfn(net, sk, skb);
skb              1020 net/bridge/br_netfilter_hooks.c 	ret = nf_hook_slow(skb, &state, e, i);
skb              1022 net/bridge/br_netfilter_hooks.c 		ret = okfn(net, sk, skb);
skb                46 net/bridge/br_netfilter_ipv6.c static int br_nf_check_hbh_len(struct sk_buff *skb)
skb                48 net/bridge/br_netfilter_ipv6.c 	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
skb                50 net/bridge/br_netfilter_ipv6.c 	const unsigned char *nh = skb_network_header(skb);
skb                54 net/bridge/br_netfilter_ipv6.c 	if ((raw + len) - skb->data > skb_headlen(skb))
skb                76 net/bridge/br_netfilter_ipv6.c 			    ipv6_hdr(skb)->payload_len)
skb                78 net/bridge/br_netfilter_ipv6.c 			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
skb                80 net/bridge/br_netfilter_ipv6.c 			if (pskb_trim_rcsum(skb,
skb                83 net/bridge/br_netfilter_ipv6.c 			nh = skb_network_header(skb);
skb                99 net/bridge/br_netfilter_ipv6.c int br_validate_ipv6(struct net *net, struct sk_buff *skb)
skb               102 net/bridge/br_netfilter_ipv6.c 	struct inet6_dev *idev = __in6_dev_get(skb->dev);
skb               106 net/bridge/br_netfilter_ipv6.c 	if (!pskb_may_pull(skb, ip6h_len))
skb               109 net/bridge/br_netfilter_ipv6.c 	if (skb->len < ip6h_len)
skb               112 net/bridge/br_netfilter_ipv6.c 	hdr = ipv6_hdr(skb);
skb               120 net/bridge/br_netfilter_ipv6.c 		if (pkt_len + ip6h_len > skb->len) {
skb               125 net/bridge/br_netfilter_ipv6.c 		if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) {
skb               130 net/bridge/br_netfilter_ipv6.c 		hdr = ipv6_hdr(skb);
skb               132 net/bridge/br_netfilter_ipv6.c 	if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
skb               135 net/bridge/br_netfilter_ipv6.c 	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
skb               148 net/bridge/br_netfilter_ipv6.c br_nf_ipv6_daddr_was_changed(const struct sk_buff *skb,
skb               151 net/bridge/br_netfilter_ipv6.c 	return memcmp(&nf_bridge->ipv6_daddr, &ipv6_hdr(skb)->daddr,
skb               152 net/bridge/br_netfilter_ipv6.c 		      sizeof(ipv6_hdr(skb)->daddr)) != 0;
skb               160 net/bridge/br_netfilter_ipv6.c static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               162 net/bridge/br_netfilter_ipv6.c 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb               164 net/bridge/br_netfilter_ipv6.c 	struct net_device *dev = skb->dev;
skb               167 net/bridge/br_netfilter_ipv6.c 	nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
skb               170 net/bridge/br_netfilter_ipv6.c 		skb->pkt_type = PACKET_OTHERHOST;
skb               174 net/bridge/br_netfilter_ipv6.c 	if (br_nf_ipv6_daddr_was_changed(skb, nf_bridge)) {
skb               175 net/bridge/br_netfilter_ipv6.c 		skb_dst_drop(skb);
skb               176 net/bridge/br_netfilter_ipv6.c 		v6ops->route_input(skb);
skb               178 net/bridge/br_netfilter_ipv6.c 		if (skb_dst(skb)->error) {
skb               179 net/bridge/br_netfilter_ipv6.c 			kfree_skb(skb);
skb               183 net/bridge/br_netfilter_ipv6.c 		if (skb_dst(skb)->dev == dev) {
skb               184 net/bridge/br_netfilter_ipv6.c 			skb->dev = nf_bridge->physindev;
skb               185 net/bridge/br_netfilter_ipv6.c 			nf_bridge_update_protocol(skb);
skb               186 net/bridge/br_netfilter_ipv6.c 			nf_bridge_push_encap_header(skb);
skb               188 net/bridge/br_netfilter_ipv6.c 					  net, sk, skb, skb->dev, NULL,
skb               192 net/bridge/br_netfilter_ipv6.c 		ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
skb               193 net/bridge/br_netfilter_ipv6.c 		skb->pkt_type = PACKET_HOST;
skb               197 net/bridge/br_netfilter_ipv6.c 			kfree_skb(skb);
skb               200 net/bridge/br_netfilter_ipv6.c 		skb_dst_set_noref(skb, &rt->dst);
skb               203 net/bridge/br_netfilter_ipv6.c 	skb->dev = nf_bridge->physindev;
skb               204 net/bridge/br_netfilter_ipv6.c 	nf_bridge_update_protocol(skb);
skb               205 net/bridge/br_netfilter_ipv6.c 	nf_bridge_push_encap_header(skb);
skb               206 net/bridge/br_netfilter_ipv6.c 	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb,
skb               207 net/bridge/br_netfilter_ipv6.c 			  skb->dev, NULL, br_handle_frame_finish);
skb               216 net/bridge/br_netfilter_ipv6.c 				    struct sk_buff *skb,
skb               221 net/bridge/br_netfilter_ipv6.c 	if (br_validate_ipv6(state->net, skb))
skb               224 net/bridge/br_netfilter_ipv6.c 	nf_bridge = nf_bridge_alloc(skb);
skb               227 net/bridge/br_netfilter_ipv6.c 	if (!setup_pre_routing(skb, state->net))
skb               230 net/bridge/br_netfilter_ipv6.c 	nf_bridge = nf_bridge_info_get(skb);
skb               231 net/bridge/br_netfilter_ipv6.c 	nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
skb               233 net/bridge/br_netfilter_ipv6.c 	skb->protocol = htons(ETH_P_IPV6);
skb               234 net/bridge/br_netfilter_ipv6.c 	skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
skb               236 net/bridge/br_netfilter_ipv6.c 	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
skb               237 net/bridge/br_netfilter_ipv6.c 		skb->dev, NULL,
skb               172 net/bridge/br_netlink.c static int br_port_fill_attrs(struct sk_buff *skb,
skb               179 net/bridge/br_netlink.c 	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
skb               180 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
skb               181 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
skb               182 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
skb               183 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
skb               184 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
skb               186 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
skb               188 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
skb               190 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
skb               191 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
skb               193 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
skb               195 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
skb               197 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
skb               198 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
skb               200 net/bridge/br_netlink.c 	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
skb               202 net/bridge/br_netlink.c 	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
skb               204 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
skb               205 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
skb               206 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
skb               207 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
skb               208 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
skb               210 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
skb               211 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
skb               213 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
skb               214 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
skb               216 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
skb               220 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
skb               224 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
skb               228 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
skb               233 net/bridge/br_netlink.c 	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
skb               242 net/bridge/br_netlink.c 		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
skb               249 net/bridge/br_netlink.c static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
skb               258 net/bridge/br_netlink.c 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
skb               264 net/bridge/br_netlink.c 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
skb               270 net/bridge/br_netlink.c 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
skb               281 net/bridge/br_netlink.c static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
skb               311 net/bridge/br_netlink.c 			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
skb               326 net/bridge/br_netlink.c 		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
skb               336 net/bridge/br_netlink.c static int br_fill_ifvlaninfo(struct sk_buff *skb,
skb               356 net/bridge/br_netlink.c 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
skb               371 net/bridge/br_netlink.c static int br_fill_ifinfo(struct sk_buff *skb,
skb               389 net/bridge/br_netlink.c 	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
skb               401 net/bridge/br_netlink.c 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
skb               402 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
skb               403 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
skb               404 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
skb               406 net/bridge/br_netlink.c 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
skb               408 net/bridge/br_netlink.c 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
skb               414 net/bridge/br_netlink.c 		nest = nla_nest_start(skb, IFLA_PROTINFO);
skb               415 net/bridge/br_netlink.c 		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
skb               417 net/bridge/br_netlink.c 		nla_nest_end(skb, nest);
skb               438 net/bridge/br_netlink.c 		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
skb               444 net/bridge/br_netlink.c 			err = br_fill_ifvlaninfo_compressed(skb, vg);
skb               446 net/bridge/br_netlink.c 			err = br_fill_ifvlaninfo(skb, vg);
skb               449 net/bridge/br_netlink.c 			err = br_fill_vlan_tunnel_info(skb, vg);
skb               453 net/bridge/br_netlink.c 		nla_nest_end(skb, af);
skb               457 net/bridge/br_netlink.c 	nlmsg_end(skb, nlh);
skb               461 net/bridge/br_netlink.c 	nlmsg_cancel(skb, nlh);
skb               471 net/bridge/br_netlink.c 	struct sk_buff *skb;
skb               490 net/bridge/br_netlink.c 	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
skb               491 net/bridge/br_netlink.c 	if (skb == NULL)
skb               494 net/bridge/br_netlink.c 	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev);
skb               498 net/bridge/br_netlink.c 		kfree_skb(skb);
skb               501 net/bridge/br_netlink.c 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
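
br_ifinfo_notify() above is an instance of the standard rtnetlink notification pattern: size and allocate a message, have a fill routine (here br_fill_ifinfo()) build the netlink header plus attributes, then multicast it to RTNLGRP_LINK listeners, reporting allocation failure through rtnl_set_sk_err() so userspace sees ENOBUFS instead of silently missing an event. A minimal, assumed version with a stub one-attribute fill step:

	#include <linux/rtnetlink.h>
	#include <net/netlink.h>

	static void demo_notify(struct net *net, struct net_device *dev)
	{
		struct sk_buff *skb;
		struct nlmsghdr *nlh;
		struct ifinfomsg *hdr;

		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
		if (!skb)
			goto errout;

		nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*hdr), 0);
		if (!nlh)
			goto errout_free;
		hdr = nlmsg_data(nlh);
		hdr->ifi_family = AF_BRIDGE;
		hdr->ifi_type = dev->type;
		hdr->ifi_index = dev->ifindex;
		hdr->ifi_flags = dev_get_flags(dev);
		hdr->ifi_change = 0;

		if (nla_put_u32(skb, IFLA_MTU, dev->mtu)) {	/* stub fill */
			nlmsg_cancel(skb, nlh);
			goto errout_free;
		}
		nlmsg_end(skb, nlh);

		rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
		return;

	errout_free:
		kfree_skb(skb);
	errout:
		rtnl_set_sk_err(net, RTNLGRP_LINK, -ENOBUFS);
	}
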
skb               510 net/bridge/br_netlink.c int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
skb               519 net/bridge/br_netlink.c 	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
skb               994 net/bridge/br_netlink.c static int br_port_fill_slave_info(struct sk_buff *skb,
skb               998 net/bridge/br_netlink.c 	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
skb              1387 net/bridge/br_netlink.c static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
skb              1401 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
skb              1404 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
skb              1407 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
skb              1411 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
skb              1415 net/bridge/br_netlink.c 	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
skb              1416 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
skb              1417 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
skb              1418 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
skb              1419 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
skb              1420 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
skb              1421 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
skb              1422 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
skb              1423 net/bridge/br_netlink.c 	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
skb              1425 net/bridge/br_netlink.c 	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
skb              1427 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
skb              1428 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
skb              1429 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
skb              1430 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
skb              1432 net/bridge/br_netlink.c 	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
skb              1433 net/bridge/br_netlink.c 	    nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
skb              1437 net/bridge/br_netlink.c 	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
skb              1438 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
skb              1439 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
skb              1441 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
skb              1446 net/bridge/br_netlink.c 	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
skb              1447 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
skb              1449 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
skb              1451 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
skb              1453 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
skb              1455 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
skb              1456 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
skb              1457 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
skb              1459 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
skb              1461 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
skb              1465 net/bridge/br_netlink.c 	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
skb              1470 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
skb              1474 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
skb              1478 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
skb              1482 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
skb              1486 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
skb              1490 net/bridge/br_netlink.c 	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
skb              1495 net/bridge/br_netlink.c 	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
skb              1497 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
skb              1499 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
skb              1542 net/bridge/br_netlink.c static int br_fill_linkxstats(struct sk_buff *skb,
skb              1570 net/bridge/br_netlink.c 	nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
skb              1595 net/bridge/br_netlink.c 			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
skb              1602 net/bridge/br_netlink.c 		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
skb              1610 net/bridge/br_netlink.c 	nla_nest_end(skb, nest);
skb              1616 net/bridge/br_netlink.c 	nla_nest_end(skb, nest);
skb                90 net/bridge/br_netlink_tunnel.c static int br_fill_vlan_tinfo(struct sk_buff *skb, u16 vid,
skb                96 net/bridge/br_netlink_tunnel.c 	tmap = nla_nest_start_noflag(skb, IFLA_BRIDGE_VLAN_TUNNEL_INFO);
skb                99 net/bridge/br_netlink_tunnel.c 	if (nla_put_u32(skb, IFLA_BRIDGE_VLAN_TUNNEL_ID,
skb               102 net/bridge/br_netlink_tunnel.c 	if (nla_put_u16(skb, IFLA_BRIDGE_VLAN_TUNNEL_VID,
skb               105 net/bridge/br_netlink_tunnel.c 	if (nla_put_u16(skb, IFLA_BRIDGE_VLAN_TUNNEL_FLAGS,
skb               108 net/bridge/br_netlink_tunnel.c 	nla_nest_end(skb, tmap);
skb               113 net/bridge/br_netlink_tunnel.c 	nla_nest_cancel(skb, tmap);
skb               118 net/bridge/br_netlink_tunnel.c static int br_fill_vlan_tinfo_range(struct sk_buff *skb,
skb               126 net/bridge/br_netlink_tunnel.c 		err = br_fill_vlan_tinfo(skb, vtbegin->vid,
skb               132 net/bridge/br_netlink_tunnel.c 		err = br_fill_vlan_tinfo(skb, vtend->vid,
skb               138 net/bridge/br_netlink_tunnel.c 		err = br_fill_vlan_tinfo(skb, vtbegin->vid,
skb               148 net/bridge/br_netlink_tunnel.c int br_fill_vlan_tunnel_info(struct sk_buff *skb,
skb               172 net/bridge/br_netlink_tunnel.c 			err = br_fill_vlan_tinfo_range(skb, vtbegin, vtend);
skb               182 net/bridge/br_netlink_tunnel.c 		err = br_fill_vlan_tinfo_range(skb, vtbegin, vtend);
skb                25 net/bridge/br_nf_core.c 			     struct sk_buff *skb, u32 mtu,
skb                31 net/bridge/br_nf_core.c 			  struct sk_buff *skb)
skb                41 net/bridge/br_nf_core.c 					   struct sk_buff *skb,
skb               517 net/bridge/br_private.h netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
skb               520 net/bridge/br_private.h 				       struct sk_buff *skb)
skb               525 net/bridge/br_private.h 		netpoll_send_skb(np, skb);
skb               532 net/bridge/br_private.h 				       struct sk_buff *skb)
skb               576 net/bridge/br_private.h int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb               578 net/bridge/br_private.h int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev,
skb               598 net/bridge/br_private.h int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               599 net/bridge/br_private.h void br_forward(const struct net_bridge_port *to, struct sk_buff *skb,
skb               601 net/bridge/br_private.h int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               602 net/bridge/br_private.h void br_flood(struct net_bridge *br, struct sk_buff *skb,
skb               607 net/bridge/br_private.h 				   const struct sk_buff *skb)
skb               609 net/bridge/br_private.h 	return BR_INPUT_SKB_CB(skb)->src_port_isolated &&
skb               628 net/bridge/br_private.h int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
skb               660 net/bridge/br_private.h 		     struct sk_buff *skb, u16 vid);
skb               662 net/bridge/br_private.h 					struct sk_buff *skb, u16 vid);
skb               672 net/bridge/br_private.h 			struct sk_buff *skb, bool local_rcv, bool local_orig);
skb               697 net/bridge/br_private.h 			const struct sk_buff *skb, u8 type, u8 dir);
skb               755 net/bridge/br_private.h static inline int br_multicast_igmp_type(const struct sk_buff *skb)
skb               757 net/bridge/br_private.h 	return BR_INPUT_SKB_CB(skb)->igmp;
skb               762 net/bridge/br_private.h 				   struct sk_buff *skb,
skb               769 net/bridge/br_private.h 						      struct sk_buff *skb, u16 vid)
skb               808 net/bridge/br_private.h 				      struct sk_buff *skb,
skb               843 net/bridge/br_private.h 				      const struct sk_buff *skb,
skb               857 net/bridge/br_private.h static inline int br_multicast_igmp_type(const struct sk_buff *skb)
skb               866 net/bridge/br_private.h 			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
skb               869 net/bridge/br_private.h 		       const struct sk_buff *skb);
skb               870 net/bridge/br_private.h bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
skb               874 net/bridge/br_private.h 			       struct sk_buff *skb);
skb               930 net/bridge/br_private.h static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
skb               934 net/bridge/br_private.h 	if (skb_vlan_tag_present(skb)) {
skb               935 net/bridge/br_private.h 		*vid = skb_vlan_tag_get_id(skb);
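
The inline above relies on VLAN acceleration having already run: by the time the bridge sees the frame, the 802.1Q tag lives in skb metadata rather than in the packet bytes, and skb_vlan_tag_get_id() masks the TCI down to the 12 VID bits. A small usage sketch, where the "0 means untagged" convention is an assumption of this example rather than the header's contract:

	static u16 demo_ingress_vid(const struct sk_buff *skb)
	{
		u16 vid = 0;

		if (skb_vlan_tag_present(skb))
			vid = skb_vlan_tag_get_id(skb);	/* TCI & VLAN_VID_MASK */

		return vid;	/* 0: no tag in metadata (assumed convention) */
	}
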
skb               956 net/bridge/br_private.h 				      struct sk_buff *skb,
skb               963 net/bridge/br_private.h 				     const struct sk_buff *skb)
skb               969 net/bridge/br_private.h 				   struct sk_buff *skb, u16 *vid)
skb               977 net/bridge/br_private.h 					     struct sk_buff *skb)
skb               979 net/bridge/br_private.h 	return skb;
skb              1035 net/bridge/br_private.h static inline u16 br_vlan_get_tag(const struct sk_buff *skb, u16 *tag)
skb              1099 net/bridge/br_private.h 	int (*br_dev_xmit_hook)(struct sk_buff *skb);
skb              1143 net/bridge/br_private.h void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
skb              1165 net/bridge/br_private.h int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
skb              1190 net/bridge/br_private.h 			      struct sk_buff *skb);
skb              1192 net/bridge/br_private.h 				  const struct sk_buff *skb);
skb              1202 net/bridge/br_private.h static inline void br_switchdev_frame_unmark(struct sk_buff *skb)
skb              1204 net/bridge/br_private.h 	skb->offload_fwd_mark = 0;
skb              1213 net/bridge/br_private.h 					    struct sk_buff *skb)
skb              1218 net/bridge/br_private.h 						const struct sk_buff *skb)
skb              1247 net/bridge/br_private.h static inline void br_switchdev_frame_unmark(struct sk_buff *skb)
skb              1254 net/bridge/br_private.h void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
skb              1256 net/bridge/br_private.h void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
skb              1258 net/bridge/br_private.h struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m);
skb                28 net/bridge/br_private_tunnel.h int br_fill_vlan_tunnel_info(struct sk_buff *skb,
skb                40 net/bridge/br_private_tunnel.h int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
skb                43 net/bridge/br_private_tunnel.h int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
skb                72 net/bridge/br_private_tunnel.h static inline int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
skb                30 net/bridge/br_stp_bpdu.c 			       struct sk_buff *skb)
skb                32 net/bridge/br_stp_bpdu.c 	return dev_queue_xmit(skb);
skb                38 net/bridge/br_stp_bpdu.c 	struct sk_buff *skb;
skb                40 net/bridge/br_stp_bpdu.c 	skb = dev_alloc_skb(length+LLC_RESERVE);
skb                41 net/bridge/br_stp_bpdu.c 	if (!skb)
skb                44 net/bridge/br_stp_bpdu.c 	skb->dev = p->dev;
skb                45 net/bridge/br_stp_bpdu.c 	skb->protocol = htons(ETH_P_802_2);
skb                46 net/bridge/br_stp_bpdu.c 	skb->priority = TC_PRIO_CONTROL;
skb                48 net/bridge/br_stp_bpdu.c 	skb_reserve(skb, LLC_RESERVE);
skb                49 net/bridge/br_stp_bpdu.c 	__skb_put_data(skb, data, length);
skb                51 net/bridge/br_stp_bpdu.c 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
skb                53 net/bridge/br_stp_bpdu.c 	llc_pdu_init_as_ui_cmd(skb);
skb                55 net/bridge/br_stp_bpdu.c 	llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
skb                57 net/bridge/br_stp_bpdu.c 	skb_reset_mac_header(skb);
skb                60 net/bridge/br_stp_bpdu.c 		dev_net(p->dev), NULL, skb, NULL, skb->dev,
skb               143 net/bridge/br_stp_bpdu.c void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
skb               150 net/bridge/br_stp_bpdu.c 	if (!pskb_may_pull(skb, 4))
skb               154 net/bridge/br_stp_bpdu.c 	buf = skb->data;
skb               174 net/bridge/br_stp_bpdu.c 	if (!ether_addr_equal(eth_hdr(skb)->h_dest, br->group_addr))
skb               184 net/bridge/br_stp_bpdu.c 	buf = skb_pull(skb, 3);
skb               189 net/bridge/br_stp_bpdu.c 		if (!pskb_may_pull(skb, 32))
skb               192 net/bridge/br_stp_bpdu.c 		buf = skb->data;
skb               230 net/bridge/br_stp_bpdu.c 					  eth_hdr(skb)->h_source,
skb               242 net/bridge/br_stp_bpdu.c 	kfree_skb(skb);
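
The br_stp_bpdu.c hits show the usual shape for hand-building an outbound control frame: allocate with headroom, mark the priority, reserve, then append the payload. A condensed sketch under those assumptions (alloc_ctrl_frame() and CTRL_HEADROOM are illustrative stand-ins; the LLC/MAC header pushes from the original are left to the caller):

	#include <linux/netdevice.h>
	#include <linux/pkt_sched.h>
	#include <linux/skbuff.h>

	#define CTRL_HEADROOM	32	/* assumed reserve; stands in for LLC_RESERVE */

	static struct sk_buff *alloc_ctrl_frame(struct net_device *dev,
						const void *data, size_t len)
	{
		struct sk_buff *skb = dev_alloc_skb(len + CTRL_HEADROOM);

		if (!skb)
			return NULL;

		skb->dev = dev;
		skb->priority = TC_PRIO_CONTROL;	/* queue ahead of bulk traffic */
		skb_reserve(skb, CTRL_HEADROOM);	/* headers are pushed here later */
		__skb_put_data(skb, data, len);		/* payload lands after the headroom */

		return skb;	/* caller pushes LLC/MAC headers, then transmits */
	}
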
skb                44 net/bridge/br_switchdev.c 			      struct sk_buff *skb)
skb                46 net/bridge/br_switchdev.c 	if (skb->offload_fwd_mark && !WARN_ON_ONCE(!p->offload_fwd_mark))
skb                47 net/bridge/br_switchdev.c 		BR_INPUT_SKB_CB(skb)->offload_fwd_mark = p->offload_fwd_mark;
skb                51 net/bridge/br_switchdev.c 				  const struct sk_buff *skb)
skb                53 net/bridge/br_switchdev.c 	return !skb->offload_fwd_mark ||
skb                54 net/bridge/br_switchdev.c 	       BR_INPUT_SKB_CB(skb)->offload_fwd_mark != p->offload_fwd_mark;
skb               395 net/bridge/br_vlan.c 			       struct sk_buff *skb)
skb               402 net/bridge/br_vlan.c 	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
skb               409 net/bridge/br_vlan.c 	br_vlan_get_tag(skb, &vid);
skb               417 net/bridge/br_vlan.c 		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
skb               420 net/bridge/br_vlan.c 			kfree_skb(skb);
skb               427 net/bridge/br_vlan.c 		stats->tx_bytes += skb->len;
skb               433 net/bridge/br_vlan.c 		__vlan_hwaccel_clear_tag(skb);
skb               436 net/bridge/br_vlan.c 	    br_handle_egress_vlan_tunnel(skb, v)) {
skb               437 net/bridge/br_vlan.c 		kfree_skb(skb);
skb               441 net/bridge/br_vlan.c 	return skb;
skb               447 net/bridge/br_vlan.c 			      struct sk_buff *skb, u16 *vid)
skb               453 net/bridge/br_vlan.c 	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
skb               458 net/bridge/br_vlan.c 	if (unlikely(!skb_vlan_tag_present(skb) &&
skb               459 net/bridge/br_vlan.c 		     skb->protocol == br->vlan_proto)) {
skb               460 net/bridge/br_vlan.c 		skb = skb_vlan_untag(skb);
skb               461 net/bridge/br_vlan.c 		if (unlikely(!skb))
skb               465 net/bridge/br_vlan.c 	if (!br_vlan_get_tag(skb, vid)) {
skb               467 net/bridge/br_vlan.c 		if (skb->vlan_proto != br->vlan_proto) {
skb               469 net/bridge/br_vlan.c 			skb_push(skb, ETH_HLEN);
skb               470 net/bridge/br_vlan.c 			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
skb               471 net/bridge/br_vlan.c 							skb_vlan_tag_get(skb));
skb               472 net/bridge/br_vlan.c 			if (unlikely(!skb))
skb               475 net/bridge/br_vlan.c 			skb_pull(skb, ETH_HLEN);
skb               476 net/bridge/br_vlan.c 			skb_reset_mac_len(skb);
skb               503 net/bridge/br_vlan.c 			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
skb               510 net/bridge/br_vlan.c 			skb->vlan_tci |= pvid;
skb               523 net/bridge/br_vlan.c 		stats->rx_bytes += skb->len;
skb               531 net/bridge/br_vlan.c 	kfree_skb(skb);
skb               536 net/bridge/br_vlan.c 			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
skb               543 net/bridge/br_vlan.c 		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
skb               547 net/bridge/br_vlan.c 	return __allowed_ingress(br, vg, skb, vid);
skb               552 net/bridge/br_vlan.c 		       const struct sk_buff *skb)
skb               558 net/bridge/br_vlan.c 	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
skb               561 net/bridge/br_vlan.c 	br_vlan_get_tag(skb, &vid);
skb               570 net/bridge/br_vlan.c bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
skb               583 net/bridge/br_vlan.c 	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
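
The br_vlan.c ingress path above untags in-band 802.1Q headers into the hwaccel fields and falls back to the port PVID for untagged traffic. A simplified sketch of that normalization, assuming a fixed ETH_P_8021Q protocol and a hard-coded DEFAULT_PVID in place of per-port state:

	#include <linux/if_vlan.h>
	#include <linux/skbuff.h>

	#define DEFAULT_PVID	1	/* assumed; real code reads per-port state */

	static struct sk_buff *normalize_vlan(struct sk_buff *skb, u16 *vid)
	{
		/* tag still in the payload: move it into the hwaccel fields */
		if (!skb_vlan_tag_present(skb) &&
		    skb->protocol == htons(ETH_P_8021Q)) {
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				return NULL;	/* reallocation failed, skb freed */
		}

		if (skb_vlan_tag_present(skb)) {
			*vid = skb_vlan_tag_get_id(skb);
		} else {
			*vid = DEFAULT_PVID;	/* untagged traffic joins the PVID */
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), *vid);
		}
		return skb;
	}
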
skb               155 net/bridge/br_vlan_tunnel.c int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
skb               159 net/bridge/br_vlan_tunnel.c 	struct ip_tunnel_info *tinfo = skb_tunnel_info(skb);
skb               166 net/bridge/br_vlan_tunnel.c 	if (skb_vlan_tagged(skb))
skb               174 net/bridge/br_vlan_tunnel.c 	skb_dst_drop(skb);
skb               176 net/bridge/br_vlan_tunnel.c 	__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
skb               181 net/bridge/br_vlan_tunnel.c int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
skb               189 net/bridge/br_vlan_tunnel.c 	if (unlikely(!skb_vlan_tag_present(skb)))
skb               192 net/bridge/br_vlan_tunnel.c 	skb_dst_drop(skb);
skb               193 net/bridge/br_vlan_tunnel.c 	err = skb_vlan_pop(skb);
skb               197 net/bridge/br_vlan_tunnel.c 	skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
skb                17 net/bridge/netfilter/ebt_802_3.c static struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
skb                19 net/bridge/netfilter/ebt_802_3.c 	return (struct ebt_802_3_hdr *)skb_mac_header(skb);
skb                23 net/bridge/netfilter/ebt_802_3.c ebt_802_3_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                26 net/bridge/netfilter/ebt_802_3.c 	const struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb);
skb                71 net/bridge/netfilter/ebt_among.c static int get_ip_dst(const struct sk_buff *skb, __be32 *addr)
skb                73 net/bridge/netfilter/ebt_among.c 	if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
skb                77 net/bridge/netfilter/ebt_among.c 		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
skb                81 net/bridge/netfilter/ebt_among.c 	} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
skb                87 net/bridge/netfilter/ebt_among.c 		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
skb                92 net/bridge/netfilter/ebt_among.c 		bp = skb_header_pointer(skb, sizeof(struct arphdr) +
skb               102 net/bridge/netfilter/ebt_among.c static int get_ip_src(const struct sk_buff *skb, __be32 *addr)
skb               104 net/bridge/netfilter/ebt_among.c 	if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
skb               108 net/bridge/netfilter/ebt_among.c 		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
skb               112 net/bridge/netfilter/ebt_among.c 	} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
skb               118 net/bridge/netfilter/ebt_among.c 		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
skb               123 net/bridge/netfilter/ebt_among.c 		bp = skb_header_pointer(skb, sizeof(struct arphdr) +
skb               133 net/bridge/netfilter/ebt_among.c ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb               144 net/bridge/netfilter/ebt_among.c 		smac = eth_hdr(skb)->h_source;
skb               145 net/bridge/netfilter/ebt_among.c 		if (get_ip_src(skb, &sip))
skb               159 net/bridge/netfilter/ebt_among.c 		dmac = eth_hdr(skb)->h_dest;
skb               160 net/bridge/netfilter/ebt_among.c 		if (get_ip_dst(skb, &dip))
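
get_ip_dst()/get_ip_src() above lean on skb_header_pointer(), which returns a direct pointer when the requested bytes are linear and otherwise copies them into the caller's stack buffer. A sketch of the idiom for IPv4 addresses (read_ipv4_addrs() is hypothetical):

	#include <linux/errno.h>
	#include <linux/ip.h>
	#include <linux/skbuff.h>

	static int read_ipv4_addrs(const struct sk_buff *skb,
				   __be32 *saddr, __be32 *daddr)
	{
		struct iphdr _iph;
		const struct iphdr *ih;

		/* offset 0 assumes skb->data already points at the IP header,
		 * as it does for ebtables matches */
		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
		if (!ih)
			return -EINVAL;	/* packet shorter than an IP header */

		*saddr = ih->saddr;
		*daddr = ih->daddr;
		return 0;
	}
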
skb                20 net/bridge/netfilter/ebt_arp.c ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                26 net/bridge/netfilter/ebt_arp.c 	ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
skb                45 net/bridge/netfilter/ebt_arp.c 		sap = skb_header_pointer(skb, sizeof(struct arphdr) +
skb                50 net/bridge/netfilter/ebt_arp.c 		dap = skb_header_pointer(skb, sizeof(struct arphdr) +
skb                75 net/bridge/netfilter/ebt_arp.c 			mp = skb_header_pointer(skb, sizeof(struct arphdr),
skb                86 net/bridge/netfilter/ebt_arp.c 			mp = skb_header_pointer(skb, sizeof(struct arphdr) +
skb                20 net/bridge/netfilter/ebt_arpreply.c ebt_arpreply_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                30 net/bridge/netfilter/ebt_arpreply.c 	ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah);
skb                40 net/bridge/netfilter/ebt_arpreply.c 	shp = skb_header_pointer(skb, sizeof(_ah), ETH_ALEN, &_sha);
skb                44 net/bridge/netfilter/ebt_arpreply.c 	siptr = skb_header_pointer(skb, sizeof(_ah) + ETH_ALEN,
skb                49 net/bridge/netfilter/ebt_arpreply.c 	diptr = skb_header_pointer(skb,
skb                20 net/bridge/netfilter/ebt_dnat.c ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                24 net/bridge/netfilter/ebt_dnat.c 	if (skb_ensure_writable(skb, ETH_ALEN))
skb                27 net/bridge/netfilter/ebt_dnat.c 	ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
skb                31 net/bridge/netfilter/ebt_dnat.c 			skb->pkt_type = PACKET_BROADCAST;
skb                33 net/bridge/netfilter/ebt_dnat.c 			skb->pkt_type = PACKET_MULTICAST;
skb                53 net/bridge/netfilter/ebt_dnat.c 			skb->pkt_type = PACKET_HOST;
skb                55 net/bridge/netfilter/ebt_dnat.c 			skb->pkt_type = PACKET_OTHERHOST;
skb                38 net/bridge/netfilter/ebt_ip.c ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                46 net/bridge/netfilter/ebt_ip.c 	ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
skb                70 net/bridge/netfilter/ebt_ip.c 		pptr = skb_header_pointer(skb, ih->ihl*4,
skb                38 net/bridge/netfilter/ebt_ip6.c ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                46 net/bridge/netfilter/ebt_ip6.c 	ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
skb                67 net/bridge/netfilter/ebt_ip6.c 		offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off);
skb                77 net/bridge/netfilter/ebt_ip6.c 		pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
skb                36 net/bridge/netfilter/ebt_limit.c ebt_limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                53 net/bridge/netfilter/ebt_log.c print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
skb                63 net/bridge/netfilter/ebt_log.c 		pptr = skb_header_pointer(skb, offset,
skb                75 net/bridge/netfilter/ebt_log.c 	       const struct sk_buff *skb, const struct net_device *in,
skb                89 net/bridge/netfilter/ebt_log.c 	       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
skb                90 net/bridge/netfilter/ebt_log.c 	       ntohs(eth_hdr(skb)->h_proto));
skb                97 net/bridge/netfilter/ebt_log.c 	if ((bitmask & EBT_LOG_IP) && eth_hdr(skb)->h_proto ==
skb               102 net/bridge/netfilter/ebt_log.c 		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
skb               109 net/bridge/netfilter/ebt_log.c 		print_ports(skb, ih->protocol, ih->ihl*4);
skb               114 net/bridge/netfilter/ebt_log.c 	if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto ==
skb               122 net/bridge/netfilter/ebt_log.c 		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
skb               130 net/bridge/netfilter/ebt_log.c 		offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off);
skb               133 net/bridge/netfilter/ebt_log.c 		print_ports(skb, nexthdr, offset_ph);
skb               139 net/bridge/netfilter/ebt_log.c 	    ((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) ||
skb               140 net/bridge/netfilter/ebt_log.c 	     (eth_hdr(skb)->h_proto == htons(ETH_P_RARP)))) {
skb               144 net/bridge/netfilter/ebt_log.c 		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
skb               162 net/bridge/netfilter/ebt_log.c 			ap = skb_header_pointer(skb, sizeof(_arph),
skb               179 net/bridge/netfilter/ebt_log.c ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb               194 net/bridge/netfilter/ebt_log.c 		nf_log_packet(net, NFPROTO_BRIDGE, xt_hooknum(par), skb,
skb               198 net/bridge/netfilter/ebt_log.c 		ebt_log_packet(net, NFPROTO_BRIDGE, xt_hooknum(par), skb,
skb                23 net/bridge/netfilter/ebt_mark.c ebt_mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                29 net/bridge/netfilter/ebt_mark.c 		skb->mark = info->mark;
skb                31 net/bridge/netfilter/ebt_mark.c 		skb->mark |= info->mark;
skb                33 net/bridge/netfilter/ebt_mark.c 		skb->mark &= info->mark;
skb                35 net/bridge/netfilter/ebt_mark.c 		skb->mark ^= info->mark;
skb                17 net/bridge/netfilter/ebt_mark_m.c ebt_mark_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                22 net/bridge/netfilter/ebt_mark_m.c 		return !!(skb->mark & info->mask) ^ info->invert;
skb                23 net/bridge/netfilter/ebt_mark_m.c 	return ((skb->mark & info->mask) == info->mark) ^ info->invert;
skb                24 net/bridge/netfilter/ebt_nflog.c ebt_nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                36 net/bridge/netfilter/ebt_nflog.c 	nf_log_packet(net, PF_BRIDGE, xt_hooknum(par), skb, xt_in(par),
skb                17 net/bridge/netfilter/ebt_pkttype.c ebt_pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                21 net/bridge/netfilter/ebt_pkttype.c 	return (skb->pkt_type == info->pkt_type) ^ info->invert;
skb                20 net/bridge/netfilter/ebt_redirect.c ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                24 net/bridge/netfilter/ebt_redirect.c 	if (skb_ensure_writable(skb, ETH_ALEN))
skb                29 net/bridge/netfilter/ebt_redirect.c 		ether_addr_copy(eth_hdr(skb)->h_dest,
skb                32 net/bridge/netfilter/ebt_redirect.c 		ether_addr_copy(eth_hdr(skb)->h_dest, xt_in(par)->dev_addr);
skb                33 net/bridge/netfilter/ebt_redirect.c 	skb->pkt_type = PACKET_HOST;
skb                21 net/bridge/netfilter/ebt_snat.c ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                25 net/bridge/netfilter/ebt_snat.c 	if (skb_ensure_writable(skb, ETH_ALEN * 2))
skb                28 net/bridge/netfilter/ebt_snat.c 	ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
skb                30 net/bridge/netfilter/ebt_snat.c 	    eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
skb                34 net/bridge/netfilter/ebt_snat.c 		ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah);
skb                39 net/bridge/netfilter/ebt_snat.c 		if (skb_store_bits(skb, sizeof(_ah), info->mac, ETH_ALEN))
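
Both ebt_dnat and ebt_snat call skb_ensure_writable() before editing the Ethernet header, since the skb data may be shared with clones. A sketch of that rewrite step (rewrite_dest_mac() is hypothetical):

	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	static int rewrite_dest_mac(struct sk_buff *skb, const u8 *new_mac)
	{
		int err;

		/* unshare at least the destination MAC before writing in place */
		err = skb_ensure_writable(skb, ETH_ALEN);
		if (err)
			return err;

		ether_addr_copy(eth_hdr(skb)->h_dest, new_mac);
		return 0;
	}
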
skb               121 net/bridge/netfilter/ebt_stp.c ebt_stp_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb               128 net/bridge/netfilter/ebt_stp.c 	sp = skb_header_pointer(skb, 0, sizeof(_stph), &_stph);
skb               145 net/bridge/netfilter/ebt_stp.c 		st = skb_header_pointer(skb, sizeof(_stph),
skb                26 net/bridge/netfilter/ebt_vlan.c ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                36 net/bridge/netfilter/ebt_vlan.c 	if (skb_vlan_tag_present(skb)) {
skb                37 net/bridge/netfilter/ebt_vlan.c 		TCI = skb_vlan_tag_get(skb);
skb                38 net/bridge/netfilter/ebt_vlan.c 		encap = skb->protocol;
skb                43 net/bridge/netfilter/ebt_vlan.c 		fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame);
skb                54 net/bridge/netfilter/ebtable_broute.c static unsigned int ebt_broute(void *priv, struct sk_buff *skb,
skb                57 net/bridge/netfilter/ebtable_broute.c 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
skb                69 net/bridge/netfilter/ebtable_broute.c 	ret = ebt_do_table(skb, &state, state.net->xt.broute_table);
skb                81 net/bridge/netfilter/ebtable_broute.c 	BR_INPUT_SKB_CB(skb)->br_netfilter_broute = 1;
skb                86 net/bridge/netfilter/ebtable_broute.c 	dest = eth_hdr(skb)->h_dest;
skb                87 net/bridge/netfilter/ebtable_broute.c 	if (skb->pkt_type == PACKET_HOST &&
skb                88 net/bridge/netfilter/ebtable_broute.c 	    !ether_addr_equal(skb->dev->dev_addr, dest) &&
skb                90 net/bridge/netfilter/ebtable_broute.c 		skb->pkt_type = PACKET_OTHERHOST;
skb                62 net/bridge/netfilter/ebtable_filter.c ebt_in_hook(void *priv, struct sk_buff *skb,
skb                65 net/bridge/netfilter/ebtable_filter.c 	return ebt_do_table(skb, state, state->net->xt.frame_filter);
skb                69 net/bridge/netfilter/ebtable_filter.c ebt_out_hook(void *priv, struct sk_buff *skb,
skb                72 net/bridge/netfilter/ebtable_filter.c 	return ebt_do_table(skb, state, state->net->xt.frame_filter);
skb                62 net/bridge/netfilter/ebtable_nat.c ebt_nat_in(void *priv, struct sk_buff *skb,
skb                65 net/bridge/netfilter/ebtable_nat.c 	return ebt_do_table(skb, state, state->net->xt.frame_nat);
skb                69 net/bridge/netfilter/ebtable_nat.c ebt_nat_out(void *priv, struct sk_buff *skb,
skb                72 net/bridge/netfilter/ebtable_nat.c 	return ebt_do_table(skb, state, state->net->xt.frame_nat);
skb                80 net/bridge/netfilter/ebtables.c ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
skb                85 net/bridge/netfilter/ebtables.c 	w->u.watcher->target(skb, par);
skb                91 net/bridge/netfilter/ebtables.c ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
skb                96 net/bridge/netfilter/ebtables.c 	return !m->u.match->match(skb, par);
skb               118 net/bridge/netfilter/ebtables.c ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
skb               121 net/bridge/netfilter/ebtables.c 	const struct ethhdr *h = eth_hdr(skb);
skb               125 net/bridge/netfilter/ebtables.c 	if (skb_vlan_tag_present(skb))
skb               179 net/bridge/netfilter/ebtables.c unsigned int ebt_do_table(struct sk_buff *skb,
skb               214 net/bridge/netfilter/ebtables.c 		if (ebt_basic_match(point, skb, state->in, state->out))
skb               217 net/bridge/netfilter/ebtables.c 		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
skb               224 net/bridge/netfilter/ebtables.c 		ADD_COUNTER(*(counter_base + i), skb->len, 1);
skb               229 net/bridge/netfilter/ebtables.c 		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
skb               238 net/bridge/netfilter/ebtables.c 			verdict = t->u.target->target(skb, &acpar);
skb                28 net/bridge/netfilter/nf_conntrack_bridge.c 			     struct sk_buff *skb,
skb                34 net/bridge/netfilter/nf_conntrack_bridge.c 	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
skb                36 net/bridge/netfilter/nf_conntrack_bridge.c 	ktime_t tstamp = skb->tstamp;
skb                42 net/bridge/netfilter/nf_conntrack_bridge.c 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb                43 net/bridge/netfilter/nf_conntrack_bridge.c 	    (err = skb_checksum_help(skb)))
skb                46 net/bridge/netfilter/nf_conntrack_bridge.c 	iph = ip_hdr(skb);
skb                54 net/bridge/netfilter/nf_conntrack_bridge.c 	ll_rs = LL_RESERVED_SPACE(skb->dev);
skb                55 net/bridge/netfilter/nf_conntrack_bridge.c 	mtu = skb->dev->mtu;
skb                57 net/bridge/netfilter/nf_conntrack_bridge.c 	if (skb_has_frag_list(skb)) {
skb                58 net/bridge/netfilter/nf_conntrack_bridge.c 		unsigned int first_len = skb_pagelen(skb);
skb                63 net/bridge/netfilter/nf_conntrack_bridge.c 		    skb_headroom(skb) < ll_rs)
skb                66 net/bridge/netfilter/nf_conntrack_bridge.c 		if (skb_cloned(skb))
skb                69 net/bridge/netfilter/nf_conntrack_bridge.c 		skb_walk_frags(skb, frag) {
skb                78 net/bridge/netfilter/nf_conntrack_bridge.c 		ip_fraglist_init(skb, iph, hlen, &iter);
skb                82 net/bridge/netfilter/nf_conntrack_bridge.c 				ip_fraglist_prepare(skb, &iter);
skb                84 net/bridge/netfilter/nf_conntrack_bridge.c 			skb->tstamp = tstamp;
skb                85 net/bridge/netfilter/nf_conntrack_bridge.c 			err = output(net, sk, data, skb);
skb                89 net/bridge/netfilter/nf_conntrack_bridge.c 			skb = ip_fraglist_next(&iter);
skb                98 net/bridge/netfilter/nf_conntrack_bridge.c 	ip_frag_init(skb, hlen, ll_rs, frag_max_size, false, &state);
skb               103 net/bridge/netfilter/nf_conntrack_bridge.c 		skb2 = ip_frag_next(skb, &state);
skb               114 net/bridge/netfilter/nf_conntrack_bridge.c 	consume_skb(skb);
skb               118 net/bridge/netfilter/nf_conntrack_bridge.c 	kfree_skb(skb);
skb               123 net/bridge/netfilter/nf_conntrack_bridge.c static void br_skb_cb_save(struct sk_buff *skb, struct br_input_skb_cb *cb,
skb               126 net/bridge/netfilter/nf_conntrack_bridge.c 	memcpy(cb, skb->cb, sizeof(*cb));
skb               127 net/bridge/netfilter/nf_conntrack_bridge.c 	memset(skb->cb, 0, inet_skb_parm_size);
skb               130 net/bridge/netfilter/nf_conntrack_bridge.c static void br_skb_cb_restore(struct sk_buff *skb,
skb               134 net/bridge/netfilter/nf_conntrack_bridge.c 	memcpy(skb->cb, cb, sizeof(*cb));
skb               135 net/bridge/netfilter/nf_conntrack_bridge.c 	BR_INPUT_SKB_CB(skb)->frag_max_size = fragsz;
skb               138 net/bridge/netfilter/nf_conntrack_bridge.c static unsigned int nf_ct_br_defrag4(struct sk_buff *skb,
skb               147 net/bridge/netfilter/nf_conntrack_bridge.c 	if (!ip_is_fragment(ip_hdr(skb)))
skb               150 net/bridge/netfilter/nf_conntrack_bridge.c 	ct = nf_ct_get(skb, &ctinfo);
skb               154 net/bridge/netfilter/nf_conntrack_bridge.c 	br_skb_cb_save(skb, &cb, sizeof(struct inet_skb_parm));
skb               156 net/bridge/netfilter/nf_conntrack_bridge.c 	err = ip_defrag(state->net, skb,
skb               160 net/bridge/netfilter/nf_conntrack_bridge.c 		br_skb_cb_restore(skb, &cb, IPCB(skb)->frag_max_size);
skb               161 net/bridge/netfilter/nf_conntrack_bridge.c 		skb->ignore_df = 1;
skb               168 net/bridge/netfilter/nf_conntrack_bridge.c static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
skb               177 net/bridge/netfilter/nf_conntrack_bridge.c 	ct = nf_ct_get(skb, &ctinfo);
skb               181 net/bridge/netfilter/nf_conntrack_bridge.c 	br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm));
skb               183 net/bridge/netfilter/nf_conntrack_bridge.c 	err = nf_ipv6_br_defrag(state->net, skb,
skb               189 net/bridge/netfilter/nf_conntrack_bridge.c 	br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size);
skb               193 net/bridge/netfilter/nf_conntrack_bridge.c static int nf_ct_br_ip_check(const struct sk_buff *skb)
skb               198 net/bridge/netfilter/nf_conntrack_bridge.c 	nhoff = skb_network_offset(skb);
skb               199 net/bridge/netfilter/nf_conntrack_bridge.c 	iph = ip_hdr(skb);
skb               205 net/bridge/netfilter/nf_conntrack_bridge.c 	if (skb->len < nhoff + len ||
skb               212 net/bridge/netfilter/nf_conntrack_bridge.c static int nf_ct_br_ipv6_check(const struct sk_buff *skb)
skb               217 net/bridge/netfilter/nf_conntrack_bridge.c 	nhoff = skb_network_offset(skb);
skb               218 net/bridge/netfilter/nf_conntrack_bridge.c 	hdr = ipv6_hdr(skb);
skb               223 net/bridge/netfilter/nf_conntrack_bridge.c 	if (skb->len < len)
skb               229 net/bridge/netfilter/nf_conntrack_bridge.c static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
skb               238 net/bridge/netfilter/nf_conntrack_bridge.c 	ct = nf_ct_get(skb, &ctinfo);
skb               243 net/bridge/netfilter/nf_conntrack_bridge.c 	switch (skb->protocol) {
skb               245 net/bridge/netfilter/nf_conntrack_bridge.c 		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
skb               248 net/bridge/netfilter/nf_conntrack_bridge.c 		len = ntohs(ip_hdr(skb)->tot_len);
skb               249 net/bridge/netfilter/nf_conntrack_bridge.c 		if (pskb_trim_rcsum(skb, len))
skb               252 net/bridge/netfilter/nf_conntrack_bridge.c 		if (nf_ct_br_ip_check(skb))
skb               256 net/bridge/netfilter/nf_conntrack_bridge.c 		ret = nf_ct_br_defrag4(skb, &bridge_state);
skb               259 net/bridge/netfilter/nf_conntrack_bridge.c 		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
skb               262 net/bridge/netfilter/nf_conntrack_bridge.c 		len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
skb               263 net/bridge/netfilter/nf_conntrack_bridge.c 		if (pskb_trim_rcsum(skb, len))
skb               266 net/bridge/netfilter/nf_conntrack_bridge.c 		if (nf_ct_br_ipv6_check(skb))
skb               270 net/bridge/netfilter/nf_conntrack_bridge.c 		ret = nf_ct_br_defrag6(skb, &bridge_state);
skb               273 net/bridge/netfilter/nf_conntrack_bridge.c 		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
skb               280 net/bridge/netfilter/nf_conntrack_bridge.c 	return nf_conntrack_in(skb, &bridge_state);
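
nf_ct_bridge_pre() above first makes the fixed IP header linear, then trims Ethernet padding to the length the header advertises via pskb_trim_rcsum() so the checksum state stays coherent. A sketch of that IPv4 sanity pass (ipv4_sanitize() is hypothetical, and the deeper header checks of nf_ct_br_ip_check() are omitted):

	#include <linux/errno.h>
	#include <linux/ip.h>
	#include <linux/skbuff.h>

	static int ipv4_sanitize(struct sk_buff *skb)
	{
		unsigned int len;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return -EINVAL;		/* runt frame */

		len = ntohs(ip_hdr(skb)->tot_len);
		if (len < sizeof(struct iphdr))
			return -EINVAL;		/* bogus total length */

		/* drop trailing Ethernet padding, keeping checksum state valid */
		return pskb_trim_rcsum(skb, len);
	}
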
skb               283 net/bridge/netfilter/nf_conntrack_bridge.c static void nf_ct_bridge_frag_save(struct sk_buff *skb,
skb               286 net/bridge/netfilter/nf_conntrack_bridge.c 	if (skb_vlan_tag_present(skb)) {
skb               288 net/bridge/netfilter/nf_conntrack_bridge.c 		data->vlan_tci = skb->vlan_tci;
skb               289 net/bridge/netfilter/nf_conntrack_bridge.c 		data->vlan_proto = skb->vlan_proto;
skb               293 net/bridge/netfilter/nf_conntrack_bridge.c 	skb_copy_from_linear_data_offset(skb, -ETH_HLEN, data->mac, ETH_HLEN);
skb               297 net/bridge/netfilter/nf_conntrack_bridge.c nf_ct_bridge_refrag(struct sk_buff *skb, const struct nf_hook_state *state,
skb               304 net/bridge/netfilter/nf_conntrack_bridge.c 	if (!BR_INPUT_SKB_CB(skb)->frag_max_size)
skb               307 net/bridge/netfilter/nf_conntrack_bridge.c 	nf_ct_bridge_frag_save(skb, &data);
skb               308 net/bridge/netfilter/nf_conntrack_bridge.c 	switch (skb->protocol) {
skb               310 net/bridge/netfilter/nf_conntrack_bridge.c 		nf_br_ip_fragment(state->net, state->sk, skb, &data, output);
skb               313 net/bridge/netfilter/nf_conntrack_bridge.c 		nf_br_ip6_fragment(state->net, state->sk, skb, &data, output);
skb               324 net/bridge/netfilter/nf_conntrack_bridge.c static int nf_ct_bridge_frag_restore(struct sk_buff *skb,
skb               329 net/bridge/netfilter/nf_conntrack_bridge.c 	err = skb_cow_head(skb, ETH_HLEN);
skb               331 net/bridge/netfilter/nf_conntrack_bridge.c 		kfree_skb(skb);
skb               335 net/bridge/netfilter/nf_conntrack_bridge.c 		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);
skb               336 net/bridge/netfilter/nf_conntrack_bridge.c 	else if (skb_vlan_tag_present(skb))
skb               337 net/bridge/netfilter/nf_conntrack_bridge.c 		__vlan_hwaccel_clear_tag(skb);
skb               339 net/bridge/netfilter/nf_conntrack_bridge.c 	skb_copy_to_linear_data_offset(skb, -ETH_HLEN, data->mac, ETH_HLEN);
skb               340 net/bridge/netfilter/nf_conntrack_bridge.c 	skb_reset_mac_header(skb);
skb               347 net/bridge/netfilter/nf_conntrack_bridge.c 				    struct sk_buff *skb)
skb               351 net/bridge/netfilter/nf_conntrack_bridge.c 	err = nf_ct_bridge_frag_restore(skb, data);
skb               355 net/bridge/netfilter/nf_conntrack_bridge.c 	return br_dev_queue_push_xmit(net, sk, skb);
skb               358 net/bridge/netfilter/nf_conntrack_bridge.c static unsigned int nf_ct_bridge_confirm(struct sk_buff *skb)
skb               364 net/bridge/netfilter/nf_conntrack_bridge.c 	ct = nf_ct_get(skb, &ctinfo);
skb               366 net/bridge/netfilter/nf_conntrack_bridge.c 		return nf_conntrack_confirm(skb);
skb               368 net/bridge/netfilter/nf_conntrack_bridge.c 	switch (skb->protocol) {
skb               370 net/bridge/netfilter/nf_conntrack_bridge.c 		protoff = skb_network_offset(skb) + ip_hdrlen(skb);
skb               373 net/bridge/netfilter/nf_conntrack_bridge.c 		unsigned char pnum = ipv6_hdr(skb)->nexthdr;
skb               376 net/bridge/netfilter/nf_conntrack_bridge.c 		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
skb               379 net/bridge/netfilter/nf_conntrack_bridge.c 			return nf_conntrack_confirm(skb);
skb               385 net/bridge/netfilter/nf_conntrack_bridge.c 	return nf_confirm(skb, protoff, ct, ctinfo);
skb               388 net/bridge/netfilter/nf_conntrack_bridge.c static unsigned int nf_ct_bridge_post(void *priv, struct sk_buff *skb,
skb               393 net/bridge/netfilter/nf_conntrack_bridge.c 	ret = nf_ct_bridge_confirm(skb);
skb               397 net/bridge/netfilter/nf_conntrack_bridge.c 	return nf_ct_bridge_refrag(skb, state, nf_ct_bridge_refrag_post);
skb                18 net/bridge/netfilter/nf_log_bridge.c 				 const struct sk_buff *skb,
skb                24 net/bridge/netfilter/nf_log_bridge.c 	nf_log_l2packet(net, pf, eth_hdr(skb)->h_proto, hooknum, skb,
skb                42 net/bridge/netfilter/nft_reject_bridge.c static int nft_bridge_iphdr_validate(struct sk_buff *skb)
skb                47 net/bridge/netfilter/nft_reject_bridge.c 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
skb                50 net/bridge/netfilter/nft_reject_bridge.c 	iph = ip_hdr(skb);
skb                55 net/bridge/netfilter/nft_reject_bridge.c 	if (skb->len < len)
skb                60 net/bridge/netfilter/nft_reject_bridge.c 	if (!pskb_may_pull(skb, iph->ihl*4))
skb               165 net/bridge/netfilter/nft_reject_bridge.c static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
skb               170 net/bridge/netfilter/nft_reject_bridge.c 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
skb               173 net/bridge/netfilter/nft_reject_bridge.c 	hdr = ipv6_hdr(skb);
skb               178 net/bridge/netfilter/nft_reject_bridge.c 	if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
skb               218 net/bridge/netfilter/nft_reject_bridge.c static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
skb               220 net/bridge/netfilter/nft_reject_bridge.c 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               225 net/bridge/netfilter/nft_reject_bridge.c 	if (skb_csum_unnecessary(skb))
skb               229 net/bridge/netfilter/nft_reject_bridge.c 	    pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
skb               232 net/bridge/netfilter/nft_reject_bridge.c 	ip6h = ipv6_hdr(skb);
skb               233 net/bridge/netfilter/nft_reject_bridge.c 	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
skb               234 net/bridge/netfilter/nft_reject_bridge.c 	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
skb               240 net/bridge/netfilter/nft_reject_bridge.c 	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
skb               302 net/bridge/netfilter/nft_reject_bridge.c 	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
skb               308 net/bridge/netfilter/nft_reject_bridge.c 	switch (eth_hdr(pkt->skb)->h_proto) {
skb               312 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
skb               318 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
skb               323 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
skb               333 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
skb               339 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
skb               344 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
skb               399 net/bridge/netfilter/nft_reject_bridge.c static int nft_reject_bridge_dump(struct sk_buff *skb,
skb               404 net/bridge/netfilter/nft_reject_bridge.c 	if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
skb               410 net/bridge/netfilter/nft_reject_bridge.c 		if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
skb                40 net/caif/caif_dev.c 	void (*xoff_skb_dtor)(struct sk_buff *skb);
skb               122 net/caif/caif_dev.c static void caif_flow_cb(struct sk_buff *skb)
skb               125 net/caif/caif_dev.c 	void (*dtor)(struct sk_buff *skb) = NULL;
skb               128 net/caif/caif_dev.c 	WARN_ON(skb->dev == NULL);
skb               131 net/caif/caif_dev.c 	caifd = caif_get(skb->dev);
skb               147 net/caif/caif_dev.c 	if (WARN_ON(caifd->xoff_skb != skb))
skb               148 net/caif/caif_dev.c 		skb = NULL;
skb               155 net/caif/caif_dev.c 	if (dtor && skb)
skb               156 net/caif/caif_dev.c 		dtor(skb);
skb               171 net/caif/caif_dev.c 	struct sk_buff *skb;
skb               176 net/caif/caif_dev.c 	skb = cfpkt_tonative(pkt);
skb               177 net/caif/caif_dev.c 	skb->dev = caifd->netdev;
skb               178 net/caif/caif_dev.c 	skb_reset_network_header(skb);
skb               179 net/caif/caif_dev.c 	skb->protocol = htons(ETH_P_CAIF);
skb               192 net/caif/caif_dev.c 		txq = netdev_get_tx_queue(skb->dev, 0);
skb               223 net/caif/caif_dev.c 	caifd->xoff_skb = skb;
skb               224 net/caif/caif_dev.c 	caifd->xoff_skb_dtor = skb->destructor;
skb               225 net/caif/caif_dev.c 	skb->destructor = caif_flow_cb;
skb               234 net/caif/caif_dev.c 	err = dev_queue_xmit(skb);
skb               245 net/caif/caif_dev.c static int receive(struct sk_buff *skb, struct net_device *dev,
skb               252 net/caif/caif_dev.c 	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
skb               260 net/caif/caif_dev.c 		kfree_skb(skb);
skb               124 net/caif/caif_socket.c static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               132 net/caif/caif_socket.c 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
skb               141 net/caif/caif_socket.c 	err = sk_filter(sk, skb);
skb               145 net/caif/caif_socket.c 	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
skb               150 net/caif/caif_socket.c 	skb->dev = NULL;
skb               151 net/caif/caif_socket.c 	skb_set_owner_r(skb, sk);
skb               155 net/caif/caif_socket.c 		__skb_queue_tail(list, skb);
skb               161 net/caif/caif_socket.c 		kfree_skb(skb);
skb               168 net/caif/caif_socket.c 	struct sk_buff *skb;
skb               171 net/caif/caif_socket.c 	skb = cfpkt_tonative(pkt);
skb               174 net/caif/caif_socket.c 		kfree_skb(skb);
skb               177 net/caif/caif_socket.c 	caif_queue_rcv_skb(&cf_sk->sk, skb);
skb               276 net/caif/caif_socket.c 	struct sk_buff *skb;
skb               284 net/caif/caif_socket.c 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
skb               285 net/caif/caif_socket.c 	if (!skb)
skb               287 net/caif/caif_socket.c 	copylen = skb->len;
skb               293 net/caif/caif_socket.c 	ret = skb_copy_datagram_msg(skb, 0, m, copylen);
skb               297 net/caif/caif_socket.c 	ret = (flags & MSG_TRUNC) ? skb->len : copylen;
skb               299 net/caif/caif_socket.c 	skb_free_datagram(sk, skb);
skb               374 net/caif/caif_socket.c 		struct sk_buff *skb;
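
The caif_socket.c datagram receive above is the stock pattern: skb_recv_datagram() to dequeue, skb_copy_datagram_msg() to copy, skb_free_datagram() to release. A condensed sketch (dgram_recvmsg() is hypothetical; truncation handling is reduced to the MSG_TRUNC return-length rule visible above):

	#include <linux/kernel.h>
	#include <linux/skbuff.h>
	#include <linux/socket.h>
	#include <net/sock.h>

	static int dgram_recvmsg(struct sock *sk, struct msghdr *m,
				 size_t len, int flags)
	{
		struct sk_buff *skb;
		size_t copylen;
		int ret;

		skb = skb_recv_datagram(sk, flags, 0, &ret);	/* noblock = 0 */
		if (!skb)
			return ret;

		copylen = min_t(size_t, skb->len, len);
		if (skb->len > len)
			m->msg_flags |= MSG_TRUNC;	/* datagram did not fit */

		ret = skb_copy_datagram_msg(skb, 0, m, copylen);
		if (ret == 0)
			ret = (flags & MSG_TRUNC) ? skb->len : copylen;

		skb_free_datagram(sk, skb);	/* consume the skb either way */
		return ret;
	}
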
skb               381 net/caif/caif_socket.c 		skb = skb_dequeue(&sk->sk_receive_queue);
skb               384 net/caif/caif_socket.c 		if (skb == NULL) {
skb               424 net/caif/caif_socket.c 		chunk = min_t(unsigned int, skb->len, size);
skb               425 net/caif/caif_socket.c 		if (memcpy_to_msg(msg, skb->data, chunk)) {
skb               426 net/caif/caif_socket.c 			skb_queue_head(&sk->sk_receive_queue, skb);
skb               436 net/caif/caif_socket.c 			skb_pull(skb, chunk);
skb               439 net/caif/caif_socket.c 			if (skb->len) {
skb               440 net/caif/caif_socket.c 				skb_queue_head(&sk->sk_receive_queue, skb);
skb               443 net/caif/caif_socket.c 			kfree_skb(skb);
skb               450 net/caif/caif_socket.c 			skb_queue_head(&sk->sk_receive_queue, skb);
skb               500 net/caif/caif_socket.c static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
skb               505 net/caif/caif_socket.c 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
skb               506 net/caif/caif_socket.c 	memset(skb->cb, 0, sizeof(struct caif_payload_info));
skb               510 net/caif/caif_socket.c 		kfree_skb(skb);
skb               525 net/caif/caif_socket.c 	struct sk_buff *skb = NULL;
skb               566 net/caif/caif_socket.c 	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
skb               568 net/caif/caif_socket.c 	if (!skb || skb_tailroom(skb) < buffer_size)
skb               571 net/caif/caif_socket.c 	skb_reserve(skb, cf_sk->headroom);
skb               573 net/caif/caif_socket.c 	ret = memcpy_from_msg(skb_put(skb, len), msg, len);
skb               577 net/caif/caif_socket.c 	ret = transmit_skb(skb, cf_sk, noblock, timeo);
skb               584 net/caif/caif_socket.c 	kfree_skb(skb);
skb               599 net/caif/caif_socket.c 	struct sk_buff *skb;
skb               630 net/caif/caif_socket.c 		skb = sock_alloc_send_skb(sk,
skb               635 net/caif/caif_socket.c 		if (skb == NULL)
skb               638 net/caif/caif_socket.c 		skb_reserve(skb, cf_sk->headroom);
skb               646 net/caif/caif_socket.c 		size = min_t(int, size, skb_tailroom(skb));
skb               648 net/caif/caif_socket.c 		err = memcpy_from_msg(skb_put(skb, size), msg, size);
skb               650 net/caif/caif_socket.c 			kfree_skb(skb);
skb               653 net/caif/caif_socket.c 		err = transmit_skb(skb, cf_sk,
skb                53 net/caif/caif_usb.c 	struct sk_buff *skb;
skb                56 net/caif/caif_usb.c 	skb = cfpkt_tonative(pkt);
skb                58 net/caif/caif_usb.c 	skb_reset_network_header(skb);
skb                59 net/caif/caif_usb.c 	skb->protocol = htons(ETH_P_IP);
skb                64 net/caif/caif_usb.c 	if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
skb                66 net/caif/caif_usb.c 		kfree_skb(skb);
skb                20 net/caif/cfpkt_skbuff.c 	skb_reset_tail_pointer(&pkt->skb); \
skb                36 net/caif/cfpkt_skbuff.c 	struct sk_buff skb;
skb                47 net/caif/cfpkt_skbuff.c 	return (struct cfpkt_priv_data *) pkt->skb.cb;
skb                57 net/caif/cfpkt_skbuff.c 	return &pkt->skb;
skb                60 net/caif/cfpkt_skbuff.c static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
skb                62 net/caif/cfpkt_skbuff.c 	return (struct cfpkt *) skb;
skb                81 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb;
skb                83 net/caif/cfpkt_skbuff.c 	skb = alloc_skb(len + pfx, GFP_ATOMIC);
skb                84 net/caif/cfpkt_skbuff.c 	if (unlikely(skb == NULL))
skb                87 net/caif/cfpkt_skbuff.c 	skb_reserve(skb, pfx);
skb                88 net/caif/cfpkt_skbuff.c 	return skb_to_pkt(skb);
skb                98 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
skb                99 net/caif/cfpkt_skbuff.c 	kfree_skb(skb);
skb               104 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
skb               105 net/caif/cfpkt_skbuff.c 	return skb->len > 0;
skb               110 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
skb               111 net/caif/cfpkt_skbuff.c 	if (skb_headlen(skb) >= len) {
skb               112 net/caif/cfpkt_skbuff.c 		memcpy(data, skb->data, len);
skb               121 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
skb               126 net/caif/cfpkt_skbuff.c 	if (unlikely(len > skb->len)) {
skb               131 net/caif/cfpkt_skbuff.c 	if (unlikely(len > skb_headlen(skb))) {
skb               132 net/caif/cfpkt_skbuff.c 		if (unlikely(skb_linearize(skb) != 0)) {
skb               137 net/caif/cfpkt_skbuff.c 	from = skb_pull(skb, len);
skb               147 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
skb               153 net/caif/cfpkt_skbuff.c 	if (unlikely(skb_linearize(skb) != 0)) {
skb               157 net/caif/cfpkt_skbuff.c 	if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
skb               161 net/caif/cfpkt_skbuff.c 	from = skb_tail_pointer(skb) - len;
skb               162 net/caif/cfpkt_skbuff.c 	skb_trim(skb, skb->len - len);
skb               174 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
skb               183 net/caif/cfpkt_skbuff.c 	lastskb = skb;
skb               186 net/caif/cfpkt_skbuff.c 	if (unlikely(skb_tailroom(skb) < len)) {
skb               194 net/caif/cfpkt_skbuff.c 	if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) {
skb               197 net/caif/cfpkt_skbuff.c 		if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
skb               204 net/caif/cfpkt_skbuff.c 	to = pskb_put(skb, lastskb, len);
skb               217 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
skb               224 net/caif/cfpkt_skbuff.c 	if (unlikely(skb_headroom(skb) < len)) {
skb               230 net/caif/cfpkt_skbuff.c 	ret = skb_cow_data(skb, 0, &lastskb);
skb               236 net/caif/cfpkt_skbuff.c 	to = skb_push(skb, len);
skb               249 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
skb               250 net/caif/cfpkt_skbuff.c 	return skb->len;
skb               263 net/caif/cfpkt_skbuff.c 	if (unlikely(skb_linearize(&pkt->skb) != 0)) {
skb               267 net/caif/cfpkt_skbuff.c 	return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
skb               272 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
skb               278 net/caif/cfpkt_skbuff.c 	if (likely(len <= skb->len)) {
skb               279 net/caif/cfpkt_skbuff.c 		if (unlikely(skb->data_len))
skb               280 net/caif/cfpkt_skbuff.c 			___pskb_trim(skb, len);
skb               282 net/caif/cfpkt_skbuff.c 			skb_trim(skb, len);
skb               288 net/caif/cfpkt_skbuff.c 	if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len)))
skb               334 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
skb               336 net/caif/cfpkt_skbuff.c 	u8 *split = skb->data + pos;
skb               337 net/caif/cfpkt_skbuff.c 	u16 len2nd = skb_tail_pointer(skb) - split;
skb               342 net/caif/cfpkt_skbuff.c 	if (skb->data + pos > skb_tail_pointer(skb)) {
skb               361 net/caif/cfpkt_skbuff.c 	skb_trim(skb, pos);
skb               363 net/caif/cfpkt_skbuff.c 	skb2->priority = skb->priority;
skb                72 net/caif/chnl_net.c 	struct sk_buff *skb;
skb                82 net/caif/chnl_net.c 	skb = (struct sk_buff *) cfpkt_tonative(pkt);
skb                85 net/caif/chnl_net.c 	pktlen = skb->len;
skb                90 net/caif/chnl_net.c 	skb->dev = priv->netdev;
skb                93 net/caif/chnl_net.c 	ip_version = skb_header_pointer(skb, 0, 1, &buf);
skb                95 net/caif/chnl_net.c 		kfree_skb(skb);
skb               101 net/caif/chnl_net.c 		skb->protocol = htons(ETH_P_IP);
skb               104 net/caif/chnl_net.c 		skb->protocol = htons(ETH_P_IPV6);
skb               107 net/caif/chnl_net.c 		kfree_skb(skb);
skb               114 net/caif/chnl_net.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               116 net/caif/chnl_net.c 		skb->ip_summed = CHECKSUM_NONE;
skb               119 net/caif/chnl_net.c 		netif_rx(skb);
skb               121 net/caif/chnl_net.c 		netif_rx_ni(skb);
skb               214 net/caif/chnl_net.c static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               223 net/caif/chnl_net.c 	if (skb->len > priv->netdev->mtu) {
skb               225 net/caif/chnl_net.c 		kfree_skb(skb);
skb               232 net/caif/chnl_net.c 		kfree_skb(skb);
skb               238 net/caif/chnl_net.c 		swap(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
skb               241 net/caif/chnl_net.c 	len = skb->len;
skb               243 net/caif/chnl_net.c 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);
skb               422 net/caif/chnl_net.c static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb               427 net/caif/chnl_net.c 	if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID,
skb               429 net/caif/chnl_net.c 	    nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID,
skb               433 net/caif/chnl_net.c 	if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop))
skb               199 net/can/af_can.c int can_send(struct sk_buff *skb, int loop)
skb               202 net/can/af_can.c 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
skb               203 net/can/af_can.c 	struct can_pkg_stats *pkg_stats = dev_net(skb->dev)->can.pkg_stats;
skb               206 net/can/af_can.c 	if (skb->len == CAN_MTU) {
skb               207 net/can/af_can.c 		skb->protocol = htons(ETH_P_CAN);
skb               210 net/can/af_can.c 	} else if (skb->len == CANFD_MTU) {
skb               211 net/can/af_can.c 		skb->protocol = htons(ETH_P_CANFD);
skb               222 net/can/af_can.c 	if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
skb               227 net/can/af_can.c 	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
skb               232 net/can/af_can.c 	if (unlikely(!(skb->dev->flags & IFF_UP))) {
skb               237 net/can/af_can.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               239 net/can/af_can.c 	skb_reset_mac_header(skb);
skb               240 net/can/af_can.c 	skb_reset_network_header(skb);
skb               241 net/can/af_can.c 	skb_reset_transport_header(skb);
skb               247 net/can/af_can.c 		skb->pkt_type = PACKET_LOOPBACK;
skb               257 net/can/af_can.c 		if (!(skb->dev->flags & IFF_ECHO)) {
skb               261 net/can/af_can.c 			newskb = skb_clone(skb, GFP_ATOMIC);
skb               263 net/can/af_can.c 				kfree_skb(skb);
skb               267 net/can/af_can.c 			can_skb_set_owner(newskb, skb->sk);
skb               273 net/can/af_can.c 		skb->pkt_type = PACKET_HOST;
skb               277 net/can/af_can.c 	err = dev_queue_xmit(skb);
skb               296 net/can/af_can.c 	kfree_skb(skb);
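
can_send() above expects a fully formed CAN skb; the bcm.c entries further down show the canonical construction. A sketch combining the two, assuming a classic CAN frame and a loopback request (send_can_frame() is hypothetical):

	#include <linux/can.h>
	#include <linux/can/core.h>
	#include <linux/can/skb.h>
	#include <linux/errno.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int send_can_frame(struct net_device *dev, const struct can_frame *cf)
	{
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(*cf) + sizeof(struct can_skb_priv),
				GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		can_skb_reserve(skb);			/* room for struct can_skb_priv */
		can_skb_prv(skb)->ifindex = dev->ifindex;
		can_skb_prv(skb)->skbcnt = 0;

		skb_put_data(skb, cf, sizeof(*cf));	/* the CAN frame is the payload */
		skb->dev = dev;

		return can_send(skb, 1);	/* 1 = deliver a local loopback copy */
	}
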
skb               567 net/can/af_can.c static inline void deliver(struct sk_buff *skb, struct receiver *rcv)
skb               569 net/can/af_can.c 	rcv->func(skb, rcv->data);
skb               573 net/can/af_can.c static int can_rcv_filter(struct can_dev_rcv_lists *dev_rcv_lists, struct sk_buff *skb)
skb               577 net/can/af_can.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb               587 net/can/af_can.c 				deliver(skb, rcv);
skb               596 net/can/af_can.c 		deliver(skb, rcv);
skb               603 net/can/af_can.c 			deliver(skb, rcv);
skb               611 net/can/af_can.c 			deliver(skb, rcv);
skb               623 net/can/af_can.c 				deliver(skb, rcv);
skb               630 net/can/af_can.c 			deliver(skb, rcv);
skb               638 net/can/af_can.c static void can_receive(struct sk_buff *skb, struct net_device *dev)
skb               650 net/can/af_can.c 	while (!(can_skb_prv(skb)->skbcnt))
skb               651 net/can/af_can.c 		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
skb               656 net/can/af_can.c 	matches = can_rcv_filter(net->can.rx_alldev_list, skb);
skb               660 net/can/af_can.c 	matches += can_rcv_filter(dev_rcv_lists, skb);
skb               665 net/can/af_can.c 	consume_skb(skb);
skb               673 net/can/af_can.c static int can_rcv(struct sk_buff *skb, struct net_device *dev,
skb               676 net/can/af_can.c 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
skb               678 net/can/af_can.c 	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
skb               681 net/can/af_can.c 			     dev->type, skb->len, cfd->len);
skb               682 net/can/af_can.c 		kfree_skb(skb);
skb               686 net/can/af_can.c 	can_receive(skb, dev);
skb               690 net/can/af_can.c static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
skb               693 net/can/af_can.c 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
skb               695 net/can/af_can.c 	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
skb               698 net/can/af_can.c 			     dev->type, skb->len, cfd->len);
skb               699 net/can/af_can.c 		kfree_skb(skb);
skb               703 net/can/af_can.c 	can_receive(skb, dev);
skb                56 net/can/af_can.h 	void (*func)(struct sk_buff *skb, void *data);
skb               269 net/can/bcm.c  	struct sk_buff *skb;
skb               283 net/can/bcm.c  	skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
skb               284 net/can/bcm.c  	if (!skb)
skb               287 net/can/bcm.c  	can_skb_reserve(skb);
skb               288 net/can/bcm.c  	can_skb_prv(skb)->ifindex = dev->ifindex;
skb               289 net/can/bcm.c  	can_skb_prv(skb)->skbcnt = 0;
skb               291 net/can/bcm.c  	skb_put_data(skb, cf, op->cfsiz);
skb               294 net/can/bcm.c  	skb->dev = dev;
skb               295 net/can/bcm.c  	can_skb_set_owner(skb, op->sk);
skb               296 net/can/bcm.c  	can_send(skb, 1);
skb               316 net/can/bcm.c  	struct sk_buff *skb;
skb               323 net/can/bcm.c  	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
skb               324 net/can/bcm.c  	if (!skb)
skb               327 net/can/bcm.c  	skb_put_data(skb, head, sizeof(*head));
skb               331 net/can/bcm.c  		firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
skb               333 net/can/bcm.c  		skb_put_data(skb, frames, datalen);
skb               347 net/can/bcm.c  		skb->tstamp = op->rx_stamp;
skb               358 net/can/bcm.c  	addr = (struct sockaddr_can *)skb->cb;
skb               363 net/can/bcm.c  	err = sock_queue_rcv_skb(sk, skb);
skb               367 net/can/bcm.c  		kfree_skb(skb);
skb               633 net/can/bcm.c  static void bcm_rx_handler(struct sk_buff *skb, void *data)
skb               636 net/can/bcm.c  	const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
skb               643 net/can/bcm.c  	if (skb->len != op->cfsiz)
skb               650 net/can/bcm.c  	op->rx_stamp = skb->tstamp;
skb               652 net/can/bcm.c  	op->rx_ifindex = skb->dev->ifindex;
skb              1224 net/can/bcm.c  	struct sk_buff *skb;
skb              1232 net/can/bcm.c  	skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
skb              1233 net/can/bcm.c  	if (!skb)
skb              1236 net/can/bcm.c  	can_skb_reserve(skb);
skb              1238 net/can/bcm.c  	err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
skb              1240 net/can/bcm.c  		kfree_skb(skb);
skb              1246 net/can/bcm.c  		kfree_skb(skb);
skb              1250 net/can/bcm.c  	can_skb_prv(skb)->ifindex = dev->ifindex;
skb              1251 net/can/bcm.c  	can_skb_prv(skb)->skbcnt = 0;
skb              1252 net/can/bcm.c  	skb->dev = dev;
skb              1253 net/can/bcm.c  	can_skb_set_owner(skb, sk);
skb              1254 net/can/bcm.c  	err = can_send(skb, 1); /* send with loopback */
skb              1598 net/can/bcm.c  	struct sk_buff *skb;
skb              1605 net/can/bcm.c  	skb = skb_recv_datagram(sk, flags, noblock, &error);
skb              1606 net/can/bcm.c  	if (!skb)
skb              1609 net/can/bcm.c  	if (skb->len < size)
skb              1610 net/can/bcm.c  		size = skb->len;
skb              1612 net/can/bcm.c  	err = memcpy_to_msg(msg, skb->data, size);
skb              1614 net/can/bcm.c  		skb_free_datagram(sk, skb);
skb              1618 net/can/bcm.c  	sock_recv_ts_and_drops(msg, sk, skb);
skb              1623 net/can/bcm.c  		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
skb              1626 net/can/bcm.c  	skb_free_datagram(sk, skb);
skb               396 net/can/gw.c   static void can_can_gw_rcv(struct sk_buff *skb, void *data)
skb               405 net/can/gw.c   		if (skb->len != CANFD_MTU)
skb               408 net/can/gw.c   		if (skb->len != CAN_MTU)
skb               423 net/can/gw.c   #define cgw_hops(skb) ((skb)->csum_start)
skb               425 net/can/gw.c   	BUG_ON(skb->ip_summed != CHECKSUM_UNNECESSARY);
skb               427 net/can/gw.c   	if (cgw_hops(skb) >= max_hops) {
skb               440 net/can/gw.c   	    can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex)
skb               449 net/can/gw.c   		nskb = skb_copy(skb, GFP_ATOMIC);
skb               451 net/can/gw.c   		nskb = skb_clone(skb, GFP_ATOMIC);
skb               459 net/can/gw.c   	cgw_hops(nskb) = cgw_hops(skb) + 1;
skb               546 net/can/gw.c   static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
skb               552 net/can/gw.c   	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
skb               564 net/can/gw.c   		if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
skb               569 net/can/gw.c   		if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
skb               574 net/can/gw.c   		if (nla_put_u32(skb, CGW_DELETED, gwj->deleted_frames) < 0)
skb               581 net/can/gw.c   		if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0)
skb               591 net/can/gw.c   			if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
skb               598 net/can/gw.c   			if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
skb               605 net/can/gw.c   			if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
skb               612 net/can/gw.c   			if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
skb               621 net/can/gw.c   			if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
skb               628 net/can/gw.c   			if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
skb               635 net/can/gw.c   			if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
skb               642 net/can/gw.c   			if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
skb               648 net/can/gw.c   		if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
skb               653 net/can/gw.c   		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
skb               659 net/can/gw.c   		if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
skb               666 net/can/gw.c   			if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
skb               671 net/can/gw.c   		if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
skb               674 net/can/gw.c   		if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
skb               678 net/can/gw.c   	nlmsg_end(skb, nlh);
skb               682 net/can/gw.c   	nlmsg_cancel(skb, nlh);
skb               687 net/can/gw.c   static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
skb               689 net/can/gw.c   	struct net *net = sock_net(skb->sk);
skb               699 net/can/gw.c   		if (cgw_put_job(skb, gwj, RTM_NEWROUTE,
skb               700 net/can/gw.c   				NETLINK_CB(cb->skb).portid,
skb               710 net/can/gw.c   	return skb->len;
skb               988 net/can/gw.c   static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
skb               991 net/can/gw.c   	struct net *net = sock_net(skb->sk);
skb               999 net/can/gw.c   	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb              1100 net/can/gw.c   static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1103 net/can/gw.c   	struct net *net = sock_net(skb->sk);
skb              1112 net/can/gw.c   	if (!netlink_capable(skb, CAP_NET_ADMIN))
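
The cgw_put_job() lines above follow the canonical netlink fill-function shape: nlmsg_put() reserves the message header, a series of nla_put_*() calls append attributes, and the function either finalizes with nlmsg_end() or rolls the whole message back with nlmsg_cancel() when the skb runs out of tailroom. A minimal sketch of that shape; MY_MSG_TYPE and MY_ATTR_COUNTER are hypothetical placeholders, not identifiers from net/can/gw.c:

	#include <net/netlink.h>

	#define MY_MSG_TYPE	0x10	/* hypothetical message type */
	#define MY_ATTR_COUNTER	1	/* hypothetical attribute id */

	static int my_put_job(struct sk_buff *skb, u32 portid, u32 seq,
			      int flags, u32 counter)
	{
		struct nlmsghdr *nlh;

		nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, 0, flags);
		if (!nlh)
			return -EMSGSIZE;

		if (nla_put_u32(skb, MY_ATTR_COUNTER, counter) < 0)
			goto cancel;

		nlmsg_end(skb, nlh);	/* patch nlmsg_len now that attributes are in */
		return 0;

	cancel:
		nlmsg_cancel(skb, nlh);	/* trim the skb back to before this message */
		return -EMSGSIZE;
	}

The cancel path is what lets a dump callback return partial results safely: an over-full message is removed in one step and the dump resumes in the next skb.
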
skb                32 net/can/j1939/address-claim.c static inline name_t j1939_skb_to_name(const struct sk_buff *skb)
skb                34 net/can/j1939/address-claim.c 	return le64_to_cpup((__le64 *)skb->data);
skb                37 net/can/j1939/address-claim.c static inline bool j1939_ac_msg_is_request(struct sk_buff *skb)
skb                39 net/can/j1939/address-claim.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb                42 net/can/j1939/address-claim.c 	if (skb->len < 3 || skcb->addr.pgn != J1939_PGN_REQUEST)
skb                45 net/can/j1939/address-claim.c 	req_pgn = skb->data[0] | (skb->data[1] << 8) | (skb->data[2] << 16);
skb                51 net/can/j1939/address-claim.c 				    struct sk_buff *skb)
skb                53 net/can/j1939/address-claim.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb                55 net/can/j1939/address-claim.c 	if (skb->len != 8) {
skb                57 net/can/j1939/address-claim.c 			      skb->len);
skb                61 net/can/j1939/address-claim.c 	if (skcb->addr.src_name != j1939_skb_to_name(skb)) {
skb                79 net/can/j1939/address-claim.c int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb)
skb                81 net/can/j1939/address-claim.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb                89 net/can/j1939/address-claim.c 		ret = j1939_ac_verify_outgoing(priv, skb);
skb               105 net/can/j1939/address-claim.c 		    !j1939_ac_msg_is_request(skb)) {
skb               126 net/can/j1939/address-claim.c static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb)
skb               128 net/can/j1939/address-claim.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb               132 net/can/j1939/address-claim.c 	if (skb->len != 8) {
skb               134 net/can/j1939/address-claim.c 			      skb->len);
skb               138 net/can/j1939/address-claim.c 	name = j1939_skb_to_name(skb);
skb               207 net/can/j1939/address-claim.c void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb)
skb               209 net/can/j1939/address-claim.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb               214 net/can/j1939/address-claim.c 		j1939_ac_process(priv, skb);
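
Two byte-level details worth spelling out from address-claim.c: j1939_skb_to_name() reads the 64-bit NAME little-endian from the 8-byte payload, and j1939_ac_msg_is_request() assembles the requested PGN from the first three payload bytes of a Request (PGN 0x0ea00), LSB first. The same extractions in plain user-space C, mirroring the kernel helpers:

	#include <stdint.h>

	/* 64-bit NAME, little-endian in the 8-byte payload (cf. le64_to_cpup()) */
	static uint64_t payload_to_name(const uint8_t dat[8])
	{
		uint64_t name = 0;
		int i;

		for (i = 7; i >= 0; i--)
			name = (name << 8) | dat[i];
		return name;
	}

	/* requested PGN: first three payload bytes of a Request frame, LSB first */
	static uint32_t request_to_pgn(const uint8_t dat[3])
	{
		return dat[0] | (dat[1] << 8) | ((uint32_t)dat[2] << 16);
	}
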
skb               172 net/can/j1939/j1939-priv.h struct j1939_sk_buff_cb *j1939_skb_to_cb(const struct sk_buff *skb)
skb               174 net/can/j1939/j1939-priv.h 	BUILD_BUG_ON(sizeof(struct j1939_sk_buff_cb) > sizeof(skb->cb));
skb               176 net/can/j1939/j1939-priv.h 	return (struct j1939_sk_buff_cb *)skb->cb;
skb               179 net/can/j1939/j1939-priv.h int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb);
skb               180 net/can/j1939/j1939-priv.h void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb);
skb               190 net/can/j1939/j1939-priv.h 				    struct sk_buff *skb, size_t size);
skb               191 net/can/j1939/j1939-priv.h int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb);
skb               192 net/can/j1939/j1939-priv.h int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb);
skb               193 net/can/j1939/j1939-priv.h void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb);
skb               194 net/can/j1939/j1939-priv.h void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb);
skb               324 net/can/j1939/j1939-priv.h 			     struct sk_buff *skb);
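
j1939_skb_to_cb() relies on the 48-byte skb->cb scratch area that an skb's current owner may use freely; the BUILD_BUG_ON() turns the size constraint into a compile-time failure. The same overlay pattern, reduced to a hypothetical protocol:

	#include <linux/skbuff.h>

	struct my_skb_cb {		/* hypothetical per-packet state */
		u32 flags;
		u8 addr;
	};

	static inline struct my_skb_cb *my_skb_to_cb(struct sk_buff *skb)
	{
		/* refuse to build if the state no longer fits in skb->cb */
		BUILD_BUG_ON(sizeof(struct my_skb_cb) > sizeof(skb->cb));
		return (struct my_skb_cb *)skb->cb;
	}
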
skb                41 net/can/j1939/main.c 	struct sk_buff *skb;
skb                50 net/can/j1939/main.c 	skb = skb_clone(iskb, GFP_ATOMIC);
skb                51 net/can/j1939/main.c 	if (!skb)
skb                55 net/can/j1939/main.c 	can_skb_set_owner(skb, iskb->sk);
skb                61 net/can/j1939/main.c 	cf = (void *)skb->data;
skb                62 net/can/j1939/main.c 	skb_pull(skb, J1939_CAN_HDR);
skb                65 net/can/j1939/main.c 	skb_trim(skb, min_t(uint8_t, cf->can_dlc, 8));
skb                68 net/can/j1939/main.c 	skcb = j1939_skb_to_cb(skb);
skb                99 net/can/j1939/main.c 	j1939_ac_recv(priv, skb);
skb               101 net/can/j1939/main.c 	if (j1939_tp_recv(priv, skb))
skb               105 net/can/j1939/main.c 	j1939_simple_recv(priv, skb);
skb               106 net/can/j1939/main.c 	j1939_sk_recv(priv, skb);
skb               109 net/can/j1939/main.c 	kfree_skb(skb);
skb               303 net/can/j1939/main.c int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
skb               307 net/can/j1939/main.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb               319 net/can/j1939/main.c 	ret = j1939_ac_fixup(priv, skb);
skb               322 net/can/j1939/main.c 	dlc = skb->len;
skb               325 net/can/j1939/main.c 	cf = skb_push(skb, J1939_CAN_HDR);
skb               328 net/can/j1939/main.c 	skb_put(skb, J1939_CAN_FTR + (8 - dlc));
skb               340 net/can/j1939/main.c 	return can_send(skb, 1);
skb               343 net/can/j1939/main.c 	kfree_skb(skb);
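
j1939_send_one() above turns a bare J1939 payload into a classic CAN frame by pushing the header in front of the data and padding the payload out to eight bytes. A simplified sketch of that framing, with the J1939_CAN_HDR/J1939_CAN_FTR constants replaced by their likely offsetof/CAN_MAX_DLEN equivalents (an assumption about how those macros are defined):

	#include <linux/can.h>
	#include <linux/skbuff.h>
	#include <linux/stddef.h>

	/* prepend the CAN header and pad the data to a full classic frame */
	static void frame_can_payload(struct sk_buff *skb, canid_t can_id)
	{
		unsigned int dlc = skb->len;	/* J1939 payload already in the skb */
		struct can_frame *cf;

		cf = skb_push(skb, offsetof(struct can_frame, data));
		cf->can_id = can_id | CAN_EFF_FLAG;	/* J1939 IDs are 29 bit */
		cf->can_dlc = dlc;

		skb_put(skb, CAN_MAX_DLEN - dlc);	/* always send 8 data bytes */
	}
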
skb               299 net/can/j1939/socket.c 	struct sk_buff *skb;
skb               307 net/can/j1939/socket.c 	skb = skb_clone(oskb, GFP_ATOMIC);
skb               308 net/can/j1939/socket.c 	if (!skb) {
skb               312 net/can/j1939/socket.c 	can_skb_set_owner(skb, oskb->sk);
skb               314 net/can/j1939/socket.c 	skcb = j1939_skb_to_cb(skb);
skb               316 net/can/j1939/socket.c 	if (skb->sk)
skb               319 net/can/j1939/socket.c 	if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
skb               320 net/can/j1939/socket.c 		kfree_skb(skb);
skb               339 net/can/j1939/socket.c void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
skb               345 net/can/j1939/socket.c 		j1939_sk_recv_one(jsk, skb);
skb               776 net/can/j1939/socket.c 	struct sk_buff *skb;
skb               787 net/can/j1939/socket.c 	skb = skb_recv_datagram(sk, flags, 0, &ret);
skb               788 net/can/j1939/socket.c 	if (!skb)
skb               791 net/can/j1939/socket.c 	if (size < skb->len)
skb               794 net/can/j1939/socket.c 		size = skb->len;
skb               796 net/can/j1939/socket.c 	ret = memcpy_to_msg(msg, skb->data, size);
skb               798 net/can/j1939/socket.c 		skb_free_datagram(sk, skb);
skb               802 net/can/j1939/socket.c 	skcb = j1939_skb_to_cb(skb);
skb               820 net/can/j1939/socket.c 		paddr->can_ifindex = skb->skb_iif;
skb               826 net/can/j1939/socket.c 	sock_recv_ts_and_drops(msg, sk, skb);
skb               828 net/can/j1939/socket.c 	skb_free_datagram(sk, skb);
skb               840 net/can/j1939/socket.c 	struct sk_buff *skb;
skb               843 net/can/j1939/socket.c 	skb = sock_alloc_send_skb(sk,
skb               849 net/can/j1939/socket.c 	if (!skb)
skb               852 net/can/j1939/socket.c 	can_skb_reserve(skb);
skb               853 net/can/j1939/socket.c 	can_skb_prv(skb)->ifindex = ndev->ifindex;
skb               854 net/can/j1939/socket.c 	can_skb_prv(skb)->skbcnt = 0;
skb               855 net/can/j1939/socket.c 	skb_reserve(skb, offsetof(struct can_frame, data));
skb               857 net/can/j1939/socket.c 	ret = memcpy_from_msg(skb_put(skb, size), msg, size);
skb               861 net/can/j1939/socket.c 	skb->dev = ndev;
skb               863 net/can/j1939/socket.c 	skcb = j1939_skb_to_cb(skb);
skb               881 net/can/j1939/socket.c 	return skb;
skb               884 net/can/j1939/socket.c 	kfree_skb(skb);
skb               925 net/can/j1939/socket.c 	struct sk_buff *skb;
skb               938 net/can/j1939/socket.c 	skb = j1939_sk_get_timestamping_opt_stats(session);
skb               939 net/can/j1939/socket.c 	if (!skb)
skb               942 net/can/j1939/socket.c 	skb->tstamp = ktime_get_real();
skb               944 net/can/j1939/socket.c 	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
skb               946 net/can/j1939/socket.c 	serr = SKB_EXT_ERR(skb);
skb               951 net/can/j1939/socket.c 			kfree_skb(skb);
skb               962 net/can/j1939/socket.c 			kfree_skb(skb);
skb               987 net/can/j1939/socket.c 	err = sock_queue_err_skb(sk, skb);
skb               990 net/can/j1939/socket.c 		kfree_skb(skb);
skb              1006 net/can/j1939/socket.c 	struct sk_buff *skb;
skb              1025 net/can/j1939/socket.c 		skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
skb              1030 net/can/j1939/socket.c 		skcb = j1939_skb_to_cb(skb);
skb              1037 net/can/j1939/socket.c 			session = j1939_tp_send(priv, skb, size);
skb              1058 net/can/j1939/socket.c 			j1939_session_skb_queue(session, skb);
skb              1090 net/can/j1939/socket.c 	kfree_skb(skb);
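
j1939_sk_alloc_skb() and the sendmsg path above are the kernel half of the CAN_J1939 datagram socket. For orientation, a minimal, untested user-space counterpart, assuming the uapi constants from linux/can/j1939.h; the interface name, source address, and PGN are placeholders:

	#include <linux/can.h>
	#include <linux/can/j1939.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_can addr = {
			.can_family = AF_CAN,
			.can_ifindex = (int)if_nametoindex("can0"),
			.can_addr.j1939 = {
				.name = J1939_NO_NAME,
				.pgn = J1939_NO_PGN,
				.addr = 0x20,			/* our source address */
			},
		};
		unsigned char dat[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
		int sock;

		sock = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
		if (sock < 0 || bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			perror("j1939");
			return 1;
		}

		addr.can_addr.j1939.addr = J1939_NO_ADDR;	/* 0xff = broadcast */
		addr.can_addr.j1939.pgn = 0x12300;		/* placeholder PGN */
		if (sendto(sock, dat, sizeof(dat), 0,
			   (struct sockaddr *)&addr, sizeof(addr)) < 0)
			perror("sendto");

		close(sock);
		return 0;
	}

Payloads up to 8 bytes go out as single frames via j1939_send_one(); anything larger is handed to the transport sessions in transport.c below.
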
skb               339 net/can/j1939/transport.c 			     struct sk_buff *skb)
skb               341 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb               344 net/can/j1939/transport.c 	j1939_ac_fixup(priv, skb);
skb               352 net/can/j1939/transport.c 	skb_queue_tail(&session->skb_queue, skb);
skb               358 net/can/j1939/transport.c 	struct sk_buff *skb = NULL;
skb               372 net/can/j1939/transport.c 			skb = do_skb;
skb               377 net/can/j1939/transport.c 	if (!skb)
skb               382 net/can/j1939/transport.c 	return skb;
skb               508 net/can/j1939/transport.c 					struct sk_buff *skb)
skb               510 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb               519 net/can/j1939/transport.c 		    session->tskey == skcb->tskey && session->sk == skb->sk)
skb               565 net/can/j1939/transport.c 	struct sk_buff *skb;
skb               568 net/can/j1939/transport.c 	skb = alloc_skb(sizeof(struct can_frame) + sizeof(struct can_skb_priv),
skb               570 net/can/j1939/transport.c 	if (unlikely(!skb))
skb               573 net/can/j1939/transport.c 	skb->dev = priv->ndev;
skb               574 net/can/j1939/transport.c 	can_skb_reserve(skb);
skb               575 net/can/j1939/transport.c 	can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
skb               577 net/can/j1939/transport.c 	skb_reserve(skb, offsetof(struct can_frame, data));
skb               579 net/can/j1939/transport.c 	memcpy(skb->cb, re_skcb, sizeof(skb->cb));
skb               580 net/can/j1939/transport.c 	skcb = j1939_skb_to_cb(skb);
skb               596 net/can/j1939/transport.c 	return skb;
skb               604 net/can/j1939/transport.c 	struct sk_buff *skb;
skb               606 net/can/j1939/transport.c 	skb = j1939_tp_tx_dat_new(priv, &session->skcb,
skb               608 net/can/j1939/transport.c 	if (IS_ERR(skb))
skb               609 net/can/j1939/transport.c 		return PTR_ERR(skb);
skb               611 net/can/j1939/transport.c 	skb_put_data(skb, dat, len);
skb               613 net/can/j1939/transport.c 		memset(skb_put(skb, 8 - len), 0xff, 8 - len);
skb               615 net/can/j1939/transport.c 	return j1939_send_one(priv, skb);
skb               622 net/can/j1939/transport.c 	struct sk_buff *skb;
skb               628 net/can/j1939/transport.c 	skb = j1939_tp_tx_dat_new(priv, re_skcb, true, swap_src_dst);
skb               629 net/can/j1939/transport.c 	if (IS_ERR(skb))
skb               630 net/can/j1939/transport.c 		return PTR_ERR(skb);
skb               632 net/can/j1939/transport.c 	skdat = skb_put(skb, 8);
skb               638 net/can/j1939/transport.c 	return j1939_send_one(priv, skb);
skb               989 net/can/j1939/transport.c 	struct sk_buff *skb;
skb               995 net/can/j1939/transport.c 	skb = skb_clone(se_skb, GFP_ATOMIC);
skb               996 net/can/j1939/transport.c 	if (!skb)
skb               999 net/can/j1939/transport.c 	can_skb_set_owner(skb, se_skb->sk);
skb              1003 net/can/j1939/transport.c 	ret = j1939_send_one(priv, skb);
skb              1145 net/can/j1939/transport.c 	struct sk_buff *skb;
skb              1148 net/can/j1939/transport.c 		skb = j1939_session_skb_find(session);
skb              1150 net/can/j1939/transport.c 		j1939_sk_recv(session->priv, skb);
skb              1200 net/can/j1939/transport.c 				     const struct sk_buff *skb)
skb              1202 net/can/j1939/transport.c 	const struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1203 net/can/j1939/transport.c 	pgn_t pgn = j1939_xtp_ctl_to_pgn(skb->data);
skb              1206 net/can/j1939/transport.c 	u8 cmd = skb->data[0];
skb              1252 net/can/j1939/transport.c static void j1939_xtp_rx_abort_one(struct j1939_priv *priv, struct sk_buff *skb,
skb              1255 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1257 net/can/j1939/transport.c 	u8 abort = skb->data[1];
skb              1264 net/can/j1939/transport.c 	if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
skb              1268 net/can/j1939/transport.c 		    session, j1939_xtp_ctl_to_pgn(skb->data), abort,
skb              1283 net/can/j1939/transport.c j1939_xtp_rx_abort(struct j1939_priv *priv, struct sk_buff *skb,
skb              1286 net/can/j1939/transport.c 	j1939_xtp_rx_abort_one(priv, skb, false, transmitter);
skb              1287 net/can/j1939/transport.c 	j1939_xtp_rx_abort_one(priv, skb, true, transmitter);
skb              1291 net/can/j1939/transport.c j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb)
skb              1293 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1297 net/can/j1939/transport.c 	if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
skb              1300 net/can/j1939/transport.c 	dat = skb->data;
skb              1323 net/can/j1939/transport.c j1939_xtp_rx_eoma(struct j1939_priv *priv, struct sk_buff *skb,
skb              1326 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1334 net/can/j1939/transport.c 	j1939_xtp_rx_eoma_one(session, skb);
skb              1339 net/can/j1939/transport.c j1939_xtp_rx_cts_one(struct j1939_session *session, struct sk_buff *skb)
skb              1345 net/can/j1939/transport.c 	dat = skb->data;
skb              1347 net/can/j1939/transport.c 	if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
skb              1399 net/can/j1939/transport.c j1939_xtp_rx_cts(struct j1939_priv *priv, struct sk_buff *skb, bool transmitter)
skb              1401 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1408 net/can/j1939/transport.c 	j1939_xtp_rx_cts_one(session, skb);
skb              1413 net/can/j1939/transport.c 					       struct sk_buff *skb, size_t size)
skb              1432 net/can/j1939/transport.c 	skb_queue_tail(&session->skb_queue, skb);
skb              1434 net/can/j1939/transport.c 	skcb = j1939_skb_to_cb(skb);
skb              1455 net/can/j1939/transport.c 	struct sk_buff *skb;
skb              1459 net/can/j1939/transport.c 	skb = alloc_skb(size + sizeof(struct can_skb_priv), GFP_ATOMIC);
skb              1460 net/can/j1939/transport.c 	if (unlikely(!skb))
skb              1463 net/can/j1939/transport.c 	skb->dev = priv->ndev;
skb              1464 net/can/j1939/transport.c 	can_skb_reserve(skb);
skb              1465 net/can/j1939/transport.c 	can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
skb              1466 net/can/j1939/transport.c 	skcb = j1939_skb_to_cb(skb);
skb              1469 net/can/j1939/transport.c 	session = j1939_session_new(priv, skb, size);
skb              1471 net/can/j1939/transport.c 		kfree_skb(skb);
skb              1476 net/can/j1939/transport.c 	skb_put(skb, size);
skb              1513 net/can/j1939/transport.c 					    struct sk_buff *skb)
skb              1516 net/can/j1939/transport.c 	struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb);
skb              1524 net/can/j1939/transport.c 	dat = skb->data;
skb              1580 net/can/j1939/transport.c 					   struct sk_buff *skb)
skb              1582 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1586 net/can/j1939/transport.c 		if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
skb              1624 net/can/j1939/transport.c static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
skb              1627 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1629 net/can/j1939/transport.c 	u8 cmd = skb->data[0];
skb              1653 net/can/j1939/transport.c 		session = j1939_xtp_rx_rts_session_new(priv, skb);
skb              1657 net/can/j1939/transport.c 		if (j1939_xtp_rx_rts_session_active(session, skb)) {
skb              1675 net/can/j1939/transport.c 				 struct sk_buff *skb)
skb              1677 net/can/j1939/transport.c 	const u8 *dat = skb->data;
skb              1679 net/can/j1939/transport.c 	if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
skb              1685 net/can/j1939/transport.c 	session->pkt.dpo = j1939_etp_ctl_to_packet(skb->data);
skb              1690 net/can/j1939/transport.c static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
skb              1693 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1704 net/can/j1939/transport.c 	j1939_xtp_rx_dpo_one(session, skb);
skb              1709 net/can/j1939/transport.c 				 struct sk_buff *skb)
skb              1722 net/can/j1939/transport.c 	skcb = j1939_skb_to_cb(skb);
skb              1723 net/can/j1939/transport.c 	dat = skb->data;
skb              1724 net/can/j1939/transport.c 	if (skb->len <= 1)
skb              1765 net/can/j1939/transport.c 	if (nbytes <= 0 || (nbytes + 1) > skb->len) {
skb              1767 net/can/j1939/transport.c 			    __func__, session, nbytes, skb->len);
skb              1806 net/can/j1939/transport.c static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb)
skb              1811 net/can/j1939/transport.c 	skcb = j1939_skb_to_cb(skb);
skb              1820 net/can/j1939/transport.c 			j1939_xtp_rx_dat_one(session, skb);
skb              1830 net/can/j1939/transport.c 			j1939_xtp_rx_dat_one(session, skb);
skb              1836 net/can/j1939/transport.c 				    struct sk_buff *skb, size_t size)
skb              1838 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1864 net/can/j1939/transport.c 	ret = j1939_ac_fixup(priv, skb);
skb              1877 net/can/j1939/transport.c 	session = j1939_session_new(priv, skb, size);
skb              1882 net/can/j1939/transport.c 	sock_hold(skb->sk);
skb              1883 net/can/j1939/transport.c 	session->sk = skb->sk;
skb              1899 net/can/j1939/transport.c static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
skb              1901 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1903 net/can/j1939/transport.c 	u8 cmd = skb->data[0];
skb              1921 net/can/j1939/transport.c 			j1939_xtp_rx_rts(priv, skb, true);
skb              1924 net/can/j1939/transport.c 			j1939_xtp_rx_rts(priv, skb, false);
skb              1936 net/can/j1939/transport.c 			j1939_xtp_rx_cts(priv, skb, false);
skb              1939 net/can/j1939/transport.c 			j1939_xtp_rx_cts(priv, skb, true);
skb              1948 net/can/j1939/transport.c 			j1939_xtp_rx_dpo(priv, skb, true);
skb              1951 net/can/j1939/transport.c 			j1939_xtp_rx_dpo(priv, skb, false);
skb              1963 net/can/j1939/transport.c 			j1939_xtp_rx_eoma(priv, skb, false);
skb              1966 net/can/j1939/transport.c 			j1939_xtp_rx_eoma(priv, skb, true);
skb              1972 net/can/j1939/transport.c 			j1939_xtp_rx_abort(priv, skb, true);
skb              1975 net/can/j1939/transport.c 			j1939_xtp_rx_abort(priv, skb, false);
skb              1983 net/can/j1939/transport.c int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb)
skb              1985 net/can/j1939/transport.c 	struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
skb              1995 net/can/j1939/transport.c 		j1939_xtp_rx_dat(priv, skb);
skb              2002 net/can/j1939/transport.c 		if (skb->len < 8)
skb              2005 net/can/j1939/transport.c 		j1939_tp_cmd_recv(priv, skb);
skb              2013 net/can/j1939/transport.c void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb)
skb              2017 net/can/j1939/transport.c 	if (!skb->sk)
skb              2021 net/can/j1939/transport.c 	session = j1939_session_get_simple(priv, skb);
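
The session machinery above fragments a J1939 message into TP.DT frames of one sequence byte plus seven data bytes, so classic TP tops out at 255 * 7 = 1785 bytes; larger transfers use ETP with DPO-relative packet numbers (j1939_xtp_rx_dpo() above). A small worked example of the arithmetic, with a hypothetical session size:

	#include <stdio.h>

	int main(void)
	{
		unsigned int size = 100;			/* hypothetical session size */
		unsigned int pkts = (size + 6) / 7;		/* DIV_ROUND_UP(size, 7) */
		unsigned int last = size - (pkts - 1) * 7;	/* bytes in the final frame */

		printf("%u bytes -> %u TP.DT frames, %u byte(s) in the last one\n",
		       size, pkts, last);
		return 0;
	}
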
skb                78 net/can/raw.c  	const struct sk_buff *skb;
skb               102 net/can/raw.c  static inline unsigned int *raw_flags(struct sk_buff *skb)
skb               108 net/can/raw.c  	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
skb               121 net/can/raw.c  	struct sk_buff *skb;
skb               133 net/can/raw.c  	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
skb               144 net/can/raw.c  		this_cpu_ptr(ro->uniq)->skb = oskb;
skb               153 net/can/raw.c  	skb = skb_clone(oskb, GFP_ATOMIC);
skb               154 net/can/raw.c  	if (!skb)
skb               164 net/can/raw.c  	addr = (struct sockaddr_can *)skb->cb;
skb               167 net/can/raw.c  	addr->can_ifindex = skb->dev->ifindex;
skb               170 net/can/raw.c  	pflags = raw_flags(skb);
skb               177 net/can/raw.c  	if (sock_queue_rcv_skb(sk, skb) < 0)
skb               178 net/can/raw.c  		kfree_skb(skb);
skb               728 net/can/raw.c  	struct sk_buff *skb;
skb               760 net/can/raw.c  	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
skb               762 net/can/raw.c  	if (!skb)
skb               765 net/can/raw.c  	can_skb_reserve(skb);
skb               766 net/can/raw.c  	can_skb_prv(skb)->ifindex = dev->ifindex;
skb               767 net/can/raw.c  	can_skb_prv(skb)->skbcnt = 0;
skb               769 net/can/raw.c  	err = memcpy_from_msg(skb_put(skb, size), msg, size);
skb               773 net/can/raw.c  	skb_setup_tx_timestamp(skb, sk->sk_tsflags);
skb               775 net/can/raw.c  	skb->dev = dev;
skb               776 net/can/raw.c  	skb->sk  = sk;
skb               777 net/can/raw.c  	skb->priority = sk->sk_priority;
skb               779 net/can/raw.c  	err = can_send(skb, ro->loopback);
skb               789 net/can/raw.c  	kfree_skb(skb);
skb               800 net/can/raw.c  	struct sk_buff *skb;
skb               807 net/can/raw.c  	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb               808 net/can/raw.c  	if (!skb)
skb               811 net/can/raw.c  	if (size < skb->len)
skb               814 net/can/raw.c  		size = skb->len;
skb               816 net/can/raw.c  	err = memcpy_to_msg(msg, skb->data, size);
skb               818 net/can/raw.c  		skb_free_datagram(sk, skb);
skb               822 net/can/raw.c  	sock_recv_ts_and_drops(msg, sk, skb);
skb               827 net/can/raw.c  		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
skb               831 net/can/raw.c  	msg->msg_flags |= *(raw_flags(skb));
skb               833 net/can/raw.c  	skb_free_datagram(sk, skb);
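
raw_sendmsg()/raw_recvmsg() above are the kernel side of the classic CAN_RAW socket. A minimal user-space counterpart, along the lines documented for the CAN subsystem (interface name and frame contents are placeholders):

	#include <linux/can.h>
	#include <linux/can/raw.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_can addr = { .can_family = AF_CAN };
		struct can_frame cf = {
			.can_id  = 0x123,
			.can_dlc = 2,
			.data    = { 0xde, 0xad },
		};
		int sock = socket(PF_CAN, SOCK_RAW, CAN_RAW);

		addr.can_ifindex = (int)if_nametoindex("can0");
		if (sock < 0 || bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			perror("can_raw");
			return 1;
		}

		/* classic frames are always written/read as full struct can_frame */
		if (write(sock, &cf, sizeof(cf)) != sizeof(cf))
			perror("write");
		if (read(sock, &cf, sizeof(cf)) == sizeof(cf))	/* blocks for a frame */
			printf("can_id 0x%x dlc %u\n", cf.can_id, cf.can_dlc);

		close(sock);
		return 0;
	}
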
skb                89 net/core/datagram.c 				const struct sk_buff *skb)
skb               101 net/core/datagram.c 	if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
skb               137 net/core/datagram.c static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
skb               141 net/core/datagram.c 	if (skb->peeked)
skb               142 net/core/datagram.c 		return skb;
skb               145 net/core/datagram.c 	if (!skb_shared(skb))
skb               148 net/core/datagram.c 	nskb = skb_clone(skb, GFP_ATOMIC);
skb               152 net/core/datagram.c 	skb->prev->next = nskb;
skb               153 net/core/datagram.c 	skb->next->prev = nskb;
skb               154 net/core/datagram.c 	nskb->prev = skb->prev;
skb               155 net/core/datagram.c 	nskb->next = skb->next;
skb               157 net/core/datagram.c 	consume_skb(skb);
skb               158 net/core/datagram.c 	skb = nskb;
skb               161 net/core/datagram.c 	skb->peeked = 1;
skb               163 net/core/datagram.c 	return skb;
skb               170 net/core/datagram.c 							   struct sk_buff *skb),
skb               175 net/core/datagram.c 	struct sk_buff *skb;
skb               184 net/core/datagram.c 	skb_queue_walk(queue, skb) {
skb               186 net/core/datagram.c 			if (peek_at_off && _off >= skb->len &&
skb               187 net/core/datagram.c 			    (_off || skb->peeked)) {
skb               188 net/core/datagram.c 				_off -= skb->len;
skb               191 net/core/datagram.c 			if (!skb->len) {
skb               192 net/core/datagram.c 				skb = skb_set_peeked(skb);
skb               193 net/core/datagram.c 				if (IS_ERR(skb)) {
skb               194 net/core/datagram.c 					*err = PTR_ERR(skb);
skb               198 net/core/datagram.c 			refcount_inc(&skb->users);
skb               200 net/core/datagram.c 			__skb_unlink(skb, queue);
skb               202 net/core/datagram.c 				destructor(sk, skb);
skb               205 net/core/datagram.c 		return skb;
skb               247 net/core/datagram.c 							   struct sk_buff *skb),
skb               252 net/core/datagram.c 	struct sk_buff *skb;
skb               270 net/core/datagram.c 		skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
skb               275 net/core/datagram.c 		if (skb)
skb               276 net/core/datagram.c 			return skb;
skb               294 net/core/datagram.c 						       struct sk_buff *skb),
skb               297 net/core/datagram.c 	struct sk_buff *skb, *last;
skb               303 net/core/datagram.c 		skb = __skb_try_recv_datagram(sk, flags, destructor, off, err,
skb               305 net/core/datagram.c 		if (skb)
skb               306 net/core/datagram.c 			return skb;
skb               327 net/core/datagram.c void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
skb               329 net/core/datagram.c 	consume_skb(skb);
skb               334 net/core/datagram.c void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
skb               338 net/core/datagram.c 	if (!skb_unref(skb)) {
skb               345 net/core/datagram.c 	skb_orphan(skb);
skb               350 net/core/datagram.c 	__kfree_skb(skb);
skb               355 net/core/datagram.c 			struct sk_buff *skb, unsigned int flags,
skb               357 net/core/datagram.c 					   struct sk_buff *skb))
skb               364 net/core/datagram.c 		if (skb->next) {
skb               365 net/core/datagram.c 			__skb_unlink(skb, sk_queue);
skb               366 net/core/datagram.c 			refcount_dec(&skb->users);
skb               368 net/core/datagram.c 				destructor(sk, skb);
skb               400 net/core/datagram.c int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
skb               402 net/core/datagram.c 	int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
skb               405 net/core/datagram.c 	kfree_skb(skb);
skb               416 net/core/datagram.c static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
skb               421 net/core/datagram.c 	int start = skb_headlen(skb);
skb               430 net/core/datagram.c 				    skb->data + offset, copy, data, to);
skb               439 net/core/datagram.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               441 net/core/datagram.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               465 net/core/datagram.c 	skb_walk_frags(skb, frag_iter) {
skb               511 net/core/datagram.c int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
skb               515 net/core/datagram.c 	return __skb_datagram_iter(skb, offset, to, len, true,
skb               533 net/core/datagram.c int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
skb               536 net/core/datagram.c 	trace_skb_copy_datagram_iovec(skb, len);
skb               537 net/core/datagram.c 	return __skb_datagram_iter(skb, offset, to, len, false,
skb               551 net/core/datagram.c int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
skb               555 net/core/datagram.c 	int start = skb_headlen(skb);
skb               563 net/core/datagram.c 		if (copy_from_iter(skb->data + offset, copy, from) != copy)
skb               571 net/core/datagram.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb               573 net/core/datagram.c 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               596 net/core/datagram.c 	skb_walk_frags(skb, frag_iter) {
skb               623 net/core/datagram.c int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
skb               626 net/core/datagram.c 	int frag = skb_shinfo(skb)->nr_frags;
skb               647 net/core/datagram.c 		skb->data_len += copied;
skb               648 net/core/datagram.c 		skb->len += copied;
skb               649 net/core/datagram.c 		skb->truesize += truesize;
skb               654 net/core/datagram.c 			refcount_add(truesize, &skb->sk->sk_wmem_alloc);
skb               658 net/core/datagram.c 			skb_fill_page_desc(skb, frag++, pages[n], start, size);
skb               678 net/core/datagram.c int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
skb               680 net/core/datagram.c 	int copy = min_t(int, skb_headlen(skb), iov_iter_count(from));
skb               683 net/core/datagram.c 	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
skb               686 net/core/datagram.c 	return __zerocopy_sg_from_iter(NULL, skb, from, ~0U);
skb               699 net/core/datagram.c static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
skb               703 net/core/datagram.c 	return __skb_datagram_iter(skb, offset, to, len, true,
skb               719 net/core/datagram.c int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
skb               723 net/core/datagram.c 	int chunk = skb->len - hlen;
skb               729 net/core/datagram.c 		if (__skb_checksum_complete(skb))
skb               731 net/core/datagram.c 		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
skb               734 net/core/datagram.c 		csum = csum_partial(skb->data, hlen, skb->csum);
skb               735 net/core/datagram.c 		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
skb               744 net/core/datagram.c 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
skb               745 net/core/datagram.c 		    !skb->csum_complete_sw)
skb               746 net/core/datagram.c 			netdev_rx_csum_fault(NULL, skb);
skb                12 net/core/datagram.h int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
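
The skb_recv_datagram()/memcpy_to_msg()/skb_free_datagram() triple these helpers provide is the standard recvmsg() shape, used almost verbatim by can/raw.c and j1939/socket.c above. A minimal sketch under this era's four-argument skb_recv_datagram() signature, with all socket glue omitted:

	#include <linux/skbuff.h>
	#include <net/sock.h>

	static int my_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
			      int flags)
	{
		struct sk_buff *skb;
		int err = 0;

		skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
		if (!skb)
			return err;

		if (size < skb->len)
			msg->msg_flags |= MSG_TRUNC;	/* tell the caller we cut it */
		else
			size = skb->len;

		err = memcpy_to_msg(msg, skb->data, size);
		skb_free_datagram(sk, skb);		/* consume_skb() underneath */
		return err ? err : (int)size;
	}
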
skb               159 net/core/dev.c static int netif_rx_internal(struct sk_buff *skb);
skb               623 net/core/dev.c int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
skb               630 net/core/dev.c 	info = skb_tunnel_info_unclone(skb);
skb               636 net/core/dev.c 	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
skb              1809 net/core/dev.c static inline void net_timestamp_set(struct sk_buff *skb)
skb              1811 net/core/dev.c 	skb->tstamp = 0;
skb              1813 net/core/dev.c 		__net_timestamp(skb);
skb              1822 net/core/dev.c bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
skb              1830 net/core/dev.c 	if (skb->len <= len)
skb              1836 net/core/dev.c 	if (skb_is_gso(skb))
skb              1843 net/core/dev.c int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
skb              1845 net/core/dev.c 	int ret = ____dev_forward_skb(dev, skb);
skb              1848 net/core/dev.c 		skb->protocol = eth_type_trans(skb, dev);
skb              1849 net/core/dev.c 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
skb              1874 net/core/dev.c int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
skb              1876 net/core/dev.c 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
skb              1880 net/core/dev.c static inline int deliver_skb(struct sk_buff *skb,
skb              1884 net/core/dev.c 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
skb              1886 net/core/dev.c 	refcount_inc(&skb->users);
skb              1887 net/core/dev.c 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
skb              1890 net/core/dev.c static inline void deliver_ptype_list_skb(struct sk_buff *skb,
skb              1902 net/core/dev.c 			deliver_skb(skb, pt_prev, orig_dev);
skb              1908 net/core/dev.c static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
skb              1910 net/core/dev.c 	if (!ptype->af_packet_priv || !skb->sk)
skb              1914 net/core/dev.c 		return ptype->id_match(ptype, skb->sk);
skb              1915 net/core/dev.c 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
skb              1937 net/core/dev.c void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
skb              1953 net/core/dev.c 		if (skb_loop_sk(ptype, skb))
skb              1957 net/core/dev.c 			deliver_skb(skb2, pt_prev, skb->dev);
skb              1963 net/core/dev.c 		skb2 = skb_clone(skb, GFP_ATOMIC);
skb              1995 net/core/dev.c 			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
skb              2684 net/core/dev.c static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
skb              2686 net/core/dev.c 	return (struct dev_kfree_skb_cb *)skb->cb;
skb              2714 net/core/dev.c void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
skb              2718 net/core/dev.c 	if (unlikely(!skb))
skb              2721 net/core/dev.c 	if (likely(refcount_read(&skb->users) == 1)) {
skb              2723 net/core/dev.c 		refcount_set(&skb->users, 0);
skb              2724 net/core/dev.c 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
skb              2727 net/core/dev.c 	get_kfree_skb_cb(skb)->reason = reason;
skb              2729 net/core/dev.c 	skb->next = __this_cpu_read(softnet_data.completion_queue);
skb              2730 net/core/dev.c 	__this_cpu_write(softnet_data.completion_queue, skb);
skb              2736 net/core/dev.c void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
skb              2739 net/core/dev.c 		__dev_kfree_skb_irq(skb, reason);
skb              2741 net/core/dev.c 		dev_kfree_skb(skb);
skb              2783 net/core/dev.c 		       struct sk_buff *skb)
skb              2790 net/core/dev.c 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
skb              2796 net/core/dev.c 	if (skb_rx_queue_recorded(skb)) {
skb              2797 net/core/dev.c 		hash = skb_get_rx_queue(skb);
skb              2805 net/core/dev.c 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
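
skb_tx_hash() above spreads skb_get_hash() over qcount queues with reciprocal_scale(), a multiply-high range reduction that avoids a per-packet division. The helper is equivalent to:

	#include <stdint.h>
	#include <stdio.h>

	/* map a full-range 32-bit value onto [0, ep_ro) without a modulo */
	static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
	{
		return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
	}

	int main(void)
	{
		uint32_t hash = 0xdeadbeef;	/* stand-in for skb_get_hash() */
		uint16_t qcount = 8, qoffset = 0;

		printf("queue %u\n",
		       (uint16_t)reciprocal_scale(hash, qcount) + qoffset);
		return 0;
	}
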
skb              2808 net/core/dev.c static void skb_warn_bad_offload(const struct sk_buff *skb)
skb              2811 net/core/dev.c 	struct net_device *dev = skb->dev;
skb              2823 net/core/dev.c 	skb_dump(KERN_WARNING, skb, false);
skb              2826 net/core/dev.c 	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
skb              2833 net/core/dev.c int skb_checksum_help(struct sk_buff *skb)
skb              2838 net/core/dev.c 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb              2841 net/core/dev.c 	if (unlikely(skb_shinfo(skb)->gso_size)) {
skb              2842 net/core/dev.c 		skb_warn_bad_offload(skb);
skb              2849 net/core/dev.c 	if (skb_has_shared_frag(skb)) {
skb              2850 net/core/dev.c 		ret = __skb_linearize(skb);
skb              2855 net/core/dev.c 	offset = skb_checksum_start_offset(skb);
skb              2856 net/core/dev.c 	BUG_ON(offset >= skb_headlen(skb));
skb              2857 net/core/dev.c 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb              2859 net/core/dev.c 	offset += skb->csum_offset;
skb              2860 net/core/dev.c 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
skb              2862 net/core/dev.c 	if (skb_cloned(skb) &&
skb              2863 net/core/dev.c 	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
skb              2864 net/core/dev.c 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
skb              2869 net/core/dev.c 	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
skb              2871 net/core/dev.c 	skb->ip_summed = CHECKSUM_NONE;
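
skb_checksum_help() finishes by folding the 32-bit one's-complement accumulator into the 16-bit checksum field; csum_fold() adds the halves (twice, to absorb the carry) and complements, and an all-zero result is replaced with CSUM_MANGLED_0 (0xffff) because a zero UDP checksum means "no checksum". A user-space rendering of the fold:

	#include <stdint.h>

	static uint16_t csum_fold(uint32_t csum)
	{
		csum = (csum & 0xffff) + (csum >> 16);	/* fold high half in */
		csum = (csum & 0xffff) + (csum >> 16);	/* absorb the carry */
		return (uint16_t)~csum;
	}

	static uint16_t finish_checksum(uint32_t partial)
	{
		uint16_t sum = csum_fold(partial);

		return sum ? sum : 0xffff;		/* CSUM_MANGLED_0 */
	}
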
skb              2877 net/core/dev.c int skb_crc32c_csum_help(struct sk_buff *skb)
skb              2882 net/core/dev.c 	if (skb->ip_summed != CHECKSUM_PARTIAL)
skb              2885 net/core/dev.c 	if (unlikely(skb_is_gso(skb)))
skb              2891 net/core/dev.c 	if (unlikely(skb_has_shared_frag(skb))) {
skb              2892 net/core/dev.c 		ret = __skb_linearize(skb);
skb              2896 net/core/dev.c 	start = skb_checksum_start_offset(skb);
skb              2898 net/core/dev.c 	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
skb              2902 net/core/dev.c 	if (skb_cloned(skb) &&
skb              2903 net/core/dev.c 	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
skb              2904 net/core/dev.c 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
skb              2908 net/core/dev.c 	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
skb              2909 net/core/dev.c 						  skb->len - start, ~(__u32)0,
skb              2911 net/core/dev.c 	*(__le32 *)(skb->data + offset) = crc32c_csum;
skb              2912 net/core/dev.c 	skb->ip_summed = CHECKSUM_NONE;
skb              2913 net/core/dev.c 	skb->csum_not_inet = 0;
skb              2918 net/core/dev.c __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
skb              2920 net/core/dev.c 	__be16 type = skb->protocol;
skb              2926 net/core/dev.c 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
skb              2929 net/core/dev.c 		eth = (struct ethhdr *)skb->data;
skb              2933 net/core/dev.c 	return __vlan_get_protocol(skb, type, depth);
skb              2941 net/core/dev.c struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
skb              2946 net/core/dev.c 	int vlan_depth = skb->mac_len;
skb              2947 net/core/dev.c 	__be16 type = skb_network_protocol(skb, &vlan_depth);
skb              2952 net/core/dev.c 	__skb_pull(skb, vlan_depth);
skb              2957 net/core/dev.c 			segs = ptype->callbacks.gso_segment(skb, features);
skb              2963 net/core/dev.c 	__skb_push(skb, skb->data - skb_mac_header(skb));
skb              2972 net/core/dev.c static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
skb              2975 net/core/dev.c 		return skb->ip_summed != CHECKSUM_PARTIAL &&
skb              2976 net/core/dev.c 		       skb->ip_summed != CHECKSUM_UNNECESSARY;
skb              2978 net/core/dev.c 	return skb->ip_summed == CHECKSUM_NONE;
skb              2994 net/core/dev.c struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
skb              2999 net/core/dev.c 	if (unlikely(skb_needs_check(skb, tx_path))) {
skb              3003 net/core/dev.c 		err = skb_cow_head(skb, 0);
skb              3014 net/core/dev.c 		struct net_device *dev = skb->dev;
skb              3017 net/core/dev.c 		if (!skb_gso_ok(skb, features | partial_features))
skb              3022 net/core/dev.c 		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
skb              3024 net/core/dev.c 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
skb              3025 net/core/dev.c 	SKB_GSO_CB(skb)->encap_level = 0;
skb              3027 net/core/dev.c 	skb_reset_mac_header(skb);
skb              3028 net/core/dev.c 	skb_reset_mac_len(skb);
skb              3030 net/core/dev.c 	segs = skb_mac_gso_segment(skb, features);
skb              3032 net/core/dev.c 	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
skb              3033 net/core/dev.c 		skb_warn_bad_offload(skb);
skb              3041 net/core/dev.c void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
skb              3045 net/core/dev.c 		skb_dump(KERN_ERR, skb, true);
skb              3053 net/core/dev.c static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
skb              3059 net/core/dev.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              3060 net/core/dev.c 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              3074 net/core/dev.c static netdev_features_t net_mpls_features(struct sk_buff *skb,
skb              3079 net/core/dev.c 		features &= skb->dev->mpls_features;
skb              3084 net/core/dev.c static netdev_features_t net_mpls_features(struct sk_buff *skb,
skb              3092 net/core/dev.c static netdev_features_t harmonize_features(struct sk_buff *skb,
skb              3098 net/core/dev.c 	type = skb_network_protocol(skb, &tmp);
skb              3099 net/core/dev.c 	features = net_mpls_features(skb, features, type);
skb              3101 net/core/dev.c 	if (skb->ip_summed != CHECKSUM_NONE &&
skb              3105 net/core/dev.c 	if (illegal_highdma(skb->dev, skb))
skb              3111 net/core/dev.c netdev_features_t passthru_features_check(struct sk_buff *skb,
skb              3119 net/core/dev.c static netdev_features_t dflt_features_check(struct sk_buff *skb,
skb              3123 net/core/dev.c 	return vlan_features_check(skb, features);
skb              3126 net/core/dev.c static netdev_features_t gso_features_check(const struct sk_buff *skb,
skb              3130 net/core/dev.c 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
skb              3141 net/core/dev.c 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
skb              3147 net/core/dev.c 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
skb              3148 net/core/dev.c 		struct iphdr *iph = skb->encapsulation ?
skb              3149 net/core/dev.c 				    inner_ip_hdr(skb) : ip_hdr(skb);
skb              3158 net/core/dev.c netdev_features_t netif_skb_features(struct sk_buff *skb)
skb              3160 net/core/dev.c 	struct net_device *dev = skb->dev;
skb              3163 net/core/dev.c 	if (skb_is_gso(skb))
skb              3164 net/core/dev.c 		features = gso_features_check(skb, dev, features);
skb              3170 net/core/dev.c 	if (skb->encapsulation)
skb              3173 net/core/dev.c 	if (skb_vlan_tagged(skb))
skb              3180 net/core/dev.c 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
skb              3183 net/core/dev.c 		features &= dflt_features_check(skb, dev, features);
skb              3185 net/core/dev.c 	return harmonize_features(skb, features);
skb              3189 net/core/dev.c static int xmit_one(struct sk_buff *skb, struct net_device *dev,
skb              3196 net/core/dev.c 		dev_queue_xmit_nit(skb, dev);
skb              3198 net/core/dev.c 	len = skb->len;
skb              3199 net/core/dev.c 	trace_net_dev_start_xmit(skb, dev);
skb              3200 net/core/dev.c 	rc = netdev_start_xmit(skb, dev, txq, more);
skb              3201 net/core/dev.c 	trace_net_dev_xmit(skb, rc, dev, len);
skb              3209 net/core/dev.c 	struct sk_buff *skb = first;
skb              3212 net/core/dev.c 	while (skb) {
skb              3213 net/core/dev.c 		struct sk_buff *next = skb->next;
skb              3215 net/core/dev.c 		skb_mark_not_on_list(skb);
skb              3216 net/core/dev.c 		rc = xmit_one(skb, dev, txq, next != NULL);
skb              3218 net/core/dev.c 			skb->next = next;
skb              3222 net/core/dev.c 		skb = next;
skb              3223 net/core/dev.c 		if (netif_tx_queue_stopped(txq) && skb) {
skb              3231 net/core/dev.c 	return skb;
skb              3234 net/core/dev.c static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
skb              3237 net/core/dev.c 	if (skb_vlan_tag_present(skb) &&
skb              3238 net/core/dev.c 	    !vlan_hw_offload_capable(features, skb->vlan_proto))
skb              3239 net/core/dev.c 		skb = __vlan_hwaccel_push_inside(skb);
skb              3240 net/core/dev.c 	return skb;
skb              3243 net/core/dev.c int skb_csum_hwoffload_help(struct sk_buff *skb,
skb              3246 net/core/dev.c 	if (unlikely(skb->csum_not_inet))
skb              3248 net/core/dev.c 			skb_crc32c_csum_help(skb);
skb              3250 net/core/dev.c 	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
skb              3254 net/core/dev.c static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
skb              3258 net/core/dev.c 	features = netif_skb_features(skb);
skb              3259 net/core/dev.c 	skb = validate_xmit_vlan(skb, features);
skb              3260 net/core/dev.c 	if (unlikely(!skb))
skb              3263 net/core/dev.c 	skb = sk_validate_xmit_skb(skb, dev);
skb              3264 net/core/dev.c 	if (unlikely(!skb))
skb              3267 net/core/dev.c 	if (netif_needs_gso(skb, features)) {
skb              3270 net/core/dev.c 		segs = skb_gso_segment(skb, features);
skb              3274 net/core/dev.c 			consume_skb(skb);
skb              3275 net/core/dev.c 			skb = segs;
skb              3278 net/core/dev.c 		if (skb_needs_linearize(skb, features) &&
skb              3279 net/core/dev.c 		    __skb_linearize(skb))
skb              3286 net/core/dev.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              3287 net/core/dev.c 			if (skb->encapsulation)
skb              3288 net/core/dev.c 				skb_set_inner_transport_header(skb,
skb              3289 net/core/dev.c 							       skb_checksum_start_offset(skb));
skb              3291 net/core/dev.c 				skb_set_transport_header(skb,
skb              3292 net/core/dev.c 							 skb_checksum_start_offset(skb));
skb              3293 net/core/dev.c 			if (skb_csum_hwoffload_help(skb, features))
skb              3298 net/core/dev.c 	skb = validate_xmit_xfrm(skb, features, again);
skb              3300 net/core/dev.c 	return skb;
skb              3303 net/core/dev.c 	kfree_skb(skb);
skb              3309 net/core/dev.c struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
skb              3313 net/core/dev.c 	for (; skb != NULL; skb = next) {
skb              3314 net/core/dev.c 		next = skb->next;
skb              3315 net/core/dev.c 		skb_mark_not_on_list(skb);
skb              3318 net/core/dev.c 		skb->prev = skb;
skb              3320 net/core/dev.c 		skb = validate_xmit_skb(skb, dev, again);
skb              3321 net/core/dev.c 		if (!skb)
skb              3325 net/core/dev.c 			head = skb;
skb              3327 net/core/dev.c 			tail->next = skb;
skb              3331 net/core/dev.c 		tail = skb->prev;
skb              3337 net/core/dev.c static void qdisc_pkt_len_init(struct sk_buff *skb)
skb              3339 net/core/dev.c 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              3341 net/core/dev.c 	qdisc_skb_cb(skb)->pkt_len = skb->len;
skb              3346 net/core/dev.c 	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
skb              3351 net/core/dev.c 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
skb              3358 net/core/dev.c 			th = skb_header_pointer(skb, skb_transport_offset(skb),
skb              3365 net/core/dev.c 			if (skb_header_pointer(skb, skb_transport_offset(skb),
skb              3371 net/core/dev.c 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
skb              3374 net/core/dev.c 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
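
qdisc_pkt_len_init() charges a GSO skb for the headers every segment will repeat on the wire: gso_segs is recomputed as DIV_ROUND_UP(payload, gso_size) and pkt_len grows by (gso_segs - 1) * hdr_len. A worked example with typical TSO numbers (the values are illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int len = 65226;	/* skb->len of the TSO skb */
		unsigned int hdr_len = 54;	/* eth 14 + ip 20 + tcp 20 */
		unsigned int gso_size = 1448;
		unsigned int segs = (len - hdr_len + gso_size - 1) / gso_size;
		unsigned int pkt_len = len + (segs - 1) * hdr_len;

		printf("%u segments, qdisc pkt_len %u\n", segs, pkt_len);
		return 0;
	}
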
skb              3378 net/core/dev.c static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
skb              3387 net/core/dev.c 	qdisc_calculate_pkt_len(skb, q);
skb              3390 net/core/dev.c 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
skb              3410 net/core/dev.c 		__qdisc_drop(skb, &to_free);
skb              3420 net/core/dev.c 		qdisc_bstats_update(q, skb);
skb              3422 net/core/dev.c 		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
skb              3433 net/core/dev.c 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
skb              3452 net/core/dev.c static void skb_update_prio(struct sk_buff *skb)
skb              3458 net/core/dev.c 	if (skb->priority)
skb              3460 net/core/dev.c 	map = rcu_dereference_bh(skb->dev->priomap);
skb              3463 net/core/dev.c 	sk = skb_to_full_sk(skb);
skb              3470 net/core/dev.c 		skb->priority = map->priomap[prioidx];
skb              3473 net/core/dev.c #define skb_update_prio(skb)
skb              3482 net/core/dev.c int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
skb              3484 net/core/dev.c 	skb_reset_mac_header(skb);
skb              3485 net/core/dev.c 	__skb_pull(skb, skb_network_offset(skb));
skb              3486 net/core/dev.c 	skb->pkt_type = PACKET_LOOPBACK;
skb              3487 net/core/dev.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3488 net/core/dev.c 	WARN_ON(!skb_dst(skb));
skb              3489 net/core/dev.c 	skb_dst_force(skb);
skb              3490 net/core/dev.c 	netif_rx_ni(skb);
skb              3497 net/core/dev.c sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
skb              3503 net/core/dev.c 		return skb;
skb              3506 net/core/dev.c 	mini_qdisc_bstats_cpu_update(miniq, skb);
skb              3508 net/core/dev.c 	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
skb              3511 net/core/dev.c 		skb->tc_index = TC_H_MIN(cl_res.classid);
skb              3516 net/core/dev.c 		kfree_skb(skb);
skb              3522 net/core/dev.c 		consume_skb(skb);
skb              3526 net/core/dev.c 		skb_do_redirect(skb);
skb              3533 net/core/dev.c 	return skb;
skb              3538 net/core/dev.c static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
skb              3546 net/core/dev.c 		tci += netdev_get_prio_tc_map(dev, skb->priority);
skb              3555 net/core/dev.c 						skb_get_hash(skb), map->len)];
skb              3564 net/core/dev.c 			 struct sk_buff *skb)
skb              3568 net/core/dev.c 	struct sock *sk = skb->sk;
skb              3583 net/core/dev.c 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
skb              3591 net/core/dev.c 			unsigned int tci = skb->sender_cpu - 1;
skb              3593 net/core/dev.c 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
skb              3605 net/core/dev.c u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
skb              3612 net/core/dev.c u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
skb              3619 net/core/dev.c u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
skb              3622 net/core/dev.c 	struct sock *sk = skb->sk;
skb              3627 net/core/dev.c 	if (queue_index < 0 || skb->ooo_okay ||
skb              3629 net/core/dev.c 		int new_index = get_xps_queue(dev, sb_dev, skb);
skb              3632 net/core/dev.c 			new_index = skb_tx_hash(dev, sb_dev, skb);
skb              3647 net/core/dev.c 					 struct sk_buff *skb,
skb              3653 net/core/dev.c 	u32 sender_cpu = skb->sender_cpu - 1;
skb              3656 net/core/dev.c 		skb->sender_cpu = raw_smp_processor_id() + 1;
skb              3663 net/core/dev.c 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
skb              3665 net/core/dev.c 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
skb              3670 net/core/dev.c 	skb_set_queue_mapping(skb, queue_index);
skb              3700 net/core/dev.c static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
skb              3702 net/core/dev.c 	struct net_device *dev = skb->dev;
skb              3708 net/core/dev.c 	skb_reset_mac_header(skb);
skb              3710 net/core/dev.c 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
skb              3711 net/core/dev.c 		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
skb              3718 net/core/dev.c 	skb_update_prio(skb);
skb              3720 net/core/dev.c 	qdisc_pkt_len_init(skb);
skb              3722 net/core/dev.c 	skb->tc_at_ingress = 0;
skb              3725 net/core/dev.c 		skb = sch_handle_egress(skb, &rc, dev);
skb              3726 net/core/dev.c 		if (!skb)
skb              3735 net/core/dev.c 		skb_dst_drop(skb);
skb              3737 net/core/dev.c 		skb_dst_force(skb);
skb              3739 net/core/dev.c 	txq = netdev_core_pick_tx(dev, skb, sb_dev);
skb              3742 net/core/dev.c 	trace_net_dev_queue(skb);
skb              3744 net/core/dev.c 		rc = __dev_xmit_skb(skb, q, dev, txq);
skb              3767 net/core/dev.c 			skb = validate_xmit_skb(skb, dev, &again);
skb              3768 net/core/dev.c 			if (!skb)
skb              3775 net/core/dev.c 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
skb              3799 net/core/dev.c 	kfree_skb_list(skb);
skb              3806 net/core/dev.c int dev_queue_xmit(struct sk_buff *skb)
skb              3808 net/core/dev.c 	return __dev_queue_xmit(skb, NULL);
skb              3812 net/core/dev.c int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
skb              3814 net/core/dev.c 	return __dev_queue_xmit(skb, sb_dev);
skb              3818 net/core/dev.c int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
skb              3820 net/core/dev.c 	struct net_device *dev = skb->dev;
skb              3821 net/core/dev.c 	struct sk_buff *orig_skb = skb;
skb              3830 net/core/dev.c 	skb = validate_xmit_skb_list(skb, dev, &again);
skb              3831 net/core/dev.c 	if (skb != orig_skb)
skb              3834 net/core/dev.c 	skb_set_queue_mapping(skb, queue_id);
skb              3835 net/core/dev.c 	txq = skb_get_tx_queue(dev, skb);
skb              3841 net/core/dev.c 		ret = netdev_start_xmit(skb, dev, txq, false);
skb              3847 net/core/dev.c 		kfree_skb(skb);
skb              3852 net/core/dev.c 	kfree_skb_list(skb);
skb              3898 net/core/dev.c set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
skb              3911 net/core/dev.c 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
skb              3915 net/core/dev.c 		if (rxq_index == skb_get_rx_queue(skb))
skb              3922 net/core/dev.c 		flow_id = skb_get_hash(skb) & flow_table->mask;
skb              3923 net/core/dev.c 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
skb              3947 net/core/dev.c static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
skb              3958 net/core/dev.c 	if (skb_rx_queue_recorded(skb)) {
skb              3959 net/core/dev.c 		u16 index = skb_get_rx_queue(skb);
skb              3978 net/core/dev.c 	skb_reset_network_header(skb);
skb              3979 net/core/dev.c 	hash = skb_get_hash(skb);
skb              4018 net/core/dev.c 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
skb              4118 net/core/dev.c static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
skb              4133 net/core/dev.c 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
skb              4158 net/core/dev.c static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
skb              4170 net/core/dev.c 	if (!netif_running(skb->dev))
skb              4173 net/core/dev.c 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
skb              4176 net/core/dev.c 			__skb_queue_tail(&sd->input_pkt_queue, skb);
skb              4199 net/core/dev.c 	atomic_long_inc(&skb->dev->rx_dropped);
skb              4200 net/core/dev.c 	kfree_skb(skb);
skb              4204 net/core/dev.c static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
skb              4206 net/core/dev.c 	struct net_device *dev = skb->dev;
skb              4211 net/core/dev.c 	if (skb_rx_queue_recorded(skb)) {
skb              4212 net/core/dev.c 		u16 index = skb_get_rx_queue(skb);
skb              4227 net/core/dev.c static u32 netif_receive_generic_xdp(struct sk_buff *skb,
skb              4243 net/core/dev.c 	if (skb_is_redirected(skb))
skb              4250 net/core/dev.c 	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
skb              4251 net/core/dev.c 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
skb              4252 net/core/dev.c 		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
skb              4253 net/core/dev.c 		int troom = skb->tail + skb->data_len - skb->end;
skb              4258 net/core/dev.c 		if (pskb_expand_head(skb,
skb              4262 net/core/dev.c 		if (skb_linearize(skb))
skb              4269 net/core/dev.c 	mac_len = skb->data - skb_mac_header(skb);
skb              4270 net/core/dev.c 	hlen = skb_headlen(skb) + mac_len;
skb              4271 net/core/dev.c 	xdp->data = skb->data - mac_len;
skb              4274 net/core/dev.c 	xdp->data_hard_start = skb->data - skb_headroom(skb);
skb              4281 net/core/dev.c 	rxqueue = netif_get_rxqueue(skb);
skb              4290 net/core/dev.c 			__skb_pull(skb, off);
skb              4292 net/core/dev.c 			__skb_push(skb, -off);
skb              4294 net/core/dev.c 		skb->mac_header += off;
skb              4295 net/core/dev.c 		skb_reset_network_header(skb);
skb              4303 net/core/dev.c 		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
skb              4304 net/core/dev.c 		skb->len -= off;
skb              4312 net/core/dev.c 		__skb_push(skb, ETH_HLEN);
skb              4313 net/core/dev.c 		skb->protocol = eth_type_trans(skb, skb->dev);
skb              4319 net/core/dev.c 		__skb_push(skb, mac_len);
skb              4324 net/core/dev.c 			skb_metadata_set(skb, metalen);
skb              4330 net/core/dev.c 		trace_xdp_exception(skb->dev, xdp_prog, act);
skb              4334 net/core/dev.c 		kfree_skb(skb);
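
After the BPF program runs, netif_receive_generic_xdp() above compares xdp->data against its original value and resynchronizes the skb with __skb_pull() (data moved forward) or __skb_push() (data moved backward). A toy sketch of that data/len bookkeeping, assuming a simplified buffer with no tailroom, shared-info, or error handling beyond asserts:

	#include <assert.h>
	#include <stdio.h>

	/* Toy packet buffer mirroring skb data/len accounting. */
	struct pkt {
		unsigned char buf[256];
		unsigned char *data;	/* current start of packet data */
		unsigned int len;	/* bytes from data to tail */
	};

	/* like __skb_push(): expose n bytes of headroom as data */
	static unsigned char *pkt_push(struct pkt *p, unsigned int n)
	{
		assert(p->data - p->buf >= (long)n);
		p->data -= n;
		p->len += n;
		return p->data;
	}

	/* like __skb_pull(): consume n bytes from the packet front */
	static unsigned char *pkt_pull(struct pkt *p, unsigned int n)
	{
		assert(p->len >= n);
		p->data += n;
		p->len -= n;
		return p->data;
	}

	int main(void)
	{
		struct pkt p = { .data = p.buf + 64, .len = 100 };
		unsigned char *orig = p.data;

		pkt_pull(&p, 14);	/* as if the program pulled a header */
		printf("off=%td len=%u\n", p.data - orig, p.len);
		(void)pkt_push;
		return 0;
	}
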
skb              4344 net/core/dev.c void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
skb              4346 net/core/dev.c 	struct net_device *dev = skb->dev;
skb              4351 net/core/dev.c 	txq = netdev_core_pick_tx(dev, skb, NULL);
skb              4355 net/core/dev.c 		rc = netdev_start_xmit(skb, dev, txq, 0);
skb              4362 net/core/dev.c 		kfree_skb(skb);
skb              4369 net/core/dev.c int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
skb              4376 net/core/dev.c 		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
skb              4380 net/core/dev.c 				err = xdp_do_generic_redirect(skb->dev, skb,
skb              4386 net/core/dev.c 				generic_xdp_tx(skb, xdp_prog);
skb              4394 net/core/dev.c 	kfree_skb(skb);
skb              4399 net/core/dev.c static int netif_rx_internal(struct sk_buff *skb)
skb              4403 net/core/dev.c 	net_timestamp_check(netdev_tstamp_prequeue, skb);
skb              4405 net/core/dev.c 	trace_netif_rx(skb);
skb              4415 net/core/dev.c 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
skb              4419 net/core/dev.c 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
skb              4428 net/core/dev.c 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
skb              4449 net/core/dev.c int netif_rx(struct sk_buff *skb)
skb              4453 net/core/dev.c 	trace_netif_rx_entry(skb);
skb              4455 net/core/dev.c 	ret = netif_rx_internal(skb);
skb              4462 net/core/dev.c int netif_rx_ni(struct sk_buff *skb)
skb              4466 net/core/dev.c 	trace_netif_rx_ni_entry(skb);
skb              4469 net/core/dev.c 	err = netif_rx_internal(skb);
skb              4492 net/core/dev.c 			struct sk_buff *skb = clist;
skb              4496 net/core/dev.c 			WARN_ON(refcount_read(&skb->users));
skb              4497 net/core/dev.c 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
skb              4498 net/core/dev.c 				trace_consume_skb(skb);
skb              4500 net/core/dev.c 				trace_kfree_skb(skb, net_tx_action);
skb              4502 net/core/dev.c 			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
skb              4503 net/core/dev.c 				__kfree_skb(skb);
skb              4505 net/core/dev.c 				__kfree_skb_defer(skb);
skb              4552 net/core/dev.c sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
skb              4556 net/core/dev.c 	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
skb              4565 net/core/dev.c 		return skb;
skb              4568 net/core/dev.c 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
skb              4572 net/core/dev.c 	qdisc_skb_cb(skb)->pkt_len = skb->len;
skb              4573 net/core/dev.c 	skb->tc_at_ingress = 1;
skb              4574 net/core/dev.c 	mini_qdisc_bstats_cpu_update(miniq, skb);
skb              4576 net/core/dev.c 	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
skb              4579 net/core/dev.c 		skb->tc_index = TC_H_MIN(cl_res.classid);
skb              4583 net/core/dev.c 		kfree_skb(skb);
skb              4588 net/core/dev.c 		consume_skb(skb);
skb              4595 net/core/dev.c 		__skb_push(skb, skb->mac_len);
skb              4596 net/core/dev.c 		skb_do_redirect(skb);
skb              4604 net/core/dev.c 	return skb;
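
The switch over tcf_classify() in sch_handle_ingress() above implements a small ownership protocol: TC_ACT_OK keeps the skb in the RX path, TC_ACT_SHOT frees it, and the stolen/redirect cases transfer ownership so the caller must stop touching it. A hypothetical, heavily reduced userspace sketch of that verdict handling (names here are invented, not the kernel's):

	#include <stdio.h>
	#include <stdlib.h>

	enum verdict { V_OK, V_SHOT, V_STOLEN, V_REDIRECT };

	struct pkt { int id; };

	/* Returns the packet if processing should continue, NULL if it
	 * was dropped or ownership moved elsewhere (in the kernel:
	 * consume_skb()/skb_do_redirect() took it). */
	static struct pkt *handle_verdict(struct pkt *p, enum verdict v)
	{
		switch (v) {
		case V_OK:
			return p;	/* keep processing */
		case V_SHOT:
			free(p);	/* drop: kfree_skb() */
			return NULL;
		case V_STOLEN:
		case V_REDIRECT:
			return NULL;	/* new owner is responsible */
		}
		return p;
	}

	int main(void)
	{
		struct pkt *p = malloc(sizeof(*p));

		if (!p)
			return 1;
		p->id = 1;
		if (!handle_verdict(p, V_SHOT))
			puts("packet dropped, RX path stops");
		return 0;
	}
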
skb              4681 net/core/dev.c static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
skb              4683 net/core/dev.c 	switch (skb->protocol) {
skb              4695 net/core/dev.c static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
skb              4699 net/core/dev.c 	if (nf_hook_ingress_active(skb)) {
skb              4703 net/core/dev.c 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
skb              4708 net/core/dev.c 		ingress_retval = nf_hook_ingress(skb);
skb              4721 net/core/dev.c 	struct sk_buff *skb = *pskb;
skb              4727 net/core/dev.c 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
skb              4729 net/core/dev.c 	trace_netif_receive_skb(skb);
skb              4731 net/core/dev.c 	orig_dev = skb->dev;
skb              4733 net/core/dev.c 	skb_reset_network_header(skb);
skb              4734 net/core/dev.c 	if (!skb_transport_header_was_set(skb))
skb              4735 net/core/dev.c 		skb_reset_transport_header(skb);
skb              4736 net/core/dev.c 	skb_reset_mac_len(skb);
skb              4741 net/core/dev.c 	skb->skb_iif = skb->dev->ifindex;
skb              4749 net/core/dev.c 		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
skb              4756 net/core/dev.c 		skb_reset_mac_len(skb);
skb              4759 net/core/dev.c 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
skb              4760 net/core/dev.c 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
skb              4761 net/core/dev.c 		skb = skb_vlan_untag(skb);
skb              4762 net/core/dev.c 		if (unlikely(!skb))
skb              4766 net/core/dev.c 	if (skb_skip_tc_classify(skb))
skb              4774 net/core/dev.c 			ret = deliver_skb(skb, pt_prev, orig_dev);
skb              4778 net/core/dev.c 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
skb              4780 net/core/dev.c 			ret = deliver_skb(skb, pt_prev, orig_dev);
skb              4787 net/core/dev.c 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
skb              4788 net/core/dev.c 		if (!skb)
skb              4791 net/core/dev.c 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
skb              4795 net/core/dev.c 	skb_reset_redirect(skb);
skb              4797 net/core/dev.c 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
skb              4800 net/core/dev.c 	if (skb_vlan_tag_present(skb)) {
skb              4802 net/core/dev.c 			ret = deliver_skb(skb, pt_prev, orig_dev);
skb              4805 net/core/dev.c 		if (vlan_do_receive(&skb))
skb              4807 net/core/dev.c 		else if (unlikely(!skb))
skb              4811 net/core/dev.c 	rx_handler = rcu_dereference(skb->dev->rx_handler);
skb              4814 net/core/dev.c 			ret = deliver_skb(skb, pt_prev, orig_dev);
skb              4817 net/core/dev.c 		switch (rx_handler(&skb)) {
skb              4832 net/core/dev.c 	if (unlikely(skb_vlan_tag_present(skb))) {
skb              4834 net/core/dev.c 		if (skb_vlan_tag_get_id(skb)) {
skb              4838 net/core/dev.c 			skb->pkt_type = PACKET_OTHERHOST;
skb              4839 net/core/dev.c 		} else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
skb              4840 net/core/dev.c 			   skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
skb              4845 net/core/dev.c 			__vlan_hwaccel_clear_tag(skb);
skb              4846 net/core/dev.c 			skb = skb_vlan_untag(skb);
skb              4847 net/core/dev.c 			if (unlikely(!skb))
skb              4849 net/core/dev.c 			if (vlan_do_receive(&skb))
skb              4854 net/core/dev.c 			else if (unlikely(!skb))
skb              4867 net/core/dev.c 		__vlan_hwaccel_clear_tag(skb);
skb              4870 net/core/dev.c 	type = skb->protocol;
skb              4874 net/core/dev.c 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
skb              4879 net/core/dev.c 	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
skb              4882 net/core/dev.c 	if (unlikely(skb->dev != orig_dev)) {
skb              4883 net/core/dev.c 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
skb              4884 net/core/dev.c 				       &skb->dev->ptype_specific);
skb              4888 net/core/dev.c 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
skb              4894 net/core/dev.c 			atomic_long_inc(&skb->dev->rx_dropped);
skb              4896 net/core/dev.c 			atomic_long_inc(&skb->dev->rx_nohandler);
skb              4897 net/core/dev.c 		kfree_skb(skb);
skb              4911 net/core/dev.c 	*pskb = skb;
skb              4915 net/core/dev.c static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
skb              4917 net/core/dev.c 	struct net_device *orig_dev = skb->dev;
skb              4921 net/core/dev.c 	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
skb              4923 net/core/dev.c 		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
skb              4924 net/core/dev.c 					 skb->dev, pt_prev, orig_dev);
skb              4943 net/core/dev.c int netif_receive_skb_core(struct sk_buff *skb)
skb              4948 net/core/dev.c 	ret = __netif_receive_skb_one_core(skb, false);
skb              4959 net/core/dev.c 	struct sk_buff *skb, *next;
skb              4969 net/core/dev.c 		list_for_each_entry_safe(skb, next, head, list) {
skb              4970 net/core/dev.c 			skb_list_del_init(skb);
skb              4971 net/core/dev.c 			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
skb              4991 net/core/dev.c 	struct sk_buff *skb, *next;
skb              4994 net/core/dev.c 	list_for_each_entry_safe(skb, next, head, list) {
skb              4995 net/core/dev.c 		struct net_device *orig_dev = skb->dev;
skb              4998 net/core/dev.c 		skb_list_del_init(skb);
skb              4999 net/core/dev.c 		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
skb              5010 net/core/dev.c 		list_add_tail(&skb->list, &sublist);
skb              5017 net/core/dev.c static int __netif_receive_skb(struct sk_buff *skb)
skb              5021 net/core/dev.c 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
skb              5034 net/core/dev.c 		ret = __netif_receive_skb_one_core(skb, true);
skb              5037 net/core/dev.c 		ret = __netif_receive_skb_one_core(skb, false);
skb              5045 net/core/dev.c 	struct sk_buff *skb, *next;
skb              5048 net/core/dev.c 	list_for_each_entry_safe(skb, next, head, list) {
skb              5049 net/core/dev.c 		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
skb              5053 net/core/dev.c 			list_cut_before(&sublist, head, &skb->list);
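
__netif_receive_skb_list() above uses list_cut_before() to split the input into maximal runs of consecutive skbs that share the same pfmemalloc state, so each run pays the memalloc setup/teardown only once. The core idea, as a small self-contained sketch with a plain array standing in for the skb list:

	#include <stdbool.h>
	#include <stdio.h>

	/* hand off one maximal run sharing the predicate */
	static void process_run(int start, int end, bool pf)
	{
		printf("run [%d..%d) pfmemalloc=%d\n", start, end, pf);
	}

	int main(void)
	{
		const bool pf[] = { false, false, true, true, false };
		const int n = 5;
		int start = 0;

		for (int i = 1; i <= n; i++) {
			if (i == n || pf[i] != pf[start]) {
				process_run(start, i, pf[start]);
				start = i;
			}
		}
		return 0;
	}
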
skb              5105 net/core/dev.c static int netif_receive_skb_internal(struct sk_buff *skb)
skb              5109 net/core/dev.c 	net_timestamp_check(netdev_tstamp_prequeue, skb);
skb              5111 net/core/dev.c 	if (skb_defer_rx_timestamp(skb))
skb              5118 net/core/dev.c 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
skb              5121 net/core/dev.c 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
skb              5127 net/core/dev.c 	ret = __netif_receive_skb(skb);
skb              5134 net/core/dev.c 	struct sk_buff *skb, *next;
skb              5138 net/core/dev.c 	list_for_each_entry_safe(skb, next, head, list) {
skb              5139 net/core/dev.c 		net_timestamp_check(netdev_tstamp_prequeue, skb);
skb              5140 net/core/dev.c 		skb_list_del_init(skb);
skb              5141 net/core/dev.c 		if (!skb_defer_rx_timestamp(skb))
skb              5142 net/core/dev.c 			list_add_tail(&skb->list, &sublist);
skb              5149 net/core/dev.c 		list_for_each_entry_safe(skb, next, head, list) {
skb              5151 net/core/dev.c 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
skb              5155 net/core/dev.c 				skb_list_del_init(skb);
skb              5156 net/core/dev.c 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
skb              5180 net/core/dev.c int netif_receive_skb(struct sk_buff *skb)
skb              5184 net/core/dev.c 	trace_netif_receive_skb_entry(skb);
skb              5186 net/core/dev.c 	ret = netif_receive_skb_internal(skb);
skb              5205 net/core/dev.c 	struct sk_buff *skb;
skb              5210 net/core/dev.c 		list_for_each_entry(skb, head, list)
skb              5211 net/core/dev.c 			trace_netif_receive_skb_list_entry(skb);
skb              5223 net/core/dev.c 	struct sk_buff *skb, *tmp;
skb              5231 net/core/dev.c 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
skb              5232 net/core/dev.c 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
skb              5233 net/core/dev.c 			__skb_unlink(skb, &sd->input_pkt_queue);
skb              5234 net/core/dev.c 			kfree_skb(skb);
skb              5241 net/core/dev.c 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
skb              5242 net/core/dev.c 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
skb              5243 net/core/dev.c 			__skb_unlink(skb, &sd->process_queue);
skb              5244 net/core/dev.c 			kfree_skb(skb);
skb              5280 net/core/dev.c static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
skb              5282 net/core/dev.c 	list_add_tail(&skb->list, &napi->rx_list);
skb              5289 net/core/dev.c static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
skb              5292 net/core/dev.c 	__be16 type = skb->protocol;
skb              5296 net/core/dev.c 	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
skb              5298 net/core/dev.c 	if (NAPI_GRO_CB(skb)->count == 1) {
skb              5299 net/core/dev.c 		skb_shinfo(skb)->gso_size = 0;
skb              5310 net/core/dev.c 					 skb, 0);
skb              5317 net/core/dev.c 		kfree_skb(skb);
skb              5322 net/core/dev.c 	gro_normal_one(napi, skb);
skb              5330 net/core/dev.c 	struct sk_buff *skb, *p;
skb              5332 net/core/dev.c 	list_for_each_entry_safe_reverse(skb, p, head, list) {
skb              5333 net/core/dev.c 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
skb              5335 net/core/dev.c 		skb_list_del_init(skb);
skb              5336 net/core/dev.c 		napi_gro_complete(napi, skb);
skb              5362 net/core/dev.c 					  struct sk_buff *skb)
skb              5364 net/core/dev.c 	unsigned int maclen = skb->dev->hard_header_len;
skb              5365 net/core/dev.c 	u32 hash = skb_get_hash_raw(skb);
skb              5380 net/core/dev.c 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
skb              5381 net/core/dev.c 		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
skb              5383 net/core/dev.c 			diffs |= p->vlan_tci ^ skb->vlan_tci;
skb              5384 net/core/dev.c 		diffs |= skb_metadata_dst_cmp(p, skb);
skb              5385 net/core/dev.c 		diffs |= skb_metadata_differs(p, skb);
skb              5388 net/core/dev.c 						      skb_mac_header(skb));
skb              5391 net/core/dev.c 				       skb_mac_header(skb),
skb              5399 net/core/dev.c static void skb_gro_reset_offset(struct sk_buff *skb)
skb              5401 net/core/dev.c 	const struct skb_shared_info *pinfo = skb_shinfo(skb);
skb              5404 net/core/dev.c 	NAPI_GRO_CB(skb)->data_offset = 0;
skb              5405 net/core/dev.c 	NAPI_GRO_CB(skb)->frag0 = NULL;
skb              5406 net/core/dev.c 	NAPI_GRO_CB(skb)->frag0_len = 0;
skb              5408 net/core/dev.c 	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
skb              5411 net/core/dev.c 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
skb              5412 net/core/dev.c 		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
skb              5414 net/core/dev.c 						    skb->end - skb->tail);
skb              5418 net/core/dev.c static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
skb              5420 net/core/dev.c 	struct skb_shared_info *pinfo = skb_shinfo(skb);
skb              5422 net/core/dev.c 	BUG_ON(skb->end - skb->tail < grow);
skb              5424 net/core/dev.c 	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
skb              5426 net/core/dev.c 	skb->data_len -= grow;
skb              5427 net/core/dev.c 	skb->tail += grow;
skb              5433 net/core/dev.c 		skb_frag_unref(skb, 0);
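
gro_pull_from_frag0() above copies `grow` bytes from the first page fragment into the linear tail area, advances the tail, and shrinks both frag0 and data_len by the same amount. A toy version of that move, assuming a single fragment and no shared-info or reference counting:

	#include <assert.h>
	#include <string.h>
	#include <stdio.h>

	struct toy_skb {
		unsigned char linear[64];
		unsigned int tail;	/* end of linear data */
		unsigned char *frag0;	/* first fragment data */
		unsigned int frag0_len;
		unsigned int data_len;	/* bytes held in fragments */
	};

	static void pull_from_frag0(struct toy_skb *s, unsigned int grow)
	{
		assert(grow <= s->frag0_len &&
		       s->tail + grow <= sizeof(s->linear));
		memcpy(s->linear + s->tail, s->frag0, grow);
		s->tail += grow;
		s->frag0 += grow;
		s->frag0_len -= grow;
		s->data_len -= grow;
	}

	int main(void)
	{
		unsigned char frag[] = "ethernet+ip headers...";
		struct toy_skb s = { .tail = 0, .frag0 = frag,
				     .frag0_len = sizeof(frag),
				     .data_len = sizeof(frag) };

		pull_from_frag0(&s, 14);  /* an Ethernet header's worth */
		printf("linear now %u bytes, frag0 %u bytes left\n",
		       s.tail, s.frag0_len);
		return 0;
	}
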
skb              5462 net/core/dev.c static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
skb              5464 net/core/dev.c 	u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
skb              5467 net/core/dev.c 	__be16 type = skb->protocol;
skb              5474 net/core/dev.c 	if (netif_elide_gro(skb->dev))
skb              5477 net/core/dev.c 	gro_head = gro_list_prepare(napi, skb);
skb              5484 net/core/dev.c 		skb_set_network_header(skb, skb_gro_offset(skb));
skb              5485 net/core/dev.c 		skb_reset_mac_len(skb);
skb              5486 net/core/dev.c 		NAPI_GRO_CB(skb)->same_flow = 0;
skb              5487 net/core/dev.c 		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
skb              5488 net/core/dev.c 		NAPI_GRO_CB(skb)->free = 0;
skb              5489 net/core/dev.c 		NAPI_GRO_CB(skb)->encap_mark = 0;
skb              5490 net/core/dev.c 		NAPI_GRO_CB(skb)->recursion_counter = 0;
skb              5491 net/core/dev.c 		NAPI_GRO_CB(skb)->is_fou = 0;
skb              5492 net/core/dev.c 		NAPI_GRO_CB(skb)->is_atomic = 1;
skb              5493 net/core/dev.c 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
skb              5496 net/core/dev.c 		switch (skb->ip_summed) {
skb              5498 net/core/dev.c 			NAPI_GRO_CB(skb)->csum = skb->csum;
skb              5499 net/core/dev.c 			NAPI_GRO_CB(skb)->csum_valid = 1;
skb              5500 net/core/dev.c 			NAPI_GRO_CB(skb)->csum_cnt = 0;
skb              5503 net/core/dev.c 			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
skb              5504 net/core/dev.c 			NAPI_GRO_CB(skb)->csum_valid = 0;
skb              5507 net/core/dev.c 			NAPI_GRO_CB(skb)->csum_cnt = 0;
skb              5508 net/core/dev.c 			NAPI_GRO_CB(skb)->csum_valid = 0;
skb              5513 net/core/dev.c 					gro_head, skb);
skb              5526 net/core/dev.c 	same_flow = NAPI_GRO_CB(skb)->same_flow;
skb              5527 net/core/dev.c 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
skb              5538 net/core/dev.c 	if (NAPI_GRO_CB(skb)->flush)
skb              5546 net/core/dev.c 	NAPI_GRO_CB(skb)->count = 1;
skb              5547 net/core/dev.c 	NAPI_GRO_CB(skb)->age = jiffies;
skb              5548 net/core/dev.c 	NAPI_GRO_CB(skb)->last = skb;
skb              5549 net/core/dev.c 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
skb              5550 net/core/dev.c 	list_add(&skb->list, gro_head);
skb              5554 net/core/dev.c 	grow = skb_gro_offset(skb) - skb_headlen(skb);
skb              5556 net/core/dev.c 		gro_pull_from_frag0(skb, grow);
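
The control flow of dev_gro_receive() above is merge-or-insert: the packet is bucketed by flow hash, a matching held flow absorbs it (the protocol layer sets same_flow and bumps NAPI_GRO_CB->count), otherwise the packet becomes a new held flow with count = 1 and a fresh age. A minimal, hypothetical fixed-size sketch of that shape (the kernel uses per-NAPI hash lists plus flush timers, not a single slot per bucket):

	#include <stdint.h>
	#include <stdio.h>

	#define BUCKETS 8u

	struct flow {
		uint32_t key;		/* 0 = empty slot */
		unsigned int count;	/* segments merged so far */
	};

	static struct flow table[BUCKETS];

	static void gro_receive(uint32_t key)
	{
		struct flow *f = &table[key & (BUCKETS - 1)];

		if (f->key == key) {
			f->count++;	/* same_flow: merge */
		} else {
			f->key = key;	/* start a new held flow */
			f->count = 1;
		}
	}

	int main(void)
	{
		uint32_t pkts[] = { 42, 42, 42, 7, 42 };

		for (unsigned int i = 0; i < 5; i++)
			gro_receive(pkts[i]);
		printf("flow 42 merged %u segments\n",
		       table[42 & (BUCKETS - 1)].count);
		return 0;
	}
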
skb              5600 net/core/dev.c static void napi_skb_free_stolen_head(struct sk_buff *skb)
skb              5602 net/core/dev.c 	skb_dst_drop(skb);
skb              5603 net/core/dev.c 	skb_ext_put(skb);
skb              5604 net/core/dev.c 	kmem_cache_free(skbuff_head_cache, skb);
skb              5607 net/core/dev.c static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
skb              5611 net/core/dev.c 		if (netif_receive_skb_internal(skb))
skb              5616 net/core/dev.c 		kfree_skb(skb);
skb              5620 net/core/dev.c 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
skb              5621 net/core/dev.c 			napi_skb_free_stolen_head(skb);
skb              5623 net/core/dev.c 			__kfree_skb(skb);
skb              5635 net/core/dev.c gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
skb              5639 net/core/dev.c 	skb_mark_napi_id(skb, napi);
skb              5640 net/core/dev.c 	trace_napi_gro_receive_entry(skb);
skb              5642 net/core/dev.c 	skb_gro_reset_offset(skb);
skb              5644 net/core/dev.c 	ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
skb              5651 net/core/dev.c static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb              5653 net/core/dev.c 	if (unlikely(skb->pfmemalloc)) {
skb              5654 net/core/dev.c 		consume_skb(skb);
skb              5657 net/core/dev.c 	__skb_pull(skb, skb_headlen(skb));
skb              5659 net/core/dev.c 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
skb              5660 net/core/dev.c 	__vlan_hwaccel_clear_tag(skb);
skb              5661 net/core/dev.c 	skb->dev = napi->dev;
skb              5662 net/core/dev.c 	skb->skb_iif = 0;
skb              5665 net/core/dev.c 	skb->pkt_type = PACKET_HOST;
skb              5667 net/core/dev.c 	skb->encapsulation = 0;
skb              5668 net/core/dev.c 	skb_shinfo(skb)->gso_type = 0;
skb              5669 net/core/dev.c 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
skb              5670 net/core/dev.c 	skb_ext_reset(skb);
skb              5672 net/core/dev.c 	napi->skb = skb;
skb              5677 net/core/dev.c 	struct sk_buff *skb = napi->skb;
skb              5679 net/core/dev.c 	if (!skb) {
skb              5680 net/core/dev.c 		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
skb              5681 net/core/dev.c 		if (skb) {
skb              5682 net/core/dev.c 			napi->skb = skb;
skb              5683 net/core/dev.c 			skb_mark_napi_id(skb, napi);
skb              5686 net/core/dev.c 	return skb;
skb              5691 net/core/dev.c 				      struct sk_buff *skb,
skb              5697 net/core/dev.c 		__skb_push(skb, ETH_HLEN);
skb              5698 net/core/dev.c 		skb->protocol = eth_type_trans(skb, skb->dev);
skb              5700 net/core/dev.c 			gro_normal_one(napi, skb);
skb              5704 net/core/dev.c 		napi_reuse_skb(napi, skb);
skb              5708 net/core/dev.c 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
skb              5709 net/core/dev.c 			napi_skb_free_stolen_head(skb);
skb              5711 net/core/dev.c 			napi_reuse_skb(napi, skb);
skb              5728 net/core/dev.c 	struct sk_buff *skb = napi->skb;
skb              5732 net/core/dev.c 	napi->skb = NULL;
skb              5734 net/core/dev.c 	skb_reset_mac_header(skb);
skb              5735 net/core/dev.c 	skb_gro_reset_offset(skb);
skb              5737 net/core/dev.c 	if (unlikely(skb_gro_header_hard(skb, hlen))) {
skb              5738 net/core/dev.c 		eth = skb_gro_header_slow(skb, hlen, 0);
skb              5742 net/core/dev.c 			napi_reuse_skb(napi, skb);
skb              5746 net/core/dev.c 		eth = (const struct ethhdr *)skb->data;
skb              5747 net/core/dev.c 		gro_pull_from_frag0(skb, hlen);
skb              5748 net/core/dev.c 		NAPI_GRO_CB(skb)->frag0 += hlen;
skb              5749 net/core/dev.c 		NAPI_GRO_CB(skb)->frag0_len -= hlen;
skb              5751 net/core/dev.c 	__skb_pull(skb, hlen);
skb              5758 net/core/dev.c 	skb->protocol = eth->h_proto;
skb              5760 net/core/dev.c 	return skb;
skb              5766 net/core/dev.c 	struct sk_buff *skb = napi_frags_skb(napi);
skb              5768 net/core/dev.c 	if (!skb)
skb              5771 net/core/dev.c 	trace_napi_gro_frags_entry(skb);
skb              5773 net/core/dev.c 	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
skb              5783 net/core/dev.c __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
skb              5788 net/core/dev.c 	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
skb              5791 net/core/dev.c 	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
skb              5794 net/core/dev.c 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
skb              5795 net/core/dev.c 		    !skb->csum_complete_sw)
skb              5796 net/core/dev.c 			netdev_rx_csum_fault(skb->dev, skb);
skb              5799 net/core/dev.c 	NAPI_GRO_CB(skb)->csum = wsum;
skb              5800 net/core/dev.c 	NAPI_GRO_CB(skb)->csum_valid = 1;
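
__skb_gro_checksum_complete() above adds the freshly computed wsum to the cached NAPI_GRO_CB csum and treats a zero csum_fold() result as valid. The fold itself is the standard Internet ones-complement reduction, shown here as a self-contained demonstration: a packet whose checksum field was written correctly folds to zero over its whole extent.

	#include <stdint.h>
	#include <stdio.h>

	/* csum_fold(): add the upper carries back into the low 16 bits
	 * (twice covers any second carry), then invert. */
	static uint16_t csum_fold(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	/* ones-complement sum over 16-bit big-endian words (even len) */
	static uint32_t csum_partial(const uint8_t *p, unsigned int len)
	{
		uint32_t sum = 0;

		for (unsigned int i = 0; i < len; i += 2)
			sum += ((uint32_t)p[i] << 8) | p[i + 1];
		return sum;
	}

	int main(void)
	{
		uint8_t seg[8] = { 0x45, 0x00, 0x00, 0x08,
				   0x00, 0x00, 0x00, 0x00 };
		uint16_t csum = csum_fold(csum_partial(seg, sizeof(seg)));

		/* store the checksum, then re-verify: fold must be 0 */
		seg[4] = csum >> 8;
		seg[5] = csum & 0xff;
		printf("verify fold = %#x (0 means valid)\n",
		       csum_fold(csum_partial(seg, sizeof(seg))));
		return 0;
	}
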
skb              5865 net/core/dev.c 		struct sk_buff *skb;
skb              5867 net/core/dev.c 		while ((skb = __skb_dequeue(&sd->process_queue))) {
skb              5869 net/core/dev.c 			__netif_receive_skb(skb);
skb              6227 net/core/dev.c 	napi->skb = NULL;
skb              6266 net/core/dev.c 		struct sk_buff *skb, *n;
skb              6268 net/core/dev.c 		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
skb              6269 net/core/dev.c 			kfree_skb(skb);
skb              8519 net/core/dev.c 		struct sk_buff *skb = NULL;
skb              8533 net/core/dev.c 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
skb              8545 net/core/dev.c 		if (skb)
skb              8546 net/core/dev.c 			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
skb              9827 net/core/dev.c 	struct sk_buff *skb;
skb              9878 net/core/dev.c 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
skb              9879 net/core/dev.c 		netif_rx_ni(skb);
skb              9882 net/core/dev.c 	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
skb              9883 net/core/dev.c 		netif_rx_ni(skb);
skb               388 net/core/devlink.c 			       struct sk_buff *skb, struct genl_info *info)
skb               433 net/core/devlink.c 				 struct sk_buff *skb, struct genl_info *info)
skb               630 net/core/devlink.c static int devlink_nl_cmd_get_doit(struct sk_buff *skb, struct genl_info *info)
skb               667 net/core/devlink.c 				      NETLINK_CB(cb->skb).portid,
skb               680 net/core/devlink.c static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
skb               724 net/core/devlink.c 						   NETLINK_CB(cb->skb).portid,
skb               764 net/core/devlink.c static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
skb               792 net/core/devlink.c static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
skb               817 net/core/devlink.c static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
skb               868 net/core/devlink.c static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
skb               912 net/core/devlink.c 						 NETLINK_CB(cb->skb).portid,
skb               973 net/core/devlink.c static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
skb              1049 net/core/devlink.c 						   NETLINK_CB(cb->skb).portid,
skb              1079 net/core/devlink.c static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb,
skb              1164 net/core/devlink.c static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
skb              1247 net/core/devlink.c 							NETLINK_CB(cb->skb).portid,
skb              1277 net/core/devlink.c static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
skb              1364 net/core/devlink.c static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
skb              1476 net/core/devlink.c 							   NETLINK_CB(cb->skb).portid,
skb              1508 net/core/devlink.c static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
skb              1542 net/core/devlink.c static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
skb              1554 net/core/devlink.c static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
skb              1621 net/core/devlink.c static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb,
skb              1643 net/core/devlink.c static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
skb              1686 net/core/devlink.c int devlink_dpipe_match_put(struct sk_buff *skb,
skb              1693 net/core/devlink.c 	match_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_MATCH);
skb              1697 net/core/devlink.c 	if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_MATCH_TYPE, match->type) ||
skb              1698 net/core/devlink.c 	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, match->header_index) ||
skb              1699 net/core/devlink.c 	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
skb              1700 net/core/devlink.c 	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
skb              1701 net/core/devlink.c 	    nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
skb              1704 net/core/devlink.c 	nla_nest_end(skb, match_attr);
skb              1708 net/core/devlink.c 	nla_nest_cancel(skb, match_attr);
skb              1714 net/core/devlink.c 				     struct sk_buff *skb)
skb              1718 net/core/devlink.c 	matches_attr = nla_nest_start_noflag(skb,
skb              1723 net/core/devlink.c 	if (table->table_ops->matches_dump(table->priv, skb))
skb              1726 net/core/devlink.c 	nla_nest_end(skb, matches_attr);
skb              1730 net/core/devlink.c 	nla_nest_cancel(skb, matches_attr);
skb              1734 net/core/devlink.c int devlink_dpipe_action_put(struct sk_buff *skb,
skb              1741 net/core/devlink.c 	action_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ACTION);
skb              1745 net/core/devlink.c 	if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_ACTION_TYPE, action->type) ||
skb              1746 net/core/devlink.c 	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, action->header_index) ||
skb              1747 net/core/devlink.c 	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
skb              1748 net/core/devlink.c 	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
skb              1749 net/core/devlink.c 	    nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
skb              1752 net/core/devlink.c 	nla_nest_end(skb, action_attr);
skb              1756 net/core/devlink.c 	nla_nest_cancel(skb, action_attr);
skb              1762 net/core/devlink.c 				     struct sk_buff *skb)
skb              1766 net/core/devlink.c 	actions_attr = nla_nest_start_noflag(skb,
skb              1771 net/core/devlink.c 	if (table->table_ops->actions_dump(table->priv, skb))
skb              1774 net/core/devlink.c 	nla_nest_end(skb, actions_attr);
skb              1778 net/core/devlink.c 	nla_nest_cancel(skb, actions_attr);
skb              1782 net/core/devlink.c static int devlink_dpipe_table_put(struct sk_buff *skb,
skb              1789 net/core/devlink.c 	table_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLE);
skb              1793 net/core/devlink.c 	if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) ||
skb              1794 net/core/devlink.c 	    nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_SIZE, table_size,
skb              1797 net/core/devlink.c 	if (nla_put_u8(skb, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,
skb              1802 net/core/devlink.c 		if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
skb              1804 net/core/devlink.c 		    nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
skb              1808 net/core/devlink.c 	if (devlink_dpipe_matches_put(table, skb))
skb              1811 net/core/devlink.c 	if (devlink_dpipe_actions_put(table, skb))
skb              1814 net/core/devlink.c 	nla_nest_end(skb, table_attr);
skb              1818 net/core/devlink.c 	nla_nest_cancel(skb, table_attr);
skb              1846 net/core/devlink.c 	struct sk_buff *skb = NULL;
skb              1856 net/core/devlink.c 	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
skb              1860 net/core/devlink.c 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
skb              1863 net/core/devlink.c 		nlmsg_free(skb);
skb              1867 net/core/devlink.c 	if (devlink_nl_put_handle(skb, devlink))
skb              1869 net/core/devlink.c 	tables_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLES);
skb              1877 net/core/devlink.c 			err = devlink_dpipe_table_put(skb, table);
skb              1886 net/core/devlink.c 				err = devlink_dpipe_table_put(skb, table);
skb              1894 net/core/devlink.c 	nla_nest_end(skb, tables_attr);
skb              1895 net/core/devlink.c 	genlmsg_end(skb, hdr);
skb              1900 net/core/devlink.c 	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
skb              1903 net/core/devlink.c 		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
skb              1909 net/core/devlink.c 	return genlmsg_reply(skb, info);
skb              1914 net/core/devlink.c 	nlmsg_free(skb);
skb              1918 net/core/devlink.c static int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb,
skb              1932 net/core/devlink.c static int devlink_dpipe_value_put(struct sk_buff *skb,
skb              1935 net/core/devlink.c 	if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE,
skb              1939 net/core/devlink.c 		if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE_MASK,
skb              1943 net/core/devlink.c 		if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_VALUE_MAPPING,
skb              1949 net/core/devlink.c static int devlink_dpipe_action_value_put(struct sk_buff *skb,
skb              1954 net/core/devlink.c 	if (devlink_dpipe_action_put(skb, value->action))
skb              1956 net/core/devlink.c 	if (devlink_dpipe_value_put(skb, value))
skb              1961 net/core/devlink.c static int devlink_dpipe_action_values_put(struct sk_buff *skb,
skb              1970 net/core/devlink.c 		action_attr = nla_nest_start_noflag(skb,
skb              1974 net/core/devlink.c 		err = devlink_dpipe_action_value_put(skb, &values[i]);
skb              1977 net/core/devlink.c 		nla_nest_end(skb, action_attr);
skb              1982 net/core/devlink.c 	nla_nest_cancel(skb, action_attr);
skb              1986 net/core/devlink.c static int devlink_dpipe_match_value_put(struct sk_buff *skb,
skb              1991 net/core/devlink.c 	if (devlink_dpipe_match_put(skb, value->match))
skb              1993 net/core/devlink.c 	if (devlink_dpipe_value_put(skb, value))
skb              1998 net/core/devlink.c static int devlink_dpipe_match_values_put(struct sk_buff *skb,
skb              2007 net/core/devlink.c 		match_attr = nla_nest_start_noflag(skb,
skb              2011 net/core/devlink.c 		err = devlink_dpipe_match_value_put(skb, &values[i]);
skb              2014 net/core/devlink.c 		nla_nest_end(skb, match_attr);
skb              2019 net/core/devlink.c 	nla_nest_cancel(skb, match_attr);
skb              2023 net/core/devlink.c static int devlink_dpipe_entry_put(struct sk_buff *skb,
skb              2029 net/core/devlink.c 	entry_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ENTRY);
skb              2033 net/core/devlink.c 	if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_INDEX, entry->index,
skb              2037 net/core/devlink.c 		if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,
skb              2041 net/core/devlink.c 	matches_attr = nla_nest_start_noflag(skb,
skb              2046 net/core/devlink.c 	err = devlink_dpipe_match_values_put(skb, entry->match_values,
skb              2049 net/core/devlink.c 		nla_nest_cancel(skb, matches_attr);
skb              2052 net/core/devlink.c 	nla_nest_end(skb, matches_attr);
skb              2054 net/core/devlink.c 	actions_attr = nla_nest_start_noflag(skb,
skb              2059 net/core/devlink.c 	err = devlink_dpipe_action_values_put(skb, entry->action_values,
skb              2062 net/core/devlink.c 		nla_nest_cancel(skb, actions_attr);
skb              2065 net/core/devlink.c 	nla_nest_end(skb, actions_attr);
skb              2067 net/core/devlink.c 	nla_nest_end(skb, entry_attr);
skb              2074 net/core/devlink.c 	nla_nest_cancel(skb, entry_attr);
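
The devlink dump code above leans on one netlink idiom throughout: open a nest with nla_nest_start_noflag(), emit inner attributes, then either nla_nest_end() on success or nla_nest_cancel() on failure. A hypothetical flat-buffer TLV writer sketching that pattern (host-endian, fixed-size, not real netlink wire format): the nest header's length is a placeholder that is patched on end, and cancel simply rewinds the buffer past the partial nest.

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	struct tlv_buf {
		uint8_t data[256];
		uint16_t len;
	};

	static uint16_t nest_start(struct tlv_buf *b, uint16_t type)
	{
		uint16_t off = b->len;

		assert(b->len + 4 <= sizeof(b->data));
		memset(b->data + b->len, 0, 2);		/* len: patched later */
		memcpy(b->data + b->len + 2, &type, 2);
		b->len += 4;
		return off;
	}

	static void put_u32(struct tlv_buf *b, uint16_t type, uint32_t v)
	{
		uint16_t l = 8;

		assert(b->len + 8 <= sizeof(b->data));
		memcpy(b->data + b->len, &l, 2);
		memcpy(b->data + b->len + 2, &type, 2);
		memcpy(b->data + b->len + 4, &v, 4);
		b->len += 8;
	}

	static void nest_end(struct tlv_buf *b, uint16_t off)
	{
		uint16_t l = b->len - off;

		memcpy(b->data + off, &l, 2);	/* patch outer length */
	}

	static void nest_cancel(struct tlv_buf *b, uint16_t off)
	{
		b->len = off;			/* drop the partial nest */
	}

	int main(void)
	{
		struct tlv_buf b = { .len = 0 };
		uint16_t nest = nest_start(&b, 1);

		put_u32(&b, 2, 0xabcd);		/* inner attribute */
		nest_end(&b, nest);

		uint16_t bad = nest_start(&b, 3);
		nest_cancel(&b, bad);		/* as if an inner put failed */

		printf("encoded %u bytes\n", b.len);
		return 0;
	}
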
skb              2096 net/core/devlink.c 	err = devlink_dpipe_send_and_alloc_skb(&dump_ctx->skb,
skb              2101 net/core/devlink.c 	dump_ctx->hdr = genlmsg_put(dump_ctx->skb,
skb              2110 net/core/devlink.c 	if (devlink_nl_put_handle(dump_ctx->skb, devlink))
skb              2112 net/core/devlink.c 	dump_ctx->nest = nla_nest_start_noflag(dump_ctx->skb,
skb              2119 net/core/devlink.c 	nlmsg_free(dump_ctx->skb);
skb              2127 net/core/devlink.c 	return devlink_dpipe_entry_put(dump_ctx->skb, entry);
skb              2133 net/core/devlink.c 	nla_nest_end(dump_ctx->skb, dump_ctx->nest);
skb              2134 net/core/devlink.c 	genlmsg_end(dump_ctx->skb, dump_ctx->hdr);
skb              2169 net/core/devlink.c 	dump_ctx.skb = NULL;
skb              2180 net/core/devlink.c 	nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
skb              2183 net/core/devlink.c 		err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
skb              2188 net/core/devlink.c 	return genlmsg_reply(dump_ctx.skb, info);
skb              2191 net/core/devlink.c static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
skb              2214 net/core/devlink.c static int devlink_dpipe_fields_put(struct sk_buff *skb,
skb              2223 net/core/devlink.c 		field_attr = nla_nest_start_noflag(skb,
skb              2227 net/core/devlink.c 		if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_FIELD_NAME, field->name) ||
skb              2228 net/core/devlink.c 		    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
skb              2229 net/core/devlink.c 		    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH, field->bitwidth) ||
skb              2230 net/core/devlink.c 		    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE, field->mapping_type))
skb              2232 net/core/devlink.c 		nla_nest_end(skb, field_attr);
skb              2237 net/core/devlink.c 	nla_nest_cancel(skb, field_attr);
skb              2241 net/core/devlink.c static int devlink_dpipe_header_put(struct sk_buff *skb,
skb              2247 net/core/devlink.c 	header_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADER);
skb              2251 net/core/devlink.c 	if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_HEADER_NAME, header->name) ||
skb              2252 net/core/devlink.c 	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
skb              2253 net/core/devlink.c 	    nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
skb              2256 net/core/devlink.c 	fields_attr = nla_nest_start_noflag(skb,
skb              2261 net/core/devlink.c 	err = devlink_dpipe_fields_put(skb, header);
skb              2263 net/core/devlink.c 		nla_nest_cancel(skb, fields_attr);
skb              2266 net/core/devlink.c 	nla_nest_end(skb, fields_attr);
skb              2267 net/core/devlink.c 	nla_nest_end(skb, header_attr);
skb              2272 net/core/devlink.c 	nla_nest_cancel(skb, header_attr);
skb              2283 net/core/devlink.c 	struct sk_buff *skb = NULL;
skb              2291 net/core/devlink.c 	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
skb              2295 net/core/devlink.c 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
skb              2298 net/core/devlink.c 		nlmsg_free(skb);
skb              2302 net/core/devlink.c 	if (devlink_nl_put_handle(skb, devlink))
skb              2304 net/core/devlink.c 	headers_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADERS);
skb              2310 net/core/devlink.c 		err = devlink_dpipe_header_put(skb, dpipe_headers->headers[i]);
skb              2318 net/core/devlink.c 	nla_nest_end(skb, headers_attr);
skb              2319 net/core/devlink.c 	genlmsg_end(skb, hdr);
skb              2324 net/core/devlink.c 	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
skb              2327 net/core/devlink.c 		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
skb              2332 net/core/devlink.c 	return genlmsg_reply(skb, info);
skb              2337 net/core/devlink.c 	nlmsg_free(skb);
skb              2341 net/core/devlink.c static int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb,
skb              2375 net/core/devlink.c static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
skb              2463 net/core/devlink.c static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
skb              2495 net/core/devlink.c 				 struct sk_buff *skb)
skb              2500 net/core/devlink.c 	if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
skb              2502 net/core/devlink.c 	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
skb              2504 net/core/devlink.c 	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
skb              2506 net/core/devlink.c 	    nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
skb              2512 net/core/devlink.c 				    struct sk_buff *skb)
skb              2516 net/core/devlink.c 	return nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
skb              2521 net/core/devlink.c static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
skb              2528 net/core/devlink.c 	resource_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_RESOURCE);
skb              2532 net/core/devlink.c 	if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
skb              2533 net/core/devlink.c 	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
skb              2535 net/core/devlink.c 	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
skb              2539 net/core/devlink.c 		nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
skb              2541 net/core/devlink.c 	if (devlink_resource_occ_put(resource, skb))
skb              2543 net/core/devlink.c 	if (devlink_resource_size_params_put(resource, skb))
skb              2548 net/core/devlink.c 	if (nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_SIZE_VALID,
skb              2552 net/core/devlink.c 	child_resource_attr = nla_nest_start_noflag(skb,
skb              2558 net/core/devlink.c 		if (devlink_resource_put(devlink, skb, child_resource))
skb              2562 net/core/devlink.c 	nla_nest_end(skb, child_resource_attr);
skb              2564 net/core/devlink.c 	nla_nest_end(skb, resource_attr);
skb              2568 net/core/devlink.c 	nla_nest_cancel(skb, child_resource_attr);
skb              2570 net/core/devlink.c 	nla_nest_cancel(skb, resource_attr);
skb              2580 net/core/devlink.c 	struct sk_buff *skb = NULL;
skb              2590 net/core/devlink.c 	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
skb              2594 net/core/devlink.c 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
skb              2597 net/core/devlink.c 		nlmsg_free(skb);
skb              2601 net/core/devlink.c 	if (devlink_nl_put_handle(skb, devlink))
skb              2604 net/core/devlink.c 	resources_attr = nla_nest_start_noflag(skb,
skb              2612 net/core/devlink.c 		err = devlink_resource_put(devlink, skb, resource);
skb              2621 net/core/devlink.c 	nla_nest_end(skb, resources_attr);
skb              2622 net/core/devlink.c 	genlmsg_end(skb, hdr);
skb              2626 net/core/devlink.c 	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
skb              2629 net/core/devlink.c 		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
skb              2634 net/core/devlink.c 	return genlmsg_reply(skb, info);
skb              2639 net/core/devlink.c 	nlmsg_free(skb);
skb              2643 net/core/devlink.c static int devlink_nl_cmd_resource_dump(struct sk_buff *skb,
skb              2697 net/core/devlink.c static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
skb              2820 net/core/devlink.c static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
skb              3172 net/core/devlink.c 						    NETLINK_CB(cb->skb).portid,
skb              3278 net/core/devlink.c static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
skb              3363 net/core/devlink.c static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
skb              3439 net/core/devlink.c 						NETLINK_CB(cb->skb).portid,
skb              3458 net/core/devlink.c static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
skb              3487 net/core/devlink.c static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
skb              3648 net/core/devlink.c static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb,
skb              3702 net/core/devlink.c 						     NETLINK_CB(cb->skb).portid,
skb              3719 net/core/devlink.c static int devlink_nl_cmd_region_del(struct sk_buff *skb,
skb              3778 net/core/devlink.c static int devlink_nl_region_read_snapshot_fill(struct sk_buff *skb,
skb              3812 net/core/devlink.c 		err = devlink_nl_cmd_region_read_chunk_fill(skb, devlink,
skb              3825 net/core/devlink.c static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
skb              3852 net/core/devlink.c 	devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
skb              3879 net/core/devlink.c 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              3887 net/core/devlink.c 	err = devlink_nl_put_handle(skb, devlink);
skb              3891 net/core/devlink.c 	err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
skb              3895 net/core/devlink.c 	chunks_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_REGION_CHUNKS);
skb              3917 net/core/devlink.c 	err = devlink_nl_region_read_snapshot_fill(skb, devlink,
skb              3934 net/core/devlink.c 	nla_nest_end(skb, chunks_attr);
skb              3935 net/core/devlink.c 	genlmsg_end(skb, hdr);
skb              3940 net/core/devlink.c 	return skb->len;
skb              3943 net/core/devlink.c 	genlmsg_cancel(skb, hdr);
skb              4056 net/core/devlink.c static int devlink_nl_cmd_info_get_doit(struct sk_buff *skb,
skb              4105 net/core/devlink.c 					   NETLINK_CB(cb->skb).portid,
skb              4451 net/core/devlink.c devlink_fmsg_item_fill_type(struct devlink_fmsg_item *msg, struct sk_buff *skb)
skb              4460 net/core/devlink.c 		return nla_put_u8(skb, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE,
skb              4468 net/core/devlink.c devlink_fmsg_item_fill_data(struct devlink_fmsg_item *msg, struct sk_buff *skb)
skb              4478 net/core/devlink.c 		return nla_put_u8(skb, attrtype, tmp);
skb              4480 net/core/devlink.c 		return nla_put_u8(skb, attrtype, *(u8 *) msg->value);
skb              4482 net/core/devlink.c 		return nla_put_u32(skb, attrtype, *(u32 *) msg->value);
skb              4484 net/core/devlink.c 		return nla_put_u64_64bit(skb, attrtype, *(u64 *) msg->value,
skb              4487 net/core/devlink.c 		return nla_put_string(skb, attrtype, (char *) &msg->value);
skb              4489 net/core/devlink.c 		return nla_put(skb, attrtype, msg->len, (void *) &msg->value);
skb              4496 net/core/devlink.c devlink_fmsg_prepare_skb(struct devlink_fmsg *fmsg, struct sk_buff *skb,
skb              4504 net/core/devlink.c 	fmsg_nlattr = nla_nest_start_noflag(skb, DEVLINK_ATTR_FMSG);
skb              4519 net/core/devlink.c 			err = nla_put_flag(skb, item->attrtype);
skb              4522 net/core/devlink.c 			err = devlink_fmsg_item_fill_type(item, skb);
skb              4525 net/core/devlink.c 			err = devlink_fmsg_item_fill_data(item, skb);
skb              4528 net/core/devlink.c 			err = nla_put_string(skb, item->attrtype,
skb              4541 net/core/devlink.c 	nla_nest_end(skb, fmsg_nlattr);
skb              4550 net/core/devlink.c 	struct sk_buff *skb;
skb              4559 net/core/devlink.c 		skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb              4560 net/core/devlink.c 		if (!skb)
skb              4563 net/core/devlink.c 		hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
skb              4570 net/core/devlink.c 		err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
skb              4576 net/core/devlink.c 		genlmsg_end(skb, hdr);
skb              4577 net/core/devlink.c 		err = genlmsg_reply(skb, info);
skb              4582 net/core/devlink.c 	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb              4583 net/core/devlink.c 	if (!skb)
skb              4585 net/core/devlink.c 	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
skb              4592 net/core/devlink.c 	return genlmsg_reply(skb, info);
skb              4595 net/core/devlink.c 	nlmsg_free(skb);
skb              4599 net/core/devlink.c static int devlink_fmsg_dumpit(struct devlink_fmsg *fmsg, struct sk_buff *skb,
skb              4608 net/core/devlink.c 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              4615 net/core/devlink.c 	err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
skb              4620 net/core/devlink.c 	genlmsg_end(skb, hdr);
skb              4621 net/core/devlink.c 	return skb->len;
skb              4624 net/core/devlink.c 	genlmsg_cancel(skb, hdr);
skb              4907 net/core/devlink.c 	devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
skb              4991 net/core/devlink.c static int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
skb              5048 net/core/devlink.c 							      NETLINK_CB(cb->skb).portid,
skb              5067 net/core/devlink.c devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
skb              5100 net/core/devlink.c static int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
skb              5117 net/core/devlink.c static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
skb              5162 net/core/devlink.c devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
skb              5190 net/core/devlink.c 	err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
skb              5200 net/core/devlink.c devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
skb              5435 net/core/devlink.c static int devlink_nl_cmd_trap_get_doit(struct sk_buff *skb,
skb              5491 net/core/devlink.c 						   NETLINK_CB(cb->skb).portid,
skb              5552 net/core/devlink.c static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb,
skb              5639 net/core/devlink.c static int devlink_nl_cmd_trap_group_get_doit(struct sk_buff *skb,
skb              5679 net/core/devlink.c 	u32 portid = NETLINK_CB(cb->skb).portid;
skb              5763 net/core/devlink.c static int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
skb              7979 net/core/devlink.c void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
skb              7985 net/core/devlink.c 	devlink_trap_stats_update(trap_item->stats, skb->len);
skb              7986 net/core/devlink.c 	devlink_trap_stats_update(trap_item->group_item->stats, skb->len);
skb              7990 net/core/devlink.c 	net_dm_hw_report(skb, &hw_metadata);
skb                78 net/core/drop_monitor.c 		struct sk_buff			*skb;
skb               110 net/core/drop_monitor.c 	void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
skb               116 net/core/drop_monitor.c 	void (*hw_probe)(struct sk_buff *skb,
skb               134 net/core/drop_monitor.c 	struct sk_buff *skb;
skb               142 net/core/drop_monitor.c 	skb = genlmsg_new(al, GFP_KERNEL);
skb               144 net/core/drop_monitor.c 	if (!skb)
skb               147 net/core/drop_monitor.c 	msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
skb               150 net/core/drop_monitor.c 		nlmsg_free(skb);
skb               151 net/core/drop_monitor.c 		skb = NULL;
skb               154 net/core/drop_monitor.c 	nla = nla_reserve(skb, NLA_UNSPEC,
skb               157 net/core/drop_monitor.c 		nlmsg_free(skb);
skb               158 net/core/drop_monitor.c 		skb = NULL;
skb               169 net/core/drop_monitor.c 	swap(data->skb, skb);
skb               172 net/core/drop_monitor.c 	if (skb) {
skb               173 net/core/drop_monitor.c 		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
skb               176 net/core/drop_monitor.c 		genlmsg_end(skb, genlmsg_data(gnlh));
skb               179 net/core/drop_monitor.c 	return skb;
skb               188 net/core/drop_monitor.c 	struct sk_buff *skb;
skb               193 net/core/drop_monitor.c 	skb = reset_per_cpu_data(data);
skb               195 net/core/drop_monitor.c 	if (skb)
skb               196 net/core/drop_monitor.c 		genlmsg_multicast(&net_drop_monitor_family, skb, 0,
skb               212 net/core/drop_monitor.c static void trace_drop_common(struct sk_buff *skb, void *location)
skb               226 net/core/drop_monitor.c 	dskb = data->skb;
skb               262 net/core/drop_monitor.c static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
skb               264 net/core/drop_monitor.c 	trace_drop_common(skb, location);
skb               434 net/core/drop_monitor.c net_dm_hw_summary_probe(struct sk_buff *skb,
skb               485 net/core/drop_monitor.c 					      struct sk_buff *skb,
skb               493 net/core/drop_monitor.c 	if (!skb_mac_header_was_set(skb))
skb               496 net/core/drop_monitor.c 	nskb = skb_clone(skb, GFP_ATOMIC);
skb               595 net/core/drop_monitor.c static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
skb               598 net/core/drop_monitor.c 	u64 pc = (u64)(uintptr_t) NET_DM_SKB_CB(skb)->pc;
skb               615 net/core/drop_monitor.c 	snprintf(buf, sizeof(buf), "%pS", NET_DM_SKB_CB(skb)->pc);
skb               619 net/core/drop_monitor.c 	rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif, NULL);
skb               624 net/core/drop_monitor.c 			      ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
skb               627 net/core/drop_monitor.c 	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
skb               633 net/core/drop_monitor.c 	if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
skb               639 net/core/drop_monitor.c 	if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
skb               654 net/core/drop_monitor.c static void net_dm_packet_report(struct sk_buff *skb)
skb               661 net/core/drop_monitor.c 	if (skb->data > skb_mac_header(skb))
skb               662 net/core/drop_monitor.c 		skb_push(skb, skb->data - skb_mac_header(skb));
skb               664 net/core/drop_monitor.c 		skb_pull(skb, skb_mac_header(skb) - skb->data);
skb               667 net/core/drop_monitor.c 	payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
skb               675 net/core/drop_monitor.c 	rc = net_dm_packet_report_fill(msg, skb, payload_len);
skb               684 net/core/drop_monitor.c 	consume_skb(skb);
skb               691 net/core/drop_monitor.c 	struct sk_buff *skb;
skb               702 net/core/drop_monitor.c 	while ((skb = __skb_dequeue(&list)))
skb               703 net/core/drop_monitor.c 		net_dm_packet_report(skb);
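
The drop-monitor work function above drains a local list with __skb_dequeue() and reports each packet; the elided lines presumably move the shared per-CPU drop queue into that local list under a lock first, which is the usual splice-then-process shape: hold the lock only for the move, do the slow netlink reporting unlocked. A generic toy sketch of that pattern, assuming a trivial fixed-size queue in place of sk_buff_head:

	#include <pthread.h>
	#include <stdio.h>

	#define QCAP 16

	struct queue {
		pthread_mutex_t lock;
		int items[QCAP];
		int n;
	};

	static void drain(struct queue *q)
	{
		int local[QCAP];
		int n;

		pthread_mutex_lock(&q->lock);	/* brief: just the move */
		n = q->n;
		for (int i = 0; i < n; i++)
			local[i] = q->items[i];
		q->n = 0;
		pthread_mutex_unlock(&q->lock);

		for (int i = 0; i < n; i++)	/* slow work, unlocked */
			printf("report item %d\n", local[i]);
	}

	int main(void)
	{
		struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER,
				   .items = { 1, 2, 3 }, .n = 3 };

		drain(&q);
		return 0;
	}
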
skb               734 net/core/drop_monitor.c 					struct sk_buff *skb, size_t payload_len)
skb               740 net/core/drop_monitor.c 	hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
skb               769 net/core/drop_monitor.c 			      ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
skb               772 net/core/drop_monitor.c 	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
skb               778 net/core/drop_monitor.c 	if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
skb               784 net/core/drop_monitor.c 	if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
skb               845 net/core/drop_monitor.c static void net_dm_hw_packet_report(struct sk_buff *skb)
skb               852 net/core/drop_monitor.c 	if (skb->data > skb_mac_header(skb))
skb               853 net/core/drop_monitor.c 		skb_push(skb, skb->data - skb_mac_header(skb));
skb               855 net/core/drop_monitor.c 		skb_pull(skb, skb_mac_header(skb) - skb->data);
skb               857 net/core/drop_monitor.c 	payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
skb               861 net/core/drop_monitor.c 	hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
skb               867 net/core/drop_monitor.c 	rc = net_dm_hw_packet_report_fill(msg, skb, payload_len);
skb               876 net/core/drop_monitor.c 	net_dm_hw_metadata_free(NET_DM_SKB_CB(skb)->hw_metadata);
skb               877 net/core/drop_monitor.c 	consume_skb(skb);
skb               884 net/core/drop_monitor.c 	struct sk_buff *skb;
skb               895 net/core/drop_monitor.c 	while ((skb = __skb_dequeue(&list)))
skb               896 net/core/drop_monitor.c 		net_dm_hw_packet_report(skb);
skb               900 net/core/drop_monitor.c net_dm_hw_packet_probe(struct sk_buff *skb,
skb               909 net/core/drop_monitor.c 	if (!skb_mac_header_was_set(skb))
skb               912 net/core/drop_monitor.c 	nskb = skb_clone(skb, GFP_ATOMIC);
skb               959 net/core/drop_monitor.c void net_dm_hw_report(struct sk_buff *skb,
skb               967 net/core/drop_monitor.c 	net_dm_alert_ops_arr[net_dm_alert_mode]->hw_probe(skb, hw_metadata);
skb              1024 net/core/drop_monitor.c 		struct sk_buff *skb;
skb              1028 net/core/drop_monitor.c 		while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
skb              1031 net/core/drop_monitor.c 			hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
skb              1033 net/core/drop_monitor.c 			consume_skb(skb);
skb              1054 net/core/drop_monitor.c 		struct sk_buff *skb;
skb              1062 net/core/drop_monitor.c 		skb = reset_per_cpu_data(data);
skb              1063 net/core/drop_monitor.c 		consume_skb(skb);
skb              1105 net/core/drop_monitor.c 		struct sk_buff *skb;
skb              1109 net/core/drop_monitor.c 		while ((skb = __skb_dequeue(&data->drop_queue)))
skb              1110 net/core/drop_monitor.c 			consume_skb(skb);
skb              1212 net/core/drop_monitor.c static int net_dm_cmd_config(struct sk_buff *skb,
skb              1270 net/core/drop_monitor.c static int net_dm_cmd_trace(struct sk_buff *skb,
skb              1321 net/core/drop_monitor.c static int net_dm_cmd_config_get(struct sk_buff *skb, struct genl_info *info)
skb              1456 net/core/drop_monitor.c static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info)
skb              1552 net/core/drop_monitor.c 			      struct sk_buff *skb, struct genl_info *info)
skb              1560 net/core/drop_monitor.c 				struct sk_buff *skb, struct genl_info *info)
skb              1612 net/core/drop_monitor.c 	consume_skb(data->skb);
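
The drop_monitor references above center on one pattern: the probe runs in atomic
(tracepoint) context, so it clones the dropped skb with GFP_ATOMIC and queues the
clone for a process-context reporter, which later builds a netlink message and
releases the clone with consume_skb(). A minimal sketch of the clone-and-queue
step, with a hypothetical helper name and queue locking left to the caller:

#include <linux/skbuff.h>

/* Sketch of the clone-and-queue step (hypothetical helper).  Tracepoint
 * context cannot sleep, so the clone uses GFP_ATOMIC and a failed clone
 * just means this drop goes unreported.  The caller is assumed to
 * serialize queue access (drop_monitor holds a per-CPU spinlock around
 * the real __skb_queue_tail()).
 */
static void example_queue_dropped_skb(struct sk_buff *skb,
				      struct sk_buff_head *drop_queue)
{
	struct sk_buff *nskb;

	if (!skb_mac_header_was_set(skb))
		return;			/* nothing useful to report */

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return;			/* best effort only */

	__skb_queue_tail(drop_queue, nskb);
}
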
skb                30 net/core/dst.c int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                32 net/core/dst.c 	kfree_skb(skb);
skb               244 net/core/dst.c static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               247 net/core/dst.c 	kfree_skb(skb);
skb               251 net/core/dst.c static int dst_md_discard(struct sk_buff *skb)
skb               254 net/core/dst.c 	kfree_skb(skb);
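
dst_discard_out() and its metadata variants above all reduce to the same stub:
free the packet and report success so the caller treats it as handled. A minimal
sketch, assuming only the standard dst output signature (hypothetical name):

#include <linux/skbuff.h>
#include <net/dst.h>

/* Discard pattern: drop the skb, report success. */
static int example_discard_out(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
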
skb               215 net/core/fib_rules.c static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
skb               222 net/core/fib_rules.c 	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
skb               239 net/core/fib_rules.c static int nla_put_port_range(struct sk_buff *skb, int attrtype,
skb               242 net/core/fib_rules.c 	return nla_put(skb, attrtype, sizeof(*range), range);
skb               486 net/core/fib_rules.c static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               493 net/core/fib_rules.c 	struct net *net = sock_net(skb->sk);
skb               723 net/core/fib_rules.c int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               726 net/core/fib_rules.c 	struct net *net = sock_net(skb->sk);
skb               753 net/core/fib_rules.c 	err = fib_nl2rule(skb, nlh, extack, ops, tb, &rule, &user_priority);
skb               763 net/core/fib_rules.c 	err = ops->configure(rule, skb, frh, tb, extack);
skb               818 net/core/fib_rules.c 	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
skb               831 net/core/fib_rules.c int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               834 net/core/fib_rules.c 	struct net *net = sock_net(skb->sk);
skb               861 net/core/fib_rules.c 	err = fib_nl2rule(skb, nlh, extack, ops, tb, &nlrule, &user_priority);
skb               918 net/core/fib_rules.c 			   NETLINK_CB(skb).portid);
skb               957 net/core/fib_rules.c static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
skb               964 net/core/fib_rules.c 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
skb               971 net/core/fib_rules.c 	if (nla_put_u32(skb, FRA_TABLE, rule->table))
skb               973 net/core/fib_rules.c 	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
skb               980 net/core/fib_rules.c 	if (nla_put_u8(skb, FRA_PROTOCOL, rule->proto))
skb               988 net/core/fib_rules.c 		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
skb               995 net/core/fib_rules.c 		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
skb              1002 net/core/fib_rules.c 	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
skb              1004 net/core/fib_rules.c 	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
skb              1006 net/core/fib_rules.c 	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
skb              1008 net/core/fib_rules.c 	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
skb              1010 net/core/fib_rules.c 	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
skb              1012 net/core/fib_rules.c 	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
skb              1014 net/core/fib_rules.c 	     nla_put_uid_range(skb, &rule->uid_range)) ||
skb              1016 net/core/fib_rules.c 	     nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) ||
skb              1018 net/core/fib_rules.c 	     nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) ||
skb              1019 net/core/fib_rules.c 	    (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto)))
skb              1023 net/core/fib_rules.c 		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
skb              1027 net/core/fib_rules.c 	if (ops->fill(rule, skb, frh) < 0)
skb              1030 net/core/fib_rules.c 	nlmsg_end(skb, nlh);
skb              1034 net/core/fib_rules.c 	nlmsg_cancel(skb, nlh);
skb              1038 net/core/fib_rules.c static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
skb              1050 net/core/fib_rules.c 		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
skb              1091 net/core/fib_rules.c static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
skb              1094 net/core/fib_rules.c 	struct net *net = sock_net(skb->sk);
skb              1112 net/core/fib_rules.c 		dump_rules(skb, cb, ops);
skb              1114 net/core/fib_rules.c 		return skb->len;
skb              1122 net/core/fib_rules.c 		if (dump_rules(skb, cb, ops) < 0)
skb              1132 net/core/fib_rules.c 	return skb->len;
skb              1140 net/core/fib_rules.c 	struct sk_buff *skb;
skb              1144 net/core/fib_rules.c 	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
skb              1145 net/core/fib_rules.c 	if (skb == NULL)
skb              1148 net/core/fib_rules.c 	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
skb              1152 net/core/fib_rules.c 		kfree_skb(skb);
skb              1156 net/core/fib_rules.c 	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
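
fib_nl_fill_rule() shows the canonical netlink fill pattern: nlmsg_put() reserves
the message header, a chain of nla_put_*() calls appends attributes, and the
message is either finalized with nlmsg_end() or rolled back wholesale with
nlmsg_cancel() once the skb runs out of room. A condensed sketch of the same
shape; the helper name is hypothetical and only two FRA_* attributes are shown:

#include <net/netlink.h>
#include <net/fib_rules.h>

/* Condensed netlink fill: reserve header, append attributes, finalize
 * -- or cancel the whole partial message on overflow.
 */
static int example_fill_rule(struct sk_buff *skb, u32 portid, u32 seq,
			     int type, u32 table, u32 prio)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*frh), 0);
	if (!nlh)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));

	if (nla_put_u32(skb, FRA_TABLE, table) ||
	    nla_put_u32(skb, FRA_PRIORITY, prio))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);		/* roll back the partial message */
	return -EMSGSIZE;
}
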
skb                90 net/core/filter.c int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
skb               100 net/core/filter.c 	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
skb               104 net/core/filter.c 	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
skb               108 net/core/filter.c 	err = security_sock_rcv_skb(sk, skb);
skb               115 net/core/filter.c 		struct sock *save_sk = skb->sk;
skb               118 net/core/filter.c 		skb->sk = sk;
skb               119 net/core/filter.c 		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
skb               120 net/core/filter.c 		skb->sk = save_sk;
skb               121 net/core/filter.c 		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
skb               129 net/core/filter.c BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
skb               131 net/core/filter.c 	return skb_get_poff(skb);
skb               134 net/core/filter.c BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
skb               138 net/core/filter.c 	if (skb_is_nonlinear(skb))
skb               141 net/core/filter.c 	if (skb->len < sizeof(struct nlattr))
skb               144 net/core/filter.c 	if (a > skb->len - sizeof(struct nlattr))
skb               147 net/core/filter.c 	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
skb               149 net/core/filter.c 		return (void *) nla - (void *) skb->data;
skb               154 net/core/filter.c BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
skb               158 net/core/filter.c 	if (skb_is_nonlinear(skb))
skb               161 net/core/filter.c 	if (skb->len < sizeof(struct nlattr))
skb               164 net/core/filter.c 	if (a > skb->len - sizeof(struct nlattr))
skb               167 net/core/filter.c 	nla = (struct nlattr *) &skb->data[a];
skb               168 net/core/filter.c 	if (nla->nla_len > skb->len - a)
skb               173 net/core/filter.c 		return (void *) nla - (void *) skb->data;
skb               178 net/core/filter.c BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
skb               187 net/core/filter.c 		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
skb               190 net/core/filter.c 		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
skb               198 net/core/filter.c BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
skb               201 net/core/filter.c 	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
skb               205 net/core/filter.c BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
skb               214 net/core/filter.c 		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
skb               217 net/core/filter.c 		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
skb               225 net/core/filter.c BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
skb               228 net/core/filter.c 	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
skb               232 net/core/filter.c BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
skb               241 net/core/filter.c 		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
skb               244 net/core/filter.c 		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
skb               252 net/core/filter.c BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
skb               255 net/core/filter.c 	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
skb              1632 net/core/filter.c static inline int __bpf_try_make_writable(struct sk_buff *skb,
skb              1635 net/core/filter.c 	return skb_ensure_writable(skb, write_len);
skb              1638 net/core/filter.c static inline int bpf_try_make_writable(struct sk_buff *skb,
skb              1641 net/core/filter.c 	int err = __bpf_try_make_writable(skb, write_len);
skb              1643 net/core/filter.c 	bpf_compute_data_pointers(skb);
skb              1647 net/core/filter.c static int bpf_try_make_head_writable(struct sk_buff *skb)
skb              1649 net/core/filter.c 	return bpf_try_make_writable(skb, skb_headlen(skb));
skb              1652 net/core/filter.c static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
skb              1654 net/core/filter.c 	if (skb_at_tc_ingress(skb))
skb              1655 net/core/filter.c 		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
skb              1658 net/core/filter.c static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
skb              1660 net/core/filter.c 	if (skb_at_tc_ingress(skb))
skb              1661 net/core/filter.c 		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
skb              1664 net/core/filter.c BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
skb              1673 net/core/filter.c 	if (unlikely(bpf_try_make_writable(skb, offset + len)))
skb              1676 net/core/filter.c 	ptr = skb->data + offset;
skb              1678 net/core/filter.c 		__skb_postpull_rcsum(skb, ptr, len, offset);
skb              1683 net/core/filter.c 		__skb_postpush_rcsum(skb, ptr, len, offset);
skb              1685 net/core/filter.c 		skb_clear_hash(skb);
skb              1701 net/core/filter.c BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
skb              1709 net/core/filter.c 	ptr = skb_header_pointer(skb, offset, len, to);
skb              1740 net/core/filter.c 	if (unlikely(!ctx->skb))
skb              1743 net/core/filter.c 	ptr = skb_header_pointer(ctx->skb, offset, len, to);
skb              1765 net/core/filter.c BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
skb              1768 net/core/filter.c 	u8 *end = skb_tail_pointer(skb);
skb              1769 net/core/filter.c 	u8 *net = skb_network_header(skb);
skb              1770 net/core/filter.c 	u8 *mac = skb_mac_header(skb);
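
bpf_skb_store_bytes() demonstrates a checksum-aware rewrite: when the skb carries
CHECKSUM_COMPLETE, the bytes about to be overwritten are pulled out of skb->csum
and the new bytes pushed back in, so the aggregate checksum stays valid without a
full recompute. A minimal sketch of that bracketing (hypothetical helper; the
rcsum fixups are applied unconditionally here, where the real helper gates them
on flags):

#include <linux/skbuff.h>
#include <linux/string.h>

/* Checksum-aware rewrite: subtract the old bytes from skb->csum,
 * overwrite them, then add the new bytes back.
 */
static int example_store_bytes(struct sk_buff *skb, u32 offset,
			       const void *from, u32 len)
{
	void *ptr;
	int err;

	err = skb_ensure_writable(skb, offset + len);
	if (err)
		return err;

	ptr = skb->data + offset;
	skb_postpull_rcsum(skb, ptr, len);	/* subtract old data */
	memcpy(ptr, from, len);
	skb_postpush_rcsum(skb, ptr, len);	/* add new data back */
	return 0;
}
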
skb              1808 net/core/filter.c BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
skb              1819 net/core/filter.c 	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
skb              1842 net/core/filter.c static inline int sk_skb_try_make_writable(struct sk_buff *skb,
skb              1845 net/core/filter.c 	int err = __bpf_try_make_writable(skb, write_len);
skb              1847 net/core/filter.c 	bpf_compute_data_end_sk_skb(skb);
skb              1851 net/core/filter.c BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
skb              1862 net/core/filter.c 	return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
skb              1873 net/core/filter.c BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
skb              1882 net/core/filter.c 	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
skb              1885 net/core/filter.c 	ptr = (__sum16 *)(skb->data + offset);
skb              1917 net/core/filter.c BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
skb              1930 net/core/filter.c 	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
skb              1933 net/core/filter.c 	ptr = (__sum16 *)(skb->data + offset);
skb              1942 net/core/filter.c 		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
skb              1945 net/core/filter.c 		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
skb              1948 net/core/filter.c 		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
skb              2009 net/core/filter.c BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
skb              2015 net/core/filter.c 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb              2016 net/core/filter.c 		return (skb->csum = csum_add(skb->csum, csum));
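
bpf_l3_csum_replace() and bpf_l4_csum_replace() are thin wrappers around the
incremental checksum helpers: the L3 case only touches the IP header checksum,
while the L4 case must also account for fields that feed the transport
pseudo-header. A sketch of a 4-byte rewrite that uses both; the function name is
hypothetical, and it assumes the field is pseudo-header relevant (e.g. an IPv4
address under TCP or UDP):

#include <linux/skbuff.h>
#include <net/checksum.h>

/* Rewrite one __be32 field, fixing both the IP header checksum and
 * the transport checksum incrementally.
 */
static void example_replace_be32(struct sk_buff *skb, __sum16 *l3_check,
				 __sum16 *l4_check, __be32 *field,
				 __be32 new_val)
{
	csum_replace4(l3_check, *field, new_val);	/* IP header sum */
	inet_proto_csum_replace4(l4_check, skb, *field, new_val,
				 true /* pseudo-header */);
	*field = new_val;
}
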
skb              2029 net/core/filter.c static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
skb              2031 net/core/filter.c 	return dev_forward_skb(dev, skb);
skb              2035 net/core/filter.c 				      struct sk_buff *skb)
skb              2037 net/core/filter.c 	int ret = ____dev_forward_skb(dev, skb);
skb              2040 net/core/filter.c 		skb->dev = dev;
skb              2041 net/core/filter.c 		ret = netif_rx(skb);
skb              2047 net/core/filter.c static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
skb              2053 net/core/filter.c 		kfree_skb(skb);
skb              2057 net/core/filter.c 	skb->dev = dev;
skb              2058 net/core/filter.c 	skb->tstamp = 0;
skb              2061 net/core/filter.c 	ret = dev_queue_xmit(skb);
skb              2067 net/core/filter.c static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
skb              2070 net/core/filter.c 	unsigned int mlen = skb_network_offset(skb);
skb              2073 net/core/filter.c 		__skb_pull(skb, mlen);
skb              2080 net/core/filter.c 		if (!skb_at_tc_ingress(skb))
skb              2081 net/core/filter.c 			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
skb              2083 net/core/filter.c 	skb_pop_mac_header(skb);
skb              2084 net/core/filter.c 	skb_reset_mac_len(skb);
skb              2086 net/core/filter.c 	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
skb              2089 net/core/filter.c static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
skb              2093 net/core/filter.c 	if (unlikely(skb->mac_header >= skb->network_header)) {
skb              2094 net/core/filter.c 		kfree_skb(skb);
skb              2098 net/core/filter.c 	bpf_push_mac_rcsum(skb);
skb              2100 net/core/filter.c 	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
skb              2103 net/core/filter.c static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
skb              2107 net/core/filter.c 		return __bpf_redirect_common(skb, dev, flags);
skb              2109 net/core/filter.c 		return __bpf_redirect_no_mac(skb, dev, flags);
skb              2112 net/core/filter.c BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
skb              2121 net/core/filter.c 	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
skb              2125 net/core/filter.c 	clone = skb_clone(skb, GFP_ATOMIC);
skb              2134 net/core/filter.c 	ret = bpf_try_make_head_writable(skb);
skb              2168 net/core/filter.c int skb_do_redirect(struct sk_buff *skb)
skb              2173 net/core/filter.c 	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index);
skb              2176 net/core/filter.c 		kfree_skb(skb);
skb              2180 net/core/filter.c 	return __bpf_redirect(skb, dev, ri->flags);
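
The redirect path splits into an ingress half (dev_forward_skb()/netif_rx()) and
an egress half that retargets the skb and hands it to the qdisc layer. A minimal
sketch of the egress step, mirroring __bpf_tx_skb() above (hypothetical name; the
real code also tracks a per-cpu recursion depth):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Egress redirect: retarget the skb, clear the stale delivery time,
 * and hand it to the target device's qdisc.
 */
static int example_tx_redirect(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(!(dev->flags & IFF_UP))) {
		kfree_skb(skb);			/* target is down */
		return -ENETDOWN;
	}

	skb->dev = dev;
	skb->tstamp = 0;	/* an old tstamp would confuse fq pacing */
	return dev_queue_xmit(skb);
}
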
skb              2645 net/core/filter.c BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
skb              2647 net/core/filter.c 	return task_get_classid(skb);
skb              2657 net/core/filter.c BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
skb              2659 net/core/filter.c 	return dst_tclassid(skb);
skb              2669 net/core/filter.c BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
skb              2676 net/core/filter.c 	return skb_get_hash(skb);
skb              2686 net/core/filter.c BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
skb              2691 net/core/filter.c 	skb_clear_hash(skb);
skb              2702 net/core/filter.c BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
skb              2708 net/core/filter.c 	__skb_set_sw_hash(skb, hash, true);
skb              2720 net/core/filter.c BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
skb              2729 net/core/filter.c 	bpf_push_mac_rcsum(skb);
skb              2730 net/core/filter.c 	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
skb              2731 net/core/filter.c 	bpf_pull_mac_rcsum(skb);
skb              2733 net/core/filter.c 	bpf_compute_data_pointers(skb);
skb              2746 net/core/filter.c BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
skb              2750 net/core/filter.c 	bpf_push_mac_rcsum(skb);
skb              2751 net/core/filter.c 	ret = skb_vlan_pop(skb);
skb              2752 net/core/filter.c 	bpf_pull_mac_rcsum(skb);
skb              2754 net/core/filter.c 	bpf_compute_data_pointers(skb);
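
bpf_skb_vlan_push() and bpf_skb_vlan_pop() bracket the VLAN edit with rcsum
fixups: at tc ingress the MAC header bytes are already folded into skb->csum, so
they are pushed before and pulled after the edit to keep CHECKSUM_COMPLETE
consistent. A sketch of the push side under that assumption (hypothetical name):

#include <linux/if_vlan.h>
#include <net/sch_generic.h>

/* rcsum bracketing around the VLAN edit at tc ingress. */
static int example_vlan_push(struct sk_buff *skb, __be16 proto, u16 tci)
{
	int ret;

	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);

	ret = skb_vlan_push(skb, proto, tci);

	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);

	return ret;
}
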
skb              2765 net/core/filter.c static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
skb              2770 net/core/filter.c 	skb_push(skb, len);
skb              2771 net/core/filter.c 	memmove(skb->data, skb->data + len, off);
skb              2772 net/core/filter.c 	memset(skb->data + off, 0, len);
skb              2782 net/core/filter.c static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
skb              2787 net/core/filter.c 	if (unlikely(!pskb_may_pull(skb, off + len)))
skb              2790 net/core/filter.c 	skb_postpull_rcsum(skb, skb->data + off, len);
skb              2791 net/core/filter.c 	memmove(skb->data + len, skb->data, off);
skb              2792 net/core/filter.c 	__skb_pull(skb, len);
skb              2797 net/core/filter.c static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
skb              2799 net/core/filter.c 	bool trans_same = skb->transport_header == skb->network_header;
skb              2806 net/core/filter.c 	ret = bpf_skb_generic_push(skb, off, len);
skb              2808 net/core/filter.c 		skb->mac_header -= len;
skb              2809 net/core/filter.c 		skb->network_header -= len;
skb              2811 net/core/filter.c 			skb->transport_header = skb->network_header;
skb              2817 net/core/filter.c static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
skb              2819 net/core/filter.c 	bool trans_same = skb->transport_header == skb->network_header;
skb              2823 net/core/filter.c 	ret = bpf_skb_generic_pop(skb, off, len);
skb              2825 net/core/filter.c 		skb->mac_header += len;
skb              2826 net/core/filter.c 		skb->network_header += len;
skb              2828 net/core/filter.c 			skb->transport_header = skb->network_header;
skb              2834 net/core/filter.c static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
skb              2837 net/core/filter.c 	u32 off = skb_mac_header_len(skb);
skb              2840 net/core/filter.c 	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
skb              2843 net/core/filter.c 	ret = skb_cow(skb, len_diff);
skb              2847 net/core/filter.c 	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
skb              2851 net/core/filter.c 	if (skb_is_gso(skb)) {
skb              2852 net/core/filter.c 		struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              2869 net/core/filter.c 	skb->protocol = htons(ETH_P_IPV6);
skb              2870 net/core/filter.c 	skb_clear_hash(skb);
skb              2875 net/core/filter.c static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
skb              2878 net/core/filter.c 	u32 off = skb_mac_header_len(skb);
skb              2881 net/core/filter.c 	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
skb              2884 net/core/filter.c 	ret = skb_unclone(skb, GFP_ATOMIC);
skb              2888 net/core/filter.c 	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
skb              2892 net/core/filter.c 	if (skb_is_gso(skb)) {
skb              2893 net/core/filter.c 		struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              2910 net/core/filter.c 	skb->protocol = htons(ETH_P_IP);
skb              2911 net/core/filter.c 	skb_clear_hash(skb);
skb              2916 net/core/filter.c static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
skb              2918 net/core/filter.c 	__be16 from_proto = skb->protocol;
skb              2922 net/core/filter.c 		return bpf_skb_proto_4_to_6(skb);
skb              2926 net/core/filter.c 		return bpf_skb_proto_6_to_4(skb);
skb              2931 net/core/filter.c BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
skb              2956 net/core/filter.c 	ret = bpf_skb_proto_xlat(skb, proto);
skb              2957 net/core/filter.c 	bpf_compute_data_pointers(skb);
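
The 4-to-6/6-to-4 translation rests on bpf_skb_net_hdr_push()/pop(): open or
close a gap at the network header, then shift every header offset in front of the
gap by the same amount. A condensed sketch of the push direction (hypothetical
name; the GSO fixups from the callers above are trimmed):

#include <linux/skbuff.h>
#include <linux/string.h>

/* Open a zeroed gap of 'len' bytes behind the first 'off' bytes (the
 * MAC header), then shift every header offset in front of the gap.
 */
static int example_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	ret = skb_cow(skb, len);	/* guarantee enough headroom */
	if (ret < 0)
		return ret;

	skb_push(skb, len);
	memmove(skb->data, skb->data + len, off);
	memset(skb->data + off, 0, len);

	skb->mac_header -= len;		/* everything ahead moved down */
	skb->network_header -= len;
	if (trans_same)
		skb->transport_header = skb->network_header;

	return 0;
}
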
skb              2970 net/core/filter.c BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
skb              2973 net/core/filter.c 	if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
skb              2977 net/core/filter.c 	skb->pkt_type = pkt_type;
skb              2989 net/core/filter.c static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
skb              2991 net/core/filter.c 	switch (skb->protocol) {
skb              3011 net/core/filter.c static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
skb              3020 net/core/filter.c 	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
skb              3022 net/core/filter.c 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
skb              3027 net/core/filter.c 	ret = skb_cow_head(skb, len_diff);
skb              3032 net/core/filter.c 		if (skb->protocol != htons(ETH_P_IP) &&
skb              3033 net/core/filter.c 		    skb->protocol != htons(ETH_P_IPV6))
skb              3044 net/core/filter.c 		if (skb->encapsulation)
skb              3047 net/core/filter.c 		mac_len = skb->network_header - skb->mac_header;
skb              3048 net/core/filter.c 		inner_net = skb->network_header;
skb              3051 net/core/filter.c 		inner_trans = skb->transport_header;
skb              3054 net/core/filter.c 	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
skb              3059 net/core/filter.c 		skb->inner_mac_header = inner_net - inner_mac_len;
skb              3060 net/core/filter.c 		skb->inner_network_header = inner_net;
skb              3061 net/core/filter.c 		skb->inner_transport_header = inner_trans;
skb              3062 net/core/filter.c 		skb_set_inner_protocol(skb, skb->protocol);
skb              3064 net/core/filter.c 		skb->encapsulation = 1;
skb              3065 net/core/filter.c 		skb_set_network_header(skb, mac_len);
skb              3082 net/core/filter.c 			skb_set_transport_header(skb, mac_len + nh_len);
skb              3086 net/core/filter.c 		if (skb->protocol == htons(ETH_P_IP) &&
skb              3088 net/core/filter.c 			skb->protocol = htons(ETH_P_IPV6);
skb              3089 net/core/filter.c 		else if (skb->protocol == htons(ETH_P_IPV6) &&
skb              3091 net/core/filter.c 			skb->protocol = htons(ETH_P_IP);
skb              3094 net/core/filter.c 	if (skb_is_gso(skb)) {
skb              3095 net/core/filter.c 		struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              3109 net/core/filter.c static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
skb              3117 net/core/filter.c 	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
skb              3119 net/core/filter.c 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
skb              3124 net/core/filter.c 	ret = skb_unclone(skb, GFP_ATOMIC);
skb              3128 net/core/filter.c 	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
skb              3132 net/core/filter.c 	if (skb_is_gso(skb)) {
skb              3133 net/core/filter.c 		struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              3147 net/core/filter.c static u32 __bpf_skb_max_len(const struct sk_buff *skb)
skb              3149 net/core/filter.c 	return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
skb              3153 net/core/filter.c BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
skb              3157 net/core/filter.c 	u32 len_min = bpf_skb_net_base_len(skb);
skb              3158 net/core/filter.c 	u32 len_max = __bpf_skb_max_len(skb);
skb              3159 net/core/filter.c 	__be16 proto = skb->protocol;
skb              3172 net/core/filter.c 	off = skb_mac_header_len(skb);
skb              3175 net/core/filter.c 		off += bpf_skb_net_base_len(skb);
skb              3183 net/core/filter.c 	len_cur = skb->len - skb_network_offset(skb);
skb              3186 net/core/filter.c 	    (!shrink && (skb->len + len_diff_abs > len_max &&
skb              3187 net/core/filter.c 			 !skb_is_gso(skb))))
skb              3190 net/core/filter.c 	ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
skb              3191 net/core/filter.c 		       bpf_skb_net_grow(skb, off, len_diff_abs, flags);
skb              3193 net/core/filter.c 	bpf_compute_data_pointers(skb);
skb              3207 net/core/filter.c static u32 __bpf_skb_min_len(const struct sk_buff *skb)
skb              3209 net/core/filter.c 	u32 min_len = skb_network_offset(skb);
skb              3211 net/core/filter.c 	if (skb_transport_header_was_set(skb))
skb              3212 net/core/filter.c 		min_len = skb_transport_offset(skb);
skb              3213 net/core/filter.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              3214 net/core/filter.c 		min_len = skb_checksum_start_offset(skb) +
skb              3215 net/core/filter.c 			  skb->csum_offset + sizeof(__sum16);
skb              3219 net/core/filter.c static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
skb              3221 net/core/filter.c 	unsigned int old_len = skb->len;
skb              3224 net/core/filter.c 	ret = __skb_grow_rcsum(skb, new_len);
skb              3226 net/core/filter.c 		memset(skb->data + old_len, 0, new_len - old_len);
skb              3230 net/core/filter.c static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
skb              3232 net/core/filter.c 	return __skb_trim_rcsum(skb, new_len);
skb              3235 net/core/filter.c static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
skb              3238 net/core/filter.c 	u32 max_len = __bpf_skb_max_len(skb);
skb              3239 net/core/filter.c 	u32 min_len = __bpf_skb_min_len(skb);
skb              3244 net/core/filter.c 	if (skb->encapsulation)
skb              3263 net/core/filter.c 	ret = __bpf_try_make_writable(skb, skb->len);
skb              3265 net/core/filter.c 		if (new_len > skb->len)
skb              3266 net/core/filter.c 			ret = bpf_skb_grow_rcsum(skb, new_len);
skb              3267 net/core/filter.c 		else if (new_len < skb->len)
skb              3268 net/core/filter.c 			ret = bpf_skb_trim_rcsum(skb, new_len);
skb              3269 net/core/filter.c 		if (!ret && skb_is_gso(skb))
skb              3270 net/core/filter.c 			skb_gso_reset(skb);
skb              3275 net/core/filter.c BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
skb              3278 net/core/filter.c 	int ret = __bpf_skb_change_tail(skb, new_len, flags);
skb              3280 net/core/filter.c 	bpf_compute_data_pointers(skb);
skb              3293 net/core/filter.c BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
skb              3296 net/core/filter.c 	int ret = __bpf_skb_change_tail(skb, new_len, flags);
skb              3298 net/core/filter.c 	bpf_compute_data_end_sk_skb(skb);
skb              3311 net/core/filter.c static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
skb              3314 net/core/filter.c 	u32 max_len = __bpf_skb_max_len(skb);
skb              3315 net/core/filter.c 	u32 new_len = skb->len + head_room;
skb              3318 net/core/filter.c 	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
skb              3319 net/core/filter.c 		     new_len < skb->len))
skb              3322 net/core/filter.c 	ret = skb_cow(skb, head_room);
skb              3333 net/core/filter.c 		__skb_push(skb, head_room);
skb              3334 net/core/filter.c 		memset(skb->data, 0, head_room);
skb              3335 net/core/filter.c 		skb_reset_mac_header(skb);
skb              3341 net/core/filter.c BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
skb              3344 net/core/filter.c 	int ret = __bpf_skb_change_head(skb, head_room, flags);
skb              3346 net/core/filter.c 	bpf_compute_data_pointers(skb);
skb              3359 net/core/filter.c BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
skb              3362 net/core/filter.c 	int ret = __bpf_skb_change_head(skb, head_room, flags);
skb              3364 net/core/filter.c 	bpf_compute_data_end_sk_skb(skb);
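
__bpf_skb_change_tail() makes the whole skb writable, then grows or trims it with
the rcsum-preserving helpers, zero-filling any new tail and invalidating stale
GSO metadata. A condensed sketch (hypothetical name; the min/max length policy
checks from above are omitted):

#include <linux/skbuff.h>
#include <linux/string.h>

/* Resize the tail while keeping skb->csum consistent. */
static int example_change_tail(struct sk_buff *skb, u32 new_len)
{
	unsigned int old_len = skb->len;
	int ret;

	ret = skb_ensure_writable(skb, skb->len);
	if (ret)
		return ret;

	if (new_len > old_len) {
		ret = __skb_grow_rcsum(skb, new_len);
		if (!ret)
			memset(skb->data + old_len, 0, new_len - old_len);
	} else if (new_len < old_len) {
		ret = __skb_trim_rcsum(skb, new_len);
	}

	if (!ret && skb_is_gso(skb))
		skb_gso_reset(skb);	/* length changed: GSO meta stale */

	return ret;
}
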
skb              3650 net/core/filter.c 				       struct sk_buff *skb,
skb              3668 net/core/filter.c 		err = dev_map_generic_redirect(dst, skb, xdp_prog);
skb              3677 net/core/filter.c 		consume_skb(skb);
skb              3691 net/core/filter.c int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
skb              3701 net/core/filter.c 		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
skb              3710 net/core/filter.c 	err = xdp_ok_fwd_dev(fwd, skb->len);
skb              3714 net/core/filter.c 	skb->dev = fwd;
skb              3716 net/core/filter.c 	generic_xdp_tx(skb, xdp_prog);
skb              3783 net/core/filter.c static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
skb              3786 net/core/filter.c 	void *ptr = skb_header_pointer(skb, off, len, dst_buff);
skb              3796 net/core/filter.c BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
skb              3803 net/core/filter.c 	if (unlikely(skb_size > skb->len))
skb              3806 net/core/filter.c 	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
skb              3826 net/core/filter.c BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
skb              3829 net/core/filter.c 	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
skb              3896 net/core/filter.c BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
skb              3898 net/core/filter.c 	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
skb              3932 net/core/filter.c BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
skb              3962 net/core/filter.c 	skb_dst_drop(skb);
skb              3964 net/core/filter.c 	skb_dst_set(skb, (struct dst_entry *) md);
skb              4005 net/core/filter.c BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
skb              4008 net/core/filter.c 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
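
The tunnel-key helpers read and write the collect_md metadata dst attached to the
skb; skb_tunnel_info() returns NULL when no tunnel metadata is present. A minimal
read-side sketch, with a hypothetical helper that extracts only the tunnel ID:

#include <net/dst_metadata.h>

/* Read the tunnel ID from attached collect_md metadata, if any. */
static int example_get_tunnel_id(struct sk_buff *skb, __be64 *id)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);

	if (!info)
		return -ENOENT;		/* no tunnel metadata attached */

	*id = info->key.tun_id;
	return 0;
}
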
skb              4055 net/core/filter.c BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
skb              4062 net/core/filter.c 	sk = skb_to_full_sk(skb);
skb              4085 net/core/filter.c BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
skb              4087 net/core/filter.c 	struct sock *sk = skb_to_full_sk(skb);
skb              4104 net/core/filter.c BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
skb              4107 net/core/filter.c 	struct sock *sk = skb_to_full_sk(skb);
skb              4163 net/core/filter.c BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
skb              4165 net/core/filter.c 	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
skb              4199 net/core/filter.c BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
skb              4201 net/core/filter.c 	struct sock *sk = sk_to_full_sk(skb->sk);
skb              4553 net/core/filter.c BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
skb              4556 net/core/filter.c 	const struct sec_path *sp = skb_sec_path(skb);
skb              4874 net/core/filter.c BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
skb              4877 net/core/filter.c 	struct net *net = dev_net(skb->dev);
skb              4903 net/core/filter.c 		if (!is_skb_forwardable(dev, skb))
skb              4921 net/core/filter.c static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
skb              4931 net/core/filter.c 		if (skb->protocol != htons(ETH_P_IPV6))
skb              4934 net/core/filter.c 		err = seg6_do_srh_inline(skb, srh);
skb              4937 net/core/filter.c 		skb_reset_inner_headers(skb);
skb              4938 net/core/filter.c 		skb->encapsulation = 1;
skb              4939 net/core/filter.c 		err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
skb              4945 net/core/filter.c 	bpf_compute_data_pointers(skb);
skb              4949 net/core/filter.c 	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb              4950 net/core/filter.c 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
skb              4952 net/core/filter.c 	return seg6_lookup_nexthop(skb, NULL, 0);
skb              4957 net/core/filter.c static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
skb              4960 net/core/filter.c 	return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
skb              4964 net/core/filter.c BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
skb              4971 net/core/filter.c 		return bpf_push_seg6_encap(skb, type, hdr, len);
skb              4975 net/core/filter.c 		return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
skb              4982 net/core/filter.c BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
skb              4988 net/core/filter.c 		return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
skb              5016 net/core/filter.c BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
skb              5031 net/core/filter.c 	ptr = skb->data + offset;
skb              5038 net/core/filter.c 	if (unlikely(bpf_try_make_writable(skb, offset + len)))
skb              5040 net/core/filter.c 	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
skb              5042 net/core/filter.c 	srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
skb              5044 net/core/filter.c 	memcpy(skb->data + offset, from, len);
skb              5058 net/core/filter.c static void bpf_update_srh_state(struct sk_buff *skb)
skb              5064 net/core/filter.c 	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
skb              5067 net/core/filter.c 		srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
skb              5073 net/core/filter.c BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
skb              5083 net/core/filter.c 		if (!seg6_bpf_has_valid_srh(skb))
skb              5087 net/core/filter.c 		return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
skb              5089 net/core/filter.c 		if (!seg6_bpf_has_valid_srh(skb))
skb              5093 net/core/filter.c 		return seg6_lookup_nexthop(skb, NULL, *(int *)param);
skb              5095 net/core/filter.c 		if (!seg6_bpf_has_valid_srh(skb))
skb              5100 net/core/filter.c 		if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
skb              5102 net/core/filter.c 		if (!pskb_pull(skb, hdroff))
skb              5105 net/core/filter.c 		skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
skb              5106 net/core/filter.c 		skb_reset_network_header(skb);
skb              5107 net/core/filter.c 		skb_reset_transport_header(skb);
skb              5108 net/core/filter.c 		skb->encapsulation = 0;
skb              5110 net/core/filter.c 		bpf_compute_data_pointers(skb);
skb              5111 net/core/filter.c 		bpf_update_srh_state(skb);
skb              5112 net/core/filter.c 		return seg6_lookup_nexthop(skb, NULL, *(int *)param);
skb              5114 net/core/filter.c 		if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
skb              5116 net/core/filter.c 		err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
skb              5119 net/core/filter.c 			bpf_update_srh_state(skb);
skb              5123 net/core/filter.c 		if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
skb              5125 net/core/filter.c 		err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
skb              5128 net/core/filter.c 			bpf_update_srh_state(skb);
skb              5146 net/core/filter.c BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
skb              5164 net/core/filter.c 	ptr = skb->data + offset;
skb              5172 net/core/filter.c 		ret = skb_cow_head(skb, len);
skb              5176 net/core/filter.c 		ret = bpf_skb_net_hdr_push(skb, offset, len);
skb              5178 net/core/filter.c 		ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
skb              5181 net/core/filter.c 	bpf_compute_data_pointers(skb);
skb              5185 net/core/filter.c 	hdr = (struct ipv6hdr *)skb->data;
skb              5186 net/core/filter.c 	hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb              5188 net/core/filter.c 	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
skb              5190 net/core/filter.c 	srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
skb              5258 net/core/filter.c __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
skb              5279 net/core/filter.c 		sdif = inet_sdif(skb);
skb              5281 net/core/filter.c 		sdif = inet6_sdif(skb);
skb              5299 net/core/filter.c __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
skb              5303 net/core/filter.c 	struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
skb              5318 net/core/filter.c bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
skb              5324 net/core/filter.c 	if (skb->dev) {
skb              5325 net/core/filter.c 		caller_net = dev_net(skb->dev);
skb              5326 net/core/filter.c 		ifindex = skb->dev->ifindex;
skb              5328 net/core/filter.c 		caller_net = sock_net(skb->sk);
skb              5332 net/core/filter.c 	return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
skb              5337 net/core/filter.c bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
skb              5340 net/core/filter.c 	struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
skb              5354 net/core/filter.c BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
skb              5357 net/core/filter.c 	return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
skb              5373 net/core/filter.c BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
skb              5376 net/core/filter.c 	return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
skb              5392 net/core/filter.c BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
skb              5395 net/core/filter.c 	return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
skb              5727 net/core/filter.c BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
skb              5731 net/core/filter.c 	if (skb->protocol == cpu_to_be16(ETH_P_IP))
skb              5733 net/core/filter.c 	else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
skb              5738 net/core/filter.c 	if (skb_headlen(skb) < iphdr_len)
skb              5741 net/core/filter.c 	if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
skb              5744 net/core/filter.c 	return INET_ECN_set_ce(skb);
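
bpf_skb_ecn_set_ce() guards INET_ECN_set_ce() with two checks: the IP header must
sit in the linear area, and a cloned skb must be verified writable so the edit
cannot corrupt a shared copy. A sketch of just those guards (hypothetical name;
iphdr_len is the caller-computed IPv4/IPv6 header length):

#include <net/inet_ecn.h>

/* Only flip the ECN bits when the header is linear and writable. */
static int example_set_ce(struct sk_buff *skb, unsigned int iphdr_len)
{
	if (skb_headlen(skb) < iphdr_len)
		return 0;		/* header not linear */

	if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
		return 0;		/* would touch a shared clone */

	return INET_ECN_set_ce(skb);
}
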
skb              8676 net/core/filter.c 	struct sk_buff *skb;
skb              8687 net/core/filter.c 				    struct sock *sk, struct sk_buff *skb,
skb              8690 net/core/filter.c 	reuse_kern->skb = skb;
skb              8693 net/core/filter.c 	reuse_kern->data_end = skb->data + skb_headlen(skb);
skb              8700 net/core/filter.c 				  struct bpf_prog *prog, struct sk_buff *skb,
skb              8706 net/core/filter.c 	bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
skb              8774 net/core/filter.c 	return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
skb              8791 net/core/filter.c 	return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
skb              8873 net/core/filter.c 				    skb,				\
skb               158 net/core/flow_dissector.c static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
skb               163 net/core/flow_dissector.c 	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
skb               199 net/core/flow_dissector.c __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
skb               205 net/core/flow_dissector.c 		data = skb->data;
skb               206 net/core/flow_dissector.c 		hlen = skb_headlen(skb);
skb               212 net/core/flow_dissector.c 		ports = __skb_header_pointer(skb, thoff + poff,
skb               222 net/core/flow_dissector.c void skb_flow_dissect_meta(const struct sk_buff *skb,
skb               234 net/core/flow_dissector.c 	meta->ingress_ifindex = skb->skb_iif;
skb               255 net/core/flow_dissector.c skb_flow_dissect_ct(const struct sk_buff *skb,
skb               270 net/core/flow_dissector.c 	ct = nf_ct_get(skb, &ctinfo);
skb               295 net/core/flow_dissector.c skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
skb               319 net/core/flow_dissector.c 	info = skb_tunnel_info(skb);
skb               405 net/core/flow_dissector.c __skb_flow_dissect_mpls(const struct sk_buff *skb,
skb               418 net/core/flow_dissector.c 	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
skb               451 net/core/flow_dissector.c __skb_flow_dissect_arp(const struct sk_buff *skb,
skb               468 net/core/flow_dissector.c 	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
skb               481 net/core/flow_dissector.c 	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
skb               506 net/core/flow_dissector.c __skb_flow_dissect_gre(const struct sk_buff *skb,
skb               518 net/core/flow_dissector.c 	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
skb               549 net/core/flow_dissector.c 		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
skb               576 net/core/flow_dissector.c 			eth = __skb_header_pointer(skb, *p_nhoff + offset,
skb               598 net/core/flow_dissector.c 		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
skb               647 net/core/flow_dissector.c __skb_flow_dissect_batadv(const struct sk_buff *skb,
skb               657 net/core/flow_dissector.c 	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
skb               679 net/core/flow_dissector.c __skb_flow_dissect_tcp(const struct sk_buff *skb,
skb               689 net/core/flow_dissector.c 	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
skb               703 net/core/flow_dissector.c __skb_flow_dissect_ports(const struct sk_buff *skb,
skb               723 net/core/flow_dissector.c 	key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
skb               728 net/core/flow_dissector.c __skb_flow_dissect_ipv4(const struct sk_buff *skb,
skb               745 net/core/flow_dissector.c __skb_flow_dissect_ipv6(const struct sk_buff *skb,
skb               894 net/core/flow_dissector.c 			const struct sk_buff *skb,
skb               914 net/core/flow_dissector.c 		data = skb->data;
skb               915 net/core/flow_dissector.c 		proto = skb_vlan_tag_present(skb) ?
skb               916 net/core/flow_dissector.c 			 skb->vlan_proto : skb->protocol;
skb               917 net/core/flow_dissector.c 		nhoff = skb_network_offset(skb);
skb               918 net/core/flow_dissector.c 		hlen = skb_headlen(skb);
skb               920 net/core/flow_dissector.c 		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
skb               925 net/core/flow_dissector.c 			ops = skb->dev->dsa_ptr->tag_ops;
skb               927 net/core/flow_dissector.c 			    !ops->flow_dissect(skb, &proto, &offset)) {
skb               949 net/core/flow_dissector.c 	if (skb) {
skb               951 net/core/flow_dissector.c 			if (skb->dev)
skb               952 net/core/flow_dissector.c 				net = dev_net(skb->dev);
skb               953 net/core/flow_dissector.c 			else if (skb->sk)
skb               954 net/core/flow_dissector.c 				net = sock_net(skb->sk);
skb               972 net/core/flow_dissector.c 			if (skb) {
skb               973 net/core/flow_dissector.c 				ctx.skb = skb;
skb               978 net/core/flow_dissector.c 				n_proto = skb->protocol;
skb               993 net/core/flow_dissector.c 		struct ethhdr *eth = eth_hdr(skb);
skb              1010 net/core/flow_dissector.c 		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
skb              1047 net/core/flow_dissector.c 		__skb_flow_dissect_ipv4(skb, flow_dissector,
skb              1056 net/core/flow_dissector.c 		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
skb              1095 net/core/flow_dissector.c 		__skb_flow_dissect_ipv6(skb, flow_dissector,
skb              1107 net/core/flow_dissector.c 		    skb && skb_vlan_tag_present(skb)) {
skb              1108 net/core/flow_dissector.c 			proto = skb->protocol;
skb              1110 net/core/flow_dissector.c 			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
skb              1136 net/core/flow_dissector.c 				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
skb              1137 net/core/flow_dissector.c 				key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
skb              1156 net/core/flow_dissector.c 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
skb              1182 net/core/flow_dissector.c 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
skb              1203 net/core/flow_dissector.c 		fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
skb              1219 net/core/flow_dissector.c 		fdret = __skb_flow_dissect_arp(skb, flow_dissector,
skb              1225 net/core/flow_dissector.c 		fdret = __skb_flow_dissect_batadv(skb, key_control, data,
skb              1255 net/core/flow_dissector.c 		fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
skb              1268 net/core/flow_dissector.c 		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
skb              1287 net/core/flow_dissector.c 		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
skb              1342 net/core/flow_dissector.c 		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
skb              1351 net/core/flow_dissector.c 		__skb_flow_dissect_ports(skb, flow_dissector, target_container,
skb              1359 net/core/flow_dissector.c 		key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
skb              1384 net/core/flow_dissector.c 	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
skb              1513 net/core/flow_dissector.c static inline u32 ___skb_get_hash(const struct sk_buff *skb,
skb              1517 net/core/flow_dissector.c 	skb_flow_dissect_flow_keys(skb, keys,
skb              1552 net/core/flow_dissector.c u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
skb              1559 net/core/flow_dissector.c 	__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
skb              1576 net/core/flow_dissector.c void __skb_get_hash(struct sk_buff *skb)
skb              1583 net/core/flow_dissector.c 	hash = ___skb_get_hash(skb, &keys, &hashrnd);
skb              1585 net/core/flow_dissector.c 	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
skb              1589 net/core/flow_dissector.c __u32 skb_get_hash_perturb(const struct sk_buff *skb,
skb              1594 net/core/flow_dissector.c 	return ___skb_get_hash(skb, &keys, perturb);
skb              1598 net/core/flow_dissector.c u32 __skb_get_poff(const struct sk_buff *skb, void *data,
skb              1614 net/core/flow_dissector.c 		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
skb              1658 net/core/flow_dissector.c u32 skb_get_poff(const struct sk_buff *skb)
skb              1662 net/core/flow_dissector.c 	if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
skb              1666 net/core/flow_dissector.c 	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
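
Nearly every protocol branch in the dissector uses the same idiom:
__skb_header_pointer() returns a pointer into the linear data when the requested
bytes are already contiguous, and otherwise copies them into a caller-provided
stack buffer and points at that. A minimal sketch for an Ethernet header, with a
hypothetical wrapper:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Fetch the Ethernet header at nhoff, linear or not. */
static const struct ethhdr *example_get_eth(const struct sk_buff *skb,
					    int nhoff, struct ethhdr *buf)
{
	return __skb_header_pointer(skb, nhoff, sizeof(*buf),
				    skb->data, skb_headlen(skb), buf);
}
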
skb                26 net/core/gen_stats.c 	if (nla_put_64bit(d->skb, type, size, buf, padattr))
skb                59 net/core/gen_stats.c gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
skb                67 net/core/gen_stats.c 		d->tail = (struct nlattr *)skb_tail_pointer(skb);
skb                68 net/core/gen_stats.c 	d->skb = skb;
skb               110 net/core/gen_stats.c gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
skb               113 net/core/gen_stats.c 	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
skb               409 net/core/gen_stats.c 		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;
skb                12 net/core/gro_cells.c int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
skb                14 net/core/gro_cells.c 	struct net_device *dev = skb->dev;
skb                22 net/core/gro_cells.c 	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
skb                23 net/core/gro_cells.c 		res = netif_rx(skb);
skb                32 net/core/gro_cells.c 		kfree_skb(skb);
skb                37 net/core/gro_cells.c 	__skb_queue_tail(&cell->napi_skbs, skb);
skb                53 net/core/gro_cells.c 	struct sk_buff *skb;
skb                57 net/core/gro_cells.c 		skb = __skb_dequeue(&cell->napi_skbs);
skb                58 net/core/gro_cells.c 		if (!skb)
skb                60 net/core/gro_cells.c 		napi_gro_receive(napi, skb);
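
gro_cells splits work between contexts: gro_cells_receive() queues the skb on a
per-cpu cell and schedules NAPI, and the poll callback drains the queue into
napi_gro_receive() within its budget. A condensed sketch of the drain loop
(hypothetical name; queue locking is assumed to follow the cell's own
discipline):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Feed queued skbs to GRO until the budget runs out. */
static int example_gro_cell_poll(struct napi_struct *napi,
				 struct sk_buff_head *napi_skbs, int budget)
{
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);	/* queue drained */

	return work_done;
}
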
skb                37 net/core/lwt_bpf.c static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
skb                48 net/core/lwt_bpf.c 	bpf_compute_data_pointers(skb);
skb                49 net/core/lwt_bpf.c 	ret = bpf_prog_run_save_cb(lwt->prog, skb);
skb                62 net/core/lwt_bpf.c 			skb_reset_mac_header(skb);
skb                63 net/core/lwt_bpf.c 			ret = skb_do_redirect(skb);
skb                70 net/core/lwt_bpf.c 		kfree_skb(skb);
skb                76 net/core/lwt_bpf.c 		kfree_skb(skb);
skb                86 net/core/lwt_bpf.c static int bpf_lwt_input_reroute(struct sk_buff *skb)
skb                90 net/core/lwt_bpf.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb                91 net/core/lwt_bpf.c 		struct net_device *dev = skb_dst(skb)->dev;
skb                92 net/core/lwt_bpf.c 		struct iphdr *iph = ip_hdr(skb);
skb                95 net/core/lwt_bpf.c 		skb_dst_drop(skb);
skb                96 net/core/lwt_bpf.c 		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
skb                99 net/core/lwt_bpf.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               100 net/core/lwt_bpf.c 		skb_dst_drop(skb);
skb               101 net/core/lwt_bpf.c 		err = ipv6_stub->ipv6_route_input(skb);
skb               108 net/core/lwt_bpf.c 	return dst_input(skb);
skb               111 net/core/lwt_bpf.c 	kfree_skb(skb);
skb               115 net/core/lwt_bpf.c static int bpf_input(struct sk_buff *skb)
skb               117 net/core/lwt_bpf.c 	struct dst_entry *dst = skb_dst(skb);
skb               123 net/core/lwt_bpf.c 		ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
skb               127 net/core/lwt_bpf.c 			return bpf_lwt_input_reroute(skb);
skb               131 net/core/lwt_bpf.c 		kfree_skb(skb);
skb               135 net/core/lwt_bpf.c 	return dst->lwtstate->orig_input(skb);
skb               138 net/core/lwt_bpf.c static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               140 net/core/lwt_bpf.c 	struct dst_entry *dst = skb_dst(skb);
skb               146 net/core/lwt_bpf.c 		ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
skb               154 net/core/lwt_bpf.c 		kfree_skb(skb);
skb               158 net/core/lwt_bpf.c 	return dst->lwtstate->orig_output(net, sk, skb);
skb               161 net/core/lwt_bpf.c static int xmit_check_hhlen(struct sk_buff *skb)
skb               163 net/core/lwt_bpf.c 	int hh_len = skb_dst(skb)->dev->hard_header_len;
skb               165 net/core/lwt_bpf.c 	if (skb_headroom(skb) < hh_len) {
skb               166 net/core/lwt_bpf.c 		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
skb               168 net/core/lwt_bpf.c 		if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
skb               175 net/core/lwt_bpf.c static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
skb               177 net/core/lwt_bpf.c 	struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
skb               185 net/core/lwt_bpf.c 	if (skb->protocol == htons(ETH_P_IP))
skb               187 net/core/lwt_bpf.c 	else if (skb->protocol == htons(ETH_P_IPV6))
skb               192 net/core/lwt_bpf.c 	sk = sk_to_full_sk(skb->sk);
skb               198 net/core/lwt_bpf.c 		net = dev_net(skb_dst(skb)->dev);
skb               202 net/core/lwt_bpf.c 		struct iphdr *iph = ip_hdr(skb);
skb               207 net/core/lwt_bpf.c 		fl4.flowi4_mark = skb->mark;
skb               222 net/core/lwt_bpf.c 		struct ipv6hdr *iph6 = ipv6_hdr(skb);
skb               226 net/core/lwt_bpf.c 		fl6.flowi6_mark = skb->mark;
skb               233 net/core/lwt_bpf.c 		dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL);
skb               250 net/core/lwt_bpf.c 	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
skb               254 net/core/lwt_bpf.c 	skb_dst_drop(skb);
skb               255 net/core/lwt_bpf.c 	skb_dst_set(skb, dst);
skb               257 net/core/lwt_bpf.c 	err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
skb               265 net/core/lwt_bpf.c 	kfree_skb(skb);
skb               269 net/core/lwt_bpf.c static int bpf_xmit(struct sk_buff *skb)
skb               271 net/core/lwt_bpf.c 	struct dst_entry *dst = skb_dst(skb);
skb               276 net/core/lwt_bpf.c 		__be16 proto = skb->protocol;
skb               279 net/core/lwt_bpf.c 		ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
skb               286 net/core/lwt_bpf.c 			if (skb->protocol != proto) {
skb               287 net/core/lwt_bpf.c 				kfree_skb(skb);
skb               293 net/core/lwt_bpf.c 			ret = xmit_check_hhlen(skb);
skb               301 net/core/lwt_bpf.c 			return bpf_lwt_xmit_reroute(skb);
skb               444 net/core/lwt_bpf.c static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
skb               452 net/core/lwt_bpf.c 	nest = nla_nest_start_noflag(skb, attr);
skb               457 net/core/lwt_bpf.c 	    nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
skb               460 net/core/lwt_bpf.c 	return nla_nest_end(skb, nest);
skb               463 net/core/lwt_bpf.c static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
skb               467 net/core/lwt_bpf.c 	if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
skb               468 net/core/lwt_bpf.c 	    bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
skb               469 net/core/lwt_bpf.c 	    bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
skb               524 net/core/lwt_bpf.c static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
skb               527 net/core/lwt_bpf.c 	struct skb_shared_info *shinfo = skb_shinfo(skb);
skb               536 net/core/lwt_bpf.c static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
skb               546 net/core/lwt_bpf.c 	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
skb               550 net/core/lwt_bpf.c 		protocol = ip_hdr(skb)->protocol;
skb               552 net/core/lwt_bpf.c 		next_hdr = skb_network_header(skb) + next_hdr_offset;
skb               554 net/core/lwt_bpf.c 		protocol = ipv6_hdr(skb)->nexthdr;
skb               556 net/core/lwt_bpf.c 		next_hdr = skb_network_header(skb) + next_hdr_offset;
skb               566 net/core/lwt_bpf.c 			return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
skb               568 net/core/lwt_bpf.c 		return handle_gso_type(skb, SKB_GSO_GRE, encap_len);
skb               576 net/core/lwt_bpf.c 			return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
skb               578 net/core/lwt_bpf.c 		return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);
skb               583 net/core/lwt_bpf.c 			return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
skb               585 net/core/lwt_bpf.c 			return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);
skb               592 net/core/lwt_bpf.c int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
skb               616 net/core/lwt_bpf.c 		err = skb_cow_head(skb, len + skb->mac_len);
skb               618 net/core/lwt_bpf.c 		err = skb_cow_head(skb,
skb               619 net/core/lwt_bpf.c 				   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
skb               624 net/core/lwt_bpf.c 	skb_reset_inner_headers(skb);
skb               625 net/core/lwt_bpf.c 	skb_reset_inner_mac_header(skb);  /* mac header is not yet set */
skb               626 net/core/lwt_bpf.c 	skb_set_inner_protocol(skb, skb->protocol);
skb               627 net/core/lwt_bpf.c 	skb->encapsulation = 1;
skb               628 net/core/lwt_bpf.c 	skb_push(skb, len);
skb               630 net/core/lwt_bpf.c 		skb_postpush_rcsum(skb, iph, len);
skb               631 net/core/lwt_bpf.c 	skb_reset_network_header(skb);
skb               632 net/core/lwt_bpf.c 	memcpy(skb_network_header(skb), hdr, len);
skb               633 net/core/lwt_bpf.c 	bpf_compute_data_pointers(skb);
skb               634 net/core/lwt_bpf.c 	skb_clear_hash(skb);
skb               637 net/core/lwt_bpf.c 		skb->protocol = htons(ETH_P_IP);
skb               638 net/core/lwt_bpf.c 		iph = ip_hdr(skb);
skb               644 net/core/lwt_bpf.c 		skb->protocol = htons(ETH_P_IPV6);
skb               647 net/core/lwt_bpf.c 	if (skb_is_gso(skb))
skb               648 net/core/lwt_bpf.c 		return handle_gso_encap(skb, ipv4, len);
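
Note on the net/core/lwt_bpf.c hits above: each hook (bpf_input, bpf_output, bpf_xmit) follows the same dispatch shape. The attached BPF program runs first; depending on its verdict the skb either continues on the original dst path, is re-routed because the program rewrote its headers, or is freed with kfree_skb(). Below is a minimal userspace sketch of that control flow only; struct pkt, hook_fn and run_hook are hypothetical stand-ins, not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum verdict { VERDICT_OK, VERDICT_REDIRECT, VERDICT_DROP };

struct pkt {
	unsigned char *data;
	size_t len;
};

/* Stand-in for the attached BPF program. */
typedef enum verdict (*hook_fn)(struct pkt *);

/* Mirrors the shape of bpf_input()/bpf_xmit(): run the hook, then
 * continue on the original path, re-route, or drop (kfree_skb() in
 * the kernel). */
static int run_hook(struct pkt *p, hook_fn hook,
		    int (*orig)(struct pkt *),
		    int (*reroute)(struct pkt *))
{
	switch (hook(p)) {
	case VERDICT_OK:
		return orig(p);
	case VERDICT_REDIRECT:
		return reroute(p);
	case VERDICT_DROP:
	default:
		free(p->data);
		p->data = NULL;
		return -1;
	}
}

static enum verdict always_ok(struct pkt *p) { (void)p; return VERDICT_OK; }
static int deliver(struct pkt *p) { printf("delivered %zu bytes\n", p->len); free(p->data); return 0; }
static int redo_route(struct pkt *p) { printf("rerouted\n"); free(p->data); return 0; }

int main(void)
{
	struct pkt p = { .data = malloc(64), .len = 64 };

	memset(p.data, 0, 64);
	return run_hook(&p, always_ok, deliver, redo_route);
}
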
skb               221 net/core/lwtunnel.c int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate,
skb               235 net/core/lwtunnel.c 	nest = nla_nest_start_noflag(skb, encap_attr);
skb               243 net/core/lwtunnel.c 		ret = ops->fill_encap(skb, lwtstate);
skb               248 net/core/lwtunnel.c 	nla_nest_end(skb, nest);
skb               249 net/core/lwtunnel.c 	ret = nla_put_u16(skb, encap_type_attr, lwtstate->type);
skb               256 net/core/lwtunnel.c 	nla_nest_cancel(skb, nest);
skb               312 net/core/lwtunnel.c int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               314 net/core/lwtunnel.c 	struct dst_entry *dst = skb_dst(skb);
skb               331 net/core/lwtunnel.c 		ret = ops->output(net, sk, skb);
skb               340 net/core/lwtunnel.c 	kfree_skb(skb);
skb               346 net/core/lwtunnel.c int lwtunnel_xmit(struct sk_buff *skb)
skb               348 net/core/lwtunnel.c 	struct dst_entry *dst = skb_dst(skb);
skb               366 net/core/lwtunnel.c 		ret = ops->xmit(skb);
skb               375 net/core/lwtunnel.c 	kfree_skb(skb);
skb               381 net/core/lwtunnel.c int lwtunnel_input(struct sk_buff *skb)
skb               383 net/core/lwtunnel.c 	struct dst_entry *dst = skb_dst(skb);
skb               400 net/core/lwtunnel.c 		ret = ops->input(skb);
skb               409 net/core/lwtunnel.c 	kfree_skb(skb);
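
Note on the net/core/lwtunnel.c hits above: lwtunnel_input(), lwtunnel_output() and lwtunnel_xmit() share one skeleton, look up the encap ops for lwtstate->type, call the matching op, and kfree_skb() on every failure path so the packet is never leaked. A sketch of that table dispatch under hypothetical names (struct encap_ops here is an illustrative stand-in):

#include <stdio.h>
#include <stdlib.h>

struct pkt { unsigned char *data; size_t len; };

/* Hypothetical per-encap-type ops, loosely modelled on the role
 * lwtunnel_encap_ops plays in the lines above. */
struct encap_ops {
	int (*input)(struct pkt *);
	int (*output)(struct pkt *);
	int (*xmit)(struct pkt *);
};

enum { ENCAP_NONE, ENCAP_BPF, ENCAP_MAX };

static int bpf_in(struct pkt *p)
{
	printf("bpf input, %zu bytes\n", p->len);
	free(p->data);
	return 0;
}

static const struct encap_ops *encap_table[ENCAP_MAX] = {
	[ENCAP_BPF] = &(const struct encap_ops){ .input = bpf_in },
};

/* Mirrors lwtunnel_input(): dispatch by type, drop the packet on any
 * failure path (kfree_skb() in the kernel). */
static int tunnel_input(struct pkt *p, int type)
{
	const struct encap_ops *ops;

	if (type <= ENCAP_NONE || type >= ENCAP_MAX ||
	    !(ops = encap_table[type]) || !ops->input) {
		free(p->data);		/* error: free, don't leak */
		return -1;
	}
	return ops->input(p);
}

int main(void)
{
	struct pkt p = { .data = malloc(32), .len = 32 };

	return tunnel_input(&p, ENCAP_BPF);
}
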
skb                93 net/core/neighbour.c static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
skb                95 net/core/neighbour.c 	kfree_skb(skb);
skb               278 net/core/neighbour.c 	struct sk_buff *skb;
skb               280 net/core/neighbour.c 	while ((skb = skb_dequeue(list)) != NULL) {
skb               281 net/core/neighbour.c 		dev_put(skb->dev);
skb               282 net/core/neighbour.c 		kfree_skb(skb);
skb               979 net/core/neighbour.c 	struct sk_buff *skb;
skb               991 net/core/neighbour.c 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
skb               993 net/core/neighbour.c 		neigh->ops->error_report(neigh, skb);
skb              1003 net/core/neighbour.c 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
skb              1005 net/core/neighbour.c 	if (skb)
skb              1006 net/core/neighbour.c 		skb = skb_clone(skb, GFP_ATOMIC);
skb              1009 net/core/neighbour.c 		neigh->ops->solicit(neigh, skb);
skb              1011 net/core/neighbour.c 	consume_skb(skb);
skb              1104 net/core/neighbour.c int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
skb              1136 net/core/neighbour.c 			kfree_skb(skb);
skb              1149 net/core/neighbour.c 		if (skb) {
skb              1150 net/core/neighbour.c 			while (neigh->arp_queue_len_bytes + skb->truesize >
skb              1161 net/core/neighbour.c 			skb_dst_force(skb);
skb              1162 net/core/neighbour.c 			__skb_queue_tail(&neigh->arp_queue, skb);
skb              1163 net/core/neighbour.c 			neigh->arp_queue_len_bytes += skb->truesize;
skb              1180 net/core/neighbour.c 	kfree_skb(skb);
skb              1359 net/core/neighbour.c 		struct sk_buff *skb;
skb              1364 net/core/neighbour.c 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
skb              1365 net/core/neighbour.c 			struct dst_entry *dst = skb_dst(skb);
skb              1380 net/core/neighbour.c 				n2 = dst_neigh_lookup_skb(dst, skb);
skb              1384 net/core/neighbour.c 			n1->output(n1, skb);
skb              1467 net/core/neighbour.c int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
skb              1471 net/core/neighbour.c 	if (!neigh_event_send(neigh, skb)) {
skb              1480 net/core/neighbour.c 			__skb_pull(skb, skb_network_offset(skb));
skb              1482 net/core/neighbour.c 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
skb              1483 net/core/neighbour.c 					      neigh->ha, NULL, skb->len);
skb              1487 net/core/neighbour.c 			rc = dev_queue_xmit(skb);
skb              1495 net/core/neighbour.c 	kfree_skb(skb);
skb              1502 net/core/neighbour.c int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
skb              1509 net/core/neighbour.c 		__skb_pull(skb, skb_network_offset(skb));
skb              1511 net/core/neighbour.c 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
skb              1512 net/core/neighbour.c 				      neigh->ha, NULL, skb->len);
skb              1516 net/core/neighbour.c 		err = dev_queue_xmit(skb);
skb              1519 net/core/neighbour.c 		kfree_skb(skb);
skb              1525 net/core/neighbour.c int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
skb              1527 net/core/neighbour.c 	return dev_queue_xmit(skb);
skb              1536 net/core/neighbour.c 	struct sk_buff *skb, *n;
skb              1540 net/core/neighbour.c 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
skb              1541 net/core/neighbour.c 		long tdif = NEIGH_CB(skb)->sched_next - now;
skb              1544 net/core/neighbour.c 			struct net_device *dev = skb->dev;
skb              1546 net/core/neighbour.c 			__skb_unlink(skb, &tbl->proxy_queue);
skb              1549 net/core/neighbour.c 				tbl->proxy_redo(skb);
skb              1552 net/core/neighbour.c 				kfree_skb(skb);
skb              1566 net/core/neighbour.c 		    struct sk_buff *skb)
skb              1574 net/core/neighbour.c 		kfree_skb(skb);
skb              1578 net/core/neighbour.c 	NEIGH_CB(skb)->sched_next = sched_next;
skb              1579 net/core/neighbour.c 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
skb              1586 net/core/neighbour.c 	skb_dst_drop(skb);
skb              1587 net/core/neighbour.c 	dev_hold(skb->dev);
skb              1588 net/core/neighbour.c 	__skb_queue_tail(&tbl->proxy_queue, skb);
skb              1784 net/core/neighbour.c static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1787 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
skb              1839 net/core/neighbour.c 			     NETLINK_CB(skb).portid, extack);
skb              1849 net/core/neighbour.c static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1854 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
skb              1971 net/core/neighbour.c 				     NETLINK_CB(skb).portid, extack);
skb              1979 net/core/neighbour.c static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
skb              1983 net/core/neighbour.c 	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
skb              1988 net/core/neighbour.c 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
skb              1989 net/core/neighbour.c 	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
skb              1990 net/core/neighbour.c 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
skb              1993 net/core/neighbour.c 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
skb              1995 net/core/neighbour.c 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
skb              1996 net/core/neighbour.c 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
skb              1997 net/core/neighbour.c 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
skb              1999 net/core/neighbour.c 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
skb              2001 net/core/neighbour.c 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
skb              2003 net/core/neighbour.c 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
skb              2005 net/core/neighbour.c 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
skb              2007 net/core/neighbour.c 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
skb              2009 net/core/neighbour.c 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
skb              2011 net/core/neighbour.c 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
skb              2013 net/core/neighbour.c 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
skb              2015 net/core/neighbour.c 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
skb              2017 net/core/neighbour.c 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
skb              2020 net/core/neighbour.c 	return nla_nest_end(skb, nest);
skb              2023 net/core/neighbour.c 	nla_nest_cancel(skb, nest);
skb              2027 net/core/neighbour.c static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
skb              2033 net/core/neighbour.c 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
skb              2044 net/core/neighbour.c 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
skb              2045 net/core/neighbour.c 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
skb              2046 net/core/neighbour.c 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
skb              2047 net/core/neighbour.c 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
skb              2048 net/core/neighbour.c 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
skb              2070 net/core/neighbour.c 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
skb              2097 net/core/neighbour.c 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
skb              2103 net/core/neighbour.c 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
skb              2107 net/core/neighbour.c 	nlmsg_end(skb, nlh);
skb              2112 net/core/neighbour.c 	nlmsg_cancel(skb, nlh);
skb              2116 net/core/neighbour.c static int neightbl_fill_param_info(struct sk_buff *skb,
skb              2125 net/core/neighbour.c 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
skb              2136 net/core/neighbour.c 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
skb              2137 net/core/neighbour.c 	    neightbl_fill_parms(skb, parms) < 0)
skb              2141 net/core/neighbour.c 	nlmsg_end(skb, nlh);
skb              2145 net/core/neighbour.c 	nlmsg_cancel(skb, nlh);
skb              2175 net/core/neighbour.c static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2178 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
skb              2361 net/core/neighbour.c static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
skb              2364 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
skb              2389 net/core/neighbour.c 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
skb              2403 net/core/neighbour.c 			if (neightbl_fill_param_info(skb, tbl, p,
skb              2404 net/core/neighbour.c 						     NETLINK_CB(cb->skb).portid,
skb              2419 net/core/neighbour.c 	return skb->len;
skb              2422 net/core/neighbour.c static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
skb              2430 net/core/neighbour.c 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
skb              2442 net/core/neighbour.c 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
skb              2451 net/core/neighbour.c 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
skb              2463 net/core/neighbour.c 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
skb              2464 net/core/neighbour.c 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
skb              2467 net/core/neighbour.c 	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
skb              2470 net/core/neighbour.c 	nlmsg_end(skb, nlh);
skb              2474 net/core/neighbour.c 	nlmsg_cancel(skb, nlh);
skb              2478 net/core/neighbour.c static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
skb              2485 net/core/neighbour.c 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
skb              2498 net/core/neighbour.c 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
skb              2501 net/core/neighbour.c 	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
skb              2504 net/core/neighbour.c 	nlmsg_end(skb, nlh);
skb              2508 net/core/neighbour.c 	nlmsg_cancel(skb, nlh);
skb              2545 net/core/neighbour.c static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
skb              2549 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
skb              2573 net/core/neighbour.c 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
skb              2584 net/core/neighbour.c 	rc = skb->len;
skb              2592 net/core/neighbour.c static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
skb              2597 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
skb              2616 net/core/neighbour.c 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
skb              2629 net/core/neighbour.c 	rc = skb->len;
skb              2698 net/core/neighbour.c static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
skb              2733 net/core/neighbour.c 			err = pneigh_dump_table(tbl, skb, cb, &filter);
skb              2735 net/core/neighbour.c 			err = neigh_dump_table(tbl, skb, cb, &filter);
skb              2741 net/core/neighbour.c 	return skb->len;
skb              2817 net/core/neighbour.c 	struct sk_buff *skb;
skb              2820 net/core/neighbour.c 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
skb              2821 net/core/neighbour.c 	if (!skb)
skb              2824 net/core/neighbour.c 	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
skb              2826 net/core/neighbour.c 		kfree_skb(skb);
skb              2830 net/core/neighbour.c 	err = rtnl_unicast(skb, net, pid);
skb              2845 net/core/neighbour.c 	struct sk_buff *skb;
skb              2848 net/core/neighbour.c 	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
skb              2849 net/core/neighbour.c 	if (!skb)
skb              2852 net/core/neighbour.c 	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
skb              2854 net/core/neighbour.c 		kfree_skb(skb);
skb              2858 net/core/neighbour.c 	err = rtnl_unicast(skb, net, pid);
skb              2982 net/core/neighbour.c 	       const void *addr, struct sk_buff *skb)
skb              3007 net/core/neighbour.c 		err = neigh->output(neigh, skb);
skb              3011 net/core/neighbour.c 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
skb              3012 net/core/neighbour.c 				      addr, NULL, skb->len);
skb              3015 net/core/neighbour.c 		err = dev_queue_xmit(skb);
skb              3020 net/core/neighbour.c 	kfree_skb(skb);
skb              3348 net/core/neighbour.c 	struct sk_buff *skb;
skb              3351 net/core/neighbour.c 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
skb              3352 net/core/neighbour.c 	if (skb == NULL)
skb              3355 net/core/neighbour.c 	err = neigh_fill_info(skb, n, pid, 0, type, flags);
skb              3359 net/core/neighbour.c 		kfree_skb(skb);
skb              3362 net/core/neighbour.c 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
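
Note on the net/core/neighbour.c hits above: a recurring pattern is the unresolved-neighbour backlog in __neigh_event_send(). While resolution is pending, packets are parked on arp_queue, but the backlog is capped in bytes (skb->truesize), evicting from the head before queuing the new packet at the tail. A self-contained sketch of that byte-budget FIFO; the names (struct arp_backlog, backlog_add) are hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	struct pkt *next;
	size_t truesize;	/* full memory cost, as skb->truesize */
};

struct arp_backlog {
	struct pkt *head, *tail;
	size_t bytes;		/* current backlog, in bytes */
	size_t limit;		/* QUEUE_LEN_BYTES equivalent */
};

/* Mirrors the loop in __neigh_event_send(): make room by dropping the
 * oldest packets, then append the new one and account its truesize. */
static void backlog_add(struct arp_backlog *q, struct pkt *p)
{
	while (q->head && q->bytes + p->truesize > q->limit) {
		struct pkt *old = q->head;

		q->head = old->next;
		if (!q->head)
			q->tail = NULL;
		q->bytes -= old->truesize;
		free(old);		/* kfree_skb() in the kernel */
	}
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	q->bytes += p->truesize;
}

int main(void)
{
	struct arp_backlog q = { .limit = 4096 };

	for (int i = 0; i < 8; i++) {
		struct pkt *p = calloc(1, sizeof(*p));

		p->truesize = 1024;
		backlog_add(&q, p);
	}
	printf("backlog: %zu bytes\n", q.bytes);	/* capped at 4096 */
	while (q.head) {
		struct pkt *p = q.head;

		q.head = p->next;
		free(p);
	}
	return 0;
}
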
skb               722 net/core/net_namespace.c static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               725 net/core/net_namespace.c 	struct net *net = sock_net(skb->sk);
skb               770 net/core/net_namespace.c 		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
skb               801 net/core/net_namespace.c static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
skb               806 net/core/net_namespace.c 	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
skb               814 net/core/net_namespace.c 	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
skb               818 net/core/net_namespace.c 	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
skb               821 net/core/net_namespace.c 	nlmsg_end(skb, nlh);
skb               825 net/core/net_namespace.c 	nlmsg_cancel(skb, nlh);
skb               829 net/core/net_namespace.c static int rtnl_net_valid_getid_req(struct sk_buff *skb,
skb               836 net/core/net_namespace.c 	if (!netlink_strict_get_check(skb))
skb               866 net/core/net_namespace.c static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               869 net/core/net_namespace.c 	struct net *net = sock_net(skb->sk);
skb               872 net/core/net_namespace.c 		.portid = NETLINK_CB(skb).portid,
skb               881 net/core/net_namespace.c 	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
skb               909 net/core/net_namespace.c 		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
skb               932 net/core/net_namespace.c 	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
skb               947 net/core/net_namespace.c 	struct sk_buff *skb;
skb               964 net/core/net_namespace.c 	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
skb              1015 net/core/net_namespace.c static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
skb              1018 net/core/net_namespace.c 		.tgt_net = sock_net(skb->sk),
skb              1019 net/core/net_namespace.c 		.skb = skb,
skb              1021 net/core/net_namespace.c 			.portid = NETLINK_CB(cb->skb).portid,
skb              1032 net/core/net_namespace.c 		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
skb              1055 net/core/net_namespace.c 	return err < 0 ? err : skb->len;
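
Note on the net/core/net_namespace.c hits above: rtnl_net_fill() shows the standard netlink reply shape. nlmsg_put() reserves the header, each nla_put_*() appends one attribute, and any failure unwinds through nlmsg_cancel() so a partially built message is never sent. A toy TLV builder sketching that build-or-roll-back discipline; struct msg and its helpers are hypothetical, not libnl or kernel API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg {
	uint8_t buf[256];
	size_t len;
	size_t mark;		/* rollback point, as nlmsg_cancel() uses */
};

/* Append one type/length/value attribute; fail if it would not fit. */
static int put_attr(struct msg *m, uint16_t type, const void *val, uint16_t vlen)
{
	if (m->len + 4 + vlen > sizeof(m->buf))
		return -1;
	memcpy(m->buf + m->len, &type, 2);
	memcpy(m->buf + m->len + 2, &vlen, 2);
	memcpy(m->buf + m->len + 4, val, vlen);
	m->len += 4 + vlen;
	return 0;
}

static void begin(struct msg *m) { m->mark = m->len; }
static void cancel(struct msg *m) { m->len = m->mark; }	/* drop partial message */

int main(void)
{
	struct msg m = { .len = 0 };
	int32_t nsid = 7;

	begin(&m);
	if (put_attr(&m, 1 /* NETNSA_NSID-like */, &nsid, sizeof(nsid)) < 0) {
		cancel(&m);	/* mirrors nlmsg_cancel() on nla_put failure */
		return 1;
	}
	printf("built %zu bytes\n", m.len);
	return 0;
}
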
skb                72 net/core/netpoll.c static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb                78 net/core/netpoll.c 	features = netif_skb_features(skb);
skb                80 net/core/netpoll.c 	if (skb_vlan_tag_present(skb) &&
skb                81 net/core/netpoll.c 	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
skb                82 net/core/netpoll.c 		skb = __vlan_hwaccel_push_inside(skb);
skb                83 net/core/netpoll.c 		if (unlikely(!skb)) {
skb                92 net/core/netpoll.c 	status = netdev_start_xmit(skb, dev, txq, false);
skb               102 net/core/netpoll.c 	struct sk_buff *skb;
skb               105 net/core/netpoll.c 	while ((skb = skb_dequeue(&npinfo->txq))) {
skb               106 net/core/netpoll.c 		struct net_device *dev = skb->dev;
skb               111 net/core/netpoll.c 			kfree_skb(skb);
skb               117 net/core/netpoll.c 		q_index = skb_get_queue_mapping(skb);
skb               120 net/core/netpoll.c 			skb_set_queue_mapping(skb, q_index);
skb               125 net/core/netpoll.c 		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
skb               126 net/core/netpoll.c 			skb_queue_head(&npinfo->txq, skb);
skb               227 net/core/netpoll.c 	struct sk_buff *skb;
skb               232 net/core/netpoll.c 		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
skb               233 net/core/netpoll.c 		if (!skb)
skb               236 net/core/netpoll.c 		__skb_queue_tail(&skb_pool, skb);
skb               255 net/core/netpoll.c 			struct sk_buff *skb = clist;
skb               257 net/core/netpoll.c 			if (!skb_irq_freeable(skb)) {
skb               258 net/core/netpoll.c 				refcount_set(&skb->users, 1);
skb               259 net/core/netpoll.c 				dev_kfree_skb_any(skb); /* put this one back */
skb               261 net/core/netpoll.c 				__kfree_skb(skb);
skb               272 net/core/netpoll.c 	struct sk_buff *skb;
skb               278 net/core/netpoll.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb               279 net/core/netpoll.c 	if (!skb)
skb               280 net/core/netpoll.c 		skb = skb_dequeue(&skb_pool);
skb               282 net/core/netpoll.c 	if (!skb) {
skb               290 net/core/netpoll.c 	refcount_set(&skb->users, 1);
skb               291 net/core/netpoll.c 	skb_reserve(skb, reserve);
skb               292 net/core/netpoll.c 	return skb;
skb               307 net/core/netpoll.c void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
skb               319 net/core/netpoll.c 		dev_kfree_skb_irq(skb);
skb               327 net/core/netpoll.c 		txq = netdev_core_pick_tx(dev, skb, NULL);
skb               334 net/core/netpoll.c 					status = netpoll_start_xmit(skb, dev, txq);
skb               356 net/core/netpoll.c 		skb_queue_tail(&npinfo->txq, skb);
skb               365 net/core/netpoll.c 	struct sk_buff *skb;
skb               382 net/core/netpoll.c 	skb = find_skb(np, total_len + np->dev->needed_tailroom,
skb               384 net/core/netpoll.c 	if (!skb)
skb               387 net/core/netpoll.c 	skb_copy_to_linear_data(skb, msg, len);
skb               388 net/core/netpoll.c 	skb_put(skb, len);
skb               390 net/core/netpoll.c 	skb_push(skb, sizeof(*udph));
skb               391 net/core/netpoll.c 	skb_reset_transport_header(skb);
skb               392 net/core/netpoll.c 	udph = udp_hdr(skb);
skb               406 net/core/netpoll.c 		skb_push(skb, sizeof(*ip6h));
skb               407 net/core/netpoll.c 		skb_reset_network_header(skb);
skb               408 net/core/netpoll.c 		ip6h = ipv6_hdr(skb);
skb               422 net/core/netpoll.c 		eth = skb_push(skb, ETH_HLEN);
skb               423 net/core/netpoll.c 		skb_reset_mac_header(skb);
skb               424 net/core/netpoll.c 		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
skb               434 net/core/netpoll.c 		skb_push(skb, sizeof(*iph));
skb               435 net/core/netpoll.c 		skb_reset_network_header(skb);
skb               436 net/core/netpoll.c 		iph = ip_hdr(skb);
skb               451 net/core/netpoll.c 		eth = skb_push(skb, ETH_HLEN);
skb               452 net/core/netpoll.c 		skb_reset_mac_header(skb);
skb               453 net/core/netpoll.c 		skb->protocol = eth->h_proto = htons(ETH_P_IP);
skb               459 net/core/netpoll.c 	skb->dev = np->dev;
skb               461 net/core/netpoll.c 	netpoll_send_skb(np, skb);
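
Note on the net/core/netpoll.c hits above: find_skb() is a classic atomic-context allocator fallback. It tries a fresh alloc_skb(GFP_ATOMIC) first and, if that fails, falls back to skb_pool, which was pre-filled from a context where allocation may block. Sketched below with a plain array pool; all names are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 8

static void *skb_pool[POOL_SIZE];
static int pool_top;

/* Called ahead of time, where blocking allocation is allowed,
 * mirroring the refill loop feeding skb_pool above. */
static void pool_refill(size_t sz)
{
	while (pool_top < POOL_SIZE) {
		void *b = malloc(sz);

		if (!b)
			break;
		skb_pool[pool_top++] = b;
	}
}

/* Mirrors find_skb(): prefer a fresh allocation, fall back to the
 * pool, and give up (return NULL) only when both fail. */
static void *find_buf(size_t sz)
{
	void *b = malloc(sz);	/* stands in for alloc_skb(GFP_ATOMIC) */

	if (!b && pool_top > 0)
		b = skb_pool[--pool_top];
	return b;
}

int main(void)
{
	void *b;

	pool_refill(512);
	b = find_buf(512);
	printf("%s\n", b ? "got buffer" : "out of memory");
	free(b);
	while (pool_top > 0)
		free(skb_pool[--pool_top]);
	return 0;
}
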
skb               387 net/core/pktgen.c 	struct sk_buff *skb;	/* skb we are to transmit next, used for when we
skb              2497 net/core/pktgen.c static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
skb              2514 net/core/pktgen.c 		skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;
skb              2517 net/core/pktgen.c 	err = pktgen_xfrm_outer_mode_output(x, skb);
skb              2523 net/core/pktgen.c 	err = x->type->output(x, skb);
skb              2529 net/core/pktgen.c 	x->curlft.bytes += skb->len;
skb              2552 net/core/pktgen.c 			      struct sk_buff *skb, __be16 protocol)
skb              2562 net/core/pktgen.c 			nhead = x->props.header_len - skb_headroom(skb);
skb              2564 net/core/pktgen.c 				ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
skb              2573 net/core/pktgen.c 			skb_pull(skb, ETH_HLEN);
skb              2574 net/core/pktgen.c 			ret = pktgen_output_ipsec(skb, pkt_dev);
skb              2580 net/core/pktgen.c 			eth = skb_push(skb, ETH_HLEN);
skb              2585 net/core/pktgen.c 			iph = ip_hdr(skb);
skb              2586 net/core/pktgen.c 			iph->tot_len = htons(skb->len - ETH_HLEN);
skb              2592 net/core/pktgen.c 	kfree_skb(skb);
skb              2613 net/core/pktgen.c static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
skb              2619 net/core/pktgen.c 	pgh = skb_put(skb, sizeof(*pgh));
skb              2623 net/core/pktgen.c 		skb_put_zero(skb, datalen);
skb              2634 net/core/pktgen.c 			skb_put_zero(skb, len);
skb              2652 net/core/pktgen.c 			skb_frag_set_page(skb, i, pkt_dev->page);
skb              2653 net/core/pktgen.c 			skb_frag_off_set(&skb_shinfo(skb)->frags[i], 0);
skb              2656 net/core/pktgen.c 				skb_frag_size_set(&skb_shinfo(skb)->frags[i],
skb              2659 net/core/pktgen.c 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len);
skb              2660 net/core/pktgen.c 			datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              2661 net/core/pktgen.c 			skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              2662 net/core/pktgen.c 			skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              2664 net/core/pktgen.c 			skb_shinfo(skb)->nr_frags = i;
skb              2696 net/core/pktgen.c 	struct sk_buff *skb = NULL;
skb              2703 net/core/pktgen.c 		skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node);
skb              2704 net/core/pktgen.c 		if (likely(skb)) {
skb              2705 net/core/pktgen.c 			skb_reserve(skb, NET_SKB_PAD);
skb              2706 net/core/pktgen.c 			skb->dev = dev;
skb              2709 net/core/pktgen.c 		skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
skb              2713 net/core/pktgen.c 	if (likely(skb))
skb              2714 net/core/pktgen.c 		skb_reserve(skb, extralen - 16);
skb              2716 net/core/pktgen.c 	return skb;
skb              2722 net/core/pktgen.c 	struct sk_buff *skb = NULL;
skb              2747 net/core/pktgen.c 	skb = pktgen_alloc_skb(odev, pkt_dev);
skb              2748 net/core/pktgen.c 	if (!skb) {
skb              2753 net/core/pktgen.c 	prefetchw(skb->data);
skb              2754 net/core/pktgen.c 	skb_reserve(skb, 16);
skb              2757 net/core/pktgen.c 	eth = skb_push(skb, 14);
skb              2758 net/core/pktgen.c 	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
skb              2764 net/core/pktgen.c 			svlan_tci = skb_put(skb, sizeof(__be16));
skb              2768 net/core/pktgen.c 			svlan_encapsulated_proto = skb_put(skb,
skb              2772 net/core/pktgen.c 		vlan_tci = skb_put(skb, sizeof(__be16));
skb              2776 net/core/pktgen.c 		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
skb              2780 net/core/pktgen.c 	skb_reset_mac_header(skb);
skb              2781 net/core/pktgen.c 	skb_set_network_header(skb, skb->len);
skb              2782 net/core/pktgen.c 	iph = skb_put(skb, sizeof(struct iphdr));
skb              2784 net/core/pktgen.c 	skb_set_transport_header(skb, skb->len);
skb              2785 net/core/pktgen.c 	udph = skb_put(skb, sizeof(struct udphdr));
skb              2786 net/core/pktgen.c 	skb_set_queue_mapping(skb, queue_map);
skb              2787 net/core/pktgen.c 	skb->priority = pkt_dev->skb_priority;
skb              2816 net/core/pktgen.c 	skb->protocol = protocol;
skb              2817 net/core/pktgen.c 	skb->dev = odev;
skb              2818 net/core/pktgen.c 	skb->pkt_type = PACKET_HOST;
skb              2820 net/core/pktgen.c 	pktgen_finalize_skb(pkt_dev, skb, datalen);
skb              2823 net/core/pktgen.c 		skb->ip_summed = CHECKSUM_NONE;
skb              2825 net/core/pktgen.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb              2826 net/core/pktgen.c 		skb->csum = 0;
skb              2827 net/core/pktgen.c 		udp4_hwcsum(skb, iph->saddr, iph->daddr);
skb              2829 net/core/pktgen.c 		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);
skb              2840 net/core/pktgen.c 	if (!process_ipsec(pkt_dev, skb, protocol))
skb              2844 net/core/pktgen.c 	return skb;
skb              2850 net/core/pktgen.c 	struct sk_buff *skb = NULL;
skb              2875 net/core/pktgen.c 	skb = pktgen_alloc_skb(odev, pkt_dev);
skb              2876 net/core/pktgen.c 	if (!skb) {
skb              2881 net/core/pktgen.c 	prefetchw(skb->data);
skb              2882 net/core/pktgen.c 	skb_reserve(skb, 16);
skb              2885 net/core/pktgen.c 	eth = skb_push(skb, 14);
skb              2886 net/core/pktgen.c 	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
skb              2892 net/core/pktgen.c 			svlan_tci = skb_put(skb, sizeof(__be16));
skb              2896 net/core/pktgen.c 			svlan_encapsulated_proto = skb_put(skb,
skb              2900 net/core/pktgen.c 		vlan_tci = skb_put(skb, sizeof(__be16));
skb              2904 net/core/pktgen.c 		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
skb              2908 net/core/pktgen.c 	skb_reset_mac_header(skb);
skb              2909 net/core/pktgen.c 	skb_set_network_header(skb, skb->len);
skb              2910 net/core/pktgen.c 	iph = skb_put(skb, sizeof(struct ipv6hdr));
skb              2912 net/core/pktgen.c 	skb_set_transport_header(skb, skb->len);
skb              2913 net/core/pktgen.c 	udph = skb_put(skb, sizeof(struct udphdr));
skb              2914 net/core/pktgen.c 	skb_set_queue_mapping(skb, queue_map);
skb              2915 net/core/pktgen.c 	skb->priority = pkt_dev->skb_priority;
skb              2951 net/core/pktgen.c 	skb->protocol = protocol;
skb              2952 net/core/pktgen.c 	skb->dev = odev;
skb              2953 net/core/pktgen.c 	skb->pkt_type = PACKET_HOST;
skb              2955 net/core/pktgen.c 	pktgen_finalize_skb(pkt_dev, skb, datalen);
skb              2958 net/core/pktgen.c 		skb->ip_summed = CHECKSUM_NONE;
skb              2960 net/core/pktgen.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb              2961 net/core/pktgen.c 		skb->csum_start = skb_transport_header(skb) - skb->head;
skb              2962 net/core/pktgen.c 		skb->csum_offset = offsetof(struct udphdr, check);
skb              2965 net/core/pktgen.c 		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);
skb              2974 net/core/pktgen.c 	return skb;
skb              3014 net/core/pktgen.c 			pkt_dev->skb = NULL;
skb              3174 net/core/pktgen.c 	int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
skb              3183 net/core/pktgen.c 	kfree_skb(pkt_dev->skb);
skb              3184 net/core/pktgen.c 	pkt_dev->skb = NULL;
skb              3242 net/core/pktgen.c 		kfree_skb(cur->skb);
skb              3243 net/core/pktgen.c 		cur->skb = NULL;
skb              3263 net/core/pktgen.c 		kfree_skb(cur->skb);
skb              3264 net/core/pktgen.c 		cur->skb = NULL;
skb              3287 net/core/pktgen.c 	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
skb              3304 net/core/pktgen.c 	struct sk_buff *skb;
skb              3322 net/core/pktgen.c 	if (!pkt_dev->skb || (pkt_dev->last_ok &&
skb              3325 net/core/pktgen.c 		kfree_skb(pkt_dev->skb);
skb              3327 net/core/pktgen.c 		pkt_dev->skb = fill_packet(odev, pkt_dev);
skb              3328 net/core/pktgen.c 		if (pkt_dev->skb == NULL) {
skb              3334 net/core/pktgen.c 		pkt_dev->last_pkt_size = pkt_dev->skb->len;
skb              3342 net/core/pktgen.c 		skb = pkt_dev->skb;
skb              3343 net/core/pktgen.c 		skb->protocol = eth_type_trans(skb, skb->dev);
skb              3344 net/core/pktgen.c 		refcount_add(burst, &skb->users);
skb              3347 net/core/pktgen.c 			ret = netif_receive_skb(skb);
skb              3352 net/core/pktgen.c 			if (refcount_read(&skb->users) != burst) {
skb              3356 net/core/pktgen.c 				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
skb              3365 net/core/pktgen.c 			skb_reset_redirect(skb);
skb              3370 net/core/pktgen.c 		refcount_inc(&pkt_dev->skb->users);
skb              3372 net/core/pktgen.c 		ret = dev_queue_xmit(pkt_dev->skb);
skb              3400 net/core/pktgen.c 	txq = skb_get_tx_queue(odev, pkt_dev->skb);
skb              3411 net/core/pktgen.c 	refcount_add(burst, &pkt_dev->skb->users);
skb              3414 net/core/pktgen.c 	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
skb              3437 net/core/pktgen.c 		refcount_dec(&(pkt_dev->skb->users));
skb              3441 net/core/pktgen.c 		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
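
Note on the net/core/pktgen.c hits above: the fill_packet paths build frames layer by layer. skb_reserve() leaves headroom, skb_push() grows the packet toward the head (the Ethernet header), skb_put() grows the tail (IP, UDP, payload), and skb_reset_*_header()/skb_set_*_header() record each layer's offset as it lands. A tiny model of those pointer moves, a minimal sketch with a hypothetical struct buf rather than the real sk_buff:

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct buf {
	unsigned char room[256];
	unsigned char *head, *data, *tail, *end;
};

static void buf_init(struct buf *b)
{
	b->head = b->data = b->tail = b->room;
	b->end = b->room + sizeof(b->room);
}

/* skb_reserve(): move data and tail forward to leave headroom. */
static void buf_reserve(struct buf *b, size_t n) { b->data += n; b->tail += n; }

/* skb_put(): extend the tail; returns where the new bytes start. */
static unsigned char *buf_put(struct buf *b, size_t n)
{
	unsigned char *p = b->tail;

	assert(b->tail + n <= b->end);
	b->tail += n;
	return p;
}

/* skb_push(): extend toward the head; returns the new data pointer. */
static unsigned char *buf_push(struct buf *b, size_t n)
{
	assert(b->data - n >= b->head);
	b->data -= n;
	return b->data;
}

int main(void)
{
	struct buf b;

	buf_init(&b);
	buf_reserve(&b, 16);			/* headroom, as in pktgen */
	memset(buf_push(&b, 14), 0, 14);	/* ethernet header */
	memset(buf_put(&b, 20), 0, 20);		/* IPv4 header */
	memset(buf_put(&b, 8), 0, 8);		/* UDP header */
	printf("frame length: %td\n", b.tail - b.data);	/* 42 */
	return 0;
}
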
skb               104 net/core/ptp_classifier.c unsigned int ptp_classify_raw(const struct sk_buff *skb)
skb               106 net/core/ptp_classifier.c 	return BPF_PROG_RUN(ptp_insns, skb);
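
Note on the net/core/ptp_classifier.c hits above: ptp_classify_raw() simply runs a pre-loaded classic-BPF classifier (BPF_PROG_RUN) over the skb and returns a class id that callers switch on. The sketch below fakes the classifier with an EtherType check only, which is deliberately incomplete (real IPv4/IPv6 classification also needs UDP port tests); the class constants are illustrative stand-ins for those in linux/ptp_classify.h:

#include <stdio.h>

enum { PTP_CLASS_NONE, PTP_CLASS_V2_IPV4, PTP_CLASS_V2_IPV6, PTP_CLASS_V2_L2 };

/* Stand-in for ptp_classify_raw(): inspect the frame, return a class. */
static unsigned int classify(const unsigned char *frame)
{
	unsigned int ethertype = frame[12] << 8 | frame[13];

	switch (ethertype) {
	case 0x88F7: return PTP_CLASS_V2_L2;	/* raw PTP over Ethernet */
	case 0x0800: return PTP_CLASS_V2_IPV4;	/* would also need UDP port 319/320 */
	case 0x86DD: return PTP_CLASS_V2_IPV6;	/* likewise */
	default:     return PTP_CLASS_NONE;
	}
}

int main(void)
{
	unsigned char frame[14] = { [12] = 0x88, [13] = 0xF7 };

	printf("class %u\n", classify(frame));
	return 0;
}
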
skb               616 net/core/rtnetlink.c static int rtnl_link_slave_info_fill(struct sk_buff *skb,
skb               630 net/core/rtnetlink.c 	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
skb               633 net/core/rtnetlink.c 		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
skb               636 net/core/rtnetlink.c 		err = ops->fill_slave_info(skb, master_dev, dev);
skb               639 net/core/rtnetlink.c 		nla_nest_end(skb, slave_data);
skb               644 net/core/rtnetlink.c 	nla_nest_cancel(skb, slave_data);
skb               648 net/core/rtnetlink.c static int rtnl_link_info_fill(struct sk_buff *skb,
skb               657 net/core/rtnetlink.c 	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
skb               660 net/core/rtnetlink.c 		err = ops->fill_xstats(skb, dev);
skb               665 net/core/rtnetlink.c 		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
skb               668 net/core/rtnetlink.c 		err = ops->fill_info(skb, dev);
skb               671 net/core/rtnetlink.c 		nla_nest_end(skb, data);
skb               676 net/core/rtnetlink.c 	nla_nest_cancel(skb, data);
skb               680 net/core/rtnetlink.c static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
skb               685 net/core/rtnetlink.c 	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
skb               689 net/core/rtnetlink.c 	err = rtnl_link_info_fill(skb, dev);
skb               693 net/core/rtnetlink.c 	err = rtnl_link_slave_info_fill(skb, dev);
skb               697 net/core/rtnetlink.c 	nla_nest_end(skb, linkinfo);
skb               701 net/core/rtnetlink.c 	nla_nest_cancel(skb, linkinfo);
skb               706 net/core/rtnetlink.c int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
skb               711 net/core/rtnetlink.c 	NETLINK_CB(skb).dst_group = group;
skb               713 net/core/rtnetlink.c 		refcount_inc(&skb->users);
skb               714 net/core/rtnetlink.c 	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
skb               716 net/core/rtnetlink.c 		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
skb               720 net/core/rtnetlink.c int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
skb               724 net/core/rtnetlink.c 	return nlmsg_unicast(rtnl, skb, pid);
skb               728 net/core/rtnetlink.c void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
skb               737 net/core/rtnetlink.c 	nlmsg_notify(rtnl, skb, pid, group, report, flags);
skb               749 net/core/rtnetlink.c int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
skb               758 net/core/rtnetlink.c 	mx = nla_nest_start_noflag(skb, RTA_METRICS);
skb               770 net/core/rtnetlink.c 				if (nla_put_string(skb, i + 1, name))
skb               778 net/core/rtnetlink.c 				if (nla_put_u32(skb, i + 1, user_features))
skb               781 net/core/rtnetlink.c 				if (nla_put_u32(skb, i + 1, metrics[i]))
skb               789 net/core/rtnetlink.c 		nla_nest_cancel(skb, mx);
skb               793 net/core/rtnetlink.c 	return nla_nest_end(skb, mx);
skb               796 net/core/rtnetlink.c 	nla_nest_cancel(skb, mx);
skb               801 net/core/rtnetlink.c int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
skb               821 net/core/rtnetlink.c 	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
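
Note on the net/core/rtnetlink.c hits above: rtnl_link_fill() and rtnetlink_put_metrics() show the nested-attribute idiom. nla_nest_start_noflag() opens a container, children are added inside it, and either nla_nest_end() commits the container or nla_nest_cancel() rolls the message back to where the nest began. Extending the toy TLV builder idea from the net_namespace.c note, a sketch of nest/commit/cancel; nest_start, nest_end and nest_cancel are hypothetical helpers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg { uint8_t buf[256]; size_t len; };

/* Open a container: write the type now, patch the length on commit. */
static size_t nest_start(struct msg *m, uint16_t type)
{
	size_t at = m->len;

	memcpy(m->buf + at, &type, 2);
	m->len += 4;			/* leave room for the length field */
	return at;
}

static void nest_end(struct msg *m, size_t at)
{
	uint16_t total = (uint16_t)(m->len - at);

	memcpy(m->buf + at + 2, &total, 2);	/* commit: patch length */
}

static void nest_cancel(struct msg *m, size_t at)
{
	m->len = at;			/* roll back everything since start */
}

static int put_u32(struct msg *m, uint16_t type, uint32_t v)
{
	uint16_t l = 4;

	if (m->len + 8 > sizeof(m->buf))
		return -1;
	memcpy(m->buf + m->len, &type, 2);
	memcpy(m->buf + m->len + 2, &l, 2);
	memcpy(m->buf + m->len + 4, &v, 4);
	m->len += 8;
	return 0;
}

int main(void)
{
	struct msg m = { .len = 0 };
	size_t nest = nest_start(&m, 1 /* IFLA_LINKINFO-like */);

	if (put_u32(&m, 2, 1500) < 0) {
		nest_cancel(&m, nest);	/* as rtnl_link_fill() does on error */
		return 1;
	}
	nest_end(&m, nest);
	printf("message is %zu bytes\n", m.len);
	return 0;
}
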
skb              1033 net/core/rtnetlink.c static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
skb              1040 net/core/rtnetlink.c 	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
skb              1045 net/core/rtnetlink.c 		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
skb              1048 net/core/rtnetlink.c 		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
skb              1050 net/core/rtnetlink.c 		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
skb              1054 net/core/rtnetlink.c 			nla_nest_cancel(skb, vf_port);
skb              1057 net/core/rtnetlink.c 		nla_nest_end(skb, vf_port);
skb              1060 net/core/rtnetlink.c 	nla_nest_end(skb, vf_ports);
skb              1065 net/core/rtnetlink.c 	nla_nest_cancel(skb, vf_ports);
skb              1069 net/core/rtnetlink.c static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
skb              1074 net/core/rtnetlink.c 	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
skb              1078 net/core/rtnetlink.c 	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
skb              1080 net/core/rtnetlink.c 		nla_nest_cancel(skb, port_self);
skb              1084 net/core/rtnetlink.c 	nla_nest_end(skb, port_self);
skb              1089 net/core/rtnetlink.c static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
skb              1098 net/core/rtnetlink.c 	err = rtnl_port_self_fill(skb, dev);
skb              1103 net/core/rtnetlink.c 		err = rtnl_vf_ports_fill(skb, dev);
skb              1111 net/core/rtnetlink.c static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
skb              1123 net/core/rtnetlink.c 	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
skb              1129 net/core/rtnetlink.c static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
skb              1141 net/core/rtnetlink.c 	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
skb              1147 net/core/rtnetlink.c static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
skb              1159 net/core/rtnetlink.c 	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
skb              1165 net/core/rtnetlink.c static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
skb              1171 net/core/rtnetlink.c 	attr = nla_reserve_64bit(skb, IFLA_STATS64,
skb              1179 net/core/rtnetlink.c 	attr = nla_reserve(skb, IFLA_STATS,
skb              1189 net/core/rtnetlink.c static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
skb              1253 net/core/rtnetlink.c 	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
skb              1256 net/core/rtnetlink.c 	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
skb              1257 net/core/rtnetlink.c 	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
skb              1258 net/core/rtnetlink.c 	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
skb              1259 net/core/rtnetlink.c 	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
skb              1261 net/core/rtnetlink.c 	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
skb              1263 net/core/rtnetlink.c 	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
skb              1265 net/core/rtnetlink.c 	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
skb              1267 net/core/rtnetlink.c 	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
skb              1270 net/core/rtnetlink.c 	    nla_put(skb, IFLA_VF_TRUST,
skb              1273 net/core/rtnetlink.c 	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
skb              1276 net/core/rtnetlink.c 	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
skb              1278 net/core/rtnetlink.c 		nla_nest_cancel(skb, vfvlanlist);
skb              1281 net/core/rtnetlink.c 	nla_nest_end(skb, vfvlanlist);
skb              1286 net/core/rtnetlink.c 	vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
skb              1289 net/core/rtnetlink.c 	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
skb              1291 net/core/rtnetlink.c 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
skb              1293 net/core/rtnetlink.c 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
skb              1295 net/core/rtnetlink.c 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
skb              1297 net/core/rtnetlink.c 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
skb              1299 net/core/rtnetlink.c 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
skb              1301 net/core/rtnetlink.c 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
skb              1303 net/core/rtnetlink.c 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
skb              1305 net/core/rtnetlink.c 		nla_nest_cancel(skb, vfstats);
skb              1308 net/core/rtnetlink.c 	nla_nest_end(skb, vfstats);
skb              1309 net/core/rtnetlink.c 	nla_nest_end(skb, vf);
skb              1313 net/core/rtnetlink.c 	nla_nest_cancel(skb, vf);
skb              1315 net/core/rtnetlink.c 	nla_nest_cancel(skb, vfinfo);
skb              1319 net/core/rtnetlink.c static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
skb              1330 net/core/rtnetlink.c 	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
skb              1336 net/core/rtnetlink.c 	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
skb              1341 net/core/rtnetlink.c 		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
skb              1345 net/core/rtnetlink.c 	nla_nest_end(skb, vfinfo);
skb              1349 net/core/rtnetlink.c static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
skb              1361 net/core/rtnetlink.c 	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
skb              1390 net/core/rtnetlink.c static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
skb              1402 net/core/rtnetlink.c 	err = nla_put_u32(skb, attr, curr_id);
skb              1414 net/core/rtnetlink.c static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
skb              1421 net/core/rtnetlink.c 	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
skb              1427 net/core/rtnetlink.c 	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
skb              1431 net/core/rtnetlink.c 	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
skb              1435 net/core/rtnetlink.c 	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
skb              1440 net/core/rtnetlink.c 	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
skb              1445 net/core/rtnetlink.c 		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
skb              1450 net/core/rtnetlink.c 	nla_nest_end(skb, xdp);
skb              1454 net/core/rtnetlink.c 	nla_nest_cancel(skb, xdp);
skb              1488 net/core/rtnetlink.c static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
skb              1497 net/core/rtnetlink.c 		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
skb              1503 net/core/rtnetlink.c static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
skb              1509 net/core/rtnetlink.c 		return nla_put_u32(skb, IFLA_LINK, ifindex);
skb              1514 net/core/rtnetlink.c static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
skb              1521 net/core/rtnetlink.c 	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
skb              1524 net/core/rtnetlink.c static int rtnl_fill_link_netnsid(struct sk_buff *skb,
skb              1536 net/core/rtnetlink.c 			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
skb              1543 net/core/rtnetlink.c 	return nla_put_iflink(skb, dev, put_iflink);
skb              1546 net/core/rtnetlink.c static int rtnl_fill_link_af(struct sk_buff *skb,
skb              1553 net/core/rtnetlink.c 	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
skb              1564 net/core/rtnetlink.c 		af = nla_nest_start_noflag(skb, af_ops->family);
skb              1568 net/core/rtnetlink.c 		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
skb              1576 net/core/rtnetlink.c 			nla_nest_cancel(skb, af);
skb              1580 net/core/rtnetlink.c 		nla_nest_end(skb, af);
skb              1583 net/core/rtnetlink.c 	nla_nest_end(skb, af_spec);
skb              1587 net/core/rtnetlink.c static int rtnl_fill_ifinfo(struct sk_buff *skb,
skb              1598 net/core/rtnetlink.c 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
skb              1610 net/core/rtnetlink.c 	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
skb              1613 net/core/rtnetlink.c 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
skb              1614 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
skb              1615 net/core/rtnetlink.c 	    nla_put_u8(skb, IFLA_OPERSTATE,
skb              1617 net/core/rtnetlink.c 	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
skb              1618 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
skb              1619 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
skb              1620 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
skb              1621 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
skb              1622 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
skb              1623 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
skb              1624 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
skb              1625 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
skb              1627 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
skb              1629 net/core/rtnetlink.c 	    put_master_ifindex(skb, dev) ||
skb              1630 net/core/rtnetlink.c 	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
skb              1632 net/core/rtnetlink.c 	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
skb              1633 net/core/rtnetlink.c 	    nla_put_ifalias(skb, dev) ||
skb              1634 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
skb              1637 net/core/rtnetlink.c 	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) ||
skb              1638 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
skb              1640 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
skb              1645 net/core/rtnetlink.c 		if (nla_put_u32(skb, IFLA_EVENT, event))
skb              1649 net/core/rtnetlink.c 	if (rtnl_fill_link_ifmap(skb, dev))
skb              1653 net/core/rtnetlink.c 		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
skb              1654 net/core/rtnetlink.c 		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
skb              1658 net/core/rtnetlink.c 	if (rtnl_phys_port_id_fill(skb, dev))
skb              1661 net/core/rtnetlink.c 	if (rtnl_phys_port_name_fill(skb, dev))
skb              1664 net/core/rtnetlink.c 	if (rtnl_phys_switch_id_fill(skb, dev))
skb              1667 net/core/rtnetlink.c 	if (rtnl_fill_stats(skb, dev))
skb              1670 net/core/rtnetlink.c 	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
skb              1673 net/core/rtnetlink.c 	if (rtnl_port_fill(skb, dev, ext_filter_mask))
skb              1676 net/core/rtnetlink.c 	if (rtnl_xdp_fill(skb, dev))
skb              1680 net/core/rtnetlink.c 		if (rtnl_link_fill(skb, dev) < 0)
skb              1684 net/core/rtnetlink.c 	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
skb              1688 net/core/rtnetlink.c 	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
skb              1691 net/core/rtnetlink.c 	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
skb              1696 net/core/rtnetlink.c 	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
skb              1700 net/core/rtnetlink.c 	nlmsg_end(skb, nlh);
skb              1706 net/core/rtnetlink.c 	nlmsg_cancel(skb, nlh);
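
Note on rtnl_fill_ifinfo() above: it chains dozens of nla_put_*() calls with ||, so the first attribute that does not fit short-circuits straight to the nla_put_failure label and the function reports -EMSGSIZE, letting a dump stop cleanly and resume in the next buffer. A compact sketch of that short-circuit-and-unwind shape; put() and fill() are hypothetical stand-ins:

#include <stdio.h>

/* Hypothetical put helper: returns nonzero when the buffer is full. */
static int put(int *used, int cap, int need)
{
	if (*used + need > cap)
		return -1;
	*used += need;
	return 0;
}

/* Mirrors rtnl_fill_ifinfo(): chain the puts with || so the first
 * failure jumps to the cancel label, and report -EMSGSIZE upward. */
static int fill(int cap)
{
	int used = 0;

	if (put(&used, cap, 16) ||	/* name */
	    put(&used, cap, 4) ||	/* mtu */
	    put(&used, cap, 4) ||	/* txqlen */
	    put(&used, cap, 64))	/* stats */
		goto put_failure;
	return used;

put_failure:
	return -90;	/* -EMSGSIZE: caller retries with a fresh buffer */
}

int main(void)
{
	printf("small buffer: %d\n", fill(32));		/* fails part-way */
	printf("large buffer: %d\n", fill(256));	/* fits: 88 bytes */
	return 0;
}
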
skb              1927 net/core/rtnetlink.c static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
skb              1931 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              1964 net/core/rtnetlink.c 			tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
skb              1999 net/core/rtnetlink.c 			err = rtnl_fill_ifinfo(skb, dev, net,
skb              2001 net/core/rtnetlink.c 					       NETLINK_CB(cb->skb).portid,
skb              2007 net/core/rtnetlink.c 				if (likely(skb->len))
skb              2017 net/core/rtnetlink.c 	err = skb->len;
skb              2022 net/core/rtnetlink.c 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              2078 net/core/rtnetlink.c static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
skb              2088 net/core/rtnetlink.c 	if (!netlink_ns_capable(skb, net->user_ns, cap)) {
skb              2404 net/core/rtnetlink.c static int do_setlink(const struct sk_buff *skb,
skb              2417 net/core/rtnetlink.c 		struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
skb              2747 net/core/rtnetlink.c static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2750 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              2785 net/core/rtnetlink.c 	err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
skb              2842 net/core/rtnetlink.c static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2845 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              2868 net/core/rtnetlink.c 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
skb              2998 net/core/rtnetlink.c static int rtnl_group_changelink(const struct sk_buff *skb,
skb              3009 net/core/rtnetlink.c 			err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
skb              3018 net/core/rtnetlink.c static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              3026 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              3160 net/core/rtnetlink.c 		return do_setlink(skb, dev, ifm, extack, tb, ifname, status);
skb              3165 net/core/rtnetlink.c 			return rtnl_group_changelink(skb, net,
skb              3197 net/core/rtnetlink.c 	dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
skb              3211 net/core/rtnetlink.c 		if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
skb              3275 net/core/rtnetlink.c static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              3285 net/core/rtnetlink.c 	ret = __rtnl_newlink(skb, nlh, attr, extack);
skb              3290 net/core/rtnetlink.c static int rtnl_valid_getlink_req(struct sk_buff *skb,
skb              3303 net/core/rtnetlink.c 	if (!netlink_strict_get_check(skb))
skb              3337 net/core/rtnetlink.c static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              3340 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              3351 net/core/rtnetlink.c 	err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
skb              3361 net/core/rtnetlink.c 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
skb              3391 net/core/rtnetlink.c 			       RTM_NEWLINK, NETLINK_CB(skb).portid,
skb              3399 net/core/rtnetlink.c 		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
skb              3407 net/core/rtnetlink.c static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
skb              3409 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              3442 net/core/rtnetlink.c static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
skb              3480 net/core/rtnetlink.c 		ret = dumpit(skb, cb);
skb              3486 net/core/rtnetlink.c 	return skb->len ? : ret;
skb              3495 net/core/rtnetlink.c 	struct sk_buff *skb;
skb              3499 net/core/rtnetlink.c 	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
skb              3500 net/core/rtnetlink.c 	if (skb == NULL)
skb              3503 net/core/rtnetlink.c 	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
skb              3509 net/core/rtnetlink.c 		kfree_skb(skb);
skb              3512 net/core/rtnetlink.c 	return skb;
skb              3519 net/core/rtnetlink.c void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
skb              3523 net/core/rtnetlink.c 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
skb              3530 net/core/rtnetlink.c 	struct sk_buff *skb;
skb              3535 net/core/rtnetlink.c 	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
skb              3537 net/core/rtnetlink.c 	if (skb)
skb              3538 net/core/rtnetlink.c 		rtmsg_ifinfo_send(skb, dev, flags);
skb              3555 net/core/rtnetlink.c static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
skb              3564 net/core/rtnetlink.c 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
skb              3577 net/core/rtnetlink.c 	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
skb              3580 net/core/rtnetlink.c 		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
skb              3583 net/core/rtnetlink.c 	nlmsg_end(skb, nlh);
skb              3587 net/core/rtnetlink.c 	nlmsg_cancel(skb, nlh);
skb              3603 net/core/rtnetlink.c 	struct sk_buff *skb;
skb              3606 net/core/rtnetlink.c 	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
skb              3607 net/core/rtnetlink.c 	if (!skb)
skb              3610 net/core/rtnetlink.c 	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
skb              3613 net/core/rtnetlink.c 		kfree_skb(skb);
skb              3617 net/core/rtnetlink.c 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
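
The rtnl_fdb_notify cluster above (nlmsg_new, nlmsg_populate_fdb_fill, rtnl_notify) is rtnetlink's standard build-then-multicast notify pattern. A minimal sketch of that shape, assuming the fill helper takes (skb, dev, addr, vid, portid, seq, type, flags) and using rtnl_set_sk_err() for the failure path; fdb_notify_sketch itself is a hypothetical name, not the kernel's exact function:

	static void fdb_notify_sketch(struct net *net, struct net_device *dev,
				      u8 *addr, u16 vid, int type)
	{
		struct sk_buff *skb;
		int err = -ENOBUFS;

		skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
		if (!skb)
			goto errout;

		err = nlmsg_populate_fdb_fill(skb, dev, addr, vid, 0, 0, type, 0);
		if (err < 0) {
			kfree_skb(skb);		/* discard the half-built message */
			goto errout;
		}

		rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
		return;
	errout:
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
	}
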
skb              3682 net/core/rtnetlink.c static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              3685 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              3790 net/core/rtnetlink.c static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              3793 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              3801 net/core/rtnetlink.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb              3872 net/core/rtnetlink.c static int nlmsg_populate_fdb(struct sk_buff *skb,
skb              3882 net/core/rtnetlink.c 	portid = NETLINK_CB(cb->skb).portid;
skb              3889 net/core/rtnetlink.c 		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
skb              3912 net/core/rtnetlink.c int ndo_dflt_fdb_dump(struct sk_buff *skb,
skb              3924 net/core/rtnetlink.c 	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
skb              3927 net/core/rtnetlink.c 	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
skb              4023 net/core/rtnetlink.c static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              4029 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              4087 net/core/rtnetlink.c 					err = cops->ndo_fdb_dump(skb, cb,
skb              4096 net/core/rtnetlink.c 				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
skb              4100 net/core/rtnetlink.c 				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
skb              4120 net/core/rtnetlink.c 	return skb->len;
skb              4193 net/core/rtnetlink.c 	struct sk_buff *skb;
skb              4264 net/core/rtnetlink.c 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
skb              4265 net/core/rtnetlink.c 	if (!skb)
skb              4270 net/core/rtnetlink.c 	err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
skb              4276 net/core/rtnetlink.c 	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
skb              4278 net/core/rtnetlink.c 	kfree_skb(skb);
skb              4282 net/core/rtnetlink.c static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
skb              4286 net/core/rtnetlink.c 		return nla_put_u8(skb, attrnum, !!(flags & flag));
skb              4290 net/core/rtnetlink.c int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
skb              4294 net/core/rtnetlink.c 			    int (*vlan_fill)(struct sk_buff *skb,
skb              4306 net/core/rtnetlink.c 	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
skb              4319 net/core/rtnetlink.c 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
skb              4320 net/core/rtnetlink.c 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
skb              4321 net/core/rtnetlink.c 	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
skb              4323 net/core/rtnetlink.c 	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
skb              4325 net/core/rtnetlink.c 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
skb              4327 net/core/rtnetlink.c 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
skb              4330 net/core/rtnetlink.c 	br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
skb              4334 net/core/rtnetlink.c 	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
skb              4335 net/core/rtnetlink.c 		nla_nest_cancel(skb, br_afspec);
skb              4340 net/core/rtnetlink.c 		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
skb              4341 net/core/rtnetlink.c 			nla_nest_cancel(skb, br_afspec);
skb              4346 net/core/rtnetlink.c 		err = vlan_fill(skb, dev, filter_mask);
skb              4348 net/core/rtnetlink.c 			nla_nest_cancel(skb, br_afspec);
skb              4352 net/core/rtnetlink.c 	nla_nest_end(skb, br_afspec);
skb              4354 net/core/rtnetlink.c 	protinfo = nla_nest_start(skb, IFLA_PROTINFO);
skb              4358 net/core/rtnetlink.c 	if (brport_nla_put_flag(skb, flags, mask,
skb              4360 net/core/rtnetlink.c 	    brport_nla_put_flag(skb, flags, mask,
skb              4362 net/core/rtnetlink.c 	    brport_nla_put_flag(skb, flags, mask,
skb              4365 net/core/rtnetlink.c 	    brport_nla_put_flag(skb, flags, mask,
skb              4367 net/core/rtnetlink.c 	    brport_nla_put_flag(skb, flags, mask,
skb              4369 net/core/rtnetlink.c 	    brport_nla_put_flag(skb, flags, mask,
skb              4371 net/core/rtnetlink.c 	    brport_nla_put_flag(skb, flags, mask,
skb              4373 net/core/rtnetlink.c 	    brport_nla_put_flag(skb, flags, mask,
skb              4375 net/core/rtnetlink.c 		nla_nest_cancel(skb, protinfo);
skb              4379 net/core/rtnetlink.c 	nla_nest_end(skb, protinfo);
skb              4381 net/core/rtnetlink.c 	nlmsg_end(skb, nlh);
skb              4384 net/core/rtnetlink.c 	nlmsg_cancel(skb, nlh);
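
ndo_dflt_bridge_getlink() above leans on the netlink nest-open / fill / close idiom, with nla_nest_cancel() rolling back a partially built nest so the message stays parseable. Condensed from the lines above (the specific attributes are simply the ones the listing shows):

	struct nlattr *nest = nla_nest_start_noflag(skb, IFLA_AF_SPEC);

	if (!nest)
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
		nla_nest_cancel(skb, nest);	/* trim the half-built nest */
		goto nla_put_failure;
	}
	nla_nest_end(skb, nest);		/* backfill the nest's total length */
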
skb              4443 net/core/rtnetlink.c static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
skb              4446 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              4449 net/core/rtnetlink.c 	u32 portid = NETLINK_CB(cb->skb).portid;
skb              4467 net/core/rtnetlink.c 						skb, portid, seq, dev,
skb              4470 net/core/rtnetlink.c 					if (likely(skb->len))
skb              4481 net/core/rtnetlink.c 				err = ops->ndo_bridge_getlink(skb, portid,
skb              4486 net/core/rtnetlink.c 					if (likely(skb->len))
skb              4495 net/core/rtnetlink.c 	err = skb->len;
skb              4521 net/core/rtnetlink.c 	struct sk_buff *skb;
skb              4527 net/core/rtnetlink.c 	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
skb              4528 net/core/rtnetlink.c 	if (!skb) {
skb              4533 net/core/rtnetlink.c 	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
skb              4537 net/core/rtnetlink.c 	if (!skb->len)
skb              4540 net/core/rtnetlink.c 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
skb              4544 net/core/rtnetlink.c 	kfree_skb(skb);
skb              4550 net/core/rtnetlink.c static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              4553 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              4627 net/core/rtnetlink.c static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              4630 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              4720 net/core/rtnetlink.c static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
skb              4744 net/core/rtnetlink.c 		attr = nla_reserve_64bit(skb, attr_id, size,
skb              4794 net/core/rtnetlink.c static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
skb              4807 net/core/rtnetlink.c 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
skb              4821 net/core/rtnetlink.c 		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
skb              4836 net/core/rtnetlink.c 			attr = nla_nest_start_noflag(skb,
skb              4841 net/core/rtnetlink.c 			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
skb              4842 net/core/rtnetlink.c 			nla_nest_end(skb, attr);
skb              4859 net/core/rtnetlink.c 			attr = nla_nest_start_noflag(skb,
skb              4864 net/core/rtnetlink.c 			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
skb              4865 net/core/rtnetlink.c 			nla_nest_end(skb, attr);
skb              4875 net/core/rtnetlink.c 		attr = nla_nest_start_noflag(skb,
skb              4880 net/core/rtnetlink.c 		err = rtnl_get_offload_stats(skb, dev, prividx);
skb              4882 net/core/rtnetlink.c 			nla_nest_cancel(skb, attr);
skb              4884 net/core/rtnetlink.c 			nla_nest_end(skb, attr);
skb              4895 net/core/rtnetlink.c 		attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
skb              4905 net/core/rtnetlink.c 				af = nla_nest_start_noflag(skb,
skb              4911 net/core/rtnetlink.c 				err = af_ops->fill_stats_af(skb, dev);
skb              4914 net/core/rtnetlink.c 					nla_nest_cancel(skb, af);
skb              4920 net/core/rtnetlink.c 				nla_nest_end(skb, af);
skb              4925 net/core/rtnetlink.c 		nla_nest_end(skb, attr);
skb              4930 net/core/rtnetlink.c 	nlmsg_end(skb, nlh);
skb              4937 net/core/rtnetlink.c 		nlmsg_cancel(skb, nlh);
skb              4939 net/core/rtnetlink.c 		nlmsg_end(skb, nlh);
skb              5042 net/core/rtnetlink.c static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              5045 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              5053 net/core/rtnetlink.c 	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
skb              5076 net/core/rtnetlink.c 				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
skb              5083 net/core/rtnetlink.c 		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
skb              5089 net/core/rtnetlink.c static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              5093 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              5125 net/core/rtnetlink.c 			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
skb              5126 net/core/rtnetlink.c 						  NETLINK_CB(cb->skb).portid,
skb              5133 net/core/rtnetlink.c 			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
skb              5139 net/core/rtnetlink.c 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              5150 net/core/rtnetlink.c 	return skb->len;
skb              5155 net/core/rtnetlink.c static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              5158 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
skb              5181 net/core/rtnetlink.c 	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
skb              5201 net/core/rtnetlink.c 			min_dump_alloc = rtnl_calcit(skb, nlh);
skb              5217 net/core/rtnetlink.c 			err = netlink_dump_start(rtnl, skb, nlh, &c);
skb              5245 net/core/rtnetlink.c 			err = doit(skb, nlh, extack);
skb              5254 net/core/rtnetlink.c 		err = link->doit(skb, nlh, extack);
skb              5270 net/core/rtnetlink.c static void rtnetlink_rcv(struct sk_buff *skb)
skb              5272 net/core/rtnetlink.c 	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
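
rtnetlink_rcv_msg() above routes each message to a per-(family, type) doit or dumpit handler. Subsystems install those with rtnl_register(); a sketch, assuming its (protocol, msgtype, doit, dumpit, flags) argument order, with my_doit/my_dumpit/my_init as hypothetical names:

	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
	{
		return 0;	/* parse nlh, act, report failures via extack */
	}

	static int my_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
	{
		return skb->len;	/* >0 means "call me again", 0 ends the dump */
	}

	static int __init my_init(void)
	{
		rtnl_register(PF_UNSPEC, RTM_GETLINK, my_doit, my_dumpit, 0);
		return 0;
	}
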
skb               101 net/core/skbuff.c static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
skb               105 net/core/skbuff.c 		 msg, addr, skb->len, sz, skb->head, skb->data,
skb               106 net/core/skbuff.c 		 (unsigned long)skb->tail, (unsigned long)skb->end,
skb               107 net/core/skbuff.c 		 skb->dev ? skb->dev->name : "<NULL>");
skb               111 net/core/skbuff.c static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
skb               113 net/core/skbuff.c 	skb_panic(skb, sz, addr, __func__);
skb               116 net/core/skbuff.c static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
skb               118 net/core/skbuff.c 	skb_panic(skb, sz, addr, __func__);
skb               186 net/core/skbuff.c 	struct sk_buff *skb;
skb               197 net/core/skbuff.c 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
skb               198 net/core/skbuff.c 	if (!skb)
skb               200 net/core/skbuff.c 	prefetchw(skb);
skb               224 net/core/skbuff.c 	memset(skb, 0, offsetof(struct sk_buff, tail));
skb               226 net/core/skbuff.c 	skb->truesize = SKB_TRUESIZE(size);
skb               227 net/core/skbuff.c 	skb->pfmemalloc = pfmemalloc;
skb               228 net/core/skbuff.c 	refcount_set(&skb->users, 1);
skb               229 net/core/skbuff.c 	skb->head = data;
skb               230 net/core/skbuff.c 	skb->data = data;
skb               231 net/core/skbuff.c 	skb_reset_tail_pointer(skb);
skb               232 net/core/skbuff.c 	skb->end = skb->tail + size;
skb               233 net/core/skbuff.c 	skb->mac_header = (typeof(skb->mac_header))~0U;
skb               234 net/core/skbuff.c 	skb->transport_header = (typeof(skb->transport_header))~0U;
skb               237 net/core/skbuff.c 	shinfo = skb_shinfo(skb);
skb               244 net/core/skbuff.c 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
skb               246 net/core/skbuff.c 		skb->fclone = SKB_FCLONE_ORIG;
skb               252 net/core/skbuff.c 	return skb;
skb               254 net/core/skbuff.c 	kmem_cache_free(cache, skb);
skb               255 net/core/skbuff.c 	skb = NULL;
skb               261 net/core/skbuff.c static struct sk_buff *__build_skb_around(struct sk_buff *skb,
skb               270 net/core/skbuff.c 	skb->truesize = SKB_TRUESIZE(size);
skb               271 net/core/skbuff.c 	refcount_set(&skb->users, 1);
skb               272 net/core/skbuff.c 	skb->head = data;
skb               273 net/core/skbuff.c 	skb->data = data;
skb               274 net/core/skbuff.c 	skb_reset_tail_pointer(skb);
skb               275 net/core/skbuff.c 	skb->end = skb->tail + size;
skb               276 net/core/skbuff.c 	skb->mac_header = (typeof(skb->mac_header))~0U;
skb               277 net/core/skbuff.c 	skb->transport_header = (typeof(skb->transport_header))~0U;
skb               280 net/core/skbuff.c 	shinfo = skb_shinfo(skb);
skb               284 net/core/skbuff.c 	return skb;
skb               308 net/core/skbuff.c 	struct sk_buff *skb;
skb               310 net/core/skbuff.c 	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
skb               311 net/core/skbuff.c 	if (unlikely(!skb))
skb               314 net/core/skbuff.c 	memset(skb, 0, offsetof(struct sk_buff, tail));
skb               316 net/core/skbuff.c 	return __build_skb_around(skb, data, frag_size);
skb               326 net/core/skbuff.c 	struct sk_buff *skb = __build_skb(data, frag_size);
skb               328 net/core/skbuff.c 	if (skb && frag_size) {
skb               329 net/core/skbuff.c 		skb->head_frag = 1;
skb               331 net/core/skbuff.c 			skb->pfmemalloc = 1;
skb               333 net/core/skbuff.c 	return skb;
skb               343 net/core/skbuff.c struct sk_buff *build_skb_around(struct sk_buff *skb,
skb               346 net/core/skbuff.c 	if (unlikely(!skb))
skb               349 net/core/skbuff.c 	skb = __build_skb_around(skb, data, frag_size);
skb               351 net/core/skbuff.c 	if (skb && frag_size) {
skb               352 net/core/skbuff.c 		skb->head_frag = 1;
skb               354 net/core/skbuff.c 			skb->pfmemalloc = 1;
skb               356 net/core/skbuff.c 	return skb;
skb               428 net/core/skbuff.c 	struct sk_buff *skb;
skb               436 net/core/skbuff.c 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
skb               437 net/core/skbuff.c 		if (!skb)
skb               463 net/core/skbuff.c 	skb = __build_skb(data, len);
skb               464 net/core/skbuff.c 	if (unlikely(!skb)) {
skb               471 net/core/skbuff.c 		skb->pfmemalloc = 1;
skb               472 net/core/skbuff.c 	skb->head_frag = 1;
skb               475 net/core/skbuff.c 	skb_reserve(skb, NET_SKB_PAD);
skb               476 net/core/skbuff.c 	skb->dev = dev;
skb               479 net/core/skbuff.c 	return skb;
skb               500 net/core/skbuff.c 	struct sk_buff *skb;
skb               507 net/core/skbuff.c 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
skb               508 net/core/skbuff.c 		if (!skb)
skb               523 net/core/skbuff.c 	skb = __build_skb(data, len);
skb               524 net/core/skbuff.c 	if (unlikely(!skb)) {
skb               531 net/core/skbuff.c 		skb->pfmemalloc = 1;
skb               532 net/core/skbuff.c 	skb->head_frag = 1;
skb               535 net/core/skbuff.c 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
skb               536 net/core/skbuff.c 	skb->dev = napi->dev;
skb               539 net/core/skbuff.c 	return skb;
skb               543 net/core/skbuff.c void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
skb               546 net/core/skbuff.c 	skb_fill_page_desc(skb, i, page, off, size);
skb               547 net/core/skbuff.c 	skb->len += size;
skb               548 net/core/skbuff.c 	skb->data_len += size;
skb               549 net/core/skbuff.c 	skb->truesize += truesize;
skb               553 net/core/skbuff.c void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
skb               556 net/core/skbuff.c 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               559 net/core/skbuff.c 	skb->len += size;
skb               560 net/core/skbuff.c 	skb->data_len += size;
skb               561 net/core/skbuff.c 	skb->truesize += truesize;
skb               571 net/core/skbuff.c static inline void skb_drop_fraglist(struct sk_buff *skb)
skb               573 net/core/skbuff.c 	skb_drop_list(&skb_shinfo(skb)->frag_list);
skb               576 net/core/skbuff.c static void skb_clone_fraglist(struct sk_buff *skb)
skb               580 net/core/skbuff.c 	skb_walk_frags(skb, list)
skb               584 net/core/skbuff.c static void skb_free_head(struct sk_buff *skb)
skb               586 net/core/skbuff.c 	unsigned char *head = skb->head;
skb               588 net/core/skbuff.c 	if (skb->head_frag)
skb               594 net/core/skbuff.c static void skb_release_data(struct sk_buff *skb)
skb               596 net/core/skbuff.c 	struct skb_shared_info *shinfo = skb_shinfo(skb);
skb               599 net/core/skbuff.c 	if (skb->cloned &&
skb               600 net/core/skbuff.c 	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
skb               610 net/core/skbuff.c 	skb_zcopy_clear(skb, true);
skb               611 net/core/skbuff.c 	skb_free_head(skb);
skb               617 net/core/skbuff.c static void kfree_skbmem(struct sk_buff *skb)
skb               621 net/core/skbuff.c 	switch (skb->fclone) {
skb               623 net/core/skbuff.c 		kmem_cache_free(skbuff_head_cache, skb);
skb               627 net/core/skbuff.c 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
skb               638 net/core/skbuff.c 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
skb               647 net/core/skbuff.c void skb_release_head_state(struct sk_buff *skb)
skb               649 net/core/skbuff.c 	skb_dst_drop(skb);
skb               650 net/core/skbuff.c 	if (skb->destructor) {
skb               652 net/core/skbuff.c 		skb->destructor(skb);
skb               655 net/core/skbuff.c 	nf_conntrack_put(skb_nfct(skb));
skb               657 net/core/skbuff.c 	skb_ext_put(skb);
skb               661 net/core/skbuff.c static void skb_release_all(struct sk_buff *skb)
skb               663 net/core/skbuff.c 	skb_release_head_state(skb);
skb               664 net/core/skbuff.c 	if (likely(skb->head))
skb               665 net/core/skbuff.c 		skb_release_data(skb);
skb               677 net/core/skbuff.c void __kfree_skb(struct sk_buff *skb)
skb               679 net/core/skbuff.c 	skb_release_all(skb);
skb               680 net/core/skbuff.c 	kfree_skbmem(skb);
skb               691 net/core/skbuff.c void kfree_skb(struct sk_buff *skb)
skb               693 net/core/skbuff.c 	if (!skb_unref(skb))
skb               696 net/core/skbuff.c 	trace_kfree_skb(skb, __builtin_return_address(0));
skb               697 net/core/skbuff.c 	__kfree_skb(skb);
skb               718 net/core/skbuff.c void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
skb               721 net/core/skbuff.c 	struct skb_shared_info *sh = skb_shinfo(skb);
skb               722 net/core/skbuff.c 	struct net_device *dev = skb->dev;
skb               723 net/core/skbuff.c 	struct sock *sk = skb->sk;
skb               733 net/core/skbuff.c 		len = skb->len;
skb               735 net/core/skbuff.c 		len = min_t(int, skb->len, MAX_HEADER + 128);
skb               737 net/core/skbuff.c 	headroom = skb_headroom(skb);
skb               738 net/core/skbuff.c 	tailroom = skb_tailroom(skb);
skb               740 net/core/skbuff.c 	has_mac = skb_mac_header_was_set(skb);
skb               741 net/core/skbuff.c 	has_trans = skb_transport_header_was_set(skb);
skb               748 net/core/skbuff.c 	       level, skb->len, headroom, skb_headlen(skb), tailroom,
skb               749 net/core/skbuff.c 	       has_mac ? skb->mac_header : -1,
skb               750 net/core/skbuff.c 	       has_mac ? skb_mac_header_len(skb) : -1,
skb               751 net/core/skbuff.c 	       skb->network_header,
skb               752 net/core/skbuff.c 	       has_trans ? skb_network_header_len(skb) : -1,
skb               753 net/core/skbuff.c 	       has_trans ? skb->transport_header : -1,
skb               756 net/core/skbuff.c 	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
skb               757 net/core/skbuff.c 	       skb->csum_valid, skb->csum_level,
skb               758 net/core/skbuff.c 	       skb->hash, skb->sw_hash, skb->l4_hash,
skb               759 net/core/skbuff.c 	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
skb               770 net/core/skbuff.c 			       16, 1, skb->head, headroom, false);
skb               772 net/core/skbuff.c 	seg_len = min_t(int, skb_headlen(skb), len);
skb               775 net/core/skbuff.c 			       16, 1, skb->data, seg_len, false);
skb               780 net/core/skbuff.c 			       16, 1, skb_tail_pointer(skb), tailroom, false);
skb               782 net/core/skbuff.c 	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
skb               783 net/core/skbuff.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb               803 net/core/skbuff.c 	if (full_pkt && skb_has_frag_list(skb)) {
skb               805 net/core/skbuff.c 		skb_walk_frags(skb, list_skb)
skb               818 net/core/skbuff.c void skb_tx_error(struct sk_buff *skb)
skb               820 net/core/skbuff.c 	skb_zcopy_clear(skb, true);
skb               832 net/core/skbuff.c void consume_skb(struct sk_buff *skb)
skb               834 net/core/skbuff.c 	if (!skb_unref(skb))
skb               837 net/core/skbuff.c 	trace_consume_skb(skb);
skb               838 net/core/skbuff.c 	__kfree_skb(skb);
skb               849 net/core/skbuff.c void __consume_stateless_skb(struct sk_buff *skb)
skb               851 net/core/skbuff.c 	trace_consume_skb(skb);
skb               852 net/core/skbuff.c 	skb_release_data(skb);
skb               853 net/core/skbuff.c 	kfree_skbmem(skb);
skb               868 net/core/skbuff.c static inline void _kfree_skb_defer(struct sk_buff *skb)
skb               873 net/core/skbuff.c 	skb_release_all(skb);
skb               876 net/core/skbuff.c 	nc->skb_cache[nc->skb_count++] = skb;
skb               880 net/core/skbuff.c 	prefetchw(skb);
skb               890 net/core/skbuff.c void __kfree_skb_defer(struct sk_buff *skb)
skb               892 net/core/skbuff.c 	_kfree_skb_defer(skb);
skb               895 net/core/skbuff.c void napi_consume_skb(struct sk_buff *skb, int budget)
skb               897 net/core/skbuff.c 	if (unlikely(!skb))
skb               902 net/core/skbuff.c 		dev_consume_skb_any(skb);
skb               906 net/core/skbuff.c 	if (!skb_unref(skb))
skb               910 net/core/skbuff.c 	trace_consume_skb(skb);
skb               913 net/core/skbuff.c 	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
skb               914 net/core/skbuff.c 		__kfree_skb(skb);
skb               918 net/core/skbuff.c 	_kfree_skb_defer(skb);
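
The free paths above are split on purpose: kfree_skb() means "dropped" (it fires trace_kfree_skb, which drop monitors watch), consume_skb() means normal completion, and napi_consume_skb() additionally batches sk_buff frees when called from NAPI poll. A driver TX-completion sketch (tx_error and budget are assumed locals):

	if (unlikely(tx_error))
		kfree_skb(skb);			/* shows up as a drop */
	else
		napi_consume_skb(skb, budget);	/* clean completion, batched in NAPI */
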
skb               981 net/core/skbuff.c static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
skb               983 net/core/skbuff.c #define C(x) n->x = skb->x
skb               987 net/core/skbuff.c 	__copy_skb_header(n, skb);
skb               992 net/core/skbuff.c 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
skb              1006 net/core/skbuff.c 	atomic_inc(&(skb_shinfo(skb)->dataref));
skb              1007 net/core/skbuff.c 	skb->cloned = 1;
skb              1098 net/core/skbuff.c 	struct sk_buff *skb;
skb              1102 net/core/skbuff.c 	skb = sock_omalloc(sk, 0, GFP_KERNEL);
skb              1103 net/core/skbuff.c 	if (!skb)
skb              1106 net/core/skbuff.c 	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
skb              1107 net/core/skbuff.c 	uarg = (void *)skb->cb;
skb              1111 net/core/skbuff.c 		kfree_skb(skb);
skb              1176 net/core/skbuff.c static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
skb              1178 net/core/skbuff.c 	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
skb              1198 net/core/skbuff.c 	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
skb              1200 net/core/skbuff.c 	struct sock *sk = skb->sk;
skb              1218 net/core/skbuff.c 	serr = SKB_EXT_ERR(skb);
skb              1232 net/core/skbuff.c 		__skb_queue_tail(q, skb);
skb              1233 net/core/skbuff.c 		skb = NULL;
skb              1240 net/core/skbuff.c 	consume_skb(skb);
skb              1270 net/core/skbuff.c int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
skb              1272 net/core/skbuff.c 	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
skb              1276 net/core/skbuff.c int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
skb              1280 net/core/skbuff.c 	struct ubuf_info *orig_uarg = skb_zcopy(skb);
skb              1282 net/core/skbuff.c 	int err, orig_len = skb->len;
skb              1290 net/core/skbuff.c 	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
skb              1291 net/core/skbuff.c 	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
skb              1292 net/core/skbuff.c 		struct sock *save_sk = skb->sk;
skb              1296 net/core/skbuff.c 		skb->sk = sk;
skb              1297 net/core/skbuff.c 		___pskb_trim(skb, orig_len);
skb              1298 net/core/skbuff.c 		skb->sk = save_sk;
skb              1302 net/core/skbuff.c 	skb_zcopy_set(skb, uarg, NULL);
skb              1303 net/core/skbuff.c 	return skb->len - orig_len;
skb              1342 net/core/skbuff.c int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
skb              1344 net/core/skbuff.c 	int num_frags = skb_shinfo(skb)->nr_frags;
skb              1349 net/core/skbuff.c 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
skb              1355 net/core/skbuff.c 	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
skb              1373 net/core/skbuff.c 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
skb              1400 net/core/skbuff.c 		skb_frag_unref(skb, i);
skb              1404 net/core/skbuff.c 		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
skb              1407 net/core/skbuff.c 	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
skb              1408 net/core/skbuff.c 	skb_shinfo(skb)->nr_frags = new_frags;
skb              1411 net/core/skbuff.c 	skb_zcopy_clear(skb, false);
skb              1430 net/core/skbuff.c struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
skb              1432 net/core/skbuff.c 	struct sk_buff_fclones *fclones = container_of(skb,
skb              1437 net/core/skbuff.c 	if (skb_orphan_frags(skb, gfp_mask))
skb              1440 net/core/skbuff.c 	if (skb->fclone == SKB_FCLONE_ORIG &&
skb              1445 net/core/skbuff.c 		if (skb_pfmemalloc(skb))
skb              1455 net/core/skbuff.c 	return __skb_clone(n, skb);
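
skb_clone() above duplicates only the sk_buff header; the payload stays shared (note the dataref increment in __skb_clone), so a clone must be treated as read-only until unshared. skb_copy() duplicates head and data as well. Sketched side by side:

	struct sk_buff *ro = skb_clone(skb, GFP_ATOMIC);	/* shares the payload */
	struct sk_buff *rw = skb_copy(skb, GFP_ATOMIC);		/* private, writable copy */
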
skb              1459 net/core/skbuff.c void skb_headers_offset_update(struct sk_buff *skb, int off)
skb              1462 net/core/skbuff.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              1463 net/core/skbuff.c 		skb->csum_start += off;
skb              1465 net/core/skbuff.c 	skb->transport_header += off;
skb              1466 net/core/skbuff.c 	skb->network_header   += off;
skb              1467 net/core/skbuff.c 	if (skb_mac_header_was_set(skb))
skb              1468 net/core/skbuff.c 		skb->mac_header += off;
skb              1469 net/core/skbuff.c 	skb->inner_transport_header += off;
skb              1470 net/core/skbuff.c 	skb->inner_network_header += off;
skb              1471 net/core/skbuff.c 	skb->inner_mac_header += off;
skb              1485 net/core/skbuff.c static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
skb              1487 net/core/skbuff.c 	if (skb_pfmemalloc(skb))
skb              1509 net/core/skbuff.c struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
skb              1511 net/core/skbuff.c 	int headerlen = skb_headroom(skb);
skb              1512 net/core/skbuff.c 	unsigned int size = skb_end_offset(skb) + skb->data_len;
skb              1514 net/core/skbuff.c 					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
skb              1522 net/core/skbuff.c 	skb_put(n, skb->len);
skb              1524 net/core/skbuff.c 	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
skb              1526 net/core/skbuff.c 	skb_copy_header(n, skb);
skb              1548 net/core/skbuff.c struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
skb              1551 net/core/skbuff.c 	unsigned int size = skb_headlen(skb) + headroom;
skb              1552 net/core/skbuff.c 	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
skb              1561 net/core/skbuff.c 	skb_put(n, skb_headlen(skb));
skb              1563 net/core/skbuff.c 	skb_copy_from_linear_data(skb, n->data, n->len);
skb              1565 net/core/skbuff.c 	n->truesize += skb->data_len;
skb              1566 net/core/skbuff.c 	n->data_len  = skb->data_len;
skb              1567 net/core/skbuff.c 	n->len	     = skb->len;
skb              1569 net/core/skbuff.c 	if (skb_shinfo(skb)->nr_frags) {
skb              1572 net/core/skbuff.c 		if (skb_orphan_frags(skb, gfp_mask) ||
skb              1573 net/core/skbuff.c 		    skb_zerocopy_clone(n, skb, gfp_mask)) {
skb              1578 net/core/skbuff.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1579 net/core/skbuff.c 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
skb              1580 net/core/skbuff.c 			skb_frag_ref(skb, i);
skb              1585 net/core/skbuff.c 	if (skb_has_frag_list(skb)) {
skb              1586 net/core/skbuff.c 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
skb              1590 net/core/skbuff.c 	skb_copy_header(n, skb);
skb              1612 net/core/skbuff.c int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
skb              1615 net/core/skbuff.c 	int i, osize = skb_end_offset(skb);
skb              1622 net/core/skbuff.c 	BUG_ON(skb_shared(skb));
skb              1626 net/core/skbuff.c 	if (skb_pfmemalloc(skb))
skb              1637 net/core/skbuff.c 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
skb              1640 net/core/skbuff.c 	       skb_shinfo(skb),
skb              1641 net/core/skbuff.c 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
skb              1648 net/core/skbuff.c 	if (skb_cloned(skb)) {
skb              1649 net/core/skbuff.c 		if (skb_orphan_frags(skb, gfp_mask))
skb              1651 net/core/skbuff.c 		if (skb_zcopy(skb))
skb              1652 net/core/skbuff.c 			refcount_inc(&skb_uarg(skb)->refcnt);
skb              1653 net/core/skbuff.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb              1654 net/core/skbuff.c 			skb_frag_ref(skb, i);
skb              1656 net/core/skbuff.c 		if (skb_has_frag_list(skb))
skb              1657 net/core/skbuff.c 			skb_clone_fraglist(skb);
skb              1659 net/core/skbuff.c 		skb_release_data(skb);
skb              1661 net/core/skbuff.c 		skb_free_head(skb);
skb              1663 net/core/skbuff.c 	off = (data + nhead) - skb->head;
skb              1665 net/core/skbuff.c 	skb->head     = data;
skb              1666 net/core/skbuff.c 	skb->head_frag = 0;
skb              1667 net/core/skbuff.c 	skb->data    += off;
skb              1669 net/core/skbuff.c 	skb->end      = size;
skb              1672 net/core/skbuff.c 	skb->end      = skb->head + size;
skb              1674 net/core/skbuff.c 	skb->tail	      += off;
skb              1675 net/core/skbuff.c 	skb_headers_offset_update(skb, nhead);
skb              1676 net/core/skbuff.c 	skb->cloned   = 0;
skb              1677 net/core/skbuff.c 	skb->hdr_len  = 0;
skb              1678 net/core/skbuff.c 	skb->nohdr    = 0;
skb              1679 net/core/skbuff.c 	atomic_set(&skb_shinfo(skb)->dataref, 1);
skb              1681 net/core/skbuff.c 	skb_metadata_clear(skb);
skb              1687 net/core/skbuff.c 	if (!skb->sk || skb->destructor == sock_edemux)
skb              1688 net/core/skbuff.c 		skb->truesize += size - osize;
skb              1701 net/core/skbuff.c struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
skb              1704 net/core/skbuff.c 	int delta = headroom - skb_headroom(skb);
skb              1707 net/core/skbuff.c 		skb2 = pskb_copy(skb, GFP_ATOMIC);
skb              1709 net/core/skbuff.c 		skb2 = skb_clone(skb, GFP_ATOMIC);
skb              1738 net/core/skbuff.c struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
skb              1745 net/core/skbuff.c 	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
skb              1746 net/core/skbuff.c 					gfp_mask, skb_alloc_rx_flag(skb),
skb              1748 net/core/skbuff.c 	int oldheadroom = skb_headroom(skb);
skb              1757 net/core/skbuff.c 	skb_put(n, skb->len);
skb              1767 net/core/skbuff.c 	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
skb              1768 net/core/skbuff.c 			     skb->len + head_copy_len));
skb              1770 net/core/skbuff.c 	skb_copy_header(n, skb);
skb              1792 net/core/skbuff.c int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
skb              1798 net/core/skbuff.c 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
skb              1799 net/core/skbuff.c 		memset(skb->data+skb->len, 0, pad);
skb              1803 net/core/skbuff.c 	ntail = skb->data_len + pad - (skb->end - skb->tail);
skb              1804 net/core/skbuff.c 	if (likely(skb_cloned(skb) || ntail > 0)) {
skb              1805 net/core/skbuff.c 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
skb              1813 net/core/skbuff.c 	err = skb_linearize(skb);
skb              1817 net/core/skbuff.c 	memset(skb->data + skb->len, 0, pad);
skb              1822 net/core/skbuff.c 		kfree_skb(skb);
skb              1840 net/core/skbuff.c void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
skb              1842 net/core/skbuff.c 	if (tail != skb) {
skb              1843 net/core/skbuff.c 		skb->data_len += len;
skb              1844 net/core/skbuff.c 		skb->len += len;
skb              1859 net/core/skbuff.c void *skb_put(struct sk_buff *skb, unsigned int len)
skb              1861 net/core/skbuff.c 	void *tmp = skb_tail_pointer(skb);
skb              1862 net/core/skbuff.c 	SKB_LINEAR_ASSERT(skb);
skb              1863 net/core/skbuff.c 	skb->tail += len;
skb              1864 net/core/skbuff.c 	skb->len  += len;
skb              1865 net/core/skbuff.c 	if (unlikely(skb->tail > skb->end))
skb              1866 net/core/skbuff.c 		skb_over_panic(skb, len, __builtin_return_address(0));
skb              1880 net/core/skbuff.c void *skb_push(struct sk_buff *skb, unsigned int len)
skb              1882 net/core/skbuff.c 	skb->data -= len;
skb              1883 net/core/skbuff.c 	skb->len  += len;
skb              1884 net/core/skbuff.c 	if (unlikely(skb->data < skb->head))
skb              1885 net/core/skbuff.c 		skb_under_panic(skb, len, __builtin_return_address(0));
skb              1886 net/core/skbuff.c 	return skb->data;
skb              1900 net/core/skbuff.c void *skb_pull(struct sk_buff *skb, unsigned int len)
skb              1902 net/core/skbuff.c 	return skb_pull_inline(skb, len);
skb              1915 net/core/skbuff.c void skb_trim(struct sk_buff *skb, unsigned int len)
skb              1917 net/core/skbuff.c 	if (skb->len > len)
skb              1918 net/core/skbuff.c 		__skb_trim(skb, len);
skb              1925 net/core/skbuff.c int ___pskb_trim(struct sk_buff *skb, unsigned int len)
skb              1929 net/core/skbuff.c 	int offset = skb_headlen(skb);
skb              1930 net/core/skbuff.c 	int nfrags = skb_shinfo(skb)->nr_frags;
skb              1934 net/core/skbuff.c 	if (skb_cloned(skb) &&
skb              1935 net/core/skbuff.c 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
skb              1943 net/core/skbuff.c 		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              1950 net/core/skbuff.c 		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
skb              1953 net/core/skbuff.c 		skb_shinfo(skb)->nr_frags = i;
skb              1956 net/core/skbuff.c 			skb_frag_unref(skb, i);
skb              1958 net/core/skbuff.c 		if (skb_has_frag_list(skb))
skb              1959 net/core/skbuff.c 			skb_drop_fraglist(skb);
skb              1963 net/core/skbuff.c 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
skb              1995 net/core/skbuff.c 	if (len > skb_headlen(skb)) {
skb              1996 net/core/skbuff.c 		skb->data_len -= skb->len - len;
skb              1997 net/core/skbuff.c 		skb->len       = len;
skb              1999 net/core/skbuff.c 		skb->len       = len;
skb              2000 net/core/skbuff.c 		skb->data_len  = 0;
skb              2001 net/core/skbuff.c 		skb_set_tail_pointer(skb, len);
skb              2004 net/core/skbuff.c 	if (!skb->sk || skb->destructor == sock_edemux)
skb              2005 net/core/skbuff.c 		skb_condense(skb);
skb              2012 net/core/skbuff.c int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
skb              2014 net/core/skbuff.c 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb              2015 net/core/skbuff.c 		int delta = skb->len - len;
skb              2017 net/core/skbuff.c 		skb->csum = csum_block_sub(skb->csum,
skb              2018 net/core/skbuff.c 					   skb_checksum(skb, len, delta, 0),
skb              2021 net/core/skbuff.c 	return __pskb_trim(skb, len);
skb              2050 net/core/skbuff.c void *__pskb_pull_tail(struct sk_buff *skb, int delta)
skb              2056 net/core/skbuff.c 	int i, k, eat = (skb->tail + delta) - skb->end;
skb              2058 net/core/skbuff.c 	if (eat > 0 || skb_cloned(skb)) {
skb              2059 net/core/skbuff.c 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
skb              2064 net/core/skbuff.c 	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
skb              2065 net/core/skbuff.c 			     skb_tail_pointer(skb), delta));
skb              2070 net/core/skbuff.c 	if (!skb_has_frag_list(skb))
skb              2075 net/core/skbuff.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2076 net/core/skbuff.c 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              2091 net/core/skbuff.c 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
skb              2125 net/core/skbuff.c 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
skb              2126 net/core/skbuff.c 			skb_shinfo(skb)->frag_list = list->next;
skb              2132 net/core/skbuff.c 			skb_shinfo(skb)->frag_list = clone;
skb              2140 net/core/skbuff.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2141 net/core/skbuff.c 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              2144 net/core/skbuff.c 			skb_frag_unref(skb, i);
skb              2147 net/core/skbuff.c 			skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
skb              2149 net/core/skbuff.c 			*frag = skb_shinfo(skb)->frags[i];
skb              2160 net/core/skbuff.c 	skb_shinfo(skb)->nr_frags = k;
skb              2163 net/core/skbuff.c 	skb->tail     += delta;
skb              2164 net/core/skbuff.c 	skb->data_len -= delta;
skb              2166 net/core/skbuff.c 	if (!skb->data_len)
skb              2167 net/core/skbuff.c 		skb_zcopy_clear(skb, false);
skb              2169 net/core/skbuff.c 	return skb_tail_pointer(skb);
skb              2188 net/core/skbuff.c int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
skb              2190 net/core/skbuff.c 	int start = skb_headlen(skb);
skb              2194 net/core/skbuff.c 	if (offset > (int)skb->len - len)
skb              2201 net/core/skbuff.c 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
skb              2208 net/core/skbuff.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2210 net/core/skbuff.c 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
skb              2239 net/core/skbuff.c 	skb_walk_frags(skb, frag_iter) {
skb              2373 net/core/skbuff.c static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
skb              2385 net/core/skbuff.c 	if (__splice_segment(virt_to_page(skb->data),
skb              2386 net/core/skbuff.c 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
skb              2387 net/core/skbuff.c 			     skb_headlen(skb),
skb              2389 net/core/skbuff.c 			     skb_head_is_locked(skb),
skb              2396 net/core/skbuff.c 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
skb              2397 net/core/skbuff.c 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
skb              2405 net/core/skbuff.c 	skb_walk_frags(skb, iter) {
skb              2425 net/core/skbuff.c int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
skb              2440 net/core/skbuff.c 	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
skb              2450 net/core/skbuff.c int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
skb              2454 net/core/skbuff.c 	struct sk_buff *head = skb;
skb              2461 net/core/skbuff.c 	while (offset < skb_headlen(skb) && len) {
skb              2465 net/core/skbuff.c 		slen = min_t(int, len, skb_headlen(skb) - offset);
skb              2466 net/core/skbuff.c 		kv.iov_base = skb->data + offset;
skb              2484 net/core/skbuff.c 	offset -= skb_headlen(skb);
skb              2487 net/core/skbuff.c 	for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
skb              2488 net/core/skbuff.c 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
skb              2496 net/core/skbuff.c 	for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
skb              2497 net/core/skbuff.c 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
skb              2519 net/core/skbuff.c 		if (skb == head) {
skb              2520 net/core/skbuff.c 			if (skb_has_frag_list(skb)) {
skb              2521 net/core/skbuff.c 				skb = skb_shinfo(skb)->frag_list;
skb              2524 net/core/skbuff.c 		} else if (skb->next) {
skb              2525 net/core/skbuff.c 			skb = skb->next;
skb              2550 net/core/skbuff.c int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
skb              2552 net/core/skbuff.c 	int start = skb_headlen(skb);
skb              2556 net/core/skbuff.c 	if (offset > (int)skb->len - len)
skb              2562 net/core/skbuff.c 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
skb              2569 net/core/skbuff.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2570 net/core/skbuff.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2600 net/core/skbuff.c 	skb_walk_frags(skb, frag_iter) {
skb              2628 net/core/skbuff.c __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
skb              2631 net/core/skbuff.c 	int start = skb_headlen(skb);
skb              2641 net/core/skbuff.c 				       skb->data + offset, copy, csum);
skb              2648 net/core/skbuff.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2650 net/core/skbuff.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2685 net/core/skbuff.c 	skb_walk_frags(skb, frag_iter) {
skb              2712 net/core/skbuff.c __wsum skb_checksum(const struct sk_buff *skb, int offset,
skb              2720 net/core/skbuff.c 	return __skb_checksum(skb, offset, len, csum, &ops);
skb              2726 net/core/skbuff.c __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
skb              2729 net/core/skbuff.c 	int start = skb_headlen(skb);
skb              2738 net/core/skbuff.c 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
skb              2747 net/core/skbuff.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              2752 net/core/skbuff.c 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              2754 net/core/skbuff.c 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              2783 net/core/skbuff.c 	skb_walk_frags(skb, frag_iter) {
skb              2810 net/core/skbuff.c __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
skb              2814 net/core/skbuff.c 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
skb              2817 net/core/skbuff.c 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
skb              2818 net/core/skbuff.c 		    !skb->csum_complete_sw)
skb              2819 net/core/skbuff.c 			netdev_rx_csum_fault(skb->dev, skb);
skb              2821 net/core/skbuff.c 	if (!skb_shared(skb))
skb              2822 net/core/skbuff.c 		skb->csum_valid = !sum;
skb              2836 net/core/skbuff.c __sum16 __skb_checksum_complete(struct sk_buff *skb)
skb              2841 net/core/skbuff.c 	csum = skb_checksum(skb, 0, skb->len, 0);
skb              2843 net/core/skbuff.c 	sum = csum_fold(csum_add(skb->csum, csum));
skb              2852 net/core/skbuff.c 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
skb              2853 net/core/skbuff.c 		    !skb->csum_complete_sw)
skb              2854 net/core/skbuff.c 			netdev_rx_csum_fault(skb->dev, skb);
skb              2857 net/core/skbuff.c 	if (!skb_shared(skb)) {
skb              2859 net/core/skbuff.c 		skb->csum = csum;
skb              2860 net/core/skbuff.c 		skb->ip_summed = CHECKSUM_COMPLETE;
skb              2861 net/core/skbuff.c 		skb->csum_complete_sw = 1;
skb              2862 net/core/skbuff.c 		skb->csum_valid = !sum;
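
__skb_checksum_complete() above computes the full checksum in software, cross-checks it against the device's CHECKSUM_COMPLETE value, and reports a mismatch via netdev_rx_csum_fault(). Protocol receive paths normally reach it through the skb_checksum_complete() wrapper; a sketch of the usual drop-on-failure check:

	if (skb_checksum_complete(skb)) {
		/* non-zero folded sum: corrupted packet */
		kfree_skb(skb);
		return;
	}
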
skb              2998 net/core/skbuff.c void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
skb              3003 net/core/skbuff.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              3004 net/core/skbuff.c 		csstart = skb_checksum_start_offset(skb);
skb              3006 net/core/skbuff.c 		csstart = skb_headlen(skb);
skb              3008 net/core/skbuff.c 	BUG_ON(csstart > skb_headlen(skb));
skb              3010 net/core/skbuff.c 	skb_copy_from_linear_data(skb, to, csstart);
skb              3013 net/core/skbuff.c 	if (csstart != skb->len)
skb              3014 net/core/skbuff.c 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
skb              3015 net/core/skbuff.c 					      skb->len - csstart, 0);
skb              3017 net/core/skbuff.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              3018 net/core/skbuff.c 		long csstuff = csstart + skb->csum_offset;
skb              3076 net/core/skbuff.c 	struct sk_buff *skb;
skb              3077 net/core/skbuff.c 	while ((skb = skb_dequeue(list)) != NULL)
skb              3078 net/core/skbuff.c 		kfree_skb(skb);
skb              3098 net/core/skbuff.c 		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
skb              3101 net/core/skbuff.c 		rb_erase(&skb->rbnode, root);
skb              3102 net/core/skbuff.c 		sum += skb->truesize;
skb              3103 net/core/skbuff.c 		kfree_skb(skb);
skb              3160 net/core/skbuff.c void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
skb              3165 net/core/skbuff.c 	__skb_unlink(skb, list);
skb              3190 net/core/skbuff.c static inline void skb_split_inside_header(struct sk_buff *skb,
skb              3196 net/core/skbuff.c 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
skb              3199 net/core/skbuff.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb              3200 net/core/skbuff.c 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
skb              3202 net/core/skbuff.c 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
skb              3203 net/core/skbuff.c 	skb_shinfo(skb)->nr_frags  = 0;
skb              3204 net/core/skbuff.c 	skb1->data_len		   = skb->data_len;
skb              3206 net/core/skbuff.c 	skb->data_len		   = 0;
skb              3207 net/core/skbuff.c 	skb->len		   = len;
skb              3208 net/core/skbuff.c 	skb_set_tail_pointer(skb, len);
skb              3211 net/core/skbuff.c static inline void skb_split_no_header(struct sk_buff *skb,
skb              3216 net/core/skbuff.c 	const int nfrags = skb_shinfo(skb)->nr_frags;
skb              3218 net/core/skbuff.c 	skb_shinfo(skb)->nr_frags = 0;
skb              3219 net/core/skbuff.c 	skb1->len		  = skb1->data_len = skb->len - len;
skb              3220 net/core/skbuff.c 	skb->len		  = len;
skb              3221 net/core/skbuff.c 	skb->data_len		  = len - pos;
skb              3224 net/core/skbuff.c 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              3227 net/core/skbuff.c 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
skb              3238 net/core/skbuff.c 				skb_frag_ref(skb, i);
skb              3241 net/core/skbuff.c 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
skb              3242 net/core/skbuff.c 				skb_shinfo(skb)->nr_frags++;
skb              3246 net/core/skbuff.c 			skb_shinfo(skb)->nr_frags++;
skb              3258 net/core/skbuff.c void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
skb              3260 net/core/skbuff.c 	int pos = skb_headlen(skb);
skb              3262 net/core/skbuff.c 	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
skb              3264 net/core/skbuff.c 	skb_zerocopy_clone(skb1, skb, 0);
skb              3266 net/core/skbuff.c 		skb_split_inside_header(skb, skb1, len, pos);
skb              3268 net/core/skbuff.c 		skb_split_no_header(skb, skb1, len, pos);
skb              3276 net/core/skbuff.c static int skb_prepare_for_shift(struct sk_buff *skb)
skb              3278 net/core/skbuff.c 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
skb              3299 net/core/skbuff.c int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
skb              3304 net/core/skbuff.c 	BUG_ON(shiftlen > skb->len);
skb              3306 net/core/skbuff.c 	if (skb_headlen(skb))
skb              3308 net/core/skbuff.c 	if (skb_zcopy(tgt) || skb_zcopy(skb))
skb              3314 net/core/skbuff.c 	fragfrom = &skb_shinfo(skb)->frags[from];
skb              3328 net/core/skbuff.c 			if (skb_prepare_for_shift(skb) ||
skb              3333 net/core/skbuff.c 			fragfrom = &skb_shinfo(skb)->frags[from];
skb              3347 net/core/skbuff.c 	if ((shiftlen == skb->len) &&
skb              3348 net/core/skbuff.c 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
skb              3351 net/core/skbuff.c 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
skb              3354 net/core/skbuff.c 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
skb              3358 net/core/skbuff.c 		fragfrom = &skb_shinfo(skb)->frags[from];
skb              3386 net/core/skbuff.c 		fragfrom = &skb_shinfo(skb)->frags[0];
skb              3395 net/core/skbuff.c 	while (from < skb_shinfo(skb)->nr_frags)
skb              3396 net/core/skbuff.c 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
skb              3397 net/core/skbuff.c 	skb_shinfo(skb)->nr_frags = to;
skb              3399 net/core/skbuff.c 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
skb              3406 net/core/skbuff.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb              3409 net/core/skbuff.c 	skb->len -= shiftlen;
skb              3410 net/core/skbuff.c 	skb->data_len -= shiftlen;
skb              3411 net/core/skbuff.c 	skb->truesize -= shiftlen;
skb              3429 net/core/skbuff.c void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
skb              3434 net/core/skbuff.c 	st->root_skb = st->cur_skb = skb;
skb              3572 net/core/skbuff.c unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
skb              3581 net/core/skbuff.c 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
skb              3588 net/core/skbuff.c int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
skb              3591 net/core/skbuff.c 	int i = skb_shinfo(skb)->nr_frags;
skb              3593 net/core/skbuff.c 	if (skb_can_coalesce(skb, i, page, offset)) {
skb              3594 net/core/skbuff.c 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
skb              3597 net/core/skbuff.c 		skb_fill_page_desc(skb, i, page, offset, size);
skb              3617 net/core/skbuff.c void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
skb              3619 net/core/skbuff.c 	unsigned char *data = skb->data;
skb              3621 net/core/skbuff.c 	BUG_ON(len > skb->len);
skb              3622 net/core/skbuff.c 	__skb_pull(skb, len);
skb              3623 net/core/skbuff.c 	skb_postpull_rcsum(skb, data, len);
skb              3624 net/core/skbuff.c 	return skb->data;
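
The put/push/pull/trim helpers above all move the buffer cursors, which always satisfy head <= data <= tail <= end: skb_reserve() opens headroom, skb_put() extends the tail, skb_push()/skb_pull() grow and shrink the front, and the *_rcsum variants keep skb->csum coherent while doing so. A minimal lifecycle sketch (payload is an assumed 64-byte local buffer):

	struct sk_buff *skb = alloc_skb(LL_MAX_HEADER + 64, GFP_KERNEL);

	if (skb) {
		skb_reserve(skb, LL_MAX_HEADER);	/* headroom for lower layers */
		memcpy(skb_put(skb, 64), payload, 64);	/* append 64 bytes at tail */
		skb_push(skb, ETH_HLEN);		/* claim link-layer header space */
		skb_pull(skb, ETH_HLEN);		/* ...and consume it again */
		kfree_skb(skb);
	}
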
skb              3986 net/core/skbuff.c int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
skb              3988 net/core/skbuff.c 	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
skb              3989 net/core/skbuff.c 	unsigned int offset = skb_gro_offset(skb);
skb              3990 net/core/skbuff.c 	unsigned int headlen = skb_headlen(skb);
skb              3991 net/core/skbuff.c 	unsigned int len = skb_gro_len(skb);
skb              3995 net/core/skbuff.c 	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
skb              4024 net/core/skbuff.c 		delta_truesize = skb->truesize -
skb              4025 net/core/skbuff.c 				 SKB_TRUESIZE(skb_end_offset(skb));
skb              4027 net/core/skbuff.c 		skb->truesize -= skb->data_len;
skb              4028 net/core/skbuff.c 		skb->len -= skb->data_len;
skb              4029 net/core/skbuff.c 		skb->data_len = 0;
skb              4031 net/core/skbuff.c 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
skb              4033 net/core/skbuff.c 	} else if (skb->head_frag) {
skb              4036 net/core/skbuff.c 		struct page *page = virt_to_head_page(skb->head);
skb              4043 net/core/skbuff.c 		first_offset = skb->data -
skb              4056 net/core/skbuff.c 		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
skb              4057 net/core/skbuff.c 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
skb              4062 net/core/skbuff.c 	delta_truesize = skb->truesize;
skb              4068 net/core/skbuff.c 		skb->data_len -= eat;
skb              4069 net/core/skbuff.c 		skb->len -= eat;
skb              4073 net/core/skbuff.c 	__skb_pull(skb, offset);
skb              4076 net/core/skbuff.c 		skb_shinfo(p)->frag_list = skb;
skb              4078 net/core/skbuff.c 		NAPI_GRO_CB(p)->last->next = skb;
skb              4079 net/core/skbuff.c 	NAPI_GRO_CB(p)->last = skb;
skb              4080 net/core/skbuff.c 	__skb_header_release(skb);
skb              4093 net/core/skbuff.c 	NAPI_GRO_CB(skb)->same_flow = 1;
skb              4162 net/core/skbuff.c __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
skb              4165 net/core/skbuff.c 	int start = skb_headlen(skb);
skb              4176 net/core/skbuff.c 		sg_set_buf(sg, skb->data + offset, copy);
skb              4183 net/core/skbuff.c 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              4188 net/core/skbuff.c 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              4190 net/core/skbuff.c 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              4206 net/core/skbuff.c 	skb_walk_frags(skb, frag_iter) {
skb              4245 net/core/skbuff.c int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
skb              4247 net/core/skbuff.c 	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
skb              4277 net/core/skbuff.c int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
skb              4280 net/core/skbuff.c 	return __skb_to_sgvec(skb, sg, offset, len, 0);
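skb_to_sgvec() maps the skb's linear data, page frags, and frag list into a caller-provided scatterlist and, in this era of the API, returns the number of entries used or a negative error. A sketch of the usual crypto-offload pattern; sizing the array at MAX_SKB_FRAGS + 1 is the conventional upper bound, not something enforced here:

	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nsg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg < 0)
		return nsg;	/* e.g. -EMSGSIZE if sg[] was too small */
	/* hand sg/nsg to the crypto layer */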
skb              4303 net/core/skbuff.c int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
skb              4313 net/core/skbuff.c 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
skb              4314 net/core/skbuff.c 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
skb              4318 net/core/skbuff.c 	if (!skb_has_frag_list(skb)) {
skb              4324 net/core/skbuff.c 		if (skb_tailroom(skb) < tailbits &&
skb              4325 net/core/skbuff.c 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
skb              4329 net/core/skbuff.c 		*trailer = skb;
skb              4336 net/core/skbuff.c 	skb_p = &skb_shinfo(skb)->frag_list;
skb              4396 net/core/skbuff.c static void sock_rmem_free(struct sk_buff *skb)
skb              4398 net/core/skbuff.c 	struct sock *sk = skb->sk;
skb              4400 net/core/skbuff.c 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
skb              4403 net/core/skbuff.c static void skb_set_err_queue(struct sk_buff *skb)
skb              4408 net/core/skbuff.c 	skb->pkt_type = PACKET_OUTGOING;
skb              4415 net/core/skbuff.c int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
skb              4417 net/core/skbuff.c 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
skb              4421 net/core/skbuff.c 	skb_orphan(skb);
skb              4422 net/core/skbuff.c 	skb->sk = sk;
skb              4423 net/core/skbuff.c 	skb->destructor = sock_rmem_free;
skb              4424 net/core/skbuff.c 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
skb              4425 net/core/skbuff.c 	skb_set_err_queue(skb);
skb              4428 net/core/skbuff.c 	skb_dst_force(skb);
skb              4430 net/core/skbuff.c 	skb_queue_tail(&sk->sk_error_queue, skb);
skb              4437 net/core/skbuff.c static bool is_icmp_err_skb(const struct sk_buff *skb)
skb              4439 net/core/skbuff.c 	return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
skb              4440 net/core/skbuff.c 		       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
skb              4446 net/core/skbuff.c 	struct sk_buff *skb, *skb_next = NULL;
skb              4451 net/core/skbuff.c 	skb = __skb_dequeue(q);
skb              4452 net/core/skbuff.c 	if (skb && (skb_next = skb_peek(q))) {
skb              4459 net/core/skbuff.c 	if (is_icmp_err_skb(skb) && !icmp_next)
skb              4465 net/core/skbuff.c 	return skb;
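sock_queue_err_skb() and sock_dequeue_err_skb() implement the MSG_ERRQUEUE channel: the skb is orphaned, charged to sk_rmem_alloc with sock_rmem_free as destructor, and drained again on the recvmsg(MSG_ERRQUEUE) path. A kernel-side sketch of queueing one notification, following the timestamping convention used further below (ENOMSG / SO_EE_ORIGIN_TIMESTAMPING):

	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno  = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);	/* error queue full: drop the notification */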
skb              4482 net/core/skbuff.c struct sk_buff *skb_clone_sk(struct sk_buff *skb)
skb              4484 net/core/skbuff.c 	struct sock *sk = skb->sk;
skb              4490 net/core/skbuff.c 	clone = skb_clone(skb, GFP_ATOMIC);
skb              4503 net/core/skbuff.c static void __skb_complete_tx_timestamp(struct sk_buff *skb,
skb              4511 net/core/skbuff.c 	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
skb              4513 net/core/skbuff.c 	serr = SKB_EXT_ERR(skb);
skb              4519 net/core/skbuff.c 	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
skb              4521 net/core/skbuff.c 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
skb              4527 net/core/skbuff.c 	err = sock_queue_err_skb(sk, skb);
skb              4530 net/core/skbuff.c 		kfree_skb(skb);
skb              4547 net/core/skbuff.c void skb_complete_tx_timestamp(struct sk_buff *skb,
skb              4550 net/core/skbuff.c 	struct sock *sk = skb->sk;
skb              4559 net/core/skbuff.c 		*skb_hwtstamps(skb) = *hwtstamps;
skb              4560 net/core/skbuff.c 		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
skb              4566 net/core/skbuff.c 	kfree_skb(skb);
skb              4574 net/core/skbuff.c 	struct sk_buff *skb;
skb              4593 net/core/skbuff.c 			skb = tcp_get_timestamping_opt_stats(sk);
skb              4597 net/core/skbuff.c 			skb = alloc_skb(0, GFP_ATOMIC);
skb              4599 net/core/skbuff.c 		skb = skb_clone(orig_skb, GFP_ATOMIC);
skb              4601 net/core/skbuff.c 	if (!skb)
skb              4605 net/core/skbuff.c 		skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
skb              4607 net/core/skbuff.c 		skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
skb              4611 net/core/skbuff.c 		*skb_hwtstamps(skb) = *hwtstamps;
skb              4613 net/core/skbuff.c 		skb->tstamp = ktime_get_real();
skb              4615 net/core/skbuff.c 	__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
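skb_complete_tx_timestamp() is what a driver calls once hardware reports the transmit time for a clone made with skb_clone_sk(): it attaches the hardware timestamp and funnels the clone onto the owning socket's error queue. A driver-side sketch, with ns standing in for a driver-specific nanosecond value:

	struct skb_shared_hwtstamps hwts = {
		.hwtstamp = ns_to_ktime(ns),	/* ns: hypothetical hw tx time */
	};

	/* consumes 'clone': queued on the socket's error queue or freed */
	skb_complete_tx_timestamp(clone, &hwts);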
skb              4627 net/core/skbuff.c void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
skb              4629 net/core/skbuff.c 	struct sock *sk = skb->sk;
skb              4633 net/core/skbuff.c 	skb->wifi_acked_valid = 1;
skb              4634 net/core/skbuff.c 	skb->wifi_acked = acked;
skb              4636 net/core/skbuff.c 	serr = SKB_EXT_ERR(skb);
skb              4645 net/core/skbuff.c 		err = sock_queue_err_skb(sk, skb);
skb              4649 net/core/skbuff.c 		kfree_skb(skb);
skb              4665 net/core/skbuff.c bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
skb              4668 net/core/skbuff.c 	u32 csum_start = skb_headroom(skb) + (u32)start;
skb              4670 net/core/skbuff.c 	if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
skb              4672 net/core/skbuff.c 				     start, off, skb_headroom(skb), skb_headlen(skb));
skb              4675 net/core/skbuff.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb              4676 net/core/skbuff.c 	skb->csum_start = csum_start;
skb              4677 net/core/skbuff.c 	skb->csum_offset = off;
skb              4678 net/core/skbuff.c 	skb_set_transport_header(skb, start);
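skb_partial_csum_set() is how virtio-style drivers turn a device's "checksum needed" hint into CHECKSUM_PARTIAL state; the offsets are validated against headroom and headlen before being committed. A sketch, with csum_start/csum_offset standing in for values parsed from a hypothetical device header:

	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
		kfree_skb(skb);	/* offsets fall outside the linear header */
		return -EINVAL;
	}
	/* skb->ip_summed is now CHECKSUM_PARTIAL; the checksum will be
	 * folded in at csum_start + csum_offset by the stack or the NIC */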
skb              4683 net/core/skbuff.c static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
skb              4686 net/core/skbuff.c 	if (skb_headlen(skb) >= len)
skb              4692 net/core/skbuff.c 	if (max > skb->len)
skb              4693 net/core/skbuff.c 		max = skb->len;
skb              4695 net/core/skbuff.c 	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
skb              4698 net/core/skbuff.c 	if (skb_headlen(skb) < len)
skb              4706 net/core/skbuff.c static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
skb              4714 net/core/skbuff.c 		err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
skb              4716 net/core/skbuff.c 		if (!err && !skb_partial_csum_set(skb, off,
skb              4720 net/core/skbuff.c 		return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
skb              4723 net/core/skbuff.c 		err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
skb              4725 net/core/skbuff.c 		if (!err && !skb_partial_csum_set(skb, off,
skb              4729 net/core/skbuff.c 		return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
skb              4740 net/core/skbuff.c static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
skb              4749 net/core/skbuff.c 	err = skb_maybe_pull_tail(skb,
skb              4755 net/core/skbuff.c 	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
skb              4758 net/core/skbuff.c 	off = ip_hdrlen(skb);
skb              4765 net/core/skbuff.c 	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
skb              4770 net/core/skbuff.c 		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
skb              4771 net/core/skbuff.c 					   ip_hdr(skb)->daddr,
skb              4772 net/core/skbuff.c 					   skb->len - off,
skb              4773 net/core/skbuff.c 					   ip_hdr(skb)->protocol, 0);
skb              4785 net/core/skbuff.c #define OPT_HDR(type, skb, off) \
skb              4786 net/core/skbuff.c 	(type *)(skb_network_header(skb) + (off))
skb              4788 net/core/skbuff.c static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
skb              4803 net/core/skbuff.c 	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
skb              4807 net/core/skbuff.c 	nexthdr = ipv6_hdr(skb)->nexthdr;
skb              4809 net/core/skbuff.c 	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
skb              4817 net/core/skbuff.c 			err = skb_maybe_pull_tail(skb,
skb              4824 net/core/skbuff.c 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
skb              4832 net/core/skbuff.c 			err = skb_maybe_pull_tail(skb,
skb              4839 net/core/skbuff.c 			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
skb              4847 net/core/skbuff.c 			err = skb_maybe_pull_tail(skb,
skb              4854 net/core/skbuff.c 			hp = OPT_HDR(struct frag_hdr, skb, off);
skb              4874 net/core/skbuff.c 	csum = skb_checksum_setup_ip(skb, nexthdr, off);
skb              4879 net/core/skbuff.c 		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb              4880 net/core/skbuff.c 					 &ipv6_hdr(skb)->daddr,
skb              4881 net/core/skbuff.c 					 skb->len - off, nexthdr, 0);
skb              4893 net/core/skbuff.c int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
skb              4897 net/core/skbuff.c 	switch (skb->protocol) {
skb              4899 net/core/skbuff.c 		err = skb_checksum_setup_ipv4(skb, recalculate);
skb              4903 net/core/skbuff.c 		err = skb_checksum_setup_ipv6(skb, recalculate);
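skb_checksum_setup() is the consumer-facing wrapper over the IPv4/IPv6 helpers above (used by the Xen network drivers, among others) to re-derive CHECKSUM_PARTIAL state for TCP or UDP; with recalculate=true it also rewrites the pseudo-header checksum. A sketch:

	int err = skb_checksum_setup(skb, true);

	if (err) {
		kfree_skb(skb);	/* unsupported protocol or truncated headers */
		return err;
	}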
skb              4928 net/core/skbuff.c static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
skb              4932 net/core/skbuff.c 	unsigned int len = skb_transport_offset(skb) + transport_len;
skb              4935 net/core/skbuff.c 	if (skb->len < len)
skb              4937 net/core/skbuff.c 	else if (skb->len == len)
skb              4938 net/core/skbuff.c 		return skb;
skb              4940 net/core/skbuff.c 	skb_chk = skb_clone(skb, GFP_ATOMIC);
skb              4968 net/core/skbuff.c struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
skb              4970 net/core/skbuff.c 				     __sum16(*skb_chkf)(struct sk_buff *skb))
skb              4973 net/core/skbuff.c 	unsigned int offset = skb_transport_offset(skb);
skb              4976 net/core/skbuff.c 	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
skb              4993 net/core/skbuff.c 	if (skb_chk && skb_chk != skb)
skb              5001 net/core/skbuff.c void __skb_warn_lro_forwarding(const struct sk_buff *skb)
skb              5004 net/core/skbuff.c 			     skb->dev->name);
skb              5008 net/core/skbuff.c void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
skb              5011 net/core/skbuff.c 		skb_release_head_state(skb);
skb              5012 net/core/skbuff.c 		kmem_cache_free(skbuff_head_cache, skb);
skb              5014 net/core/skbuff.c 		__kfree_skb(skb);
skb              5116 net/core/skbuff.c void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb              5118 net/core/skbuff.c 	skb->pkt_type = PACKET_HOST;
skb              5119 net/core/skbuff.c 	skb->skb_iif = 0;
skb              5120 net/core/skbuff.c 	skb->ignore_df = 0;
skb              5121 net/core/skbuff.c 	skb_dst_drop(skb);
skb              5122 net/core/skbuff.c 	skb_ext_reset(skb);
skb              5123 net/core/skbuff.c 	nf_reset_ct(skb);
skb              5124 net/core/skbuff.c 	nf_reset_trace(skb);
skb              5127 net/core/skbuff.c 	skb->offload_fwd_mark = 0;
skb              5128 net/core/skbuff.c 	skb->offload_l3_fwd_mark = 0;
skb              5134 net/core/skbuff.c 	ipvs_reset(skb);
skb              5135 net/core/skbuff.c 	skb->mark = 0;
skb              5136 net/core/skbuff.c 	skb->tstamp = 0;
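skb_scrub_packet() strips forwarding state before a packet is re-injected through another device; with xnet=true it also clears namespace-local metadata such as skb->mark and the timestamp. Tunnel drivers typically derive xnet from a namespace comparison, roughly as in this sketch (peer_dev is hypothetical):

	skb_scrub_packet(skb, !net_eq(dev_net(skb->dev), dev_net(peer_dev)));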
skb              5150 net/core/skbuff.c static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
skb              5152 net/core/skbuff.c 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              5155 net/core/skbuff.c 	if (skb->encapsulation) {
skb              5156 net/core/skbuff.c 		thlen = skb_inner_transport_header(skb) -
skb              5157 net/core/skbuff.c 			skb_transport_header(skb);
skb              5160 net/core/skbuff.c 			thlen += inner_tcp_hdrlen(skb);
skb              5162 net/core/skbuff.c 		thlen = tcp_hdrlen(skb);
skb              5163 net/core/skbuff.c 	} else if (unlikely(skb_is_gso_sctp(skb))) {
skb              5185 net/core/skbuff.c static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
skb              5187 net/core/skbuff.c 	unsigned int hdr_len = skb_transport_header(skb) -
skb              5188 net/core/skbuff.c 			       skb_network_header(skb);
skb              5190 net/core/skbuff.c 	return hdr_len + skb_gso_transport_seglen(skb);
skb              5202 net/core/skbuff.c static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
skb              5204 net/core/skbuff.c 	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
skb              5206 net/core/skbuff.c 	return hdr_len + skb_gso_transport_seglen(skb);
skb              5230 net/core/skbuff.c static inline bool skb_gso_size_check(const struct sk_buff *skb,
skb              5233 net/core/skbuff.c 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              5242 net/core/skbuff.c 	skb_walk_frags(skb, iter) {
skb              5260 net/core/skbuff.c bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
skb              5262 net/core/skbuff.c 	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
skb              5275 net/core/skbuff.c bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
skb              5277 net/core/skbuff.c 	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
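skb_gso_validate_network_len() answers "would every segment of this GSO skb fit the given network-layer MTU?" without actually segmenting. Forwarding paths use it roughly like this sketch:

	if (skb_is_gso(skb)) {
		if (!skb_gso_validate_network_len(skb, mtu))
			goto frag_needed;	/* e.g. ICMP FRAG_NEEDED / PTB */
	} else if (skb->len > mtu) {
		goto frag_needed;
	}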
skb              5281 net/core/skbuff.c static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
skb              5286 net/core/skbuff.c 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
skb              5287 net/core/skbuff.c 		kfree_skb(skb);
skb              5291 net/core/skbuff.c 	mac_len = skb->data - skb_mac_header(skb);
skb              5293 net/core/skbuff.c 		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
skb              5297 net/core/skbuff.c 	meta_len = skb_metadata_len(skb);
skb              5299 net/core/skbuff.c 		meta = skb_metadata_end(skb) - meta_len;
skb              5303 net/core/skbuff.c 	skb->mac_header += VLAN_HLEN;
skb              5304 net/core/skbuff.c 	return skb;
skb              5307 net/core/skbuff.c struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
skb              5312 net/core/skbuff.c 	if (unlikely(skb_vlan_tag_present(skb))) {
skb              5314 net/core/skbuff.c 		return skb;
skb              5317 net/core/skbuff.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb              5318 net/core/skbuff.c 	if (unlikely(!skb))
skb              5321 net/core/skbuff.c 	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
skb              5324 net/core/skbuff.c 	vhdr = (struct vlan_hdr *)skb->data;
skb              5326 net/core/skbuff.c 	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
skb              5328 net/core/skbuff.c 	skb_pull_rcsum(skb, VLAN_HLEN);
skb              5329 net/core/skbuff.c 	vlan_set_encap_proto(skb, vhdr);
skb              5331 net/core/skbuff.c 	skb = skb_reorder_vlan_header(skb);
skb              5332 net/core/skbuff.c 	if (unlikely(!skb))
skb              5335 net/core/skbuff.c 	skb_reset_network_header(skb);
skb              5336 net/core/skbuff.c 	skb_reset_transport_header(skb);
skb              5337 net/core/skbuff.c 	skb_reset_mac_len(skb);
skb              5339 net/core/skbuff.c 	return skb;
skb              5342 net/core/skbuff.c 	kfree_skb(skb);
skb              5347 net/core/skbuff.c int skb_ensure_writable(struct sk_buff *skb, int write_len)
skb              5349 net/core/skbuff.c 	if (!pskb_may_pull(skb, write_len))
skb              5352 net/core/skbuff.c 	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
skb              5355 net/core/skbuff.c 	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
skb              5362 net/core/skbuff.c int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
skb              5365 net/core/skbuff.c 	int offset = skb->data - skb_mac_header(skb);
skb              5374 net/core/skbuff.c 	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
skb              5378 net/core/skbuff.c 	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
skb              5380 net/core/skbuff.c 	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
skb              5383 net/core/skbuff.c 	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
skb              5384 net/core/skbuff.c 	__skb_pull(skb, VLAN_HLEN);
skb              5386 net/core/skbuff.c 	vlan_set_encap_proto(skb, vhdr);
skb              5387 net/core/skbuff.c 	skb->mac_header += VLAN_HLEN;
skb              5389 net/core/skbuff.c 	if (skb_network_offset(skb) < ETH_HLEN)
skb              5390 net/core/skbuff.c 		skb_set_network_header(skb, ETH_HLEN);
skb              5392 net/core/skbuff.c 	skb_reset_mac_len(skb);
skb              5401 net/core/skbuff.c int skb_vlan_pop(struct sk_buff *skb)
skb              5407 net/core/skbuff.c 	if (likely(skb_vlan_tag_present(skb))) {
skb              5408 net/core/skbuff.c 		__vlan_hwaccel_clear_tag(skb);
skb              5410 net/core/skbuff.c 		if (unlikely(!eth_type_vlan(skb->protocol)))
skb              5413 net/core/skbuff.c 		err = __skb_vlan_pop(skb, &vlan_tci);
skb              5418 net/core/skbuff.c 	if (likely(!eth_type_vlan(skb->protocol)))
skb              5421 net/core/skbuff.c 	vlan_proto = skb->protocol;
skb              5422 net/core/skbuff.c 	err = __skb_vlan_pop(skb, &vlan_tci);
skb              5426 net/core/skbuff.c 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
skb              5434 net/core/skbuff.c int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
skb              5436 net/core/skbuff.c 	if (skb_vlan_tag_present(skb)) {
skb              5437 net/core/skbuff.c 		int offset = skb->data - skb_mac_header(skb);
skb              5446 net/core/skbuff.c 		err = __vlan_insert_tag(skb, skb->vlan_proto,
skb              5447 net/core/skbuff.c 					skb_vlan_tag_get(skb));
skb              5451 net/core/skbuff.c 		skb->protocol = skb->vlan_proto;
skb              5452 net/core/skbuff.c 		skb->mac_len += VLAN_HLEN;
skb              5454 net/core/skbuff.c 		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
skb              5456 net/core/skbuff.c 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
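skb_vlan_pop() and skb_vlan_push() keep the accelerated tag (skb->vlan_tci) and any in-payload tag consistent; note above how push first re-inserts an existing hwaccel tag into the payload before accelerating the new one. An act_vlan-style sketch that swaps the outermost tag for VID 100:

	err = skb_vlan_pop(skb);
	if (!err)
		err = skb_vlan_push(skb, htons(ETH_P_8021Q), 100);
	if (err)
		goto drop;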
skb              5462 net/core/skbuff.c static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
skb              5465 net/core/skbuff.c 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb              5468 net/core/skbuff.c 		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
skb              5486 net/core/skbuff.c int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
skb              5496 net/core/skbuff.c 	if (skb->encapsulation)
skb              5499 net/core/skbuff.c 	err = skb_cow_head(skb, MPLS_HLEN);
skb              5503 net/core/skbuff.c 	if (!skb->inner_protocol) {
skb              5504 net/core/skbuff.c 		skb_set_inner_network_header(skb, mac_len);
skb              5505 net/core/skbuff.c 		skb_set_inner_protocol(skb, skb->protocol);
skb              5508 net/core/skbuff.c 	skb_push(skb, MPLS_HLEN);
skb              5509 net/core/skbuff.c 	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
skb              5511 net/core/skbuff.c 	skb_reset_mac_header(skb);
skb              5512 net/core/skbuff.c 	skb_set_network_header(skb, mac_len);
skb              5514 net/core/skbuff.c 	lse = mpls_hdr(skb);
skb              5516 net/core/skbuff.c 	skb_postpush_rcsum(skb, lse, MPLS_HLEN);
skb              5519 net/core/skbuff.c 		skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
skb              5520 net/core/skbuff.c 	skb->protocol = mpls_proto;
skb              5538 net/core/skbuff.c int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
skb              5543 net/core/skbuff.c 	if (unlikely(!eth_p_mpls(skb->protocol)))
skb              5546 net/core/skbuff.c 	err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
skb              5550 net/core/skbuff.c 	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
skb              5551 net/core/skbuff.c 	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
skb              5554 net/core/skbuff.c 	__skb_pull(skb, MPLS_HLEN);
skb              5555 net/core/skbuff.c 	skb_reset_mac_header(skb);
skb              5556 net/core/skbuff.c 	skb_set_network_header(skb, mac_len);
skb              5562 net/core/skbuff.c 		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
skb              5563 net/core/skbuff.c 		skb_mod_eth_type(skb, hdr, next_proto);
skb              5565 net/core/skbuff.c 	skb->protocol = next_proto;
skb              5581 net/core/skbuff.c int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
skb              5585 net/core/skbuff.c 	if (unlikely(!eth_p_mpls(skb->protocol)))
skb              5588 net/core/skbuff.c 	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
skb              5592 net/core/skbuff.c 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb              5593 net/core/skbuff.c 		__be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
skb              5595 net/core/skbuff.c 		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
skb              5598 net/core/skbuff.c 	mpls_hdr(skb)->label_stack_entry = mpls_lse;
skb              5613 net/core/skbuff.c int skb_mpls_dec_ttl(struct sk_buff *skb)
skb              5618 net/core/skbuff.c 	if (unlikely(!eth_p_mpls(skb->protocol)))
skb              5621 net/core/skbuff.c 	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
skb              5629 net/core/skbuff.c 	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
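The skb_mpls_* helpers keep skb->protocol, the MAC header, and (for CHECKSUM_COMPLETE) skb->csum consistent while editing the label stack. A push/pop sketch; the trailing mac_len and ethernet parameters are truncated in the listing above and assumed here from the v5.4 signatures, and 'label' is a hypothetical 20-bit value:

	__be32 lse = cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
				 MPLS_LS_S_MASK | 64 /* TTL */);

	err = skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
			    skb->mac_len, true /* ethernet */);
	/* later, undo it and restore an IPv4 ethertype */
	err = skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len,
			   true /* ethernet */);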
skb              5652 net/core/skbuff.c 	struct sk_buff *skb;
skb              5664 net/core/skbuff.c 	skb = alloc_skb(header_len, gfp_mask);
skb              5665 net/core/skbuff.c 	if (!skb)
skb              5668 net/core/skbuff.c 	skb->truesize += npages << PAGE_SHIFT;
skb              5693 net/core/skbuff.c 		skb_fill_page_desc(skb, i, page, 0, chunk);
skb              5697 net/core/skbuff.c 	return skb;
skb              5700 net/core/skbuff.c 	kfree_skb(skb);
skb              5706 net/core/skbuff.c static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
skb              5710 net/core/skbuff.c 	int size = skb_end_offset(skb);
skb              5716 net/core/skbuff.c 	if (skb_pfmemalloc(skb))
skb              5727 net/core/skbuff.c 	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
skb              5728 net/core/skbuff.c 	skb->len -= off;
skb              5731 net/core/skbuff.c 	       skb_shinfo(skb),
skb              5733 net/core/skbuff.c 			frags[skb_shinfo(skb)->nr_frags]));
skb              5734 net/core/skbuff.c 	if (skb_cloned(skb)) {
skb              5736 net/core/skbuff.c 		if (skb_orphan_frags(skb, gfp_mask)) {
skb              5740 net/core/skbuff.c 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb              5741 net/core/skbuff.c 			skb_frag_ref(skb, i);
skb              5742 net/core/skbuff.c 		if (skb_has_frag_list(skb))
skb              5743 net/core/skbuff.c 			skb_clone_fraglist(skb);
skb              5744 net/core/skbuff.c 		skb_release_data(skb);
skb              5749 net/core/skbuff.c 		skb_free_head(skb);
skb              5752 net/core/skbuff.c 	skb->head = data;
skb              5753 net/core/skbuff.c 	skb->data = data;
skb              5754 net/core/skbuff.c 	skb->head_frag = 0;
skb              5756 net/core/skbuff.c 	skb->end = size;
skb              5758 net/core/skbuff.c 	skb->end = skb->head + size;
skb              5760 net/core/skbuff.c 	skb_set_tail_pointer(skb, skb_headlen(skb));
skb              5761 net/core/skbuff.c 	skb_headers_offset_update(skb, 0);
skb              5762 net/core/skbuff.c 	skb->cloned = 0;
skb              5763 net/core/skbuff.c 	skb->hdr_len = 0;
skb              5764 net/core/skbuff.c 	skb->nohdr = 0;
skb              5765 net/core/skbuff.c 	atomic_set(&skb_shinfo(skb)->dataref, 1);
skb              5770 net/core/skbuff.c static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
skb              5775 net/core/skbuff.c static int pskb_carve_frag_list(struct sk_buff *skb,
skb              5829 net/core/skbuff.c static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
skb              5833 net/core/skbuff.c 	int size = skb_end_offset(skb);
skb              5835 net/core/skbuff.c 	const int nfrags = skb_shinfo(skb)->nr_frags;
skb              5840 net/core/skbuff.c 	if (skb_pfmemalloc(skb))
skb              5851 net/core/skbuff.c 	       skb_shinfo(skb), offsetof(struct skb_shared_info,
skb              5852 net/core/skbuff.c 					 frags[skb_shinfo(skb)->nr_frags]));
skb              5853 net/core/skbuff.c 	if (skb_orphan_frags(skb, gfp_mask)) {
skb              5859 net/core/skbuff.c 		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb              5862 net/core/skbuff.c 			shinfo->frags[k] = skb_shinfo(skb)->frags[i];
skb              5876 net/core/skbuff.c 			skb_frag_ref(skb, i);
skb              5882 net/core/skbuff.c 	if (skb_has_frag_list(skb))
skb              5883 net/core/skbuff.c 		skb_clone_fraglist(skb);
skb              5887 net/core/skbuff.c 		pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
skb              5889 net/core/skbuff.c 	skb_release_data(skb);
skb              5891 net/core/skbuff.c 	skb->head = data;
skb              5892 net/core/skbuff.c 	skb->head_frag = 0;
skb              5893 net/core/skbuff.c 	skb->data = data;
skb              5895 net/core/skbuff.c 	skb->end = size;
skb              5897 net/core/skbuff.c 	skb->end = skb->head + size;
skb              5899 net/core/skbuff.c 	skb_reset_tail_pointer(skb);
skb              5900 net/core/skbuff.c 	skb_headers_offset_update(skb, 0);
skb              5901 net/core/skbuff.c 	skb->cloned   = 0;
skb              5902 net/core/skbuff.c 	skb->hdr_len  = 0;
skb              5903 net/core/skbuff.c 	skb->nohdr    = 0;
skb              5904 net/core/skbuff.c 	skb->len -= off;
skb              5905 net/core/skbuff.c 	skb->data_len = skb->len;
skb              5906 net/core/skbuff.c 	atomic_set(&skb_shinfo(skb)->dataref, 1);
skb              5911 net/core/skbuff.c static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
skb              5913 net/core/skbuff.c 	int headlen = skb_headlen(skb);
skb              5916 net/core/skbuff.c 		return pskb_carve_inside_header(skb, len, headlen, gfp);
skb              5918 net/core/skbuff.c 		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
skb              5924 net/core/skbuff.c struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
skb              5927 net/core/skbuff.c 	struct sk_buff  *clone = skb_clone(skb, gfp);
skb              5953 net/core/skbuff.c void skb_condense(struct sk_buff *skb)
skb              5955 net/core/skbuff.c 	if (skb->data_len) {
skb              5956 net/core/skbuff.c 		if (skb->data_len > skb->end - skb->tail ||
skb              5957 net/core/skbuff.c 		    skb_cloned(skb))
skb              5961 net/core/skbuff.c 		__pskb_pull_tail(skb, skb->data_len);
skb              5970 net/core/skbuff.c 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
skb              6033 net/core/skbuff.c void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
skb              6038 net/core/skbuff.c 	if (skb->active_extensions) {
skb              6039 net/core/skbuff.c 		old = skb->extensions;
skb              6041 net/core/skbuff.c 		new = skb_ext_maybe_cow(old, skb->active_extensions);
skb              6061 net/core/skbuff.c 	skb->extensions = new;
skb              6062 net/core/skbuff.c 	skb->active_extensions |= 1 << id;
skb              6077 net/core/skbuff.c void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
skb              6079 net/core/skbuff.c 	struct skb_ext *ext = skb->extensions;
skb              6081 net/core/skbuff.c 	skb->active_extensions &= ~(1 << id);
skb              6082 net/core/skbuff.c 	if (skb->active_extensions == 0) {
skb              6083 net/core/skbuff.c 		skb->extensions = NULL;
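skb_ext_add() hands back per-id extension space, copying-on-write when the extension block is shared, and __skb_ext_del() releases the block once no ids remain active. A sketch using the TC chain extension (needs CONFIG_NET_TC_SKB_EXT; the chain value is illustrative):

	struct tc_skb_ext *ext;

	ext = skb_ext_add(skb, TC_SKB_EXT);
	if (!ext)
		return -ENOMEM;
	ext->chain = 7;				/* illustrative chain index */

	/* later, on lookup */
	ext = skb_ext_find(skb, TC_SKB_EXT);	/* NULL if not present */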
skb               174 net/core/skmsg.c 	if (!msg->skb)
skb               193 net/core/skmsg.c 	consume_skb(msg->skb);
skb               399 net/core/skmsg.c static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
skb               408 net/core/skmsg.c 	if (!sk_rmem_schedule(sk, skb, skb->len)) {
skb               414 net/core/skmsg.c 	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
skb               420 net/core/skmsg.c 	sk_mem_charge(sk, skb->len);
skb               421 net/core/skmsg.c 	copied = skb->len;
skb               425 net/core/skmsg.c 	msg->skb = skb;
skb               432 net/core/skmsg.c static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
skb               436 net/core/skmsg.c 		return sk_psock_skb_ingress(psock, skb);
skb               438 net/core/skmsg.c 		return skb_send_sock_locked(psock->sk, skb, off, len);
skb               445 net/core/skmsg.c 	struct sk_buff *skb;
skb               452 net/core/skmsg.c 	if (state->skb) {
skb               453 net/core/skmsg.c 		skb = state->skb;
skb               456 net/core/skmsg.c 		state->skb = NULL;
skb               460 net/core/skmsg.c 	while ((skb = skb_dequeue(&psock->ingress_skb))) {
skb               461 net/core/skmsg.c 		len = skb->len;
skb               464 net/core/skmsg.c 		ingress = tcp_skb_bpf_ingress(skb);
skb               468 net/core/skmsg.c 				ret = sk_psock_handle_skb(psock, skb, off,
skb               472 net/core/skmsg.c 					state->skb = skb;
skb               480 net/core/skmsg.c 				kfree_skb(skb);
skb               488 net/core/skmsg.c 			kfree_skb(skb);
skb               662 net/core/skmsg.c 			    struct sk_buff *skb)
skb               666 net/core/skmsg.c 	skb->sk = psock->sk;
skb               667 net/core/skmsg.c 	bpf_compute_data_end_sk_skb(skb);
skb               669 net/core/skmsg.c 	ret = BPF_PROG_RUN(prog, skb);
skb               677 net/core/skmsg.c 	skb->sk = NULL;
skb               690 net/core/skmsg.c 				   struct sk_buff *skb, int verdict)
skb               705 net/core/skmsg.c 			struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);
skb               708 net/core/skmsg.c 			skb_queue_tail(&psock->ingress_skb, skb);
skb               714 net/core/skmsg.c 		sk_other = tcp_skb_bpf_redirect_fetch(skb);
skb               721 net/core/skmsg.c 		ingress = tcp_skb_bpf_ingress(skb);
skb               727 net/core/skmsg.c 				skb_set_owner_w(skb, sk_other);
skb               728 net/core/skmsg.c 			skb_queue_tail(&psock_other->ingress_skb, skb);
skb               737 net/core/skmsg.c 		kfree_skb(skb);
skb               741 net/core/skmsg.c static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
skb               750 net/core/skmsg.c 		skb_orphan(skb);
skb               751 net/core/skmsg.c 		tcp_skb_bpf_redirect_clear(skb);
skb               752 net/core/skmsg.c 		ret = sk_psock_bpf_run(psock, prog, skb);
skb               753 net/core/skmsg.c 		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
skb               756 net/core/skmsg.c 	sk_psock_verdict_apply(psock, skb, ret);
skb               764 net/core/skmsg.c static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
skb               768 net/core/skmsg.c 	int ret = skb->len;
skb               773 net/core/skmsg.c 		ret = sk_psock_bpf_run(psock, prog, skb);
skb               317 net/core/sock.c int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
skb               326 net/core/sock.c 	ret = sk->sk_backlog_rcv(sk, skb);
skb               451 net/core/sock.c int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               458 net/core/sock.c 		trace_sock_rcvqueue_full(sk, skb);
skb               462 net/core/sock.c 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
skb               467 net/core/sock.c 	skb->dev = NULL;
skb               468 net/core/sock.c 	skb_set_owner_r(skb, sk);
skb               473 net/core/sock.c 	skb_dst_force(skb);
skb               476 net/core/sock.c 	sock_skb_set_dropcount(sk, skb);
skb               477 net/core/sock.c 	__skb_queue_tail(list, skb);
skb               486 net/core/sock.c int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               490 net/core/sock.c 	err = sk_filter(sk, skb);
skb               494 net/core/sock.c 	return __sock_queue_rcv_skb(sk, skb);
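sock_queue_rcv_skb() is the filtered front end to __sock_queue_rcv_skb(): sk_filter() runs first, then the skb is charged to sk_rmem_alloc, tagged with the drop count, and appended to sk_receive_queue. A protocol receive sketch; the caller still owns the skb on failure:

	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);	/* filtered out or receive buffer full */
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;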
skb               498 net/core/sock.c int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
skb               503 net/core/sock.c 	if (sk_filter_trim_cap(sk, skb, trim_cap))
skb               506 net/core/sock.c 	skb->dev = NULL;
skb               522 net/core/sock.c 		rc = sk_backlog_rcv(sk, skb);
skb               525 net/core/sock.c 	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
skb               537 net/core/sock.c 	kfree_skb(skb);
skb              1952 net/core/sock.c void sock_wfree(struct sk_buff *skb)
skb              1954 net/core/sock.c 	struct sock *sk = skb->sk;
skb              1955 net/core/sock.c 	unsigned int len = skb->truesize;
skb              1978 net/core/sock.c void __sock_wfree(struct sk_buff *skb)
skb              1980 net/core/sock.c 	struct sock *sk = skb->sk;
skb              1982 net/core/sock.c 	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
skb              1986 net/core/sock.c void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
skb              1988 net/core/sock.c 	skb_orphan(skb);
skb              1989 net/core/sock.c 	skb->sk = sk;
skb              1992 net/core/sock.c 		skb->destructor = sock_edemux;
skb              1997 net/core/sock.c 	skb->destructor = sock_wfree;
skb              1998 net/core/sock.c 	skb_set_hash_from_sk(skb, sk);
skb              2004 net/core/sock.c 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
skb              2008 net/core/sock.c static bool can_skb_orphan_partial(const struct sk_buff *skb)
skb              2014 net/core/sock.c 	if (skb->decrypted)
skb              2017 net/core/sock.c 	return (skb->destructor == sock_wfree ||
skb              2018 net/core/sock.c 		(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
skb              2027 net/core/sock.c void skb_orphan_partial(struct sk_buff *skb)
skb              2029 net/core/sock.c 	if (skb_is_tcp_pure_ack(skb))
skb              2032 net/core/sock.c 	if (can_skb_orphan_partial(skb)) {
skb              2033 net/core/sock.c 		struct sock *sk = skb->sk;
skb              2036 net/core/sock.c 			WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
skb              2037 net/core/sock.c 			skb->destructor = sock_efree;
skb              2040 net/core/sock.c 		skb_orphan(skb);
skb              2048 net/core/sock.c void sock_rfree(struct sk_buff *skb)
skb              2050 net/core/sock.c 	struct sock *sk = skb->sk;
skb              2051 net/core/sock.c 	unsigned int len = skb->truesize;
skb              2062 net/core/sock.c void sock_efree(struct sk_buff *skb)
skb              2064 net/core/sock.c 	sock_put(skb->sk);
skb              2098 net/core/sock.c 		struct sk_buff *skb = alloc_skb(size, priority);
skb              2100 net/core/sock.c 		if (skb) {
skb              2101 net/core/sock.c 			skb_set_owner_w(skb, sk);
skb              2102 net/core/sock.c 			return skb;
skb              2109 net/core/sock.c static void sock_ofree(struct sk_buff *skb)
skb              2111 net/core/sock.c 	struct sock *sk = skb->sk;
skb              2113 net/core/sock.c 	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
skb              2119 net/core/sock.c 	struct sk_buff *skb;
skb              2126 net/core/sock.c 	skb = alloc_skb(size, priority);
skb              2127 net/core/sock.c 	if (!skb)
skb              2130 net/core/sock.c 	atomic_add(skb->truesize, &sk->sk_omem_alloc);
skb              2131 net/core/sock.c 	skb->sk = sk;
skb              2132 net/core/sock.c 	skb->destructor = sock_ofree;
skb              2133 net/core/sock.c 	return skb;
skb              2221 net/core/sock.c 	struct sk_buff *skb;
skb              2247 net/core/sock.c 	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
skb              2249 net/core/sock.c 	if (skb)
skb              2250 net/core/sock.c 		skb_set_owner_w(skb, sk);
skb              2251 net/core/sock.c 	return skb;
skb              2430 net/core/sock.c 	struct sk_buff *skb, *next;
skb              2432 net/core/sock.c 	while ((skb = sk->sk_backlog.head) != NULL) {
skb              2438 net/core/sock.c 			next = skb->next;
skb              2440 net/core/sock.c 			WARN_ON_ONCE(skb_dst_is_noref(skb));
skb              2441 net/core/sock.c 			skb_mark_not_on_list(skb);
skb              2442 net/core/sock.c 			sk_backlog_rcv(sk, skb);
skb              2446 net/core/sock.c 			skb = next;
skb              2447 net/core/sock.c 		} while (skb != NULL);
skb              2477 net/core/sock.c int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
skb              2484 net/core/sock.c 	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
skb              3069 net/core/sock.c 	struct sk_buff *skb;
skb              3073 net/core/sock.c 	skb = sock_dequeue_err_skb(sk);
skb              3074 net/core/sock.c 	if (skb == NULL)
skb              3077 net/core/sock.c 	copied = skb->len;
skb              3082 net/core/sock.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb              3086 net/core/sock.c 	sock_recv_timestamp(msg, sk, skb);
skb              3088 net/core/sock.c 	serr = SKB_EXT_ERR(skb);
skb              3095 net/core/sock.c 	kfree_skb(skb);
skb                19 net/core/sock_diag.c static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
skb                60 net/core/sock_diag.c int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
skb                66 net/core/sock_diag.c 	return nla_put(skb, attrtype, sizeof(mem), &mem);
skb                71 net/core/sock_diag.c 			     struct sk_buff *skb, int attrtype)
skb                80 net/core/sock_diag.c 		nla_reserve(skb, attrtype, 0);
skb                95 net/core/sock_diag.c 	attr = nla_reserve(skb, attrtype, flen);
skb               126 net/core/sock_diag.c 	struct sk_buff *skb;
skb               132 net/core/sock_diag.c 	skb = nlmsg_new(sock_diag_nlmsg_size(), GFP_KERNEL);
skb               133 net/core/sock_diag.c 	if (!skb)
skb               139 net/core/sock_diag.c 		err = hndl->get_info(skb, sk);
skb               143 net/core/sock_diag.c 		nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
skb               146 net/core/sock_diag.c 		kfree_skb(skb);
skb               164 net/core/sock_diag.c void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
skb               172 net/core/sock_diag.c void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
skb               212 net/core/sock_diag.c static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
skb               233 net/core/sock_diag.c 		err = hndl->dump(skb, nlh);
skb               235 net/core/sock_diag.c 		err = hndl->destroy(skb, nlh);
skb               243 net/core/sock_diag.c static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               256 net/core/sock_diag.c 			ret = inet_rcv_compat(skb, nlh);
skb               264 net/core/sock_diag.c 		return __sock_diag_cmd(skb, nlh);
skb               272 net/core/sock_diag.c static void sock_diag_rcv(struct sk_buff *skb)
skb               275 net/core/sock_diag.c 	netlink_rcv_skb(skb, &sock_diag_rcv_msg);
skb               461 net/core/sock_map.c BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
skb               464 net/core/sock_map.c 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
skb               922 net/core/sock_map.c BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
skb               925 net/core/sock_map.c 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
skb               226 net/core/sock_reuseport.c 				   struct bpf_prog *prog, struct sk_buff *skb,
skb               232 net/core/sock_reuseport.c 	if (skb_shared(skb)) {
skb               233 net/core/sock_reuseport.c 		nskb = skb_clone(skb, GFP_ATOMIC);
skb               236 net/core/sock_reuseport.c 		skb = nskb;
skb               240 net/core/sock_reuseport.c 	if (!pskb_pull(skb, hdr_len)) {
skb               244 net/core/sock_reuseport.c 	index = bpf_prog_run_save_cb(prog, skb);
skb               245 net/core/sock_reuseport.c 	__skb_push(skb, hdr_len);
skb               267 net/core/sock_reuseport.c 				   struct sk_buff *skb,
skb               288 net/core/sock_reuseport.c 		if (!prog || !skb)
skb               292 net/core/sock_reuseport.c 			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
skb               294 net/core/sock_reuseport.c 			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);
skb                13 net/core/timestamping.c static unsigned int classify(const struct sk_buff *skb)
skb                15 net/core/timestamping.c 	if (likely(skb->dev && skb->dev->phydev &&
skb                16 net/core/timestamping.c 		   skb->dev->phydev->drv))
skb                17 net/core/timestamping.c 		return ptp_classify_raw(skb);
skb                22 net/core/timestamping.c void skb_clone_tx_timestamp(struct sk_buff *skb)
skb                28 net/core/timestamping.c 	if (!skb->sk)
skb                31 net/core/timestamping.c 	type = classify(skb);
skb                35 net/core/timestamping.c 	phydev = skb->dev->phydev;
skb                37 net/core/timestamping.c 		clone = skb_clone_sk(skb);
skb                45 net/core/timestamping.c bool skb_defer_rx_timestamp(struct sk_buff *skb)
skb                50 net/core/timestamping.c 	if (!skb->dev || !skb->dev->phydev || !skb->dev->phydev->drv)
skb                53 net/core/timestamping.c 	if (skb_headroom(skb) < ETH_HLEN)
skb                56 net/core/timestamping.c 	__skb_push(skb, ETH_HLEN);
skb                58 net/core/timestamping.c 	type = ptp_classify_raw(skb);
skb                60 net/core/timestamping.c 	__skb_pull(skb, ETH_HLEN);
skb                65 net/core/timestamping.c 	phydev = skb->dev->phydev;
skb                67 net/core/timestamping.c 		return phydev->drv->rxtstamp(phydev, skb, type);
skb                 9 net/core/tso.c int tso_count_descs(struct sk_buff *skb)
skb                12 net/core/tso.c 	return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
skb                16 net/core/tso.c void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
skb                20 net/core/tso.c 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb                21 net/core/tso.c 	int mac_hdr_len = skb_network_offset(skb);
skb                23 net/core/tso.c 	memcpy(hdr, skb->data, hdr_len);
skb                33 net/core/tso.c 		iph->payload_len = htons(size + tcp_hdrlen(skb));
skb                35 net/core/tso.c 	tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
skb                47 net/core/tso.c void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
skb                54 net/core/tso.c 	    (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
skb                55 net/core/tso.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
skb                65 net/core/tso.c void tso_start(struct sk_buff *skb, struct tso_t *tso)
skb                67 net/core/tso.c 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb                69 net/core/tso.c 	tso->ip_id = ntohs(ip_hdr(skb)->id);
skb                70 net/core/tso.c 	tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
skb                72 net/core/tso.c 	tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6);
skb                75 net/core/tso.c 	tso->size = skb_headlen(skb) - hdr_len;
skb                76 net/core/tso.c 	tso->data = skb->data + hdr_len;
skb                78 net/core/tso.c 	    (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
skb                79 net/core/tso.c 		skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
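The tso_* helpers let a driver without hardware TSO segment a GSO skb itself: tso_start() latches the IP id and TCP sequence, tso_build_hdr() emits a patched header per segment, and tso_build_data() walks the payload across frags. A condensed driver loop in the mvneta style, assuming the five-argument tso_build_hdr() whose trailing is_last flag is truncated in the listing above:

	char hdr[TSO_HEADER_SIZE];
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total = skb->len - hdr_len;
	struct tso_t tso;

	tso_start(skb, &tso);
	while (total > 0) {
		int seg = min_t(int, skb_shinfo(skb)->gso_size, total);

		total -= seg;
		tso_build_hdr(skb, hdr, &tso, seg, total == 0);
		/* DMA-map hdr as the segment's first descriptor */

		while (seg > 0) {
			int sz = min_t(int, tso.size, seg);

			/* DMA-map sz bytes at tso.data */
			seg -= sz;
			tso_build_data(skb, &tso, sz);
		}
	}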
skb               425 net/core/utils.c void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
skb               428 net/core/utils.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               430 net/core/utils.c 		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
skb               431 net/core/utils.c 			skb->csum = ~csum_add(csum_sub(~(skb->csum),
skb               458 net/core/utils.c void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
skb               466 net/core/utils.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               475 net/core/utils.c void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
skb               478 net/core/utils.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               480 net/core/utils.c 		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
skb               481 net/core/utils.c 			skb->csum = ~csum_add(diff, ~skb->csum);
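inet_proto_csum_replace4() fixes a transport checksum after a 4-byte field rewrite, skipping the work for CHECKSUM_PARTIAL and compensating skb->csum for CHECKSUM_COMPLETE when the field feeds the pseudo-header. A NAT-style source-address rewrite sketch (new_saddr is hypothetical):

	struct iphdr  *iph = ip_hdr(skb);
	struct tcphdr *th  = tcp_hdr(skb);

	csum_replace4(&iph->check, iph->saddr, new_saddr);	/* IP header */
	inet_proto_csum_replace4(&th->check, skb,
				 iph->saddr, new_saddr, true);	/* pseudo-hdr */
	iph->saddr = new_saddr;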
skb               185 net/dcb/dcbnl.c 	struct sk_buff *skb;
skb               189 net/dcb/dcbnl.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb               190 net/dcb/dcbnl.c 	if (!skb)
skb               193 net/dcb/dcbnl.c 	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
skb               204 net/dcb/dcbnl.c 	return skb;
skb               208 net/dcb/dcbnl.c 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               214 net/dcb/dcbnl.c 	return nla_put_u8(skb, DCB_ATTR_STATE,
skb               219 net/dcb/dcbnl.c 			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               239 net/dcb/dcbnl.c 	nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG);
skb               252 net/dcb/dcbnl.c 		ret = nla_put_u8(skb, i, value);
skb               254 net/dcb/dcbnl.c 			nla_nest_cancel(skb, nest);
skb               258 net/dcb/dcbnl.c 	nla_nest_end(skb, nest);
skb               264 net/dcb/dcbnl.c 				u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               274 net/dcb/dcbnl.c 	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
skb               278 net/dcb/dcbnl.c 			u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               298 net/dcb/dcbnl.c 	nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP);
skb               310 net/dcb/dcbnl.c 			ret = nla_put_u8(skb, i, value);
skb               312 net/dcb/dcbnl.c 				nla_nest_cancel(skb, nest);
skb               317 net/dcb/dcbnl.c 	nla_nest_end(skb, nest);
skb               323 net/dcb/dcbnl.c 			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               343 net/dcb/dcbnl.c 	nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS);
skb               356 net/dcb/dcbnl.c 			ret = nla_put_u8(skb, i, value);
skb               358 net/dcb/dcbnl.c 				nla_nest_cancel(skb, nest);
skb               364 net/dcb/dcbnl.c 	nla_nest_end(skb, nest);
skb               370 net/dcb/dcbnl.c 			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               400 net/dcb/dcbnl.c 	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
skb               404 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               409 net/dcb/dcbnl.c 	return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
skb               414 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               428 net/dcb/dcbnl.c 	return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
skb               432 net/dcb/dcbnl.c 			u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               476 net/dcb/dcbnl.c 	app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP);
skb               480 net/dcb/dcbnl.c 	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
skb               484 net/dcb/dcbnl.c 	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
skb               488 net/dcb/dcbnl.c 	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
skb               492 net/dcb/dcbnl.c 	nla_nest_end(skb, app_nest);
skb               497 net/dcb/dcbnl.c 	nla_nest_cancel(skb, app_nest);
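Every dcbnl reply builder above follows the same netlink shape: open a nest with nla_nest_start_noflag(), emit attributes, then nla_nest_end() on success or nla_nest_cancel() on overflow. A minimal sketch of that shape (attribute ids from uapi dcbnl.h; the helper name is hypothetical):

	static int fill_pfc_nest(struct sk_buff *skb, u8 up0)
	{
		struct nlattr *nest;

		nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG);
		if (!nest)
			return -EMSGSIZE;
		if (nla_put_u8(skb, DCB_PFC_UP_ATTR_0, up0)) {
			nla_nest_cancel(skb, nest);
			return -EMSGSIZE;
		}
		nla_nest_end(skb, nest);
		return 0;
	}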
skb               502 net/dcb/dcbnl.c 			u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               545 net/dcb/dcbnl.c 	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
skb               552 net/dcb/dcbnl.c 			     struct nlattr **tb, struct sk_buff *skb, int dir)
skb               577 net/dcb/dcbnl.c 	pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG);
skb               598 net/dcb/dcbnl.c 		param_nest = nla_nest_start_noflag(skb, i);
skb               621 net/dcb/dcbnl.c 			ret = nla_put_u8(skb,
skb               628 net/dcb/dcbnl.c 			ret = nla_put_u8(skb,
skb               635 net/dcb/dcbnl.c 			ret = nla_put_u8(skb,
skb               642 net/dcb/dcbnl.c 			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
skb               647 net/dcb/dcbnl.c 		nla_nest_end(skb, param_nest);
skb               670 net/dcb/dcbnl.c 		ret = nla_put_u8(skb, i, tc_pct);
skb               675 net/dcb/dcbnl.c 	nla_nest_end(skb, pg_nest);
skb               680 net/dcb/dcbnl.c 	nla_nest_cancel(skb, param_nest);
skb               682 net/dcb/dcbnl.c 	nla_nest_cancel(skb, pg_nest);
skb               688 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               690 net/dcb/dcbnl.c 	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
skb               694 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               696 net/dcb/dcbnl.c 	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
skb               700 net/dcb/dcbnl.c 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               712 net/dcb/dcbnl.c 	return nla_put_u8(skb, DCB_ATTR_STATE,
skb               717 net/dcb/dcbnl.c 			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               744 net/dcb/dcbnl.c 	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
skb               748 net/dcb/dcbnl.c 			u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               758 net/dcb/dcbnl.c 	ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
skb               766 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
skb               855 net/dcb/dcbnl.c 	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
skb               859 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               861 net/dcb/dcbnl.c 	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
skb               865 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               867 net/dcb/dcbnl.c 	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
skb               871 net/dcb/dcbnl.c 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               894 net/dcb/dcbnl.c 	bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN);
skb               907 net/dcb/dcbnl.c 		ret = nla_put_u8(skb, i, value_byte);
skb               918 net/dcb/dcbnl.c 		ret = nla_put_u32(skb, i, value_integer);
skb               923 net/dcb/dcbnl.c 	nla_nest_end(skb, bcn_nest);
skb               928 net/dcb/dcbnl.c 	nla_nest_cancel(skb, bcn_nest);
skb               933 net/dcb/dcbnl.c 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb               970 net/dcb/dcbnl.c 	return nla_put_u8(skb, DCB_ATTR_BCN, 0);
skb               973 net/dcb/dcbnl.c static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
skb              1008 net/dcb/dcbnl.c 		app = nla_nest_start_noflag(skb, app_nested_type);
skb              1013 net/dcb/dcbnl.c 		    nla_put(skb, app_info_type, sizeof(info), &info))
skb              1017 net/dcb/dcbnl.c 			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
skb              1021 net/dcb/dcbnl.c 		nla_nest_end(skb, app);
skb              1031 net/dcb/dcbnl.c static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
skb              1039 net/dcb/dcbnl.c 	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
skb              1042 net/dcb/dcbnl.c 	ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE);
skb              1051 net/dcb/dcbnl.c 		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
skb              1060 net/dcb/dcbnl.c 			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
skb              1073 net/dcb/dcbnl.c 			err = nla_put(skb, DCB_ATTR_IEEE_QCN,
skb              1086 net/dcb/dcbnl.c 			err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
skb              1098 net/dcb/dcbnl.c 		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
skb              1108 net/dcb/dcbnl.c 		    nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
skb              1112 net/dcb/dcbnl.c 	app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE);
skb              1119 net/dcb/dcbnl.c 			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
skb              1134 net/dcb/dcbnl.c 	nla_nest_end(skb, app);
skb              1142 net/dcb/dcbnl.c 		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
skb              1151 net/dcb/dcbnl.c 		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
skb              1156 net/dcb/dcbnl.c 		err = dcbnl_build_peer_app(netdev, skb,
skb              1164 net/dcb/dcbnl.c 	nla_nest_end(skb, ieee);
skb              1166 net/dcb/dcbnl.c 		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
skb              1174 net/dcb/dcbnl.c static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
skb              1180 net/dcb/dcbnl.c 	struct nlattr *pg = nla_nest_start_noflag(skb, i);
skb              1186 net/dcb/dcbnl.c 		struct nlattr *tc_nest = nla_nest_start_noflag(skb, i);
skb              1203 net/dcb/dcbnl.c 		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
skb              1204 net/dcb/dcbnl.c 		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
skb              1205 net/dcb/dcbnl.c 		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
skb              1206 net/dcb/dcbnl.c 		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
skb              1208 net/dcb/dcbnl.c 		nla_nest_end(skb, tc_nest);
skb              1220 net/dcb/dcbnl.c 		if (nla_put_u8(skb, i, tc_pct))
skb              1223 net/dcb/dcbnl.c 	nla_nest_end(skb, pg);
skb              1227 net/dcb/dcbnl.c static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
skb              1235 net/dcb/dcbnl.c 	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
skb              1237 net/dcb/dcbnl.c 	cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE);
skb              1243 net/dcb/dcbnl.c 		err = dcbnl_cee_pg_fill(skb, netdev, 1);
skb              1249 net/dcb/dcbnl.c 		err = dcbnl_cee_pg_fill(skb, netdev, 0);
skb              1256 net/dcb/dcbnl.c 		struct nlattr *pfc_nest = nla_nest_start_noflag(skb,
skb              1264 net/dcb/dcbnl.c 			if (nla_put_u8(skb, i, value))
skb              1267 net/dcb/dcbnl.c 		nla_nest_end(skb, pfc_nest);
skb              1272 net/dcb/dcbnl.c 	app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE);
skb              1278 net/dcb/dcbnl.c 			struct nlattr *app_nest = nla_nest_start_noflag(skb,
skb              1283 net/dcb/dcbnl.c 			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
skb              1288 net/dcb/dcbnl.c 			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
skb              1293 net/dcb/dcbnl.c 			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
skb              1298 net/dcb/dcbnl.c 			nla_nest_end(skb, app_nest);
skb              1301 net/dcb/dcbnl.c 	nla_nest_end(skb, app);
skb              1312 net/dcb/dcbnl.c 		struct nlattr *feat = nla_nest_start_noflag(skb,
skb              1320 net/dcb/dcbnl.c 			    nla_put_u8(skb, i, value))
skb              1323 net/dcb/dcbnl.c 		nla_nest_end(skb, feat);
skb              1332 net/dcb/dcbnl.c 		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
skb              1341 net/dcb/dcbnl.c 		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
skb              1346 net/dcb/dcbnl.c 		err = dcbnl_build_peer_app(netdev, skb,
skb              1353 net/dcb/dcbnl.c 	nla_nest_end(skb, cee);
skb              1357 net/dcb/dcbnl.c 		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
skb              1374 net/dcb/dcbnl.c 	struct sk_buff *skb;
skb              1382 net/dcb/dcbnl.c 	skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
skb              1383 net/dcb/dcbnl.c 	if (!skb)
skb              1387 net/dcb/dcbnl.c 		err = dcbnl_ieee_fill(skb, dev);
skb              1389 net/dcb/dcbnl.c 		err = dcbnl_cee_fill(skb, dev);
skb              1393 net/dcb/dcbnl.c 		nlmsg_free(skb);
skb              1397 net/dcb/dcbnl.c 		nlmsg_end(skb, nlh);
skb              1398 net/dcb/dcbnl.c 		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
skb              1425 net/dcb/dcbnl.c 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb              1509 net/dcb/dcbnl.c 	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
skb              1515 net/dcb/dcbnl.c 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb              1522 net/dcb/dcbnl.c 	return dcbnl_ieee_fill(skb, netdev);
skb              1526 net/dcb/dcbnl.c 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb              1564 net/dcb/dcbnl.c 	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
skb              1572 net/dcb/dcbnl.c 			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb              1577 net/dcb/dcbnl.c 	return nla_put_u8(skb, DCB_ATTR_DCBX,
skb              1582 net/dcb/dcbnl.c 			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb              1594 net/dcb/dcbnl.c 	return nla_put_u8(skb, DCB_ATTR_DCBX,
skb              1599 net/dcb/dcbnl.c 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb              1618 net/dcb/dcbnl.c 	nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG);
skb              1631 net/dcb/dcbnl.c 			ret = nla_put_u8(skb, i, value);
skb              1634 net/dcb/dcbnl.c 			nla_nest_cancel(skb, nest);
skb              1638 net/dcb/dcbnl.c 	nla_nest_end(skb, nest);
skb              1645 net/dcb/dcbnl.c 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb              1676 net/dcb/dcbnl.c 	ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
skb              1683 net/dcb/dcbnl.c 			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
skb              1690 net/dcb/dcbnl.c 	return dcbnl_cee_fill(skb, netdev);
skb              1732 net/dcb/dcbnl.c static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1735 net/dcb/dcbnl.c 	struct net *net = sock_net(skb->sk);
skb              1739 net/dcb/dcbnl.c 	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
skb              1745 net/dcb/dcbnl.c 	if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
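
The dcbnl fill functions indexed above all follow one netlink attribute pattern: open a nested attribute with nla_nest_start_noflag(), emit scalars with nla_put_*(), close with nla_nest_end(), and unwind with nla_nest_cancel() when an attribute does not fit. A minimal kernel-style sketch of that shape (MY_ATTR_NEST and MY_ATTR_VALUE are hypothetical attribute types, not part of the DCB API):

#include <linux/errno.h>
#include <net/netlink.h>

enum { MY_ATTR_NEST = 1, MY_ATTR_VALUE = 2 };	/* hypothetical types */

static int fill_one_nest(struct sk_buff *skb, u8 value)
{
	struct nlattr *nest = nla_nest_start_noflag(skb, MY_ATTR_NEST);

	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u8(skb, MY_ATTR_VALUE, value)) {
		nla_nest_cancel(skb, nest);	/* trim the partial nest */
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);		/* patch in the nest length */
	return 0;
}
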
skb               246 net/dccp/ackvec.c void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
skb               248 net/dccp/ackvec.c 	u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
skb               108 net/dccp/ackvec.h void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
skb                61 net/dccp/ccid.h 						  struct sk_buff *skb);
skb                65 net/dccp/ccid.h 						     struct sk_buff *skb);
skb                67 net/dccp/ccid.h 						  struct sk_buff *skb);
skb                71 net/dccp/ccid.h 						  struct sk_buff *skb);
skb               164 net/dccp/ccid.h 					 struct sk_buff *skb)
skb               167 net/dccp/ccid.h 		return ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
skb               179 net/dccp/ccid.h 					  struct sk_buff *skb)
skb               182 net/dccp/ccid.h 		ccid->ccid_ops->ccid_hc_rx_packet_recv(sk, skb);
skb               186 net/dccp/ccid.h 					  struct sk_buff *skb)
skb               189 net/dccp/ccid.h 		ccid->ccid_ops->ccid_hc_tx_packet_recv(sk, skb);
skb               220 net/dccp/ccid.h 					    struct sk_buff *skb)
skb               223 net/dccp/ccid.h 		return ccid->ccid_ops->ccid_hc_rx_insert_options(sk, skb);
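
The ccid.h wrappers above dispatch through a per-CCID operations table and treat a NULL hook as "not implemented" (for the tx send hook, returning 0 means the packet may go out immediately). A self-contained plain-C sketch of that indirection; my_ccid_ops and its member names are illustrative, not the kernel's actual struct ccid_operations layout:

struct my_sock;				/* opaque stand-ins */
struct my_sk_buff;

struct my_ccid_ops {
	int  (*tx_send_packet)(struct my_sock *sk, struct my_sk_buff *skb);
	void (*rx_packet_recv)(struct my_sock *sk, struct my_sk_buff *skb);
};

/* Dispatch only when the module implements the hook. */
static int my_tx_send_packet(const struct my_ccid_ops *ops,
			     struct my_sock *sk, struct my_sk_buff *skb)
{
	return ops->tx_send_packet ? ops->tx_send_packet(sk, skb) : 0;
}
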
skb                67 net/dccp/ccids/ccid2.c static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
skb               506 net/dccp/ccids/ccid2.c static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
skb               518 net/dccp/ccids/ccid2.c 	seqno = DCCP_SKB_CB(skb)->dccpd_seq;
skb               555 net/dccp/ccids/ccid2.c 	if (dccp_packet_without_ack(skb))
skb               562 net/dccp/ccids/ccid2.c 	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
skb               758 net/dccp/ccids/ccid2.c static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
skb               762 net/dccp/ccids/ccid2.c 	if (!dccp_data_packet(skb))
skb               267 net/dccp/ccids/ccid3.c static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
skb               279 net/dccp/ccids/ccid3.c 	if (unlikely(skb->len == 0))
skb               291 net/dccp/ccids/ccid3.c 		hc->tx_s = skb->len;
skb               337 net/dccp/ccids/ccid3.c 	DCCP_SKB_CB(skb)->dccpd_ccval  = hc->tx_last_win_count;
skb               354 net/dccp/ccids/ccid3.c static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
skb               363 net/dccp/ccids/ccid3.c 	if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
skb               364 net/dccp/ccids/ccid3.c 	      DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
skb               374 net/dccp/ccids/ccid3.c 	acked = tfrc_tx_hist_find_entry(hc->tx_hist, dccp_hdr_ack_seq(skb));
skb               585 net/dccp/ccids/ccid3.c 				      const struct sk_buff *skb,
skb               626 net/dccp/ccids/ccid3.c 	hc->rx_last_counter	    = dccp_hdr(skb)->dccph_ccval;
skb               633 net/dccp/ccids/ccid3.c static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
skb               641 net/dccp/ccids/ccid3.c 	if (dccp_packet_without_ack(skb))
skb               647 net/dccp/ccids/ccid3.c 	if (dccp_insert_option(skb, TFRC_OPT_LOSS_EVENT_RATE,
skb               649 net/dccp/ccids/ccid3.c 	    dccp_insert_option(skb, TFRC_OPT_RECEIVE_RATE,
skb               701 net/dccp/ccids/ccid3.c static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
skb               706 net/dccp/ccids/ccid3.c 	const bool is_data_packet = dccp_data_packet(skb);
skb               710 net/dccp/ccids/ccid3.c 			const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4;
skb               723 net/dccp/ccids/ccid3.c 	if (tfrc_rx_hist_duplicate(&hc->rx_hist, skb))
skb               727 net/dccp/ccids/ccid3.c 		const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4;
skb               739 net/dccp/ccids/ccid3.c 				skb, ndp, ccid3_first_li, sk)) {
skb               754 net/dccp/ccids/ccid3.c 		const u32 sample = tfrc_rx_hist_sample_rtt(&hc->rx_hist, skb);
skb               763 net/dccp/ccids/ccid3.c 	} else if (tfrc_lh_update_i_mean(&hc->rx_li_hist, skb)) {
skb               774 net/dccp/ccids/ccid3.c 	if (SUB16(dccp_hdr(skb)->dccph_ccval, hc->rx_last_counter) > 3)
skb               778 net/dccp/ccids/ccid3.c 	tfrc_rx_hist_add_packet(&hc->rx_hist, skb, ndp);
skb               782 net/dccp/ccids/ccid3.c 		ccid3_hc_rx_send_feedback(sk, skb, do_feedback);
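
Several ccid3 receiver lines above compute the data payload as skb->len - dccp_hdr(skb)->dccph_doff * 4: dccph_doff is the Data Offset field, counting the DCCP header plus options in 32-bit words. A plain-C restatement of that arithmetic:

#include <stdint.h>

/* Payload bytes of a DCCP packet: total length minus the header area. */
static uint32_t dccp_payload_len(uint32_t pkt_len, uint8_t doff_words)
{
	uint32_t hdr_len = (uint32_t)doff_words * 4;	/* words -> bytes */

	return pkt_len > hdr_len ? pkt_len - hdr_len : 0;
}
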
skb                84 net/dccp/ccids/lib/loss_interval.c u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
skb                93 net/dccp/ccids/lib/loss_interval.c 	len = dccp_delta_seqno(cur->li_seqno, DCCP_SKB_CB(skb)->dccpd_seq) + 1;
skb                98 net/dccp/ccids/lib/loss_interval.c 	if (SUB16(dccp_hdr(skb)->dccph_ccval, cur->li_ccval) > 4)
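
SUB16(), used above in both loss_interval.c and ccid3.c, is circular subtraction on the 4-bit CCVal window counter, which wraps mod 16. Assuming the usual definition from net/dccp/dccp.h, the arithmetic is:

#include <stdint.h>

/* Distance a - b on a counter that wraps at 16 (CCVal is 4 bits).
 * E.g. sub16(1, 14) == 3: the counter advanced 3 steps across the wrap. */
static inline uint8_t sub16(uint8_t a, uint8_t b)
{
	return (a + 16 - b) & 0xf;
}
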
skb                98 net/dccp/ccids/lib/packet_history.c 					       const struct sk_buff *skb,
skb               101 net/dccp/ccids/lib/packet_history.c 	const struct dccp_hdr *dh = dccp_hdr(skb);
skb               103 net/dccp/ccids/lib/packet_history.c 	entry->tfrchrx_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
skb               111 net/dccp/ccids/lib/packet_history.c 			     const struct sk_buff *skb,
skb               116 net/dccp/ccids/lib/packet_history.c 	tfrc_rx_hist_entry_from_skb(entry, skb, ndp);
skb               120 net/dccp/ccids/lib/packet_history.c int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb)
skb               122 net/dccp/ccids/lib/packet_history.c 	const u64 seq = DCCP_SKB_CB(skb)->dccpd_seq;
skb               152 net/dccp/ccids/lib/packet_history.c static void __do_track_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n1)
skb               155 net/dccp/ccids/lib/packet_history.c 	    s1 = DCCP_SKB_CB(skb)->dccpd_seq;
skb               159 net/dccp/ccids/lib/packet_history.c 		tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n1);
skb               163 net/dccp/ccids/lib/packet_history.c static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2)
skb               167 net/dccp/ccids/lib/packet_history.c 	    s2 = DCCP_SKB_CB(skb)->dccpd_seq;
skb               171 net/dccp/ccids/lib/packet_history.c 		tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2);
skb               186 net/dccp/ccids/lib/packet_history.c 			tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n2);
skb               194 net/dccp/ccids/lib/packet_history.c 		tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n2);
skb               200 net/dccp/ccids/lib/packet_history.c static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3)
skb               205 net/dccp/ccids/lib/packet_history.c 	    s3 = DCCP_SKB_CB(skb)->dccpd_seq;
skb               209 net/dccp/ccids/lib/packet_history.c 		tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3);
skb               220 net/dccp/ccids/lib/packet_history.c 		tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3);
skb               245 net/dccp/ccids/lib/packet_history.c 			tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n3);
skb               256 net/dccp/ccids/lib/packet_history.c 	tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n3);
skb               313 net/dccp/ccids/lib/packet_history.c 			struct sk_buff *skb, const u64 ndp,
skb               319 net/dccp/ccids/lib/packet_history.c 		__do_track_loss(h, skb, ndp);
skb               321 net/dccp/ccids/lib/packet_history.c 		__one_after_loss(h, skb, ndp);
skb               324 net/dccp/ccids/lib/packet_history.c 	} else if (__two_after_loss(h, skb, ndp)) {
skb               389 net/dccp/ccids/lib/packet_history.c u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb)
skb               392 net/dccp/ccids/lib/packet_history.c 	    delta_v = SUB16(dccp_hdr(skb)->dccph_ccval,
skb               129 net/dccp/ccids/lib/packet_history.h void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, const struct sk_buff *skb,
skb               132 net/dccp/ccids/lib/packet_history.h int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
skb               136 net/dccp/ccids/lib/packet_history.h 			struct sk_buff *skb, const u64 ndp,
skb               138 net/dccp/ccids/lib/packet_history.h u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb);
skb               205 net/dccp/dccp.h static inline unsigned int dccp_csum_coverage(const struct sk_buff *skb)
skb               207 net/dccp/dccp.h 	const struct dccp_hdr* dh = dccp_hdr(skb);
skb               210 net/dccp/dccp.h 		return skb->len;
skb               214 net/dccp/dccp.h static inline void dccp_csum_outgoing(struct sk_buff *skb)
skb               216 net/dccp/dccp.h 	unsigned int cov = dccp_csum_coverage(skb);
skb               218 net/dccp/dccp.h 	if (cov >= skb->len)
skb               219 net/dccp/dccp.h 		dccp_hdr(skb)->dccph_cscov = 0;
skb               221 net/dccp/dccp.h 	skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
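
dccp_csum_coverage() above implements the RFC 4340, 9.2 rule: CsCov == 0 means the checksum covers the whole packet, otherwise it covers the header area plus (CsCov - 1) 32-bit words of payload (the non-zero branch is elided by this index because its line does not mention skb). dccp_csum_outgoing() then clamps the result to skb->len before calling skb_checksum(). A plain-C sketch of that computation:

#include <stdint.h>

/* Checksum coverage in bytes; doff and cscov are the 32-bit-word
 * Data Offset and CsCov header fields. */
static uint32_t csum_coverage(uint32_t pkt_len, uint8_t doff, uint8_t cscov)
{
	uint32_t cov;

	if (cscov == 0)
		return pkt_len;				/* cover everything */
	cov = ((uint32_t)doff + cscov - 1) * 4;		/* hdr + (CsCov-1) words */
	return cov < pkt_len ? cov : pkt_len;		/* clamp, as the caller does */
}
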
skb               224 net/dccp/dccp.h void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
skb               229 net/dccp/dccp.h void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
skb               238 net/dccp/dccp.h void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
skb               240 net/dccp/dccp.h void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
skb               266 net/dccp/dccp.h 		    struct sk_buff const *skb);
skb               268 net/dccp/dccp.h int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
skb               272 net/dccp/dccp.h 				       const struct sk_buff *skb);
skb               274 net/dccp/dccp.h int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
skb               276 net/dccp/dccp.h struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb,
skb               281 net/dccp/dccp.h struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
skb               285 net/dccp/dccp.h 		       struct sk_buff *skb);
skb               286 net/dccp/dccp.h int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
skb               288 net/dccp/dccp.h int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
skb               321 net/dccp/dccp.h struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
skb               324 net/dccp/dccp.h int dccp_invalid_packet(struct sk_buff *skb);
skb               368 net/dccp/dccp.h static inline int dccp_non_data_packet(const struct sk_buff *skb)
skb               370 net/dccp/dccp.h 	const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;
skb               381 net/dccp/dccp.h static inline int dccp_data_packet(const struct sk_buff *skb)
skb               383 net/dccp/dccp.h 	const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;
skb               391 net/dccp/dccp.h static inline int dccp_packet_without_ack(const struct sk_buff *skb)
skb               393 net/dccp/dccp.h 	const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;
skb               473 net/dccp/dccp.h 			  struct sk_buff *skb);
skb               477 net/dccp/dccp.h int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
skb               481 net/dccp/dccp.h int dccp_insert_option(struct sk_buff *skb, unsigned char option,
skb                48 net/dccp/diag.c static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb                51 net/dccp/diag.c 	inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
skb               630 net/dccp/feat.c 			  struct sk_buff *skb)
skb               662 net/dccp/feat.c 		if (dccp_insert_fn_opt(skb, opt, pos->feat_num, ptr, len, rpt))
skb               664 net/dccp/feat.c 		if (pos->needs_mandatory && dccp_insert_option_mandatory(skb))
skb               667 net/dccp/feat.c 		if (skb->sk->sk_state == DCCP_OPEN &&
skb               131 net/dccp/feat.h int dccp_insert_option_mandatory(struct sk_buff *skb);
skb               132 net/dccp/feat.h int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, u8 *val, u8 len,
skb                22 net/dccp/input.c static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
skb                24 net/dccp/input.c 	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
skb                25 net/dccp/input.c 	__skb_queue_tail(&sk->sk_receive_queue, skb);
skb                26 net/dccp/input.c 	skb_set_owner_r(skb, sk);
skb                30 net/dccp/input.c static void dccp_fin(struct sock *sk, struct sk_buff *skb)
skb                40 net/dccp/input.c 	dccp_enqueue_skb(sk, skb);
skb                43 net/dccp/input.c static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
skb                77 net/dccp/input.c 		dccp_fin(sk, skb);
skb                89 net/dccp/input.c static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
skb               100 net/dccp/input.c 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
skb               114 net/dccp/input.c 		dccp_fin(sk, skb);
skb               145 net/dccp/input.c static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
skb               147 net/dccp/input.c 	u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);
skb               152 net/dccp/input.c 	dccp_fin(sk, skb);
skb               159 net/dccp/input.c static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
skb               165 net/dccp/input.c 	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
skb               166 net/dccp/input.c 		dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
skb               167 net/dccp/input.c 	dccp_ackvec_input(av, skb);
skb               170 net/dccp/input.c static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
skb               176 net/dccp/input.c 		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
skb               182 net/dccp/input.c 		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
skb               185 net/dccp/input.c static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
skb               187 net/dccp/input.c 	const struct dccp_hdr *dh = dccp_hdr(skb);
skb               189 net/dccp/input.c 	u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq,
skb               190 net/dccp/input.c 			ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
skb               283 net/dccp/input.c static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
skb               288 net/dccp/input.c 	switch (dccp_hdr(skb)->dccph_type) {
skb               296 net/dccp/input.c 		dccp_enqueue_skb(sk, skb);
skb               309 net/dccp/input.c 		dccp_rcv_reset(sk, skb);
skb               312 net/dccp/input.c 		if (dccp_rcv_closereq(sk, skb))
skb               316 net/dccp/input.c 		if (dccp_rcv_close(sk, skb))
skb               339 net/dccp/input.c 				     DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
skb               341 net/dccp/input.c 			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
skb               346 net/dccp/input.c 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
skb               360 net/dccp/input.c 	__kfree_skb(skb);
skb               364 net/dccp/input.c int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
skb               367 net/dccp/input.c 	if (dccp_check_seqno(sk, skb))
skb               370 net/dccp/input.c 	if (dccp_parse_options(sk, NULL, skb))
skb               373 net/dccp/input.c 	dccp_handle_ackvec_processing(sk, skb);
skb               374 net/dccp/input.c 	dccp_deliver_input_to_ccids(sk, skb);
skb               376 net/dccp/input.c 	return __dccp_rcv_established(sk, skb, dh, len);
skb               378 net/dccp/input.c 	__kfree_skb(skb);
skb               385 net/dccp/input.c 					       struct sk_buff *skb,
skb               405 net/dccp/input.c 		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
skb               410 net/dccp/input.c 			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
skb               420 net/dccp/input.c 		if (dccp_parse_options(sk, NULL, skb))
skb               441 net/dccp/input.c 		dp->dccps_gsr = dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
skb               494 net/dccp/input.c 			__kfree_skb(skb);
skb               503 net/dccp/input.c 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
skb               507 net/dccp/input.c 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_ABORTED;
skb               518 net/dccp/input.c 						   struct sk_buff *skb,
skb               556 net/dccp/input.c 		dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
skb               561 net/dccp/input.c 			__dccp_rcv_established(sk, skb, dh, len);
skb               571 net/dccp/input.c int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
skb               575 net/dccp/input.c 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
skb               609 net/dccp/input.c 			acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
skb               614 net/dccp/input.c 			consume_skb(skb);
skb               629 net/dccp/input.c 	if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb))
skb               650 net/dccp/input.c 	if (dccp_parse_options(sk, NULL, skb))
skb               662 net/dccp/input.c 		dccp_rcv_reset(sk, skb);
skb               665 net/dccp/input.c 		if (dccp_rcv_closereq(sk, skb))
skb               669 net/dccp/input.c 		if (dccp_rcv_close(sk, skb))
skb               676 net/dccp/input.c 		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
skb               680 net/dccp/input.c 		__kfree_skb(skb);
skb               685 net/dccp/input.c 		dccp_handle_ackvec_processing(sk, skb);
skb               686 net/dccp/input.c 		dccp_deliver_input_to_ccids(sk, skb);
skb               689 net/dccp/input.c 		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
skb               709 net/dccp/input.c 		__kfree_skb(skb);
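
dccp_enqueue_skb() above is the whole hand-off from protocol processing to recvmsg(): strip the header so the receive queue holds payload only, queue the buffer, and charge it to the socket's receive memory (in the kernel the function then also wakes readers via sk->sk_data_ready()). A kernel-style sketch, where hdr_len stands in for dccp_hdr(skb)->dccph_doff * 4:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Caller holds the socket lock, hence the lock-free __skb_* primitives. */
static void enqueue_payload(struct sock *sk, struct sk_buff *skb,
			    unsigned int hdr_len)
{
	__skb_pull(skb, hdr_len);			/* drop header bytes */
	__skb_queue_tail(&sk->sk_receive_queue, skb);	/* FIFO for recvmsg() */
	skb_set_owner_r(skb, sk);			/* account rmem to sk */
	sk->sk_data_ready(sk);				/* wake blocked readers */
}
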
skb               186 net/dccp/ipv4.c static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
skb               191 net/dccp/ipv4.c 		dst->ops->redirect(dst, sk, skb);
skb               230 net/dccp/ipv4.c static int dccp_v4_err(struct sk_buff *skb, u32 info)
skb               232 net/dccp/ipv4.c 	const struct iphdr *iph = (struct iphdr *)skb->data;
skb               237 net/dccp/ipv4.c 	const int type = icmp_hdr(skb)->type;
skb               238 net/dccp/ipv4.c 	const int code = icmp_hdr(skb)->code;
skb               242 net/dccp/ipv4.c 	struct net *net = dev_net(skb->dev);
skb               250 net/dccp/ipv4.c 	dh = (struct dccp_hdr *)(skb->data + offset);
skb               255 net/dccp/ipv4.c 				       inet_iif(skb), 0);
skb               291 net/dccp/ipv4.c 			dccp_do_redirect(skb, sk);
skb               361 net/dccp/ipv4.c static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
skb               364 net/dccp/ipv4.c 	return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
skb               367 net/dccp/ipv4.c void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
skb               370 net/dccp/ipv4.c 	struct dccp_hdr *dh = dccp_hdr(skb);
skb               372 net/dccp/ipv4.c 	dccp_csum_outgoing(skb);
skb               373 net/dccp/ipv4.c 	dh->dccph_checksum = dccp_v4_csum_finish(skb,
skb               379 net/dccp/ipv4.c static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
skb               381 net/dccp/ipv4.c 	return secure_dccp_sequence_number(ip_hdr(skb)->daddr,
skb               382 net/dccp/ipv4.c 					   ip_hdr(skb)->saddr,
skb               383 net/dccp/ipv4.c 					   dccp_hdr(skb)->dccph_dport,
skb               384 net/dccp/ipv4.c 					   dccp_hdr(skb)->dccph_sport);
skb               394 net/dccp/ipv4.c 				       struct sk_buff *skb,
skb               407 net/dccp/ipv4.c 	newsk = dccp_create_openreq_child(sk, req, skb);
skb               417 net/dccp/ipv4.c 	newinet->mc_index  = inet_iif(skb);
skb               418 net/dccp/ipv4.c 	newinet->mc_ttl	   = ip_hdr(skb)->ttl;
skb               453 net/dccp/ipv4.c 					   struct sk_buff *skb)
skb               456 net/dccp/ipv4.c 	const struct iphdr *iph = ip_hdr(skb);
skb               458 net/dccp/ipv4.c 		.flowi4_oif = inet_iif(skb),
skb               463 net/dccp/ipv4.c 		.fl4_sport = dccp_hdr(skb)->dccph_dport,
skb               464 net/dccp/ipv4.c 		.fl4_dport = dccp_hdr(skb)->dccph_sport,
skb               467 net/dccp/ipv4.c 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
skb               480 net/dccp/ipv4.c 	struct sk_buff *skb;
skb               488 net/dccp/ipv4.c 	skb = dccp_make_response(sk, dst, req);
skb               489 net/dccp/ipv4.c 	if (skb != NULL) {
skb               491 net/dccp/ipv4.c 		struct dccp_hdr *dh = dccp_hdr(skb);
skb               493 net/dccp/ipv4.c 		dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
skb               496 net/dccp/ipv4.c 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
skb               512 net/dccp/ipv4.c 	struct sk_buff *skb;
skb               528 net/dccp/ipv4.c 	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
skb               529 net/dccp/ipv4.c 	if (skb == NULL)
skb               533 net/dccp/ipv4.c 	dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr,
skb               535 net/dccp/ipv4.c 	skb_dst_set(skb, dst_clone(dst));
skb               539 net/dccp/ipv4.c 	err = ip_build_and_send_pkt(skb, ctl_sk,
skb               573 net/dccp/ipv4.c int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
skb               578 net/dccp/ipv4.c 	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
skb               579 net/dccp/ipv4.c 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
skb               582 net/dccp/ipv4.c 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
skb               605 net/dccp/ipv4.c 	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
skb               609 net/dccp/ipv4.c 	if (dccp_parse_options(sk, dreq, skb))
skb               612 net/dccp/ipv4.c 	if (security_inet_conn_request(sk, skb, req))
skb               616 net/dccp/ipv4.c 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
skb               617 net/dccp/ipv4.c 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
skb               618 net/dccp/ipv4.c 	ireq->ir_mark = inet_request_mark(sk, skb);
skb               631 net/dccp/ipv4.c 	dreq->dreq_iss	   = dccp_v4_init_sequence(skb);
skb               650 net/dccp/ipv4.c int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
skb               652 net/dccp/ipv4.c 	struct dccp_hdr *dh = dccp_hdr(skb);
skb               655 net/dccp/ipv4.c 		if (dccp_rcv_established(sk, skb, dh, skb->len))
skb               684 net/dccp/ipv4.c 	if (dccp_rcv_state_process(sk, skb, dh, skb->len))
skb               689 net/dccp/ipv4.c 	dccp_v4_ctl_send_reset(sk, skb);
skb               690 net/dccp/ipv4.c 	kfree_skb(skb);
skb               700 net/dccp/ipv4.c int dccp_invalid_packet(struct sk_buff *skb)
skb               706 net/dccp/ipv4.c 	if (skb->pkt_type != PACKET_HOST)
skb               710 net/dccp/ipv4.c 	if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
skb               715 net/dccp/ipv4.c 	dh = dccp_hdr(skb);
skb               727 net/dccp/ipv4.c 	if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
skb               734 net/dccp/ipv4.c 	if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
skb               738 net/dccp/ipv4.c 	dh = dccp_hdr(skb);
skb               754 net/dccp/ipv4.c 	cscov = dccp_csum_coverage(skb);
skb               755 net/dccp/ipv4.c 	if (cscov > skb->len) {
skb               757 net/dccp/ipv4.c 			  dh->dccph_cscov, skb->len);
skb               763 net/dccp/ipv4.c 	skb->csum = skb_checksum(skb, 0, cscov, 0);
skb               770 net/dccp/ipv4.c static int dccp_v4_rcv(struct sk_buff *skb)
skb               780 net/dccp/ipv4.c 	if (dccp_invalid_packet(skb))
skb               783 net/dccp/ipv4.c 	iph = ip_hdr(skb);
skb               785 net/dccp/ipv4.c 	if (dccp_v4_csum_finish(skb, iph->saddr, iph->daddr)) {
skb               790 net/dccp/ipv4.c 	dh = dccp_hdr(skb);
skb               792 net/dccp/ipv4.c 	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
skb               793 net/dccp/ipv4.c 	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
skb               799 net/dccp/ipv4.c 		      (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
skb               801 net/dccp/ipv4.c 	if (dccp_packet_without_ack(skb)) {
skb               802 net/dccp/ipv4.c 		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
skb               805 net/dccp/ipv4.c 		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
skb               807 net/dccp/ipv4.c 				  DCCP_SKB_CB(skb)->dccpd_ack_seq);
skb               811 net/dccp/ipv4.c 	sk = __inet_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
skb               842 net/dccp/ipv4.c 		nsk = dccp_check_req(sk, skb, req);
skb               849 net/dccp/ipv4.c 		} else if (dccp_child_process(sk, nsk, skb)) {
skb               850 net/dccp/ipv4.c 			dccp_v4_ctl_send_reset(sk, skb);
skb               872 net/dccp/ipv4.c 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
skb               874 net/dccp/ipv4.c 	nf_reset_ct(skb);
skb               876 net/dccp/ipv4.c 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
skb               879 net/dccp/ipv4.c 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
skb               888 net/dccp/ipv4.c 		DCCP_SKB_CB(skb)->dccpd_reset_code =
skb               890 net/dccp/ipv4.c 		dccp_v4_ctl_send_reset(sk, skb);
skb               894 net/dccp/ipv4.c 	kfree_skb(skb);
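
dccp_invalid_packet() above shows the canonical two-stage header validation: pskb_may_pull() the fixed header, sanity-check the Data Offset, then pull the full options area, re-reading the header pointer after each pull because pulling may relocate skb->data (which is why dccp_hdr() is re-read after the second pull in the lines above). A simplified kernel-style sketch; the real check compares doff against the packet-type-dependent dccp_hdr_len(), not just the fixed header:

#include <linux/dccp.h>
#include <linux/skbuff.h>

static bool dccp_header_ok(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;

	if (!pskb_may_pull(skb, sizeof(struct dccp_hdr)))
		return false;			/* truncated fixed header */

	dh = dccp_hdr(skb);
	if (dh->dccph_doff * sizeof(u32) < sizeof(struct dccp_hdr))
		return false;			/* Data Offset too small */

	if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32)))
		return false;			/* options area truncated */

	return true;				/* header + options linear */
}
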
skb                42 net/dccp/ipv6.c static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
skb                46 net/dccp/ipv6.c 	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
skb                49 net/dccp/ipv6.c static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
skb                52 net/dccp/ipv6.c 	struct dccp_hdr *dh = dccp_hdr(skb);
skb                54 net/dccp/ipv6.c 	dccp_csum_outgoing(skb);
skb                55 net/dccp/ipv6.c 	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
skb                58 net/dccp/ipv6.c static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
skb                60 net/dccp/ipv6.c 	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
skb                61 net/dccp/ipv6.c 					     ipv6_hdr(skb)->saddr.s6_addr32,
skb                62 net/dccp/ipv6.c 					     dccp_hdr(skb)->dccph_dport,
skb                63 net/dccp/ipv6.c 					     dccp_hdr(skb)->dccph_sport     );
skb                67 net/dccp/ipv6.c static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb                70 net/dccp/ipv6.c 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
skb                77 net/dccp/ipv6.c 	struct net *net = dev_net(skb->dev);
skb                85 net/dccp/ipv6.c 	dh = (struct dccp_hdr *)(skb->data + offset);
skb                90 net/dccp/ipv6.c 					inet6_iif(skb), 0);
skb                93 net/dccp/ipv6.c 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
skb               129 net/dccp/ipv6.c 				dst->ops->redirect(dst, sk, skb);
skb               192 net/dccp/ipv6.c 	struct sk_buff *skb;
skb               220 net/dccp/ipv6.c 	skb = dccp_make_response(sk, dst, req);
skb               221 net/dccp/ipv6.c 	if (skb != NULL) {
skb               222 net/dccp/ipv6.c 		struct dccp_hdr *dh = dccp_hdr(skb);
skb               225 net/dccp/ipv6.c 		dh->dccph_checksum = dccp_v6_csum_finish(skb,
skb               233 net/dccp/ipv6.c 		err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass,
skb               254 net/dccp/ipv6.c 	struct sk_buff *skb;
skb               266 net/dccp/ipv6.c 	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
skb               267 net/dccp/ipv6.c 	if (skb == NULL)
skb               271 net/dccp/ipv6.c 	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
skb               280 net/dccp/ipv6.c 	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
skb               281 net/dccp/ipv6.c 	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
skb               287 net/dccp/ipv6.c 		skb_dst_set(skb, dst);
skb               288 net/dccp/ipv6.c 		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0, 0);
skb               294 net/dccp/ipv6.c 	kfree_skb(skb);
skb               307 net/dccp/ipv6.c static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
skb               313 net/dccp/ipv6.c 	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
skb               314 net/dccp/ipv6.c 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
skb               316 net/dccp/ipv6.c 	if (skb->protocol == htons(ETH_P_IP))
skb               317 net/dccp/ipv6.c 		return dccp_v4_conn_request(sk, skb);
skb               319 net/dccp/ipv6.c 	if (!ipv6_unicast_destination(skb))
skb               340 net/dccp/ipv6.c 	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
skb               344 net/dccp/ipv6.c 	if (dccp_parse_options(sk, dreq, skb))
skb               347 net/dccp/ipv6.c 	if (security_inet_conn_request(sk, skb, req))
skb               351 net/dccp/ipv6.c 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
skb               352 net/dccp/ipv6.c 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
skb               354 net/dccp/ipv6.c 	ireq->ir_mark = inet_request_mark(sk, skb);
skb               356 net/dccp/ipv6.c 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
skb               359 net/dccp/ipv6.c 		refcount_inc(&skb->users);
skb               360 net/dccp/ipv6.c 		ireq->pktopts = skb;
skb               367 net/dccp/ipv6.c 		ireq->ir_iif = inet6_iif(skb);
skb               378 net/dccp/ipv6.c 	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
skb               397 net/dccp/ipv6.c 					      struct sk_buff *skb,
skb               411 net/dccp/ipv6.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               415 net/dccp/ipv6.c 		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
skb               436 net/dccp/ipv6.c 		newnp->mcast_oif   = inet_iif(skb);
skb               437 net/dccp/ipv6.c 		newnp->mcast_hops  = ip_hdr(skb)->ttl;
skb               466 net/dccp/ipv6.c 	newsk = dccp_create_openreq_child(sk, req, skb);
skb               505 net/dccp/ipv6.c 	newnp->mcast_oif  = inet6_iif(skb);
skb               506 net/dccp/ipv6.c 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
skb               565 net/dccp/ipv6.c static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
skb               578 net/dccp/ipv6.c 	if (skb->protocol == htons(ETH_P_IP))
skb               579 net/dccp/ipv6.c 		return dccp_v4_do_rcv(sk, skb);
skb               581 net/dccp/ipv6.c 	if (sk_filter(sk, skb))
skb               603 net/dccp/ipv6.c 		opt_skb = skb_clone(skb, GFP_ATOMIC);
skb               606 net/dccp/ipv6.c 		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
skb               638 net/dccp/ipv6.c 	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
skb               645 net/dccp/ipv6.c 	dccp_v6_ctl_send_reset(sk, skb);
skb               649 net/dccp/ipv6.c 	kfree_skb(skb);
skb               682 net/dccp/ipv6.c static int dccp_v6_rcv(struct sk_buff *skb)
skb               691 net/dccp/ipv6.c 	if (dccp_invalid_packet(skb))
skb               695 net/dccp/ipv6.c 	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
skb               696 net/dccp/ipv6.c 				     &ipv6_hdr(skb)->daddr)) {
skb               701 net/dccp/ipv6.c 	dh = dccp_hdr(skb);
skb               703 net/dccp/ipv6.c 	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
skb               704 net/dccp/ipv6.c 	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
skb               706 net/dccp/ipv6.c 	if (dccp_packet_without_ack(skb))
skb               707 net/dccp/ipv6.c 		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
skb               709 net/dccp/ipv6.c 		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
skb               712 net/dccp/ipv6.c 	sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
skb               714 net/dccp/ipv6.c 				inet6_iif(skb), 0, &refcounted);
skb               744 net/dccp/ipv6.c 		nsk = dccp_check_req(sk, skb, req);
skb               751 net/dccp/ipv6.c 		} else if (dccp_child_process(sk, nsk, skb)) {
skb               752 net/dccp/ipv6.c 			dccp_v6_ctl_send_reset(sk, skb);
skb               772 net/dccp/ipv6.c 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
skb               775 net/dccp/ipv6.c 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
skb               779 net/dccp/ipv6.c 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
skb               788 net/dccp/ipv6.c 		DCCP_SKB_CB(skb)->dccpd_reset_code =
skb               790 net/dccp/ipv6.c 		dccp_v6_ctl_send_reset(sk, skb);
skb               794 net/dccp/ipv6.c 	kfree_skb(skb);
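
dccp_v6_csum_finish() above completes the checksum that dccp_csum_outgoing() began: skb->csum holds the partial sum over the covered DCCP bytes, and csum_ipv6_magic() folds in the IPv6 pseudo-header (addresses, length, next-header), exactly as csum_tcpudp_magic() does for IPv4 in dccp_v4_csum_finish(). A kernel-style restatement:

#include <linux/in.h>
#include <linux/in6.h>
#include <net/ip6_checksum.h>

static __sum16 finish_v6(const struct in6_addr *saddr,
			 const struct in6_addr *daddr,
			 __u32 len, __wsum partial)
{
	/* Fold the pseudo-header into the running sum and finalize. */
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, partial);
}
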
skb                79 net/dccp/minisocks.c 				       const struct sk_buff *skb)
skb               140 net/dccp/minisocks.c struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
skb               155 net/dccp/minisocks.c 	if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
skb               157 net/dccp/minisocks.c 		if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) {
skb               159 net/dccp/minisocks.c 			dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq;
skb               171 net/dccp/minisocks.c 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
skb               173 net/dccp/minisocks.c 	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
skb               174 net/dccp/minisocks.c 	    dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
skb               178 net/dccp/minisocks.c 	if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
skb               183 net/dccp/minisocks.c 			      DCCP_SKB_CB(skb)->dccpd_ack_seq,
skb               189 net/dccp/minisocks.c 	if (dccp_parse_options(sk, dreq, skb))
skb               192 net/dccp/minisocks.c 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
skb               199 net/dccp/minisocks.c 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
skb               201 net/dccp/minisocks.c 	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
skb               202 net/dccp/minisocks.c 		req->rsk_ops->send_reset(sk, skb);
skb               218 net/dccp/minisocks.c 		       struct sk_buff *skb)
skb               224 net/dccp/minisocks.c 		ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
skb               225 net/dccp/minisocks.c 					     skb->len);
skb               235 net/dccp/minisocks.c 		__sk_add_backlog(child, skb);
skb               245 net/dccp/minisocks.c void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
skb               254 net/dccp/minisocks.c 		    struct dccp_sock const *dp, struct sk_buff const *skb)
skb               259 net/dccp/minisocks.c 	inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
skb               260 net/dccp/minisocks.c 	inet_rsk(req)->ir_num	   = ntohs(dccp_hdr(skb)->dccph_dport);
skb                48 net/dccp/options.c 		       struct sk_buff *skb)
skb                51 net/dccp/options.c 	const struct dccp_hdr *dh = dccp_hdr(skb);
skb                52 net/dccp/options.c 	const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type;
skb                53 net/dccp/options.c 	unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
skb               154 net/dccp/options.c 				      DCCP_SKB_CB(skb)->dccpd_ack_seq);
skb               170 net/dccp/options.c 				      DCCP_SKB_CB(skb)->dccpd_ack_seq);
skb               194 net/dccp/options.c 			if (dccp_packet_without_ack(skb))   /* RFC 4340, 13.2 */
skb               220 net/dccp/options.c 			if (dccp_packet_without_ack(skb))   /* RFC 4340, 11.4 */
skb               256 net/dccp/options.c 	DCCP_SKB_CB(skb)->dccpd_reset_code = rc;
skb               257 net/dccp/options.c 	DCCP_SKB_CB(skb)->dccpd_reset_data[0] = opt;
skb               258 net/dccp/options.c 	DCCP_SKB_CB(skb)->dccpd_reset_data[1] = len > 0 ? value[0] : 0;
skb               259 net/dccp/options.c 	DCCP_SKB_CB(skb)->dccpd_reset_data[2] = len > 1 ? value[1] : 0;
skb               288 net/dccp/options.c int dccp_insert_option(struct sk_buff *skb, const unsigned char option,
skb               293 net/dccp/options.c 	if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 2 > DCCP_MAX_OPT_LEN)
skb               296 net/dccp/options.c 	DCCP_SKB_CB(skb)->dccpd_opt_len += len + 2;
skb               298 net/dccp/options.c 	to    = skb_push(skb, len + 2);
skb               308 net/dccp/options.c static int dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb)
skb               313 net/dccp/options.c 	if (dccp_non_data_packet(skb))
skb               323 net/dccp/options.c 		if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
skb               326 net/dccp/options.c 		DCCP_SKB_CB(skb)->dccpd_opt_len += len;
skb               328 net/dccp/options.c 		ptr = skb_push(skb, len);
skb               342 net/dccp/options.c static int dccp_insert_option_timestamp(struct sk_buff *skb)
skb               348 net/dccp/options.c 	return dccp_insert_option(skb, DCCPO_TIMESTAMP, &now, sizeof(now));
skb               353 net/dccp/options.c 					     struct sk_buff *skb)
skb               372 net/dccp/options.c 	if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
skb               375 net/dccp/options.c 	DCCP_SKB_CB(skb)->dccpd_opt_len += len;
skb               377 net/dccp/options.c 	to    = skb_push(skb, len);
skb               395 net/dccp/options.c static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
skb               399 net/dccp/options.c 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
skb               419 net/dccp/options.c 	    len + dcb->dccpd_opt_len + skb->len > dp->dccps_mss_cache) {
skb               421 net/dccp/options.c 			  "MPS=%u ==> reduce payload size?\n", len, skb->len,
skb               428 net/dccp/options.c 	to   = skb_push(skb, len);
skb               477 net/dccp/options.c int dccp_insert_option_mandatory(struct sk_buff *skb)
skb               479 net/dccp/options.c 	if (DCCP_SKB_CB(skb)->dccpd_opt_len >= DCCP_MAX_OPT_LEN)
skb               482 net/dccp/options.c 	DCCP_SKB_CB(skb)->dccpd_opt_len++;
skb               483 net/dccp/options.c 	*(u8 *)skb_push(skb, 1) = DCCPO_MANDATORY;
skb               500 net/dccp/options.c int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
skb               515 net/dccp/options.c 	if (DCCP_SKB_CB(skb)->dccpd_opt_len + tot_len > DCCP_MAX_OPT_LEN) {
skb               519 net/dccp/options.c 	DCCP_SKB_CB(skb)->dccpd_opt_len += tot_len;
skb               521 net/dccp/options.c 	to    = skb_push(skb, tot_len);
skb               534 net/dccp/options.c static void dccp_insert_option_padding(struct sk_buff *skb)
skb               536 net/dccp/options.c 	int padding = DCCP_SKB_CB(skb)->dccpd_opt_len % 4;
skb               540 net/dccp/options.c 		memset(skb_push(skb, padding), 0, padding);
skb               541 net/dccp/options.c 		DCCP_SKB_CB(skb)->dccpd_opt_len += padding;
skb               545 net/dccp/options.c int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
skb               549 net/dccp/options.c 	DCCP_SKB_CB(skb)->dccpd_opt_len = 0;
skb               551 net/dccp/options.c 	if (dp->dccps_send_ndp_count && dccp_insert_option_ndp(sk, skb))
skb               554 net/dccp/options.c 	if (DCCP_SKB_CB(skb)->dccpd_type != DCCP_PKT_DATA) {
skb               557 net/dccp/options.c 		if (dccp_feat_insert_opts(dp, NULL, skb))
skb               560 net/dccp/options.c 		if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST) {
skb               565 net/dccp/options.c 			if (dccp_insert_option_timestamp(skb))
skb               569 net/dccp/options.c 			   dccp_insert_option_ackvec(sk, skb)) {
skb               575 net/dccp/options.c 		if (ccid_hc_rx_insert_options(dp->dccps_hc_rx_ccid, sk, skb))
skb               581 net/dccp/options.c 	    dccp_insert_option_timestamp_echo(dp, NULL, skb))
skb               584 net/dccp/options.c 	dccp_insert_option_padding(skb);
skb               588 net/dccp/options.c int dccp_insert_options_rsk(struct dccp_request_sock *dreq, struct sk_buff *skb)
skb               590 net/dccp/options.c 	DCCP_SKB_CB(skb)->dccpd_opt_len = 0;
skb               592 net/dccp/options.c 	if (dccp_feat_insert_opts(NULL, dreq, skb))
skb               596 net/dccp/options.c 	if (dccp_insert_option_timestamp(skb))
skb               600 net/dccp/options.c 	    dccp_insert_option_timestamp_echo(NULL, dreq, skb))
skb               603 net/dccp/options.c 	dccp_insert_option_padding(skb);
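
dccp_insert_option_padding() above relies on DCCP's type-0 Padding option being a single zero byte: once all options are pushed, dccpd_opt_len is rounded up to a 32-bit boundary by pushing zero bytes (the intermediate "padding = 4 - padding" step falls between the indexed lines, since it does not mention skb). Kernel-style sketch:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Round the option area up to a multiple of 4 bytes; zero bytes are
 * valid Padding options per RFC 4340. */
static void pad_options(struct sk_buff *skb, unsigned int *opt_len)
{
	unsigned int pad = *opt_len % 4;

	if (pad) {
		pad = 4 - pad;
		memset(skb_push(skb, pad), 0, pad);
		*opt_len += pad;
	}
}
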
skb                28 net/dccp/output.c static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
skb                30 net/dccp/output.c 	skb_set_owner_w(skb, sk);
skb                32 net/dccp/output.c 	sk->sk_send_head = skb;
skb                42 net/dccp/output.c static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
skb                44 net/dccp/output.c 	if (likely(skb != NULL)) {
skb                48 net/dccp/output.c 		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
skb                88 net/dccp/output.c 			WARN_ON(skb->sk);
skb                89 net/dccp/output.c 			skb_set_owner_w(skb, sk);
skb                93 net/dccp/output.c 		if (dccp_insert_options(sk, skb)) {
skb                94 net/dccp/output.c 			kfree_skb(skb);
skb               100 net/dccp/output.c 		dh = dccp_zeroed_hdr(skb, dccp_header_size);
skb               113 net/dccp/output.c 			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);
skb               117 net/dccp/output.c 			dccp_hdr_request(skb)->dccph_req_service =
skb               126 net/dccp/output.c 			dccp_hdr_reset(skb)->dccph_reset_code =
skb               131 net/dccp/output.c 		icsk->icsk_af_ops->send_check(sk, skb);
skb               138 net/dccp/output.c 		err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
skb               245 net/dccp/output.c 	struct sk_buff *skb = dccp_qpolicy_pop(sk);
skb               247 net/dccp/output.c 	if (unlikely(skb == NULL))
skb               249 net/dccp/output.c 	len = skb->len;
skb               270 net/dccp/output.c 		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
skb               272 net/dccp/output.c 		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
skb               274 net/dccp/output.c 		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
skb               277 net/dccp/output.c 	err = dccp_transmit_skb(sk, skb);
skb               307 net/dccp/output.c 	struct sk_buff *skb;
skb               310 net/dccp/output.c 	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
skb               311 net/dccp/output.c 		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
skb               337 net/dccp/output.c 			kfree_skb(skb);
skb               346 net/dccp/output.c 	struct sk_buff *skb;
skb               348 net/dccp/output.c 	while ((skb = dccp_qpolicy_top(sk))) {
skb               349 net/dccp/output.c 		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
skb               362 net/dccp/output.c 			dccp_qpolicy_drop(sk, skb);
skb               398 net/dccp/output.c 	struct sk_buff *skb;
skb               404 net/dccp/output.c 	skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
skb               406 net/dccp/output.c 	if (!skb)
skb               409 net/dccp/output.c 	skb_reserve(skb, MAX_DCCP_HEADER);
skb               411 net/dccp/output.c 	skb_dst_set(skb, dst_clone(dst));
skb               416 net/dccp/output.c 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
skb               417 net/dccp/output.c 	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_gss;
skb               423 net/dccp/output.c 	if (dccp_insert_options_rsk(dreq, skb))
skb               427 net/dccp/output.c 	dh = dccp_zeroed_hdr(skb, dccp_header_size);
skb               432 net/dccp/output.c 			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
skb               436 net/dccp/output.c 	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
skb               437 net/dccp/output.c 	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
skb               439 net/dccp/output.c 	dccp_csum_outgoing(skb);
skb               444 net/dccp/output.c 	return skb;
skb               446 net/dccp/output.c 	kfree_skb(skb);
skb               461 net/dccp/output.c 	struct sk_buff *skb;
skb               463 net/dccp/output.c 	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
skb               464 net/dccp/output.c 	if (skb == NULL)
skb               467 net/dccp/output.c 	skb_reserve(skb, sk->sk_prot->max_header);
skb               470 net/dccp/output.c 	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
skb               477 net/dccp/output.c 	dhr = dccp_hdr_reset(skb);
skb               496 net/dccp/output.c 	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);
skb               498 net/dccp/output.c 	dccp_csum_outgoing(skb);
skb               499 net/dccp/output.c 	return skb;
skb               507 net/dccp/output.c 	struct sk_buff *skb;
skb               517 net/dccp/output.c 	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
skb               518 net/dccp/output.c 	if (skb == NULL)
skb               522 net/dccp/output.c 	skb_reserve(skb, sk->sk_prot->max_header);
skb               523 net/dccp/output.c 	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
skb               524 net/dccp/output.c 	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
skb               526 net/dccp/output.c 	return dccp_transmit_skb(sk, skb);
skb               534 net/dccp/output.c 	struct sk_buff *skb;
skb               551 net/dccp/output.c 	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
skb               552 net/dccp/output.c 	if (unlikely(skb == NULL))
skb               556 net/dccp/output.c 	skb_reserve(skb, sk->sk_prot->max_header);
skb               558 net/dccp/output.c 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
skb               560 net/dccp/output.c 	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
skb               576 net/dccp/output.c 		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
skb               579 net/dccp/output.c 		if (skb == NULL) {
skb               589 net/dccp/output.c 		skb_reserve(skb, sk->sk_prot->max_header);
skb               590 net/dccp/output.c 		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
skb               591 net/dccp/output.c 		dccp_transmit_skb(sk, skb);
skb               638 net/dccp/output.c 	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
skb               640 net/dccp/output.c 	if (skb == NULL) {
skb               647 net/dccp/output.c 	skb_reserve(skb, sk->sk_prot->max_header);
skb               648 net/dccp/output.c 	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
skb               649 net/dccp/output.c 	DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;
skb               657 net/dccp/output.c 	dccp_transmit_skb(sk, skb);
skb               670 net/dccp/output.c 	struct sk_buff *skb;
skb               673 net/dccp/output.c 	skb = alloc_skb(sk->sk_prot->max_header, prio);
skb               674 net/dccp/output.c 	if (skb == NULL)
skb               678 net/dccp/output.c 	skb_reserve(skb, sk->sk_prot->max_header);
skb               680 net/dccp/output.c 		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
skb               682 net/dccp/output.c 		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;
skb               685 net/dccp/output.c 		skb = dccp_skb_entail(sk, skb);
skb               699 net/dccp/output.c 	dccp_transmit_skb(sk, skb);
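
Every control-packet path in output.c above (Request, Ack, Sync, Close, Reset) allocates with sk->sk_prot->max_header and immediately reserves all of it, so header construction can skb_push() downward without reallocation. The shape, as a kernel-style sketch:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Allocate a payload-less control skb whose headroom can absorb the
 * worst-case DCCP + IP + link-layer header stack. */
static struct sk_buff *alloc_ctl_skb(struct sock *sk, gfp_t prio)
{
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, prio);

	if (skb)
		skb_reserve(skb, sk->sk_prot->max_header);
	return skb;
}
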
skb               379 net/dccp/proto.c 		struct sk_buff *skb;
skb               382 net/dccp/proto.c 		skb = skb_peek(&sk->sk_receive_queue);
skb               383 net/dccp/proto.c 		if (skb != NULL) {
skb               388 net/dccp/proto.c 			amount = skb->len;
skb               712 net/dccp/proto.c static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
skb               726 net/dccp/proto.c 	skb->priority = 0;
skb               736 net/dccp/proto.c 		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
skb               743 net/dccp/proto.c 			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
skb               757 net/dccp/proto.c 	struct sk_buff *skb;
skb               786 net/dccp/proto.c 	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
skb               788 net/dccp/proto.c 	if (skb == NULL)
skb               796 net/dccp/proto.c 	skb_reserve(skb, sk->sk_prot->max_header);
skb               797 net/dccp/proto.c 	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
skb               801 net/dccp/proto.c 	rc = dccp_msghdr_parse(msg, skb);
skb               805 net/dccp/proto.c 	dccp_qpolicy_push(sk, skb);
skb               817 net/dccp/proto.c 	kfree_skb(skb);
skb               839 net/dccp/proto.c 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
skb               841 net/dccp/proto.c 		if (skb == NULL)
skb               844 net/dccp/proto.c 		dh = dccp_hdr(skb);
skb               864 net/dccp/proto.c 			sk_eat_skb(sk, skb);
skb               907 net/dccp/proto.c 		if (len > skb->len)
skb               908 net/dccp/proto.c 			len = skb->len;
skb               909 net/dccp/proto.c 		else if (len < skb->len)
skb               912 net/dccp/proto.c 		if (skb_copy_datagram_msg(skb, 0, msg, len)) {
skb               918 net/dccp/proto.c 			len = skb->len;
skb               921 net/dccp/proto.c 			sk_eat_skb(sk, skb);
skb               999 net/dccp/proto.c 	struct sk_buff *skb;
skb              1023 net/dccp/proto.c 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
skb              1024 net/dccp/proto.c 		data_was_unread += skb->len;
skb              1025 net/dccp/proto.c 		__kfree_skb(skb);
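
The proto.c recvmsg path above has datagram semantics: peek at the head of sk_receive_queue, copy at most the caller's buffer (flagging MSG_TRUNC when the packet is bigger), then sk_eat_skb() the buffer unless MSG_PEEK was set. A simplified kernel-style sketch:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Caller holds the socket lock.  Returns bytes copied or -errno. */
static int recv_one(struct sock *sk, struct msghdr *msg, size_t len,
		    int flags)
{
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
	unsigned int chunk;

	if (!skb)
		return -EAGAIN;			/* nothing queued yet */

	chunk = min_t(unsigned int, len, skb->len);
	if (chunk < skb->len)
		msg->msg_flags |= MSG_TRUNC;	/* datagram truncated */

	if (skb_copy_datagram_msg(skb, 0, msg, chunk))
		return -EFAULT;

	if (!(flags & MSG_PEEK))
		sk_eat_skb(sk, skb);		/* unlink + free */
	return chunk;
}
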
skb                15 net/dccp/qpolicy.c static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb)
skb                17 net/dccp/qpolicy.c 	skb_queue_tail(&sk->sk_write_queue, skb);
skb                38 net/dccp/qpolicy.c 	struct sk_buff *skb, *best = NULL;
skb                40 net/dccp/qpolicy.c 	skb_queue_walk(&sk->sk_write_queue, skb)
skb                41 net/dccp/qpolicy.c 		if (best == NULL || skb->priority > best->priority)
skb                42 net/dccp/qpolicy.c 			best = skb;
skb                48 net/dccp/qpolicy.c 	struct sk_buff *skb, *worst = NULL;
skb                50 net/dccp/qpolicy.c 	skb_queue_walk(&sk->sk_write_queue, skb)
skb                51 net/dccp/qpolicy.c 		if (worst == NULL || skb->priority < worst->priority)
skb                52 net/dccp/qpolicy.c 			worst = skb;
skb                70 net/dccp/qpolicy.c 	void		(*push)	(struct sock *sk, struct sk_buff *skb);
skb                93 net/dccp/qpolicy.c void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb)
skb                95 net/dccp/qpolicy.c 	qpol_table[dccp_sk(sk)->dccps_qpolicy].push(sk, skb);
skb               103 net/dccp/qpolicy.c void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb)
skb               105 net/dccp/qpolicy.c 	if (skb != NULL) {
skb               106 net/dccp/qpolicy.c 		skb_unlink(skb, &sk->sk_write_queue);
skb               107 net/dccp/qpolicy.c 		kfree_skb(skb);
skb               118 net/dccp/qpolicy.c 	struct sk_buff *skb = dccp_qpolicy_top(sk);
skb               120 net/dccp/qpolicy.c 	if (skb != NULL) {
skb               122 net/dccp/qpolicy.c 		skb->priority = 0;
skb               123 net/dccp/qpolicy.c 		skb_unlink(skb, &sk->sk_write_queue);
skb               125 net/dccp/qpolicy.c 	return skb;
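
The "prio" queueing policy above is a plain linear scan of the socket write queue keyed on skb->priority (set per message from a control message in dccp_msghdr_parse() above). A consolidated kernel-style sketch of the selection:

#include <linux/skbuff.h>
#include <net/sock.h>

/* O(n) scan for the highest-priority queued skb; n stays small because
 * the qpolicy also caps the per-socket queue length.  Caller holds the
 * socket lock, which protects sk_write_queue here. */
static struct sk_buff *prio_best(struct sock *sk)
{
	struct sk_buff *skb, *best = NULL;

	skb_queue_walk(&sk->sk_write_queue, skb)
		if (!best || skb->priority > best->priority)
			best = skb;
	return best;
}
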
skb               405 net/decnet/af_decnet.c struct sock *dn_find_by_skb(struct sk_buff *skb)
skb               407 net/decnet/af_decnet.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb              1001 net/decnet/af_decnet.c static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
skb              1003 net/decnet/af_decnet.c 	unsigned char *ptr = skb->data;
skb              1016 net/decnet/af_decnet.c 	skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
skb              1020 net/decnet/af_decnet.c static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
skb              1022 net/decnet/af_decnet.c 	unsigned char *ptr = skb->data;
skb              1029 net/decnet/af_decnet.c 	skb_pull(skb, len + 1);
skb              1035 net/decnet/af_decnet.c 	struct sk_buff *skb = NULL;
skb              1041 net/decnet/af_decnet.c 		skb = skb_dequeue(&sk->sk_receive_queue);
skb              1042 net/decnet/af_decnet.c 		if (skb == NULL) {
skb              1044 net/decnet/af_decnet.c 			skb = skb_dequeue(&sk->sk_receive_queue);
skb              1047 net/decnet/af_decnet.c 		if (skb != NULL)
skb              1062 net/decnet/af_decnet.c 	return skb == NULL ? ERR_PTR(err) : skb;
skb              1069 net/decnet/af_decnet.c 	struct sk_buff *skb = NULL;
skb              1084 net/decnet/af_decnet.c 	skb = skb_dequeue(&sk->sk_receive_queue);
skb              1085 net/decnet/af_decnet.c 	if (skb == NULL) {
skb              1086 net/decnet/af_decnet.c 		skb = dn_wait_for_connect(sk, &timeo);
skb              1087 net/decnet/af_decnet.c 		if (IS_ERR(skb)) {
skb              1089 net/decnet/af_decnet.c 			return PTR_ERR(skb);
skb              1093 net/decnet/af_decnet.c 	cb = DN_SKB_CB(skb);
skb              1098 net/decnet/af_decnet.c 		kfree_skb(skb);
skb              1103 net/decnet/af_decnet.c 	dst = skb_dst(skb);
skb              1105 net/decnet/af_decnet.c 	skb_dst_set(skb, NULL);
skb              1129 net/decnet/af_decnet.c 	skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
skb              1130 net/decnet/af_decnet.c 	skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
skb              1134 net/decnet/af_decnet.c 	menuver = *skb->data;
skb              1135 net/decnet/af_decnet.c 	skb_pull(skb, 1);
skb              1138 net/decnet/af_decnet.c 		dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
skb              1141 net/decnet/af_decnet.c 		dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
skb              1149 net/decnet/af_decnet.c 	kfree_skb(skb);
skb              1220 net/decnet/af_decnet.c 	struct sk_buff *skb;
skb              1246 net/decnet/af_decnet.c 		skb = skb_peek(&scp->other_receive_queue);
skb              1247 net/decnet/af_decnet.c 		if (skb) {
skb              1248 net/decnet/af_decnet.c 			amount = skb->len;
skb              1250 net/decnet/af_decnet.c 			skb_queue_walk(&sk->sk_receive_queue, skb)
skb              1251 net/decnet/af_decnet.c 				amount += skb->len;
skb              1642 net/decnet/af_decnet.c 	struct sk_buff *skb;
skb              1648 net/decnet/af_decnet.c 	skb_queue_walk(q, skb) {
skb              1649 net/decnet/af_decnet.c 		struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb              1650 net/decnet/af_decnet.c 		len += skb->len;
skb              1679 net/decnet/af_decnet.c 	struct sk_buff *skb, *n;
skb              1754 net/decnet/af_decnet.c 	skb_queue_walk_safe(queue, skb, n) {
skb              1755 net/decnet/af_decnet.c 		unsigned int chunk = skb->len;
skb              1756 net/decnet/af_decnet.c 		cb = DN_SKB_CB(skb);
skb              1761 net/decnet/af_decnet.c 		if (memcpy_to_msg(msg, skb->data, chunk)) {
skb              1768 net/decnet/af_decnet.c 			skb_pull(skb, chunk);
skb              1772 net/decnet/af_decnet.c 		if (skb->len == 0) {
skb              1773 net/decnet/af_decnet.c 			skb_unlink(skb, queue);
skb              1774 net/decnet/af_decnet.c 			kfree_skb(skb);
skb              1897 net/decnet/af_decnet.c 	struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
skb              1899 net/decnet/af_decnet.c 	if (skb) {
skb              1900 net/decnet/af_decnet.c 		skb->protocol = htons(ETH_P_DNA_RT);
skb              1901 net/decnet/af_decnet.c 		skb->pkt_type = PACKET_OUTGOING;
skb              1903 net/decnet/af_decnet.c 	return skb;
skb              1917 net/decnet/af_decnet.c 	struct sk_buff *skb = NULL;
skb              2019 net/decnet/af_decnet.c 		skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
skb              2025 net/decnet/af_decnet.c 		if (!skb)
skb              2028 net/decnet/af_decnet.c 		cb = DN_SKB_CB(skb);
skb              2030 net/decnet/af_decnet.c 		skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
skb              2032 net/decnet/af_decnet.c 		if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
skb              2059 net/decnet/af_decnet.c 		dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
skb              2060 net/decnet/af_decnet.c 		skb = NULL;
skb              2067 net/decnet/af_decnet.c 	kfree_skb(skb);
skb               569 net/decnet/dn_dev.c static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               572 net/decnet/dn_dev.c 	struct net *net = sock_net(skb->sk);
skb               580 net/decnet/dn_dev.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb               615 net/decnet/dn_dev.c static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               618 net/decnet/dn_dev.c 	struct net *net = sock_net(skb->sk);
skb               626 net/decnet/dn_dev.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb               684 net/decnet/dn_dev.c static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
skb               691 net/decnet/dn_dev.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
skb               703 net/decnet/dn_dev.c 	     nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) ||
skb               705 net/decnet/dn_dev.c 	     nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) ||
skb               707 net/decnet/dn_dev.c 	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
skb               708 net/decnet/dn_dev.c 	     nla_put_u32(skb, IFA_FLAGS, ifa_flags))
skb               710 net/decnet/dn_dev.c 	nlmsg_end(skb, nlh);
skb               714 net/decnet/dn_dev.c 	nlmsg_cancel(skb, nlh);
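dn_nl_fill_ifaddr() above follows the standard rtnetlink fill idiom:
reserve a message header with nlmsg_put(), append attributes, then either
finalize with nlmsg_end() or roll the partially-built message back with
nlmsg_cancel() when an attribute does not fit. A condensed sketch of that
idiom; the single IFA_ADDRESS attribute stands in for the full set the
fragments emit, and the zero payload omits the family header a real
message carries:

#include <net/netlink.h>
#include <linux/if_addr.h>

static int fill_ifaddr_sketch(struct sk_buff *skb, u32 portid, u32 seq,
			      int event, __le16 addr)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, event, 0, 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	if (nla_put_le16(skb, IFA_ADDRESS, addr))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);	/* commit: fix up nlmsg_len */
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);	/* trim skb back to before nlmsg_put() */
	return -EMSGSIZE;
}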
skb               720 net/decnet/dn_dev.c 	struct sk_buff *skb;
skb               723 net/decnet/dn_dev.c 	skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL);
skb               724 net/decnet/dn_dev.c 	if (skb == NULL)
skb               727 net/decnet/dn_dev.c 	err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0);
skb               731 net/decnet/dn_dev.c 		kfree_skb(skb);
skb               734 net/decnet/dn_dev.c 	rtnl_notify(skb, &init_net, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL);
skb               741 net/decnet/dn_dev.c static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
skb               743 net/decnet/dn_dev.c 	struct net *net = sock_net(skb->sk);
skb               774 net/decnet/dn_dev.c 			if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).portid,
skb               787 net/decnet/dn_dev.c 	return skb->len;
skb               841 net/decnet/dn_dev.c 	struct sk_buff *skb = NULL;
skb               845 net/decnet/dn_dev.c 	if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
skb               848 net/decnet/dn_dev.c 	skb->dev = dev;
skb               850 net/decnet/dn_dev.c 	msg = skb_put(skb, sizeof(*msg));
skb               871 net/decnet/dn_dev.c 	pktlen = skb_push(skb, 2);
skb               872 net/decnet/dn_dev.c 	*pktlen = cpu_to_le16(skb->len - 2);
skb               874 net/decnet/dn_dev.c 	skb_reset_network_header(skb);
skb               876 net/decnet/dn_dev.c 	dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id);
skb               911 net/decnet/dn_dev.c 	struct sk_buff *skb;
skb               929 net/decnet/dn_dev.c 	if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL)
skb               932 net/decnet/dn_dev.c 	skb->dev = dev;
skb               933 net/decnet/dn_dev.c 	ptr = skb_put(skb, size);
skb               961 net/decnet/dn_dev.c 	skb_trim(skb, (27 + *i2));
skb               963 net/decnet/dn_dev.c 	pktlen = skb_push(skb, 2);
skb               964 net/decnet/dn_dev.c 	*pktlen = cpu_to_le16(skb->len - 2);
skb               966 net/decnet/dn_dev.c 	skb_reset_network_header(skb);
skb               969 net/decnet/dn_dev.c 		struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
skb               975 net/decnet/dn_dev.c 	dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
skb               992 net/decnet/dn_dev.c 	struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC);
skb               997 net/decnet/dn_dev.c 	if (skb == NULL)
skb              1000 net/decnet/dn_dev.c 	skb->dev = dev;
skb              1001 net/decnet/dn_dev.c 	skb_push(skb, dev->hard_header_len);
skb              1002 net/decnet/dn_dev.c 	ptr = skb_put(skb, 2 + 4 + tdlen);
skb              1013 net/decnet/dn_dev.c 	dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
skb              1240 net/decnet/dn_dev.c void dn_dev_init_pkt(struct sk_buff *skb)
skb              1244 net/decnet/dn_dev.c void dn_dev_veri_pkt(struct sk_buff *skb)
skb              1248 net/decnet/dn_dev.c void dn_dev_hello(struct sk_buff *skb)
skb               505 net/decnet/dn_fib.c static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               508 net/decnet/dn_fib.c 	struct net *net = sock_net(skb->sk);
skb               514 net/decnet/dn_fib.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb               529 net/decnet/dn_fib.c 	return tb->delete(tb, r, attrs, nlh, &NETLINK_CB(skb));
skb               532 net/decnet/dn_fib.c static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               535 net/decnet/dn_fib.c 	struct net *net = sock_net(skb->sk);
skb               541 net/decnet/dn_fib.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb               556 net/decnet/dn_fib.c 	return tb->insert(tb, r, attrs, nlh, &NETLINK_CB(skb));
skb                54 net/decnet/dn_neigh.c static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb);
skb               167 net/decnet/dn_neigh.c static void dn_neigh_error_report(struct neighbour *neigh, struct sk_buff *skb)
skb               170 net/decnet/dn_neigh.c 	kfree_skb(skb);
skb               173 net/decnet/dn_neigh.c static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb)
skb               175 net/decnet/dn_neigh.c 	struct dst_entry *dst = skb_dst(skb);
skb               185 net/decnet/dn_neigh.c 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
skb               186 net/decnet/dn_neigh.c 				      neigh->ha, mac_addr, skb->len);
skb               190 net/decnet/dn_neigh.c 		err = dev_queue_xmit(skb);
skb               192 net/decnet/dn_neigh.c 		kfree_skb(skb);
skb               198 net/decnet/dn_neigh.c static int dn_neigh_output_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               200 net/decnet/dn_neigh.c 	struct dst_entry *dst = skb_dst(skb);
skb               204 net/decnet/dn_neigh.c 	return neigh->output(neigh, skb);
skb               211 net/decnet/dn_neigh.c 			  struct sk_buff *skb)
skb               217 net/decnet/dn_neigh.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               220 net/decnet/dn_neigh.c 	if (skb_headroom(skb) < headroom) {
skb               221 net/decnet/dn_neigh.c 		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
skb               224 net/decnet/dn_neigh.c 			kfree_skb(skb);
skb               227 net/decnet/dn_neigh.c 		consume_skb(skb);
skb               228 net/decnet/dn_neigh.c 		skb = skb2;
skb               232 net/decnet/dn_neigh.c 	data = skb_push(skb, sizeof(struct dn_long_packet) + 3);
skb               235 net/decnet/dn_neigh.c 	*((__le16 *)data) = cpu_to_le16(skb->len - 2);
skb               248 net/decnet/dn_neigh.c 	skb_reset_network_header(skb);
skb               251 net/decnet/dn_neigh.c 		       &init_net, sk, skb, NULL, neigh->dev,
skb               259 net/decnet/dn_neigh.c 			   struct sk_buff *skb)
skb               265 net/decnet/dn_neigh.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               268 net/decnet/dn_neigh.c 	if (skb_headroom(skb) < headroom) {
skb               269 net/decnet/dn_neigh.c 		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
skb               272 net/decnet/dn_neigh.c 			kfree_skb(skb);
skb               275 net/decnet/dn_neigh.c 		consume_skb(skb);
skb               276 net/decnet/dn_neigh.c 		skb = skb2;
skb               280 net/decnet/dn_neigh.c 	data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
skb               281 net/decnet/dn_neigh.c 	*((__le16 *)data) = cpu_to_le16(skb->len - 2);
skb               289 net/decnet/dn_neigh.c 	skb_reset_network_header(skb);
skb               292 net/decnet/dn_neigh.c 		       &init_net, sk, skb, NULL, neigh->dev,
skb               302 net/decnet/dn_neigh.c 			    struct sk_buff *skb)
skb               308 net/decnet/dn_neigh.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               310 net/decnet/dn_neigh.c 	if (skb_headroom(skb) < headroom) {
skb               311 net/decnet/dn_neigh.c 		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
skb               314 net/decnet/dn_neigh.c 			kfree_skb(skb);
skb               317 net/decnet/dn_neigh.c 		consume_skb(skb);
skb               318 net/decnet/dn_neigh.c 		skb = skb2;
skb               322 net/decnet/dn_neigh.c 	data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
skb               323 net/decnet/dn_neigh.c 	*((__le16 *)data) = cpu_to_le16(skb->len - 2);
skb               331 net/decnet/dn_neigh.c 	skb_reset_network_header(skb);
skb               334 net/decnet/dn_neigh.c 		       &init_net, sk, skb, NULL, neigh->dev,
skb               338 net/decnet/dn_neigh.c int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               340 net/decnet/dn_neigh.c 	struct dst_entry *dst = skb_dst(skb);
skb               357 net/decnet/dn_neigh.c 		return dn_phase3_output(neigh, sk, skb);
skb               359 net/decnet/dn_neigh.c 		return dn_long_output(neigh, sk, skb);
skb               361 net/decnet/dn_neigh.c 		return dn_short_output(neigh, sk, skb);
skb               374 net/decnet/dn_neigh.c void dn_neigh_pointopoint_hello(struct sk_buff *skb)
skb               376 net/decnet/dn_neigh.c 	kfree_skb(skb);
skb               382 net/decnet/dn_neigh.c int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               384 net/decnet/dn_neigh.c 	struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data;
skb               393 net/decnet/dn_neigh.c 	neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1);
skb               407 net/decnet/dn_neigh.c 				memcpy(neigh->ha, &eth_hdr(skb)->h_source, ETH_ALEN);
skb               437 net/decnet/dn_neigh.c 	kfree_skb(skb);
skb               444 net/decnet/dn_neigh.c int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               446 net/decnet/dn_neigh.c 	struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data;
skb               453 net/decnet/dn_neigh.c 	neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1);
skb               466 net/decnet/dn_neigh.c 				memcpy(neigh->ha, &eth_hdr(skb)->h_source, ETH_ALEN);
skb               476 net/decnet/dn_neigh.c 	kfree_skb(skb);
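dn_long_output(), dn_short_output() and dn_phase3_output() above all open
with the same headroom guard before pushing a routing header. A sketch of
that shared step, using only the calls visible in the fragments:

#include <linux/skbuff.h>

static struct sk_buff *ensure_headroom_sketch(struct sk_buff *skb,
					      unsigned int headroom)
{
	if (skb_headroom(skb) < headroom) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);

		if (skb2 == NULL) {
			kfree_skb(skb);	/* drop: no memory for the copy */
			return NULL;
		}
		consume_skb(skb);	/* original successfully replaced */
		skb = skb2;
	}
	return skb;			/* now safe for skb_push() */
}

The kfree_skb()/consume_skb() split mirrors the fragments: the former
counts as a drop for tracing purposes, the latter as normal consumption.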
skb                73 net/decnet/dn_nsp_in.c static void dn_log_martian(struct sk_buff *skb, const char *msg)
skb                76 net/decnet/dn_nsp_in.c 		char *devname = skb->dev ? skb->dev->name : "???";
skb                77 net/decnet/dn_nsp_in.c 		struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb                92 net/decnet/dn_nsp_in.c static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
skb               102 net/decnet/dn_nsp_in.c 			wakeup |= dn_nsp_check_xmit_queue(sk, skb,
skb               112 net/decnet/dn_nsp_in.c 			wakeup |= dn_nsp_check_xmit_queue(sk, skb,
skb               128 net/decnet/dn_nsp_in.c static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth)
skb               130 net/decnet/dn_nsp_in.c 	__le16 *ptr = (__le16 *)skb->data;
skb               134 net/decnet/dn_nsp_in.c 	if (skb->len < 2)
skb               138 net/decnet/dn_nsp_in.c 		skb_pull(skb, 2);
skb               144 net/decnet/dn_nsp_in.c 			dn_ack(sk, skb, ack);
skb               148 net/decnet/dn_nsp_in.c 	if (skb->len < 2)
skb               152 net/decnet/dn_nsp_in.c 		skb_pull(skb, 2);
skb               157 net/decnet/dn_nsp_in.c 			dn_ack(sk, skb, ack);
skb               217 net/decnet/dn_nsp_in.c static struct sock *dn_find_listener(struct sk_buff *skb, unsigned short *reason)
skb               219 net/decnet/dn_nsp_in.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               220 net/decnet/dn_nsp_in.c 	struct nsp_conn_init_msg *msg = (struct nsp_conn_init_msg *)skb->data;
skb               243 net/decnet/dn_nsp_in.c 	if (!pskb_may_pull(skb, sizeof(*msg)))
skb               246 net/decnet/dn_nsp_in.c 	skb_pull(skb, sizeof(*msg));
skb               248 net/decnet/dn_nsp_in.c 	len = skb->len;
skb               249 net/decnet/dn_nsp_in.c 	ptr = skb->data;
skb               318 net/decnet/dn_nsp_in.c 	dn_log_martian(skb, ci_err_table[err].text);
skb               324 net/decnet/dn_nsp_in.c static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
skb               327 net/decnet/dn_nsp_in.c 		kfree_skb(skb);
skb               332 net/decnet/dn_nsp_in.c 	skb_queue_tail(&sk->sk_receive_queue, skb);
skb               336 net/decnet/dn_nsp_in.c static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
skb               338 net/decnet/dn_nsp_in.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               342 net/decnet/dn_nsp_in.c 	if (skb->len < 4)
skb               345 net/decnet/dn_nsp_in.c 	ptr = skb->data;
skb               362 net/decnet/dn_nsp_in.c 		if (skb->len > 0) {
skb               363 net/decnet/dn_nsp_in.c 			u16 dlen = *skb->data;
skb               364 net/decnet/dn_nsp_in.c 			if ((dlen <= 16) && (dlen <= skb->len)) {
skb               366 net/decnet/dn_nsp_in.c 				skb_copy_from_linear_data_offset(skb, 1,
skb               376 net/decnet/dn_nsp_in.c 	kfree_skb(skb);
skb               379 net/decnet/dn_nsp_in.c static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb)
skb               388 net/decnet/dn_nsp_in.c 	kfree_skb(skb);
skb               391 net/decnet/dn_nsp_in.c static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
skb               394 net/decnet/dn_nsp_in.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               397 net/decnet/dn_nsp_in.c 	if (skb->len < 2)
skb               400 net/decnet/dn_nsp_in.c 	reason = le16_to_cpu(*(__le16 *)skb->data);
skb               401 net/decnet/dn_nsp_in.c 	skb_pull(skb, 2);
skb               407 net/decnet/dn_nsp_in.c 	if (skb->len > 0) {
skb               408 net/decnet/dn_nsp_in.c 		u16 dlen = *skb->data;
skb               409 net/decnet/dn_nsp_in.c 		if ((dlen <= 16) && (dlen <= skb->len)) {
skb               411 net/decnet/dn_nsp_in.c 			skb_copy_from_linear_data_offset(skb, 1, scp->discdata_in.opt_data, dlen);
skb               452 net/decnet/dn_nsp_in.c 	kfree_skb(skb);
skb               459 net/decnet/dn_nsp_in.c static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
skb               464 net/decnet/dn_nsp_in.c 	if (skb->len != 2)
skb               467 net/decnet/dn_nsp_in.c 	reason = le16_to_cpu(*(__le16 *)skb->data);
skb               501 net/decnet/dn_nsp_in.c 	kfree_skb(skb);
skb               504 net/decnet/dn_nsp_in.c static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
skb               511 net/decnet/dn_nsp_in.c 	char *ptr = skb->data;
skb               514 net/decnet/dn_nsp_in.c 	if (skb->len != 4)
skb               570 net/decnet/dn_nsp_in.c 	kfree_skb(skb);
skb               578 net/decnet/dn_nsp_in.c static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
skb               585 net/decnet/dn_nsp_in.c 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
skb               591 net/decnet/dn_nsp_in.c 	err = sk_filter(sk, skb);
skb               595 net/decnet/dn_nsp_in.c 	skb_set_owner_r(skb, sk);
skb               596 net/decnet/dn_nsp_in.c 	skb_queue_tail(queue, skb);
skb               604 net/decnet/dn_nsp_in.c static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb)
skb               608 net/decnet/dn_nsp_in.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               611 net/decnet/dn_nsp_in.c 	if (skb->len < 2)
skb               614 net/decnet/dn_nsp_in.c 	cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data);
skb               615 net/decnet/dn_nsp_in.c 	skb_pull(skb, 2);
skb               619 net/decnet/dn_nsp_in.c 		if (dn_queue_skb(sk, skb, SIGURG, &scp->other_receive_queue) == 0) {
skb               629 net/decnet/dn_nsp_in.c 		kfree_skb(skb);
skb               632 net/decnet/dn_nsp_in.c static void dn_nsp_data(struct sock *sk, struct sk_buff *skb)
skb               636 net/decnet/dn_nsp_in.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               639 net/decnet/dn_nsp_in.c 	if (skb->len < 2)
skb               642 net/decnet/dn_nsp_in.c 	cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data);
skb               643 net/decnet/dn_nsp_in.c 	skb_pull(skb, 2);
skb               646 net/decnet/dn_nsp_in.c 		if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) {
skb               660 net/decnet/dn_nsp_in.c 		kfree_skb(skb);
skb               668 net/decnet/dn_nsp_in.c static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb)
skb               679 net/decnet/dn_nsp_in.c 	kfree_skb(skb);
skb               682 net/decnet/dn_nsp_in.c static int dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason)
skb               684 net/decnet/dn_nsp_in.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               695 net/decnet/dn_nsp_in.c 			dn_nsp_return_disc(skb, NSP_DISCINIT, reason);
skb               699 net/decnet/dn_nsp_in.c 			dn_nsp_return_disc(skb, NSP_DISCCONF, reason);
skb               706 net/decnet/dn_nsp_in.c 	kfree_skb(skb);
skb               711 net/decnet/dn_nsp_in.c 			    struct sk_buff *skb)
skb               713 net/decnet/dn_nsp_in.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               715 net/decnet/dn_nsp_in.c 	unsigned char *ptr = (unsigned char *)skb->data;
skb               718 net/decnet/dn_nsp_in.c 	if (!pskb_may_pull(skb, 2))
skb               721 net/decnet/dn_nsp_in.c 	skb_reset_transport_header(skb);
skb               743 net/decnet/dn_nsp_in.c 			sk = dn_find_listener(skb, &reason);
skb               748 net/decnet/dn_nsp_in.c 	if (!pskb_may_pull(skb, 3))
skb               761 net/decnet/dn_nsp_in.c 	if (pskb_may_pull(skb, 5)) {
skb               764 net/decnet/dn_nsp_in.c 		skb_pull(skb, 5);
skb               779 net/decnet/dn_nsp_in.c 	sk = dn_find_by_skb(skb);
skb               791 net/decnet/dn_nsp_in.c 			if (unlikely(skb_linearize(skb)))
skb               795 net/decnet/dn_nsp_in.c 		return sk_receive_skb(sk, skb, 0);
skb               798 net/decnet/dn_nsp_in.c 	return dn_nsp_no_socket(skb, reason);
skb               801 net/decnet/dn_nsp_in.c 	kfree_skb(skb);
skb               805 net/decnet/dn_nsp_in.c int dn_nsp_rx(struct sk_buff *skb)
skb               808 net/decnet/dn_nsp_in.c 		       &init_net, NULL, skb, skb->dev, NULL,
skb               817 net/decnet/dn_nsp_in.c int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
skb               820 net/decnet/dn_nsp_in.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               824 net/decnet/dn_nsp_in.c 			dn_returned_conn_init(sk, skb);
skb               826 net/decnet/dn_nsp_in.c 			kfree_skb(skb);
skb               837 net/decnet/dn_nsp_in.c 			dn_nsp_conn_init(sk, skb);
skb               840 net/decnet/dn_nsp_in.c 			dn_nsp_conn_conf(sk, skb);
skb               843 net/decnet/dn_nsp_in.c 			dn_nsp_disc_init(sk, skb);
skb               846 net/decnet/dn_nsp_in.c 			dn_nsp_disc_conf(sk, skb);
skb               855 net/decnet/dn_nsp_in.c 		dn_nsp_conn_ack(sk, skb);
skb               876 net/decnet/dn_nsp_in.c 		dn_process_ack(sk, skb, other);
skb               890 net/decnet/dn_nsp_in.c 				dn_nsp_linkservice(sk, skb);
skb               893 net/decnet/dn_nsp_in.c 				dn_nsp_otherdata(sk, skb);
skb               896 net/decnet/dn_nsp_in.c 				dn_nsp_data(sk, skb);
skb               901 net/decnet/dn_nsp_in.c 			kfree_skb(skb);
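The dn_queue_skb() fragments above combine three standard receive-side
steps: receive-buffer backpressure, the socket filter, and memory
accounting via skb_set_owner_r(). A sketch of that sequence; the error
codes are illustrative, not taken from the fragments:

#include <linux/skbuff.h>
#include <net/sock.h>

static int queue_rx_sketch(struct sock *sk, struct sk_buff *skb,
			   struct sk_buff_head *queue)
{
	int err;

	/* Backpressure: refuse skbs that would overflow the socket's
	 * receive-buffer budget. */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf)
		return -ENOMEM;

	/* Run the attached socket filter, if any. */
	err = sk_filter(sk, skb);
	if (err)
		return err;

	skb_set_owner_r(skb, sk);	/* charge sk_rmem_alloc */
	skb_queue_tail(queue, skb);
	return 0;
}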
skb                66 net/decnet/dn_nsp_out.c static void dn_nsp_send(struct sk_buff *skb)
skb                68 net/decnet/dn_nsp_out.c 	struct sock *sk = skb->sk;
skb                73 net/decnet/dn_nsp_out.c 	skb_reset_transport_header(skb);
skb                79 net/decnet/dn_nsp_out.c 		skb_dst_set(skb, dst);
skb                80 net/decnet/dn_nsp_out.c 		dst_output(&init_net, skb->sk, skb);
skb               112 net/decnet/dn_nsp_out.c 	struct sk_buff *skb;
skb               115 net/decnet/dn_nsp_out.c 	if ((skb = alloc_skb(size + hdr, pri)) == NULL)
skb               118 net/decnet/dn_nsp_out.c 	skb->protocol = htons(ETH_P_DNA_RT);
skb               119 net/decnet/dn_nsp_out.c 	skb->pkt_type = PACKET_OUTGOING;
skb               122 net/decnet/dn_nsp_out.c 		skb_set_owner_w(skb, sk);
skb               124 net/decnet/dn_nsp_out.c 	skb_reserve(skb, hdr);
skb               126 net/decnet/dn_nsp_out.c 	return skb;
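The dn_alloc_skb() fragments just above show DECnet's transmit-side
allocation pattern: allocate with extra headroom, stamp the DECnet routing
protocol and outgoing packet type, attach the socket as write owner, then
reserve the headroom so later skb_push() calls stay within the buffer. A
sketch, assuming a fixed 64-byte headroom (the actual value is not visible
in the fragments):

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <net/sock.h>

static struct sk_buff *alloc_tx_skb_sketch(struct sock *sk, int size,
					   gfp_t pri)
{
	int hdr = 64;	/* assumed room for link-layer + NSP headers */
	struct sk_buff *skb = alloc_skb(size + hdr, pri);

	if (skb == NULL)
		return NULL;

	skb->protocol = htons(ETH_P_DNA_RT);	/* DECnet routing layer */
	skb->pkt_type = PACKET_OUTGOING;

	if (sk)
		skb_set_owner_w(skb, sk);	/* charge sk_wmem_alloc */

	skb_reserve(skb, hdr);			/* headroom for skb_push() */
	return skb;
}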
skb               203 net/decnet/dn_nsp_out.c static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb,
skb               206 net/decnet/dn_nsp_out.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               210 net/decnet/dn_nsp_out.c 	if ((skb2 = skb_clone(skb, gfp)) != NULL) {
skb               214 net/decnet/dn_nsp_out.c 		skb2->sk = skb->sk;
skb               233 net/decnet/dn_nsp_out.c 	struct sk_buff *skb;
skb               239 net/decnet/dn_nsp_out.c 	if ((skb = skb_peek(&scp->other_xmit_queue)) != NULL)
skb               240 net/decnet/dn_nsp_out.c 		reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);
skb               250 net/decnet/dn_nsp_out.c 	if ((skb = skb_peek(&scp->data_xmit_queue)) != NULL)
skb               251 net/decnet/dn_nsp_out.c 		reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);
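dn_nsp_clone_and_send() above captures NSP's retransmission model: the skb
on the transmit queue is never handed to the device itself; a clone is
sent in its place, so the original stays queued until it is acknowledged
and can be cloned again on timeout. A sketch of the core step
(dn_nsp_send() is the transmit helper shown earlier in this file):

#include <linux/skbuff.h>

void dn_nsp_send(struct sk_buff *skb);	/* shown above */

static void send_clone_sketch(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *skb2 = skb_clone(skb, gfp);

	if (skb2 != NULL) {
		/* skb_clone() leaves skb->sk unset, so the clone takes
		 * the queued skb's socket by hand, as the fragment does. */
		skb2->sk = skb->sk;
		dn_nsp_send(skb2);	/* transmit the clone only */
	}
}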
skb               279 net/decnet/dn_nsp_out.c static inline __le16 *dn_mk_common_header(struct dn_scp *scp, struct sk_buff *skb, unsigned char msgflag, int len)
skb               281 net/decnet/dn_nsp_out.c 	unsigned char *ptr = skb_push(skb, len);
skb               293 net/decnet/dn_nsp_out.c static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned char msgflag, int hlen, int other)
skb               314 net/decnet/dn_nsp_out.c 	ptr = dn_mk_common_header(scp, skb, msgflag, hlen);
skb               322 net/decnet/dn_nsp_out.c static __le16 *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *skb, int oth)
skb               325 net/decnet/dn_nsp_out.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               326 net/decnet/dn_nsp_out.c 	__le16 *ptr = dn_mk_ack_header(sk, skb, cb->nsp_flags, 11, oth);
skb               340 net/decnet/dn_nsp_out.c void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb,
skb               344 net/decnet/dn_nsp_out.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               348 net/decnet/dn_nsp_out.c 	dn_nsp_mk_data_header(sk, skb, oth);
skb               358 net/decnet/dn_nsp_out.c 		skb_queue_tail(&scp->other_xmit_queue, skb);
skb               360 net/decnet/dn_nsp_out.c 		skb_queue_tail(&scp->data_xmit_queue, skb);
skb               365 net/decnet/dn_nsp_out.c 	dn_nsp_clone_and_send(skb, gfp);
skb               369 net/decnet/dn_nsp_out.c int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
skb               371 net/decnet/dn_nsp_out.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               444 net/decnet/dn_nsp_out.c 	struct sk_buff *skb = NULL;
skb               446 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
skb               449 net/decnet/dn_nsp_out.c 	skb_reserve(skb, 9);
skb               450 net/decnet/dn_nsp_out.c 	dn_mk_ack_header(sk, skb, 0x04, 9, 0);
skb               451 net/decnet/dn_nsp_out.c 	dn_nsp_send(skb);
skb               456 net/decnet/dn_nsp_out.c 	struct sk_buff *skb = NULL;
skb               458 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
skb               461 net/decnet/dn_nsp_out.c 	skb_reserve(skb, 9);
skb               462 net/decnet/dn_nsp_out.c 	dn_mk_ack_header(sk, skb, 0x14, 9, 1);
skb               463 net/decnet/dn_nsp_out.c 	dn_nsp_send(skb);
skb               470 net/decnet/dn_nsp_out.c 	struct sk_buff *skb = NULL;
skb               473 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL)
skb               476 net/decnet/dn_nsp_out.c 	msg = skb_put(skb, 3);
skb               480 net/decnet/dn_nsp_out.c 	dn_nsp_send(skb);
skb               496 net/decnet/dn_nsp_out.c 	struct sk_buff *skb = NULL;
skb               500 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL)
skb               503 net/decnet/dn_nsp_out.c 	msg = skb_put(skb, sizeof(*msg));
skb               511 net/decnet/dn_nsp_out.c 	skb_put_u8(skb, len);
skb               514 net/decnet/dn_nsp_out.c 		skb_put_data(skb, scp->conndata_out.opt_data, len);
skb               517 net/decnet/dn_nsp_out.c 	dn_nsp_send(skb);
skb               529 net/decnet/dn_nsp_out.c 	struct sk_buff *skb = NULL;
skb               539 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, size, gfp)) == NULL)
skb               542 net/decnet/dn_nsp_out.c 	msg = skb_put(skb, size);
skb               562 net/decnet/dn_nsp_out.c 	skb_dst_set(skb, dst_clone(dst));
skb               563 net/decnet/dn_nsp_out.c 	dst_output(&init_net, skb->sk, skb);
skb               584 net/decnet/dn_nsp_out.c void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg,
skb               587 net/decnet/dn_nsp_out.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               591 net/decnet/dn_nsp_out.c 	dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb_dst(skb), ddl,
skb               599 net/decnet/dn_nsp_out.c 	struct sk_buff *skb;
skb               603 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL)
skb               606 net/decnet/dn_nsp_out.c 	skb_reserve(skb, DN_MAX_NSP_DATA_HEADER);
skb               607 net/decnet/dn_nsp_out.c 	ptr = skb_put(skb, 2);
skb               608 net/decnet/dn_nsp_out.c 	DN_SKB_CB(skb)->nsp_flags = 0x10;
skb               612 net/decnet/dn_nsp_out.c 	dn_nsp_queue_xmit(sk, skb, gfp, 1);
skb               637 net/decnet/dn_nsp_out.c 	struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);
skb               639 net/decnet/dn_nsp_out.c 	if (!skb)
skb               642 net/decnet/dn_nsp_out.c 	cb  = DN_SKB_CB(skb);
skb               643 net/decnet/dn_nsp_out.c 	msg = skb_put(skb, sizeof(*msg));
skb               656 net/decnet/dn_nsp_out.c 	skb_put(skb, dn_sockaddr2username(&scp->peer,
skb               657 net/decnet/dn_nsp_out.c 					  skb_tail_pointer(skb), type));
skb               658 net/decnet/dn_nsp_out.c 	skb_put(skb, dn_sockaddr2username(&scp->addr,
skb               659 net/decnet/dn_nsp_out.c 					  skb_tail_pointer(skb), 2));
skb               667 net/decnet/dn_nsp_out.c 	skb_put_u8(skb, menuver);	/* Menu Version		*/
skb               670 net/decnet/dn_nsp_out.c 	skb_put_u8(skb, aux);
skb               672 net/decnet/dn_nsp_out.c 		skb_put_data(skb, scp->accessdata.acc_user, aux);
skb               675 net/decnet/dn_nsp_out.c 	skb_put_u8(skb, aux);
skb               677 net/decnet/dn_nsp_out.c 		skb_put_data(skb, scp->accessdata.acc_pass, aux);
skb               680 net/decnet/dn_nsp_out.c 	skb_put_u8(skb, aux);
skb               682 net/decnet/dn_nsp_out.c 		skb_put_data(skb, scp->accessdata.acc_acc, aux);
skb               685 net/decnet/dn_nsp_out.c 	skb_put_u8(skb, aux);
skb               687 net/decnet/dn_nsp_out.c 		skb_put_data(skb, scp->conndata_out.opt_data, aux);
skb               694 net/decnet/dn_nsp_out.c 	dn_nsp_send(skb);
skb               113 net/decnet/dn_route.c 			       struct sk_buff *skb, u32 mtu,
skb               116 net/decnet/dn_route.c 			    struct sk_buff *skb);
skb               118 net/decnet/dn_route.c 					     struct sk_buff *skb,
skb               255 net/decnet/dn_route.c 			       struct sk_buff *skb, u32 mtu,
skb               285 net/decnet/dn_route.c 			    struct sk_buff *skb)
skb               303 net/decnet/dn_route.c static void dn_dst_link_failure(struct sk_buff *skb)
skb               418 net/decnet/dn_route.c static int dn_return_short(struct sk_buff *skb)
skb               426 net/decnet/dn_route.c 	skb_push(skb, skb->data - skb_network_header(skb));
skb               428 net/decnet/dn_route.c 	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
skb               431 net/decnet/dn_route.c 	cb = DN_SKB_CB(skb);
skb               433 net/decnet/dn_route.c 	ptr = skb->data + 2;
skb               444 net/decnet/dn_route.c 	skb->pkt_type = PACKET_OUTGOING;
skb               445 net/decnet/dn_route.c 	dn_rt_finish_output(skb, NULL, NULL);
skb               454 net/decnet/dn_route.c static int dn_return_long(struct sk_buff *skb)
skb               462 net/decnet/dn_route.c 	skb_push(skb, skb->data - skb_network_header(skb));
skb               464 net/decnet/dn_route.c 	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
skb               467 net/decnet/dn_route.c 	cb = DN_SKB_CB(skb);
skb               469 net/decnet/dn_route.c 	ptr = skb->data + 2;
skb               490 net/decnet/dn_route.c 	skb->pkt_type = PACKET_OUTGOING;
skb               491 net/decnet/dn_route.c 	dn_rt_finish_output(skb, dst_addr, src_addr);
skb               501 net/decnet/dn_route.c static int dn_route_rx_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               506 net/decnet/dn_route.c 	if ((err = dn_route_input(skb)) == 0)
skb               507 net/decnet/dn_route.c 		return dst_input(skb);
skb               509 net/decnet/dn_route.c 	cb = DN_SKB_CB(skb);
skb               511 net/decnet/dn_route.c 		char *devname = skb->dev ? skb->dev->name : "???";
skb               515 net/decnet/dn_route.c 			(int)cb->rt_flags, devname, skb->len,
skb               517 net/decnet/dn_route.c 			err, skb->pkt_type);
skb               520 net/decnet/dn_route.c 	if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
skb               523 net/decnet/dn_route.c 			return dn_return_short(skb);
skb               525 net/decnet/dn_route.c 			return dn_return_long(skb);
skb               529 net/decnet/dn_route.c 	kfree_skb(skb);
skb               533 net/decnet/dn_route.c static int dn_route_rx_long(struct sk_buff *skb)
skb               535 net/decnet/dn_route.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               536 net/decnet/dn_route.c 	unsigned char *ptr = skb->data;
skb               538 net/decnet/dn_route.c 	if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
skb               541 net/decnet/dn_route.c 	skb_pull(skb, 20);
skb               542 net/decnet/dn_route.c 	skb_reset_transport_header(skb);
skb               563 net/decnet/dn_route.c 		       &init_net, NULL, skb, skb->dev, NULL,
skb               567 net/decnet/dn_route.c 	kfree_skb(skb);
skb               573 net/decnet/dn_route.c static int dn_route_rx_short(struct sk_buff *skb)
skb               575 net/decnet/dn_route.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               576 net/decnet/dn_route.c 	unsigned char *ptr = skb->data;
skb               578 net/decnet/dn_route.c 	if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
skb               581 net/decnet/dn_route.c 	skb_pull(skb, 5);
skb               582 net/decnet/dn_route.c 	skb_reset_transport_header(skb);
skb               591 net/decnet/dn_route.c 		       &init_net, NULL, skb, skb->dev, NULL,
skb               595 net/decnet/dn_route.c 	kfree_skb(skb);
skb               599 net/decnet/dn_route.c static int dn_route_discard(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               605 net/decnet/dn_route.c 	kfree_skb(skb);
skb               609 net/decnet/dn_route.c static int dn_route_ptp_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               611 net/decnet/dn_route.c 	dn_dev_hello(skb);
skb               612 net/decnet/dn_route.c 	dn_neigh_pointopoint_hello(skb);
skb               616 net/decnet/dn_route.c int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
skb               620 net/decnet/dn_route.c 	__u16 len = le16_to_cpu(*(__le16 *)skb->data);
skb               630 net/decnet/dn_route.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
skb               633 net/decnet/dn_route.c 	if (!pskb_may_pull(skb, 3))
skb               636 net/decnet/dn_route.c 	skb_pull(skb, 2);
skb               638 net/decnet/dn_route.c 	if (len > skb->len)
skb               641 net/decnet/dn_route.c 	skb_trim(skb, len);
skb               643 net/decnet/dn_route.c 	flags = *skb->data;
skb               645 net/decnet/dn_route.c 	cb = DN_SKB_CB(skb);
skb               654 net/decnet/dn_route.c 		if (!pskb_may_pull(skb, padlen + 1))
skb               656 net/decnet/dn_route.c 		skb_pull(skb, padlen);
skb               657 net/decnet/dn_route.c 		flags = *skb->data;
skb               660 net/decnet/dn_route.c 	skb_reset_network_header(skb);
skb               673 net/decnet/dn_route.c 			(int)flags, (dev) ? dev->name : "???", len, skb->len,
skb               677 net/decnet/dn_route.c 		if (unlikely(skb_linearize(skb)))
skb               682 net/decnet/dn_route.c 			dn_dev_init_pkt(skb);
skb               685 net/decnet/dn_route.c 			dn_dev_veri_pkt(skb);
skb               695 net/decnet/dn_route.c 				       &init_net, NULL, skb, skb->dev, NULL,
skb               701 net/decnet/dn_route.c 				       &init_net, NULL, skb, skb->dev, NULL,
skb               705 net/decnet/dn_route.c 				       &init_net, NULL, skb, skb->dev, NULL,
skb               710 net/decnet/dn_route.c 				       &init_net, NULL, skb, skb->dev, NULL,
skb               717 net/decnet/dn_route.c 		skb_pull(skb, 1); /* Pull flags */
skb               721 net/decnet/dn_route.c 			return dn_route_rx_long(skb);
skb               723 net/decnet/dn_route.c 			return dn_route_rx_short(skb);
skb               728 net/decnet/dn_route.c 	kfree_skb(skb);
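dn_route_rcv() above starts from DECnet's length-prefixed framing: the
first two bytes are a little-endian payload length that is validated
against the real skb length before the flag byte is examined. A sketch of
just that framing step, using the calls visible in the fragments (the
pull check is done before the read here, which is the safe ordering):

#include <linux/skbuff.h>

static int dn_frame_trim_sketch(struct sk_buff *skb)
{
	__u16 len;

	if (!pskb_may_pull(skb, 3))	/* 2 length bytes + flag byte */
		return -EINVAL;

	len = le16_to_cpu(*(__le16 *)skb->data);
	skb_pull(skb, 2);		/* consume the length field */

	if (len > skb->len)		/* truncated frame */
		return -EINVAL;

	skb_trim(skb, len);		/* drop link-layer trailer/padding */
	return 0;
}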
skb               733 net/decnet/dn_route.c static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               735 net/decnet/dn_route.c 	struct dst_entry *dst = skb_dst(skb);
skb               738 net/decnet/dn_route.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               745 net/decnet/dn_route.c 	skb->dev = dev;
skb               760 net/decnet/dn_route.c 		       &init_net, sk, skb, NULL, dev,
skb               766 net/decnet/dn_route.c 	kfree_skb(skb);
skb               771 net/decnet/dn_route.c static int dn_forward(struct sk_buff *skb)
skb               773 net/decnet/dn_route.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               774 net/decnet/dn_route.c 	struct dst_entry *dst = skb_dst(skb);
skb               778 net/decnet/dn_route.c 	struct net_device *dev = skb->dev;
skb               780 net/decnet/dn_route.c 	if (skb->pkt_type != PACKET_HOST)
skb               784 net/decnet/dn_route.c 	rt = (struct dn_route *)skb_dst(skb);
skb               786 net/decnet/dn_route.c 	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
skb               795 net/decnet/dn_route.c 	skb->dev = rt->dst.dev;
skb               807 net/decnet/dn_route.c 		       &init_net, NULL, skb, dev, skb->dev,
skb               811 net/decnet/dn_route.c 	kfree_skb(skb);
skb               819 net/decnet/dn_route.c static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               821 net/decnet/dn_route.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               826 net/decnet/dn_route.c 	kfree_skb(skb);
skb               831 net/decnet/dn_route.c static int dn_rt_bug(struct sk_buff *skb)
skb               833 net/decnet/dn_route.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb               838 net/decnet/dn_route.c 	kfree_skb(skb);
skb               856 net/decnet/dn_route.c 					     struct sk_buff *skb,
skb              1300 net/decnet/dn_route.c static int dn_route_input_slow(struct sk_buff *skb)
skb              1303 net/decnet/dn_route.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb              1304 net/decnet/dn_route.c 	struct net_device *in_dev = skb->dev;
skb              1316 net/decnet/dn_route.c 		.flowidn_mark = skb->mark,
skb              1317 net/decnet/dn_route.c 		.flowidn_iif = skb->dev->ifindex,
skb              1491 net/decnet/dn_route.c 	skb_dst_set(skb, &rt->dst);
skb              1517 net/decnet/dn_route.c static int dn_route_input(struct sk_buff *skb)
skb              1520 net/decnet/dn_route.c 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
skb              1523 net/decnet/dn_route.c 	if (skb_dst(skb))
skb              1532 net/decnet/dn_route.c 		    (rt->fld.flowidn_mark == skb->mark) &&
skb              1536 net/decnet/dn_route.c 			skb_dst_set(skb, (struct dst_entry *)rt);
skb              1542 net/decnet/dn_route.c 	return dn_route_input_slow(skb);
skb              1545 net/decnet/dn_route.c static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
skb              1548 net/decnet/dn_route.c 	struct dn_route *rt = (struct dn_route *)skb_dst(skb);
skb              1553 net/decnet/dn_route.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
skb              1571 net/decnet/dn_route.c 	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
skb              1572 net/decnet/dn_route.c 	    nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
skb              1577 net/decnet/dn_route.c 		if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
skb              1581 net/decnet/dn_route.c 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
skb              1589 net/decnet/dn_route.c 	if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
skb              1593 net/decnet/dn_route.c 	    nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
skb              1596 net/decnet/dn_route.c 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
skb              1600 net/decnet/dn_route.c 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
skb              1605 net/decnet/dn_route.c 	    nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
skb              1608 net/decnet/dn_route.c 	nlmsg_end(skb, nlh);
skb              1612 net/decnet/dn_route.c 	nlmsg_cancel(skb, nlh);
skb              1641 net/decnet/dn_route.c 	struct sk_buff *skb;
skb              1656 net/decnet/dn_route.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb              1657 net/decnet/dn_route.c 	if (skb == NULL)
skb              1659 net/decnet/dn_route.c 	skb_reset_mac_header(skb);
skb              1660 net/decnet/dn_route.c 	cb = DN_SKB_CB(skb);
skb              1675 net/decnet/dn_route.c 			kfree_skb(skb);
skb              1678 net/decnet/dn_route.c 		skb->protocol = htons(ETH_P_DNA_RT);
skb              1679 net/decnet/dn_route.c 		skb->dev = dev;
skb              1683 net/decnet/dn_route.c 		err = dn_route_input(skb);
skb              1686 net/decnet/dn_route.c 		rt = (struct dn_route *)skb_dst(skb);
skb              1696 net/decnet/dn_route.c 	skb->dev = NULL;
skb              1699 net/decnet/dn_route.c 	skb_dst_set(skb, &rt->dst);
skb              1703 net/decnet/dn_route.c 	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
skb              1709 net/decnet/dn_route.c 	return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).portid);
skb              1712 net/decnet/dn_route.c 	kfree_skb(skb);
skb              1720 net/decnet/dn_route.c int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              1722 net/decnet/dn_route.c 	struct net *net = sock_net(skb->sk);
skb              1751 net/decnet/dn_route.c 			skb_dst_set(skb, dst_clone(&rt->dst));
skb              1752 net/decnet/dn_route.c 			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid,
skb              1755 net/decnet/dn_route.c 				skb_dst_drop(skb);
skb              1759 net/decnet/dn_route.c 			skb_dst_drop(skb);
skb              1767 net/decnet/dn_route.c 	return skb->len;
skb               122 net/decnet/dn_rules.c static int dn_fib_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
skb               202 net/decnet/dn_rules.c static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
skb               212 net/decnet/dn_rules.c 	     nla_put_le16(skb, FRA_DST, r->dst)) ||
skb               214 net/decnet/dn_rules.c 	     nla_put_le16(skb, FRA_SRC, r->src)))
skb               301 net/decnet/dn_table.c static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
skb               308 net/decnet/dn_table.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
skb               323 net/decnet/dn_table.c 	if (nla_put_u32(skb, RTA_TABLE, tb_id) < 0)
skb               327 net/decnet/dn_table.c 	    nla_put(skb, RTA_DST, 2, dst) < 0)
skb               331 net/decnet/dn_table.c 	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority) < 0)
skb               334 net/decnet/dn_table.c 	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
skb               339 net/decnet/dn_table.c 		    nla_put_le16(skb, RTA_GATEWAY, fi->fib_nh->nh_gw) < 0)
skb               343 net/decnet/dn_table.c 		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif) < 0)
skb               351 net/decnet/dn_table.c 		mp_head = nla_nest_start_noflag(skb, RTA_MULTIPATH);
skb               356 net/decnet/dn_table.c 			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp))))
skb               364 net/decnet/dn_table.c 			    nla_put_le16(skb, RTA_GATEWAY, nh->nh_gw) < 0)
skb               367 net/decnet/dn_table.c 			nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
skb               370 net/decnet/dn_table.c 		nla_nest_end(skb, mp_head);
skb               373 net/decnet/dn_table.c 	nlmsg_end(skb, nlh);
skb               377 net/decnet/dn_table.c 	nlmsg_cancel(skb, nlh);
skb               385 net/decnet/dn_table.c 	struct sk_buff *skb;
skb               389 net/decnet/dn_table.c 	skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
skb               390 net/decnet/dn_table.c 	if (skb == NULL)
skb               393 net/decnet/dn_table.c 	err = dn_fib_dump_info(skb, portid, nlh->nlmsg_seq, event, tb_id,
skb               399 net/decnet/dn_table.c 		kfree_skb(skb);
skb               402 net/decnet/dn_table.c 	rtnl_notify(skb, &init_net, portid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
skb               409 net/decnet/dn_table.c static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
skb               423 net/decnet/dn_table.c 		if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
skb               435 net/decnet/dn_table.c 	return skb->len;
skb               438 net/decnet/dn_table.c static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
skb               453 net/decnet/dn_table.c 		if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) {
skb               459 net/decnet/dn_table.c 	return skb->len;
skb               462 net/decnet/dn_table.c static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
skb               477 net/decnet/dn_table.c 		if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) {
skb               486 net/decnet/dn_table.c 	return skb->len;
skb               489 net/decnet/dn_table.c int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               491 net/decnet/dn_table.c 	struct net *net = sock_net(skb->sk);
skb               502 net/decnet/dn_table.c 			return dn_cache_dump(skb, cb);
skb               515 net/decnet/dn_table.c 			if (tb->dump(tb, skb, cb) < 0)
skb               526 net/decnet/dn_table.c 	return skb->len;
skb                33 net/decnet/netfilter/dn_rtmsg.c 	struct sk_buff *skb = NULL;
skb                42 net/decnet/netfilter/dn_rtmsg.c 	skb = nlmsg_new(size, GFP_ATOMIC);
skb                43 net/decnet/netfilter/dn_rtmsg.c 	if (!skb) {
skb                47 net/decnet/netfilter/dn_rtmsg.c 	old_tail = skb->tail;
skb                48 net/decnet/netfilter/dn_rtmsg.c 	nlh = nlmsg_put(skb, 0, 0, 0, size, 0);
skb                50 net/decnet/netfilter/dn_rtmsg.c 		kfree_skb(skb);
skb                58 net/decnet/netfilter/dn_rtmsg.c 	nlh->nlmsg_len = skb->tail - old_tail;
skb                59 net/decnet/netfilter/dn_rtmsg.c 	return skb;
skb                62 net/decnet/netfilter/dn_rtmsg.c static void dnrmg_send_peer(struct sk_buff *skb)
skb                67 net/decnet/netfilter/dn_rtmsg.c 	unsigned char flags = *skb->data;
skb                80 net/decnet/netfilter/dn_rtmsg.c 	skb2 = dnrmg_build_message(skb, &status);
skb                89 net/decnet/netfilter/dn_rtmsg.c 			struct sk_buff *skb,
skb                92 net/decnet/netfilter/dn_rtmsg.c 	dnrmg_send_peer(skb);
skb                97 net/decnet/netfilter/dn_rtmsg.c #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err), NULL); return; } while (0)
skb                99 net/decnet/netfilter/dn_rtmsg.c static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
skb               101 net/decnet/netfilter/dn_rtmsg.c 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
skb               103 net/decnet/netfilter/dn_rtmsg.c 	if (skb->len < sizeof(*nlh) ||
skb               105 net/decnet/netfilter/dn_rtmsg.c 	    skb->len < nlh->nlmsg_len)
skb               108 net/decnet/netfilter/dn_rtmsg.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb                29 net/dsa/dsa.c  static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
skb                33 net/dsa/dsa.c  	return skb;
skb               180 net/dsa/dsa.c  				       struct sk_buff *skb)
skb               185 net/dsa/dsa.c  	if (skb_headroom(skb) < ETH_HLEN)
skb               188 net/dsa/dsa.c  	__skb_push(skb, ETH_HLEN);
skb               190 net/dsa/dsa.c  	type = ptp_classify_raw(skb);
skb               192 net/dsa/dsa.c  	__skb_pull(skb, ETH_HLEN);
skb               198 net/dsa/dsa.c  		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);
skb               203 net/dsa/dsa.c  static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
skb               212 net/dsa/dsa.c  		kfree_skb(skb);
skb               216 net/dsa/dsa.c  	skb = skb_unshare(skb, GFP_ATOMIC);
skb               217 net/dsa/dsa.c  	if (!skb)
skb               220 net/dsa/dsa.c  	nskb = cpu_dp->rcv(skb, dev, pt);
skb               222 net/dsa/dsa.c  		kfree_skb(skb);
skb               226 net/dsa/dsa.c  	skb = nskb;
skb               227 net/dsa/dsa.c  	p = netdev_priv(skb->dev);
skb               228 net/dsa/dsa.c  	skb_push(skb, ETH_HLEN);
skb               229 net/dsa/dsa.c  	skb->pkt_type = PACKET_HOST;
skb               230 net/dsa/dsa.c  	skb->protocol = eth_type_trans(skb, skb->dev);
skb               235 net/dsa/dsa.c  	s->rx_bytes += skb->len;
skb               238 net/dsa/dsa.c  	if (dsa_skb_defer_rx_timestamp(p, skb))
skb               241 net/dsa/dsa.c  	netif_receive_skb(skb);
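dsa_switch_rcv() above is the master-device entry point for tagged
frames: unshare, let the tagger's rcv hook strip the switch tag and
resolve the slave netdev, then rebuild the skb so it looks as if it
arrived on that slave. A sketch of that flow, with rcv_tag() standing in
for the cpu_dp->rcv hook named in the fragments:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Placeholder for the per-tagger hook: a real tagger strips the switch
 * tag and points skb->dev at the slave port's netdev. */
static struct sk_buff *rcv_tag(struct sk_buff *skb, struct net_device *dev)
{
	return skb;	/* sketch only: no tag handling here */
}

static int master_rcv_sketch(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *nskb;

	skb = skb_unshare(skb, GFP_ATOMIC);	/* private copy if shared */
	if (!skb)
		return 0;

	nskb = rcv_tag(skb, dev);		/* de-tag, pick slave dev */
	if (!nskb) {
		kfree_skb(skb);			/* tagger rejected it */
		return 0;
	}
	skb = nskb;

	skb_push(skb, ETH_HLEN);		/* re-expose the MAC header */
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_receive_skb(skb);			/* deliver on the slave */
	return 0;
}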
skb                66 net/dsa/dsa_priv.h 	struct sk_buff *	(*xmit)(struct sk_buff *skb,
skb               194 net/dsa/dsa_priv.h void *dsa_defer_xmit(struct sk_buff *skb, struct net_device *dev);
skb               185 net/dsa/slave.c 	struct sk_buff *skb;
skb               195 net/dsa/slave.c 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
skb               203 net/dsa/slave.c 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
skb               217 net/dsa/slave.c 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
skb               220 net/dsa/slave.c 	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
skb               223 net/dsa/slave.c 	nlmsg_end(dump->skb, nlh);
skb               230 net/dsa/slave.c 	nlmsg_cancel(dump->skb, nlh);
skb               235 net/dsa/slave.c dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb               242 net/dsa/slave.c 		.skb = skb,
skb               448 net/dsa/slave.c 						     struct sk_buff *skb)
skb               454 net/dsa/slave.c 		netpoll_send_skb(p->netpoll, skb);
skb               462 net/dsa/slave.c 				 struct sk_buff *skb)
skb               468 net/dsa/slave.c 	type = ptp_classify_raw(skb);
skb               475 net/dsa/slave.c 	clone = skb_clone_sk(skb);
skb               479 net/dsa/slave.c 	DSA_SKB_CB(skb)->clone = clone;
skb               487 net/dsa/slave.c netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
skb               493 net/dsa/slave.c 		return dsa_slave_netpoll_send_skb(dev, skb);
skb               498 net/dsa/slave.c 	skb->dev = dsa_slave_to_master(dev);
skb               499 net/dsa/slave.c 	dev_queue_xmit(skb);
skb               505 net/dsa/slave.c static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
skb               514 net/dsa/slave.c 	s->tx_bytes += skb->len;
skb               517 net/dsa/slave.c 	DSA_SKB_CB(skb)->deferred_xmit = false;
skb               518 net/dsa/slave.c 	DSA_SKB_CB(skb)->clone = NULL;
skb               523 net/dsa/slave.c 	dsa_skb_tx_timestamp(p, skb);
skb               528 net/dsa/slave.c 	nskb = p->xmit(skb, dev);
skb               530 net/dsa/slave.c 		if (!DSA_SKB_CB(skb)->deferred_xmit)
skb               531 net/dsa/slave.c 			kfree_skb(skb);
skb               538 net/dsa/slave.c void *dsa_defer_xmit(struct sk_buff *skb, struct net_device *dev)
skb               542 net/dsa/slave.c 	DSA_SKB_CB(skb)->deferred_xmit = true;
skb               544 net/dsa/slave.c 	skb_queue_tail(&dp->xmit_queue, skb);
skb               554 net/dsa/slave.c 	struct sk_buff *skb;
skb               559 net/dsa/slave.c 	while ((skb = skb_dequeue(&dp->xmit_queue)) != NULL)
skb               560 net/dsa/slave.c 		ds->ops->port_deferred_xmit(ds, dp->index, skb);
skb               292 net/dsa/tag_8021q.c struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
skb               298 net/dsa/tag_8021q.c 	return vlan_insert_tag(skb, htons(tpid), tci);
skb                61 net/dsa/tag_brcm.c static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
skb                66 net/dsa/tag_brcm.c 	u16 queue = skb_get_queue_mapping(skb);
skb                69 net/dsa/tag_brcm.c 	if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
skb                81 net/dsa/tag_brcm.c 	if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
skb                84 net/dsa/tag_brcm.c 	skb_push(skb, BRCM_TAG_LEN);
skb                87 net/dsa/tag_brcm.c 		memmove(skb->data, skb->data + BRCM_TAG_LEN, offset);
skb                89 net/dsa/tag_brcm.c 	brcm_tag = skb->data + offset;
skb               105 net/dsa/tag_brcm.c 	skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(dp->index, queue));
skb               107 net/dsa/tag_brcm.c 	return skb;
skb               110 net/dsa/tag_brcm.c static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
skb               118 net/dsa/tag_brcm.c 	if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN)))
skb               121 net/dsa/tag_brcm.c 	brcm_tag = skb->data - offset;
skb               136 net/dsa/tag_brcm.c 	skb->dev = dsa_master_find_slave(dev, 0, source_port);
skb               137 net/dsa/tag_brcm.c 	if (!skb->dev)
skb               141 net/dsa/tag_brcm.c 	skb_pull_rcsum(skb, BRCM_TAG_LEN);
skb               143 net/dsa/tag_brcm.c 	skb->offload_fwd_mark = 1;
skb               145 net/dsa/tag_brcm.c 	return skb;
skb               150 net/dsa/tag_brcm.c static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb,
skb               154 net/dsa/tag_brcm.c 	return brcm_tag_xmit_ll(skb, dev, 2 * ETH_ALEN);
skb               158 net/dsa/tag_brcm.c static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
skb               164 net/dsa/tag_brcm.c 	nskb = brcm_tag_rcv_ll(skb, dev, pt, 2);
skb               189 net/dsa/tag_brcm.c static struct sk_buff *brcm_tag_xmit_prepend(struct sk_buff *skb,
skb               193 net/dsa/tag_brcm.c 	return brcm_tag_xmit_ll(skb, dev, 0);
skb               196 net/dsa/tag_brcm.c static struct sk_buff *brcm_tag_rcv_prepend(struct sk_buff *skb,
skb               201 net/dsa/tag_brcm.c 	return brcm_tag_rcv_ll(skb, dev, pt, ETH_HLEN);
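brcm_tag_xmit_ll() above shows the usual DSA egress tagger shape: ensure
writable headroom, pad short frames so the tag does not eat into the
minimum frame size, push the tag length, shift the MAC addresses forward,
and write the tag at the chosen offset. A generic sketch; TAG_LEN and the
zeroed tag body are placeholders for the switch-specific opcode and port
fields the fragments fill in, and the caller is expected to free the skb
when NULL is returned (as dsa_slave_xmit() does above):

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#define TAG_LEN 4	/* illustrative; Broadcom tags are 4 bytes */

static struct sk_buff *tag_xmit_sketch(struct sk_buff *skb,
				       unsigned int offset)
{
	u8 *tag;

	if (skb_cow_head(skb, TAG_LEN) < 0)	/* writable headroom */
		return NULL;

	/* Pad short frames first, as the fragment does, so the frame
	 * still meets the minimum size once the switch strips the tag. */
	if (__skb_put_padto(skb, ETH_ZLEN + TAG_LEN, false))
		return NULL;

	skb_push(skb, TAG_LEN);

	/* offset == 2 * ETH_ALEN places the tag after the MAC addresses;
	 * offset == 0 prepends it to the whole frame. */
	if (offset)
		memmove(skb->data, skb->data + TAG_LEN, offset);

	tag = skb->data + offset;
	memset(tag, 0, TAG_LEN);		/* placeholder tag bytes */

	return skb;
}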
skb                15 net/dsa/tag_dsa.c static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
skb                25 net/dsa/tag_dsa.c 	if (skb->protocol == htons(ETH_P_8021Q)) {
skb                26 net/dsa/tag_dsa.c 		if (skb_cow_head(skb, 0) < 0)
skb                32 net/dsa/tag_dsa.c 		dsa_header = skb->data + 2 * ETH_ALEN;
skb                44 net/dsa/tag_dsa.c 		if (skb_cow_head(skb, DSA_HLEN) < 0)
skb                46 net/dsa/tag_dsa.c 		skb_push(skb, DSA_HLEN);
skb                48 net/dsa/tag_dsa.c 		memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
skb                53 net/dsa/tag_dsa.c 		dsa_header = skb->data + 2 * ETH_ALEN;
skb                60 net/dsa/tag_dsa.c 	return skb;
skb                63 net/dsa/tag_dsa.c static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
skb                70 net/dsa/tag_dsa.c 	if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
skb                76 net/dsa/tag_dsa.c 	dsa_header = skb->data - 2;
skb                90 net/dsa/tag_dsa.c 	skb->dev = dsa_master_find_slave(dev, source_device, source_port);
skb                91 net/dsa/tag_dsa.c 	if (!skb->dev)
skb               122 net/dsa/tag_dsa.c 		if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb               123 net/dsa/tag_dsa.c 			__wsum c = skb->csum;
skb               126 net/dsa/tag_dsa.c 			skb->csum = c;
skb               134 net/dsa/tag_dsa.c 		skb_pull_rcsum(skb, DSA_HLEN);
skb               135 net/dsa/tag_dsa.c 		memmove(skb->data - ETH_HLEN,
skb               136 net/dsa/tag_dsa.c 			skb->data - ETH_HLEN - DSA_HLEN,
skb               140 net/dsa/tag_dsa.c 	skb->offload_fwd_mark = 1;
skb               142 net/dsa/tag_dsa.c 	return skb;
skb               145 net/dsa/tag_dsa.c static int dsa_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
skb               149 net/dsa/tag_dsa.c 	*proto = ((__be16 *)skb->data)[1];
skb                16 net/dsa/tag_edsa.c static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
skb                27 net/dsa/tag_edsa.c 	if (skb->protocol == htons(ETH_P_8021Q)) {
skb                28 net/dsa/tag_edsa.c 		if (skb_cow_head(skb, DSA_HLEN) < 0)
skb                30 net/dsa/tag_edsa.c 		skb_push(skb, DSA_HLEN);
skb                32 net/dsa/tag_edsa.c 		memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
skb                37 net/dsa/tag_edsa.c 		edsa_header = skb->data + 2 * ETH_ALEN;
skb                53 net/dsa/tag_edsa.c 		if (skb_cow_head(skb, EDSA_HLEN) < 0)
skb                55 net/dsa/tag_edsa.c 		skb_push(skb, EDSA_HLEN);
skb                57 net/dsa/tag_edsa.c 		memmove(skb->data, skb->data + EDSA_HLEN, 2 * ETH_ALEN);
skb                62 net/dsa/tag_edsa.c 		edsa_header = skb->data + 2 * ETH_ALEN;
skb                73 net/dsa/tag_edsa.c 	return skb;
skb                76 net/dsa/tag_edsa.c static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
skb                83 net/dsa/tag_edsa.c 	if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
skb                89 net/dsa/tag_edsa.c 	edsa_header = skb->data + 2;
skb               103 net/dsa/tag_edsa.c 	skb->dev = dsa_master_find_slave(dev, source_device, source_port);
skb               104 net/dsa/tag_edsa.c 	if (!skb->dev)
skb               132 net/dsa/tag_edsa.c 		skb_pull_rcsum(skb, DSA_HLEN);
skb               137 net/dsa/tag_edsa.c 		if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb               138 net/dsa/tag_edsa.c 			__wsum c = skb->csum;
skb               141 net/dsa/tag_edsa.c 			skb->csum = c;
skb               146 net/dsa/tag_edsa.c 		memmove(skb->data - ETH_HLEN,
skb               147 net/dsa/tag_edsa.c 			skb->data - ETH_HLEN - DSA_HLEN,
skb               153 net/dsa/tag_edsa.c 		skb_pull_rcsum(skb, EDSA_HLEN);
skb               154 net/dsa/tag_edsa.c 		memmove(skb->data - ETH_HLEN,
skb               155 net/dsa/tag_edsa.c 			skb->data - ETH_HLEN - EDSA_HLEN,
skb               159 net/dsa/tag_edsa.c 	skb->offload_fwd_mark = 1;
skb               161 net/dsa/tag_edsa.c 	return skb;
skb               164 net/dsa/tag_edsa.c static int edsa_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
skb               168 net/dsa/tag_edsa.c 	*proto = ((__be16 *)skb->data)[3];
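
edsa_xmit() above does the same dance with EDSA_HLEN == 8: the tag is the 4-byte DSA word prefixed by the 0xDADA EtherType marker (ETH_P_EDSA per if_ether.h) and two reserved bytes. A sketch of the header layout following the non-VLAN branch; the helper name is mine:

    #include <stdint.h>

    /* Build the 8-byte EDSA header: ethertype marker, two reserved
     * bytes, then the same 4-byte DSA tag as in tag_dsa.c. */
    static void edsa_fill_header(uint8_t *h, uint8_t sw_index, uint8_t port)
    {
        h[0] = 0xda; h[1] = 0xda;        /* ETH_P_EDSA */
        h[2] = 0x00; h[3] = 0x00;        /* reserved */
        h[4] = 0x40 | (sw_index & 0x1f); /* FROM_CPU | switch device */
        h[5] = (port & 0x1f) << 3;       /* egress port */
        h[6] = 0x00;
        h[7] = 0x00;
    }
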
skb                59 net/dsa/tag_gswip.c static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
skb                66 net/dsa/tag_gswip.c 	err = skb_cow_head(skb, GSWIP_TX_HEADER_LEN);
skb                70 net/dsa/tag_gswip.c 	skb_push(skb, GSWIP_TX_HEADER_LEN);
skb                72 net/dsa/tag_gswip.c 	gswip_tag = skb->data;
skb                79 net/dsa/tag_gswip.c 	return skb;
skb                82 net/dsa/tag_gswip.c static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
skb                89 net/dsa/tag_gswip.c 	if (unlikely(!pskb_may_pull(skb, GSWIP_RX_HEADER_LEN)))
skb                92 net/dsa/tag_gswip.c 	gswip_tag = skb->data - ETH_HLEN;
skb                96 net/dsa/tag_gswip.c 	skb->dev = dsa_master_find_slave(dev, 0, port);
skb                97 net/dsa/tag_gswip.c 	if (!skb->dev)
skb               101 net/dsa/tag_gswip.c 	skb_pull_rcsum(skb, GSWIP_RX_HEADER_LEN);
skb               103 net/dsa/tag_gswip.c 	return skb;
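
gswip_tag_xmit() above shows the prepend variant (as does brcm_tag_xmit_prepend() at the top of this section): the whole tag goes in front of the destination MAC, so no memmove of the addresses is needed, only skb_push(). A sketch of the placement difference only; the GSWIP field encoding is elided here and the byte assignments are purely illustrative:

    #include <stdint.h>

    #define GSWIP_TX_HEADER_LEN 4

    /* Prepend-style tag: the 4 header bytes sit *before* the Ethernet
     * frame, so unlike dsa_insert_tag() above nothing is moved; the
     * frame already starts at frame + GSWIP_TX_HEADER_LEN. */
    static void gswip_prepend_tag(uint8_t *frame, uint8_t port_map)
    {
        frame[0] = 0;          /* source/flags fields elided */
        frame[1] = 0;
        frame[2] = 0;
        frame[3] = port_map;   /* egress-port byte, position illustrative */
    }
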
skb                17 net/dsa/tag_ksz.c static struct sk_buff *ksz_common_xmit(struct sk_buff *skb,
skb                23 net/dsa/tag_ksz.c 	padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
skb                25 net/dsa/tag_ksz.c 	if (skb_tailroom(skb) >= padlen + len) {
skb                27 net/dsa/tag_ksz.c 		if (__skb_put_padto(skb, skb->len + padlen, false))
skb                30 net/dsa/tag_ksz.c 		nskb = skb;
skb                32 net/dsa/tag_ksz.c 		nskb = alloc_skb(NET_IP_ALIGN + skb->len +
skb                40 net/dsa/tag_ksz.c 				       skb_network_header(skb) - skb->head);
skb                42 net/dsa/tag_ksz.c 					 skb_transport_header(skb) - skb->head);
skb                43 net/dsa/tag_ksz.c 		skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
skb                51 net/dsa/tag_ksz.c 		consume_skb(skb);
skb                57 net/dsa/tag_ksz.c static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
skb                61 net/dsa/tag_ksz.c 	skb->dev = dsa_master_find_slave(dev, 0, port);
skb                62 net/dsa/tag_ksz.c 	if (!skb->dev)
skb                65 net/dsa/tag_ksz.c 	pskb_trim_rcsum(skb, skb->len - len);
skb                67 net/dsa/tag_ksz.c 	skb->offload_fwd_mark = true;
skb                69 net/dsa/tag_ksz.c 	return skb;
skb                92 net/dsa/tag_ksz.c static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
skb                99 net/dsa/tag_ksz.c 	nskb = ksz_common_xmit(skb, dev, KSZ8795_INGRESS_TAG_LEN);
skb               114 net/dsa/tag_ksz.c static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev,
skb               117 net/dsa/tag_ksz.c 	u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
skb               119 net/dsa/tag_ksz.c 	return ksz_common_rcv(skb, dev, tag[0] & 7, KSZ_EGRESS_TAG_LEN);
skb               156 net/dsa/tag_ksz.c static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
skb               164 net/dsa/tag_ksz.c 	nskb = ksz_common_xmit(skb, dev, KSZ9477_INGRESS_TAG_LEN);
skb               182 net/dsa/tag_ksz.c static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev,
skb               186 net/dsa/tag_ksz.c 	u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
skb               194 net/dsa/tag_ksz.c 	return ksz_common_rcv(skb, dev, port, len);
skb               211 net/dsa/tag_ksz.c static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
skb               219 net/dsa/tag_ksz.c 	nskb = ksz_common_xmit(skb, dev, KSZ_INGRESS_TAG_LEN);
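
The KSZ taggers above append their tag at the tail instead of the head, which is why ksz_common_xmit() pads short frames to ETH_ZLEN first: otherwise the MAC would pad after the tag and the switch would parse a pad byte as the tag. A userspace sketch of the xmit/rcv pair with a one-byte KSZ8795-style tag; helper names are illustrative:

    #include <stdint.h>
    #include <string.h>

    #define ETH_ZLEN 60

    /* Pad to the 60-byte minimum, then append one tag byte whose bits
     * select the egress port, as in ksz_common_xmit()/ksz8795_xmit(). */
    static size_t ksz_append_tag(uint8_t *frame, size_t len, int port)
    {
        if (len < ETH_ZLEN) {
            memset(frame + len, 0, ETH_ZLEN - len);
            len = ETH_ZLEN;
        }
        frame[len++] = 1u << port;      /* egress port mask */
        return len;
    }

    static int ksz_rcv_port(const uint8_t *frame, size_t len)
    {
        return frame[len - 1] & 7;      /* tag[0] & 7, as in ksz8795_rcv() */
    }

On receive, ksz_common_rcv() then trims the tag with pskb_trim_rcsum(), the tail-side analogue of skb_pull_rcsum().
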
skb                55 net/dsa/tag_lan9303.c static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
skb                63 net/dsa/tag_lan9303.c 	if (skb_cow_head(skb, LAN9303_TAG_LEN) < 0) {
skb                70 net/dsa/tag_lan9303.c 	skb_push(skb, LAN9303_TAG_LEN);
skb                73 net/dsa/tag_lan9303.c 	memmove(skb->data, skb->data + LAN9303_TAG_LEN, 2 * ETH_ALEN);
skb                75 net/dsa/tag_lan9303.c 	lan9303_tag = (u16 *)(skb->data + 2 * ETH_ALEN);
skb                77 net/dsa/tag_lan9303.c 	lan9303_tag[1] = lan9303_xmit_use_arl(dp, skb->data) ?
skb                82 net/dsa/tag_lan9303.c 	return skb;
skb                85 net/dsa/tag_lan9303.c static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
skb                92 net/dsa/tag_lan9303.c 	if (unlikely(!pskb_may_pull(skb, LAN9303_TAG_LEN))) {
skb               104 net/dsa/tag_lan9303.c 	lan9303_tag = (u16 *)(skb->data - 2);
skb               114 net/dsa/tag_lan9303.c 	skb->dev = dsa_master_find_slave(dev, 0, source_port);
skb               115 net/dsa/tag_lan9303.c 	if (!skb->dev) {
skb               123 net/dsa/tag_lan9303.c 	skb_pull_rcsum(skb, 2 + 2);
skb               124 net/dsa/tag_lan9303.c 	memmove(skb->data - ETH_HLEN, skb->data - (ETH_HLEN + LAN9303_TAG_LEN),
skb               126 net/dsa/tag_lan9303.c 	skb->offload_fwd_mark = !(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU);
skb               128 net/dsa/tag_lan9303.c 	return skb;
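
lan9303_xmit() above inserts two big-endian 16-bit words after the MAC addresses: an 0x8100 marker word, then a control word whose low bits carry the port (lan9303_rcv() masks the source port out of the same word). A sketch of that layout; the ALR/STP-override flag bits are omitted and the 2-bit port mask is my reading of tag_lan9303.c:

    #include <stdint.h>

    static void lan9303_fill_tag(uint8_t *frame, uint16_t tag1)
    {
        uint8_t *p = frame + 12;                /* after DA + SA */
        p[0] = 0x81; p[1] = 0x00;               /* ETH_P_8021Q marker */
        p[2] = tag1 >> 8; p[3] = tag1 & 0xff;   /* control word */
    }

    static int lan9303_rcv_port(const uint8_t *tag)
    {
        return (tag[2] << 8 | tag[3]) & 3;      /* low bits = source port */
    }
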
skb                20 net/dsa/tag_mtk.c static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
skb                26 net/dsa/tag_mtk.c 	unsigned char *dest = eth_hdr(skb)->h_dest;
skb                36 net/dsa/tag_mtk.c 	if (!skb_vlan_tagged(skb)) {
skb                37 net/dsa/tag_mtk.c 		if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
skb                40 net/dsa/tag_mtk.c 		skb_push(skb, MTK_HDR_LEN);
skb                41 net/dsa/tag_mtk.c 		memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
skb                45 net/dsa/tag_mtk.c 	mtk_tag = skb->data + 2 * ETH_ALEN;
skb                64 net/dsa/tag_mtk.c 	return skb;
skb                67 net/dsa/tag_mtk.c static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
skb                72 net/dsa/tag_mtk.c 	unsigned char *dest = eth_hdr(skb)->h_dest;
skb                76 net/dsa/tag_mtk.c 	if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
skb                83 net/dsa/tag_mtk.c 	phdr = (__be16 *)(skb->data - 2);
skb                87 net/dsa/tag_mtk.c 	skb_pull_rcsum(skb, MTK_HDR_LEN);
skb                89 net/dsa/tag_mtk.c 	memmove(skb->data - ETH_HLEN,
skb                90 net/dsa/tag_mtk.c 		skb->data - ETH_HLEN - MTK_HDR_LEN,
skb                96 net/dsa/tag_mtk.c 	skb->dev = dsa_master_find_slave(dev, 0, port);
skb                97 net/dsa/tag_mtk.c 	if (!skb->dev)
skb               102 net/dsa/tag_mtk.c 		skb->offload_fwd_mark = 1;
skb               104 net/dsa/tag_mtk.c 	return skb;
skb               107 net/dsa/tag_mtk.c static int mtk_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
skb               111 net/dsa/tag_mtk.c 	*proto = ((__be16 *)skb->data)[1];
skb                31 net/dsa/tag_qca.c static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
skb                36 net/dsa/tag_qca.c 	if (skb_cow_head(skb, QCA_HDR_LEN) < 0)
skb                39 net/dsa/tag_qca.c 	skb_push(skb, QCA_HDR_LEN);
skb                41 net/dsa/tag_qca.c 	memmove(skb->data, skb->data + QCA_HDR_LEN, 2 * ETH_ALEN);
skb                42 net/dsa/tag_qca.c 	phdr = (u16 *)(skb->data + 2 * ETH_ALEN);
skb                50 net/dsa/tag_qca.c 	return skb;
skb                53 net/dsa/tag_qca.c static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
skb                60 net/dsa/tag_qca.c 	if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
skb                67 net/dsa/tag_qca.c 	phdr = (__be16 *)(skb->data - 2);
skb                76 net/dsa/tag_qca.c 	skb_pull_rcsum(skb, QCA_HDR_LEN);
skb                77 net/dsa/tag_qca.c 	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - QCA_HDR_LEN,
skb                83 net/dsa/tag_qca.c 	skb->dev = dsa_master_find_slave(dev, 0, port);
skb                84 net/dsa/tag_qca.c 	if (!skb->dev)
skb                87 net/dsa/tag_qca.c 	return skb;
skb                90 net/dsa/tag_qca.c static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
skb                94 net/dsa/tag_qca.c 	*proto = ((__be16 *)skb->data)[0];
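
The QCA tag is the smallest of the header tags above: one big-endian u16 (QCA_HDR_LEN == 2) placed after the MAC addresses, carrying a 2-bit version in the top bits and port information in the low bits. A sketch under those assumptions; the exact flag bit positions are illustrative, not copied from the driver headers:

    #include <stdint.h>

    static void qca_fill_header(uint8_t *frame, int port)
    {
        uint16_t hdr = (2u << 14)      /* version, top two bits */
                     | (1u << 7)       /* from-CPU flag (assumed bit) */
                     | (1u << port);   /* destination port bitmap */

        frame[12] = hdr >> 8;          /* big-endian, after DA + SA */
        frame[13] = hdr & 0xff;
    }

qca_tag_rcv() above checks the version bits before trusting the header, then recovers the source port from the low bits.
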
skb                11 net/dsa/tag_sja1105.c static inline bool sja1105_is_link_local(const struct sk_buff *skb)
skb                13 net/dsa/tag_sja1105.c 	const struct ethhdr *hdr = eth_hdr(skb);
skb                35 net/dsa/tag_sja1105.c static void sja1105_meta_unpack(const struct sk_buff *skb,
skb                38 net/dsa/tag_sja1105.c 	u8 *buf = skb_mac_header(skb) + ETH_HLEN;
skb                57 net/dsa/tag_sja1105.c static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
skb                59 net/dsa/tag_sja1105.c 	const struct ethhdr *hdr = eth_hdr(skb);
skb                75 net/dsa/tag_sja1105.c static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev)
skb                79 net/dsa/tag_sja1105.c 	if (sja1105_is_link_local(skb))
skb                81 net/dsa/tag_sja1105.c 	if (sja1105_is_meta_frame(skb))
skb                86 net/dsa/tag_sja1105.c static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
skb                92 net/dsa/tag_sja1105.c 	u16 queue_mapping = skb_get_queue_mapping(skb);
skb                99 net/dsa/tag_sja1105.c 	if (unlikely(sja1105_is_link_local(skb)))
skb               100 net/dsa/tag_sja1105.c 		return dsa_defer_xmit(skb, netdev);
skb               108 net/dsa/tag_sja1105.c 		return skb;
skb               110 net/dsa/tag_sja1105.c 	return dsa_8021q_xmit(skb, netdev, ETH_P_SJA1105,
skb               114 net/dsa/tag_sja1105.c static void sja1105_transfer_meta(struct sk_buff *skb,
skb               117 net/dsa/tag_sja1105.c 	struct ethhdr *hdr = eth_hdr(skb);
skb               121 net/dsa/tag_sja1105.c 	SJA1105_SKB_CB(skb)->meta_tstamp = meta->tstamp;
skb               145 net/dsa/tag_sja1105.c *sja1105_rcv_meta_state_machine(struct sk_buff *skb,
skb               153 net/dsa/tag_sja1105.c 	dp = dsa_slave_to_port(skb->dev);
skb               162 net/dsa/tag_sja1105.c 			return skb;
skb               179 net/dsa/tag_sja1105.c 		sp->data->stampable_skb = skb_get(skb);
skb               215 net/dsa/tag_sja1105.c 		if (stampable_skb->dev != skb->dev) {
skb               225 net/dsa/tag_sja1105.c 		kfree_skb(skb);
skb               226 net/dsa/tag_sja1105.c 		skb = stampable_skb;
skb               227 net/dsa/tag_sja1105.c 		sja1105_transfer_meta(skb, meta);
skb               232 net/dsa/tag_sja1105.c 	return skb;
skb               235 net/dsa/tag_sja1105.c static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
skb               247 net/dsa/tag_sja1105.c 	hdr = eth_hdr(skb);
skb               250 net/dsa/tag_sja1105.c 	is_link_local = sja1105_is_link_local(skb);
skb               251 net/dsa/tag_sja1105.c 	is_meta = sja1105_is_meta_frame(skb);
skb               253 net/dsa/tag_sja1105.c 	skb->offload_fwd_mark = 1;
skb               257 net/dsa/tag_sja1105.c 		skb_push_rcsum(skb, ETH_HLEN);
skb               258 net/dsa/tag_sja1105.c 		__skb_vlan_pop(skb, &tci);
skb               259 net/dsa/tag_sja1105.c 		skb_pull_rcsum(skb, ETH_HLEN);
skb               260 net/dsa/tag_sja1105.c 		skb_reset_network_header(skb);
skb               261 net/dsa/tag_sja1105.c 		skb_reset_transport_header(skb);
skb               266 net/dsa/tag_sja1105.c 		skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
skb               278 net/dsa/tag_sja1105.c 		sja1105_meta_unpack(skb, &meta);
skb               285 net/dsa/tag_sja1105.c 	skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
skb               286 net/dsa/tag_sja1105.c 	if (!skb->dev) {
skb               291 net/dsa/tag_sja1105.c 	return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
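
sja1105_filter() above traps management traffic by destination MAC rather than by tag, since link-local frames bypass the 802.1Q-based data-path tagging that sja1105_xmit() applies via dsa_8021q_xmit(). A sketch of the two address ranges it matches; the 3-byte prefixes follow the SJA1105_LINKLOCAL filter constants and the helper name is mine:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Frames to 01:80:C2:xx:xx:xx (IEEE link-local) or
     * 01:1B:19:xx:xx:xx (PTP) take the management path. */
    static bool sja1105_like_is_link_local(const uint8_t *dmac)
    {
        static const uint8_t a[3] = { 0x01, 0x80, 0xc2 };
        static const uint8_t b[3] = { 0x01, 0x1b, 0x19 };

        return !memcmp(dmac, a, 3) || !memcmp(dmac, b, 3);
    }
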
skb                13 net/dsa/tag_trailer.c static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
skb                27 net/dsa/tag_trailer.c 	if (skb->len < 60)
skb                28 net/dsa/tag_trailer.c 		padlen = 60 - skb->len;
skb                30 net/dsa/tag_trailer.c 	nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
skb                36 net/dsa/tag_trailer.c 	skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
skb                37 net/dsa/tag_trailer.c 	skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
skb                38 net/dsa/tag_trailer.c 	skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
skb                39 net/dsa/tag_trailer.c 	consume_skb(skb);
skb                54 net/dsa/tag_trailer.c static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
skb                60 net/dsa/tag_trailer.c 	if (skb_linearize(skb))
skb                63 net/dsa/tag_trailer.c 	trailer = skb_tail_pointer(skb) - 4;
skb                70 net/dsa/tag_trailer.c 	skb->dev = dsa_master_find_slave(dev, 0, source_port);
skb                71 net/dsa/tag_trailer.c 	if (!skb->dev)
skb                74 net/dsa/tag_trailer.c 	if (pskb_trim_rcsum(skb, skb->len - 4))
skb                77 net/dsa/tag_trailer.c 	return skb;
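
trailer_xmit() and trailer_rcv() above show the third tag placement: a fixed 4-byte trailer after a frame padded to the 60-byte minimum, with the port selected by the second trailer byte. A sketch using the constant values visible in tag_trailer.c; the helper name is mine:

    #include <stdint.h>
    #include <string.h>

    static size_t trailer_append(uint8_t *frame, size_t len, int port)
    {
        if (len < 60) {                 /* pad before tagging */
            memset(frame + len, 0, 60 - len);
            len = 60;
        }
        frame[len + 0] = 0x80;          /* trailer marker */
        frame[len + 1] = 1u << port;    /* egress port */
        frame[len + 2] = 0x10;
        frame[len + 3] = 0x00;
        return len + 4;
    }
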
skb                79 net/ethernet/eth.c int eth_header(struct sk_buff *skb, struct net_device *dev,
skb                83 net/ethernet/eth.c 	struct ethhdr *eth = skb_push(skb, ETH_HLEN);
skb               155 net/ethernet/eth.c __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb               161 net/ethernet/eth.c 	skb->dev = dev;
skb               162 net/ethernet/eth.c 	skb_reset_mac_header(skb);
skb               164 net/ethernet/eth.c 	eth = (struct ethhdr *)skb->data;
skb               165 net/ethernet/eth.c 	skb_pull_inline(skb, ETH_HLEN);
skb               171 net/ethernet/eth.c 				skb->pkt_type = PACKET_BROADCAST;
skb               173 net/ethernet/eth.c 				skb->pkt_type = PACKET_MULTICAST;
skb               175 net/ethernet/eth.c 			skb->pkt_type = PACKET_OTHERHOST;
skb               189 net/ethernet/eth.c 	if (unlikely(netdev_uses_dsa(dev)) && dsa_can_decode(skb, dev))
skb               201 net/ethernet/eth.c 	sap = skb_header_pointer(skb, 0, sizeof(*sap), &_service_access_point);
skb               217 net/ethernet/eth.c int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
skb               219 net/ethernet/eth.c 	const struct ethhdr *eth = eth_hdr(skb);
skb               278 net/ethernet/eth.c __be16 eth_header_parse_protocol(const struct sk_buff *skb)
skb               280 net/ethernet/eth.c 	const struct ethhdr *eth = eth_hdr(skb);
skb               453 net/ethernet/eth.c struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
skb               463 net/ethernet/eth.c 	off_eth = skb_gro_offset(skb);
skb               465 net/ethernet/eth.c 	eh = skb_gro_header_fast(skb, off_eth);
skb               466 net/ethernet/eth.c 	if (skb_gro_header_hard(skb, hlen)) {
skb               467 net/ethernet/eth.c 		eh = skb_gro_header_slow(skb, hlen, off_eth);
skb               494 net/ethernet/eth.c 	skb_gro_pull(skb, sizeof(*eh));
skb               495 net/ethernet/eth.c 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
skb               496 net/ethernet/eth.c 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
skb               501 net/ethernet/eth.c 	skb_gro_flush_final(skb, pp, flush);
skb               507 net/ethernet/eth.c int eth_gro_complete(struct sk_buff *skb, int nhoff)
skb               509 net/ethernet/eth.c 	struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
skb               514 net/ethernet/eth.c 	if (skb->encapsulation)
skb               515 net/ethernet/eth.c 		skb_set_inner_mac_header(skb, nhoff);
skb               520 net/ethernet/eth.c 		err = ptype->callbacks.gro_complete(skb, nhoff +
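
eth_type_trans() above is the generic ingress classifier every one of these receive paths eventually feeds: set pkt_type from the destination MAC, then treat bytes 12-13 as a protocol only when the value is >= 0x0600 (ETH_P_802_3_MIN), otherwise as an 802.3 length field. A condensed userspace sketch; the real function additionally inspects the LLC SAP to pick ETH_P_802_3 vs ETH_P_802_2, which is elided here:

    #include <stdint.h>
    #include <string.h>

    enum pkt_type { PKT_HOST, PKT_BROADCAST, PKT_MULTICAST };

    static uint16_t eth_classify(const uint8_t *frame, enum pkt_type *type)
    {
        static const uint8_t bcast[6] = { 0xff,0xff,0xff,0xff,0xff,0xff };
        uint16_t proto = (uint16_t)(frame[12] << 8 | frame[13]);

        if (frame[0] & 1)   /* group bit: broadcast or multicast */
            *type = memcmp(frame, bcast, 6) ? PKT_MULTICAST : PKT_BROADCAST;
        else
            *type = PKT_HOST;   /* PACKET_OTHERHOST check elided */

        return proto >= 0x0600 ? proto : 0;  /* 0: 802.3 length, not a proto */
    }
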
skb               224 net/hsr/hsr_device.c static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb               231 net/hsr/hsr_device.c 		skb->dev = master->dev;
skb               232 net/hsr/hsr_device.c 		hsr_forward_skb(skb, master);
skb               235 net/hsr/hsr_device.c 		dev_kfree_skb_any(skb);
skb               248 net/hsr/hsr_device.c 	struct sk_buff *skb;
skb               257 net/hsr/hsr_device.c 	skb = dev_alloc_skb(sizeof(struct hsr_tag) +
skb               261 net/hsr/hsr_device.c 	if (!skb)
skb               264 net/hsr/hsr_device.c 	skb_reserve(skb, hlen);
skb               266 net/hsr/hsr_device.c 	skb->dev = master->dev;
skb               267 net/hsr/hsr_device.c 	skb->protocol = htons(hsr_ver ? ETH_P_HSR : ETH_P_PRP);
skb               268 net/hsr/hsr_device.c 	skb->priority = TC_PRIO_CONTROL;
skb               270 net/hsr/hsr_device.c 	if (dev_hard_header(skb, skb->dev, (hsr_ver ? ETH_P_HSR : ETH_P_PRP),
skb               272 net/hsr/hsr_device.c 			    skb->dev->dev_addr, skb->len) <= 0)
skb               274 net/hsr/hsr_device.c 	skb_reset_mac_header(skb);
skb               275 net/hsr/hsr_device.c 	skb_reset_network_header(skb);
skb               276 net/hsr/hsr_device.c 	skb_reset_transport_header(skb);
skb               279 net/hsr/hsr_device.c 		hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
skb               284 net/hsr/hsr_device.c 	hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
skb               307 net/hsr/hsr_device.c 	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
skb               310 net/hsr/hsr_device.c 	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
skb               313 net/hsr/hsr_device.c 	hsr_forward_skb(skb, master);
skb               318 net/hsr/hsr_device.c 	kfree_skb(skb);
skb                45 net/hsr/hsr_forward.c static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
skb                51 net/hsr/hsr_forward.c 	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
skb                52 net/hsr/hsr_forward.c 	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
skb                66 net/hsr/hsr_forward.c 		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
skb                73 net/hsr/hsr_forward.c 		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
skb                89 net/hsr/hsr_forward.c 	struct sk_buff *skb;
skb                94 net/hsr/hsr_forward.c 	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
skb                96 net/hsr/hsr_forward.c 	if (!skb)
skb                99 net/hsr/hsr_forward.c 	skb_reset_mac_header(skb);
skb               101 net/hsr/hsr_forward.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               102 net/hsr/hsr_forward.c 		skb->csum_start -= HSR_HLEN;
skb               108 net/hsr/hsr_forward.c 	dst = skb_mac_header(skb);
skb               111 net/hsr/hsr_forward.c 	skb->protocol = eth_hdr(skb)->h_proto;
skb               112 net/hsr/hsr_forward.c 	return skb;
skb               123 net/hsr/hsr_forward.c static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
skb               135 net/hsr/hsr_forward.c 	lsdu_size = skb->len - 14;
skb               139 net/hsr/hsr_forward.c 	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
skb               155 net/hsr/hsr_forward.c 	struct sk_buff *skb;
skb               158 net/hsr/hsr_forward.c 	skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC);
skb               159 net/hsr/hsr_forward.c 	if (!skb)
skb               161 net/hsr/hsr_forward.c 	skb_reset_mac_header(skb);
skb               163 net/hsr/hsr_forward.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               164 net/hsr/hsr_forward.c 		skb->csum_start += HSR_HLEN;
skb               170 net/hsr/hsr_forward.c 	src = skb_mac_header(skb);
skb               171 net/hsr/hsr_forward.c 	dst = skb_push(skb, HSR_HLEN);
skb               173 net/hsr/hsr_forward.c 	skb_reset_mac_header(skb);
skb               175 net/hsr/hsr_forward.c 	hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
skb               177 net/hsr/hsr_forward.c 	return skb;
skb               197 net/hsr/hsr_forward.c static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
skb               203 net/hsr/hsr_forward.c 	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
skb               204 net/hsr/hsr_forward.c 	hsr_addr_subst_source(node_src, skb);
skb               205 net/hsr/hsr_forward.c 	skb_pull(skb, ETH_HLEN);
skb               206 net/hsr/hsr_forward.c 	res = netif_rx(skb);
skb               211 net/hsr/hsr_forward.c 		dev->stats.rx_bytes += skb->len;
skb               217 net/hsr/hsr_forward.c static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
skb               221 net/hsr/hsr_forward.c 		hsr_addr_subst_dest(frame->node_src, skb, port);
skb               226 net/hsr/hsr_forward.c 		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
skb               228 net/hsr/hsr_forward.c 	return dev_queue_xmit(skb);
skb               245 net/hsr/hsr_forward.c 	struct sk_buff *skb;
skb               273 net/hsr/hsr_forward.c 			skb = frame_get_tagged_skb(frame, port);
skb               275 net/hsr/hsr_forward.c 			skb = frame_get_stripped_skb(frame, port);
skb               276 net/hsr/hsr_forward.c 		if (!skb) {
skb               281 net/hsr/hsr_forward.c 		skb->dev = port->dev;
skb               283 net/hsr/hsr_forward.c 			hsr_deliver_master(skb, port->dev, frame->node_src);
skb               285 net/hsr/hsr_forward.c 			hsr_xmit(skb, port, frame);
skb               289 net/hsr/hsr_forward.c static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
skb               292 net/hsr/hsr_forward.c 	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
skb               294 net/hsr/hsr_forward.c 		skb->pkt_type = PACKET_HOST;
skb               299 net/hsr/hsr_forward.c 	if (skb->pkt_type == PACKET_HOST ||
skb               300 net/hsr/hsr_forward.c 	    skb->pkt_type == PACKET_MULTICAST ||
skb               301 net/hsr/hsr_forward.c 	    skb->pkt_type == PACKET_BROADCAST) {
skb               309 net/hsr/hsr_forward.c 			       struct sk_buff *skb, struct hsr_port *port)
skb               314 net/hsr/hsr_forward.c 	frame->is_supervision = is_supervision_frame(port->hsr, skb);
skb               315 net/hsr/hsr_forward.c 	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
skb               319 net/hsr/hsr_forward.c 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
skb               329 net/hsr/hsr_forward.c 		frame->skb_hsr = skb;
skb               330 net/hsr/hsr_forward.c 		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
skb               332 net/hsr/hsr_forward.c 		frame->skb_std = skb;
skb               342 net/hsr/hsr_forward.c 	check_local_dest(port->hsr, skb, frame);
skb               348 net/hsr/hsr_forward.c void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
skb               352 net/hsr/hsr_forward.c 	if (skb_mac_header(skb) != skb->data) {
skb               358 net/hsr/hsr_forward.c 	if (hsr_fill_frame_info(&frame, skb, port) < 0)
skb               367 net/hsr/hsr_forward.c 		port->dev->stats.tx_bytes += skb->len;
skb               378 net/hsr/hsr_forward.c 	kfree_skb(skb);
skb                14 net/hsr/hsr_forward.h void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port);
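
hsr_fill_tag() above packs the 6-byte HSR tag that frame_get_tagged_skb() makes room for with skb_push(skb, HSR_HLEN): a be16 combining a 4-bit path id with the 12-bit LSDU size (skb->len - 14, as the lsdu_size line shows), a be16 sequence number, and the encapsulated EtherType. A sketch of that packing; the helper name is mine:

    #include <stdint.h>

    static void hsr_pack_tag(uint8_t *tag, unsigned path, unsigned lsdu_size,
                             unsigned seqnr, uint16_t encap_proto)
    {
        uint16_t w = (path & 0xf) << 12 | (lsdu_size & 0xfff);

        tag[0] = w >> 8;        tag[1] = w & 0xff;
        tag[2] = seqnr >> 8;    tag[3] = seqnr & 0xff;
        tag[4] = encap_proto >> 8;
        tag[5] = encap_proto & 0xff;
    }

The outer h_proto is then rewritten to ETH_P_HSR, which is why frame_get_stripped_skb() must restore skb->protocol from the inner EtherType on the way back out.
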
skb               176 net/hsr/hsr_framereg.c struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
skb               185 net/hsr/hsr_framereg.c 	if (!skb_mac_header_was_set(skb))
skb               188 net/hsr/hsr_framereg.c 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
skb               204 net/hsr/hsr_framereg.c 		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
skb               221 net/hsr/hsr_framereg.c void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
skb               231 net/hsr/hsr_framereg.c 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
skb               234 net/hsr/hsr_framereg.c 	skb_pull(skb, sizeof(struct ethhdr));
skb               238 net/hsr/hsr_framereg.c 		skb_pull(skb, sizeof(struct hsr_tag));
skb               241 net/hsr/hsr_framereg.c 	skb_pull(skb, sizeof(struct hsr_sup_tag));
skb               243 net/hsr/hsr_framereg.c 	hsr_sp = (struct hsr_sup_payload *)skb->data;
skb               277 net/hsr/hsr_framereg.c 	skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
skb               286 net/hsr/hsr_framereg.c void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
skb               288 net/hsr/hsr_framereg.c 	if (!skb_mac_header_was_set(skb)) {
skb               293 net/hsr/hsr_framereg.c 	memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
skb               305 net/hsr/hsr_framereg.c void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
skb               310 net/hsr/hsr_framereg.c 	if (!skb_mac_header_was_set(skb)) {
skb               315 net/hsr/hsr_framereg.c 	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
skb               319 net/hsr/hsr_framereg.c 				       eth_hdr(skb)->h_dest);
skb               327 net/hsr/hsr_framereg.c 	ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
skb                17 net/hsr/hsr_framereg.h struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
skb                19 net/hsr/hsr_framereg.h void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
skb                23 net/hsr/hsr_framereg.h void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb);
skb                24 net/hsr/hsr_framereg.h void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
skb               179 net/hsr/hsr_main.h static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
skb               183 net/hsr/hsr_main.h 	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
skb                78 net/hsr/hsr_netlink.c static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb                91 net/hsr/hsr_netlink.c 		res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
skb                99 net/hsr/hsr_netlink.c 		res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
skb               104 net/hsr/hsr_netlink.c 	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
skb               106 net/hsr/hsr_netlink.c 	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
skb               149 net/hsr/hsr_netlink.c 	struct sk_buff *skb;
skb               154 net/hsr/hsr_netlink.c 	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
skb               155 net/hsr/hsr_netlink.c 	if (!skb)
skb               158 net/hsr/hsr_netlink.c 	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
skb               163 net/hsr/hsr_netlink.c 	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
skb               167 net/hsr/hsr_netlink.c 	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
skb               171 net/hsr/hsr_netlink.c 	genlmsg_end(skb, msg_head);
skb               172 net/hsr/hsr_netlink.c 	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
skb               177 net/hsr/hsr_netlink.c 	kfree_skb(skb);
skb               191 net/hsr/hsr_netlink.c 	struct sk_buff *skb;
skb               196 net/hsr/hsr_netlink.c 	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
skb               197 net/hsr/hsr_netlink.c 	if (!skb)
skb               200 net/hsr/hsr_netlink.c 	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
skb               204 net/hsr/hsr_netlink.c 	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
skb               208 net/hsr/hsr_netlink.c 	genlmsg_end(skb, msg_head);
skb               209 net/hsr/hsr_netlink.c 	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
skb               214 net/hsr/hsr_netlink.c 	kfree_skb(skb);
skb                19 net/hsr/hsr_slave.c 	struct sk_buff *skb = *pskb;
skb                23 net/hsr/hsr_slave.c 	if (!skb_mac_header_was_set(skb)) {
skb                29 net/hsr/hsr_slave.c 	port = hsr_port_get_rcu(skb->dev);
skb                33 net/hsr/hsr_slave.c 	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
skb                35 net/hsr/hsr_slave.c 		kfree_skb(skb);
skb                39 net/hsr/hsr_slave.c 	protocol = eth_hdr(skb)->h_proto;
skb                43 net/hsr/hsr_slave.c 	skb_push(skb, ETH_HLEN);
skb                45 net/hsr/hsr_slave.c 	hsr_forward_skb(skb, port);
skb                33 net/ieee802154/6lowpan/6lowpan_i.h int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
skb                40 net/ieee802154/6lowpan/6lowpan_i.h int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
skb                43 net/ieee802154/6lowpan/6lowpan_i.h netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev);
skb                45 net/ieee802154/6lowpan/6lowpan_i.h int lowpan_iphc_decompress(struct sk_buff *skb);
skb                46 net/ieee802154/6lowpan/6lowpan_i.h lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb);
skb                33 net/ieee802154/6lowpan/reassembly.c static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
skb                85 net/ieee802154/6lowpan/reassembly.c 			     struct sk_buff *skb, u8 frag_type)
skb               100 net/ieee802154/6lowpan/reassembly.c 	offset = lowpan_802154_cb(skb)->d_offset << 3;
skb               101 net/ieee802154/6lowpan/reassembly.c 	end = lowpan_802154_cb(skb)->d_size;
skb               104 net/ieee802154/6lowpan/reassembly.c 	if (offset + skb->len == end) {
skb               122 net/ieee802154/6lowpan/reassembly.c 	ldev = skb->dev;
skb               124 net/ieee802154/6lowpan/reassembly.c 		skb->dev = NULL;
skb               128 net/ieee802154/6lowpan/reassembly.c 	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
skb               132 net/ieee802154/6lowpan/reassembly.c 	fq->q.stamp = skb->tstamp;
skb               136 net/ieee802154/6lowpan/reassembly.c 	fq->q.meat += skb->len;
skb               137 net/ieee802154/6lowpan/reassembly.c 	add_frag_mem_limit(fq->q.fqdir, skb->truesize);
skb               142 net/ieee802154/6lowpan/reassembly.c 		unsigned long orefdst = skb->_skb_refdst;
skb               144 net/ieee802154/6lowpan/reassembly.c 		skb->_skb_refdst = 0UL;
skb               145 net/ieee802154/6lowpan/reassembly.c 		res = lowpan_frag_reasm(fq, skb, prev_tail, ldev);
skb               146 net/ieee802154/6lowpan/reassembly.c 		skb->_skb_refdst = orefdst;
skb               149 net/ieee802154/6lowpan/reassembly.c 	skb_dst_drop(skb);
skb               153 net/ieee802154/6lowpan/reassembly.c 	kfree_skb(skb);
skb               163 net/ieee802154/6lowpan/reassembly.c static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
skb               170 net/ieee802154/6lowpan/reassembly.c 	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
skb               173 net/ieee802154/6lowpan/reassembly.c 	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
skb               175 net/ieee802154/6lowpan/reassembly.c 	skb->dev = ldev;
skb               176 net/ieee802154/6lowpan/reassembly.c 	skb->tstamp = fq->q.stamp;
skb               187 net/ieee802154/6lowpan/reassembly.c static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
skb               205 net/ieee802154/6lowpan/reassembly.c static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
skb               209 net/ieee802154/6lowpan/reassembly.c 	if (!lowpan_is_iphc(*skb_network_header(skb)))
skb               212 net/ieee802154/6lowpan/reassembly.c 	ret = lowpan_iphc_decompress(skb);
skb               219 net/ieee802154/6lowpan/reassembly.c static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
skb               225 net/ieee802154/6lowpan/reassembly.c 		res = rxh(skb);	\
skb               235 net/ieee802154/6lowpan/reassembly.c 	return lowpan_frag_rx_handlers_result(skb, res);
skb               242 net/ieee802154/6lowpan/reassembly.c static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
skb               249 net/ieee802154/6lowpan/reassembly.c 	fail = lowpan_fetch_skb(skb, &high, 1);
skb               250 net/ieee802154/6lowpan/reassembly.c 	fail |= lowpan_fetch_skb(skb, &low, 1);
skb               256 net/ieee802154/6lowpan/reassembly.c 	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
skb               260 net/ieee802154/6lowpan/reassembly.c 		fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
skb               262 net/ieee802154/6lowpan/reassembly.c 		skb_reset_network_header(skb);
skb               267 net/ieee802154/6lowpan/reassembly.c 		fail |= !skb->len;
skb               276 net/ieee802154/6lowpan/reassembly.c int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
skb               279 net/ieee802154/6lowpan/reassembly.c 	struct net *net = dev_net(skb->dev);
skb               280 net/ieee802154/6lowpan/reassembly.c 	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
skb               284 net/ieee802154/6lowpan/reassembly.c 	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
skb               287 net/ieee802154/6lowpan/reassembly.c 	err = lowpan_get_cb(skb, frag_type, cb);
skb               292 net/ieee802154/6lowpan/reassembly.c 		err = lowpan_invoke_frag_rx_handlers(skb);
skb               307 net/ieee802154/6lowpan/reassembly.c 		ret = lowpan_frag_queue(fq, skb, frag_type);
skb               315 net/ieee802154/6lowpan/reassembly.c 	kfree_skb(skb);
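
lowpan_get_cb() above pulls the RFC 4944 fragment header one field at a time: d_size is 11 bits split across the first two bytes, d_tag is a be16, and FRAGN frames add a one-byte offset counted in 8-byte units, which is why lowpan_frag_queue() shifts d_offset left by 3. A flat parser sketch under those assumptions:

    #include <stdint.h>

    struct frag_hdr { unsigned size, tag, offset; };

    /* Returns the number of header bytes consumed: 4 for FRAG1,
     * 5 for FRAGN (the extra byte is the offset field). */
    static int parse_frag(const uint8_t *p, int fragn, struct frag_hdr *h)
    {
        h->size   = (p[0] & 0x07) << 8 | p[1];  /* 11-bit datagram size */
        h->tag    = p[2] << 8 | p[3];           /* datagram tag */
        h->offset = fragn ? p[4] * 8 : 0;       /* 8-byte units */
        return fragn ? 5 : 4;
    }
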
skb                21 net/ieee802154/6lowpan/rx.c static int lowpan_give_skb_to_device(struct sk_buff *skb)
skb                23 net/ieee802154/6lowpan/rx.c 	skb->protocol = htons(ETH_P_IPV6);
skb                24 net/ieee802154/6lowpan/rx.c 	skb->dev->stats.rx_packets++;
skb                25 net/ieee802154/6lowpan/rx.c 	skb->dev->stats.rx_bytes += skb->len;
skb                27 net/ieee802154/6lowpan/rx.c 	return netif_rx(skb);
skb                30 net/ieee802154/6lowpan/rx.c static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
skb                40 net/ieee802154/6lowpan/rx.c 		kfree_skb(skb);
skb                46 net/ieee802154/6lowpan/rx.c 		return lowpan_give_skb_to_device(skb);
skb                64 net/ieee802154/6lowpan/rx.c static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
skb                68 net/ieee802154/6lowpan/rx.c 	if (!(lowpan_is_frag1(*skb_network_header(skb)) ||
skb                69 net/ieee802154/6lowpan/rx.c 	      lowpan_is_fragn(*skb_network_header(skb))))
skb                72 net/ieee802154/6lowpan/rx.c 	ret = lowpan_frag_rcv(skb, *skb_network_header(skb) &
skb                83 net/ieee802154/6lowpan/rx.c int lowpan_iphc_decompress(struct sk_buff *skb)
skb                87 net/ieee802154/6lowpan/rx.c 	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
skb                90 net/ieee802154/6lowpan/rx.c 	return lowpan_header_decompress(skb, skb->dev, &hdr.dest, &hdr.source);
skb                93 net/ieee802154/6lowpan/rx.c static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
skb                97 net/ieee802154/6lowpan/rx.c 	if (!lowpan_is_iphc(*skb_network_header(skb)))
skb               103 net/ieee802154/6lowpan/rx.c 	lowpan_802154_cb(skb)->d_size = 0;
skb               105 net/ieee802154/6lowpan/rx.c 	ret = lowpan_iphc_decompress(skb);
skb               112 net/ieee802154/6lowpan/rx.c lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb)
skb               114 net/ieee802154/6lowpan/rx.c 	if (!lowpan_is_ipv6(*skb_network_header(skb)))
skb               118 net/ieee802154/6lowpan/rx.c 	skb_pull(skb, 1);
skb               127 net/ieee802154/6lowpan/rx.c static lowpan_rx_result lowpan_rx_h_esc(struct sk_buff *skb)
skb               129 net/ieee802154/6lowpan/rx.c 	if (!lowpan_is_esc(*skb_network_header(skb)))
skb               132 net/ieee802154/6lowpan/rx.c 	net_warn_ratelimited("%s: %s\n", skb->dev->name,
skb               143 net/ieee802154/6lowpan/rx.c static lowpan_rx_result lowpan_rx_h_hc1(struct sk_buff *skb)
skb               145 net/ieee802154/6lowpan/rx.c 	if (!lowpan_is_hc1(*skb_network_header(skb)))
skb               148 net/ieee802154/6lowpan/rx.c 	net_warn_ratelimited("%s: %s\n", skb->dev->name,
skb               159 net/ieee802154/6lowpan/rx.c static lowpan_rx_result lowpan_rx_h_dff(struct sk_buff *skb)
skb               161 net/ieee802154/6lowpan/rx.c 	if (!lowpan_is_dff(*skb_network_header(skb)))
skb               164 net/ieee802154/6lowpan/rx.c 	net_warn_ratelimited("%s: %s\n", skb->dev->name,
skb               175 net/ieee802154/6lowpan/rx.c static lowpan_rx_result lowpan_rx_h_bc0(struct sk_buff *skb)
skb               177 net/ieee802154/6lowpan/rx.c 	if (!lowpan_is_bc0(*skb_network_header(skb)))
skb               180 net/ieee802154/6lowpan/rx.c 	net_warn_ratelimited("%s: %s\n", skb->dev->name,
skb               191 net/ieee802154/6lowpan/rx.c static lowpan_rx_result lowpan_rx_h_mesh(struct sk_buff *skb)
skb               193 net/ieee802154/6lowpan/rx.c 	if (!lowpan_is_mesh(*skb_network_header(skb)))
skb               196 net/ieee802154/6lowpan/rx.c 	net_warn_ratelimited("%s: %s\n", skb->dev->name,
skb               202 net/ieee802154/6lowpan/rx.c static int lowpan_invoke_rx_handlers(struct sk_buff *skb)
skb               208 net/ieee802154/6lowpan/rx.c 		res = rxh(skb);	\
skb               224 net/ieee802154/6lowpan/rx.c 	return lowpan_rx_handlers_result(skb, res);
skb               251 net/ieee802154/6lowpan/rx.c static inline bool lowpan_rx_h_check(struct sk_buff *skb)
skb               253 net/ieee802154/6lowpan/rx.c 	__le16 fc = ieee802154_get_fc_from_skb(skb);
skb               257 net/ieee802154/6lowpan/rx.c 	    !ieee802154_skb_is_intra_pan_addressing(fc, skb))
skb               261 net/ieee802154/6lowpan/rx.c 	if (unlikely(!skb->len))
skb               264 net/ieee802154/6lowpan/rx.c 	if (lowpan_is_nalp(*skb_network_header(skb)) ||
skb               265 net/ieee802154/6lowpan/rx.c 	    lowpan_is_reserved(*skb_network_header(skb)))
skb               271 net/ieee802154/6lowpan/rx.c static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
skb               277 net/ieee802154/6lowpan/rx.c 	    skb->pkt_type == PACKET_OTHERHOST ||
skb               278 net/ieee802154/6lowpan/rx.c 	    !lowpan_rx_h_check(skb))
skb               286 net/ieee802154/6lowpan/rx.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               287 net/ieee802154/6lowpan/rx.c 	if (!skb)
skb               289 net/ieee802154/6lowpan/rx.c 	skb->dev = ldev;
skb               295 net/ieee802154/6lowpan/rx.c 	if (lowpan_is_frag1(*skb_network_header(skb)) ||
skb               296 net/ieee802154/6lowpan/rx.c 	    lowpan_is_iphc(*skb_network_header(skb))) {
skb               297 net/ieee802154/6lowpan/rx.c 		skb = skb_unshare(skb, GFP_ATOMIC);
skb               298 net/ieee802154/6lowpan/rx.c 		if (!skb)
skb               302 net/ieee802154/6lowpan/rx.c 	return lowpan_invoke_rx_handlers(skb);
skb               305 net/ieee802154/6lowpan/rx.c 	kfree_skb(skb);
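
Each lowpan_rx_h_*() handler above keys off the first network-header byte, the 6LoWPAN dispatch value. A compact classifier over the dispatch patterns, with values per RFC 4944 and the lowpan_is_*() helpers; ESC, BC0, HC1, and DFF are lumped together since the handlers above only warn and drop them:

    #include <stdint.h>

    enum lowpan_kind { NALP, IPV6, IPHC, MESH, FRAG1, FRAGN, OTHER };

    static enum lowpan_kind lowpan_dispatch(uint8_t b)
    {
        if ((b & 0xc0) == 0x00) return NALP;   /* 00xxxxxx: not LoWPAN */
        if (b == 0x41)          return IPV6;   /* uncompressed IPv6 */
        if ((b & 0xe0) == 0x60) return IPHC;   /* 011xxxxx */
        if ((b & 0xc0) == 0x80) return MESH;   /* 10xxxxxx */
        if ((b & 0xf8) == 0xc0) return FRAG1;  /* 11000xxx */
        if ((b & 0xf8) == 0xe0) return FRAGN;  /* 11100xxx */
        return OTHER;                          /* ESC, BC0, HC1, DFF, ... */
    }
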
skb                19 net/ieee802154/6lowpan/tx.c lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
skb                21 net/ieee802154/6lowpan/tx.c 	WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
skb                22 net/ieee802154/6lowpan/tx.c 	return (struct lowpan_addr_info *)(skb->data -
skb                33 net/ieee802154/6lowpan/tx.c int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
skb                38 net/ieee802154/6lowpan/tx.c 	struct lowpan_addr_info *info = lowpan_skb_priv(skb);
skb                40 net/ieee802154/6lowpan/tx.c 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
skb               101 net/ieee802154/6lowpan/tx.c lowpan_alloc_frag(struct sk_buff *skb, int size,
skb               104 net/ieee802154/6lowpan/tx.c 	struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev;
skb               113 net/ieee802154/6lowpan/tx.c 		frag->priority = skb->priority;
skb               116 net/ieee802154/6lowpan/tx.c 		*mac_cb(frag) = *mac_cb(skb);
skb               119 net/ieee802154/6lowpan/tx.c 			skb_put_data(frag, skb_mac_header(skb), skb->mac_len);
skb               137 net/ieee802154/6lowpan/tx.c lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
skb               145 net/ieee802154/6lowpan/tx.c 	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
skb               150 net/ieee802154/6lowpan/tx.c 	skb_put_data(frag, skb_network_header(skb) + offset, len);
skb               158 net/ieee802154/6lowpan/tx.c lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
skb               177 net/ieee802154/6lowpan/tx.c 			      skb_network_header_len(skb), 8);
skb               179 net/ieee802154/6lowpan/tx.c 	skb_offset = skb_network_header_len(skb);
skb               180 net/ieee802154/6lowpan/tx.c 	skb_unprocessed = skb->len - skb->mac_len - skb_offset;
skb               182 net/ieee802154/6lowpan/tx.c 	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
skb               184 net/ieee802154/6lowpan/tx.c 				  frag_len + skb_network_header_len(skb),
skb               204 net/ieee802154/6lowpan/tx.c 		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
skb               216 net/ieee802154/6lowpan/tx.c 	consume_skb(skb);
skb               220 net/ieee802154/6lowpan/tx.c 	kfree_skb(skb);
skb               224 net/ieee802154/6lowpan/tx.c static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
skb               228 net/ieee802154/6lowpan/tx.c 	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
skb               231 net/ieee802154/6lowpan/tx.c 	memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
skb               233 net/ieee802154/6lowpan/tx.c 	*dgram_size = skb->len;
skb               234 net/ieee802154/6lowpan/tx.c 	lowpan_header_compress(skb, ldev, &info.daddr, &info.saddr);
skb               236 net/ieee802154/6lowpan/tx.c 	*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
skb               246 net/ieee802154/6lowpan/tx.c 	return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev,
skb               250 net/ieee802154/6lowpan/tx.c netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
skb               258 net/ieee802154/6lowpan/tx.c 	WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);
skb               263 net/ieee802154/6lowpan/tx.c 	if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
skb               264 net/ieee802154/6lowpan/tx.c 		     skb_tailroom(skb) < ldev->needed_tailroom)) {
skb               267 net/ieee802154/6lowpan/tx.c 		nskb = skb_copy_expand(skb, ldev->needed_headroom,
skb               270 net/ieee802154/6lowpan/tx.c 			consume_skb(skb);
skb               271 net/ieee802154/6lowpan/tx.c 			skb = nskb;
skb               273 net/ieee802154/6lowpan/tx.c 			kfree_skb(skb);
skb               277 net/ieee802154/6lowpan/tx.c 		skb = skb_unshare(skb, GFP_ATOMIC);
skb               278 net/ieee802154/6lowpan/tx.c 		if (!skb)
skb               282 net/ieee802154/6lowpan/tx.c 	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
skb               284 net/ieee802154/6lowpan/tx.c 		kfree_skb(skb);
skb               288 net/ieee802154/6lowpan/tx.c 	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
skb               289 net/ieee802154/6lowpan/tx.c 		kfree_skb(skb);
skb               295 net/ieee802154/6lowpan/tx.c 	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
skb               296 net/ieee802154/6lowpan/tx.c 		skb->dev = lowpan_802154_dev(ldev)->wdev;
skb               299 net/ieee802154/6lowpan/tx.c 		return dev_queue_xmit(skb);
skb               304 net/ieee802154/6lowpan/tx.c 		rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
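
lowpan_xmit_fragmented() above rounds each fragment's payload down to a multiple of 8 because the receiver's offset field counts 8-byte units; the FRAG1/FRAGN header sizes (4 and 5 bytes) come off the link budget first. A simplified sketch of that per-fragment capacity calculation; the real code also accounts for the compressed IPv6 header riding in the first fragment:

    /* Usable payload per fragment for a given link-layer budget.
     * Payloads must be multiples of 8 so offsets stay exact;
     * round_down(x, 8) == x & ~7. */
    static unsigned int lowpan_frag_payload(unsigned int budget, int first)
    {
        unsigned int cap = budget - (first ? 4 : 5); /* FRAG1 vs FRAGN hdr */
        return cap & ~7u;
    }
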
skb                78 net/ieee802154/header_ops.c ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr)
skb               117 net/ieee802154/header_ops.c 	memcpy(skb_push(skb, pos), buf, pos);
skb               234 net/ieee802154/header_ops.c ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr)
skb               238 net/ieee802154/header_ops.c 	if (!pskb_may_pull(skb, 3))
skb               241 net/ieee802154/header_ops.c 	memcpy(hdr, skb->data, 3);
skb               244 net/ieee802154/header_ops.c 	if (rc < 0 || !pskb_may_pull(skb, rc))
skb               247 net/ieee802154/header_ops.c 	pos += ieee802154_hdr_get_addrs(skb->data + pos, hdr);
skb               250 net/ieee802154/header_ops.c 		int want = pos + ieee802154_hdr_sechdr_len(skb->data[pos]);
skb               252 net/ieee802154/header_ops.c 		if (!pskb_may_pull(skb, want))
skb               255 net/ieee802154/header_ops.c 		pos += ieee802154_hdr_get_sechdr(skb->data + pos, &hdr->sec);
skb               258 net/ieee802154/header_ops.c 	skb_pull(skb, pos);
skb               264 net/ieee802154/header_ops.c ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
skb               266 net/ieee802154/header_ops.c 	const u8 *buf = skb_mac_header(skb);
skb               269 net/ieee802154/header_ops.c 	if (buf + 3 > skb_tail_pointer(skb))
skb               275 net/ieee802154/header_ops.c 	if (rc < 0 || buf + rc > skb_tail_pointer(skb))
skb               284 net/ieee802154/header_ops.c ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
skb               286 net/ieee802154/header_ops.c 	const u8 *buf = skb_mac_header(skb);
skb               289 net/ieee802154/header_ops.c 	pos = ieee802154_hdr_peek_addrs(skb, hdr);
skb               297 net/ieee802154/header_ops.c 		if (buf + want > skb_tail_pointer(skb))
skb                37 net/ieee802154/ieee802154.h int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info);
skb                38 net/ieee802154/ieee802154.h int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb);
skb                39 net/ieee802154/ieee802154.h int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info);
skb                40 net/ieee802154/ieee802154.h int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info);
skb                47 net/ieee802154/ieee802154.h int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info);
skb                48 net/ieee802154/ieee802154.h int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info);
skb                49 net/ieee802154/ieee802154.h int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info);
skb                50 net/ieee802154/ieee802154.h int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info);
skb                51 net/ieee802154/ieee802154.h int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info);
skb                52 net/ieee802154/ieee802154.h int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info);
skb                53 net/ieee802154/ieee802154.h int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb);
skb                54 net/ieee802154/ieee802154.h int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info);
skb                56 net/ieee802154/ieee802154.h int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info);
skb                57 net/ieee802154/ieee802154.h int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info);
skb                58 net/ieee802154/ieee802154.h int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info);
skb                59 net/ieee802154/ieee802154.h int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info);
skb                60 net/ieee802154/ieee802154.h int ieee802154_llsec_dump_keys(struct sk_buff *skb,
skb                62 net/ieee802154/ieee802154.h int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info);
skb                63 net/ieee802154/ieee802154.h int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info);
skb                64 net/ieee802154/ieee802154.h int ieee802154_llsec_dump_devs(struct sk_buff *skb,
skb                66 net/ieee802154/ieee802154.h int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info);
skb                67 net/ieee802154/ieee802154.h int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info);
skb                68 net/ieee802154/ieee802154.h int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
skb                70 net/ieee802154/ieee802154.h int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info);
skb                71 net/ieee802154/ieee802154.h int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info);
skb                72 net/ieee802154/ieee802154.h int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
skb               173 net/ieee802154/nl-mac.c int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info)
skb               220 net/ieee802154/nl-mac.c int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info)
skb               253 net/ieee802154/nl-mac.c int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
skb               295 net/ieee802154/nl-mac.c int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
skb               367 net/ieee802154/nl-mac.c int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
skb               404 net/ieee802154/nl-mac.c int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info)
skb               438 net/ieee802154/nl-mac.c int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
skb               440 net/ieee802154/nl-mac.c 	struct net *net = sock_net(skb->sk);
skb               452 net/ieee802154/nl-mac.c 		if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
skb               461 net/ieee802154/nl-mac.c 	return skb->len;
skb               464 net/ieee802154/nl-mac.c int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
skb               642 net/ieee802154/nl-mac.c int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
skb               695 net/ieee802154/nl-mac.c int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info)
skb               759 net/ieee802154/nl-mac.c 	struct sk_buff *skb;
skb               769 net/ieee802154/nl-mac.c ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
skb               772 net/ieee802154/nl-mac.c 	struct net *net = sock_net(skb->sk);
skb               787 net/ieee802154/nl-mac.c 		data.skb = skb;
skb               791 net/ieee802154/nl-mac.c 		data.portid = NETLINK_CB(cb->skb).portid;
skb               807 net/ieee802154/nl-mac.c 	return skb->len;
skb               811 net/ieee802154/nl-mac.c ieee802154_nl_llsec_change(struct sk_buff *skb, struct genl_info *info,
skb               882 net/ieee802154/nl-mac.c int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info)
skb               888 net/ieee802154/nl-mac.c 	return ieee802154_nl_llsec_change(skb, info, llsec_add_key);
skb               902 net/ieee802154/nl-mac.c int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info)
skb               904 net/ieee802154/nl-mac.c 	return ieee802154_nl_llsec_change(skb, info, llsec_remove_key);
skb               957 net/ieee802154/nl-mac.c 		if (ieee802154_nl_fill_key(data->skb, data->portid,
skb               969 net/ieee802154/nl-mac.c int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb)
skb               971 net/ieee802154/nl-mac.c 	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys);
skb              1017 net/ieee802154/nl-mac.c int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info)
skb              1023 net/ieee802154/nl-mac.c 	return ieee802154_nl_llsec_change(skb, info, llsec_add_dev);
skb              1039 net/ieee802154/nl-mac.c int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info)
skb              1041 net/ieee802154/nl-mac.c 	return ieee802154_nl_llsec_change(skb, info, llsec_del_dev);
skb              1088 net/ieee802154/nl-mac.c 		if (ieee802154_nl_fill_dev(data->skb, data->portid,
skb              1100 net/ieee802154/nl-mac.c int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb)
skb              1102 net/ieee802154/nl-mac.c 	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs);
skb              1122 net/ieee802154/nl-mac.c int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info)
skb              1128 net/ieee802154/nl-mac.c 	return ieee802154_nl_llsec_change(skb, info, llsec_add_devkey);
skb              1146 net/ieee802154/nl-mac.c int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info)
skb              1148 net/ieee802154/nl-mac.c 	return ieee802154_nl_llsec_change(skb, info, llsec_del_devkey);
skb              1198 net/ieee802154/nl-mac.c 			if (ieee802154_nl_fill_devkey(data->skb, data->portid,
skb              1214 net/ieee802154/nl-mac.c int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
skb              1217 net/ieee802154/nl-mac.c 	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys);
skb              1256 net/ieee802154/nl-mac.c int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info)
skb              1262 net/ieee802154/nl-mac.c 	return ieee802154_nl_llsec_change(skb, info, llsec_add_seclevel);
skb              1276 net/ieee802154/nl-mac.c int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info)
skb              1278 net/ieee802154/nl-mac.c 	return ieee802154_nl_llsec_change(skb, info, llsec_del_seclevel);
skb              1324 net/ieee802154/nl-mac.c 		if (ieee802154_nl_fill_seclevel(data->skb, data->portid,
skb              1337 net/ieee802154/nl-mac.c int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
skb              1340 net/ieee802154/nl-mac.c 	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_seclevels);
skb                71 net/ieee802154/nl-phy.c int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
skb               114 net/ieee802154/nl-phy.c 	struct sk_buff *skb;
skb               129 net/ieee802154/nl-phy.c 	rc = ieee802154_nl_fill_phy(data->skb,
skb               130 net/ieee802154/nl-phy.c 				    NETLINK_CB(data->cb->skb).portid,
skb               143 net/ieee802154/nl-phy.c int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb)
skb               147 net/ieee802154/nl-phy.c 		.skb = skb,
skb               158 net/ieee802154/nl-phy.c 	return skb->len;
skb               161 net/ieee802154/nl-phy.c int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
skb               264 net/ieee802154/nl-phy.c int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info)
skb               234 net/ieee802154/nl802154.c nl802154_prepare_wpan_dev_dump(struct sk_buff *skb,
skb               252 net/ieee802154/nl802154.c 		*wpan_dev = __cfg802154_wpan_dev_from_attrs(sock_net(skb->sk),
skb               301 net/ieee802154/nl802154.c static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
skb               305 net/ieee802154/nl802154.c 	return genlmsg_put(skb, portid, seq, &nl802154_fam, flags, cmd);
skb               556 net/ieee802154/nl802154.c static int nl802154_dump_wpan_phy_parse(struct sk_buff *skb,
skb               595 net/ieee802154/nl802154.c nl802154_dump_wpan_phy(struct sk_buff *skb, struct netlink_callback *cb)
skb               609 net/ieee802154/nl802154.c 		ret = nl802154_dump_wpan_phy_parse(skb, cb, state);
skb               619 net/ieee802154/nl802154.c 		if (!net_eq(wpan_phy_net(&rdev->wpan_phy), sock_net(skb->sk)))
skb               629 net/ieee802154/nl802154.c 					     skb,
skb               630 net/ieee802154/nl802154.c 					     NETLINK_CB(cb->skb).portid,
skb               634 net/ieee802154/nl802154.c 			    !skb->len && cb->min_dump_alloc < 4096) {
skb               648 net/ieee802154/nl802154.c 	return skb->len;
skb               657 net/ieee802154/nl802154.c static int nl802154_get_wpan_phy(struct sk_buff *skb, struct genl_info *info)
skb               852 net/ieee802154/nl802154.c nl802154_dump_interface(struct sk_buff *skb, struct netlink_callback *cb)
skb               863 net/ieee802154/nl802154.c 		if (!net_eq(wpan_phy_net(&rdev->wpan_phy), sock_net(skb->sk)))
skb               876 net/ieee802154/nl802154.c 			if (nl802154_send_iface(skb, NETLINK_CB(cb->skb).portid,
skb               892 net/ieee802154/nl802154.c 	return skb->len;
skb               895 net/ieee802154/nl802154.c static int nl802154_get_interface(struct sk_buff *skb, struct genl_info *info)
skb               914 net/ieee802154/nl802154.c static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info)
skb               945 net/ieee802154/nl802154.c static int nl802154_del_interface(struct sk_buff *skb, struct genl_info *info)
skb               965 net/ieee802154/nl802154.c static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info)
skb               985 net/ieee802154/nl802154.c static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
skb              1016 net/ieee802154/nl802154.c static int nl802154_set_cca_ed_level(struct sk_buff *skb, struct genl_info *info)
skb              1038 net/ieee802154/nl802154.c static int nl802154_set_tx_power(struct sk_buff *skb, struct genl_info *info)
skb              1060 net/ieee802154/nl802154.c static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
skb              1096 net/ieee802154/nl802154.c static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info)
skb              1138 net/ieee802154/nl802154.c nl802154_set_backoff_exponent(struct sk_buff *skb, struct genl_info *info)
skb              1168 net/ieee802154/nl802154.c nl802154_set_max_csma_backoffs(struct sk_buff *skb, struct genl_info *info)
skb              1194 net/ieee802154/nl802154.c nl802154_set_max_frame_retries(struct sk_buff *skb, struct genl_info *info)
skb              1218 net/ieee802154/nl802154.c static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
skb              1243 net/ieee802154/nl802154.c nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info)
skb              1264 net/ieee802154/nl802154.c static int nl802154_wpan_phy_netns(struct sk_buff *skb, struct genl_info *info)
skb              1393 net/ieee802154/nl802154.c static int nl802154_set_llsec_params(struct sk_buff *skb,
skb              1497 net/ieee802154/nl802154.c nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
skb              1505 net/ieee802154/nl802154.c 	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
skb              1522 net/ieee802154/nl802154.c 		if (nl802154_send_key(skb, NL802154_CMD_NEW_SEC_KEY,
skb              1523 net/ieee802154/nl802154.c 				      NETLINK_CB(cb->skb).portid,
skb              1537 net/ieee802154/nl802154.c 	err = skb->len;
skb              1553 net/ieee802154/nl802154.c static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
skb              1603 net/ieee802154/nl802154.c static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
skb              1663 net/ieee802154/nl802154.c nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
skb              1671 net/ieee802154/nl802154.c 	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
skb              1688 net/ieee802154/nl802154.c 		if (nl802154_send_device(skb, NL802154_CMD_NEW_SEC_LEVEL,
skb              1689 net/ieee802154/nl802154.c 					 NETLINK_CB(cb->skb).portid,
skb              1703 net/ieee802154/nl802154.c 	err = skb->len;
skb              1754 net/ieee802154/nl802154.c static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
skb              1768 net/ieee802154/nl802154.c static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
skb              1831 net/ieee802154/nl802154.c nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
skb              1840 net/ieee802154/nl802154.c 	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
skb              1859 net/ieee802154/nl802154.c 			if (nl802154_send_devkey(skb,
skb              1861 net/ieee802154/nl802154.c 						 NETLINK_CB(cb->skb).portid,
skb              1879 net/ieee802154/nl802154.c 	err = skb->len;
skb              1892 net/ieee802154/nl802154.c static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
skb              1923 net/ieee802154/nl802154.c static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
skb              1993 net/ieee802154/nl802154.c nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
skb              2001 net/ieee802154/nl802154.c 	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
skb              2018 net/ieee802154/nl802154.c 		if (nl802154_send_seclevel(skb, NL802154_CMD_NEW_SEC_LEVEL,
skb              2019 net/ieee802154/nl802154.c 					   NETLINK_CB(cb->skb).portid,
skb              2033 net/ieee802154/nl802154.c 	err = skb->len;
skb              2081 net/ieee802154/nl802154.c static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
skb              2096 net/ieee802154/nl802154.c static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
skb              2123 net/ieee802154/nl802154.c static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
skb              2185 net/ieee802154/nl802154.c static void nl802154_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
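
The nl802154_set_*() doit handlers listed above all receive their attributes pre-parsed into info->attrs[], with nl802154_pre_doit()/nl802154_post_doit() resolving and releasing the rdev/wpan_dev around each call. A sketch of that handler shape, assuming hypothetical MY_ATTR_* attributes and a my_drv_set_channel() back end:

	#include <net/genetlink.h>

	enum { MY_ATTR_PAGE = 1, MY_ATTR_CHANNEL = 2 };
	static int my_drv_set_channel(u8 page, u8 channel);	/* hypothetical */

	static int my_set_channel(struct sk_buff *skb, struct genl_info *info)
	{
		u8 page, channel;

		/* The nla_policy validated types; presence is checked here. */
		if (!info->attrs[MY_ATTR_PAGE] || !info->attrs[MY_ATTR_CHANNEL])
			return -EINVAL;

		page = nla_get_u8(info->attrs[MY_ATTR_PAGE]);
		channel = nla_get_u8(info->attrs[MY_ATTR_CHANNEL]);

		return my_drv_set_channel(page, channel);
	}
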
skb               245 net/ieee802154/socket.c 	struct sk_buff *skb;
skb               278 net/ieee802154/socket.c 	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
skb               280 net/ieee802154/socket.c 	if (!skb)
skb               283 net/ieee802154/socket.c 	skb_reserve(skb, hlen);
skb               285 net/ieee802154/socket.c 	skb_reset_mac_header(skb);
skb               286 net/ieee802154/socket.c 	skb_reset_network_header(skb);
skb               288 net/ieee802154/socket.c 	err = memcpy_from_msg(skb_put(skb, size), msg, size);
skb               292 net/ieee802154/socket.c 	skb->dev = dev;
skb               293 net/ieee802154/socket.c 	skb->protocol = htons(ETH_P_IEEE802154);
skb               295 net/ieee802154/socket.c 	err = dev_queue_xmit(skb);
skb               304 net/ieee802154/socket.c 	kfree_skb(skb);
skb               316 net/ieee802154/socket.c 	struct sk_buff *skb;
skb               318 net/ieee802154/socket.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb               319 net/ieee802154/socket.c 	if (!skb)
skb               322 net/ieee802154/socket.c 	copied = skb->len;
skb               328 net/ieee802154/socket.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               332 net/ieee802154/socket.c 	sock_recv_ts_and_drops(msg, sk, skb);
skb               335 net/ieee802154/socket.c 		copied = skb->len;
skb               337 net/ieee802154/socket.c 	skb_free_datagram(sk, skb);
skb               344 net/ieee802154/socket.c static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               346 net/ieee802154/socket.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               347 net/ieee802154/socket.c 	if (!skb)
skb               350 net/ieee802154/socket.c 	if (sock_queue_rcv_skb(sk, skb) < 0) {
skb               351 net/ieee802154/socket.c 		kfree_skb(skb);
skb               358 net/ieee802154/socket.c static void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
skb               369 net/ieee802154/socket.c 			clone = skb_clone(skb, GFP_ATOMIC);
skb               543 net/ieee802154/socket.c 		struct sk_buff *skb;
skb               548 net/ieee802154/socket.c 		skb = skb_peek(&sk->sk_receive_queue);
skb               549 net/ieee802154/socket.c 		if (skb) {
skb               554 net/ieee802154/socket.c 			amount = skb->len - ieee802154_hdr_length(skb);
skb               608 net/ieee802154/socket.c 	struct sk_buff *skb;
skb               646 net/ieee802154/socket.c 	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
skb               649 net/ieee802154/socket.c 	if (!skb)
skb               652 net/ieee802154/socket.c 	skb_reserve(skb, hlen);
skb               654 net/ieee802154/socket.c 	skb_reset_network_header(skb);
skb               656 net/ieee802154/socket.c 	cb = mac_cb_init(skb);
skb               674 net/ieee802154/socket.c 	err = wpan_dev_hard_header(skb, dev, &dst_addr,
skb               679 net/ieee802154/socket.c 	err = memcpy_from_msg(skb_put(skb, size), msg, size);
skb               683 net/ieee802154/socket.c 	skb->dev = dev;
skb               684 net/ieee802154/socket.c 	skb->protocol = htons(ETH_P_IEEE802154);
skb               686 net/ieee802154/socket.c 	err = dev_queue_xmit(skb);
skb               695 net/ieee802154/socket.c 	kfree_skb(skb);
skb               707 net/ieee802154/socket.c 	struct sk_buff *skb;
skb               711 net/ieee802154/socket.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb               712 net/ieee802154/socket.c 	if (!skb)
skb               715 net/ieee802154/socket.c 	copied = skb->len;
skb               722 net/ieee802154/socket.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               726 net/ieee802154/socket.c 	sock_recv_ts_and_drops(msg, sk, skb);
skb               736 net/ieee802154/socket.c 		ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
skb               742 net/ieee802154/socket.c 			       sizeof(uint8_t), &(mac_cb(skb)->lqi));
skb               748 net/ieee802154/socket.c 		copied = skb->len;
skb               750 net/ieee802154/socket.c 	skb_free_datagram(sk, skb);
skb               757 net/ieee802154/socket.c static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               759 net/ieee802154/socket.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               760 net/ieee802154/socket.c 	if (!skb)
skb               763 net/ieee802154/socket.c 	if (sock_queue_rcv_skb(sk, skb) < 0) {
skb               764 net/ieee802154/socket.c 		kfree_skb(skb);
skb               790 net/ieee802154/socket.c static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
skb               811 net/ieee802154/socket.c 				clone = skb_clone(skb, GFP_ATOMIC);
skb               821 net/ieee802154/socket.c 		dgram_rcv_skb(prev, skb);
skb               823 net/ieee802154/socket.c 		kfree_skb(skb);
skb              1064 net/ieee802154/socket.c static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
skb              1072 net/ieee802154/socket.c 			     DUMP_PREFIX_NONE, skb->data, skb->len);
skb              1078 net/ieee802154/socket.c 	ieee802154_raw_deliver(dev, skb);
skb              1083 net/ieee802154/socket.c 	if (skb->pkt_type != PACKET_OTHERHOST)
skb              1084 net/ieee802154/socket.c 		return ieee802154_dgram_deliver(dev, skb);
skb              1087 net/ieee802154/socket.c 	kfree_skb(skb);
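
Both AF_IEEE802154 send paths above (raw and dgram sendmsg) follow the same allocate/reserve/put/copy/xmit sequence. A condensed sketch, assuming a hypothetical my_sendmsg() whose hlen/tlen would come from dev->hard_header_len and dev->needed_tailroom:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/if_ether.h>
	#include <net/sock.h>

	static int my_sendmsg(struct sock *sk, struct net_device *dev,
			      struct msghdr *msg, size_t size, int hlen, int tlen)
	{
		struct sk_buff *skb;
		int err;

		skb = sock_alloc_send_skb(sk, hlen + tlen + size,
					  msg->msg_flags & MSG_DONTWAIT, &err);
		if (!skb)
			return err;

		skb_reserve(skb, hlen);		/* room for the link-layer header */
		skb_reset_network_header(skb);

		err = memcpy_from_msg(skb_put(skb, size), msg, size);
		if (err < 0) {
			kfree_skb(skb);		/* error path frees the skb */
			return err;
		}

		skb->dev = dev;
		skb->protocol = htons(ETH_P_IEEE802154);
		return dev_queue_xmit(skb);	/* consumes the skb */
	}
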
skb                34 net/ife/ife.c  void *ife_encode(struct sk_buff *skb, u16 metalen)
skb                40 net/ife/ife.c  	int total_push = hdrm + skb->dev->hard_header_len;
skb                46 net/ife/ife.c  	err = skb_cow_head(skb, total_push);
skb                50 net/ife/ife.c  	iethh = (struct ethhdr *) skb->data;
skb                52 net/ife/ife.c  	__skb_push(skb, total_push);
skb                53 net/ife/ife.c  	memcpy(skb->data, iethh, skb->dev->hard_header_len);
skb                54 net/ife/ife.c  	skb_reset_mac_header(skb);
skb                55 net/ife/ife.c  	skboff += skb->dev->hard_header_len;
skb                58 net/ife/ife.c  	ifehdr = (struct ifeheadr *) (skb->data + skboff);
skb                66 net/ife/ife.c  void *ife_decode(struct sk_buff *skb, u16 *metalen)
skb                72 net/ife/ife.c  	if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN))
skb                75 net/ife/ife.c  	ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len);
skb                77 net/ife/ife.c  	total_pull = skb->dev->hard_header_len + ifehdrln;
skb                82 net/ife/ife.c  	if (unlikely(!pskb_may_pull(skb, total_pull)))
skb                85 net/ife/ife.c  	skb_set_mac_header(skb, total_pull);
skb                86 net/ife/ife.c  	__skb_pull(skb, total_pull);
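
ife_decode() above shows the canonical two-stage header pull: first linearize the fixed header with pskb_may_pull(), read the embedded length, then linearize the full variable-length region before pulling it. A sketch of the same shape, where struct myhdr is an illustrative stand-in for struct ifeheadr:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>

	struct myhdr { __be16 metalen; };	/* layout is illustrative */

	static void *my_decode(struct sk_buff *skb, u16 *metalen)
	{
		int hh = skb->dev->hard_header_len;
		struct myhdr *hdr;
		int total_pull;

		/* Fixed header must be linear before we read it. */
		if (!pskb_may_pull(skb, hh + sizeof(*hdr)))
			return NULL;
		hdr = (struct myhdr *)(skb->data + hh);
		total_pull = hh + sizeof(*hdr) + ntohs(hdr->metalen);

		/* ...and so must the variable-length metadata. */
		if (unlikely(!pskb_may_pull(skb, total_pull)))
			return NULL;
		/* pskb_may_pull() may reallocate the head: recompute pointers. */
		hdr = (struct myhdr *)(skb->data + hh);
		*metalen = ntohs(hdr->metalen);

		skb_set_mac_header(skb, total_pull);
		__skb_pull(skb, total_pull);	/* skb->data now at the payload */
		return hdr + 1;			/* metadata follows the header */
	}
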
skb              1297 net/ipv4/af_inet.c struct sk_buff *inet_gso_segment(struct sk_buff *skb,
skb              1310 net/ipv4/af_inet.c 	skb_reset_network_header(skb);
skb              1311 net/ipv4/af_inet.c 	nhoff = skb_network_header(skb) - skb_mac_header(skb);
skb              1312 net/ipv4/af_inet.c 	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
skb              1315 net/ipv4/af_inet.c 	iph = ip_hdr(skb);
skb              1324 net/ipv4/af_inet.c 	if (unlikely(!pskb_may_pull(skb, ihl)))
skb              1326 net/ipv4/af_inet.c 	__skb_pull(skb, ihl);
skb              1328 net/ipv4/af_inet.c 	encap = SKB_GSO_CB(skb)->encap_level > 0;
skb              1330 net/ipv4/af_inet.c 		features &= skb->dev->hw_enc_features;
skb              1331 net/ipv4/af_inet.c 	SKB_GSO_CB(skb)->encap_level += ihl;
skb              1333 net/ipv4/af_inet.c 	skb_reset_transport_header(skb);
skb              1337 net/ipv4/af_inet.c 	if (!skb->encapsulation || encap) {
skb              1338 net/ipv4/af_inet.c 		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
skb              1339 net/ipv4/af_inet.c 		fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
skb              1342 net/ipv4/af_inet.c 		if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
skb              1348 net/ipv4/af_inet.c 		segs = ops->callbacks.gso_segment(skb, features);
skb              1355 net/ipv4/af_inet.c 	skb = segs;
skb              1357 net/ipv4/af_inet.c 		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
skb              1360 net/ipv4/af_inet.c 			if (skb->next)
skb              1362 net/ipv4/af_inet.c 			offset += skb->len - nhoff - ihl;
skb              1363 net/ipv4/af_inet.c 			tot_len = skb->len - nhoff;
skb              1364 net/ipv4/af_inet.c 		} else if (skb_is_gso(skb)) {
skb              1367 net/ipv4/af_inet.c 				id += skb_shinfo(skb)->gso_segs;
skb              1371 net/ipv4/af_inet.c 				tot_len = skb_shinfo(skb)->gso_size +
skb              1372 net/ipv4/af_inet.c 					  SKB_GSO_CB(skb)->data_offset +
skb              1373 net/ipv4/af_inet.c 					  skb->head - (unsigned char *)iph;
skb              1375 net/ipv4/af_inet.c 				tot_len = skb->len - nhoff;
skb              1379 net/ipv4/af_inet.c 			tot_len = skb->len - nhoff;
skb              1384 net/ipv4/af_inet.c 			skb_reset_inner_headers(skb);
skb              1385 net/ipv4/af_inet.c 		skb->network_header = (u8 *)iph - skb->head;
skb              1386 net/ipv4/af_inet.c 		skb_reset_mac_len(skb);
skb              1387 net/ipv4/af_inet.c 	} while ((skb = skb->next));
skb              1394 net/ipv4/af_inet.c static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
skb              1397 net/ipv4/af_inet.c 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
skb              1400 net/ipv4/af_inet.c 	return inet_gso_segment(skb, features);
skb              1407 net/ipv4/af_inet.c struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
skb              1419 net/ipv4/af_inet.c 	off = skb_gro_offset(skb);
skb              1421 net/ipv4/af_inet.c 	iph = skb_gro_header_fast(skb, off);
skb              1422 net/ipv4/af_inet.c 	if (skb_gro_header_hard(skb, hlen)) {
skb              1423 net/ipv4/af_inet.c 		iph = skb_gro_header_slow(skb, hlen, off);
skb              1445 net/ipv4/af_inet.c 	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
skb              1497 net/ipv4/af_inet.c 		if (NAPI_GRO_CB(skb)->is_atomic)
skb              1503 net/ipv4/af_inet.c 	NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
skb              1504 net/ipv4/af_inet.c 	NAPI_GRO_CB(skb)->flush |= flush;
skb              1505 net/ipv4/af_inet.c 	skb_set_network_header(skb, off);
skb              1513 net/ipv4/af_inet.c 	skb_gro_pull(skb, sizeof(*iph));
skb              1514 net/ipv4/af_inet.c 	skb_set_transport_header(skb, skb_gro_offset(skb));
skb              1517 net/ipv4/af_inet.c 				       ops->callbacks.gro_receive, head, skb);
skb              1523 net/ipv4/af_inet.c 	skb_gro_flush_final(skb, pp, flush);
skb              1530 net/ipv4/af_inet.c 					struct sk_buff *skb)
skb              1532 net/ipv4/af_inet.c 	if (NAPI_GRO_CB(skb)->encap_mark) {
skb              1533 net/ipv4/af_inet.c 		NAPI_GRO_CB(skb)->flush = 1;
skb              1537 net/ipv4/af_inet.c 	NAPI_GRO_CB(skb)->encap_mark = 1;
skb              1539 net/ipv4/af_inet.c 	return inet_gro_receive(head, skb);
skb              1581 net/ipv4/af_inet.c int inet_gro_complete(struct sk_buff *skb, int nhoff)
skb              1583 net/ipv4/af_inet.c 	__be16 newlen = htons(skb->len - nhoff);
skb              1584 net/ipv4/af_inet.c 	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
skb              1589 net/ipv4/af_inet.c 	if (skb->encapsulation) {
skb              1590 net/ipv4/af_inet.c 		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
skb              1591 net/ipv4/af_inet.c 		skb_set_inner_network_header(skb, nhoff);
skb              1608 net/ipv4/af_inet.c 			      skb, nhoff + sizeof(*iph));
skb              1617 net/ipv4/af_inet.c static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
skb              1619 net/ipv4/af_inet.c 	skb->encapsulation = 1;
skb              1620 net/ipv4/af_inet.c 	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
skb              1621 net/ipv4/af_inet.c 	return inet_gro_complete(skb, nhoff);
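
The af_inet.c entries show the IPv4 layer doing its own header work in inet_gso_segment()/inet_gro_receive() and then dispatching to per-protocol callbacks through the net_offload table. A registration sketch under the assumption that this tree's inet_add_offload() takes (ops, protocol); IPPROTO_MYPROTO and the my_* callbacks are hypothetical:

	#include <net/protocol.h>

	#define IPPROTO_MYPROTO 253	/* RFC 3692 experimental number */

	static struct sk_buff *my_gso_segment(struct sk_buff *skb,
					      netdev_features_t features);
	static struct sk_buff *my_gro_receive(struct list_head *head,
					      struct sk_buff *skb);
	static int my_gro_complete(struct sk_buff *skb, int nhoff);

	static const struct net_offload my_offload = {
		.callbacks = {
			.gso_segment	= my_gso_segment,
			.gro_receive	= my_gro_receive,
			.gro_complete	= my_gro_complete,
		},
	};

	static int __init my_offload_init(void)
	{
		/* Hooks my_offload into the table that inet_gso_segment()
		 * and inet_gro_receive() dispatch through. */
		return inet_add_offload(&my_offload, IPPROTO_MYPROTO);
	}
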
skb               124 net/ipv4/ah4.c 	struct sk_buff *skb = base->data;
skb               125 net/ipv4/ah4.c 	struct xfrm_state *x = skb_dst(skb)->xfrm;
skb               127 net/ipv4/ah4.c 	struct iphdr *top_iph = ip_hdr(skb);
skb               128 net/ipv4/ah4.c 	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
skb               129 net/ipv4/ah4.c 	int ihl = ip_hdrlen(skb);
skb               131 net/ipv4/ah4.c 	iph = AH_SKB_CB(skb)->tmp;
skb               143 net/ipv4/ah4.c 	kfree(AH_SKB_CB(skb)->tmp);
skb               144 net/ipv4/ah4.c 	xfrm_output_resume(skb, err);
skb               147 net/ipv4/ah4.c static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
skb               168 net/ipv4/ah4.c 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
skb               172 net/ipv4/ah4.c 	skb_push(skb, -skb_network_offset(skb));
skb               173 net/ipv4/ah4.c 	ah = ip_auth_hdr(skb);
skb               174 net/ipv4/ah4.c 	ihl = ip_hdrlen(skb);
skb               192 net/ipv4/ah4.c 	top_iph = ip_hdr(skb);
skb               206 net/ipv4/ah4.c 	ah->nexthdr = *skb_mac_header(skb);
skb               207 net/ipv4/ah4.c 	*skb_mac_header(skb) = IPPROTO_AH;
skb               210 net/ipv4/ah4.c 	top_iph->tot_len = htons(skb->len);
skb               222 net/ipv4/ah4.c 	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
skb               225 net/ipv4/ah4.c 	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
skb               231 net/ipv4/ah4.c 		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
skb               234 net/ipv4/ah4.c 	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
skb               235 net/ipv4/ah4.c 	ahash_request_set_callback(req, 0, ah_output_done, skb);
skb               237 net/ipv4/ah4.c 	AH_SKB_CB(skb)->tmp = iph;
skb               270 net/ipv4/ah4.c 	struct sk_buff *skb = base->data;
skb               271 net/ipv4/ah4.c 	struct xfrm_state *x = xfrm_input_state(skb);
skb               273 net/ipv4/ah4.c 	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
skb               274 net/ipv4/ah4.c 	int ihl = ip_hdrlen(skb);
skb               280 net/ipv4/ah4.c 	work_iph = AH_SKB_CB(skb)->tmp;
skb               290 net/ipv4/ah4.c 	skb->network_header += ah_hlen;
skb               291 net/ipv4/ah4.c 	memcpy(skb_network_header(skb), work_iph, ihl);
skb               292 net/ipv4/ah4.c 	__skb_pull(skb, ah_hlen + ihl);
skb               295 net/ipv4/ah4.c 		skb_reset_transport_header(skb);
skb               297 net/ipv4/ah4.c 		skb_set_transport_header(skb, -ihl);
skb               299 net/ipv4/ah4.c 	kfree(AH_SKB_CB(skb)->tmp);
skb               300 net/ipv4/ah4.c 	xfrm_input_resume(skb, err);
skb               303 net/ipv4/ah4.c static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
skb               324 net/ipv4/ah4.c 	if (!pskb_may_pull(skb, sizeof(*ah)))
skb               327 net/ipv4/ah4.c 	ah = (struct ip_auth_hdr *)skb->data;
skb               344 net/ipv4/ah4.c 	if (!pskb_may_pull(skb, ah_hlen))
skb               349 net/ipv4/ah4.c 	if (skb_unclone(skb, GFP_ATOMIC))
skb               352 net/ipv4/ah4.c 	skb->ip_summed = CHECKSUM_NONE;
skb               355 net/ipv4/ah4.c 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
skb               359 net/ipv4/ah4.c 	ah = (struct ip_auth_hdr *)skb->data;
skb               360 net/ipv4/ah4.c 	iph = ip_hdr(skb);
skb               361 net/ipv4/ah4.c 	ihl = ip_hdrlen(skb);
skb               397 net/ipv4/ah4.c 	skb_push(skb, ihl);
skb               400 net/ipv4/ah4.c 	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
skb               406 net/ipv4/ah4.c 		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
skb               409 net/ipv4/ah4.c 	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
skb               410 net/ipv4/ah4.c 	ahash_request_set_callback(req, 0, ah_input_done, skb);
skb               412 net/ipv4/ah4.c 	AH_SKB_CB(skb)->tmp = work_iph;
skb               426 net/ipv4/ah4.c 	skb->network_header += ah_hlen;
skb               427 net/ipv4/ah4.c 	memcpy(skb_network_header(skb), work_iph, ihl);
skb               428 net/ipv4/ah4.c 	__skb_pull(skb, ah_hlen + ihl);
skb               430 net/ipv4/ah4.c 		skb_reset_transport_header(skb);
skb               432 net/ipv4/ah4.c 		skb_set_transport_header(skb, -ihl);
skb               442 net/ipv4/ah4.c static int ah4_err(struct sk_buff *skb, u32 info)
skb               444 net/ipv4/ah4.c 	struct net *net = dev_net(skb->dev);
skb               445 net/ipv4/ah4.c 	const struct iphdr *iph = (const struct iphdr *)skb->data;
skb               446 net/ipv4/ah4.c 	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
skb               449 net/ipv4/ah4.c 	switch (icmp_hdr(skb)->type) {
skb               451 net/ipv4/ah4.c 		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
skb               459 net/ipv4/ah4.c 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
skb               464 net/ipv4/ah4.c 	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
skb               465 net/ipv4/ah4.c 		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_AH);
skb               467 net/ipv4/ah4.c 		ipv4_redirect(skb, net, 0, IPPROTO_AH);
skb               550 net/ipv4/ah4.c static int ah4_rcv_cb(struct sk_buff *skb, int err)
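
ah_output()/ah_input() above park the skb as the crypto request's completion context (base->data) and stash scratch memory in AH_SKB_CB(skb)->tmp so it survives an asynchronous hash. A sketch of that pattern with a hypothetical MY_SKB_CB() control block, matching the two-argument xfrm_output_resume() this tree uses:

	#include <crypto/hash.h>
	#include <net/xfrm.h>

	struct my_skb_cb { void *tmp; };	/* scratch that outlives the request */
	#define MY_SKB_CB(skb) ((struct my_skb_cb *)((skb)->cb))

	static void my_done(struct crypto_async_request *base, int err)
	{
		struct sk_buff *skb = base->data;	/* skb was the callback data */

		kfree(MY_SKB_CB(skb)->tmp);
		xfrm_output_resume(skb, err);		/* re-enter the xfrm path */
	}

	static int my_digest(struct ahash_request *req, struct scatterlist *sg,
			     u8 *icv, struct sk_buff *skb, unsigned int len)
	{
		ahash_request_set_crypt(req, sg, icv, len);
		ahash_request_set_callback(req, 0, my_done, skb);

		/* -EINPROGRESS: a driver took the request and my_done() fires
		 * later; any other value means it completed inline. */
		return crypto_ahash_digest(req);
	}
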
skb               125 net/ipv4/arp.c static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
skb               126 net/ipv4/arp.c static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
skb               127 net/ipv4/arp.c static void parp_redo(struct sk_buff *skb);
skb               291 net/ipv4/arp.c static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
skb               293 net/ipv4/arp.c 	dst_link_failure(skb);
skb               294 net/ipv4/arp.c 	kfree_skb(skb);
skb               305 net/ipv4/arp.c 	struct sk_buff *skb;
skb               311 net/ipv4/arp.c 	skb = arp_create(type, ptype, dest_ip, dev, src_ip,
skb               313 net/ipv4/arp.c 	if (!skb)
skb               316 net/ipv4/arp.c 	skb_dst_set(skb, dst_clone(dst));
skb               317 net/ipv4/arp.c 	arp_xmit(skb);
skb               330 net/ipv4/arp.c static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
skb               349 net/ipv4/arp.c 		if (skb && inet_addr_type_dev_table(dev_net(dev), dev,
skb               350 net/ipv4/arp.c 					  ip_hdr(skb)->saddr) == RTN_LOCAL)
skb               351 net/ipv4/arp.c 			saddr = ip_hdr(skb)->saddr;
skb               354 net/ipv4/arp.c 		if (!skb)
skb               356 net/ipv4/arp.c 		saddr = ip_hdr(skb)->saddr;
skb               387 net/ipv4/arp.c 	if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
skb               388 net/ipv4/arp.c 		dst = skb_dst(skb);
skb               527 net/ipv4/arp.c 	struct sk_buff *skb;
skb               537 net/ipv4/arp.c 	skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
skb               538 net/ipv4/arp.c 	if (!skb)
skb               541 net/ipv4/arp.c 	skb_reserve(skb, hlen);
skb               542 net/ipv4/arp.c 	skb_reset_network_header(skb);
skb               543 net/ipv4/arp.c 	arp = skb_put(skb, arp_hdr_len(dev));
skb               544 net/ipv4/arp.c 	skb->dev = dev;
skb               545 net/ipv4/arp.c 	skb->protocol = htons(ETH_P_ARP);
skb               554 net/ipv4/arp.c 	if (dev_hard_header(skb, dev, ptype, dest_hw, src_hw, skb->len) < 0)
skb               620 net/ipv4/arp.c 	return skb;
skb               623 net/ipv4/arp.c 	kfree_skb(skb);
skb               628 net/ipv4/arp.c static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               630 net/ipv4/arp.c 	return dev_queue_xmit(skb);
skb               636 net/ipv4/arp.c void arp_xmit(struct sk_buff *skb)
skb               640 net/ipv4/arp.c 		dev_net(skb->dev), NULL, skb, NULL, skb->dev,
skb               675 net/ipv4/arp.c static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               677 net/ipv4/arp.c 	struct net_device *dev = skb->dev;
skb               698 net/ipv4/arp.c 	arp = arp_hdr(skb);
skb               798 net/ipv4/arp.c 	if (arp->ar_op == htons(ARPOP_REQUEST) && skb_metadata_dst(skb))
skb               800 net/ipv4/arp.c 			    iptunnel_metadata_reply(skb_metadata_dst(skb),
skb               814 net/ipv4/arp.c 	    ip_route_input_noref(skb, tip, sip, 0, dev) == 0) {
skb               816 net/ipv4/arp.c 		rt = skb_rtable(skb);
skb               846 net/ipv4/arp.c 				if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
skb               847 net/ipv4/arp.c 				    skb->pkt_type == PACKET_HOST ||
skb               855 net/ipv4/arp.c 						       in_dev->arp_parms, skb);
skb               907 net/ipv4/arp.c 		    skb->pkt_type != PACKET_HOST)
skb               915 net/ipv4/arp.c 	consume_skb(skb);
skb               922 net/ipv4/arp.c 	kfree_skb(skb);
skb               926 net/ipv4/arp.c static void parp_redo(struct sk_buff *skb)
skb               928 net/ipv4/arp.c 	arp_process(dev_net(skb->dev), NULL, skb);
skb               936 net/ipv4/arp.c static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
skb               943 net/ipv4/arp.c 	    skb->pkt_type == PACKET_OTHERHOST ||
skb               944 net/ipv4/arp.c 	    skb->pkt_type == PACKET_LOOPBACK)
skb               947 net/ipv4/arp.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               948 net/ipv4/arp.c 	if (!skb)
skb               952 net/ipv4/arp.c 	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
skb               955 net/ipv4/arp.c 	arp = arp_hdr(skb);
skb               959 net/ipv4/arp.c 	memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
skb               962 net/ipv4/arp.c 		       dev_net(dev), NULL, skb, dev, NULL,
skb               966 net/ipv4/arp.c 	consume_skb(skb);
skb               969 net/ipv4/arp.c 	kfree_skb(skb);
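
arp_solicit() and the helpers above build packets with arp_create() and push them with arp_xmit(); the exported arp_send() wraps both. A minimal caller sketch (my_probe is hypothetical):

	#include <net/arp.h>
	#include <linux/if_arp.h>
	#include <linux/if_ether.h>

	/* Broadcast "who-has tip, tell sip" on dev; a NULL dest_hw makes
	 * arp_create() fall back to the device broadcast address. */
	static void my_probe(struct net_device *dev, __be32 sip, __be32 tip)
	{
		arp_send(ARPOP_REQUEST, ETH_P_ARP, tip, dev, sip,
			 NULL, dev->dev_addr, NULL);
	}
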
skb              1507 net/ipv4/cipso_ipv4.c unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
skb              1509 net/ipv4/cipso_ipv4.c 	const struct iphdr *iph = ip_hdr(skb);
skb              1510 net/ipv4/cipso_ipv4.c 	unsigned char *optptr = (unsigned char *)&(ip_hdr(skb)[1]);
skb              1555 net/ipv4/cipso_ipv4.c int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
skb              1673 net/ipv4/cipso_ipv4.c 			if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
skb              1725 net/ipv4/cipso_ipv4.c void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
skb              1731 net/ipv4/cipso_ipv4.c 	if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
skb              1740 net/ipv4/cipso_ipv4.c 	opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
skb              1742 net/ipv4/cipso_ipv4.c 	res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
skb              1749 net/ipv4/cipso_ipv4.c 		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
skb              1751 net/ipv4/cipso_ipv4.c 		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
skb              2168 net/ipv4/cipso_ipv4.c int cipso_v4_skbuff_setattr(struct sk_buff *skb,
skb              2174 net/ipv4/cipso_ipv4.c 	struct ip_options *opt = &IPCB(skb)->opt;
skb              2196 net/ipv4/cipso_ipv4.c 	ret_val = skb_cow(skb, skb_headroom(skb) + len_delta);
skb              2203 net/ipv4/cipso_ipv4.c 		iph = ip_hdr(skb);
skb              2204 net/ipv4/cipso_ipv4.c 		skb_push(skb, len_delta);
skb              2206 net/ipv4/cipso_ipv4.c 		skb_reset_network_header(skb);
skb              2207 net/ipv4/cipso_ipv4.c 		iph = ip_hdr(skb);
skb              2209 net/ipv4/cipso_ipv4.c 		iph = ip_hdr(skb);
skb              2212 net/ipv4/cipso_ipv4.c 		iph = ip_hdr(skb);
skb              2230 net/ipv4/cipso_ipv4.c 		iph->tot_len = htons(skb->len);
skb              2246 net/ipv4/cipso_ipv4.c int cipso_v4_skbuff_delattr(struct sk_buff *skb)
skb              2250 net/ipv4/cipso_ipv4.c 	struct ip_options *opt = &IPCB(skb)->opt;
skb              2257 net/ipv4/cipso_ipv4.c 	ret_val = skb_cow(skb, skb_headroom(skb));
skb              2265 net/ipv4/cipso_ipv4.c 	iph = ip_hdr(skb);
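
cipso_v4_skbuff_setattr() grows the IPv4 header in place when the new option block is larger than the old one. The core skb_cow()/skb_push()/memmove() dance, as a simplified sketch: my_grow_iph() is hypothetical and assumes the header carries no existing options (the real function also relocates them):

	#include <linux/skbuff.h>
	#include <net/ip.h>

	static int my_grow_iph(struct sk_buff *skb, int len_delta)
	{
		struct iphdr *iph;
		int ret;

		/* Private, writable header with len_delta spare headroom. */
		ret = skb_cow(skb, skb_headroom(skb) + len_delta);
		if (ret < 0)
			return ret;

		iph = ip_hdr(skb);
		skb_push(skb, len_delta);
		/* Slide the fixed 20-byte header down over the new room. */
		memmove(skb->data, iph, sizeof(struct iphdr));
		skb_reset_network_header(skb);

		iph = ip_hdr(skb);
		iph->ihl += len_delta / 4;	/* options count in 32-bit words */
		iph->tot_len = htons(skb->len);
		ip_send_check(iph);		/* header checksum is now stale */
		/* Caller writes the len_delta option bytes at (iph + 1). */
		return 0;
	}
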
skb               644 net/ipv4/devinet.c static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               647 net/ipv4/devinet.c 	struct net *net = sock_net(skb->sk);
skb               686 net/ipv4/devinet.c 		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
skb               928 net/ipv4/devinet.c static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               931 net/ipv4/devinet.c 	struct net *net = sock_net(skb->sk);
skb               957 net/ipv4/devinet.c 		return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid,
skb               978 net/ipv4/devinet.c 		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
skb              1636 net/ipv4/devinet.c static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
skb              1646 net/ipv4/devinet.c 	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
skb              1649 net/ipv4/devinet.c static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
skb              1656 net/ipv4/devinet.c 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
skb              1669 net/ipv4/devinet.c 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
skb              1694 net/ipv4/devinet.c 	     nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
skb              1696 net/ipv4/devinet.c 	     nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
skb              1698 net/ipv4/devinet.c 	     nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
skb              1700 net/ipv4/devinet.c 	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
skb              1701 net/ipv4/devinet.c 	    nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
skb              1703 net/ipv4/devinet.c 	     nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
skb              1704 net/ipv4/devinet.c 	    put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
skb              1708 net/ipv4/devinet.c 	nlmsg_end(skb, nlh);
skb              1712 net/ipv4/devinet.c 	nlmsg_cancel(skb, nlh);
skb              1773 net/ipv4/devinet.c static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
skb              1786 net/ipv4/devinet.c 		err = inet_fill_ifaddr(skb, ifa, fillargs);
skb              1790 net/ipv4/devinet.c 		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              1801 net/ipv4/devinet.c static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
skb              1805 net/ipv4/devinet.c 		.portid = NETLINK_CB(cb->skb).portid,
skb              1811 net/ipv4/devinet.c 	struct net *net = sock_net(skb->sk);
skb              1827 net/ipv4/devinet.c 						 skb->sk, cb);
skb              1841 net/ipv4/devinet.c 				err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
skb              1863 net/ipv4/devinet.c 			err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
skb              1882 net/ipv4/devinet.c 	return skb->len ? : err;
skb              1895 net/ipv4/devinet.c 	struct sk_buff *skb;
skb              1900 net/ipv4/devinet.c 	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
skb              1901 net/ipv4/devinet.c 	if (!skb)
skb              1904 net/ipv4/devinet.c 	err = inet_fill_ifaddr(skb, ifa, &fillargs);
skb              1908 net/ipv4/devinet.c 		kfree_skb(skb);
skb              1911 net/ipv4/devinet.c 	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
skb              1929 net/ipv4/devinet.c static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
skb              1939 net/ipv4/devinet.c 	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
skb              2027 net/ipv4/devinet.c static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
skb              2036 net/ipv4/devinet.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
skb              2047 net/ipv4/devinet.c 	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
skb              2054 net/ipv4/devinet.c 	    nla_put_s32(skb, NETCONFA_FORWARDING,
skb              2058 net/ipv4/devinet.c 	    nla_put_s32(skb, NETCONFA_RP_FILTER,
skb              2062 net/ipv4/devinet.c 	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
skb              2066 net/ipv4/devinet.c 	    nla_put_s32(skb, NETCONFA_BC_FORWARDING,
skb              2070 net/ipv4/devinet.c 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
skb              2074 net/ipv4/devinet.c 	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
skb              2079 net/ipv4/devinet.c 	nlmsg_end(skb, nlh);
skb              2083 net/ipv4/devinet.c 	nlmsg_cancel(skb, nlh);
skb              2090 net/ipv4/devinet.c 	struct sk_buff *skb;
skb              2093 net/ipv4/devinet.c 	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
skb              2094 net/ipv4/devinet.c 	if (!skb)
skb              2097 net/ipv4/devinet.c 	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
skb              2102 net/ipv4/devinet.c 		kfree_skb(skb);
skb              2105 net/ipv4/devinet.c 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
skb              2120 net/ipv4/devinet.c static int inet_netconf_valid_get_req(struct sk_buff *skb,
skb              2132 net/ipv4/devinet.c 	if (!netlink_strict_get_check(skb))
skb              2165 net/ipv4/devinet.c 	struct sk_buff *skb;
skb              2200 net/ipv4/devinet.c 	skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
skb              2201 net/ipv4/devinet.c 	if (!skb)
skb              2204 net/ipv4/devinet.c 	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
skb              2211 net/ipv4/devinet.c 		kfree_skb(skb);
skb              2214 net/ipv4/devinet.c 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
skb              2219 net/ipv4/devinet.c static int inet_netconf_dump_devconf(struct sk_buff *skb,
skb              2223 net/ipv4/devinet.c 	struct net *net = sock_net(skb->sk);
skb              2261 net/ipv4/devinet.c 			if (inet_netconf_fill_devconf(skb, dev->ifindex,
skb              2263 net/ipv4/devinet.c 						      NETLINK_CB(cb->skb).portid,
skb              2271 net/ipv4/devinet.c 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              2278 net/ipv4/devinet.c 		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
skb              2280 net/ipv4/devinet.c 					      NETLINK_CB(cb->skb).portid,
skb              2289 net/ipv4/devinet.c 		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
skb              2291 net/ipv4/devinet.c 					      NETLINK_CB(cb->skb).portid,
skb              2303 net/ipv4/devinet.c 	return skb->len;
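
inet_fill_ifaddr() and inet_netconf_fill_devconf() above follow the canonical rtnetlink fill shape: nlmsg_put() a header, nla_put_*() the attributes, then nlmsg_end() on success or nlmsg_cancel() on overflow so the caller can retry with a larger skb. A reduced sketch (my_fill_ifaddr is hypothetical):

	#include <net/netlink.h>
	#include <linux/if_addr.h>
	#include <linux/rtnetlink.h>

	static int my_fill_ifaddr(struct sk_buff *skb, __be32 addr, int ifindex,
				  u32 portid, u32 seq, unsigned int flags)
	{
		struct nlmsghdr *nlh;
		struct ifaddrmsg *ifm;

		nlh = nlmsg_put(skb, portid, seq, RTM_NEWADDR, sizeof(*ifm), flags);
		if (!nlh)
			return -EMSGSIZE;	/* caller retries with a larger skb */

		ifm = nlmsg_data(nlh);
		ifm->ifa_family = AF_INET;
		ifm->ifa_prefixlen = 32;
		ifm->ifa_flags = 0;
		ifm->ifa_scope = RT_SCOPE_UNIVERSE;
		ifm->ifa_index = ifindex;

		if (nla_put_in_addr(skb, IFA_LOCAL, addr))
			goto nla_put_failure;

		nlmsg_end(skb, nlh);		/* patch in the final length */
		return 0;

	nla_put_failure:
		nlmsg_cancel(skb, nlh);		/* roll back the partial message */
		return -EMSGSIZE;
	}
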
skb               122 net/ipv4/esp4.c 	struct sk_buff *skb = base->data;
skb               123 net/ipv4/esp4.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               128 net/ipv4/esp4.c 		struct sec_path *sp = skb_sec_path(skb);
skb               132 net/ipv4/esp4.c 		x = skb_dst(skb)->xfrm;
skb               135 net/ipv4/esp4.c 	tmp = ESP_SKB_CB(skb)->tmp;
skb               142 net/ipv4/esp4.c 			kfree_skb(skb);
skb               146 net/ipv4/esp4.c 		skb_push(skb, skb->data - skb_mac_header(skb));
skb               147 net/ipv4/esp4.c 		secpath_reset(skb);
skb               148 net/ipv4/esp4.c 		xfrm_dev_resume(skb);
skb               150 net/ipv4/esp4.c 		xfrm_output_resume(skb, err);
skb               155 net/ipv4/esp4.c static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
skb               157 net/ipv4/esp4.c 	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
skb               158 net/ipv4/esp4.c 	void *tmp = ESP_SKB_CB(skb)->tmp;
skb               165 net/ipv4/esp4.c static void esp_output_restore_header(struct sk_buff *skb)
skb               167 net/ipv4/esp4.c 	void *tmp = ESP_SKB_CB(skb)->tmp;
skb               170 net/ipv4/esp4.c 	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
skb               174 net/ipv4/esp4.c static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
skb               185 net/ipv4/esp4.c 		struct xfrm_offload *xo = xfrm_offload(skb);
skb               190 net/ipv4/esp4.c 			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;
skb               193 net/ipv4/esp4.c 				 skb_transport_header(skb);
skb               206 net/ipv4/esp4.c 	struct sk_buff *skb = base->data;
skb               208 net/ipv4/esp4.c 	esp_output_restore_header(skb);
skb               228 net/ipv4/esp4.c static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
skb               244 net/ipv4/esp4.c 	len = skb->len + esp->tailen - skb_transport_offset(skb);
skb               266 net/ipv4/esp4.c 	*skb_mac_header(skb) = IPPROTO_UDP;
skb               272 net/ipv4/esp4.c int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
skb               284 net/ipv4/esp4.c 		int err = esp_output_udp_encap(x, skb, esp);
skb               290 net/ipv4/esp4.c 	if (!skb_cloned(skb)) {
skb               291 net/ipv4/esp4.c 		if (tailen <= skb_tailroom(skb)) {
skb               293 net/ipv4/esp4.c 			trailer = skb;
skb               297 net/ipv4/esp4.c 		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
skb               298 net/ipv4/esp4.c 			   && !skb_has_frag_list(skb)) {
skb               300 net/ipv4/esp4.c 			struct sock *sk = skb->sk;
skb               325 net/ipv4/esp4.c 			nfrags = skb_shinfo(skb)->nr_frags;
skb               327 net/ipv4/esp4.c 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
skb               329 net/ipv4/esp4.c 			skb_shinfo(skb)->nr_frags = ++nfrags;
skb               337 net/ipv4/esp4.c 			skb->len += tailen;
skb               338 net/ipv4/esp4.c 			skb->data_len += tailen;
skb               339 net/ipv4/esp4.c 			skb->truesize += tailen;
skb               348 net/ipv4/esp4.c 	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
skb               350 net/ipv4/esp4.c 	nfrags = skb_cow_data(skb, tailen, &trailer);
skb               354 net/ipv4/esp4.c 	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
skb               358 net/ipv4/esp4.c 	pskb_put(skb, trailer, tailen);
skb               365 net/ipv4/esp4.c int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
skb               407 net/ipv4/esp4.c 	esph = esp_output_set_extra(skb, x, esp->esph, extra);
skb               411 net/ipv4/esp4.c 	err = skb_to_sgvec(skb, sg,
skb               412 net/ipv4/esp4.c 		           (unsigned char *)esph - skb->data,
skb               421 net/ipv4/esp4.c 		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
skb               429 net/ipv4/esp4.c 		skb_shinfo(skb)->nr_frags = 1;
skb               434 net/ipv4/esp4.c 		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
skb               438 net/ipv4/esp4.c 		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
skb               439 net/ipv4/esp4.c 		err = skb_to_sgvec(skb, dsg,
skb               440 net/ipv4/esp4.c 			           (unsigned char *)esph - skb->data,
skb               447 net/ipv4/esp4.c 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
skb               449 net/ipv4/esp4.c 		aead_request_set_callback(req, 0, esp_output_done, skb);
skb               458 net/ipv4/esp4.c 	ESP_SKB_CB(skb)->tmp = tmp;
skb               471 net/ipv4/esp4.c 			esp_output_restore_header(skb);
skb               484 net/ipv4/esp4.c static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
skb               494 net/ipv4/esp4.c 	esp.proto = *skb_mac_header(skb);
skb               495 net/ipv4/esp4.c 	*skb_mac_header(skb) = IPPROTO_ESP;
skb               504 net/ipv4/esp4.c 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
skb               508 net/ipv4/esp4.c 		if (skb->len < padto)
skb               509 net/ipv4/esp4.c 			esp.tfclen = padto - skb->len;
skb               512 net/ipv4/esp4.c 	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
skb               513 net/ipv4/esp4.c 	esp.plen = esp.clen - skb->len - esp.tfclen;
skb               516 net/ipv4/esp4.c 	esp.esph = ip_esp_hdr(skb);
skb               518 net/ipv4/esp4.c 	esp.nfrags = esp_output_head(x, skb, &esp);
skb               525 net/ipv4/esp4.c 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
skb               526 net/ipv4/esp4.c 	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
skb               527 net/ipv4/esp4.c 				 ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
skb               529 net/ipv4/esp4.c 	skb_push(skb, -skb_network_offset(skb));
skb               531 net/ipv4/esp4.c 	return esp_output_tail(x, skb, &esp);
skb               534 net/ipv4/esp4.c static inline int esp_remove_trailer(struct sk_buff *skb)
skb               536 net/ipv4/esp4.c 	struct xfrm_state *x = xfrm_input_state(skb);
skb               537 net/ipv4/esp4.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               547 net/ipv4/esp4.c 	elen = skb->len - hlen;
skb               554 net/ipv4/esp4.c 	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
skb               566 net/ipv4/esp4.c 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb               567 net/ipv4/esp4.c 		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
skb               568 net/ipv4/esp4.c 		skb->csum = csum_block_sub(skb->csum, csumdiff,
skb               569 net/ipv4/esp4.c 					   skb->len - trimlen);
skb               571 net/ipv4/esp4.c 	pskb_trim(skb, skb->len - trimlen);
skb               579 net/ipv4/esp4.c int esp_input_done2(struct sk_buff *skb, int err)
skb               582 net/ipv4/esp4.c 	struct xfrm_state *x = xfrm_input_state(skb);
skb               583 net/ipv4/esp4.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               589 net/ipv4/esp4.c 		kfree(ESP_SKB_CB(skb)->tmp);
skb               594 net/ipv4/esp4.c 	err = esp_remove_trailer(skb);
skb               598 net/ipv4/esp4.c 	iph = ip_hdr(skb);
skb               603 net/ipv4/esp4.c 		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
skb               635 net/ipv4/esp4.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               638 net/ipv4/esp4.c 	skb_pull_rcsum(skb, hlen);
skb               640 net/ipv4/esp4.c 		skb_reset_transport_header(skb);
skb               642 net/ipv4/esp4.c 		skb_set_transport_header(skb, -ihl);
skb               655 net/ipv4/esp4.c 	struct sk_buff *skb = base->data;
skb               657 net/ipv4/esp4.c 	xfrm_input_resume(skb, esp_input_done2(skb, err));
skb               660 net/ipv4/esp4.c static void esp_input_restore_header(struct sk_buff *skb)
skb               662 net/ipv4/esp4.c 	esp_restore_header(skb, 0);
skb               663 net/ipv4/esp4.c 	__skb_pull(skb, 4);
skb               666 net/ipv4/esp4.c static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
skb               668 net/ipv4/esp4.c 	struct xfrm_state *x = xfrm_input_state(skb);
skb               676 net/ipv4/esp4.c 		esph = skb_push(skb, 4);
skb               679 net/ipv4/esp4.c 		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
skb               685 net/ipv4/esp4.c 	struct sk_buff *skb = base->data;
skb               687 net/ipv4/esp4.c 	esp_input_restore_header(skb);
skb               696 net/ipv4/esp4.c static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
skb               702 net/ipv4/esp4.c 	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
skb               712 net/ipv4/esp4.c 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
skb               726 net/ipv4/esp4.c 	if (!skb_cloned(skb)) {
skb               727 net/ipv4/esp4.c 		if (!skb_is_nonlinear(skb)) {
skb               731 net/ipv4/esp4.c 		} else if (!skb_has_frag_list(skb)) {
skb               732 net/ipv4/esp4.c 			nfrags = skb_shinfo(skb)->nr_frags;
skb               739 net/ipv4/esp4.c 	err = skb_cow_data(skb, 0, &trailer);
skb               751 net/ipv4/esp4.c 	ESP_SKB_CB(skb)->tmp = tmp;
skb               757 net/ipv4/esp4.c 	esp_input_set_header(skb, seqhi);
skb               760 net/ipv4/esp4.c 	err = skb_to_sgvec(skb, sg, 0, skb->len);
skb               766 net/ipv4/esp4.c 	skb->ip_summed = CHECKSUM_NONE;
skb               769 net/ipv4/esp4.c 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
skb               771 net/ipv4/esp4.c 		aead_request_set_callback(req, 0, esp_input_done, skb);
skb               781 net/ipv4/esp4.c 		esp_input_restore_header(skb);
skb               783 net/ipv4/esp4.c 	err = esp_input_done2(skb, err);
skb               789 net/ipv4/esp4.c static int esp4_err(struct sk_buff *skb, u32 info)
skb               791 net/ipv4/esp4.c 	struct net *net = dev_net(skb->dev);
skb               792 net/ipv4/esp4.c 	const struct iphdr *iph = (const struct iphdr *)skb->data;
skb               793 net/ipv4/esp4.c 	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
skb               796 net/ipv4/esp4.c 	switch (icmp_hdr(skb)->type) {
skb               798 net/ipv4/esp4.c 		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
skb               806 net/ipv4/esp4.c 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
skb               811 net/ipv4/esp4.c 	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
skb               812 net/ipv4/esp4.c 		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
skb               814 net/ipv4/esp4.c 		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
skb              1001 net/ipv4/esp4.c static int esp4_rcv_cb(struct sk_buff *skb, int err)
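
esp_output() sizes the ESP trailer before esp_output_head()/esp_output_tail() do the work: the payload plus the two trailer bytes (pad length, next header) plus any TFC padding is rounded up to the cipher block size, which the real code first aligns to 4. The arithmetic in isolation (my_esp_trailer is a hypothetical helper):

	#include <linux/kernel.h>

	/* clen = ciphertext length; plen = pad bytes, including the
	 * 2-byte (pad length, next header) trailer pair. */
	static void my_esp_trailer(unsigned int len, unsigned int tfclen,
				   unsigned int blksize,
				   unsigned int *clen, unsigned int *plen)
	{
		*clen = ALIGN(len + 2 + tfclen, blksize);
		*plen = *clen - len - tfclen;
	}

For example, len 52, tfclen 0 and blksize 16 give clen 64 and plen 12, i.e. ten pad bytes plus the trailer pair.
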
skb                29 net/ipv4/esp4_offload.c 					struct sk_buff *skb)
skb                31 net/ipv4/esp4_offload.c 	int offset = skb_gro_offset(skb);
skb                38 net/ipv4/esp4_offload.c 	if (!pskb_pull(skb, offset))
skb                41 net/ipv4/esp4_offload.c 	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
skb                44 net/ipv4/esp4_offload.c 	xo = xfrm_offload(skb);
skb                46 net/ipv4/esp4_offload.c 		struct sec_path *sp = secpath_set(skb);
skb                54 net/ipv4/esp4_offload.c 		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
skb                55 net/ipv4/esp4_offload.c 				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
skb                60 net/ipv4/esp4_offload.c 		skb->mark = xfrm_smark_get(skb->mark, x);
skb                65 net/ipv4/esp4_offload.c 		xo = xfrm_offload(skb);
skb                72 net/ipv4/esp4_offload.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
skb                73 net/ipv4/esp4_offload.c 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
skb                74 net/ipv4/esp4_offload.c 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
skb                75 net/ipv4/esp4_offload.c 	XFRM_SPI_SKB_CB(skb)->seq = seq;
skb                79 net/ipv4/esp4_offload.c 	xfrm_input(skb, IPPROTO_ESP, spi, -2);
skb                83 net/ipv4/esp4_offload.c 	secpath_reset(skb);
skb                85 net/ipv4/esp4_offload.c 	skb_push(skb, offset);
skb                86 net/ipv4/esp4_offload.c 	NAPI_GRO_CB(skb)->same_flow = 0;
skb                87 net/ipv4/esp4_offload.c 	NAPI_GRO_CB(skb)->flush = 1;
skb                92 net/ipv4/esp4_offload.c static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
skb                95 net/ipv4/esp4_offload.c 	struct iphdr *iph = ip_hdr(skb);
skb                96 net/ipv4/esp4_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb                99 net/ipv4/esp4_offload.c 	skb_push(skb, -skb_network_offset(skb));
skb               100 net/ipv4/esp4_offload.c 	esph = ip_esp_hdr(skb);
skb               101 net/ipv4/esp4_offload.c 	*skb_mac_header(skb) = IPPROTO_ESP;
skb               104 net/ipv4/esp4_offload.c 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
skb               110 net/ipv4/esp4_offload.c 						struct sk_buff *skb,
skb               113 net/ipv4/esp4_offload.c 	__skb_push(skb, skb->mac_len);
skb               114 net/ipv4/esp4_offload.c 	return skb_mac_gso_segment(skb, features);
skb               118 net/ipv4/esp4_offload.c 						   struct sk_buff *skb,
skb               123 net/ipv4/esp4_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               125 net/ipv4/esp4_offload.c 	skb->transport_header += x->props.header_len;
skb               128 net/ipv4/esp4_offload.c 		segs = ops->callbacks.gso_segment(skb, features);
skb               134 net/ipv4/esp4_offload.c 						    struct sk_buff *skb,
skb               139 net/ipv4/esp4_offload.c 		return xfrm4_tunnel_gso_segment(x, skb, features);
skb               141 net/ipv4/esp4_offload.c 		return xfrm4_transport_gso_segment(x, skb, features);
skb               147 net/ipv4/esp4_offload.c static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
skb               154 net/ipv4/esp4_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               160 net/ipv4/esp4_offload.c 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
skb               163 net/ipv4/esp4_offload.c 	sp = skb_sec_path(skb);
skb               166 net/ipv4/esp4_offload.c 	esph = ip_esp_hdr(skb);
skb               171 net/ipv4/esp4_offload.c 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
skb               174 net/ipv4/esp4_offload.c 	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
skb               176 net/ipv4/esp4_offload.c 	skb->encap_hdr_csum = 1;
skb               178 net/ipv4/esp4_offload.c 	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
skb               179 net/ipv4/esp4_offload.c 	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
skb               182 net/ipv4/esp4_offload.c 		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
skb               187 net/ipv4/esp4_offload.c 	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
skb               190 net/ipv4/esp4_offload.c static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
skb               193 net/ipv4/esp4_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               195 net/ipv4/esp4_offload.c 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
skb               199 net/ipv4/esp4_offload.c 		skb->ip_summed = CHECKSUM_NONE;
skb               201 net/ipv4/esp4_offload.c 	return esp_input_done2(skb, 0);
skb               204 net/ipv4/esp4_offload.c static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
skb               218 net/ipv4/esp4_offload.c 	xo = xfrm_offload(skb);
skb               224 net/ipv4/esp4_offload.c 	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
skb               225 net/ipv4/esp4_offload.c 	    x->xso.dev != skb->dev) {
skb               241 net/ipv4/esp4_offload.c 	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
skb               242 net/ipv4/esp4_offload.c 	esp.plen = esp.clen - skb->len - esp.tfclen;
skb               245 net/ipv4/esp4_offload.c 	esp.esph = ip_esp_hdr(skb);
skb               248 net/ipv4/esp4_offload.c 	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
skb               249 net/ipv4/esp4_offload.c 		esp.nfrags = esp_output_head(x, skb, &esp);
skb               259 net/ipv4/esp4_offload.c 	skb_push(skb, -skb_network_offset(skb));
skb               264 net/ipv4/esp4_offload.c 		if (!skb_is_gso(skb))
skb               267 net/ipv4/esp4_offload.c 			xo->seq.low += skb_shinfo(skb)->gso_segs;
skb               272 net/ipv4/esp4_offload.c 	ip_hdr(skb)->tot_len = htons(skb->len);
skb               273 net/ipv4/esp4_offload.c 	ip_send_check(ip_hdr(skb));
skb               278 net/ipv4/esp4_offload.c 	err = esp_output_tail(x, skb, &esp);
skb               282 net/ipv4/esp4_offload.c 	secpath_reset(skb);
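
esp4_gso_segment() above only proceeds for skbs that were flagged SKB_GSO_ESP at encap time and that still carry a secpath holding the owning xfrm_state. A reduced sketch of that gate (my_can_segment_esp is hypothetical; the real function also weighs pskb_may_pull() room and NETIF_F_HW_ESP* device features):

	#include <linux/skbuff.h>

	static bool my_can_segment_esp(struct sk_buff *skb)
	{
		/* Only skbs marked as ESP GSO at encap time qualify. */
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
			return false;

		/* The secpath carries the xfrm_state whose aead and IV size
		 * the segmentation path needs. */
		return skb_sec_path(skb) != NULL;
	}
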
skb               280 net/ipv4/fib_frontend.c __be32 fib_compute_spec_dst(struct sk_buff *skb)
skb               282 net/ipv4/fib_frontend.c 	struct net_device *dev = skb->dev;
skb               289 net/ipv4/fib_frontend.c 	rt = skb_rtable(skb);
skb               292 net/ipv4/fib_frontend.c 		return ip_hdr(skb)->daddr;
skb               299 net/ipv4/fib_frontend.c 	if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
skb               304 net/ipv4/fib_frontend.c 			.daddr = ip_hdr(skb)->saddr,
skb               305 net/ipv4/fib_frontend.c 			.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
skb               307 net/ipv4/fib_frontend.c 			.flowi4_mark = vmark ? skb->mark : 0,
skb               315 net/ipv4/fib_frontend.c 	return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
skb               353 net/ipv4/fib_frontend.c static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
skb               378 net/ipv4/fib_frontend.c 	fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
skb               379 net/ipv4/fib_frontend.c 	if (!fib4_rules_early_flow_dissect(net, skb, &fl4, &flkeys)) {
skb               428 net/ipv4/fib_frontend.c int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
skb               432 net/ipv4/fib_frontend.c 	int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
skb               455 net/ipv4/fib_frontend.c 	return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
skb               728 net/ipv4/fib_frontend.c static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
skb               754 net/ipv4/fib_frontend.c 	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
skb               845 net/ipv4/fib_frontend.c static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               848 net/ipv4/fib_frontend.c 	struct net *net = sock_net(skb->sk);
skb               853 net/ipv4/fib_frontend.c 	err = rtm_to_fib_config(net, skb, nlh, &cfg, extack);
skb               875 net/ipv4/fib_frontend.c static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               878 net/ipv4/fib_frontend.c 	struct net *net = sock_net(skb->sk);
skb               883 net/ipv4/fib_frontend.c 	err = rtm_to_fib_config(net, skb, nlh, &cfg, extack);
skb               974 net/ipv4/fib_frontend.c static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
skb               979 net/ipv4/fib_frontend.c 	struct net *net = sock_net(skb->sk);
skb               998 net/ipv4/fib_frontend.c 		return skb->len;
skb              1004 net/ipv4/fib_frontend.c 				return skb->len;
skb              1011 net/ipv4/fib_frontend.c 		err = fib_table_dump(tb, skb, cb, &filter);
skb              1013 net/ipv4/fib_frontend.c 		return skb->len ? : err;
skb              1030 net/ipv4/fib_frontend.c 			err = fib_table_dump(tb, skb, cb, &filter);
skb              1032 net/ipv4/fib_frontend.c 				if (likely(skb->len))
skb              1043 net/ipv4/fib_frontend.c 	err = skb->len;
skb              1363 net/ipv4/fib_frontend.c static void nl_fib_input(struct sk_buff *skb)
skb              1370 net/ipv4/fib_frontend.c 	net = sock_net(skb->sk);
skb              1371 net/ipv4/fib_frontend.c 	nlh = nlmsg_hdr(skb);
skb              1372 net/ipv4/fib_frontend.c 	if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
skb              1373 net/ipv4/fib_frontend.c 	    skb->len < nlh->nlmsg_len ||
skb              1377 net/ipv4/fib_frontend.c 	skb = netlink_skb_clone(skb, GFP_KERNEL);
skb              1378 net/ipv4/fib_frontend.c 	if (!skb)
skb              1380 net/ipv4/fib_frontend.c 	nlh = nlmsg_hdr(skb);
skb              1385 net/ipv4/fib_frontend.c 	portid = NETLINK_CB(skb).portid;      /* netlink portid */
skb              1386 net/ipv4/fib_frontend.c 	NETLINK_CB(skb).portid = 0;        /* from kernel */
skb              1387 net/ipv4/fib_frontend.c 	NETLINK_CB(skb).dst_group = 0;  /* unicast */
skb              1388 net/ipv4/fib_frontend.c 	netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
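
nl_fib_input above answers a request in place: it clones the request skb, rewrites NETLINK_CB so that portid 0 marks the kernel as sender and dst_group 0 makes the reply a unicast, then sends it back. A sketch of the same reply idiom built from a fresh skb; my_sock, MY_TYPE and the payload are assumptions.

	static void my_reply(struct sock *my_sock, u32 portid,
			     const void *payload, int len)
	{
		struct sk_buff *skb = nlmsg_new(len, GFP_KERNEL);
		struct nlmsghdr *nlh;

		if (!skb)
			return;
		nlh = nlmsg_put(skb, 0, 0, MY_TYPE, len, 0);	/* portid 0: from kernel */
		if (!nlh) {
			kfree_skb(skb);
			return;
		}
		memcpy(nlmsg_data(nlh), payload, len);
		netlink_unicast(my_sock, skb, portid, MSG_DONTWAIT);
	}
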
skb                38 net/ipv4/fib_lookup.h int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id,
skb               218 net/ipv4/fib_rules.c static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
skb               223 net/ipv4/fib_rules.c 	struct net *net = sock_net(skb->sk);
skb               332 net/ipv4/fib_rules.c static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
skb               342 net/ipv4/fib_rules.c 	     nla_put_in_addr(skb, FRA_DST, rule4->dst)) ||
skb               344 net/ipv4/fib_rules.c 	     nla_put_in_addr(skb, FRA_SRC, rule4->src)))
skb               348 net/ipv4/fib_rules.c 	    nla_put_u32(skb, FRA_FLOW, rule4->tclassid))
skb               507 net/ipv4/fib_semantics.c 	struct sk_buff *skb;
skb               511 net/ipv4/fib_semantics.c 	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
skb               512 net/ipv4/fib_semantics.c 	if (!skb)
skb               515 net/ipv4/fib_semantics.c 	err = fib_dump_info(skb, info->portid, seq, event, tb_id,
skb               521 net/ipv4/fib_semantics.c 		kfree_skb(skb);
skb               524 net/ipv4/fib_semantics.c 	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
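
The rtmsg_fib fragment above follows the standard rtnetlink notification shape: size the skb from fib_nlmsg_size(), fill it, multicast it with rtnl_notify(), and report a socket error to the group if the fill failed. A hedged sketch of that shape; my_fill_msg() is a hypothetical fill routine in the nlmsg_put()/nlmsg_end() style (see the sketch after the fib_dump_info entries below).

	static void my_notify(struct net *net, u32 portid, u32 seq)
	{
		struct sk_buff *skb;
		int err = -ENOBUFS;

		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
		if (!skb)
			goto errout;

		err = my_fill_msg(skb, portid, seq);
		if (err < 0) {
			kfree_skb(skb);	/* message didn't fit: the size estimate is a bug */
			goto errout;
		}
		rtnl_notify(skb, net, portid, RTNLGRP_IPV4_ROUTE, NULL, GFP_KERNEL);
		return;
	errout:
		rtnl_set_sk_err(net, RTNLGRP_IPV4_ROUTE, err);
	}
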
skb              1584 net/ipv4/fib_semantics.c int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
skb              1609 net/ipv4/fib_semantics.c 		if (nla_put_in_addr(skb, RTA_GATEWAY, nhc->nhc_gw.ipv4))
skb              1621 net/ipv4/fib_semantics.c 			nla = nla_reserve(skb, RTA_VIA, alen + 2);
skb              1628 net/ipv4/fib_semantics.c 		} else if (nla_put_in6_addr(skb, RTA_GATEWAY,
skb              1640 net/ipv4/fib_semantics.c 	    nla_put_u32(skb, RTA_OIF, nhc->nhc_dev->ifindex))
skb              1644 net/ipv4/fib_semantics.c 	    lwtunnel_fill_encap(skb, nhc->nhc_lwtstate,
skb              1656 net/ipv4/fib_semantics.c int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
skb              1663 net/ipv4/fib_semantics.c 	rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
skb              1670 net/ipv4/fib_semantics.c 	if (fib_nexthop_info(skb, nhc, rt_family, &flags, true) < 0)
skb              1676 net/ipv4/fib_semantics.c 	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
skb              1687 net/ipv4/fib_semantics.c static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
skb              1691 net/ipv4/fib_semantics.c 	mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
skb              1696 net/ipv4/fib_semantics.c 		if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0)
skb              1702 net/ipv4/fib_semantics.c 		if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
skb              1707 net/ipv4/fib_semantics.c 		    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
skb              1713 net/ipv4/fib_semantics.c 	nla_nest_end(skb, mp);
skb              1721 net/ipv4/fib_semantics.c static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
skb              1727 net/ipv4/fib_semantics.c int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
skb              1735 net/ipv4/fib_semantics.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
skb              1748 net/ipv4/fib_semantics.c 	if (nla_put_u32(skb, RTA_TABLE, tb_id))
skb              1756 net/ipv4/fib_semantics.c 	    nla_put_in_addr(skb, RTA_DST, dst))
skb              1759 net/ipv4/fib_semantics.c 	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
skb              1761 net/ipv4/fib_semantics.c 	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
skb              1765 net/ipv4/fib_semantics.c 	    nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
skb              1769 net/ipv4/fib_semantics.c 		if (nla_put_u32(skb, RTA_NH_ID, fi->nh->id))
skb              1779 net/ipv4/fib_semantics.c 		if (fib_nexthop_info(skb, nhc, AF_INET, &flags, false) < 0)
skb              1789 net/ipv4/fib_semantics.c 			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
skb              1794 net/ipv4/fib_semantics.c 		if (fib_add_multipath(skb, fi) < 0)
skb              1798 net/ipv4/fib_semantics.c 	nlmsg_end(skb, nlh);
skb              1802 net/ipv4/fib_semantics.c 	nlmsg_cancel(skb, nlh);
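
fib_dump_info above shows the canonical message-fill pattern: nlmsg_put() reserves the header, each nla_put_*() can fail when the skb runs out of tailroom, and the two exits either commit with nlmsg_end() or roll the partial message back with nlmsg_cancel(). A minimal sketch, with MY_TYPE and MY_ATTR as placeholders:

	static int my_fill_msg(struct sk_buff *skb, u32 portid, u32 seq)
	{
		struct nlmsghdr *nlh;

		nlh = nlmsg_put(skb, portid, seq, MY_TYPE, 0, NLM_F_MULTI);
		if (!nlh)
			return -EMSGSIZE;

		if (nla_put_u32(skb, MY_ATTR, 42))
			goto nla_put_failure;

		nlmsg_end(skb, nlh);	/* patch nlmsg_len; the message is now valid */
		return 0;

	nla_put_failure:
		nlmsg_cancel(skb, nlh);	/* trim the skb back to before nlmsg_put */
		return -EMSGSIZE;
	}
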
skb              2191 net/ipv4/fib_semantics.c 		     struct flowi4 *fl4, const struct sk_buff *skb)
skb              2198 net/ipv4/fib_semantics.c 		int h = fib_multipath_hash(net, fl4, skb, NULL);
skb              2088 net/ipv4/fib_trie.c 			     struct sk_buff *skb, struct netlink_callback *cb,
skb              2131 net/ipv4/fib_trie.c 				err = fib_dump_info(skb,
skb              2132 net/ipv4/fib_trie.c 						    NETLINK_CB(cb->skb).portid,
skb              2147 net/ipv4/fib_trie.c 			err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
skb              2158 net/ipv4/fib_trie.c 	return skb->len;
skb              2167 net/ipv4/fib_trie.c int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
skb              2182 net/ipv4/fib_trie.c 		return skb->len;
skb              2187 net/ipv4/fib_trie.c 		err = fn_trie_dump_leaf(l, tb, skb, cb, filter);
skb              2208 net/ipv4/fib_trie.c 	return skb->len;
skb                54 net/ipv4/fou.c static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
skb                60 net/ipv4/fou.c 		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
skb                62 net/ipv4/fou.c 		ipv6_hdr(skb)->payload_len =
skb                63 net/ipv4/fou.c 		    htons(ntohs(ipv6_hdr(skb)->payload_len) - len);
skb                65 net/ipv4/fou.c 	__skb_pull(skb, len);
skb                66 net/ipv4/fou.c 	skb_postpull_rcsum(skb, udp_hdr(skb), len);
skb                67 net/ipv4/fou.c 	skb_reset_transport_header(skb);
skb                68 net/ipv4/fou.c 	return iptunnel_pull_offloads(skb);
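
fou_recv_pull above is the generic decapsulation idiom: shrink the length recorded in the outer IP header, pull the encapsulation header, patch the software checksum for the removed bytes, and mark where the transport layer now starts. An IPv4-only sketch, assuming skb->data sits at the header being removed:

	static int my_decap(struct sk_buff *skb, size_t encap_len)
	{
		void *hdr = skb->data;		/* the bytes about to be pulled */

		if (!pskb_may_pull(skb, encap_len))
			return -EINVAL;

		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - encap_len);
		__skb_pull(skb, encap_len);
		skb_postpull_rcsum(skb, hdr, encap_len);	/* fix CHECKSUM_COMPLETE */
		skb_reset_transport_header(skb);
		return 0;
	}
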
skb                71 net/ipv4/fou.c static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
skb                78 net/ipv4/fou.c 	if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
skb                84 net/ipv4/fou.c 	kfree_skb(skb);
skb                88 net/ipv4/fou.c static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
skb                98 net/ipv4/fou.c 	if (skb->remcsum_offload)
skb               101 net/ipv4/fou.c 	if (!pskb_may_pull(skb, plen))
skb               103 net/ipv4/fou.c 	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
skb               105 net/ipv4/fou.c 	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
skb               111 net/ipv4/fou.c static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
skb               114 net/ipv4/fou.c 	kfree_skb(skb);
skb               118 net/ipv4/fou.c static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
skb               131 net/ipv4/fou.c 	if (!pskb_may_pull(skb, len))
skb               134 net/ipv4/fou.c 	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
skb               156 net/ipv4/fou.c 		if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
skb               169 net/ipv4/fou.c 	if (!pskb_may_pull(skb, len))
skb               173 net/ipv4/fou.c 	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
skb               181 net/ipv4/fou.c 		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
skb               183 net/ipv4/fou.c 		ipv6_hdr(skb)->payload_len =
skb               184 net/ipv4/fou.c 		    htons(ntohs(ipv6_hdr(skb)->payload_len) - len);
skb               189 net/ipv4/fou.c 	skb_postpull_rcsum(skb, udp_hdr(skb), len);
skb               199 net/ipv4/fou.c 			guehdr = gue_remcsum(skb, guehdr, data + doffset,
skb               213 net/ipv4/fou.c 		return gue_control_message(skb, guehdr);
skb               216 net/ipv4/fou.c 	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
skb               217 net/ipv4/fou.c 	skb_reset_transport_header(skb);
skb               219 net/ipv4/fou.c 	if (iptunnel_pull_offloads(skb))
skb               225 net/ipv4/fou.c 	kfree_skb(skb);
skb               231 net/ipv4/fou.c 				       struct sk_buff *skb)
skb               244 net/ipv4/fou.c 	NAPI_GRO_CB(skb)->encap_mark = 0;
skb               247 net/ipv4/fou.c 	NAPI_GRO_CB(skb)->is_fou = 1;
skb               250 net/ipv4/fou.c 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
skb               255 net/ipv4/fou.c 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
skb               263 net/ipv4/fou.c static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
skb               272 net/ipv4/fou.c 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
skb               277 net/ipv4/fou.c 	err = ops->callbacks.gro_complete(skb, nhoff);
skb               279 net/ipv4/fou.c 	skb_set_inner_mac_header(skb, nhoff);
skb               287 net/ipv4/fou.c static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
skb               296 net/ipv4/fou.c 	if (skb->remcsum_offload)
skb               299 net/ipv4/fou.c 	if (!NAPI_GRO_CB(skb)->csum_valid)
skb               302 net/ipv4/fou.c 	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
skb               305 net/ipv4/fou.c 	skb->remcsum_offload = 1;
skb               312 net/ipv4/fou.c 				       struct sk_buff *skb)
skb               329 net/ipv4/fou.c 	off = skb_gro_offset(skb);
skb               332 net/ipv4/fou.c 	guehdr = skb_gro_header_fast(skb, off);
skb               333 net/ipv4/fou.c 	if (skb_gro_header_hard(skb, len)) {
skb               334 net/ipv4/fou.c 		guehdr = skb_gro_header_slow(skb, len, off);
skb               361 net/ipv4/fou.c 	if (skb_gro_header_hard(skb, len)) {
skb               362 net/ipv4/fou.c 		guehdr = skb_gro_header_slow(skb, len, off);
skb               376 net/ipv4/fou.c 	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);
skb               386 net/ipv4/fou.c 			guehdr = gue_gro_remcsum(skb, off, guehdr,
skb               400 net/ipv4/fou.c 	skb_gro_pull(skb, hdrlen);
skb               436 net/ipv4/fou.c 	NAPI_GRO_CB(skb)->encap_mark = 0;
skb               439 net/ipv4/fou.c 	NAPI_GRO_CB(skb)->is_fou = 1;
skb               442 net/ipv4/fou.c 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
skb               447 net/ipv4/fou.c 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
skb               453 net/ipv4/fou.c 	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
skb               458 net/ipv4/fou.c static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
skb               461 net/ipv4/fou.c 	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
skb               489 net/ipv4/fou.c 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
skb               494 net/ipv4/fou.c 	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
skb               496 net/ipv4/fou.c 	skb_set_inner_mac_header(skb, nhoff + guehlen);
skb               762 net/ipv4/fou.c static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
skb               775 net/ipv4/fou.c static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
skb               825 net/ipv4/fou.c 			 u32 flags, struct sk_buff *skb, u8 cmd)
skb               829 net/ipv4/fou.c 	hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
skb               833 net/ipv4/fou.c 	if (fou_fill_info(fou, skb) < 0)
skb               836 net/ipv4/fou.c 	genlmsg_end(skb, hdr);
skb               840 net/ipv4/fou.c 	genlmsg_cancel(skb, hdr);
skb               844 net/ipv4/fou.c static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
skb               891 net/ipv4/fou.c static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               893 net/ipv4/fou.c 	struct net *net = sock_net(skb->sk);
skb               902 net/ipv4/fou.c 		ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
skb               904 net/ipv4/fou.c 				    skb, FOU_CMD_GET);
skb               911 net/ipv4/fou.c 	return skb->len;
skb               971 net/ipv4/fou.c int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb               976 net/ipv4/fou.c 	err = iptunnel_handle_offloads(skb, type);
skb               980 net/ipv4/fou.c 	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
skb               981 net/ipv4/fou.c 						skb, 0, 0, false);
skb               987 net/ipv4/fou.c int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb               997 net/ipv4/fou.c 	    skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1005 net/ipv4/fou.c 	err = iptunnel_handle_offloads(skb, type);
skb              1010 net/ipv4/fou.c 	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
skb              1011 net/ipv4/fou.c 						skb, 0, 0, false);
skb              1015 net/ipv4/fou.c 	skb_push(skb, hdrlen);
skb              1017 net/ipv4/fou.c 	guehdr = (struct guehdr *)skb->data;
skb              1035 net/ipv4/fou.c 			u16 csum_start = skb_checksum_start_offset(skb);
skb              1043 net/ipv4/fou.c 			pd[1] = htons(csum_start + skb->csum_offset);
skb              1045 net/ipv4/fou.c 			if (!skb_is_gso(skb)) {
skb              1046 net/ipv4/fou.c 				skb->ip_summed = CHECKSUM_NONE;
skb              1047 net/ipv4/fou.c 				skb->encapsulation = 0;
skb              1062 net/ipv4/fou.c static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb              1067 net/ipv4/fou.c 	skb_push(skb, sizeof(struct udphdr));
skb              1068 net/ipv4/fou.c 	skb_reset_transport_header(skb);
skb              1070 net/ipv4/fou.c 	uh = udp_hdr(skb);
skb              1074 net/ipv4/fou.c 	uh->len = htons(skb->len);
skb              1075 net/ipv4/fou.c 	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
skb              1076 net/ipv4/fou.c 		     fl4->saddr, fl4->daddr, skb->len);
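
fou_build_udp above is the encapsulation counterpart: skb_push() makes room at the head, the transport header is pointed at it, and a UDP header is written whose length covers everything already in the skb. A sketch with caller-supplied addresses and ports as assumptions:

	static void my_encap_udp(struct sk_buff *skb, __be32 saddr, __be32 daddr,
				 __be16 sport, __be16 dport)
	{
		struct udphdr *uh;

		skb_push(skb, sizeof(struct udphdr));
		skb_reset_transport_header(skb);

		uh = udp_hdr(skb);
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len);	/* UDP length spans the whole remaining skb */
		udp_set_csum(true /* nocheck */, skb, saddr, daddr, skb->len);
	}
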
skb              1081 net/ipv4/fou.c static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb              1089 net/ipv4/fou.c 	err = __fou_build_header(skb, e, protocol, &sport, type);
skb              1093 net/ipv4/fou.c 	fou_build_udp(skb, e, fl4, protocol, sport);
skb              1098 net/ipv4/fou.c static int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb              1106 net/ipv4/fou.c 	err = __gue_build_header(skb, e, protocol, &sport, type);
skb              1110 net/ipv4/fou.c 	fou_build_udp(skb, e, fl4, protocol, sport);
skb              1115 net/ipv4/fou.c static int gue_err_proto_handler(int proto, struct sk_buff *skb, u32 info)
skb              1120 net/ipv4/fou.c 		if (!ipprot->err_handler(skb, info))
skb              1127 net/ipv4/fou.c static int gue_err(struct sk_buff *skb, u32 info)
skb              1129 net/ipv4/fou.c 	int transport_offset = skb_transport_offset(skb);
skb              1135 net/ipv4/fou.c 	if (!pskb_may_pull(skb, transport_offset + len))
skb              1138 net/ipv4/fou.c 	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
skb              1145 net/ipv4/fou.c 		skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
skb              1149 net/ipv4/fou.c 			ret = gue_err_proto_handler(IPPROTO_IPIP, skb, info);
skb              1153 net/ipv4/fou.c 			ret = gue_err_proto_handler(IPPROTO_IPV6, skb, info);
skb              1170 net/ipv4/fou.c 	if (!pskb_may_pull(skb, transport_offset + len + optlen))
skb              1173 net/ipv4/fou.c 	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
skb              1185 net/ipv4/fou.c 	skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
skb              1186 net/ipv4/fou.c 	ret = gue_err_proto_handler(guehdr->proto_ctype, skb, info);
skb              1189 net/ipv4/fou.c 	skb_set_transport_header(skb, transport_offset);
skb                62 net/ipv4/gre_demux.c int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
skb                69 net/ipv4/gre_demux.c 	if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
skb                72 net/ipv4/gre_demux.c 	greh = (struct gre_base_hdr *)(skb->data + nhs);
skb                79 net/ipv4/gre_demux.c 	if (!pskb_may_pull(skb, nhs + hdr_len))
skb                82 net/ipv4/gre_demux.c 	greh = (struct gre_base_hdr *)(skb->data + nhs);
skb                87 net/ipv4/gre_demux.c 		if (!skb_checksum_simple_validate(skb)) {
skb                88 net/ipv4/gre_demux.c 			skb_checksum_try_convert(skb, IPPROTO_GRE,
skb               117 net/ipv4/gre_demux.c 		val = skb_header_pointer(skb, nhs + hdr_len,
skb               135 net/ipv4/gre_demux.c 		if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
skb               138 net/ipv4/gre_demux.c 		ershdr = (struct erspan_base_hdr *)(skb->data + nhs + hdr_len);
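
Note the discipline in gre_parse_header above: pskb_may_pull() may reallocate the header and move skb->data, so greh is recomputed after every pull. A compact sketch of the rule (struct my_hdr, my_opt_len() and my_handle() are hypothetical):

	static int my_parse(struct sk_buff *skb)
	{
		const struct my_hdr *h;

		if (!pskb_may_pull(skb, sizeof(*h)))
			return -EINVAL;
		h = (const struct my_hdr *)skb->data;

		if (!pskb_may_pull(skb, sizeof(*h) + my_opt_len(h)))
			return -EINVAL;
		h = (const struct my_hdr *)skb->data;	/* re-read: the pull may move data */

		return my_handle(h);
	}
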
skb               146 net/ipv4/gre_demux.c static int gre_rcv(struct sk_buff *skb)
skb               152 net/ipv4/gre_demux.c 	if (!pskb_may_pull(skb, 12))
skb               155 net/ipv4/gre_demux.c 	ver = skb->data[1]&0x7f;
skb               163 net/ipv4/gre_demux.c 	ret = proto->handler(skb);
skb               170 net/ipv4/gre_demux.c 	kfree_skb(skb);
skb               174 net/ipv4/gre_demux.c static int gre_err(struct sk_buff *skb, u32 info)
skb               177 net/ipv4/gre_demux.c 	const struct iphdr *iph = (const struct iphdr *)skb->data;
skb               178 net/ipv4/gre_demux.c 	u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f;
skb               187 net/ipv4/gre_demux.c 		proto->err_handler(skb, info);
skb                14 net/ipv4/gre_offload.c static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
skb                17 net/ipv4/gre_offload.c 	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
skb                19 net/ipv4/gre_offload.c 	u16 mac_offset = skb->mac_header;
skb                20 net/ipv4/gre_offload.c 	__be16 protocol = skb->protocol;
skb                21 net/ipv4/gre_offload.c 	u16 mac_len = skb->mac_len;
skb                25 net/ipv4/gre_offload.c 	if (!skb->encapsulation)
skb                31 net/ipv4/gre_offload.c 	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
skb                35 net/ipv4/gre_offload.c 	skb->encapsulation = 0;
skb                36 net/ipv4/gre_offload.c 	SKB_GSO_CB(skb)->encap_level = 0;
skb                37 net/ipv4/gre_offload.c 	__skb_pull(skb, tnl_hlen);
skb                38 net/ipv4/gre_offload.c 	skb_reset_mac_header(skb);
skb                39 net/ipv4/gre_offload.c 	skb_set_network_header(skb, skb_inner_network_offset(skb));
skb                40 net/ipv4/gre_offload.c 	skb->mac_len = skb_inner_network_offset(skb);
skb                41 net/ipv4/gre_offload.c 	skb->protocol = skb->inner_protocol;
skb                43 net/ipv4/gre_offload.c 	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
skb                44 net/ipv4/gre_offload.c 	skb->encap_hdr_csum = need_csum;
skb                46 net/ipv4/gre_offload.c 	features &= skb->dev->hw_enc_features;
skb                49 net/ipv4/gre_offload.c 	segs = skb_mac_gso_segment(skb, features);
skb                51 net/ipv4/gre_offload.c 		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
skb                58 net/ipv4/gre_offload.c 	outer_hlen = skb_tnl_header_len(skb);
skb                60 net/ipv4/gre_offload.c 	skb = segs;
skb                66 net/ipv4/gre_offload.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb                67 net/ipv4/gre_offload.c 			skb_reset_inner_headers(skb);
skb                68 net/ipv4/gre_offload.c 			skb->encapsulation = 1;
skb                71 net/ipv4/gre_offload.c 		skb->mac_len = mac_len;
skb                72 net/ipv4/gre_offload.c 		skb->protocol = protocol;
skb                74 net/ipv4/gre_offload.c 		__skb_push(skb, outer_hlen);
skb                75 net/ipv4/gre_offload.c 		skb_reset_mac_header(skb);
skb                76 net/ipv4/gre_offload.c 		skb_set_network_header(skb, mac_len);
skb                77 net/ipv4/gre_offload.c 		skb_set_transport_header(skb, gre_offset);
skb                82 net/ipv4/gre_offload.c 		greh = (struct gre_base_hdr *)skb_transport_header(skb);
skb                85 net/ipv4/gre_offload.c 		if (gso_partial && skb_is_gso(skb)) {
skb                92 net/ipv4/gre_offload.c 			partial_adj = skb->len + skb_headroom(skb) -
skb                93 net/ipv4/gre_offload.c 				      SKB_GSO_CB(skb)->data_offset -
skb                94 net/ipv4/gre_offload.c 				      skb_shinfo(skb)->gso_size;
skb               101 net/ipv4/gre_offload.c 		*pcsum = gso_make_checksum(skb, 0);
skb               102 net/ipv4/gre_offload.c 	} while ((skb = skb->next));
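
gre_gso_segment above shows the general tunnel GSO recipe: save the outer protocol and mac_len, pull the tunnel header to expose the inner packet, segment that, then walk the resulting list and rebuild the outer header on every segment. A simplified sketch (the checksum and GSO-partial handling of the real code are omitted):

	static struct sk_buff *my_tnl_gso(struct sk_buff *skb,
					  netdev_features_t features, int tnl_hlen)
	{
		__be16 protocol = skb->protocol;	/* outer values, restored per segment */
		u16 mac_len = skb->mac_len;
		struct sk_buff *segs;
		int outer_hlen;

		if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
			return ERR_PTR(-EINVAL);

		__skb_pull(skb, tnl_hlen);		/* expose the inner packet */
		skb_reset_mac_header(skb);
		skb->mac_len = skb_inner_network_offset(skb);
		skb->protocol = skb->inner_protocol;

		segs = skb_mac_gso_segment(skb, features);
		if (IS_ERR_OR_NULL(segs))
			return segs;

		outer_hlen = skb_tnl_header_len(skb);
		skb = segs;
		do {					/* put the outer header back on each segment */
			skb->mac_len = mac_len;
			skb->protocol = protocol;
			__skb_push(skb, outer_hlen);
			skb_reset_mac_header(skb);
			skb_set_network_header(skb, mac_len);
		} while ((skb = skb->next));

		return segs;
	}
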
skb               108 net/ipv4/gre_offload.c 				       struct sk_buff *skb)
skb               119 net/ipv4/gre_offload.c 	if (NAPI_GRO_CB(skb)->encap_mark)
skb               122 net/ipv4/gre_offload.c 	NAPI_GRO_CB(skb)->encap_mark = 1;
skb               124 net/ipv4/gre_offload.c 	off = skb_gro_offset(skb);
skb               126 net/ipv4/gre_offload.c 	greh = skb_gro_header_fast(skb, off);
skb               127 net/ipv4/gre_offload.c 	if (skb_gro_header_hard(skb, hlen)) {
skb               128 net/ipv4/gre_offload.c 		greh = skb_gro_header_slow(skb, hlen, off);
skb               147 net/ipv4/gre_offload.c 	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
skb               166 net/ipv4/gre_offload.c 	if (skb_gro_header_hard(skb, hlen)) {
skb               167 net/ipv4/gre_offload.c 		greh = skb_gro_header_slow(skb, hlen, off);
skb               173 net/ipv4/gre_offload.c 	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
skb               174 net/ipv4/gre_offload.c 		if (skb_gro_checksum_simple_validate(skb))
skb               177 net/ipv4/gre_offload.c 		skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0,
skb               211 net/ipv4/gre_offload.c 	skb_gro_pull(skb, grehlen);
skb               214 net/ipv4/gre_offload.c 	skb_gro_postpull_rcsum(skb, greh, grehlen);
skb               216 net/ipv4/gre_offload.c 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
skb               222 net/ipv4/gre_offload.c 	skb_gro_flush_final(skb, pp, flush);
skb               227 net/ipv4/gre_offload.c static int gre_gro_complete(struct sk_buff *skb, int nhoff)
skb               229 net/ipv4/gre_offload.c 	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
skb               235 net/ipv4/gre_offload.c 	skb->encapsulation = 1;
skb               236 net/ipv4/gre_offload.c 	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
skb               248 net/ipv4/gre_offload.c 		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
skb               252 net/ipv4/gre_offload.c 	skb_set_inner_mac_header(skb, nhoff + grehlen);
skb               101 net/ipv4/icmp.c 	struct sk_buff *skb;
skb               189 net/ipv4/icmp.c 	bool (*handler)(struct sk_buff *skb);
skb               348 net/ipv4/icmp.c 			  struct sk_buff *skb)
skb               353 net/ipv4/icmp.c 	csum = skb_copy_and_csum_bits(icmp_param->skb,
skb               357 net/ipv4/icmp.c 	skb->csum = csum_block_add(skb->csum, csum, odd);
skb               359 net/ipv4/icmp.c 		nf_ct_attach(skb, icmp_param->skb);
skb               368 net/ipv4/icmp.c 	struct sk_buff *skb;
skb               377 net/ipv4/icmp.c 	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
skb               378 net/ipv4/icmp.c 		struct icmphdr *icmph = icmp_hdr(skb);
skb               389 net/ipv4/icmp.c 		skb->ip_summed = CHECKSUM_NONE;
skb               398 net/ipv4/icmp.c static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
skb               401 net/ipv4/icmp.c 	struct rtable *rt = skb_rtable(skb);
skb               407 net/ipv4/icmp.c 	u32 mark = IP4_REPLY_MARK(net, skb->mark);
skb               411 net/ipv4/icmp.c 	if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb))
skb               429 net/ipv4/icmp.c 	inet->tos = ip_hdr(skb)->tos;
skb               431 net/ipv4/icmp.c 	daddr = ipc.addr = ip_hdr(skb)->saddr;
skb               432 net/ipv4/icmp.c 	saddr = fib_compute_spec_dst(skb);
skb               444 net/ipv4/icmp.c 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
skb               446 net/ipv4/icmp.c 	fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
skb               447 net/ipv4/icmp.c 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
skb               709 net/ipv4/icmp.c 	icmp_param.skb	  = skb_in;
skb               751 net/ipv4/icmp.c static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
skb               753 net/ipv4/icmp.c 	const struct iphdr *iph = (const struct iphdr *) skb->data;
skb               760 net/ipv4/icmp.c 	if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
skb               761 net/ipv4/icmp.c 		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
skb               765 net/ipv4/icmp.c 	raw_icmp_error(skb, protocol, info);
skb               769 net/ipv4/icmp.c 		ipprot->err_handler(skb, info);
skb               787 net/ipv4/icmp.c static bool icmp_unreach(struct sk_buff *skb)
skb               794 net/ipv4/icmp.c 	net = dev_net(skb_dst(skb)->dev);
skb               802 net/ipv4/icmp.c 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
skb               805 net/ipv4/icmp.c 	icmph = icmp_hdr(skb);
skb               806 net/ipv4/icmp.c 	iph   = (const struct iphdr *)skb->data;
skb               878 net/ipv4/icmp.c 	    inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
skb               880 net/ipv4/icmp.c 				     &ip_hdr(skb)->saddr,
skb               882 net/ipv4/icmp.c 				     &iph->daddr, skb->dev->name);
skb               886 net/ipv4/icmp.c 	icmp_socket_deliver(skb, info);
skb               900 net/ipv4/icmp.c static bool icmp_redirect(struct sk_buff *skb)
skb               902 net/ipv4/icmp.c 	if (skb->len < sizeof(struct iphdr)) {
skb               903 net/ipv4/icmp.c 		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
skb               907 net/ipv4/icmp.c 	if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
skb               912 net/ipv4/icmp.c 	icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
skb               928 net/ipv4/icmp.c static bool icmp_echo(struct sk_buff *skb)
skb               932 net/ipv4/icmp.c 	net = dev_net(skb_dst(skb)->dev);
skb               936 net/ipv4/icmp.c 		icmp_param.data.icmph	   = *icmp_hdr(skb);
skb               938 net/ipv4/icmp.c 		icmp_param.skb		   = skb;
skb               940 net/ipv4/icmp.c 		icmp_param.data_len	   = skb->len;
skb               942 net/ipv4/icmp.c 		icmp_reply(&icmp_param, skb);
skb               955 net/ipv4/icmp.c static bool icmp_timestamp(struct sk_buff *skb)
skb               961 net/ipv4/icmp.c 	if (skb->len < 4)
skb               970 net/ipv4/icmp.c 	BUG_ON(skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4));
skb               972 net/ipv4/icmp.c 	icmp_param.data.icmph	   = *icmp_hdr(skb);
skb               975 net/ipv4/icmp.c 	icmp_param.skb		   = skb;
skb               979 net/ipv4/icmp.c 	icmp_reply(&icmp_param, skb);
skb               983 net/ipv4/icmp.c 	__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
skb               987 net/ipv4/icmp.c static bool icmp_discard(struct sk_buff *skb)
skb               996 net/ipv4/icmp.c int icmp_rcv(struct sk_buff *skb)
skb               999 net/ipv4/icmp.c 	struct rtable *rt = skb_rtable(skb);
skb              1003 net/ipv4/icmp.c 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
skb              1004 net/ipv4/icmp.c 		struct sec_path *sp = skb_sec_path(skb);
skb              1011 net/ipv4/icmp.c 		if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
skb              1014 net/ipv4/icmp.c 		nh = skb_network_offset(skb);
skb              1015 net/ipv4/icmp.c 		skb_set_network_header(skb, sizeof(*icmph));
skb              1017 net/ipv4/icmp.c 		if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
skb              1020 net/ipv4/icmp.c 		skb_set_network_header(skb, nh);
skb              1025 net/ipv4/icmp.c 	if (skb_checksum_simple_validate(skb))
skb              1028 net/ipv4/icmp.c 	if (!pskb_pull(skb, sizeof(*icmph)))
skb              1031 net/ipv4/icmp.c 	icmph = icmp_hdr(skb);
skb              1068 net/ipv4/icmp.c 	success = icmp_pointers[icmph->type].handler(skb);
skb              1071 net/ipv4/icmp.c 		consume_skb(skb);
skb              1076 net/ipv4/icmp.c 	kfree_skb(skb);
skb              1085 net/ipv4/icmp.c int icmp_err(struct sk_buff *skb, u32 info)
skb              1087 net/ipv4/icmp.c 	struct iphdr *iph = (struct iphdr *)skb->data;
skb              1089 net/ipv4/icmp.c 	struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
skb              1090 net/ipv4/icmp.c 	int type = icmp_hdr(skb)->type;
skb              1091 net/ipv4/icmp.c 	int code = icmp_hdr(skb)->code;
skb              1092 net/ipv4/icmp.c 	struct net *net = dev_net(skb->dev);
skb              1099 net/ipv4/icmp.c 		ping_err(skb, offset, info);
skb              1104 net/ipv4/icmp.c 		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP);
skb              1106 net/ipv4/icmp.c 		ipv4_redirect(skb, net, 0, IPPROTO_ICMP);
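
icmp_rcv above dispatches through an array of per-type handlers whose bool return value decides between consume_skb() (handled, not a drop) and kfree_skb() (error, counted as a drop). A minimal sketch of that table shape, using the same GNU range-initializer style as icmp_pointers:

	struct my_icmp_control {
		bool (*handler)(struct sk_buff *skb);
	};

	static bool my_discard(struct sk_buff *skb)
	{
		return true;			/* silently accept, as icmp_discard does */
	}

	static const struct my_icmp_control my_pointers[NR_ICMP_TYPES + 1] = {
		[0 ... NR_ICMP_TYPES] = { .handler = my_discard },
	};

	static void my_rcv_tail(struct sk_buff *skb, u8 type)
	{
		if (my_pointers[type].handler(skb))
			consume_skb(skb);
		else
			kfree_skb(skb);
	}
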
skb               350 net/ipv4/igmp.c 	struct sk_buff *skb;
skb               361 net/ipv4/igmp.c 		skb = alloc_skb(size + hlen + tlen,
skb               363 net/ipv4/igmp.c 		if (skb)
skb               369 net/ipv4/igmp.c 	skb->priority = TC_PRIO_CONTROL;
skb               375 net/ipv4/igmp.c 		kfree_skb(skb);
skb               379 net/ipv4/igmp.c 	skb_dst_set(skb, &rt->dst);
skb               380 net/ipv4/igmp.c 	skb->dev = dev;
skb               382 net/ipv4/igmp.c 	skb_reserve(skb, hlen);
skb               383 net/ipv4/igmp.c 	skb_tailroom_reserve(skb, mtu, tlen);
skb               385 net/ipv4/igmp.c 	skb_reset_network_header(skb);
skb               386 net/ipv4/igmp.c 	pip = ip_hdr(skb);
skb               387 net/ipv4/igmp.c 	skb_put(skb, sizeof(struct iphdr) + 4);
skb               402 net/ipv4/igmp.c 	ip_select_ident(net, skb, NULL);
skb               408 net/ipv4/igmp.c 	skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
skb               409 net/ipv4/igmp.c 	skb_put(skb, sizeof(*pig));
skb               410 net/ipv4/igmp.c 	pig = igmpv3_report_hdr(skb);
skb               416 net/ipv4/igmp.c 	return skb;
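
igmpv3_newpack above is the canonical from-scratch TX construction: allocate with extra head- and tailroom for the device, skb_reserve() the headroom, mark the network header, then claim space for each header with skb_put() as it is written front to back. A pared-down sketch:

	static struct sk_buff *my_build(struct net_device *dev, int payload)
	{
		int hlen = LL_RESERVED_SPACE(dev);	/* headroom for the link layer */
		int tlen = dev->needed_tailroom;
		struct sk_buff *skb;
		struct iphdr *iph;

		skb = alloc_skb(payload + hlen + tlen, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb->dev = dev;
		skb_reserve(skb, hlen);
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);
		skb_put(skb, sizeof(struct iphdr));	/* claim room for the IPv4 header */
		iph->version = 4;
		iph->ihl = sizeof(struct iphdr) >> 2;
		return skb;
	}
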
skb               419 net/ipv4/igmp.c static int igmpv3_sendpack(struct sk_buff *skb)
skb               421 net/ipv4/igmp.c 	struct igmphdr *pig = igmp_hdr(skb);
skb               422 net/ipv4/igmp.c 	const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb);
skb               424 net/ipv4/igmp.c 	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
skb               426 net/ipv4/igmp.c 	return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
skb               434 net/ipv4/igmp.c static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
skb               441 net/ipv4/igmp.c 	if (!skb) {
skb               442 net/ipv4/igmp.c 		skb = igmpv3_newpack(dev, mtu);
skb               443 net/ipv4/igmp.c 		if (!skb)
skb               446 net/ipv4/igmp.c 	pgr = skb_put(skb, sizeof(struct igmpv3_grec));
skb               451 net/ipv4/igmp.c 	pih = igmpv3_report_hdr(skb);
skb               454 net/ipv4/igmp.c 	return skb;
skb               457 net/ipv4/igmp.c #define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
skb               459 net/ipv4/igmp.c static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
skb               471 net/ipv4/igmp.c 		return skb;
skb               473 net/ipv4/igmp.c 		return skb;
skb               477 net/ipv4/igmp.c 		return skb;
skb               491 net/ipv4/igmp.c 	pih = skb ? igmpv3_report_hdr(skb) : NULL;
skb               496 net/ipv4/igmp.c 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
skb               497 net/ipv4/igmp.c 			if (skb)
skb               498 net/ipv4/igmp.c 				igmpv3_sendpack(skb);
skb               499 net/ipv4/igmp.c 			skb = igmpv3_newpack(dev, mtu);
skb               527 net/ipv4/igmp.c 		if (AVAILABLE(skb) < sizeof(__be32) +
skb               533 net/ipv4/igmp.c 			if (skb)
skb               534 net/ipv4/igmp.c 				igmpv3_sendpack(skb);
skb               535 net/ipv4/igmp.c 			skb = igmpv3_newpack(dev, mtu);
skb               540 net/ipv4/igmp.c 			skb = add_grhead(skb, pmc, type, &pgr, mtu);
skb               543 net/ipv4/igmp.c 		if (!skb)
skb               545 net/ipv4/igmp.c 		psrc = skb_put(skb, sizeof(__be32));
skb               568 net/ipv4/igmp.c 			return skb;
skb               571 net/ipv4/igmp.c 			if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
skb               572 net/ipv4/igmp.c 				igmpv3_sendpack(skb);
skb               573 net/ipv4/igmp.c 				skb = NULL; /* add_grhead will get a new one */
skb               575 net/ipv4/igmp.c 			skb = add_grhead(skb, pmc, type, &pgr, mtu);
skb               583 net/ipv4/igmp.c 	return skb;
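
add_grec above packs records with a fill-and-flush loop: while tailroom (AVAILABLE, i.e. skb_availroom) suffices, keep appending; when the next record would not fit, send the packet and start a fresh one. A sketch with my_send()/my_newpack()/MY_REC_SIZE as hypothetical stand-ins for igmpv3_sendpack()/igmpv3_newpack()/grec_size():

	static struct sk_buff *my_add_record(struct sk_buff *skb,
					     struct net_device *dev, __be32 rec)
	{
		__be32 *psrc;

		if (skb && skb_availroom(skb) < MY_REC_SIZE) {
			my_send(skb);			/* flush the full packet */
			skb = NULL;
		}
		if (!skb) {
			skb = my_newpack(dev, dev->mtu);
			if (!skb)
				return NULL;
		}
		psrc = skb_put(skb, sizeof(__be32));	/* append one record */
		*psrc = rec;
		return skb;
	}
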
skb               588 net/ipv4/igmp.c 	struct sk_buff *skb = NULL;
skb               605 net/ipv4/igmp.c 			skb = add_grec(skb, pmc, type, 0, 0);
skb               615 net/ipv4/igmp.c 		skb = add_grec(skb, pmc, type, 0, 0);
skb               618 net/ipv4/igmp.c 	if (!skb)
skb               620 net/ipv4/igmp.c 	return igmpv3_sendpack(skb);
skb               654 net/ipv4/igmp.c 	struct sk_buff *skb = NULL;
skb               667 net/ipv4/igmp.c 			skb = add_grec(skb, pmc, type, 1, 0);
skb               668 net/ipv4/igmp.c 			skb = add_grec(skb, pmc, dtype, 1, 1);
skb               673 net/ipv4/igmp.c 				skb = add_grec(skb, pmc, type, 1, 0);
skb               703 net/ipv4/igmp.c 		skb = add_grec(skb, pmc, type, 0, 0);
skb               704 net/ipv4/igmp.c 		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */
skb               712 net/ipv4/igmp.c 			skb = add_grec(skb, pmc, type, 0, 0);
skb               719 net/ipv4/igmp.c 	if (!skb)
skb               721 net/ipv4/igmp.c 	(void) igmpv3_sendpack(skb);
skb               727 net/ipv4/igmp.c 	struct sk_buff *skb;
skb               757 net/ipv4/igmp.c 	skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
skb               758 net/ipv4/igmp.c 	if (!skb) {
skb               762 net/ipv4/igmp.c 	skb->priority = TC_PRIO_CONTROL;
skb               764 net/ipv4/igmp.c 	skb_dst_set(skb, &rt->dst);
skb               766 net/ipv4/igmp.c 	skb_reserve(skb, hlen);
skb               768 net/ipv4/igmp.c 	skb_reset_network_header(skb);
skb               769 net/ipv4/igmp.c 	iph = ip_hdr(skb);
skb               770 net/ipv4/igmp.c 	skb_put(skb, sizeof(struct iphdr) + 4);
skb               780 net/ipv4/igmp.c 	ip_select_ident(net, skb, NULL);
skb               786 net/ipv4/igmp.c 	ih = skb_put(skb, sizeof(struct igmphdr));
skb               793 net/ipv4/igmp.c 	return ip_local_out(net, skb->sk, skb);
skb               933 net/ipv4/igmp.c static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
skb               936 net/ipv4/igmp.c 	struct igmphdr 		*ih = igmp_hdr(skb);
skb               937 net/ipv4/igmp.c 	struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
skb               984 net/ipv4/igmp.c 		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
skb               987 net/ipv4/igmp.c 		ih3 = igmpv3_query_hdr(skb);
skb               989 net/ipv4/igmp.c 			if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
skb               992 net/ipv4/igmp.c 			ih3 = igmpv3_query_hdr(skb);
skb              1061 net/ipv4/igmp.c int igmp_rcv(struct sk_buff *skb)
skb              1065 net/ipv4/igmp.c 	struct net_device *dev = skb->dev;
skb              1067 net/ipv4/igmp.c 	int len = skb->len;
skb              1071 net/ipv4/igmp.c 		dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif);
skb              1080 net/ipv4/igmp.c 	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
skb              1083 net/ipv4/igmp.c 	if (skb_checksum_simple_validate(skb))
skb              1086 net/ipv4/igmp.c 	ih = igmp_hdr(skb);
skb              1089 net/ipv4/igmp.c 		dropped = igmp_heard_query(in_dev, skb, len);
skb              1094 net/ipv4/igmp.c 		if (rt_is_output_route(skb_rtable(skb)))
skb              1097 net/ipv4/igmp.c 		if (skb->pkt_type == PACKET_MULTICAST ||
skb              1098 net/ipv4/igmp.c 		    skb->pkt_type == PACKET_BROADCAST)
skb              1103 net/ipv4/igmp.c 		return pim_rcv_v1(skb);
skb              1118 net/ipv4/igmp.c 		kfree_skb(skb);
skb              1120 net/ipv4/igmp.c 		consume_skb(skb);
skb              1482 net/ipv4/igmp.c static int ip_mc_check_iphdr(struct sk_buff *skb)
skb              1486 net/ipv4/igmp.c 	unsigned int offset = skb_network_offset(skb) + sizeof(*iph);
skb              1488 net/ipv4/igmp.c 	if (!pskb_may_pull(skb, offset))
skb              1491 net/ipv4/igmp.c 	iph = ip_hdr(skb);
skb              1493 net/ipv4/igmp.c 	if (iph->version != 4 || ip_hdrlen(skb) < sizeof(*iph))
skb              1496 net/ipv4/igmp.c 	offset += ip_hdrlen(skb) - sizeof(*iph);
skb              1498 net/ipv4/igmp.c 	if (!pskb_may_pull(skb, offset))
skb              1501 net/ipv4/igmp.c 	iph = ip_hdr(skb);
skb              1506 net/ipv4/igmp.c 	len = skb_network_offset(skb) + ntohs(iph->tot_len);
skb              1507 net/ipv4/igmp.c 	if (skb->len < len || len < offset)
skb              1510 net/ipv4/igmp.c 	skb_set_transport_header(skb, offset);
skb              1515 net/ipv4/igmp.c static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
skb              1517 net/ipv4/igmp.c 	unsigned int len = skb_transport_offset(skb);
skb              1521 net/ipv4/igmp.c 	return ip_mc_may_pull(skb, len) ? 0 : -EINVAL;
skb              1524 net/ipv4/igmp.c static int ip_mc_check_igmp_query(struct sk_buff *skb)
skb              1526 net/ipv4/igmp.c 	unsigned int transport_len = ip_transport_len(skb);
skb              1535 net/ipv4/igmp.c 		len = skb_transport_offset(skb) + sizeof(struct igmpv3_query);
skb              1536 net/ipv4/igmp.c 		if (!ip_mc_may_pull(skb, len))
skb              1543 net/ipv4/igmp.c 	if (!igmp_hdr(skb)->group &&
skb              1544 net/ipv4/igmp.c 	    ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP))
skb              1550 net/ipv4/igmp.c static int ip_mc_check_igmp_msg(struct sk_buff *skb)
skb              1552 net/ipv4/igmp.c 	switch (igmp_hdr(skb)->type) {
skb              1558 net/ipv4/igmp.c 		return ip_mc_check_igmp_reportv3(skb);
skb              1560 net/ipv4/igmp.c 		return ip_mc_check_igmp_query(skb);
skb              1566 net/ipv4/igmp.c static inline __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
skb              1568 net/ipv4/igmp.c 	return skb_checksum_simple_validate(skb);
skb              1571 net/ipv4/igmp.c static int ip_mc_check_igmp_csum(struct sk_buff *skb)
skb              1573 net/ipv4/igmp.c 	unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
skb              1574 net/ipv4/igmp.c 	unsigned int transport_len = ip_transport_len(skb);
skb              1577 net/ipv4/igmp.c 	if (!ip_mc_may_pull(skb, len))
skb              1580 net/ipv4/igmp.c 	skb_chk = skb_checksum_trimmed(skb, transport_len,
skb              1585 net/ipv4/igmp.c 	if (skb_chk != skb)
skb              1606 net/ipv4/igmp.c int ip_mc_check_igmp(struct sk_buff *skb)
skb              1608 net/ipv4/igmp.c 	int ret = ip_mc_check_iphdr(skb);
skb              1613 net/ipv4/igmp.c 	if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
skb              1616 net/ipv4/igmp.c 	ret = ip_mc_check_igmp_csum(skb);
skb              1620 net/ipv4/igmp.c 	return ip_mc_check_igmp_msg(skb);
skb               113 net/ipv4/inet_diag.c int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
skb               120 net/ipv4/inet_diag.c 	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
skb               127 net/ipv4/inet_diag.c 		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
skb               133 net/ipv4/inet_diag.c 			if (nla_put_u8(skb, INET_DIAG_TCLASS,
skb               138 net/ipv4/inet_diag.c 		    nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
skb               143 net/ipv4/inet_diag.c 	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
skb               160 net/ipv4/inet_diag.c 		if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
skb               174 net/ipv4/inet_diag.c 		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
skb               191 net/ipv4/inet_diag.c 	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
skb               204 net/ipv4/inet_diag.c 	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
skb               215 net/ipv4/inet_diag.c 		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
skb               220 net/ipv4/inet_diag.c 		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
skb               228 net/ipv4/inet_diag.c 		if (nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))
skb               260 net/ipv4/inet_diag.c 		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
skb               275 net/ipv4/inet_diag.c 			err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
skb               284 net/ipv4/inet_diag.c 		if (handler->idiag_get_aux(sk, net_admin, skb) < 0)
skb               297 net/ipv4/inet_diag.c 		if (sz && nla_put(skb, attr, sz, &info) < 0)
skb               302 net/ipv4/inet_diag.c 	nlmsg_end(skb, nlh);
skb               306 net/ipv4/inet_diag.c 	nlmsg_cancel(skb, nlh);
skb               312 net/ipv4/inet_diag.c 			      struct sk_buff *skb,
skb               319 net/ipv4/inet_diag.c 	return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
skb               324 net/ipv4/inet_diag.c 			       struct sk_buff *skb,
skb               333 net/ipv4/inet_diag.c 	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
skb               356 net/ipv4/inet_diag.c 	nlmsg_end(skb, nlh);
skb               360 net/ipv4/inet_diag.c static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
skb               369 net/ipv4/inet_diag.c 	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
skb               390 net/ipv4/inet_diag.c 	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
skb               394 net/ipv4/inet_diag.c 	nlmsg_end(skb, nlh);
skb               398 net/ipv4/inet_diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
skb               405 net/ipv4/inet_diag.c 		return inet_twsk_diag_fill(sk, skb, portid, seq,
skb               409 net/ipv4/inet_diag.c 		return inet_req_diag_fill(sk, skb, portid, seq,
skb               412 net/ipv4/inet_diag.c 	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
skb               784 net/ipv4/inet_diag.c 			      const struct sk_buff *skb)
skb               786 net/ipv4/inet_diag.c 	bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN);
skb               850 net/ipv4/inet_diag.c 			      struct sk_buff *skb,
skb               859 net/ipv4/inet_diag.c 	return inet_csk_diag_fill(sk, skb, r,
skb               860 net/ipv4/inet_diag.c 				  sk_user_ns(NETLINK_CB(cb->skb).sk),
skb               861 net/ipv4/inet_diag.c 				  NETLINK_CB(cb->skb).portid,
skb               892 net/ipv4/inet_diag.c void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
skb               896 net/ipv4/inet_diag.c 	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
skb               897 net/ipv4/inet_diag.c 	struct net *net = sock_net(skb->sk);
skb               937 net/ipv4/inet_diag.c 				if (inet_csk_diag_dump(sk, skb, cb, r,
skb              1016 net/ipv4/inet_diag.c 				res = sk_diag_fill(sk_arr[idx], skb, r,
skb              1017 net/ipv4/inet_diag.c 					   sk_user_ns(NETLINK_CB(cb->skb).sk),
skb              1018 net/ipv4/inet_diag.c 					   NETLINK_CB(cb->skb).portid,
skb              1043 net/ipv4/inet_diag.c static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb              1052 net/ipv4/inet_diag.c 		handler->dump(skb, cb, r, bc);
skb              1057 net/ipv4/inet_diag.c 	return err ? : skb->len;
skb              1060 net/ipv4/inet_diag.c static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              1068 net/ipv4/inet_diag.c 	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
skb              1083 net/ipv4/inet_diag.c static int inet_diag_dump_compat(struct sk_buff *skb,
skb              1100 net/ipv4/inet_diag.c 	return __inet_diag_dump(skb, cb, &req, bc);
skb              1118 net/ipv4/inet_diag.c static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
skb              1121 net/ipv4/inet_diag.c 	struct net *net = sock_net(skb->sk);
skb              1134 net/ipv4/inet_diag.c 			err = inet_diag_bc_audit(attr, skb);
skb              1142 net/ipv4/inet_diag.c 			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
skb              1146 net/ipv4/inet_diag.c 	return inet_diag_get_exact_compat(skb, nlh);
skb              1149 net/ipv4/inet_diag.c static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
skb              1152 net/ipv4/inet_diag.c 	struct net *net = sock_net(skb->sk);
skb              1165 net/ipv4/inet_diag.c 			err = inet_diag_bc_audit(attr, skb);
skb              1173 net/ipv4/inet_diag.c 			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
skb              1177 net/ipv4/inet_diag.c 	return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h));
skb              1181 net/ipv4/inet_diag.c int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
skb              1190 net/ipv4/inet_diag.c 	nlh = nlmsg_put(skb, 0, 0, SOCK_DIAG_BY_FAMILY, sizeof(*r), 0);
skb              1201 net/ipv4/inet_diag.c 	if ((err = nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))) {
skb              1202 net/ipv4/inet_diag.c 		nlmsg_cancel(skb, nlh);
skb              1209 net/ipv4/inet_diag.c 		nlmsg_cancel(skb, nlh);
skb              1214 net/ipv4/inet_diag.c 		? nla_reserve_64bit(skb, INET_DIAG_INFO,
skb              1224 net/ipv4/inet_diag.c 	nlmsg_end(skb, nlh);
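
inet_sk_diag_fill above uses nla_reserve_64bit() for INET_DIAG_INFO: the aligned attribute is reserved first and its payload filled in place, which avoids a bounce buffer and guarantees 64-bit alignment for struct tcp_info-style blobs. Sketch:

	static int my_put_info(struct sk_buff *skb, const void *info, size_t len)
	{
		struct nlattr *attr;

		attr = nla_reserve_64bit(skb, INET_DIAG_INFO, len, INET_DIAG_PAD);
		if (!attr)
			return -EMSGSIZE;

		memcpy(nla_data(attr), info, len);	/* fill the reserved payload in place */
		return 0;
	}
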
skb                44 net/ipv4/inet_fragment.c #define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
skb                46 net/ipv4/inet_fragment.c static void fragcb_clear(struct sk_buff *skb)
skb                48 net/ipv4/inet_fragment.c 	RB_CLEAR_NODE(&skb->rbnode);
skb                49 net/ipv4/inet_fragment.c 	FRAG_CB(skb)->next_frag = NULL;
skb                50 net/ipv4/inet_fragment.c 	FRAG_CB(skb)->frag_run_len = skb->len;
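
FRAG_CB above is the standard skb control-block idiom: per-fragment private state lives in the 48-byte skb->cb scratch area, and a BUILD_BUG_ON (see fragrun_create below) turns an oversized struct into a compile-time error rather than silent corruption. Sketch:

	struct my_cb {
		struct sk_buff	*next_frag;
		int		frag_run_len;
	};

	#define MY_CB(skb)	((struct my_cb *)((skb)->cb))

	static void my_cb_init(struct sk_buff *skb)
	{
		BUILD_BUG_ON(sizeof(struct my_cb) > sizeof(skb->cb));
		MY_CB(skb)->next_frag = NULL;
		MY_CB(skb)->frag_run_len = skb->len;
	}
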
skb                55 net/ipv4/inet_fragment.c 				   struct sk_buff *skb)
skb                57 net/ipv4/inet_fragment.c 	fragcb_clear(skb);
skb                59 net/ipv4/inet_fragment.c 	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
skb                60 net/ipv4/inet_fragment.c 	FRAG_CB(q->fragments_tail)->next_frag = skb;
skb                61 net/ipv4/inet_fragment.c 	q->fragments_tail = skb;
skb                65 net/ipv4/inet_fragment.c static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
skb                67 net/ipv4/inet_fragment.c 	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
skb                68 net/ipv4/inet_fragment.c 	fragcb_clear(skb);
skb                71 net/ipv4/inet_fragment.c 		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
skb                74 net/ipv4/inet_fragment.c 		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
skb                75 net/ipv4/inet_fragment.c 	rb_insert_color(&skb->rbnode, &q->rb_fragments);
skb                77 net/ipv4/inet_fragment.c 	q->fragments_tail = skb;
skb                78 net/ipv4/inet_fragment.c 	q->last_run_head = skb;
skb               238 net/ipv4/inet_fragment.c 		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
skb               241 net/ipv4/inet_fragment.c 		rb_erase(&skb->rbnode, root);
skb               242 net/ipv4/inet_fragment.c 		while (skb) {
skb               243 net/ipv4/inet_fragment.c 			struct sk_buff *next = FRAG_CB(skb)->next_frag;
skb               245 net/ipv4/inet_fragment.c 			sum += skb->truesize;
skb               246 net/ipv4/inet_fragment.c 			kfree_skb(skb);
skb               247 net/ipv4/inet_fragment.c 			skb = next;
skb               344 net/ipv4/inet_fragment.c int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
skb               359 net/ipv4/inet_fragment.c 		fragrun_create(q, skb);  /* First fragment. */
skb               366 net/ipv4/inet_fragment.c 			fragrun_append_to_last(q, skb);
skb               368 net/ipv4/inet_fragment.c 			fragrun_create(q, skb);
skb               397 net/ipv4/inet_fragment.c 		fragcb_clear(skb);
skb               398 net/ipv4/inet_fragment.c 		rb_link_node(&skb->rbnode, parent, rbn);
skb               399 net/ipv4/inet_fragment.c 		rb_insert_color(&skb->rbnode, &q->rb_fragments);
skb               402 net/ipv4/inet_fragment.c 	skb->ip_defrag_offset = offset;
skb               408 net/ipv4/inet_fragment.c void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
skb               415 net/ipv4/inet_fragment.c 	if (head != skb) {
skb               416 net/ipv4/inet_fragment.c 		fp = skb_clone(skb, GFP_ATOMIC);
skb               419 net/ipv4/inet_fragment.c 		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
skb               420 net/ipv4/inet_fragment.c 		if (RB_EMPTY_NODE(&skb->rbnode))
skb               423 net/ipv4/inet_fragment.c 			rb_replace_node(&skb->rbnode, &fp->rbnode,
skb               425 net/ipv4/inet_fragment.c 		if (q->fragments_tail == skb)
skb               427 net/ipv4/inet_fragment.c 		skb_morph(skb, head);
skb               428 net/ipv4/inet_fragment.c 		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
skb               429 net/ipv4/inet_fragment.c 		rb_replace_node(&head->rbnode, &skb->rbnode,
skb               432 net/ipv4/inet_fragment.c 		head = skb;
skb               547 net/ipv4/inet_fragment.c 	struct sk_buff *head, *skb;
skb               552 net/ipv4/inet_fragment.c 	skb = FRAG_CB(head)->next_frag;
skb               553 net/ipv4/inet_fragment.c 	if (skb)
skb               554 net/ipv4/inet_fragment.c 		rb_replace_node(&head->rbnode, &skb->rbnode,
skb               259 net/ipv4/inet_hashtables.c 				struct sk_buff *skb, int doff,
skb               264 net/ipv4/inet_hashtables.c 	bool exact_dif = inet_exact_dif_match(net, skb);
skb               279 net/ipv4/inet_hashtables.c 							       skb, doff);
skb               293 net/ipv4/inet_hashtables.c 				    struct sk_buff *skb, int doff,
skb               305 net/ipv4/inet_hashtables.c 	result = inet_lhash2_lookup(net, ilb2, skb, doff,
skb               315 net/ipv4/inet_hashtables.c 	result = inet_lhash2_lookup(net, ilb2, skb, doff,
skb               340 net/ipv4/inet_hashtables.c void sock_edemux(struct sk_buff *skb)
skb               342 net/ipv4/inet_hashtables.c 	sock_gen_put(skb->sk);
skb                43 net/ipv4/ip_forward.c static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
skb                45 net/ipv4/ip_forward.c 	if (skb->len <= mtu)
skb                48 net/ipv4/ip_forward.c 	if (unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0))
skb                52 net/ipv4/ip_forward.c 	if (unlikely(IPCB(skb)->frag_max_size > mtu))
skb                55 net/ipv4/ip_forward.c 	if (skb->ignore_df)
skb                58 net/ipv4/ip_forward.c 	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
skb                65 net/ipv4/ip_forward.c static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                67 net/ipv4/ip_forward.c 	struct ip_options *opt	= &(IPCB(skb)->opt);
skb                70 net/ipv4/ip_forward.c 	__IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
skb                73 net/ipv4/ip_forward.c 	if (skb->offload_l3_fwd_mark) {
skb                74 net/ipv4/ip_forward.c 		consume_skb(skb);
skb                80 net/ipv4/ip_forward.c 		ip_forward_options(skb);
skb                82 net/ipv4/ip_forward.c 	skb->tstamp = 0;
skb                83 net/ipv4/ip_forward.c 	return dst_output(net, sk, skb);
skb                86 net/ipv4/ip_forward.c int ip_forward(struct sk_buff *skb)
skb                91 net/ipv4/ip_forward.c 	struct ip_options *opt	= &(IPCB(skb)->opt);
skb                95 net/ipv4/ip_forward.c 	if (skb->pkt_type != PACKET_HOST)
skb                98 net/ipv4/ip_forward.c 	if (unlikely(skb->sk))
skb               101 net/ipv4/ip_forward.c 	if (skb_warn_if_lro(skb))
skb               104 net/ipv4/ip_forward.c 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb))
skb               107 net/ipv4/ip_forward.c 	if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
skb               110 net/ipv4/ip_forward.c 	skb_forward_csum(skb);
skb               111 net/ipv4/ip_forward.c 	net = dev_net(skb->dev);
skb               118 net/ipv4/ip_forward.c 	if (ip_hdr(skb)->ttl <= 1)
skb               121 net/ipv4/ip_forward.c 	if (!xfrm4_route_forward(skb))
skb               124 net/ipv4/ip_forward.c 	rt = skb_rtable(skb);
skb               129 net/ipv4/ip_forward.c 	IPCB(skb)->flags |= IPSKB_FORWARDED;
skb               131 net/ipv4/ip_forward.c 	if (ip_exceeds_mtu(skb, mtu)) {
skb               133 net/ipv4/ip_forward.c 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
skb               139 net/ipv4/ip_forward.c 	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len))
skb               141 net/ipv4/ip_forward.c 	iph = ip_hdr(skb);
skb               150 net/ipv4/ip_forward.c 	if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
skb               151 net/ipv4/ip_forward.c 	    !skb_sec_path(skb))
skb               152 net/ipv4/ip_forward.c 		ip_rt_send_redirect(skb);
skb               155 net/ipv4/ip_forward.c 		skb->priority = rt_tos2priority(iph->tos);
skb               158 net/ipv4/ip_forward.c 		       net, NULL, skb, skb->dev, rt->dst.dev,
skb               165 net/ipv4/ip_forward.c 	 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0);
skb               171 net/ipv4/ip_forward.c 	icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
skb               173 net/ipv4/ip_forward.c 	kfree_skb(skb);
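
ip_exceeds_mtu and the ICMP_FRAG_NEEDED path above implement path-MTU discovery on the forwarding slow path: a DF packet larger than the egress MTU is bounced to the sender with ICMP_FRAG_NEEDED carrying the MTU. A condensed sketch of the decision:

	static int my_check_mtu(struct sk_buff *skb, unsigned int mtu)
	{
		if (skb->len <= mtu || skb->ignore_df)
			return 0;
		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			return 0;			/* each GSO segment will fit */
		if (ip_hdr(skb)->frag_off & htons(IP_DF)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));		/* feed PMTU discovery */
			return -EMSGSIZE;
		}
		return 0;				/* no DF: may be fragmented later */
	}
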
skb                78 net/ipv4/ip_fragment.c static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
skb               272 net/ipv4/ip_fragment.c static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
skb               285 net/ipv4/ip_fragment.c 	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
skb               292 net/ipv4/ip_fragment.c 	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
skb               293 net/ipv4/ip_fragment.c 	offset = ntohs(ip_hdr(skb)->frag_off);
skb               297 net/ipv4/ip_fragment.c 	ihl = ip_hdrlen(skb);
skb               300 net/ipv4/ip_fragment.c 	end = offset + skb->len - skb_network_offset(skb) - ihl;
skb               316 net/ipv4/ip_fragment.c 			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
skb               317 net/ipv4/ip_fragment.c 				skb->ip_summed = CHECKSUM_NONE;
skb               330 net/ipv4/ip_fragment.c 	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
skb               333 net/ipv4/ip_fragment.c 	err = pskb_trim_rcsum(skb, end - offset);
skb               338 net/ipv4/ip_fragment.c 	dev = skb->dev;
skb               343 net/ipv4/ip_fragment.c 	err = inet_frag_queue_insert(&qp->q, skb, offset, end);
skb               350 net/ipv4/ip_fragment.c 	qp->q.stamp = skb->tstamp;
skb               351 net/ipv4/ip_fragment.c 	qp->q.meat += skb->len;
skb               353 net/ipv4/ip_fragment.c 	add_frag_mem_limit(qp->q.fqdir, skb->truesize);
skb               357 net/ipv4/ip_fragment.c 	fragsize = skb->len + ihl;
skb               362 net/ipv4/ip_fragment.c 	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
skb               368 net/ipv4/ip_fragment.c 		unsigned long orefdst = skb->_skb_refdst;
skb               370 net/ipv4/ip_fragment.c 		skb->_skb_refdst = 0UL;
skb               371 net/ipv4/ip_fragment.c 		err = ip_frag_reasm(qp, skb, prev_tail, dev);
skb               372 net/ipv4/ip_fragment.c 		skb->_skb_refdst = orefdst;
skb               378 net/ipv4/ip_fragment.c 	skb_dst_drop(skb);
skb               383 net/ipv4/ip_fragment.c 		kfree_skb(skb);
skb               392 net/ipv4/ip_fragment.c 	kfree_skb(skb);
skb               402 net/ipv4/ip_fragment.c static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
skb               420 net/ipv4/ip_fragment.c 	reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
skb               424 net/ipv4/ip_fragment.c 	len = ip_hdrlen(skb) + qp->q.len;
skb               429 net/ipv4/ip_fragment.c 	inet_frag_reasm_finish(&qp->q, skb, reasm_data,
skb               432 net/ipv4/ip_fragment.c 	skb->dev = dev;
skb               433 net/ipv4/ip_fragment.c 	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
skb               435 net/ipv4/ip_fragment.c 	iph = ip_hdr(skb);
skb               448 net/ipv4/ip_fragment.c 		IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
skb               474 net/ipv4/ip_fragment.c int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
skb               476 net/ipv4/ip_fragment.c 	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
skb               481 net/ipv4/ip_fragment.c 	skb_orphan(skb);
skb               484 net/ipv4/ip_fragment.c 	qp = ip_find(net, ip_hdr(skb), user, vif);
skb               490 net/ipv4/ip_fragment.c 		ret = ip_frag_queue(qp, skb);
skb               498 net/ipv4/ip_fragment.c 	kfree_skb(skb);
skb               503 net/ipv4/ip_fragment.c struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
skb               509 net/ipv4/ip_fragment.c 	if (skb->protocol != htons(ETH_P_IP))
skb               510 net/ipv4/ip_fragment.c 		return skb;
skb               512 net/ipv4/ip_fragment.c 	netoff = skb_network_offset(skb);
skb               514 net/ipv4/ip_fragment.c 	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
skb               515 net/ipv4/ip_fragment.c 		return skb;
skb               518 net/ipv4/ip_fragment.c 		return skb;
skb               521 net/ipv4/ip_fragment.c 	if (skb->len < netoff + len || len < (iph.ihl * 4))
skb               522 net/ipv4/ip_fragment.c 		return skb;
skb               525 net/ipv4/ip_fragment.c 		skb = skb_share_check(skb, GFP_ATOMIC);
skb               526 net/ipv4/ip_fragment.c 		if (skb) {
skb               527 net/ipv4/ip_fragment.c 			if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
skb               528 net/ipv4/ip_fragment.c 				kfree_skb(skb);
skb               531 net/ipv4/ip_fragment.c 			if (pskb_trim_rcsum(skb, netoff + len)) {
skb               532 net/ipv4/ip_fragment.c 				kfree_skb(skb);
skb               535 net/ipv4/ip_fragment.c 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
skb               536 net/ipv4/ip_fragment.c 			if (ip_defrag(net, skb, user))
skb               538 net/ipv4/ip_fragment.c 			skb_clear_hash(skb);
skb               541 net/ipv4/ip_fragment.c 	return skb;
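
ip_check_defrag() above is the entry point for taps that may see fragments before the stack reassembles them: it returns the skb untouched when it is not a fragment, NULL when ip_defrag() absorbed the fragment into a reassembly queue, and the rebuilt datagram once the last fragment arrives. A hedged usage sketch (IP_DEFRAG_AF_PACKET is the defrag-user id AF_PACKET really passes):

        #include <net/ip.h>

        /* Sketch: a NULL return means "consumed, stop processing". */
        static struct sk_buff *toy_tap_defrag(struct net *net,
                                              struct sk_buff *skb)
        {
                return ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
        }
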
skb               111 net/ipv4/ip_gre.c static void erspan_build_header(struct sk_buff *skb,
skb               119 net/ipv4/ip_gre.c static int ipgre_err(struct sk_buff *skb, u32 info,
skb               136 net/ipv4/ip_gre.c 	struct net *net = dev_net(skb->dev);
skb               139 net/ipv4/ip_gre.c 	const int type = icmp_hdr(skb)->type;
skb               140 net/ipv4/ip_gre.c 	const int code = icmp_hdr(skb)->code;
skb               152 net/ipv4/ip_gre.c 	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
skb               153 net/ipv4/ip_gre.c 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
skb               182 net/ipv4/ip_gre.c 		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
skb               191 net/ipv4/ip_gre.c            !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
skb               212 net/ipv4/ip_gre.c static void gre_err(struct sk_buff *skb, u32 info)
skb               228 net/ipv4/ip_gre.c 	const struct iphdr *iph = (struct iphdr *)skb->data;
skb               229 net/ipv4/ip_gre.c 	const int type = icmp_hdr(skb)->type;
skb               230 net/ipv4/ip_gre.c 	const int code = icmp_hdr(skb)->code;
skb               233 net/ipv4/ip_gre.c 	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
skb               238 net/ipv4/ip_gre.c 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
skb               239 net/ipv4/ip_gre.c 				 skb->dev->ifindex, IPPROTO_GRE);
skb               243 net/ipv4/ip_gre.c 		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
skb               248 net/ipv4/ip_gre.c 	ipgre_err(skb, info, &tpi);
skb               251 net/ipv4/ip_gre.c static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
skb               254 net/ipv4/ip_gre.c 	struct net *net = dev_net(skb->dev);
skb               266 net/ipv4/ip_gre.c 	iph = ip_hdr(skb);
skb               267 net/ipv4/ip_gre.c 	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
skb               270 net/ipv4/ip_gre.c 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
skb               276 net/ipv4/ip_gre.c 		if (unlikely(!pskb_may_pull(skb, len)))
skb               279 net/ipv4/ip_gre.c 		if (__iptunnel_pull_header(skb,
skb               296 net/ipv4/ip_gre.c 			tun_dst = ip_tun_rx_dst(skb, flags,
skb               305 net/ipv4/ip_gre.c 			gh = skb_network_header(skb) +
skb               306 net/ipv4/ip_gre.c 			     skb_network_header_len(skb);
skb               320 net/ipv4/ip_gre.c 		skb_reset_mac_header(skb);
skb               321 net/ipv4/ip_gre.c 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
skb               327 net/ipv4/ip_gre.c 	kfree_skb(skb);
skb               331 net/ipv4/ip_gre.c static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
skb               338 net/ipv4/ip_gre.c 	iph = ip_hdr(skb);
skb               339 net/ipv4/ip_gre.c 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
skb               343 net/ipv4/ip_gre.c 		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
skb               348 net/ipv4/ip_gre.c 			skb_pop_mac_header(skb);
skb               350 net/ipv4/ip_gre.c 			skb_reset_mac_header(skb);
skb               357 net/ipv4/ip_gre.c 			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
skb               362 net/ipv4/ip_gre.c 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
skb               368 net/ipv4/ip_gre.c 	kfree_skb(skb);
skb               372 net/ipv4/ip_gre.c static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
skb               375 net/ipv4/ip_gre.c 	struct net *net = dev_net(skb->dev);
skb               384 net/ipv4/ip_gre.c 	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
skb               390 net/ipv4/ip_gre.c 		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
skb               395 net/ipv4/ip_gre.c static int gre_rcv(struct sk_buff *skb)
skb               402 net/ipv4/ip_gre.c 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
skb               404 net/ipv4/ip_gre.c 		if (rt_is_output_route(skb_rtable(skb)))
skb               409 net/ipv4/ip_gre.c 	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
skb               415 net/ipv4/ip_gre.c 		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
skb               420 net/ipv4/ip_gre.c 	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
skb               424 net/ipv4/ip_gre.c 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
skb               426 net/ipv4/ip_gre.c 	kfree_skb(skb);
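
gre_rcv() above parses the GRE header once, hands ERSPAN traffic to erspan_rcv(), tries ipgre_rcv() otherwise, and answers a packet no tunnel claims the way an unbound port would. A condensed sketch of that dispatch, with toy_deliver() as a hypothetical stand-in for the erspan_rcv()/ipgre_rcv() pair:

        #include <linux/if_ether.h>
        #include <net/gre.h>
        #include <net/icmp.h>
        #include <net/ip_tunnels.h>

        static int toy_deliver(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                               int hdr_len);        /* hypothetical */

        static int toy_gre_rcv(struct sk_buff *skb)
        {
                struct tnl_ptk_info tpi;
                bool csum_err = false;
                int hdr_len;

                hdr_len = gre_parse_header(skb, &tpi, &csum_err,
                                           htons(ETH_P_IP), 0);
                if (hdr_len < 0)
                        goto drop;

                if (toy_deliver(skb, &tpi, hdr_len) == PACKET_RCVD)
                        return 0;

                /* Nobody claimed the packet: report an unreachable port. */
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
        drop:
                kfree_skb(skb);
                return 0;
        }
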
skb               430 net/ipv4/ip_gre.c static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
skb               440 net/ipv4/ip_gre.c 	gre_build_header(skb, tunnel->tun_hlen,
skb               444 net/ipv4/ip_gre.c 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
skb               447 net/ipv4/ip_gre.c static int gre_handle_offloads(struct sk_buff *skb, bool csum)
skb               449 net/ipv4/ip_gre.c 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
skb               452 net/ipv4/ip_gre.c static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
skb               461 net/ipv4/ip_gre.c 	tun_info = skb_tunnel_info(skb);
skb               469 net/ipv4/ip_gre.c 	if (skb_cow_head(skb, dev->needed_headroom))
skb               473 net/ipv4/ip_gre.c 	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
skb               478 net/ipv4/ip_gre.c 	gre_build_header(skb, tunnel_hlen, flags, proto,
skb               482 net/ipv4/ip_gre.c 	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
skb               487 net/ipv4/ip_gre.c 	kfree_skb(skb);
skb               491 net/ipv4/ip_gre.c static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
skb               504 net/ipv4/ip_gre.c 	tun_info = skb_tunnel_info(skb);
skb               520 net/ipv4/ip_gre.c 	if (skb_cow_head(skb, dev->needed_headroom))
skb               523 net/ipv4/ip_gre.c 	if (gre_handle_offloads(skb, false))
skb               526 net/ipv4/ip_gre.c 	if (skb->len > dev->mtu + dev->hard_header_len) {
skb               527 net/ipv4/ip_gre.c 		pskb_trim(skb, dev->mtu + dev->hard_header_len);
skb               531 net/ipv4/ip_gre.c 	nhoff = skb_network_header(skb) - skb_mac_header(skb);
skb               532 net/ipv4/ip_gre.c 	if (skb->protocol == htons(ETH_P_IP) &&
skb               533 net/ipv4/ip_gre.c 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
skb               536 net/ipv4/ip_gre.c 	thoff = skb_transport_header(skb) - skb_mac_header(skb);
skb               537 net/ipv4/ip_gre.c 	if (skb->protocol == htons(ETH_P_IPV6) &&
skb               538 net/ipv4/ip_gre.c 	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
skb               542 net/ipv4/ip_gre.c 		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
skb               546 net/ipv4/ip_gre.c 		erspan_build_header_v2(skb,
skb               556 net/ipv4/ip_gre.c 	gre_build_header(skb, 8, TUNNEL_SEQ,
skb               559 net/ipv4/ip_gre.c 	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
skb               564 net/ipv4/ip_gre.c 	kfree_skb(skb);
skb               568 net/ipv4/ip_gre.c static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
skb               570 net/ipv4/ip_gre.c 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
skb               581 net/ipv4/ip_gre.c 			    skb->mark, skb_get_hash(skb));
skb               591 net/ipv4/ip_gre.c static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
skb               597 net/ipv4/ip_gre.c 	if (!pskb_inet_may_pull(skb))
skb               601 net/ipv4/ip_gre.c 		gre_fb_xmit(skb, dev, skb->protocol);
skb               607 net/ipv4/ip_gre.c 		if (skb_cow_head(skb, dev->needed_headroom -
skb               611 net/ipv4/ip_gre.c 		tnl_params = (const struct iphdr *)skb->data;
skb               616 net/ipv4/ip_gre.c 		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
skb               617 net/ipv4/ip_gre.c 		skb_reset_mac_header(skb);
skb               619 net/ipv4/ip_gre.c 		if (skb_cow_head(skb, dev->needed_headroom))
skb               625 net/ipv4/ip_gre.c 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
skb               628 net/ipv4/ip_gre.c 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
skb               632 net/ipv4/ip_gre.c 	kfree_skb(skb);
skb               637 net/ipv4/ip_gre.c static netdev_tx_t erspan_xmit(struct sk_buff *skb,
skb               644 net/ipv4/ip_gre.c 	if (!pskb_inet_may_pull(skb))
skb               648 net/ipv4/ip_gre.c 		erspan_fb_xmit(skb, dev);
skb               652 net/ipv4/ip_gre.c 	if (gre_handle_offloads(skb, false))
skb               655 net/ipv4/ip_gre.c 	if (skb_cow_head(skb, dev->needed_headroom))
skb               658 net/ipv4/ip_gre.c 	if (skb->len > dev->mtu + dev->hard_header_len) {
skb               659 net/ipv4/ip_gre.c 		pskb_trim(skb, dev->mtu + dev->hard_header_len);
skb               665 net/ipv4/ip_gre.c 		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
skb               670 net/ipv4/ip_gre.c 		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
skb               679 net/ipv4/ip_gre.c 	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
skb               683 net/ipv4/ip_gre.c 	kfree_skb(skb);
skb               688 net/ipv4/ip_gre.c static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
skb               693 net/ipv4/ip_gre.c 	if (!pskb_inet_may_pull(skb))
skb               697 net/ipv4/ip_gre.c 		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
skb               701 net/ipv4/ip_gre.c 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
skb               704 net/ipv4/ip_gre.c 	if (skb_cow_head(skb, dev->needed_headroom))
skb               707 net/ipv4/ip_gre.c 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
skb               711 net/ipv4/ip_gre.c 	kfree_skb(skb);
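
The three transmit handlers above (ipgre_xmit, erspan_xmit, gre_tap_xmit) share one prologue: check that the inner network header is pullable, flag GSO/checksum state for GRE encapsulation, guarantee headroom for the outer headers, and on any failure free the skb yet still return NETDEV_TX_OK. A hedged sketch of that shape (the tx_dropped accounting follows this file's free_skb labels, which the index does not show):

        #include <linux/netdevice.h>
        #include <net/ip_tunnels.h>

        static netdev_tx_t toy_gre_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
        {
                if (!pskb_inet_may_pull(skb))
                        goto free_skb;

                /* What gre_handle_offloads() does in the no-csum case. */
                if (iptunnel_handle_offloads(skb, SKB_GSO_GRE))
                        goto free_skb;

                /* Room for the outer IP + GRE headers. */
                if (skb_cow_head(skb, dev->needed_headroom))
                        goto free_skb;

                /* ... gre_build_header() and ip_tunnel_xmit() go here ... */
                return NETDEV_TX_OK;

        free_skb:
                kfree_skb(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
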
skb               815 net/ipv4/ip_gre.c static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
skb               823 net/ipv4/ip_gre.c 	iph = skb_push(skb, t->hlen + sizeof(*iph));
skb               841 net/ipv4/ip_gre.c static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
skb               843 net/ipv4/ip_gre.c 	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
skb              1449 net/ipv4/ip_gre.c static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              1459 net/ipv4/ip_gre.c 		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
skb              1463 net/ipv4/ip_gre.c 			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
skb              1466 net/ipv4/ip_gre.c 			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
skb              1468 net/ipv4/ip_gre.c 			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
skb              1473 net/ipv4/ip_gre.c 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
skb              1474 net/ipv4/ip_gre.c 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
skb              1476 net/ipv4/ip_gre.c 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
skb              1478 net/ipv4/ip_gre.c 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
skb              1479 net/ipv4/ip_gre.c 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
skb              1480 net/ipv4/ip_gre.c 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
skb              1481 net/ipv4/ip_gre.c 	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
skb              1482 net/ipv4/ip_gre.c 	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
skb              1483 net/ipv4/ip_gre.c 	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
skb              1484 net/ipv4/ip_gre.c 	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
skb              1486 net/ipv4/ip_gre.c 	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
skb              1489 net/ipv4/ip_gre.c 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
skb              1491 net/ipv4/ip_gre.c 	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
skb              1493 net/ipv4/ip_gre.c 	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
skb              1495 net/ipv4/ip_gre.c 	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
skb              1499 net/ipv4/ip_gre.c 	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
skb              1503 net/ipv4/ip_gre.c 		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
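
ipgre_fill_info() above is the standard netlink dump idiom: every nla_put_*() fails only when the message skb runs out of tailroom, so the calls chain with || into a single nla_put_failure exit that reports -EMSGSIZE. In miniature:

        #include <linux/if_tunnel.h>
        #include <net/netlink.h>
        #include <net/ip_tunnels.h>

        static int toy_fill_info(struct sk_buff *skb,
                                 const struct ip_tunnel_parm *p)
        {
                if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
                    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl))
                        goto nla_put_failure;
                return 0;

        nla_put_failure:
                return -EMSGSIZE;
        }
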
skb               148 net/ipv4/ip_input.c bool ip_call_ra_chain(struct sk_buff *skb)
skb               151 net/ipv4/ip_input.c 	u8 protocol = ip_hdr(skb)->protocol;
skb               153 net/ipv4/ip_input.c 	struct net_device *dev = skb->dev;
skb               165 net/ipv4/ip_input.c 			if (ip_is_fragment(ip_hdr(skb))) {
skb               166 net/ipv4/ip_input.c 				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
skb               170 net/ipv4/ip_input.c 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
skb               179 net/ipv4/ip_input.c 		raw_rcv(last, skb);
skb               187 net/ipv4/ip_input.c void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
skb               193 net/ipv4/ip_input.c 	raw = raw_local_deliver(skb, protocol);
skb               198 net/ipv4/ip_input.c 			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
skb               199 net/ipv4/ip_input.c 				kfree_skb(skb);
skb               202 net/ipv4/ip_input.c 			nf_reset_ct(skb);
skb               205 net/ipv4/ip_input.c 				      skb);
skb               213 net/ipv4/ip_input.c 			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
skb               215 net/ipv4/ip_input.c 				icmp_send(skb, ICMP_DEST_UNREACH,
skb               218 net/ipv4/ip_input.c 			kfree_skb(skb);
skb               221 net/ipv4/ip_input.c 			consume_skb(skb);
skb               226 net/ipv4/ip_input.c static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               228 net/ipv4/ip_input.c 	__skb_pull(skb, skb_network_header_len(skb));
skb               231 net/ipv4/ip_input.c 	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
skb               240 net/ipv4/ip_input.c int ip_local_deliver(struct sk_buff *skb)
skb               245 net/ipv4/ip_input.c 	struct net *net = dev_net(skb->dev);
skb               247 net/ipv4/ip_input.c 	if (ip_is_fragment(ip_hdr(skb))) {
skb               248 net/ipv4/ip_input.c 		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
skb               253 net/ipv4/ip_input.c 		       net, NULL, skb, skb->dev, NULL,
skb               257 net/ipv4/ip_input.c static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
skb               269 net/ipv4/ip_input.c 	if (skb_cow(skb, skb_headroom(skb))) {
skb               274 net/ipv4/ip_input.c 	iph = ip_hdr(skb);
skb               275 net/ipv4/ip_input.c 	opt = &(IPCB(skb)->opt);
skb               278 net/ipv4/ip_input.c 	if (ip_options_compile(dev_net(dev), opt, skb)) {
skb               296 net/ipv4/ip_input.c 		if (ip_options_rcv_srr(skb, dev))
skb               308 net/ipv4/ip_input.c 			      struct sk_buff *skb, struct net_device *dev)
skb               310 net/ipv4/ip_input.c 	const struct iphdr *iph = ip_hdr(skb);
skb               311 net/ipv4/ip_input.c 	int (*edemux)(struct sk_buff *skb);
skb               316 net/ipv4/ip_input.c 	    !skb_dst(skb) &&
skb               317 net/ipv4/ip_input.c 	    !skb->sk &&
skb               325 net/ipv4/ip_input.c 					      udp_v4_early_demux, skb);
skb               329 net/ipv4/ip_input.c 			iph = ip_hdr(skb);
skb               337 net/ipv4/ip_input.c 	if (!skb_valid_dst(skb)) {
skb               338 net/ipv4/ip_input.c 		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
skb               345 net/ipv4/ip_input.c 	if (unlikely(skb_dst(skb)->tclassid)) {
skb               347 net/ipv4/ip_input.c 		u32 idx = skb_dst(skb)->tclassid;
skb               349 net/ipv4/ip_input.c 		st[idx&0xFF].o_bytes += skb->len;
skb               351 net/ipv4/ip_input.c 		st[(idx>>16)&0xFF].i_bytes += skb->len;
skb               355 net/ipv4/ip_input.c 	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
skb               358 net/ipv4/ip_input.c 	rt = skb_rtable(skb);
skb               360 net/ipv4/ip_input.c 		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
skb               362 net/ipv4/ip_input.c 		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
skb               363 net/ipv4/ip_input.c 	} else if (skb->pkt_type == PACKET_BROADCAST ||
skb               364 net/ipv4/ip_input.c 		   skb->pkt_type == PACKET_MULTICAST) {
skb               390 net/ipv4/ip_input.c 	kfree_skb(skb);
skb               399 net/ipv4/ip_input.c static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               401 net/ipv4/ip_input.c 	struct net_device *dev = skb->dev;
skb               407 net/ipv4/ip_input.c 	skb = l3mdev_ip_rcv(skb);
skb               408 net/ipv4/ip_input.c 	if (!skb)
skb               411 net/ipv4/ip_input.c 	ret = ip_rcv_finish_core(net, sk, skb, dev);
skb               413 net/ipv4/ip_input.c 		ret = dst_input(skb);
skb               420 net/ipv4/ip_input.c static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
skb               428 net/ipv4/ip_input.c 	if (skb->pkt_type == PACKET_OTHERHOST)
skb               431 net/ipv4/ip_input.c 	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
skb               433 net/ipv4/ip_input.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               434 net/ipv4/ip_input.c 	if (!skb) {
skb               439 net/ipv4/ip_input.c 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
skb               442 net/ipv4/ip_input.c 	iph = ip_hdr(skb);
skb               463 net/ipv4/ip_input.c 		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
skb               465 net/ipv4/ip_input.c 	if (!pskb_may_pull(skb, iph->ihl*4))
skb               468 net/ipv4/ip_input.c 	iph = ip_hdr(skb);
skb               474 net/ipv4/ip_input.c 	if (skb->len < len) {
skb               484 net/ipv4/ip_input.c 	if (pskb_trim_rcsum(skb, len)) {
skb               489 net/ipv4/ip_input.c 	iph = ip_hdr(skb);
skb               490 net/ipv4/ip_input.c 	skb->transport_header = skb->network_header + iph->ihl*4;
skb               493 net/ipv4/ip_input.c 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
skb               494 net/ipv4/ip_input.c 	IPCB(skb)->iif = skb->skb_iif;
skb               497 net/ipv4/ip_input.c 	skb_orphan(skb);
skb               499 net/ipv4/ip_input.c 	return skb;
skb               506 net/ipv4/ip_input.c 	kfree_skb(skb);
skb               514 net/ipv4/ip_input.c int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
skb               519 net/ipv4/ip_input.c 	skb = ip_rcv_core(skb, net);
skb               520 net/ipv4/ip_input.c 	if (skb == NULL)
skb               524 net/ipv4/ip_input.c 		       net, NULL, skb, dev, NULL,
skb               530 net/ipv4/ip_input.c 	struct sk_buff *skb, *next;
skb               532 net/ipv4/ip_input.c 	list_for_each_entry_safe(skb, next, head, list) {
skb               533 net/ipv4/ip_input.c 		skb_list_del_init(skb);
skb               534 net/ipv4/ip_input.c 		dst_input(skb);
skb               542 net/ipv4/ip_input.c 	struct sk_buff *skb, *next;
skb               546 net/ipv4/ip_input.c 	list_for_each_entry_safe(skb, next, head, list) {
skb               547 net/ipv4/ip_input.c 		struct net_device *dev = skb->dev;
skb               550 net/ipv4/ip_input.c 		skb_list_del_init(skb);
skb               554 net/ipv4/ip_input.c 		skb = l3mdev_ip_rcv(skb);
skb               555 net/ipv4/ip_input.c 		if (!skb)
skb               557 net/ipv4/ip_input.c 		if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
skb               560 net/ipv4/ip_input.c 		dst = skb_dst(skb);
skb               569 net/ipv4/ip_input.c 		list_add_tail(&skb->list, &sublist);
skb               589 net/ipv4/ip_input.c 	struct sk_buff *skb, *next;
skb               593 net/ipv4/ip_input.c 	list_for_each_entry_safe(skb, next, head, list) {
skb               594 net/ipv4/ip_input.c 		struct net_device *dev = skb->dev;
skb               597 net/ipv4/ip_input.c 		skb_list_del_init(skb);
skb               598 net/ipv4/ip_input.c 		skb = ip_rcv_core(skb, net);
skb               599 net/ipv4/ip_input.c 		if (skb == NULL)
skb               611 net/ipv4/ip_input.c 		list_add_tail(&skb->list, &sublist);
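
The list-receive variants above (ip_sublist_rcv_finish, ip_list_rcv_finish, ip_list_rcv) amortize per-packet costs: each skb is detached with skb_list_del_init(), consecutive packets that share a device (or, after routing, a dst) are re-queued onto a sublist, and each run is flushed as one batch. The regrouping skeleton, sketched with a hypothetical toy_flush():

        #include <linux/list.h>
        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        static void toy_flush(struct list_head *sublist); /* hypothetical */

        static void toy_regroup_by_dev(struct list_head *head)
        {
                struct net_device *curr_dev = NULL;
                struct sk_buff *skb, *next;
                struct list_head sublist;

                INIT_LIST_HEAD(&sublist);
                list_for_each_entry_safe(skb, next, head, list) {
                        skb_list_del_init(skb);
                        if (curr_dev && curr_dev != skb->dev) {
                                toy_flush(&sublist);
                                INIT_LIST_HEAD(&sublist);
                        }
                        curr_dev = skb->dev;
                        list_add_tail(&skb->list, &sublist);
                }
                toy_flush(&sublist);        /* tail run */
        }
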
skb                44 net/ipv4/ip_options.c void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
skb                47 net/ipv4/ip_options.c 	unsigned char *iph = skb_network_header(skb);
skb                49 net/ipv4/ip_options.c 	memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options));
skb                51 net/ipv4/ip_options.c 	opt = &(IPCB(skb)->opt);
skb                58 net/ipv4/ip_options.c 			ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, skb, rt);
skb                60 net/ipv4/ip_options.c 			ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, skb, rt);
skb                91 net/ipv4/ip_options.c 		      struct sk_buff *skb, const struct ip_options *sopt)
skb               102 net/ipv4/ip_options.c 	sptr = skb_network_header(skb);
skb               173 net/ipv4/ip_options.c 			if (memcmp(&ip_hdr(skb)->saddr,
skb               208 net/ipv4/ip_options.c void ip_options_fragment(struct sk_buff *skb)
skb               210 net/ipv4/ip_options.c 	unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr);
skb               211 net/ipv4/ip_options.c 	struct ip_options *opt = &(IPCB(skb)->opt);
skb               242 net/ipv4/ip_options.c static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
skb               245 net/ipv4/ip_options.c 		*spec_dst = fib_compute_spec_dst(skb);
skb               255 net/ipv4/ip_options.c 			 struct ip_options *opt, struct sk_buff *skb,
skb               265 net/ipv4/ip_options.c 	if (skb) {
skb               266 net/ipv4/ip_options.c 		rt = skb_rtable(skb);
skb               267 net/ipv4/ip_options.c 		optptr = (unsigned char *)&(ip_hdr(skb)[1]);
skb               312 net/ipv4/ip_options.c 			if (!skb) {
skb               343 net/ipv4/ip_options.c 					spec_dst_fill(&spec_dst, skb);
skb               373 net/ipv4/ip_options.c 					if (skb)
skb               384 net/ipv4/ip_options.c 						spec_dst_fill(&spec_dst, skb);
skb               402 net/ipv4/ip_options.c 						if (skb)
skb               409 net/ipv4/ip_options.c 					if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) {
skb               428 net/ipv4/ip_options.c 				if (skb) {
skb               444 net/ipv4/ip_options.c 			if ((!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) || opt->cipso) {
skb               449 net/ipv4/ip_options.c 			if (cipso_v4_validate(skb, &optptr)) {
skb               457 net/ipv4/ip_options.c 			if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) {
skb               479 net/ipv4/ip_options.c 		       struct ip_options *opt, struct sk_buff *skb)
skb               484 net/ipv4/ip_options.c 	ret = __ip_options_compile(net, opt, skb, &info);
skb               485 net/ipv4/ip_options.c 	if (ret != 0 && skb)
skb               486 net/ipv4/ip_options.c 		icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
skb               569 net/ipv4/ip_options.c void ip_forward_options(struct sk_buff *skb)
skb               571 net/ipv4/ip_options.c 	struct   ip_options *opt	= &(IPCB(skb)->opt);
skb               573 net/ipv4/ip_options.c 	struct rtable *rt = skb_rtable(skb);
skb               574 net/ipv4/ip_options.c 	unsigned char *raw = skb_network_header(skb);
skb               578 net/ipv4/ip_options.c 		ip_rt_get_source(&optptr[optptr[2]-5], skb, rt);
skb               597 net/ipv4/ip_options.c 			ip_hdr(skb)->daddr = opt->nexthop;
skb               598 net/ipv4/ip_options.c 			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
skb               606 net/ipv4/ip_options.c 			ip_rt_get_source(&optptr[optptr[2]-9], skb, rt);
skb               612 net/ipv4/ip_options.c 		ip_send_check(ip_hdr(skb));
skb               616 net/ipv4/ip_options.c int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
skb               618 net/ipv4/ip_options.c 	struct ip_options *opt = &(IPCB(skb)->opt);
skb               621 net/ipv4/ip_options.c 	struct iphdr *iph = ip_hdr(skb);
skb               622 net/ipv4/ip_options.c 	unsigned char *optptr = skb_network_header(skb) + opt->srr;
skb               623 net/ipv4/ip_options.c 	struct rtable *rt = skb_rtable(skb);
skb               631 net/ipv4/ip_options.c 	if (skb->pkt_type != PACKET_HOST)
skb               636 net/ipv4/ip_options.c 		icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl(16<<24));
skb               644 net/ipv4/ip_options.c 			icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((opt->srr+2)<<24));
skb               649 net/ipv4/ip_options.c 		orefdst = skb->_skb_refdst;
skb               650 net/ipv4/ip_options.c 		skb_dst_set(skb, NULL);
skb               651 net/ipv4/ip_options.c 		err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
skb               652 net/ipv4/ip_options.c 		rt2 = skb_rtable(skb);
skb               654 net/ipv4/ip_options.c 			skb_dst_drop(skb);
skb               655 net/ipv4/ip_options.c 			skb->_skb_refdst = orefdst;
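
ip_options_rcv_srr() above (and ip_frag_queue() earlier, around its ip_frag_reasm() call) uses the same trick: stash skb->_skb_refdst, re-route, and restore the original reference if the new route is unusable. Hedged sketch:

        #include <linux/ip.h>
        #include <net/dst.h>
        #include <net/route.h>

        static int toy_reroute(struct sk_buff *skb, __be32 nexthop,
                               struct net_device *dev)
        {
                const struct iphdr *iph = ip_hdr(skb);
                unsigned long orefdst = skb->_skb_refdst;
                int err;

                skb_dst_set(skb, NULL);
                err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
                if (err) {
                        /* Put the original dst reference back. */
                        skb_dst_drop(skb);
                        skb->_skb_refdst = orefdst;
                }
                return err;
        }
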
skb                86 net/ipv4/ip_output.c ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb                98 net/ipv4/ip_output.c int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               100 net/ipv4/ip_output.c 	struct iphdr *iph = ip_hdr(skb);
skb               102 net/ipv4/ip_output.c 	iph->tot_len = htons(skb->len);
skb               108 net/ipv4/ip_output.c 	skb = l3mdev_ip_out(sk, skb);
skb               109 net/ipv4/ip_output.c 	if (unlikely(!skb))
skb               112 net/ipv4/ip_output.c 	skb->protocol = htons(ETH_P_IP);
skb               115 net/ipv4/ip_output.c 		       net, sk, skb, NULL, skb_dst(skb)->dev,
skb               119 net/ipv4/ip_output.c int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               123 net/ipv4/ip_output.c 	err = __ip_local_out(net, sk, skb);
skb               125 net/ipv4/ip_output.c 		err = dst_output(net, sk, skb);
skb               144 net/ipv4/ip_output.c int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
skb               148 net/ipv4/ip_output.c 	struct rtable *rt = skb_rtable(skb);
skb               153 net/ipv4/ip_output.c 	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
skb               154 net/ipv4/ip_output.c 	skb_reset_network_header(skb);
skb               155 net/ipv4/ip_output.c 	iph = ip_hdr(skb);
skb               173 net/ipv4/ip_output.c 		ip_options_build(skb, &opt->opt, daddr, rt, 0);
skb               176 net/ipv4/ip_output.c 	skb->priority = sk->sk_priority;
skb               177 net/ipv4/ip_output.c 	if (!skb->mark)
skb               178 net/ipv4/ip_output.c 		skb->mark = sk->sk_mark;
skb               181 net/ipv4/ip_output.c 	return ip_local_out(net, skb->sk, skb);
skb               185 net/ipv4/ip_output.c static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               187 net/ipv4/ip_output.c 	struct dst_entry *dst = skb_dst(skb);
skb               195 net/ipv4/ip_output.c 		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
skb               197 net/ipv4/ip_output.c 		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
skb               200 net/ipv4/ip_output.c 	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
skb               203 net/ipv4/ip_output.c 		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
skb               205 net/ipv4/ip_output.c 			kfree_skb(skb);
skb               208 net/ipv4/ip_output.c 		if (skb->sk)
skb               209 net/ipv4/ip_output.c 			skb_set_owner_w(skb2, skb->sk);
skb               210 net/ipv4/ip_output.c 		consume_skb(skb);
skb               211 net/ipv4/ip_output.c 		skb = skb2;
skb               215 net/ipv4/ip_output.c 		int res = lwtunnel_xmit(skb);
skb               222 net/ipv4/ip_output.c 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
skb               226 net/ipv4/ip_output.c 		sock_confirm_neigh(skb, neigh);
skb               228 net/ipv4/ip_output.c 		res = neigh_output(neigh, skb, is_v6gw);
skb               236 net/ipv4/ip_output.c 	kfree_skb(skb);
skb               241 net/ipv4/ip_output.c 				struct sk_buff *skb, unsigned int mtu)
skb               249 net/ipv4/ip_output.c 	if (skb_gso_validate_network_len(skb, mtu))
skb               250 net/ipv4/ip_output.c 		return ip_finish_output2(net, sk, skb);
skb               265 net/ipv4/ip_output.c 	features = netif_skb_features(skb);
skb               266 net/ipv4/ip_output.c 	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
skb               267 net/ipv4/ip_output.c 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
skb               269 net/ipv4/ip_output.c 		kfree_skb(skb);
skb               273 net/ipv4/ip_output.c 	consume_skb(skb);
skb               290 net/ipv4/ip_output.c static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               296 net/ipv4/ip_output.c 	if (skb_dst(skb)->xfrm) {
skb               297 net/ipv4/ip_output.c 		IPCB(skb)->flags |= IPSKB_REROUTED;
skb               298 net/ipv4/ip_output.c 		return dst_output(net, sk, skb);
skb               301 net/ipv4/ip_output.c 	mtu = ip_skb_dst_mtu(sk, skb);
skb               302 net/ipv4/ip_output.c 	if (skb_is_gso(skb))
skb               303 net/ipv4/ip_output.c 		return ip_finish_output_gso(net, sk, skb, mtu);
skb               305 net/ipv4/ip_output.c 	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
skb               306 net/ipv4/ip_output.c 		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
skb               308 net/ipv4/ip_output.c 	return ip_finish_output2(net, sk, skb);
skb               311 net/ipv4/ip_output.c static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               315 net/ipv4/ip_output.c 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
skb               318 net/ipv4/ip_output.c 		return __ip_finish_output(net, sk, skb);
skb               320 net/ipv4/ip_output.c 		return __ip_finish_output(net, sk, skb) ? : ret;
skb               322 net/ipv4/ip_output.c 		kfree_skb(skb);
skb               328 net/ipv4/ip_output.c 			       struct sk_buff *skb)
skb               334 net/ipv4/ip_output.c 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
skb               342 net/ipv4/ip_output.c 		kfree_skb(skb);
skb               350 net/ipv4/ip_output.c 	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
skb               353 net/ipv4/ip_output.c 		skb_dst_drop(skb);
skb               354 net/ipv4/ip_output.c 		skb_dst_set(skb, &new_rt->dst);
skb               357 net/ipv4/ip_output.c 	err = dev_loopback_xmit(net, sk, skb);
skb               361 net/ipv4/ip_output.c int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               363 net/ipv4/ip_output.c 	struct rtable *rt = skb_rtable(skb);
skb               369 net/ipv4/ip_output.c 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
skb               371 net/ipv4/ip_output.c 	skb->dev = dev;
skb               372 net/ipv4/ip_output.c 	skb->protocol = htons(ETH_P_IP);
skb               391 net/ipv4/ip_output.c 		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
skb               394 net/ipv4/ip_output.c 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
skb               403 net/ipv4/ip_output.c 		if (ip_hdr(skb)->ttl == 0) {
skb               404 net/ipv4/ip_output.c 			kfree_skb(skb);
skb               410 net/ipv4/ip_output.c 		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
skb               418 net/ipv4/ip_output.c 			    net, sk, skb, NULL, skb->dev,
skb               420 net/ipv4/ip_output.c 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
skb               423 net/ipv4/ip_output.c int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               425 net/ipv4/ip_output.c 	struct net_device *dev = skb_dst(skb)->dev;
skb               427 net/ipv4/ip_output.c 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
skb               429 net/ipv4/ip_output.c 	skb->dev = dev;
skb               430 net/ipv4/ip_output.c 	skb->protocol = htons(ETH_P_IP);
skb               433 net/ipv4/ip_output.c 			    net, sk, skb, NULL, dev,
skb               435 net/ipv4/ip_output.c 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
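
ip_output() above is the dst_output handler for locally routed packets: stamp skb->dev and skb->protocol, then run NF_INET_POST_ROUTING, where the condition argument keeps already-rerouted (IPSKB_REROUTED) packets from traversing the hook twice. Sketch with a hypothetical toy_finish() in place of the file-local ip_finish_output():

        #include <linux/if_ether.h>
        #include <linux/netfilter.h>
        #include <net/dst.h>
        #include <net/ip.h>

        static int toy_finish(struct net *net, struct sock *sk,
                              struct sk_buff *skb);        /* hypothetical */

        static int toy_output(struct net *net, struct sock *sk,
                              struct sk_buff *skb)
        {
                struct net_device *dev = skb_dst(skb)->dev;

                skb->dev = dev;
                skb->protocol = htons(ETH_P_IP);

                return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                                    net, sk, skb, NULL, dev, toy_finish,
                                    !(IPCB(skb)->flags & IPSKB_REROUTED));
        }
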
skb               453 net/ipv4/ip_output.c int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
skb               470 net/ipv4/ip_output.c 	rt = skb_rtable(skb);
skb               499 net/ipv4/ip_output.c 	skb_dst_set_noref(skb, &rt->dst);
skb               506 net/ipv4/ip_output.c 	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
skb               507 net/ipv4/ip_output.c 	skb_reset_network_header(skb);
skb               508 net/ipv4/ip_output.c 	iph = ip_hdr(skb);
skb               510 net/ipv4/ip_output.c 	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
skb               522 net/ipv4/ip_output.c 		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
skb               525 net/ipv4/ip_output.c 	ip_select_ident_segs(net, skb, sk,
skb               526 net/ipv4/ip_output.c 			     skb_shinfo(skb)->gso_segs ?: 1);
skb               529 net/ipv4/ip_output.c 	skb->priority = sk->sk_priority;
skb               530 net/ipv4/ip_output.c 	skb->mark = sk->sk_mark;
skb               532 net/ipv4/ip_output.c 	res = ip_local_out(net, sk, skb);
skb               539 net/ipv4/ip_output.c 	kfree_skb(skb);
skb               568 net/ipv4/ip_output.c static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb               572 net/ipv4/ip_output.c 	struct iphdr *iph = ip_hdr(skb);
skb               575 net/ipv4/ip_output.c 		return ip_do_fragment(net, sk, skb, output);
skb               577 net/ipv4/ip_output.c 	if (unlikely(!skb->ignore_df ||
skb               578 net/ipv4/ip_output.c 		     (IPCB(skb)->frag_max_size &&
skb               579 net/ipv4/ip_output.c 		      IPCB(skb)->frag_max_size > mtu))) {
skb               581 net/ipv4/ip_output.c 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
skb               583 net/ipv4/ip_output.c 		kfree_skb(skb);
skb               587 net/ipv4/ip_output.c 	return ip_do_fragment(net, sk, skb, output);
skb               590 net/ipv4/ip_output.c void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
skb               593 net/ipv4/ip_output.c 	unsigned int first_len = skb_pagelen(skb);
skb               595 net/ipv4/ip_output.c 	iter->frag = skb_shinfo(skb)->frag_list;
skb               596 net/ipv4/ip_output.c 	skb_frag_list_init(skb);
skb               602 net/ipv4/ip_output.c 	skb->data_len = first_len - skb_headlen(skb);
skb               603 net/ipv4/ip_output.c 	skb->len = first_len;
skb               610 net/ipv4/ip_output.c static void ip_fraglist_ipcb_prepare(struct sk_buff *skb,
skb               616 net/ipv4/ip_output.c 	IPCB(to)->flags = IPCB(skb)->flags;
skb               622 net/ipv4/ip_output.c void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
skb               637 net/ipv4/ip_output.c 	ip_copy_metadata(frag, skb);
skb               638 net/ipv4/ip_output.c 	iter->offset += skb->len - hlen;
skb               647 net/ipv4/ip_output.c void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
skb               651 net/ipv4/ip_output.c 	struct iphdr *iph = ip_hdr(skb);
skb               658 net/ipv4/ip_output.c 	state->left = skb->len - hlen;	/* Space per frame */
skb               682 net/ipv4/ip_output.c struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
skb               707 net/ipv4/ip_output.c 	ip_copy_metadata(skb2, skb);
skb               718 net/ipv4/ip_output.c 	if (skb->sk)
skb               719 net/ipv4/ip_output.c 		skb_set_owner_w(skb2, skb->sk);
skb               725 net/ipv4/ip_output.c 	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);
skb               730 net/ipv4/ip_output.c 	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
skb               766 net/ipv4/ip_output.c int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb               771 net/ipv4/ip_output.c 	struct rtable *rt = skb_rtable(skb);
skb               774 net/ipv4/ip_output.c 	ktime_t tstamp = skb->tstamp;
skb               779 net/ipv4/ip_output.c 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb               780 net/ipv4/ip_output.c 	    (err = skb_checksum_help(skb)))
skb               787 net/ipv4/ip_output.c 	iph = ip_hdr(skb);
skb               789 net/ipv4/ip_output.c 	mtu = ip_skb_dst_mtu(sk, skb);
skb               790 net/ipv4/ip_output.c 	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
skb               791 net/ipv4/ip_output.c 		mtu = IPCB(skb)->frag_max_size;
skb               799 net/ipv4/ip_output.c 	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
skb               809 net/ipv4/ip_output.c 	if (skb_has_frag_list(skb)) {
skb               811 net/ipv4/ip_output.c 		unsigned int first_len = skb_pagelen(skb);
skb               816 net/ipv4/ip_output.c 		    skb_cloned(skb) ||
skb               817 net/ipv4/ip_output.c 		    skb_headroom(skb) < ll_rs)
skb               820 net/ipv4/ip_output.c 		skb_walk_frags(skb, frag) {
skb               832 net/ipv4/ip_output.c 			if (skb->sk) {
skb               833 net/ipv4/ip_output.c 				frag->sk = skb->sk;
skb               836 net/ipv4/ip_output.c 			skb->truesize -= frag->truesize;
skb               840 net/ipv4/ip_output.c 		ip_fraglist_init(skb, iph, hlen, &iter);
skb               846 net/ipv4/ip_output.c 				ip_fraglist_ipcb_prepare(skb, &iter);
skb               847 net/ipv4/ip_output.c 				ip_fraglist_prepare(skb, &iter);
skb               850 net/ipv4/ip_output.c 			skb->tstamp = tstamp;
skb               851 net/ipv4/ip_output.c 			err = output(net, sk, skb);
skb               858 net/ipv4/ip_output.c 			skb = ip_fraglist_next(&iter);
skb               872 net/ipv4/ip_output.c 		skb_walk_frags(skb, frag2) {
skb               877 net/ipv4/ip_output.c 			skb->truesize += frag2->truesize;
skb               886 net/ipv4/ip_output.c 	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
skb               896 net/ipv4/ip_output.c 		skb2 = ip_frag_next(skb, &state);
skb               901 net/ipv4/ip_output.c 		ip_frag_ipcb(skb, skb2, first_frag, &state);
skb               913 net/ipv4/ip_output.c 	consume_skb(skb);
skb               918 net/ipv4/ip_output.c 	kfree_skb(skb);
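
The tail of ip_do_fragment() above is the slow path: ip_frag_init() seeds an ip_frag_state from the skb, ip_frag_next() carves off one fragment at a time (copying headers and data into fresh skbs), and each fragment goes to the output callback. A hedged reconstruction of that loop (the real call passes IPCB(skb)->flags & IPSKB_FRAG_PMTU where this sketch passes false):

        #include <linux/err.h>
        #include <net/ip.h>

        static int toy_slow_fragment(struct net *net, struct sock *sk,
                                     struct sk_buff *skb, unsigned int hlen,
                                     unsigned int ll_rs, unsigned int mtu,
                                     int (*output)(struct net *, struct sock *,
                                                   struct sk_buff *))
        {
                struct ip_frag_state state;
                int err;

                ip_frag_init(skb, hlen, ll_rs, mtu, false, &state);

                while (state.left > 0) {
                        struct sk_buff *skb2 = ip_frag_next(skb, &state);

                        if (IS_ERR(skb2)) {
                                err = PTR_ERR(skb2);
                                goto fail;
                        }
                        err = output(net, sk, skb2);
                        if (err)
                                goto fail;
                }
                consume_skb(skb);        /* all fragments are copies */
                return 0;

        fail:
                kfree_skb(skb);
                return err;
        }
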
skb               925 net/ipv4/ip_output.c ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
skb               929 net/ipv4/ip_output.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               936 net/ipv4/ip_output.c 		skb->csum = csum_block_add(skb->csum, csum, odd);
skb               959 net/ipv4/ip_output.c 					int len, int odd, struct sk_buff *skb),
skb               965 net/ipv4/ip_output.c 	struct sk_buff *skb;
skb               981 net/ipv4/ip_output.c 	skb = skb_peek_tail(queue);
skb               983 net/ipv4/ip_output.c 	exthdrlen = !skb ? rt->dst.header_len : 0;
skb              1015 net/ipv4/ip_output.c 		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
skb              1018 net/ipv4/ip_output.c 		extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
skb              1024 net/ipv4/ip_output.c 			skb_zcopy_set(skb, uarg, &extra_uref);
skb              1037 net/ipv4/ip_output.c 	if (!skb)
skb              1042 net/ipv4/ip_output.c 		copy = mtu - skb->len;
skb              1044 net/ipv4/ip_output.c 			copy = maxfraglen - skb->len;
skb              1054 net/ipv4/ip_output.c 			skb_prev = skb;
skb              1091 net/ipv4/ip_output.c 				skb = sock_alloc_send_skb(sk,
skb              1095 net/ipv4/ip_output.c 				skb = NULL;
skb              1098 net/ipv4/ip_output.c 					skb = alloc_skb(alloclen + hh_len + 15,
skb              1100 net/ipv4/ip_output.c 				if (unlikely(!skb))
skb              1103 net/ipv4/ip_output.c 			if (!skb)
skb              1109 net/ipv4/ip_output.c 			skb->ip_summed = csummode;
skb              1110 net/ipv4/ip_output.c 			skb->csum = 0;
skb              1111 net/ipv4/ip_output.c 			skb_reserve(skb, hh_len);
skb              1116 net/ipv4/ip_output.c 			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
skb              1117 net/ipv4/ip_output.c 			skb_set_network_header(skb, exthdrlen);
skb              1118 net/ipv4/ip_output.c 			skb->transport_header = (skb->network_header +
skb              1123 net/ipv4/ip_output.c 				skb->csum = skb_copy_and_csum_bits(
skb              1127 net/ipv4/ip_output.c 							  skb->csum);
skb              1133 net/ipv4/ip_output.c 			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
skb              1135 net/ipv4/ip_output.c 				kfree_skb(skb);
skb              1146 net/ipv4/ip_output.c 			skb_shinfo(skb)->tx_flags = cork->tx_flags;
skb              1148 net/ipv4/ip_output.c 			skb_shinfo(skb)->tskey = tskey;
skb              1150 net/ipv4/ip_output.c 			skb_zcopy_set(skb, uarg, &extra_uref);
skb              1153 net/ipv4/ip_output.c 				skb_set_dst_pending_confirm(skb, 1);
skb              1158 net/ipv4/ip_output.c 			if (!skb->destructor) {
skb              1159 net/ipv4/ip_output.c 				skb->destructor = sock_wfree;
skb              1160 net/ipv4/ip_output.c 				skb->sk = sk;
skb              1161 net/ipv4/ip_output.c 				wmem_alloc_delta += skb->truesize;
skb              1163 net/ipv4/ip_output.c 			__skb_queue_tail(queue, skb);
skb              1171 net/ipv4/ip_output.c 		    skb_tailroom(skb) >= copy) {
skb              1174 net/ipv4/ip_output.c 			off = skb->len;
skb              1175 net/ipv4/ip_output.c 			if (getfrag(from, skb_put(skb, copy),
skb              1176 net/ipv4/ip_output.c 					offset, copy, off, skb) < 0) {
skb              1177 net/ipv4/ip_output.c 				__skb_trim(skb, off);
skb              1182 net/ipv4/ip_output.c 			int i = skb_shinfo(skb)->nr_frags;
skb              1188 net/ipv4/ip_output.c 			if (!skb_can_coalesce(skb, i, pfrag->page,
skb              1194 net/ipv4/ip_output.c 				__skb_fill_page_desc(skb, i, pfrag->page,
skb              1196 net/ipv4/ip_output.c 				skb_shinfo(skb)->nr_frags = ++i;
skb              1202 net/ipv4/ip_output.c 				    offset, copy, skb->len, skb) < 0)
skb              1206 net/ipv4/ip_output.c 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb              1207 net/ipv4/ip_output.c 			skb->len += copy;
skb              1208 net/ipv4/ip_output.c 			skb->data_len += copy;
skb              1209 net/ipv4/ip_output.c 			skb->truesize += copy;
skb              1212 net/ipv4/ip_output.c 			err = skb_zerocopy_iter_dgram(skb, from, copy);
skb              1298 net/ipv4/ip_output.c 			       int odd, struct sk_buff *skb),
skb              1326 net/ipv4/ip_output.c 	struct sk_buff *skb;
skb              1366 net/ipv4/ip_output.c 	skb = skb_peek_tail(&sk->sk_write_queue);
skb              1367 net/ipv4/ip_output.c 	if (!skb)
skb              1374 net/ipv4/ip_output.c 		len = mtu - skb->len;
skb              1376 net/ipv4/ip_output.c 			len = maxfraglen - skb->len;
skb              1382 net/ipv4/ip_output.c 			skb_prev = skb;
skb              1386 net/ipv4/ip_output.c 			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
skb              1387 net/ipv4/ip_output.c 			if (unlikely(!skb)) {
skb              1395 net/ipv4/ip_output.c 			skb->ip_summed = CHECKSUM_NONE;
skb              1396 net/ipv4/ip_output.c 			skb->csum = 0;
skb              1397 net/ipv4/ip_output.c 			skb_reserve(skb, hh_len);
skb              1402 net/ipv4/ip_output.c 			skb_put(skb, fragheaderlen + fraggap);
skb              1403 net/ipv4/ip_output.c 			skb_reset_network_header(skb);
skb              1404 net/ipv4/ip_output.c 			skb->transport_header = (skb->network_header +
skb              1407 net/ipv4/ip_output.c 				skb->csum = skb_copy_and_csum_bits(skb_prev,
skb              1409 net/ipv4/ip_output.c 						    skb_transport_header(skb),
skb              1412 net/ipv4/ip_output.c 							  skb->csum);
skb              1419 net/ipv4/ip_output.c 			__skb_queue_tail(&sk->sk_write_queue, skb);
skb              1426 net/ipv4/ip_output.c 		if (skb_append_pagefrags(skb, page, offset, len)) {
skb              1431 net/ipv4/ip_output.c 		if (skb->ip_summed == CHECKSUM_NONE) {
skb              1434 net/ipv4/ip_output.c 			skb->csum = csum_block_add(skb->csum, csum, skb->len);
skb              1437 net/ipv4/ip_output.c 		skb->len += len;
skb              1438 net/ipv4/ip_output.c 		skb->data_len += len;
skb              1439 net/ipv4/ip_output.c 		skb->truesize += len;
skb              1470 net/ipv4/ip_output.c 	struct sk_buff *skb, *tmp_skb;
skb              1480 net/ipv4/ip_output.c 	skb = __skb_dequeue(queue);
skb              1481 net/ipv4/ip_output.c 	if (!skb)
skb              1483 net/ipv4/ip_output.c 	tail_skb = &(skb_shinfo(skb)->frag_list);
skb              1486 net/ipv4/ip_output.c 	if (skb->data < skb_network_header(skb))
skb              1487 net/ipv4/ip_output.c 		__skb_pull(skb, skb_network_offset(skb));
skb              1489 net/ipv4/ip_output.c 		__skb_pull(tmp_skb, skb_network_header_len(skb));
skb              1492 net/ipv4/ip_output.c 		skb->len += tmp_skb->len;
skb              1493 net/ipv4/ip_output.c 		skb->data_len += tmp_skb->len;
skb              1494 net/ipv4/ip_output.c 		skb->truesize += tmp_skb->truesize;
skb              1503 net/ipv4/ip_output.c 	skb->ignore_df = ip_sk_ignore_df(sk);
skb              1510 net/ipv4/ip_output.c 	    (skb->len <= dst_mtu(&rt->dst) &&
skb              1524 net/ipv4/ip_output.c 	iph = ip_hdr(skb);
skb              1532 net/ipv4/ip_output.c 	ip_select_ident(net, skb, sk);
skb              1536 net/ipv4/ip_output.c 		ip_options_build(skb, opt, cork->addr, rt, 0);
skb              1539 net/ipv4/ip_output.c 	skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
skb              1540 net/ipv4/ip_output.c 	skb->mark = cork->mark;
skb              1541 net/ipv4/ip_output.c 	skb->tstamp = cork->transmit_time;
skb              1547 net/ipv4/ip_output.c 	skb_dst_set(skb, &rt->dst);
skb              1551 net/ipv4/ip_output.c 			skb_transport_header(skb))->type);
skb              1555 net/ipv4/ip_output.c 	return skb;
skb              1558 net/ipv4/ip_output.c int ip_send_skb(struct net *net, struct sk_buff *skb)
skb              1562 net/ipv4/ip_output.c 	err = ip_local_out(net, skb->sk, skb);
skb              1575 net/ipv4/ip_output.c 	struct sk_buff *skb;
skb              1577 net/ipv4/ip_output.c 	skb = ip_finish_skb(sk, fl4);
skb              1578 net/ipv4/ip_output.c 	if (!skb)
skb              1582 net/ipv4/ip_output.c 	return ip_send_skb(sock_net(sk), skb);
skb              1592 net/ipv4/ip_output.c 	struct sk_buff *skb;
skb              1594 net/ipv4/ip_output.c 	while ((skb = __skb_dequeue_tail(queue)) != NULL)
skb              1595 net/ipv4/ip_output.c 		kfree_skb(skb);
skb              1608 net/ipv4/ip_output.c 					int len, int odd, struct sk_buff *skb),
skb              1643 net/ipv4/ip_output.c 			      int len, int odd, struct sk_buff *skb)
skb              1648 net/ipv4/ip_output.c 	skb->csum = csum_block_add(skb->csum, csum, odd);
skb              1656 net/ipv4/ip_output.c void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
skb              1665 net/ipv4/ip_output.c 	struct rtable *rt = skb_rtable(skb);
skb              1671 net/ipv4/ip_output.c 	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
skb              1686 net/ipv4/ip_output.c 	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
skb              1687 net/ipv4/ip_output.c 		oif = skb->skb_iif;
skb              1690 net/ipv4/ip_output.c 			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
skb              1692 net/ipv4/ip_output.c 			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
skb              1695 net/ipv4/ip_output.c 			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
skb              1697 net/ipv4/ip_output.c 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
skb              1704 net/ipv4/ip_output.c 	sk->sk_protocol = ip_hdr(skb)->protocol;
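
ip_generic_getfrag() and ip_reply_glue_bits() above are "getfrag" callbacks for ip_append_data(): copy len bytes of payload into the frame at offset and, when the hardware will not checksum (ip_summed != CHECKSUM_PARTIAL), fold a running checksum into skb->csum at block offset odd. A kernel-buffer variant, sketched:

        #include <linux/string.h>
        #include <net/checksum.h>

        static int toy_getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb)
        {
                __wsum csum;

                memcpy(to, (char *)from + offset, len);
                csum = csum_partial(to, len, 0);
                skb->csum = csum_block_add(skb->csum, csum, odd);
                return 0;        /* non-zero aborts ip_append_data() */
        }
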
skb                56 net/ipv4/ip_sockglue.c static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
skb                58 net/ipv4/ip_sockglue.c 	struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
skb                60 net/ipv4/ip_sockglue.c 	info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
skb                65 net/ipv4/ip_sockglue.c static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
skb                67 net/ipv4/ip_sockglue.c 	int ttl = ip_hdr(skb)->ttl;
skb                71 net/ipv4/ip_sockglue.c static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
skb                73 net/ipv4/ip_sockglue.c 	put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
skb                76 net/ipv4/ip_sockglue.c static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
skb                78 net/ipv4/ip_sockglue.c 	if (IPCB(skb)->opt.optlen == 0)
skb                81 net/ipv4/ip_sockglue.c 	put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
skb                82 net/ipv4/ip_sockglue.c 		 ip_hdr(skb) + 1);
skb                87 net/ipv4/ip_sockglue.c 				 struct sk_buff *skb)
skb                92 net/ipv4/ip_sockglue.c 	if (IPCB(skb)->opt.optlen == 0)
skb                95 net/ipv4/ip_sockglue.c 	if (ip_options_echo(net, opt, skb)) {
skb               104 net/ipv4/ip_sockglue.c static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
skb               108 net/ipv4/ip_sockglue.c 	if (IPCB(skb)->frag_max_size == 0)
skb               111 net/ipv4/ip_sockglue.c 	val = IPCB(skb)->frag_max_size;
skb               115 net/ipv4/ip_sockglue.c static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
skb               118 net/ipv4/ip_sockglue.c 	__wsum csum = skb->csum;
skb               120 net/ipv4/ip_sockglue.c 	if (skb->ip_summed != CHECKSUM_COMPLETE)
skb               124 net/ipv4/ip_sockglue.c 		int tend_off = skb_transport_offset(skb) + tlen;
skb               125 net/ipv4/ip_sockglue.c 		csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
skb               131 net/ipv4/ip_sockglue.c static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
skb               137 net/ipv4/ip_sockglue.c 	err = security_socket_getpeersec_dgram(NULL, skb, &secid);
skb               149 net/ipv4/ip_sockglue.c static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
skb               158 net/ipv4/ip_sockglue.c 	ports = skb_header_pointer(skb, skb_transport_offset(skb),
skb               164 net/ipv4/ip_sockglue.c 	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
skb               172 net/ipv4/ip_sockglue.c 			 struct sk_buff *skb, int tlen, int offset)
skb               179 net/ipv4/ip_sockglue.c 		ip_cmsg_recv_pktinfo(msg, skb);
skb               187 net/ipv4/ip_sockglue.c 		ip_cmsg_recv_ttl(msg, skb);
skb               195 net/ipv4/ip_sockglue.c 		ip_cmsg_recv_tos(msg, skb);
skb               203 net/ipv4/ip_sockglue.c 		ip_cmsg_recv_opts(msg, skb);
skb               211 net/ipv4/ip_sockglue.c 		ip_cmsg_recv_retopts(sock_net(sk), msg, skb);
skb               219 net/ipv4/ip_sockglue.c 		ip_cmsg_recv_security(msg, skb);
skb               227 net/ipv4/ip_sockglue.c 		ip_cmsg_recv_dstaddr(msg, skb);
skb               235 net/ipv4/ip_sockglue.c 		ip_cmsg_recv_checksum(msg, skb, tlen, offset);
skb               238 net/ipv4/ip_sockglue.c 		ip_cmsg_recv_fragsize(msg, skb);
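
Each ip_cmsg_recv_*() helper above turns one piece of per-packet metadata into an ancillary message via put_cmsg(SOL_IP, ...), and ip_cmsg_recv_offset() fans out over the socket's cmsg flags. The TTL helper, restated as a self-contained sketch:

        #include <linux/in.h>
        #include <linux/ip.h>
        #include <linux/socket.h>

        static void toy_cmsg_ttl(struct msghdr *msg, struct sk_buff *skb)
        {
                int ttl = ip_hdr(skb)->ttl;

                /* One cmsg(SOL_IP, IP_TTL) carrying an int. */
                put_cmsg(msg, SOL_IP, IP_TTL, sizeof(ttl), &ttl);
        }
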
skb               392 net/ipv4/ip_sockglue.c void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
skb               397 net/ipv4/ip_sockglue.c 	skb = skb_clone(skb, GFP_ATOMIC);
skb               398 net/ipv4/ip_sockglue.c 	if (!skb)
skb               401 net/ipv4/ip_sockglue.c 	serr = SKB_EXT_ERR(skb);
skb               404 net/ipv4/ip_sockglue.c 	serr->ee.ee_type = icmp_hdr(skb)->type;
skb               405 net/ipv4/ip_sockglue.c 	serr->ee.ee_code = icmp_hdr(skb)->code;
skb               409 net/ipv4/ip_sockglue.c 	serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
skb               410 net/ipv4/ip_sockglue.c 				   skb_network_header(skb);
skb               413 net/ipv4/ip_sockglue.c 	if (skb_pull(skb, payload - skb->data)) {
skb               414 net/ipv4/ip_sockglue.c 		skb_reset_transport_header(skb);
skb               415 net/ipv4/ip_sockglue.c 		if (sock_queue_err_skb(sk, skb) == 0)
skb               418 net/ipv4/ip_sockglue.c 	kfree_skb(skb);
skb               426 net/ipv4/ip_sockglue.c 	struct sk_buff *skb;
skb               431 net/ipv4/ip_sockglue.c 	skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
skb               432 net/ipv4/ip_sockglue.c 	if (!skb)
skb               435 net/ipv4/ip_sockglue.c 	skb_put(skb, sizeof(struct iphdr));
skb               436 net/ipv4/ip_sockglue.c 	skb_reset_network_header(skb);
skb               437 net/ipv4/ip_sockglue.c 	iph = ip_hdr(skb);
skb               440 net/ipv4/ip_sockglue.c 	serr = SKB_EXT_ERR(skb);
skb               448 net/ipv4/ip_sockglue.c 	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
skb               451 net/ipv4/ip_sockglue.c 	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
skb               452 net/ipv4/ip_sockglue.c 	skb_reset_transport_header(skb);
skb               454 net/ipv4/ip_sockglue.c 	if (sock_queue_err_skb(sk, skb))
skb               455 net/ipv4/ip_sockglue.c 		kfree_skb(skb);
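
ip_icmp_error() and ip_local_error() above both end the same way: fill the sock_exterr_skb block, aim addr_offset at the offending address inside the stored headers, reset the transport header to the payload, then hand the skb to sock_queue_err_skb(), freeing it if queueing fails. The queueing tail, condensed and hedged:

        #include <linux/errqueue.h>
        #include <linux/icmp.h>
        #include <net/sock.h>

        static void toy_queue_icmp_err(struct sock *sk, struct sk_buff *skb,
                                       int err)
        {
                struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

                serr->ee.ee_errno  = err;
                serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
                serr->ee.ee_type   = icmp_hdr(skb)->type;
                serr->ee.ee_code   = icmp_hdr(skb)->code;

                if (sock_queue_err_skb(sk, skb))
                        kfree_skb(skb);        /* queue refused it */
        }
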
skb               473 net/ipv4/ip_sockglue.c 				       struct sk_buff *skb,
skb               488 net/ipv4/ip_sockglue.c 	info = PKTINFO_SKB_CB(skb);
skb               493 net/ipv4/ip_sockglue.c 	info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
skb               503 net/ipv4/ip_sockglue.c 	struct sk_buff *skb;
skb               513 net/ipv4/ip_sockglue.c 	skb = sock_dequeue_err_skb(sk);
skb               514 net/ipv4/ip_sockglue.c 	if (!skb)
skb               517 net/ipv4/ip_sockglue.c 	copied = skb->len;
skb               522 net/ipv4/ip_sockglue.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               524 net/ipv4/ip_sockglue.c 		kfree_skb(skb);
skb               527 net/ipv4/ip_sockglue.c 	sock_recv_timestamp(msg, sk, skb);
skb               529 net/ipv4/ip_sockglue.c 	serr = SKB_EXT_ERR(skb);
skb               533 net/ipv4/ip_sockglue.c 		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
skb               544 net/ipv4/ip_sockglue.c 	if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
skb               546 net/ipv4/ip_sockglue.c 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
skb               548 net/ipv4/ip_sockglue.c 			ip_cmsg_recv(msg, skb);
skb               558 net/ipv4/ip_sockglue.c 	consume_skb(skb);
skb              1208 net/ipv4/ip_sockglue.c void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
skb              1210 net/ipv4/ip_sockglue.c 	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
skb              1214 net/ipv4/ip_sockglue.c 	if (prepare && skb_rtable(skb)) {
skb              1224 net/ipv4/ip_sockglue.c 		struct rtable *rt = skb_rtable(skb);
skb              1225 net/ipv4/ip_sockglue.c 		bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
skb              1228 net/ipv4/ip_sockglue.c 			pktinfo->ipi_ifindex = inet_iif(skb);
skb              1232 net/ipv4/ip_sockglue.c 		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
skb              1237 net/ipv4/ip_sockglue.c 	skb_dst_drop(skb);
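
ipv4_pktinfo_prepare() runs just before the route attached to the skb is dropped: it caches the incoming ifindex and the fib-computed specific destination in the skb's control block so ip_cmsg_recv() can still build an IP_PKTINFO cmsg at recvmsg() time. The matching userspace side, as a minimal sketch (glibc exposes struct in_pktinfo under _GNU_SOURCE):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    /* Receive one datagram and report the interface and local address
     * it arrived on, via the IP_PKTINFO ancillary message. */
    static void recv_with_pktinfo(int fd)
    {
        char buf[2048], ctrl[256], addr[INET_ADDRSTRLEN];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };
        struct cmsghdr *cm;
        int on = 1;

        setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
        if (recvmsg(fd, &msg, 0) < 0)
            return;

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
            if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_PKTINFO) {
                struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(cm);

                inet_ntop(AF_INET, &pi->ipi_addr, addr, sizeof(addr));
                printf("ifindex=%d dst=%s\n", pi->ipi_ifindex, addr);
            }
        }
    }
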
skb               357 net/ipv4/ip_tunnel.c int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
skb               362 net/ipv4/ip_tunnel.c 	const struct iphdr *iph = ip_hdr(skb);
skb               368 net/ipv4/ip_tunnel.c 		skb->pkt_type = PACKET_BROADCAST;
skb               389 net/ipv4/ip_tunnel.c 	skb_reset_network_header(skb);
skb               391 net/ipv4/ip_tunnel.c 	err = IP_ECN_decapsulate(iph, skb);
skb               406 net/ipv4/ip_tunnel.c 	tstats->rx_bytes += skb->len;
skb               409 net/ipv4/ip_tunnel.c 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
skb               412 net/ipv4/ip_tunnel.c 		skb->protocol = eth_type_trans(skb, tunnel->dev);
skb               413 net/ipv4/ip_tunnel.c 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
skb               415 net/ipv4/ip_tunnel.c 		skb->dev = tunnel->dev;
skb               419 net/ipv4/ip_tunnel.c 		skb_dst_set(skb, (struct dst_entry *)tun_dst);
skb               421 net/ipv4/ip_tunnel.c 	gro_cells_receive(&tunnel->gro_cells, skb);
skb               427 net/ipv4/ip_tunnel.c 	kfree_skb(skb);
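
The IP_ECN_decapsulate() call in ip_tunnel_rcv() applies the RFC 6040 rule for folding the outer TOS byte's ECN bits into the inner header. A self-contained sketch of that rule, with the return-code convention paraphrased from the kernel's INET_ECN helpers (0 = ok, 1 = ok but worth logging, 2 = drop):

    /* Hedged paraphrase of the RFC 6040 decapsulation rule.  outer and
     * inner are the two ECN fields (low 2 bits of the TOS byte). */
    static int ecn_decapsulate(unsigned int outer, unsigned int inner,
                               int *set_ce)
    {
        *set_ce = 0;
        if ((inner & 0x3) == 0) {          /* inner is Not-ECT */
            switch (outer & 0x3) {
            case 0:  return 0;             /* Not-ECT over Not-ECT: fine */
            case 1:
            case 2:  return 1;             /* ECT over Not-ECT: log      */
            default: return 2;             /* CE over Not-ECT: drop      */
            }
        }
        *set_ce = (outer & 0x3) == 0x3;    /* propagate CE to the inner  */
        return 0;
    }

This is why the caller treats err == 1 as a loggable event and err > 1 as a hard drop.
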
skb               485 net/ipv4/ip_tunnel.c static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
skb               495 net/ipv4/ip_tunnel.c 	pkt_size = skb->len - tunnel_hlen - dev->hard_header_len;
skb               501 net/ipv4/ip_tunnel.c 		mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
skb               503 net/ipv4/ip_tunnel.c 	if (skb_valid_dst(skb))
skb               504 net/ipv4/ip_tunnel.c 		skb_dst_update_pmtu_no_confirm(skb, mtu);
skb               506 net/ipv4/ip_tunnel.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               507 net/ipv4/ip_tunnel.c 		if (!skb_is_gso(skb) &&
skb               510 net/ipv4/ip_tunnel.c 			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
skb               511 net/ipv4/ip_tunnel.c 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
skb               516 net/ipv4/ip_tunnel.c 	else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               520 net/ipv4/ip_tunnel.c 		rt6 = skb_valid_dst(skb) ? (struct rt6_info *)skb_dst(skb) :
skb               524 net/ipv4/ip_tunnel.c 		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
skb               529 net/ipv4/ip_tunnel.c 				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
skb               533 net/ipv4/ip_tunnel.c 		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
skb               535 net/ipv4/ip_tunnel.c 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
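
tnl_update_pmtu() measures the inner packet against the tunnel-adjusted path MTU and, when an unfragmentable packet will not fit, reflects the appropriate "too big" error back at the sender along with the usable MTU. The two bounce conditions, isolated into hypothetical predicates:

    /* Hedged sketch: pkt_len is skb->len minus tunnel_hlen and the
     * device's hard_header_len; mtu is the tunnel-adjusted path MTU. */
    static int bounce_v4(int pkt_len, int mtu, int df_set, int is_gso)
    {
        /* caller sends ICMP_DEST_UNREACH/ICMP_FRAG_NEEDED with htonl(mtu) */
        return !is_gso && df_set && pkt_len > mtu;
    }

    static int bounce_v6(int pkt_len, int mtu, int is_gso)
    {
        /* IPv6 never reports below its 1280-byte minimum MTU */
        return !is_gso && mtu >= 1280 && pkt_len > mtu;
    }
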
skb               543 net/ipv4/ip_tunnel.c void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
skb               557 net/ipv4/ip_tunnel.c 	tun_info = skb_tunnel_info(skb);
skb               562 net/ipv4/ip_tunnel.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb               563 net/ipv4/ip_tunnel.c 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
skb               566 net/ipv4/ip_tunnel.c 		if (skb->protocol == htons(ETH_P_IP))
skb               568 net/ipv4/ip_tunnel.c 		else if (skb->protocol == htons(ETH_P_IPV6))
skb               573 net/ipv4/ip_tunnel.c 			    0, skb->mark, skb_get_hash(skb));
skb               577 net/ipv4/ip_tunnel.c 	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
skb               598 net/ipv4/ip_tunnel.c 	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
skb               604 net/ipv4/ip_tunnel.c 	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
skb               607 net/ipv4/ip_tunnel.c 		if (skb->protocol == htons(ETH_P_IP))
skb               609 net/ipv4/ip_tunnel.c 		else if (skb->protocol == htons(ETH_P_IPV6))
skb               615 net/ipv4/ip_tunnel.c 	if (!df && skb->protocol == htons(ETH_P_IP))
skb               622 net/ipv4/ip_tunnel.c 	if (skb_cow_head(skb, dev->needed_headroom)) {
skb               626 net/ipv4/ip_tunnel.c 	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
skb               635 net/ipv4/ip_tunnel.c 	kfree_skb(skb);
skb               639 net/ipv4/ip_tunnel.c void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
skb               655 net/ipv4/ip_tunnel.c 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
skb               658 net/ipv4/ip_tunnel.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb               664 net/ipv4/ip_tunnel.c 		if (!skb_dst(skb)) {
skb               669 net/ipv4/ip_tunnel.c 		tun_info = skb_tunnel_info(skb);
skb               677 net/ipv4/ip_tunnel.c 		else if (skb->protocol == htons(ETH_P_IP)) {
skb               678 net/ipv4/ip_tunnel.c 			rt = skb_rtable(skb);
skb               682 net/ipv4/ip_tunnel.c 		else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               688 net/ipv4/ip_tunnel.c 			neigh = dst_neigh_lookup(skb_dst(skb),
skb               689 net/ipv4/ip_tunnel.c 						 &ipv6_hdr(skb)->daddr);
skb               697 net/ipv4/ip_tunnel.c 				addr6 = &ipv6_hdr(skb)->daddr;
skb               722 net/ipv4/ip_tunnel.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb               725 net/ipv4/ip_tunnel.c 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               733 net/ipv4/ip_tunnel.c 			    tunnel->fwmark, skb_get_hash(skb));
skb               735 net/ipv4/ip_tunnel.c 	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
skb               739 net/ipv4/ip_tunnel.c 		use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
skb               769 net/ipv4/ip_tunnel.c 	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
skb               780 net/ipv4/ip_tunnel.c 			dst_link_failure(skb);
skb               785 net/ipv4/ip_tunnel.c 	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
skb               788 net/ipv4/ip_tunnel.c 		if (skb->protocol == htons(ETH_P_IP))
skb               791 net/ipv4/ip_tunnel.c 		else if (skb->protocol == htons(ETH_P_IPV6))
skb               799 net/ipv4/ip_tunnel.c 	if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
skb               807 net/ipv4/ip_tunnel.c 	if (skb_cow_head(skb, dev->needed_headroom)) {
skb               810 net/ipv4/ip_tunnel.c 		kfree_skb(skb);
skb               814 net/ipv4/ip_tunnel.c 	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
skb               820 net/ipv4/ip_tunnel.c 	dst_link_failure(skb);
skb               824 net/ipv4/ip_tunnel.c 	kfree_skb(skb);
skb                46 net/ipv4/ip_tunnel_core.c void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
skb                50 net/ipv4/ip_tunnel_core.c 	int pkt_len = skb->len - skb_inner_network_offset(skb);
skb                52 net/ipv4/ip_tunnel_core.c 	struct net_device *dev = skb->dev;
skb                56 net/ipv4/ip_tunnel_core.c 	skb_scrub_packet(skb, xnet);
skb                58 net/ipv4/ip_tunnel_core.c 	skb_clear_hash_if_not_l4(skb);
skb                59 net/ipv4/ip_tunnel_core.c 	skb_dst_set(skb, &rt->dst);
skb                60 net/ipv4/ip_tunnel_core.c 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
skb                63 net/ipv4/ip_tunnel_core.c 	skb_push(skb, sizeof(struct iphdr));
skb                64 net/ipv4/ip_tunnel_core.c 	skb_reset_network_header(skb);
skb                66 net/ipv4/ip_tunnel_core.c 	iph = ip_hdr(skb);
skb                76 net/ipv4/ip_tunnel_core.c 	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);
skb                78 net/ipv4/ip_tunnel_core.c 	err = ip_local_out(net, sk, skb);
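
Between the skb_push()/skb_reset_network_header() pair and the __ip_select_ident() call, iptunnel_xmit() fills every field of the freshly exposed outer IPv4 header from its parameters. A hedged reconstruction of that elided assignment block (recent kernels may additionally clear df when the route's MTU is locked):

    iph = ip_hdr(skb);

    iph->version  = 4;
    iph->ihl      = sizeof(struct iphdr) >> 2;
    iph->frag_off = df;        /* htons(IP_DF) or 0, from the caller */
    iph->protocol = proto;     /* e.g. IPPROTO_GRE or IPPROTO_IPIP   */
    iph->tos      = tos;
    iph->daddr    = dst;
    iph->saddr    = src;
    iph->ttl      = ttl;
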
skb                88 net/ipv4/ip_tunnel_core.c int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
skb                91 net/ipv4/ip_tunnel_core.c 	if (unlikely(!pskb_may_pull(skb, hdr_len)))
skb                94 net/ipv4/ip_tunnel_core.c 	skb_pull_rcsum(skb, hdr_len);
skb                99 net/ipv4/ip_tunnel_core.c 		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
skb               102 net/ipv4/ip_tunnel_core.c 		eh = (struct ethhdr *)skb->data;
skb               104 net/ipv4/ip_tunnel_core.c 			skb->protocol = eh->h_proto;
skb               106 net/ipv4/ip_tunnel_core.c 			skb->protocol = htons(ETH_P_802_2);
skb               109 net/ipv4/ip_tunnel_core.c 		skb->protocol = inner_proto;
skb               112 net/ipv4/ip_tunnel_core.c 	skb_clear_hash_if_not_l4(skb);
skb               113 net/ipv4/ip_tunnel_core.c 	__vlan_hwaccel_clear_tag(skb);
skb               114 net/ipv4/ip_tunnel_core.c 	skb_set_queue_mapping(skb, 0);
skb               115 net/ipv4/ip_tunnel_core.c 	skb_scrub_packet(skb, xnet);
skb               117 net/ipv4/ip_tunnel_core.c 	return iptunnel_pull_offloads(skb);
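
When the inner payload is transparent Ethernet bridging (ETH_P_TEB), __iptunnel_pull_header() peeks at the inner Ethernet header to decide the skb protocol: a real EtherType is used as-is, while a value below 0x0600 is actually an 802.3 length field, so the frame is tagged ETH_P_802_2. The selection, as a standalone sketch:

    #include <linux/if_ether.h>

    /* Hedged sketch of the inner-protocol pick for ETH_P_TEB payloads. */
    static __be16 inner_eth_protocol(const struct ethhdr *eh)
    {
        if (ntohs(eh->h_proto) >= ETH_P_802_3_MIN)   /* 0x0600 */
            return eh->h_proto;                      /* Ethernet II     */
        return htons(ETH_P_802_2);                   /* raw 802.3/802.2 */
    }
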
skb               151 net/ipv4/ip_tunnel_core.c int iptunnel_handle_offloads(struct sk_buff *skb,
skb               156 net/ipv4/ip_tunnel_core.c 	if (likely(!skb->encapsulation)) {
skb               157 net/ipv4/ip_tunnel_core.c 		skb_reset_inner_headers(skb);
skb               158 net/ipv4/ip_tunnel_core.c 		skb->encapsulation = 1;
skb               161 net/ipv4/ip_tunnel_core.c 	if (skb_is_gso(skb)) {
skb               162 net/ipv4/ip_tunnel_core.c 		err = skb_header_unclone(skb, GFP_ATOMIC);
skb               165 net/ipv4/ip_tunnel_core.c 		skb_shinfo(skb)->gso_type |= gso_type_mask;
skb               169 net/ipv4/ip_tunnel_core.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               170 net/ipv4/ip_tunnel_core.c 		skb->ip_summed = CHECKSUM_NONE;
skb               176 net/ipv4/ip_tunnel_core.c 		skb->encapsulation = 0;
skb               288 net/ipv4/ip_tunnel_core.c static int ip_tun_fill_encap_info(struct sk_buff *skb,
skb               293 net/ipv4/ip_tunnel_core.c 	if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id,
skb               295 net/ipv4/ip_tunnel_core.c 	    nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
skb               296 net/ipv4/ip_tunnel_core.c 	    nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
skb               297 net/ipv4/ip_tunnel_core.c 	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
skb               298 net/ipv4/ip_tunnel_core.c 	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
skb               299 net/ipv4/ip_tunnel_core.c 	    nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
skb               388 net/ipv4/ip_tunnel_core.c static int ip6_tun_fill_encap_info(struct sk_buff *skb,
skb               393 net/ipv4/ip_tunnel_core.c 	if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id,
skb               395 net/ipv4/ip_tunnel_core.c 	    nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
skb               396 net/ipv4/ip_tunnel_core.c 	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
skb               397 net/ipv4/ip_tunnel_core.c 	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
skb               398 net/ipv4/ip_tunnel_core.c 	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
skb               399 net/ipv4/ip_tunnel_core.c 	    nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
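
Each of these fill routines has a companion size callback used when the netlink message is allocated; it simply sums one nla_total_size() term per attribute emitted. A hedged paraphrase for the IPv4 variant (the real callback takes the lwtunnel state as an argument, dropped here for brevity):

    /* One term per attribute put by ip_tun_fill_encap_info() above. */
    static int ip_tun_encap_nlsize(void)
    {
        return nla_total_size_64bit(8)   /* LWTUNNEL_IP_ID    */
             + nla_total_size(4)         /* LWTUNNEL_IP_DST   */
             + nla_total_size(4)         /* LWTUNNEL_IP_SRC   */
             + nla_total_size(1)         /* LWTUNNEL_IP_TOS   */
             + nla_total_size(1)         /* LWTUNNEL_IP_TTL   */
             + nla_total_size(2);        /* LWTUNNEL_IP_FLAGS */
    }
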
skb                47 net/ipv4/ip_vti.c static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
skb                51 net/ipv4/ip_vti.c 	const struct iphdr *iph = ip_hdr(skb);
skb                52 net/ipv4/ip_vti.c 	struct net *net = dev_net(skb->dev);
skb                55 net/ipv4/ip_vti.c 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
skb                58 net/ipv4/ip_vti.c 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
skb                61 net/ipv4/ip_vti.c 		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
skb                64 net/ipv4/ip_vti.c 			skb->dev = tunnel->dev;
skb                66 net/ipv4/ip_vti.c 		return xfrm_input(skb, nexthdr, spi, encap_type);
skb                71 net/ipv4/ip_vti.c 	kfree_skb(skb);
skb                75 net/ipv4/ip_vti.c static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
skb                78 net/ipv4/ip_vti.c 	return vti_input(skb, nexthdr, spi, encap_type, false);
skb                81 net/ipv4/ip_vti.c static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev)
skb                83 net/ipv4/ip_vti.c 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
skb                84 net/ipv4/ip_vti.c 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
skb                86 net/ipv4/ip_vti.c 	return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev);
skb                89 net/ipv4/ip_vti.c static int vti_rcv_proto(struct sk_buff *skb)
skb                91 net/ipv4/ip_vti.c 	return vti_rcv(skb, 0, false);
skb                94 net/ipv4/ip_vti.c static int vti_rcv_tunnel(struct sk_buff *skb)
skb                96 net/ipv4/ip_vti.c 	struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id);
skb                97 net/ipv4/ip_vti.c 	const struct iphdr *iph = ip_hdr(skb);
skb               100 net/ipv4/ip_vti.c 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
skb               107 net/ipv4/ip_vti.c 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
skb               109 net/ipv4/ip_vti.c 		if (iptunnel_pull_header(skb, 0, tpi.proto, false))
skb               111 net/ipv4/ip_vti.c 		return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false);
skb               116 net/ipv4/ip_vti.c 	kfree_skb(skb);
skb               120 net/ipv4/ip_vti.c static int vti_rcv_cb(struct sk_buff *skb, int err)
skb               127 net/ipv4/ip_vti.c 	struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
skb               128 net/ipv4/ip_vti.c 	u32 orig_mark = skb->mark;
skb               143 net/ipv4/ip_vti.c 	x = xfrm_input_state(skb);
skb               148 net/ipv4/ip_vti.c 		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
skb               150 net/ipv4/ip_vti.c 			XFRM_INC_STATS(dev_net(skb->dev),
skb               158 net/ipv4/ip_vti.c 	skb->mark = be32_to_cpu(tunnel->parms.i_key);
skb               159 net/ipv4/ip_vti.c 	ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
skb               160 net/ipv4/ip_vti.c 	skb->mark = orig_mark;
skb               165 net/ipv4/ip_vti.c 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
skb               166 net/ipv4/ip_vti.c 	skb->dev = dev;
skb               172 net/ipv4/ip_vti.c 	tstats->rx_bytes += skb->len;
skb               199 net/ipv4/ip_vti.c static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
skb               204 net/ipv4/ip_vti.c 	struct dst_entry *dst = skb_dst(skb);
skb               206 net/ipv4/ip_vti.c 	int pkt_len = skb->len;
skb               211 net/ipv4/ip_vti.c 		switch (skb->protocol) {
skb               223 net/ipv4/ip_vti.c 			skb_dst_set(skb, dst);
skb               237 net/ipv4/ip_vti.c 			skb_dst_set(skb, dst);
skb               268 net/ipv4/ip_vti.c 	if (skb->len > mtu) {
skb               269 net/ipv4/ip_vti.c 		skb_dst_update_pmtu_no_confirm(skb, mtu);
skb               270 net/ipv4/ip_vti.c 		if (skb->protocol == htons(ETH_P_IP)) {
skb               271 net/ipv4/ip_vti.c 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
skb               277 net/ipv4/ip_vti.c 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb               284 net/ipv4/ip_vti.c 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
skb               285 net/ipv4/ip_vti.c 	skb_dst_set(skb, dst);
skb               286 net/ipv4/ip_vti.c 	skb->dev = skb_dst(skb)->dev;
skb               288 net/ipv4/ip_vti.c 	err = dst_output(tunnel->net, skb->sk, skb);
skb               295 net/ipv4/ip_vti.c 	dst_link_failure(skb);
skb               298 net/ipv4/ip_vti.c 	kfree_skb(skb);
skb               305 net/ipv4/ip_vti.c static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
skb               310 net/ipv4/ip_vti.c 	if (!pskb_inet_may_pull(skb))
skb               315 net/ipv4/ip_vti.c 	switch (skb->protocol) {
skb               317 net/ipv4/ip_vti.c 		xfrm_decode_session(skb, &fl, AF_INET);
skb               318 net/ipv4/ip_vti.c 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
skb               321 net/ipv4/ip_vti.c 		xfrm_decode_session(skb, &fl, AF_INET6);
skb               322 net/ipv4/ip_vti.c 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
skb               331 net/ipv4/ip_vti.c 	return vti_xmit(skb, dev, &fl);
skb               335 net/ipv4/ip_vti.c 	kfree_skb(skb);
skb               339 net/ipv4/ip_vti.c static int vti4_err(struct sk_buff *skb, u32 info)
skb               348 net/ipv4/ip_vti.c 	struct net *net = dev_net(skb->dev);
skb               349 net/ipv4/ip_vti.c 	const struct iphdr *iph = (const struct iphdr *)skb->data;
skb               353 net/ipv4/ip_vti.c 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
skb               362 net/ipv4/ip_vti.c 		esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
skb               366 net/ipv4/ip_vti.c 		ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
skb               370 net/ipv4/ip_vti.c 		ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
skb               377 net/ipv4/ip_vti.c 	switch (icmp_hdr(skb)->type) {
skb               379 net/ipv4/ip_vti.c 		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
skb               392 net/ipv4/ip_vti.c 	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
skb               393 net/ipv4/ip_vti.c 		ipv4_update_pmtu(skb, net, info, 0, protocol);
skb               395 net/ipv4/ip_vti.c 		ipv4_redirect(skb, net, 0, protocol);
skb               615 net/ipv4/ip_vti.c static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb               620 net/ipv4/ip_vti.c 	if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
skb               621 net/ipv4/ip_vti.c 	    nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
skb               622 net/ipv4/ip_vti.c 	    nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
skb               623 net/ipv4/ip_vti.c 	    nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
skb               624 net/ipv4/ip_vti.c 	    nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr) ||
skb               625 net/ipv4/ip_vti.c 	    nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark))
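
A pattern worth noting across the vti functions above: the tunnel keys double as firewall marks for the XFRM policy lookup. vti_rcv_cb() temporarily substitutes i_key for skb->mark around xfrm_policy_check() and then restores the original, while the transmit path seeds the flow's mark from o_key, so SPD entries keyed on fwmark select the right SA per VTI device. In miniature:

    /* Hedged sketch of the VTI key<->mark convention (field names as in
     * struct ip_tunnel_parm, cf. vti_fill_info() above). */
    static u32 vti_rx_mark(const struct ip_tunnel *t)
    {
        return be32_to_cpu(t->parms.i_key);   /* used for policy check */
    }

    static u32 vti_tx_mark(const struct ip_tunnel *t)
    {
        return be32_to_cpu(t->parms.o_key);   /* seeds fl4.flowi4_mark */
    }
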
skb                22 net/ipv4/ipcomp.c static int ipcomp4_err(struct sk_buff *skb, u32 info)
skb                24 net/ipv4/ipcomp.c 	struct net *net = dev_net(skb->dev);
skb                26 net/ipv4/ipcomp.c 	const struct iphdr *iph = (const struct iphdr *)skb->data;
skb                27 net/ipv4/ipcomp.c 	struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
skb                30 net/ipv4/ipcomp.c 	switch (icmp_hdr(skb)->type) {
skb                32 net/ipv4/ipcomp.c 		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
skb                41 net/ipv4/ipcomp.c 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
skb                46 net/ipv4/ipcomp.c 	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
skb                47 net/ipv4/ipcomp.c 		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_COMP);
skb                49 net/ipv4/ipcomp.c 		ipv4_redirect(skb, net, 0, IPPROTO_COMP);
skb               148 net/ipv4/ipcomp.c static int ipcomp4_rcv_cb(struct sk_buff *skb, int err)
skb               454 net/ipv4/ipconfig.c static int ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev);
skb               475 net/ipv4/ipconfig.c ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
skb               486 net/ipv4/ipconfig.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               487 net/ipv4/ipconfig.c 	if (!skb)
skb               490 net/ipv4/ipconfig.c 	if (!pskb_may_pull(skb, sizeof(struct arphdr)))
skb               494 net/ipv4/ipconfig.c 	rarp = (struct arphdr *)skb_transport_header(skb);
skb               510 net/ipv4/ipconfig.c 	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
skb               514 net/ipv4/ipconfig.c 	rarp = (struct arphdr *)skb_transport_header(skb);
skb               561 net/ipv4/ipconfig.c 	kfree_skb(skb);
skb               637 net/ipv4/ipconfig.c static int ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev);
skb               799 net/ipv4/ipconfig.c 	struct sk_buff *skb;
skb               806 net/ipv4/ipconfig.c 	skb = alloc_skb(sizeof(struct bootp_pkt) + hlen + tlen + 15,
skb               808 net/ipv4/ipconfig.c 	if (!skb)
skb               810 net/ipv4/ipconfig.c 	skb_reserve(skb, hlen);
skb               811 net/ipv4/ipconfig.c 	b = skb_put_zero(skb, sizeof(struct bootp_pkt));
skb               814 net/ipv4/ipconfig.c 	skb_reset_network_header(skb);
skb               815 net/ipv4/ipconfig.c 	h = ip_hdr(skb);
skb               858 net/ipv4/ipconfig.c 	skb->dev = dev;
skb               859 net/ipv4/ipconfig.c 	skb->protocol = htons(ETH_P_IP);
skb               860 net/ipv4/ipconfig.c 	if (dev_hard_header(skb, dev, ntohs(skb->protocol),
skb               861 net/ipv4/ipconfig.c 			    dev->broadcast, dev->dev_addr, skb->len) < 0) {
skb               862 net/ipv4/ipconfig.c 		kfree_skb(skb);
skb               867 net/ipv4/ipconfig.c 	if (dev_queue_xmit(skb) < 0)
skb               958 net/ipv4/ipconfig.c static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
skb               969 net/ipv4/ipconfig.c 	if (skb->pkt_type == PACKET_OTHERHOST)
skb               972 net/ipv4/ipconfig.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               973 net/ipv4/ipconfig.c 	if (!skb)
skb               976 net/ipv4/ipconfig.c 	if (!pskb_may_pull(skb,
skb               981 net/ipv4/ipconfig.c 	b = (struct bootp_pkt *)skb_network_header(skb);
skb               993 net/ipv4/ipconfig.c 	if (skb->len < ntohs(h->tot_len))
skb              1014 net/ipv4/ipconfig.c 	if (!pskb_may_pull(skb, skb->len))
skb              1017 net/ipv4/ipconfig.c 	b = (struct bootp_pkt *)skb_network_header(skb);
skb              1144 net/ipv4/ipconfig.c 	kfree_skb(skb);
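
The BOOTP transmit helper above builds the whole frame by hand, zero-filling a struct bootp_pkt (outer iphdr plus udphdr plus the BOOTP body) and pushing it out with dev_queue_xmit(), since no IP configuration exists yet to route through. The fixed BOOTP body it populates is the classic RFC 951/2131 layout; a reference rendering with the conventional field names (not the kernel's own struct):

    #include <stdint.h>

    /* BOOTP/DHCP fixed header, RFC 951 / RFC 2131 (sketch). */
    struct bootp_hdr {
        uint8_t  op;            /* 1 = BOOTREQUEST, 2 = BOOTREPLY */
        uint8_t  htype;         /* hardware type, 1 = Ethernet    */
        uint8_t  hlen;          /* hardware address length, 6     */
        uint8_t  hops;
        uint32_t xid;           /* transaction id                 */
        uint16_t secs;
        uint16_t flags;
        uint32_t ciaddr, yiaddr, siaddr, giaddr;
        uint8_t  chaddr[16];    /* client hardware address        */
        uint8_t  sname[64];
        uint8_t  file[128];
        uint8_t  options[312];  /* DHCP options, 99.130.83.99 cookie first */
    };
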
skb               124 net/ipv4/ipip.c static int ipip_err(struct sk_buff *skb, u32 info)
skb               130 net/ipv4/ipip.c 	struct net *net = dev_net(skb->dev);
skb               132 net/ipv4/ipip.c 	const struct iphdr *iph = (const struct iphdr *)skb->data;
skb               133 net/ipv4/ipip.c 	const int type = icmp_hdr(skb)->type;
skb               134 net/ipv4/ipip.c 	const int code = icmp_hdr(skb)->code;
skb               138 net/ipv4/ipip.c 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
skb               173 net/ipv4/ipip.c 		ipv4_update_pmtu(skb, net, info, t->parms.link, iph->protocol);
skb               178 net/ipv4/ipip.c 		ipv4_redirect(skb, net, t->parms.link, iph->protocol);
skb               212 net/ipv4/ipip.c static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
skb               214 net/ipv4/ipip.c 	struct net *net = dev_net(skb->dev);
skb               220 net/ipv4/ipip.c 	iph = ip_hdr(skb);
skb               221 net/ipv4/ipip.c 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
skb               230 net/ipv4/ipip.c 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
skb               238 net/ipv4/ipip.c 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
skb               241 net/ipv4/ipip.c 			tun_dst = ip_tun_rx_dst(skb, 0, 0, 0);
skb               245 net/ipv4/ipip.c 		return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
skb               251 net/ipv4/ipip.c 	kfree_skb(skb);
skb               255 net/ipv4/ipip.c static int ipip_rcv(struct sk_buff *skb)
skb               257 net/ipv4/ipip.c 	return ipip_tunnel_rcv(skb, IPPROTO_IPIP);
skb               261 net/ipv4/ipip.c static int mplsip_rcv(struct sk_buff *skb)
skb               263 net/ipv4/ipip.c 	return ipip_tunnel_rcv(skb, IPPROTO_MPLS);
skb               271 net/ipv4/ipip.c static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
skb               278 net/ipv4/ipip.c 	if (!pskb_inet_may_pull(skb))
skb               281 net/ipv4/ipip.c 	switch (skb->protocol) {
skb               297 net/ipv4/ipip.c 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
skb               300 net/ipv4/ipip.c 	skb_set_inner_ipproto(skb, ipproto);
skb               303 net/ipv4/ipip.c 		ip_md_tunnel_xmit(skb, dev, ipproto, 0);
skb               305 net/ipv4/ipip.c 		ip_tunnel_xmit(skb, dev, tiph, ipproto);
skb               309 net/ipv4/ipip.c 	kfree_skb(skb);
skb               574 net/ipv4/ipip.c static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb               579 net/ipv4/ipip.c 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
skb               580 net/ipv4/ipip.c 	    nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
skb               581 net/ipv4/ipip.c 	    nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
skb               582 net/ipv4/ipip.c 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
skb               583 net/ipv4/ipip.c 	    nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
skb               584 net/ipv4/ipip.c 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
skb               585 net/ipv4/ipip.c 	    nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
skb               587 net/ipv4/ipip.c 	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark))
skb               590 net/ipv4/ipip.c 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
skb               592 net/ipv4/ipip.c 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
skb               594 net/ipv4/ipip.c 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
skb               596 net/ipv4/ipip.c 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
skb               601 net/ipv4/ipip.c 		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
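
ipip_tunnel_xmit() supports two payload families over the same IPv4 outer header: plain IPv4-in-IPv4 and MPLS-in-IP. The inner skb protocol picks the outer protocol number, and anything else is dropped on the tx_error path. Isolated:

    #include <linux/if_ether.h>
    #include <linux/in.h>

    /* Hedged sketch of the protocol selection in ipip_tunnel_xmit(). */
    static int ipip_outer_proto(__be16 inner_proto, u8 *ipproto)
    {
        switch (ntohs(inner_proto)) {
        case ETH_P_IP:
            *ipproto = IPPROTO_IPIP;   /* protocol 4   */
            return 0;
        case ETH_P_MPLS_UC:
            *ipproto = IPPROTO_MPLS;   /* protocol 137 */
            return 0;
        default:
            return -1;                 /* tx_error: free the skb */
        }
    }
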
skb               101 net/ipv4/ipmr.c 			  struct net_device *dev, struct sk_buff *skb,
skb               200 net/ipv4/ipmr.c static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
skb               213 net/ipv4/ipmr.c static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
skb               518 net/ipv4/ipmr.c static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
skb               524 net/ipv4/ipmr.c 		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
skb               525 net/ipv4/ipmr.c 		.flowi4_mark	= skb->mark,
skb               531 net/ipv4/ipmr.c 		kfree_skb(skb);
skb               536 net/ipv4/ipmr.c 	dev->stats.tx_bytes += skb->len;
skb               538 net/ipv4/ipmr.c 	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
skb               540 net/ipv4/ipmr.c 	kfree_skb(skb);
skb               601 net/ipv4/ipmr.c static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
skb               607 net/ipv4/ipmr.c 	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
skb               615 net/ipv4/ipmr.c 	    ntohs(encap->tot_len) + pimlen > skb->len)
skb               626 net/ipv4/ipmr.c 	skb->mac_header = skb->network_header;
skb               627 net/ipv4/ipmr.c 	skb_pull(skb, (u8 *)encap - skb->data);
skb               628 net/ipv4/ipmr.c 	skb_reset_network_header(skb);
skb               629 net/ipv4/ipmr.c 	skb->protocol = htons(ETH_P_IP);
skb               630 net/ipv4/ipmr.c 	skb->ip_summed = CHECKSUM_NONE;
skb               632 net/ipv4/ipmr.c 	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
skb               634 net/ipv4/ipmr.c 	netif_rx(skb);
skb               744 net/ipv4/ipmr.c 	struct sk_buff *skb;
skb               749 net/ipv4/ipmr.c 	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) {
skb               750 net/ipv4/ipmr.c 		if (ip_hdr(skb)->version == 0) {
skb               751 net/ipv4/ipmr.c 			struct nlmsghdr *nlh = skb_pull(skb,
skb               755 net/ipv4/ipmr.c 			skb_trim(skb, nlh->nlmsg_len);
skb               760 net/ipv4/ipmr.c 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
skb               762 net/ipv4/ipmr.c 			kfree_skb(skb);
skb              1006 net/ipv4/ipmr.c 	struct sk_buff *skb;
skb              1010 net/ipv4/ipmr.c 	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
skb              1011 net/ipv4/ipmr.c 		if (ip_hdr(skb)->version == 0) {
skb              1012 net/ipv4/ipmr.c 			struct nlmsghdr *nlh = skb_pull(skb,
skb              1015 net/ipv4/ipmr.c 			if (mr_fill_mroute(mrt, skb, &c->_c,
skb              1017 net/ipv4/ipmr.c 				nlh->nlmsg_len = skb_tail_pointer(skb) -
skb              1022 net/ipv4/ipmr.c 				skb_trim(skb, nlh->nlmsg_len);
skb              1028 net/ipv4/ipmr.c 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
skb              1030 net/ipv4/ipmr.c 			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
skb              1046 net/ipv4/ipmr.c 	struct sk_buff *skb;
skb              1050 net/ipv4/ipmr.c 		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
skb              1052 net/ipv4/ipmr.c 		skb = alloc_skb(128, GFP_ATOMIC);
skb              1054 net/ipv4/ipmr.c 	if (!skb)
skb              1063 net/ipv4/ipmr.c 		skb_push(skb, sizeof(struct iphdr));
skb              1064 net/ipv4/ipmr.c 		skb_reset_network_header(skb);
skb              1065 net/ipv4/ipmr.c 		skb_reset_transport_header(skb);
skb              1066 net/ipv4/ipmr.c 		msg = (struct igmpmsg *)skb_network_header(skb);
skb              1074 net/ipv4/ipmr.c 		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
skb              1075 net/ipv4/ipmr.c 		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
skb              1079 net/ipv4/ipmr.c 		skb_set_network_header(skb, skb->len);
skb              1080 net/ipv4/ipmr.c 		skb_put(skb, ihl);
skb              1081 net/ipv4/ipmr.c 		skb_copy_to_linear_data(skb, pkt->data, ihl);
skb              1083 net/ipv4/ipmr.c 		ip_hdr(skb)->protocol = 0;
skb              1084 net/ipv4/ipmr.c 		msg = (struct igmpmsg *)skb_network_header(skb);
skb              1086 net/ipv4/ipmr.c 		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
skb              1088 net/ipv4/ipmr.c 		igmp = skb_put(skb, sizeof(struct igmphdr));
skb              1092 net/ipv4/ipmr.c 		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
skb              1093 net/ipv4/ipmr.c 		skb->transport_header = skb->network_header;
skb              1100 net/ipv4/ipmr.c 		kfree_skb(skb);
skb              1104 net/ipv4/ipmr.c 	igmpmsg_netlink_event(mrt, skb);
skb              1107 net/ipv4/ipmr.c 	ret = sock_queue_rcv_skb(mroute_sk, skb);
skb              1111 net/ipv4/ipmr.c 		kfree_skb(skb);
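
ipmr_cache_report() is the kernel-to-daemon upcall: the packet is prefixed with a struct igmpmsg (an IP header whose protocol byte is zeroed and whose spare fields carry the message type and incoming VIF) and queued on the multicast routing control socket. A daemon that has claimed that socket reads the upcall like this (standard <linux/mroute.h> API, error handling trimmed):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/mroute.h>

    /* Read one upcall (e.g. IGMPMSG_NOCACHE) from the mroute socket. */
    static void read_upcall(int mrt_fd)
    {
        char buf[2048];
        struct igmpmsg *m = (struct igmpmsg *)buf;
        ssize_t n = read(mrt_fd, buf, sizeof(buf));

        if (n < (ssize_t)sizeof(*m))
            return;
        if (m->im_msgtype == IGMPMSG_NOCACHE)
            printf("no route: vif=%u src=%x dst=%x\n", m->im_vif,
                   (unsigned)m->im_src.s_addr, (unsigned)m->im_dst.s_addr);
    }

The socket itself is a raw IGMP socket on which the daemon has done setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one)).
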
skb              1119 net/ipv4/ipmr.c 				 struct sk_buff *skb, struct net_device *dev)
skb              1121 net/ipv4/ipmr.c 	const struct iphdr *iph = ip_hdr(skb);
skb              1141 net/ipv4/ipmr.c 			kfree_skb(skb);
skb              1151 net/ipv4/ipmr.c 		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
skb              1160 net/ipv4/ipmr.c 			kfree_skb(skb);
skb              1175 net/ipv4/ipmr.c 		kfree_skb(skb);
skb              1179 net/ipv4/ipmr.c 			skb->dev = dev;
skb              1180 net/ipv4/ipmr.c 			skb->skb_iif = dev->ifindex;
skb              1182 net/ipv4/ipmr.c 		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
skb              1773 net/ipv4/ipmr.c static void ip_encap(struct net *net, struct sk_buff *skb,
skb              1777 net/ipv4/ipmr.c 	const struct iphdr *old_iph = ip_hdr(skb);
skb              1779 net/ipv4/ipmr.c 	skb_push(skb, sizeof(struct iphdr));
skb              1780 net/ipv4/ipmr.c 	skb->transport_header = skb->network_header;
skb              1781 net/ipv4/ipmr.c 	skb_reset_network_header(skb);
skb              1782 net/ipv4/ipmr.c 	iph = ip_hdr(skb);
skb              1792 net/ipv4/ipmr.c 	iph->tot_len	=	htons(skb->len);
skb              1793 net/ipv4/ipmr.c 	ip_select_ident(net, skb, NULL);
skb              1796 net/ipv4/ipmr.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb              1797 net/ipv4/ipmr.c 	nf_reset_ct(skb);
skb              1801 net/ipv4/ipmr.c 				      struct sk_buff *skb)
skb              1803 net/ipv4/ipmr.c 	struct ip_options *opt = &(IPCB(skb)->opt);
skb              1806 net/ipv4/ipmr.c 	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
skb              1809 net/ipv4/ipmr.c 		ip_forward_options(skb);
skb              1811 net/ipv4/ipmr.c 	return dst_output(net, sk, skb);
skb              1815 net/ipv4/ipmr.c static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
skb              1821 net/ipv4/ipmr.c 	if (!skb->offload_l3_fwd_mark)
skb              1829 net/ipv4/ipmr.c static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
skb              1839 net/ipv4/ipmr.c 			    int in_vifi, struct sk_buff *skb, int vifi)
skb              1841 net/ipv4/ipmr.c 	const struct iphdr *iph = ip_hdr(skb);
skb              1853 net/ipv4/ipmr.c 		vif->bytes_out += skb->len;
skb              1854 net/ipv4/ipmr.c 		vif->dev->stats.tx_bytes += skb->len;
skb              1856 net/ipv4/ipmr.c 		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
skb              1860 net/ipv4/ipmr.c 	if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
skb              1883 net/ipv4/ipmr.c 	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
skb              1895 net/ipv4/ipmr.c 	if (skb_cow(skb, encap)) {
skb              1901 net/ipv4/ipmr.c 	vif->bytes_out += skb->len;
skb              1903 net/ipv4/ipmr.c 	skb_dst_drop(skb);
skb              1904 net/ipv4/ipmr.c 	skb_dst_set(skb, &rt->dst);
skb              1905 net/ipv4/ipmr.c 	ip_decrease_ttl(ip_hdr(skb));
skb              1911 net/ipv4/ipmr.c 		ip_encap(net, skb, vif->local, vif->remote);
skb              1914 net/ipv4/ipmr.c 		vif->dev->stats.tx_bytes += skb->len;
skb              1917 net/ipv4/ipmr.c 	IPCB(skb)->flags |= IPSKB_FORWARDED;
skb              1930 net/ipv4/ipmr.c 		net, NULL, skb, skb->dev, dev,
skb              1935 net/ipv4/ipmr.c 	kfree_skb(skb);
skb              1951 net/ipv4/ipmr.c 			  struct net_device *dev, struct sk_buff *skb,
skb              1960 net/ipv4/ipmr.c 	c->_c.mfc_un.res.bytes += skb->len;
skb              1977 net/ipv4/ipmr.c 		if (rt_is_output_route(skb_rtable(skb))) {
skb              2006 net/ipv4/ipmr.c 			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
skb              2008 net/ipv4/ipmr.c 				ipmr_cache_report(mrt, skb, true_vifi,
skb              2016 net/ipv4/ipmr.c 	mrt->vif_table[vif].bytes_in += skb->len;
skb              2023 net/ipv4/ipmr.c 		    ip_hdr(skb)->ttl >
skb              2039 net/ipv4/ipmr.c 		    ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
skb              2041 net/ipv4/ipmr.c 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
skb              2053 net/ipv4/ipmr.c 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
skb              2059 net/ipv4/ipmr.c 			ipmr_queue_xmit(net, mrt, true_vifi, skb, psend);
skb              2066 net/ipv4/ipmr.c 		kfree_skb(skb);
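
The fan-out at the tail of ip_mr_forward() implements TTL-scoped multicast: each output VIF carries a TTL threshold, the packet is cloned to every VIF whose threshold its remaining TTL exceeds, and the last match consumes the original skb. The control flow, abstracted (xmit() is a hypothetical stand-in for ipmr_queue_xmit()):

    /* Hedged sketch of ip_mr_forward()'s forwarding loop. */
    static void mfc_fanout(unsigned char ttl, const unsigned char *ttls,
                           int minvif, int maxvif,
                           void (*xmit)(int vif, int is_clone))
    {
        int ct, psend = -1;

        for (ct = maxvif - 1; ct >= minvif; ct--) {
            if (ttl > ttls[ct]) {
                if (psend != -1)
                    xmit(psend, 1);   /* clone for the earlier match */
                psend = ct;
            }
        }
        if (psend != -1)
            xmit(psend, 0);           /* the original skb goes last  */
    }
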
skb              2069 net/ipv4/ipmr.c static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
skb              2071 net/ipv4/ipmr.c 	struct rtable *rt = skb_rtable(skb);
skb              2072 net/ipv4/ipmr.c 	struct iphdr *iph = ip_hdr(skb);
skb              2078 net/ipv4/ipmr.c 			       skb->dev->ifindex : 0),
skb              2081 net/ipv4/ipmr.c 			       skb->dev->ifindex),
skb              2082 net/ipv4/ipmr.c 		.flowi4_mark = skb->mark,
skb              2096 net/ipv4/ipmr.c int ip_mr_input(struct sk_buff *skb)
skb              2099 net/ipv4/ipmr.c 	struct net *net = dev_net(skb->dev);
skb              2100 net/ipv4/ipmr.c 	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
skb              2108 net/ipv4/ipmr.c 	dev = skb->dev;
skb              2109 net/ipv4/ipmr.c 	if (netif_is_l3_master(skb->dev)) {
skb              2110 net/ipv4/ipmr.c 		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
skb              2112 net/ipv4/ipmr.c 			kfree_skb(skb);
skb              2120 net/ipv4/ipmr.c 	if (IPCB(skb)->flags & IPSKB_FORWARDED)
skb              2123 net/ipv4/ipmr.c 	mrt = ipmr_rt_fib_lookup(net, skb);
skb              2125 net/ipv4/ipmr.c 		kfree_skb(skb);
skb              2129 net/ipv4/ipmr.c 		if (IPCB(skb)->opt.router_alert) {
skb              2130 net/ipv4/ipmr.c 			if (ip_call_ra_chain(skb))
skb              2132 net/ipv4/ipmr.c 		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
skb              2143 net/ipv4/ipmr.c 				nf_reset_ct(skb);
skb              2144 net/ipv4/ipmr.c 				raw_rcv(mroute_sk, skb);
skb              2151 net/ipv4/ipmr.c 	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
skb              2156 net/ipv4/ipmr.c 			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
skb              2165 net/ipv4/ipmr.c 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
skb              2166 net/ipv4/ipmr.c 			ip_local_deliver(skb);
skb              2169 net/ipv4/ipmr.c 			skb = skb2;
skb              2175 net/ipv4/ipmr.c 			int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
skb              2181 net/ipv4/ipmr.c 		kfree_skb(skb);
skb              2186 net/ipv4/ipmr.c 	ip_mr_forward(net, mrt, dev, skb, cache, local);
skb              2190 net/ipv4/ipmr.c 		return ip_local_deliver(skb);
skb              2196 net/ipv4/ipmr.c 		return ip_local_deliver(skb);
skb              2197 net/ipv4/ipmr.c 	kfree_skb(skb);
skb              2203 net/ipv4/ipmr.c int pim_rcv_v1(struct sk_buff *skb)
skb              2206 net/ipv4/ipmr.c 	struct net *net = dev_net(skb->dev);
skb              2209 net/ipv4/ipmr.c 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
skb              2212 net/ipv4/ipmr.c 	pim = igmp_hdr(skb);
skb              2214 net/ipv4/ipmr.c 	mrt = ipmr_rt_fib_lookup(net, skb);
skb              2221 net/ipv4/ipmr.c 	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
skb              2223 net/ipv4/ipmr.c 		kfree_skb(skb);
skb              2230 net/ipv4/ipmr.c static int pim_rcv(struct sk_buff *skb)
skb              2233 net/ipv4/ipmr.c 	struct net *net = dev_net(skb->dev);
skb              2236 net/ipv4/ipmr.c 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
skb              2239 net/ipv4/ipmr.c 	pim = (struct pimreghdr *)skb_transport_header(skb);
skb              2243 net/ipv4/ipmr.c 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
skb              2246 net/ipv4/ipmr.c 	mrt = ipmr_rt_fib_lookup(net, skb);
skb              2249 net/ipv4/ipmr.c 	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
skb              2251 net/ipv4/ipmr.c 		kfree_skb(skb);
skb              2257 net/ipv4/ipmr.c int ipmr_get_route(struct net *net, struct sk_buff *skb,
skb              2271 net/ipv4/ipmr.c 	if (!cache && skb->dev) {
skb              2272 net/ipv4/ipmr.c 		int vif = ipmr_find_vif(mrt, skb->dev);
skb              2283 net/ipv4/ipmr.c 		dev = skb->dev;
skb              2293 net/ipv4/ipmr.c 		skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
skb              2315 net/ipv4/ipmr.c 	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
skb              2321 net/ipv4/ipmr.c static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
skb              2329 net/ipv4/ipmr.c 	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
skb              2339 net/ipv4/ipmr.c 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
skb              2349 net/ipv4/ipmr.c 	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
skb              2350 net/ipv4/ipmr.c 	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
skb              2352 net/ipv4/ipmr.c 	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
skb              2357 net/ipv4/ipmr.c 	nlmsg_end(skb, nlh);
skb              2361 net/ipv4/ipmr.c 	nlmsg_cancel(skb, nlh);
skb              2365 net/ipv4/ipmr.c static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
skb              2369 net/ipv4/ipmr.c 	return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
skb              2398 net/ipv4/ipmr.c 	struct sk_buff *skb;
skb              2401 net/ipv4/ipmr.c 	skb = nlmsg_new(mroute_msgsize(mfc->_c.mfc_parent >= MAXVIFS,
skb              2404 net/ipv4/ipmr.c 	if (!skb)
skb              2407 net/ipv4/ipmr.c 	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
skb              2411 net/ipv4/ipmr.c 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
skb              2415 net/ipv4/ipmr.c 	kfree_skb(skb);
skb              2441 net/ipv4/ipmr.c 	struct sk_buff *skb;
skb              2448 net/ipv4/ipmr.c 	skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
skb              2449 net/ipv4/ipmr.c 	if (!skb)
skb              2452 net/ipv4/ipmr.c 	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
skb              2458 net/ipv4/ipmr.c 	if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
skb              2459 net/ipv4/ipmr.c 	    nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
skb              2460 net/ipv4/ipmr.c 	    nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
skb              2462 net/ipv4/ipmr.c 	    nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
skb              2466 net/ipv4/ipmr.c 	nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
skb              2471 net/ipv4/ipmr.c 	nlmsg_end(skb, nlh);
skb              2473 net/ipv4/ipmr.c 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
skb              2477 net/ipv4/ipmr.c 	nlmsg_cancel(skb, nlh);
skb              2479 net/ipv4/ipmr.c 	kfree_skb(skb);
skb              2483 net/ipv4/ipmr.c static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
skb              2496 net/ipv4/ipmr.c 	if (!netlink_strict_get_check(skb))
skb              2543 net/ipv4/ipmr.c 	struct sk_buff *skb = NULL;
skb              2573 net/ipv4/ipmr.c 	skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
skb              2574 net/ipv4/ipmr.c 	if (!skb) {
skb              2579 net/ipv4/ipmr.c 	err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
skb              2585 net/ipv4/ipmr.c 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
skb              2591 net/ipv4/ipmr.c 	kfree_skb(skb);
skb              2595 net/ipv4/ipmr.c static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
skb              2601 net/ipv4/ipmr.c 		err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
skb              2610 net/ipv4/ipmr.c 		mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
skb              2613 net/ipv4/ipmr.c 				return skb->len;
skb              2618 net/ipv4/ipmr.c 		err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
skb              2620 net/ipv4/ipmr.c 		return skb->len ? : err;
skb              2623 net/ipv4/ipmr.c 	return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
skb              2733 net/ipv4/ipmr.c static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2736 net/ipv4/ipmr.c 	struct net *net = sock_net(skb->sk);
skb              2754 net/ipv4/ipmr.c static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
skb              2758 net/ipv4/ipmr.c 	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
skb              2759 net/ipv4/ipmr.c 	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
skb              2760 net/ipv4/ipmr.c 	    nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
skb              2762 net/ipv4/ipmr.c 	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
skb              2764 net/ipv4/ipmr.c 	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) ||
skb              2765 net/ipv4/ipmr.c 	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
skb              2772 net/ipv4/ipmr.c static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
skb              2782 net/ipv4/ipmr.c 	vif_nest = nla_nest_start_noflag(skb, IPMRA_VIF);
skb              2785 net/ipv4/ipmr.c 	if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
skb              2786 net/ipv4/ipmr.c 	    nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
skb              2787 net/ipv4/ipmr.c 	    nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
skb              2788 net/ipv4/ipmr.c 	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
skb              2790 net/ipv4/ipmr.c 	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
skb              2792 net/ipv4/ipmr.c 	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
skb              2794 net/ipv4/ipmr.c 	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
skb              2796 net/ipv4/ipmr.c 	    nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
skb              2797 net/ipv4/ipmr.c 	    nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
skb              2798 net/ipv4/ipmr.c 		nla_nest_cancel(skb, vif_nest);
skb              2801 net/ipv4/ipmr.c 	nla_nest_end(skb, vif_nest);
skb              2831 net/ipv4/ipmr.c static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
skb              2833 net/ipv4/ipmr.c 	struct net *net = sock_net(skb->sk);
skb              2856 net/ipv4/ipmr.c 		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
skb              2866 net/ipv4/ipmr.c 		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
skb              2868 net/ipv4/ipmr.c 			nlmsg_cancel(skb, nlh);
skb              2872 net/ipv4/ipmr.c 		if (!ipmr_fill_table(mrt, skb)) {
skb              2873 net/ipv4/ipmr.c 			nlmsg_cancel(skb, nlh);
skb              2877 net/ipv4/ipmr.c 		vifs = nla_nest_start_noflag(skb, IPMRA_TABLE_VIFS);
skb              2879 net/ipv4/ipmr.c 			nla_nest_end(skb, af);
skb              2880 net/ipv4/ipmr.c 			nlmsg_end(skb, nlh);
skb              2886 net/ipv4/ipmr.c 			if (!ipmr_fill_vif(mrt, i, skb)) {
skb              2887 net/ipv4/ipmr.c 				nla_nest_end(skb, vifs);
skb              2888 net/ipv4/ipmr.c 				nla_nest_end(skb, af);
skb              2889 net/ipv4/ipmr.c 				nlmsg_end(skb, nlh);
skb              2897 net/ipv4/ipmr.c 		nla_nest_end(skb, vifs);
skb              2898 net/ipv4/ipmr.c 		nla_nest_end(skb, af);
skb              2899 net/ipv4/ipmr.c 		nlmsg_end(skb, nlh);
skb              2908 net/ipv4/ipmr.c 	return skb->len;
skb               208 net/ipv4/ipmr_base.c int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
skb               224 net/ipv4/ipmr_base.c 	    nla_put_u32(skb, RTA_IIF,
skb               231 net/ipv4/ipmr_base.c 	mp_attr = nla_nest_start_noflag(skb, RTA_MULTIPATH);
skb               239 net/ipv4/ipmr_base.c 			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
skb               241 net/ipv4/ipmr_base.c 				nla_nest_cancel(skb, mp_attr);
skb               253 net/ipv4/ipmr_base.c 	nla_nest_end(skb, mp_attr);
skb               261 net/ipv4/ipmr_base.c 	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
skb               262 net/ipv4/ipmr_base.c 	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
skb               289 net/ipv4/ipmr_base.c int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
skb               291 net/ipv4/ipmr_base.c 		  int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
skb               311 net/ipv4/ipmr_base.c 		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
skb               327 net/ipv4/ipmr_base.c 		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
skb               344 net/ipv4/ipmr_base.c int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
skb               348 net/ipv4/ipmr_base.c 				 struct sk_buff *skb,
skb               354 net/ipv4/ipmr_base.c 	struct net *net = sock_net(skb->sk);
skb               364 net/ipv4/ipmr_base.c 			return skb->len;
skb               372 net/ipv4/ipmr_base.c 		err = mr_table_dump(mrt, skb, cb, fill, lock, filter);
skb               383 net/ipv4/ipmr_base.c 	return skb->len;
skb                20 net/ipv4/netfilter.c int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_type)
skb                22 net/ipv4/netfilter.c 	const struct iphdr *iph = ip_hdr(skb);
skb                26 net/ipv4/netfilter.c 	const struct sock *sk = skb_to_full_sk(skb);
skb                28 net/ipv4/netfilter.c 	struct net_device *dev = skb_dst(skb)->dev;
skb                47 net/ipv4/netfilter.c 	fl4.flowi4_mark = skb->mark;
skb                54 net/ipv4/netfilter.c 	skb_dst_drop(skb);
skb                55 net/ipv4/netfilter.c 	skb_dst_set(skb, &rt->dst);
skb                57 net/ipv4/netfilter.c 	if (skb_dst(skb)->error)
skb                58 net/ipv4/netfilter.c 		return skb_dst(skb)->error;
skb                61 net/ipv4/netfilter.c 	if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
skb                62 net/ipv4/netfilter.c 	    xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) {
skb                63 net/ipv4/netfilter.c 		struct dst_entry *dst = skb_dst(skb);
skb                64 net/ipv4/netfilter.c 		skb_dst_set(skb, NULL);
skb                68 net/ipv4/netfilter.c 		skb_dst_set(skb, dst);
skb                73 net/ipv4/netfilter.c 	hh_len = skb_dst(skb)->dev->hard_header_len;
skb                74 net/ipv4/netfilter.c 	if (skb_headroom(skb) < hh_len &&
skb                75 net/ipv4/netfilter.c 	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
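
The pskb_expand_head() call that closes ip_route_me_harder() is cut off mid-argument-list by the one-line-per-match format above; its tail, as a hedged reconstruction, is:

    hh_len = skb_dst(skb)->dev->hard_header_len;
    if (skb_headroom(skb) < hh_len &&
        pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
                         0, GFP_ATOMIC))
        return -ENOMEM;

    return 0;

The point: after NAT or mangle rewrote the addresses or the mark, the packet is re-routed, and the new egress device may demand more link-layer headroom than the old one, so the head is expanded before transmission.
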
skb               156 net/ipv4/netfilter/arp_tables.c arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
skb               182 net/ipv4/netfilter/arp_tables.c unsigned int arpt_do_table(struct sk_buff *skb,
skb               198 net/ipv4/netfilter/arp_tables.c 	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
skb               219 net/ipv4/netfilter/arp_tables.c 	arp = arp_hdr(skb);
skb               224 net/ipv4/netfilter/arp_tables.c 		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
skb               230 net/ipv4/netfilter/arp_tables.c 		ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);
skb               269 net/ipv4/netfilter/arp_tables.c 		verdict = t->u.kernel.target->target(skb, &acpar);
skb               273 net/ipv4/netfilter/arp_tables.c 			arp = arp_hdr(skb);
skb                13 net/ipv4/netfilter/arpt_mangle.c target(struct sk_buff *skb, const struct xt_action_param *par)
skb                20 net/ipv4/netfilter/arpt_mangle.c 	if (skb_ensure_writable(skb, skb->len))
skb                23 net/ipv4/netfilter/arpt_mangle.c 	arp = arp_hdr(skb);
skb                24 net/ipv4/netfilter/arpt_mangle.c 	arpptr = skb_network_header(skb) + sizeof(*arp);
skb                30 net/ipv4/netfilter/arpt_mangle.c 		   (arpptr + hln > skb_tail_pointer(skb)))
skb                37 net/ipv4/netfilter/arpt_mangle.c 		   (arpptr + pln > skb_tail_pointer(skb)))
skb                44 net/ipv4/netfilter/arpt_mangle.c 		   (arpptr + hln > skb_tail_pointer(skb)))
skb                51 net/ipv4/netfilter/arpt_mangle.c 		   (arpptr + pln > skb_tail_pointer(skb)))
skb                34 net/ipv4/netfilter/arptable_filter.c arptable_filter_hook(void *priv, struct sk_buff *skb,
skb                37 net/ipv4/netfilter/arptable_filter.c 	return arpt_do_table(skb, state, state->net->ipv4.arptable_filter);
skb                94 net/ipv4/netfilter/ip_tables.c ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
skb               188 net/ipv4/netfilter/ip_tables.c 			 const struct sk_buff *skb,
skb               211 net/ipv4/netfilter/ip_tables.c 	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
skb               225 net/ipv4/netfilter/ip_tables.c ipt_do_table(struct sk_buff *skb,
skb               244 net/ipv4/netfilter/ip_tables.c 	ip = ip_hdr(skb);
skb               254 net/ipv4/netfilter/ip_tables.c 	acpar.thoff   = ip_hdrlen(skb);
skb               294 net/ipv4/netfilter/ip_tables.c 			if (!acpar.match->match(skb, &acpar))
skb               299 net/ipv4/netfilter/ip_tables.c 		ADD_COUNTER(*counter, skb->len, 1);
skb               306 net/ipv4/netfilter/ip_tables.c 		if (unlikely(skb->nf_trace))
skb               307 net/ipv4/netfilter/ip_tables.c 			trace_packet(state->net, skb, hook, state->in,
skb               346 net/ipv4/netfilter/ip_tables.c 		verdict = t->u.kernel.target->target(skb, &acpar);
skb               349 net/ipv4/netfilter/ip_tables.c 			ip = ip_hdr(skb);
skb              1820 net/ipv4/netfilter/ip_tables.c icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
skb              1830 net/ipv4/netfilter/ip_tables.c 	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
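
icmp_match() uses skb_header_pointer() so the ICMP header is valid even when it straddles paged data (the on-stack _icmph is the bounce buffer), then applies the rule's type/code window. The comparison it delegates to is, paraphrasing the helper in the same file:

    /* Hedged paraphrase of icmp_type_code_match() in ip_tables.c: type
     * 0xFF is the "any" wildcard; otherwise the type must match and the
     * code must fall inside [min_code, max_code], XOR-ed with invert. */
    static int icmp_type_code_match(unsigned char test_type,
                                    unsigned char min_code,
                                    unsigned char max_code,
                                    unsigned char type, unsigned char code,
                                    int invert)
    {
        return ((test_type == 0xFF) ||
                (type == test_type && code >= min_code && code <= max_code))
               ^ invert;
    }
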
skb               337 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_hashfn(const struct sk_buff *skb,
skb               340 net/ipv4/netfilter/ipt_CLUSTERIP.c 	const struct iphdr *iph = ip_hdr(skb);
skb               350 net/ipv4/netfilter/ipt_CLUSTERIP.c 		ports = skb_header_pointer(skb, iph->ihl * 4 + poff, 4, _ports);
skb               397 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb               408 net/ipv4/netfilter/ipt_CLUSTERIP.c 	ct = nf_ct_get(skb, &ctinfo);
skb               414 net/ipv4/netfilter/ipt_CLUSTERIP.c 	if (ip_hdr(skb)->protocol == IPPROTO_ICMP &&
skb               423 net/ipv4/netfilter/ipt_CLUSTERIP.c 	hash = clusterip_hashfn(skb, cipinfo->config);
skb               453 net/ipv4/netfilter/ipt_CLUSTERIP.c 	skb->pkt_type = PACKET_HOST;
skb               606 net/ipv4/netfilter/ipt_CLUSTERIP.c 	   struct sk_buff *skb,
skb               609 net/ipv4/netfilter/ipt_CLUSTERIP.c 	struct arphdr *arp = arp_hdr(skb);
skb                26 net/ipv4/netfilter/ipt_ECN.c set_ect_ip(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
skb                28 net/ipv4/netfilter/ipt_ECN.c 	struct iphdr *iph = ip_hdr(skb);
skb                32 net/ipv4/netfilter/ipt_ECN.c 		if (skb_ensure_writable(skb, sizeof(struct iphdr)))
skb                34 net/ipv4/netfilter/ipt_ECN.c 		iph = ip_hdr(skb);
skb                45 net/ipv4/netfilter/ipt_ECN.c set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
skb                51 net/ipv4/netfilter/ipt_ECN.c 	tcph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
skb                61 net/ipv4/netfilter/ipt_ECN.c 	if (skb_ensure_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
skb                63 net/ipv4/netfilter/ipt_ECN.c 	tcph = (void *)ip_hdr(skb) + ip_hdrlen(skb);
skb                71 net/ipv4/netfilter/ipt_ECN.c 	inet_proto_csum_replace2(&tcph->check, skb,
skb                77 net/ipv4/netfilter/ipt_ECN.c ecn_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                82 net/ipv4/netfilter/ipt_ECN.c 		if (!set_ect_ip(skb, einfo))
skb                86 net/ipv4/netfilter/ipt_ECN.c 	    ip_hdr(skb)->protocol == IPPROTO_TCP)
skb                87 net/ipv4/netfilter/ipt_ECN.c 		if (!set_ect_tcp(skb, einfo))
skb                31 net/ipv4/netfilter/ipt_REJECT.c reject_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                38 net/ipv4/netfilter/ipt_REJECT.c 		nf_send_unreach(skb, ICMP_NET_UNREACH, hook);
skb                41 net/ipv4/netfilter/ipt_REJECT.c 		nf_send_unreach(skb, ICMP_HOST_UNREACH, hook);
skb                44 net/ipv4/netfilter/ipt_REJECT.c 		nf_send_unreach(skb, ICMP_PROT_UNREACH, hook);
skb                47 net/ipv4/netfilter/ipt_REJECT.c 		nf_send_unreach(skb, ICMP_PORT_UNREACH, hook);
skb                50 net/ipv4/netfilter/ipt_REJECT.c 		nf_send_unreach(skb, ICMP_NET_ANO, hook);
skb                53 net/ipv4/netfilter/ipt_REJECT.c 		nf_send_unreach(skb, ICMP_HOST_ANO, hook);
skb                56 net/ipv4/netfilter/ipt_REJECT.c 		nf_send_unreach(skb, ICMP_PKT_FILTERED, hook);
skb                59 net/ipv4/netfilter/ipt_REJECT.c 		nf_send_reset(xt_net(par), skb, hook);
skb                13 net/ipv4/netfilter/ipt_SYNPROXY.c synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
skb                21 net/ipv4/netfilter/ipt_SYNPROXY.c 	if (nf_ip_checksum(skb, xt_hooknum(par), par->thoff, IPPROTO_TCP))
skb                24 net/ipv4/netfilter/ipt_SYNPROXY.c 	th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th);
skb                28 net/ipv4/netfilter/ipt_SYNPROXY.c 	if (!synproxy_parse_options(skb, par->thoff, th, &opts))
skb                48 net/ipv4/netfilter/ipt_SYNPROXY.c 		synproxy_send_client_synack(net, skb, th, &opts);
skb                49 net/ipv4/netfilter/ipt_SYNPROXY.c 		consume_skb(skb);
skb                53 net/ipv4/netfilter/ipt_SYNPROXY.c 		if (synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq))) {
skb                54 net/ipv4/netfilter/ipt_SYNPROXY.c 			consume_skb(skb);
skb                30 net/ipv4/netfilter/ipt_ah.c static bool ah_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                40 net/ipv4/netfilter/ipt_ah.c 	ah = skb_header_pointer(skb, par->thoff, sizeof(_ahdr), &_ahdr);
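
ah_mt() is a read-only match, so instead of making the skb writable it uses skb_header_pointer(), which returns a pointer into the linear skb data when the range is already there and otherwise copies it into a caller-supplied bounce buffer. A minimal sketch of that read pattern (struct ip_auth_hdr is the uapi AH header; the SPI comparison is illustrative):

	static bool ah_spi_match_sketch(const struct sk_buff *skb,
					unsigned int thoff, __be32 spi)
	{
		struct ip_auth_hdr _ahdr;
		const struct ip_auth_hdr *ah;

		/* Safe on non-linear skbs: _ahdr acts as a bounce buffer. */
		ah = skb_header_pointer(skb, thoff, sizeof(_ahdr), &_ahdr);
		if (!ah)
			return false;	/* truncated packet: no match */

		return ah->spi == spi;
	}
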
skb                49 net/ipv4/netfilter/ipt_rpfilter.c rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
skb                51 net/ipv4/netfilter/ipt_rpfilter.c 	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
skb                54 net/ipv4/netfilter/ipt_rpfilter.c static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                64 net/ipv4/netfilter/ipt_rpfilter.c 	if (rpfilter_is_loopback(skb, xt_in(par)))
skb                67 net/ipv4/netfilter/ipt_rpfilter.c 	iph = ip_hdr(skb);
skb                78 net/ipv4/netfilter/ipt_rpfilter.c 	flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
skb                34 net/ipv4/netfilter/iptable_filter.c iptable_filter_hook(void *priv, struct sk_buff *skb,
skb                37 net/ipv4/netfilter/iptable_filter.c 	return ipt_do_table(skb, state, state->net->ipv4.iptable_filter);
skb                40 net/ipv4/netfilter/iptable_mangle.c ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
skb                50 net/ipv4/netfilter/iptable_mangle.c 	mark = skb->mark;
skb                51 net/ipv4/netfilter/iptable_mangle.c 	iph = ip_hdr(skb);
skb                56 net/ipv4/netfilter/iptable_mangle.c 	ret = ipt_do_table(skb, state, state->net->ipv4.iptable_mangle);
skb                59 net/ipv4/netfilter/iptable_mangle.c 		iph = ip_hdr(skb);
skb                63 net/ipv4/netfilter/iptable_mangle.c 		    skb->mark != mark ||
skb                65 net/ipv4/netfilter/iptable_mangle.c 			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
skb                77 net/ipv4/netfilter/iptable_mangle.c 		     struct sk_buff *skb,
skb                81 net/ipv4/netfilter/iptable_mangle.c 		return ipt_mangle_out(skb, state);
skb                82 net/ipv4/netfilter/iptable_mangle.c 	return ipt_do_table(skb, state, state->net->ipv4.iptable_mangle);
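
The iptable_mangle OUTPUT path snapshots every field a mangle rule may legitimately rewrite, runs the table, and re-routes the packet only if one of the routing-relevant fields actually changed. A condensed sketch of ipt_mangle_out()'s compare-and-reroute logic (error handling around ip_route_me_harder() abbreviated relative to the excerpt):

	static unsigned int mangle_out_sketch(struct sk_buff *skb,
					      const struct nf_hook_state *state)
	{
		const struct iphdr *iph;
		__be32 saddr, daddr;
		unsigned int ret;
		u32 mark;
		u8 tos;

		/* Snapshot everything that feeds the routing decision. */
		mark  = skb->mark;
		iph   = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
		tos   = iph->tos;

		ret = ipt_do_table(skb, state, state->net->ipv4.iptable_mangle);

		if (ret != NF_DROP && ret != NF_STOLEN) {
			iph = ip_hdr(skb);	/* rules may have reallocated */
			if (iph->saddr != saddr || iph->daddr != daddr ||
			    skb->mark != mark || iph->tos != tos) {
				if (ip_route_me_harder(state->net, skb, RTN_UNSPEC))
					ret = NF_DROP;
			}
		}
		return ret;
	}
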
skb                30 net/ipv4/netfilter/iptable_nat.c 					 struct sk_buff *skb,
skb                33 net/ipv4/netfilter/iptable_nat.c 	return ipt_do_table(skb, state, state->net->ipv4.nat_table);
skb                41 net/ipv4/netfilter/iptable_raw.c iptable_raw_hook(void *priv, struct sk_buff *skb,
skb                44 net/ipv4/netfilter/iptable_raw.c 	return ipt_do_table(skb, state, state->net->ipv4.iptable_raw);
skb                40 net/ipv4/netfilter/iptable_security.c iptable_security_hook(void *priv, struct sk_buff *skb,
skb                43 net/ipv4/netfilter/iptable_security.c 	return ipt_do_table(skb, state, state->net->ipv4.iptable_security);
skb                25 net/ipv4/netfilter/nf_defrag_ipv4.c static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
skb                31 net/ipv4/netfilter/nf_defrag_ipv4.c 	err = ip_defrag(net, skb, user);
skb                35 net/ipv4/netfilter/nf_defrag_ipv4.c 		skb->ignore_df = 1;
skb                41 net/ipv4/netfilter/nf_defrag_ipv4.c 					      struct sk_buff *skb)
skb                45 net/ipv4/netfilter/nf_defrag_ipv4.c 	if (skb_nfct(skb)) {
skb                47 net/ipv4/netfilter/nf_defrag_ipv4.c 		const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb                52 net/ipv4/netfilter/nf_defrag_ipv4.c 	if (nf_bridge_in_prerouting(skb))
skb                62 net/ipv4/netfilter/nf_defrag_ipv4.c 					  struct sk_buff *skb,
skb                65 net/ipv4/netfilter/nf_defrag_ipv4.c 	struct sock *sk = skb->sk;
skb                75 net/ipv4/netfilter/nf_defrag_ipv4.c 	if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
skb                78 net/ipv4/netfilter/nf_defrag_ipv4.c 	if (skb->_nfct == IP_CT_UNTRACKED)
skb                82 net/ipv4/netfilter/nf_defrag_ipv4.c 	if (ip_is_fragment(ip_hdr(skb))) {
skb                84 net/ipv4/netfilter/nf_defrag_ipv4.c 			nf_ct_defrag_user(state->hook, skb);
skb                86 net/ipv4/netfilter/nf_defrag_ipv4.c 		if (nf_ct_ipv4_gather_frags(state->net, skb, user))
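
nf_defrag_ipv4 reassembles fragments before conntrack ever sees them. ip_defrag() returns 0 once the datagram is complete and -EINPROGRESS while fragments are still being collected (the queued skb is then stolen from the caller). A minimal sketch matching nf_ct_ipv4_gather_frags() above:

	static int gather_frags_sketch(struct net *net, struct sk_buff *skb,
				       u32 user)
	{
		int err;

		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();

		/* A reassembled datagram may exceed the outgoing MTU; let
		 * the output path refragment it instead of dropping it. */
		if (!err)
			skb->ignore_df = 1;

		return err;
	}
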
skb                23 net/ipv4/netfilter/nf_dup_ipv4.c static bool nf_dup_ipv4_route(struct net *net, struct sk_buff *skb,
skb                26 net/ipv4/netfilter/nf_dup_ipv4.c 	const struct iphdr *iph = ip_hdr(skb);
skb                42 net/ipv4/netfilter/nf_dup_ipv4.c 	skb_dst_drop(skb);
skb                43 net/ipv4/netfilter/nf_dup_ipv4.c 	skb_dst_set(skb, &rt->dst);
skb                44 net/ipv4/netfilter/nf_dup_ipv4.c 	skb->dev      = rt->dst.dev;
skb                45 net/ipv4/netfilter/nf_dup_ipv4.c 	skb->protocol = htons(ETH_P_IP);
skb                50 net/ipv4/netfilter/nf_dup_ipv4.c void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
skb                62 net/ipv4/netfilter/nf_dup_ipv4.c 	skb = pskb_copy(skb, GFP_ATOMIC);
skb                63 net/ipv4/netfilter/nf_dup_ipv4.c 	if (skb == NULL)
skb                68 net/ipv4/netfilter/nf_dup_ipv4.c 	nf_reset_ct(skb);
skb                69 net/ipv4/netfilter/nf_dup_ipv4.c 	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
skb                80 net/ipv4/netfilter/nf_dup_ipv4.c 	iph = ip_hdr(skb);
skb                86 net/ipv4/netfilter/nf_dup_ipv4.c 	if (nf_dup_ipv4_route(net, skb, gw, oif)) {
skb                88 net/ipv4/netfilter/nf_dup_ipv4.c 		ip_local_out(net, skb->sk, skb);
skb                91 net/ipv4/netfilter/nf_dup_ipv4.c 		kfree_skb(skb);
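
nf_dup_ipv4() shows the canonical way to emit a copy of a packet: duplicate with pskb_copy() (the header must be private because the TTL is edited), detach conntrack state from the copy, decrement the TTL to break mirror loops, then re-route and transmit. A condensed sketch; nf_dup_ipv4_route() is the static routing helper excerpted above:

	static void dup_ipv4_sketch(struct net *net, struct sk_buff *skb,
				    const struct in_addr *gw, int oif)
	{
		struct iphdr *iph;

		/* Private header + data copy: the TTL edit below must not
		 * touch the original packet. */
		skb = pskb_copy(skb, GFP_ATOMIC);
		if (!skb)
			return;

		/* The copy must not count against the original connection. */
		nf_reset_ct(skb);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		/* Break duplication loops between two mirroring hosts. */
		iph = ip_hdr(skb);
		--iph->ttl;
		ip_send_check(iph);	/* TTL changed: refresh header csum */

		if (nf_dup_ipv4_route(net, skb, gw, oif))
			ip_local_out(net, skb->sk, skb);
		else
			kfree_skb(skb);
	}
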
skb                44 net/ipv4/netfilter/nf_log_arp.c 			    const struct sk_buff *skb, unsigned int nhoff)
skb                51 net/ipv4/netfilter/nf_log_arp.c 	ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
skb                67 net/ipv4/netfilter/nf_log_arp.c 	ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp);
skb                70 net/ipv4/netfilter/nf_log_arp.c 			       skb->len - sizeof(_arph));
skb                78 net/ipv4/netfilter/nf_log_arp.c 			      unsigned int hooknum, const struct sk_buff *skb,
skb                95 net/ipv4/netfilter/nf_log_arp.c 	nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
skb                97 net/ipv4/netfilter/nf_log_arp.c 	dump_arp_packet(m, loginfo, skb, 0);
skb                37 net/ipv4/netfilter/nf_log_ipv4.c 			     const struct sk_buff *skb, unsigned int iphoff)
skb                48 net/ipv4/netfilter/nf_log_ipv4.c 	ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
skb                83 net/ipv4/netfilter/nf_log_ipv4.c 		op = skb_header_pointer(skb, iphoff+sizeof(_iph),
skb                99 net/ipv4/netfilter/nf_log_ipv4.c 		if (nf_log_dump_tcp_header(m, skb, ih->protocol,
skb               106 net/ipv4/netfilter/nf_log_ipv4.c 		if (nf_log_dump_udp_header(m, skb, ih->protocol,
skb               139 net/ipv4/netfilter/nf_log_ipv4.c 		ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
skb               143 net/ipv4/netfilter/nf_log_ipv4.c 				       skb->len - iphoff - ih->ihl*4);
skb               153 net/ipv4/netfilter/nf_log_ipv4.c 		    skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
skb               155 net/ipv4/netfilter/nf_log_ipv4.c 				       skb->len - iphoff - ih->ihl*4);
skb               183 net/ipv4/netfilter/nf_log_ipv4.c 				dump_ipv4_packet(net, m, info, skb,
skb               209 net/ipv4/netfilter/nf_log_ipv4.c 		ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
skb               213 net/ipv4/netfilter/nf_log_ipv4.c 				       skb->len - iphoff - ih->ihl*4);
skb               232 net/ipv4/netfilter/nf_log_ipv4.c 		eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
skb               236 net/ipv4/netfilter/nf_log_ipv4.c 				       skb->len - iphoff - ih->ihl*4);
skb               251 net/ipv4/netfilter/nf_log_ipv4.c 		nf_log_dump_sk_uid_gid(net, m, skb->sk);
skb               254 net/ipv4/netfilter/nf_log_ipv4.c 	if (!iphoff && skb->mark)
skb               255 net/ipv4/netfilter/nf_log_ipv4.c 		nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
skb               274 net/ipv4/netfilter/nf_log_ipv4.c 			    const struct sk_buff *skb)
skb               276 net/ipv4/netfilter/nf_log_ipv4.c 	struct net_device *dev = skb->dev;
skb               288 net/ipv4/netfilter/nf_log_ipv4.c 			       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
skb               289 net/ipv4/netfilter/nf_log_ipv4.c 			       ntohs(eth_hdr(skb)->h_proto));
skb               298 net/ipv4/netfilter/nf_log_ipv4.c 	    skb->mac_header != skb->network_header) {
skb               299 net/ipv4/netfilter/nf_log_ipv4.c 		const unsigned char *p = skb_mac_header(skb);
skb               310 net/ipv4/netfilter/nf_log_ipv4.c 			     unsigned int hooknum, const struct sk_buff *skb,
skb               327 net/ipv4/netfilter/nf_log_ipv4.c 	nf_log_dump_packet_common(m, pf, hooknum, skb, in,
skb               331 net/ipv4/netfilter/nf_log_ipv4.c 		dump_ipv4_mac_header(m, loginfo, skb);
skb               333 net/ipv4/netfilter/nf_log_ipv4.c 	dump_ipv4_packet(net, m, loginfo, skb, 0);
skb                23 net/ipv4/netfilter/nf_nat_h323.c static int set_addr(struct sk_buff *skb, unsigned int protoff,
skb                28 net/ipv4/netfilter/nf_nat_h323.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb                40 net/ipv4/netfilter/nf_nat_h323.c 	if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
skb                41 net/ipv4/netfilter/nf_nat_h323.c 		if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
skb                49 net/ipv4/netfilter/nf_nat_h323.c 		th = skb_header_pointer(skb, ip_hdrlen(skb),
skb                53 net/ipv4/netfilter/nf_nat_h323.c 		*data = skb->data + ip_hdrlen(skb) + th->doff * 4 + dataoff;
skb                55 net/ipv4/netfilter/nf_nat_h323.c 		if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
skb                64 net/ipv4/netfilter/nf_nat_h323.c 		*data = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr);
skb                71 net/ipv4/netfilter/nf_nat_h323.c static int set_h225_addr(struct sk_buff *skb, unsigned int protoff,
skb                76 net/ipv4/netfilter/nf_nat_h323.c 	return set_addr(skb, protoff, data, dataoff, taddr->ipAddress.ip,
skb                81 net/ipv4/netfilter/nf_nat_h323.c static int set_h245_addr(struct sk_buff *skb, unsigned protoff,
skb                86 net/ipv4/netfilter/nf_nat_h323.c 	return set_addr(skb, protoff, data, dataoff,
skb                92 net/ipv4/netfilter/nf_nat_h323.c static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
skb               120 net/ipv4/netfilter/nf_nat_h323.c 				return set_h225_addr(skb, protoff, data, 0,
skb               132 net/ipv4/netfilter/nf_nat_h323.c 				return set_h225_addr(skb, protoff, data, 0,
skb               145 net/ipv4/netfilter/nf_nat_h323.c static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
skb               163 net/ipv4/netfilter/nf_nat_h323.c 			return set_h225_addr(skb, protoff, data, 0, &taddr[i],
skb               174 net/ipv4/netfilter/nf_nat_h323.c static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
skb               251 net/ipv4/netfilter/nf_nat_h323.c 	if (set_h245_addr(skb, protoff, data, dataoff, taddr,
skb               280 net/ipv4/netfilter/nf_nat_h323.c static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
skb               314 net/ipv4/netfilter/nf_nat_h323.c 	if (set_h245_addr(skb, protoff, data, dataoff, taddr,
skb               331 net/ipv4/netfilter/nf_nat_h323.c static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
skb               370 net/ipv4/netfilter/nf_nat_h323.c 	if (set_h225_addr(skb, protoff, data, dataoff, taddr,
skb               422 net/ipv4/netfilter/nf_nat_h323.c static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
skb               462 net/ipv4/netfilter/nf_nat_h323.c 	if (set_h225_addr(skb, protoff, data, 0, &taddr[idx],
skb               477 net/ipv4/netfilter/nf_nat_h323.c 		if (set_h225_addr(skb, protoff, data, 0, &taddr[0],
skb               518 net/ipv4/netfilter/nf_nat_h323.c static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
skb               555 net/ipv4/netfilter/nf_nat_h323.c 	if (set_h225_addr(skb, protoff, data, dataoff, taddr,
skb               120 net/ipv4/netfilter/nf_nat_pptp.c pptp_outbound_pkt(struct sk_buff *skb,
skb               189 net/ipv4/netfilter/nf_nat_pptp.c 	if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
skb               231 net/ipv4/netfilter/nf_nat_pptp.c pptp_inbound_pkt(struct sk_buff *skb,
skb               289 net/ipv4/netfilter/nf_nat_pptp.c 	if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
skb               126 net/ipv4/netfilter/nf_nat_snmp_basic_main.c static int snmp_translate(struct nf_conn *ct, int dir, struct sk_buff *skb)
skb               128 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 	struct iphdr *iph = ip_hdr(skb);
skb               150 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 		nf_ct_helper_log(skb, ct, "parser failed\n");
skb               160 net/ipv4/netfilter/nf_nat_snmp_basic_main.c static int help(struct sk_buff *skb, unsigned int protoff,
skb               166 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 	const struct iphdr *iph = ip_hdr(skb);
skb               184 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 	if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
skb               185 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 		nf_ct_helper_log(skb, ct, "dropping malformed packet\n");
skb               189 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 	if (skb_ensure_writable(skb, skb->len)) {
skb               190 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 		nf_ct_helper_log(skb, ct, "cannot mangle packet");
skb               195 net/ipv4/netfilter/nf_nat_snmp_basic_main.c 	ret = snmp_translate(ct, dir, skb);
skb                20 net/ipv4/netfilter/nf_socket_ipv4.c extract_icmp4_fields(const struct sk_buff *skb, u8 *protocol,
skb                24 net/ipv4/netfilter/nf_socket_ipv4.c 	unsigned int outside_hdrlen = ip_hdrlen(skb);
skb                29 net/ipv4/netfilter/nf_socket_ipv4.c 	icmph = skb_header_pointer(skb, outside_hdrlen,
skb                45 net/ipv4/netfilter/nf_socket_ipv4.c 	inside_iph = skb_header_pointer(skb, outside_hdrlen +
skb                55 net/ipv4/netfilter/nf_socket_ipv4.c 	ports = skb_header_pointer(skb, outside_hdrlen +
skb                74 net/ipv4/netfilter/nf_socket_ipv4.c nf_socket_get_sock_v4(struct net *net, struct sk_buff *skb, const int doff,
skb                82 net/ipv4/netfilter/nf_socket_ipv4.c 		return inet_lookup(net, &tcp_hashinfo, skb, doff,
skb                92 net/ipv4/netfilter/nf_socket_ipv4.c struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
skb                97 net/ipv4/netfilter/nf_socket_ipv4.c 	const struct iphdr *iph = ip_hdr(skb);
skb               110 net/ipv4/netfilter/nf_socket_ipv4.c 		hp = skb_header_pointer(skb, ip_hdrlen(skb),
skb               121 net/ipv4/netfilter/nf_socket_ipv4.c 		data_skb = (struct sk_buff *)skb;
skb               123 net/ipv4/netfilter/nf_socket_ipv4.c 			ip_hdrlen(skb) + __tcp_hdrlen((struct tcphdr *)hp) :
skb               124 net/ipv4/netfilter/nf_socket_ipv4.c 			ip_hdrlen(skb) + sizeof(*hp);
skb               127 net/ipv4/netfilter/nf_socket_ipv4.c 		if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
skb               139 net/ipv4/netfilter/nf_socket_ipv4.c 	ct = nf_ct_get(skb, &ctinfo);
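
For an ICMP error, nf_socket_ipv4 cannot take its lookup keys from the outer header; extract_icmp4_fields() digs them out of the embedded IP header, which describes the original, locally sent datagram. A minimal sketch of that inner-header parse, with the out-parameters standing in for the full local/remote address-and-port set the real function extracts:

	static int icmp4_inner_keys_sketch(const struct sk_buff *skb,
					   u8 *protocol, __be32 *laddr,
					   __be32 *raddr)
	{
		unsigned int outer = ip_hdrlen(skb);
		struct iphdr _iph;
		const struct iphdr *inner;

		inner = skb_header_pointer(skb, outer + sizeof(struct icmphdr),
					   sizeof(_iph), &_iph);
		if (!inner || inner->ihl < 5)
			return -EINVAL;

		/* The embedded datagram was sent *by* this host, so its
		 * source address is the local end of the reported flow. */
		*protocol = inner->protocol;
		*laddr = inner->saddr;
		*raddr = inner->daddr;
		return 0;
	}
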
skb                19 net/ipv4/netfilter/nf_tproxy_ipv4.c nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
skb                22 net/ipv4/netfilter/nf_tproxy_ipv4.c 	const struct iphdr *iph = ip_hdr(skb);
skb                25 net/ipv4/netfilter/nf_tproxy_ipv4.c 	hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
skb                36 net/ipv4/netfilter/nf_tproxy_ipv4.c 		sk2 = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
skb                39 net/ipv4/netfilter/nf_tproxy_ipv4.c 					    skb->dev, NF_TPROXY_LOOKUP_LISTENER);
skb                50 net/ipv4/netfilter/nf_tproxy_ipv4.c __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
skb                60 net/ipv4/netfilter/nf_tproxy_ipv4.c 	indev = __in_dev_get_rcu(skb->dev);
skb                75 net/ipv4/netfilter/nf_tproxy_ipv4.c nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
skb                88 net/ipv4/netfilter/nf_tproxy_ipv4.c 		hp = skb_header_pointer(skb, ip_hdrlen(skb),
skb                95 net/ipv4/netfilter/nf_tproxy_ipv4.c 			sk = inet_lookup_listener(net, &tcp_hashinfo, skb,
skb                96 net/ipv4/netfilter/nf_tproxy_ipv4.c 						    ip_hdrlen(skb) +
skb                30 net/ipv4/netfilter/nft_dup_ipv4.c 	nf_dup_ipv4(nft_net(pkt), pkt->skb, nft_hook(pkt), &gw, oif);
skb                55 net/ipv4/netfilter/nft_dup_ipv4.c static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                59 net/ipv4/netfilter/nft_dup_ipv4.c 	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
skb                62 net/ipv4/netfilter/nft_dup_ipv4.c 	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
skb                31 net/ipv4/netfilter/nft_fib_ipv4.c 	int noff = skb_network_offset(pkt->skb);
skb                42 net/ipv4/netfilter/nft_fib_ipv4.c 	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
skb                61 net/ipv4/netfilter/nft_fib_ipv4.c 	int noff = skb_network_offset(pkt->skb);
skb                87 net/ipv4/netfilter/nft_fib_ipv4.c 	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
skb                92 net/ipv4/netfilter/nft_fib_ipv4.c 	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
skb               101 net/ipv4/netfilter/nft_fib_ipv4.c 			nft_fib_store_result(dest, priv, pkt->skb->dev);
skb               107 net/ipv4/netfilter/nft_fib_ipv4.c 		fl4.flowi4_mark = pkt->skb->mark;
skb                27 net/ipv4/netfilter/nft_reject_ipv4.c 		nf_send_unreach(pkt->skb, priv->icmp_code, nft_hook(pkt));
skb                30 net/ipv4/netfilter/nft_reject_ipv4.c 		nf_send_reset(nft_net(pkt), pkt->skb, nft_hook(pkt));
skb               184 net/ipv4/nexthop.c static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
skb               195 net/ipv4/nexthop.c 	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
skb               198 net/ipv4/nexthop.c 	nla = nla_reserve(skb, NHA_GROUP, len);
skb               215 net/ipv4/nexthop.c static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
skb               224 net/ipv4/nexthop.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
skb               235 net/ipv4/nexthop.c 	if (nla_put_u32(skb, NHA_ID, nh->id))
skb               241 net/ipv4/nexthop.c 		if (nla_put_nh_group(skb, nhg))
skb               249 net/ipv4/nexthop.c 		if (nla_put_flag(skb, NHA_BLACKHOLE))
skb               256 net/ipv4/nexthop.c 		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
skb               265 net/ipv4/nexthop.c 		    nla_put_u32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
skb               272 net/ipv4/nexthop.c 		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
skb               278 net/ipv4/nexthop.c 	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
skb               283 net/ipv4/nexthop.c 	nlmsg_end(skb, nlh);
skb               287 net/ipv4/nexthop.c 	nlmsg_cancel(skb, nlh);
skb               349 net/ipv4/nexthop.c 	struct sk_buff *skb;
skb               352 net/ipv4/nexthop.c 	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
skb               353 net/ipv4/nexthop.c 	if (!skb)
skb               356 net/ipv4/nexthop.c 	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
skb               360 net/ipv4/nexthop.c 		kfree_skb(skb);
skb               364 net/ipv4/nexthop.c 	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
skb              1326 net/ipv4/nexthop.c static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
skb              1369 net/ipv4/nexthop.c 	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
skb              1493 net/ipv4/nexthop.c static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1496 net/ipv4/nexthop.c 	struct net *net = sock_net(skb->sk);
skb              1501 net/ipv4/nexthop.c 	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
skb              1557 net/ipv4/nexthop.c static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1560 net/ipv4/nexthop.c 	struct net *net = sock_net(skb->sk);
skb              1564 net/ipv4/nexthop.c 		.portid = NETLINK_CB(skb).portid,
skb              1588 net/ipv4/nexthop.c 	struct sk_buff *skb = NULL;
skb              1598 net/ipv4/nexthop.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              1599 net/ipv4/nexthop.c 	if (!skb)
skb              1607 net/ipv4/nexthop.c 	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
skb              1614 net/ipv4/nexthop.c 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
skb              1618 net/ipv4/nexthop.c 	kfree_skb(skb);
skb              1714 net/ipv4/nexthop.c static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
skb              1718 net/ipv4/nexthop.c 	struct net *net = sock_net(skb->sk);
skb              1742 net/ipv4/nexthop.c 		err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
skb              1743 net/ipv4/nexthop.c 				   NETLINK_CB(cb->skb).portid,
skb              1746 net/ipv4/nexthop.c 			if (likely(skb->len))
skb              1756 net/ipv4/nexthop.c 	err = skb->len;
skb              1760 net/ipv4/nexthop.c 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
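
The nexthop.c excerpts follow the standard rtnetlink fill pattern: nlmsg_put() reserves the message header, nla_put_*() appends attributes, and on any overflow nlmsg_cancel() rolls the partial message back so the skb stays consistent and a dump can resume with a larger buffer. A stripped-down sketch of that shape, keeping only the NHA_ID attribute from the excerpt:

	static int fill_node_sketch(struct sk_buff *skb, u32 nh_id, u32 portid,
				    u32 seq, int event, unsigned int nlflags)
	{
		struct nlmsghdr *nlh;
		struct nhmsg *nhm;

		/* Reserve netlink + nexthop message headers. */
		nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
		if (!nlh)
			return -EMSGSIZE;

		nhm = nlmsg_data(nlh);
		memset(nhm, 0, sizeof(*nhm));
		nhm->nh_family = AF_INET;

		if (nla_put_u32(skb, NHA_ID, nh_id))
			goto nla_put_failure;

		nlmsg_end(skb, nlh);	/* patch in the final message length */
		return 0;

	nla_put_failure:
		/* Undo the partial message; dump callers retry later. */
		nlmsg_cancel(skb, nlh);
		return -EMSGSIZE;
	}
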
skb               169 net/ipv4/ping.c static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
skb               175 net/ipv4/ping.c 	int dif = skb->dev->ifindex;
skb               177 net/ipv4/ping.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               179 net/ipv4/ping.c 			 (int)ident, &ip_hdr(skb)->daddr, dif);
skb               181 net/ipv4/ping.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               183 net/ipv4/ping.c 			 (int)ident, &ipv6_hdr(skb)->daddr, dif);
skb               196 net/ipv4/ping.c 		if (skb->protocol == htons(ETH_P_IP) &&
skb               203 net/ipv4/ping.c 			    isk->inet_rcv_saddr != ip_hdr(skb)->daddr)
skb               206 net/ipv4/ping.c 		} else if (skb->protocol == htons(ETH_P_IPV6) &&
skb               216 net/ipv4/ping.c 					     &ipv6_hdr(skb)->daddr))
skb               476 net/ipv4/ping.c void ping_err(struct sk_buff *skb, int offset, u32 info)
skb               483 net/ipv4/ping.c 	struct net *net = dev_net(skb->dev);
skb               488 net/ipv4/ping.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               490 net/ipv4/ping.c 		type = icmp_hdr(skb)->type;
skb               491 net/ipv4/ping.c 		code = icmp_hdr(skb)->code;
skb               492 net/ipv4/ping.c 		icmph = (struct icmphdr *)(skb->data + offset);
skb               493 net/ipv4/ping.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               495 net/ipv4/ping.c 		type = icmp6_hdr(skb)->icmp6_type;
skb               496 net/ipv4/ping.c 		code = icmp6_hdr(skb)->icmp6_code;
skb               497 net/ipv4/ping.c 		icmph = (struct icmphdr *) (skb->data + offset);
skb               508 net/ipv4/ping.c 		 skb->protocol, type, code, ntohs(icmph->un.echo.id),
skb               511 net/ipv4/ping.c 	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
skb               522 net/ipv4/ping.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb               540 net/ipv4/ping.c 				ipv4_sk_update_pmtu(skb, sk, info);
skb               556 net/ipv4/ping.c 			ipv4_sk_redirect(skb, sk);
skb               561 net/ipv4/ping.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               576 net/ipv4/ping.c 			ip_icmp_error(sk, skb, err, 0 /* no remote port */,
skb               580 net/ipv4/ping.c 			pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
skb               598 net/ipv4/ping.c 		 int offset, int fraglen, int odd, struct sk_buff *skb)
skb               624 net/ipv4/ping.c 		skb->csum = pfh->wcheck;
skb               625 net/ipv4/ping.c 		skb->ip_summed = CHECKSUM_NONE;
skb               637 net/ipv4/ping.c 	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
skb               639 net/ipv4/ping.c 	if (!skb)
skb               644 net/ipv4/ping.c 	memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr));
skb               645 net/ipv4/ping.c 	skb->ip_summed = CHECKSUM_NONE;
skb               855 net/ipv4/ping.c 	struct sk_buff *skb;
skb               867 net/ipv4/ping.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb               868 net/ipv4/ping.c 	if (!skb)
skb               871 net/ipv4/ping.c 	copied = skb->len;
skb               878 net/ipv4/ping.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               882 net/ipv4/ping.c 	sock_recv_timestamp(msg, sk, skb);
skb               891 net/ipv4/ping.c 			sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
skb               897 net/ipv4/ping.c 			ip_cmsg_recv(msg, skb);
skb               902 net/ipv4/ping.c 		struct ipv6hdr *ip6 = ipv6_hdr(skb);
skb               914 net/ipv4/ping.c 						    inet6_iif(skb));
skb               919 net/ipv4/ping.c 			pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
skb               920 net/ipv4/ping.c 		if (skb->protocol == htons(ETH_P_IPV6) &&
skb               922 net/ipv4/ping.c 			pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
skb               923 net/ipv4/ping.c 		else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
skb               924 net/ipv4/ping.c 			ip_cmsg_recv(msg, skb);
skb               933 net/ipv4/ping.c 	skb_free_datagram(sk, skb);
skb               940 net/ipv4/ping.c int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               943 net/ipv4/ping.c 		 inet_sk(sk), inet_sk(sk)->inet_num, skb);
skb               944 net/ipv4/ping.c 	if (sock_queue_rcv_skb(sk, skb) < 0) {
skb               945 net/ipv4/ping.c 		kfree_skb(skb);
skb               958 net/ipv4/ping.c bool ping_rcv(struct sk_buff *skb)
skb               961 net/ipv4/ping.c 	struct net *net = dev_net(skb->dev);
skb               962 net/ipv4/ping.c 	struct icmphdr *icmph = icmp_hdr(skb);
skb               967 net/ipv4/ping.c 		 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
skb               970 net/ipv4/ping.c 	skb_push(skb, skb->data - (u8 *)icmph);
skb               972 net/ipv4/ping.c 	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
skb               974 net/ipv4/ping.c 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
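
ping_rcv() demultiplexes an incoming echo reply to the unprivileged ping socket whose ident matches, queueing a private clone so the caller keeps ownership of the original skb. A condensed sketch, assuming ping_lookup() returns a held socket as in the excerpt and replacing ping_queue_rcv_skb() with its sock_queue_rcv_skb() core:

	static bool ping_rcv_sketch(struct sk_buff *skb)
	{
		struct net *net = dev_net(skb->dev);
		struct icmphdr *icmph = icmp_hdr(skb);
		struct sk_buff *skb2;
		struct sock *sk;

		/* Rewind data to the ICMP header: that is what a ping
		 * socket expects at the front of a queued datagram. */
		skb_push(skb, skb->data - (u8 *)icmph);

		sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
		if (!sk)
			return false;	/* nobody bound to this ident */

		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && sock_queue_rcv_skb(sk, skb2) < 0)
			kfree_skb(skb2);
		sock_put(sk);
		return true;
	}
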
skb               143 net/ipv4/raw.c static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
skb               148 net/ipv4/raw.c 	hdr = skb_header_pointer(skb, skb_transport_offset(skb),
skb               169 net/ipv4/raw.c static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
skb               171 net/ipv4/raw.c 	int sdif = inet_sdif(skb);
skb               172 net/ipv4/raw.c 	int dif = inet_iif(skb);
skb               183 net/ipv4/raw.c 	net = dev_net(skb->dev);
skb               189 net/ipv4/raw.c 		if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
skb               191 net/ipv4/raw.c 				   skb->dev->ifindex, sdif)) {
skb               192 net/ipv4/raw.c 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
skb               207 net/ipv4/raw.c int raw_local_deliver(struct sk_buff *skb, int protocol)
skb               218 net/ipv4/raw.c 	if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash))
skb               225 net/ipv4/raw.c static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
skb               228 net/ipv4/raw.c 	const int type = icmp_hdr(skb)->type;
skb               229 net/ipv4/raw.c 	const int code = icmp_hdr(skb)->code;
skb               234 net/ipv4/raw.c 		ipv4_sk_update_pmtu(skb, sk, info);
skb               236 net/ipv4/raw.c 		ipv4_sk_redirect(skb, sk);
skb               272 net/ipv4/raw.c 		const struct iphdr *iph = (const struct iphdr *)skb->data;
skb               273 net/ipv4/raw.c 		u8 *payload = skb->data + (iph->ihl << 2);
skb               276 net/ipv4/raw.c 			payload = skb->data;
skb               277 net/ipv4/raw.c 		ip_icmp_error(sk, skb, err, 0, info, payload);
skb               286 net/ipv4/raw.c void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
skb               298 net/ipv4/raw.c 		int dif = skb->dev->ifindex;
skb               299 net/ipv4/raw.c 		int sdif = inet_sdif(skb);
skb               301 net/ipv4/raw.c 		iph = (const struct iphdr *)skb->data;
skb               302 net/ipv4/raw.c 		net = dev_net(skb->dev);
skb               307 net/ipv4/raw.c 			raw_err(raw_sk, skb, info);
skb               309 net/ipv4/raw.c 			iph = (const struct iphdr *)skb->data;
skb               315 net/ipv4/raw.c static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               319 net/ipv4/raw.c 	ipv4_pktinfo_prepare(sk, skb);
skb               320 net/ipv4/raw.c 	if (sock_queue_rcv_skb(sk, skb) < 0) {
skb               321 net/ipv4/raw.c 		kfree_skb(skb);
skb               328 net/ipv4/raw.c int raw_rcv(struct sock *sk, struct sk_buff *skb)
skb               330 net/ipv4/raw.c 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
skb               332 net/ipv4/raw.c 		kfree_skb(skb);
skb               335 net/ipv4/raw.c 	nf_reset_ct(skb);
skb               337 net/ipv4/raw.c 	skb_push(skb, skb->data - skb_network_header(skb));
skb               339 net/ipv4/raw.c 	raw_rcv_skb(sk, skb);
skb               351 net/ipv4/raw.c 	struct sk_buff *skb;
skb               370 net/ipv4/raw.c 	skb = sock_alloc_send_skb(sk,
skb               373 net/ipv4/raw.c 	if (!skb)
skb               375 net/ipv4/raw.c 	skb_reserve(skb, hlen);
skb               377 net/ipv4/raw.c 	skb->priority = sk->sk_priority;
skb               378 net/ipv4/raw.c 	skb->mark = sockc->mark;
skb               379 net/ipv4/raw.c 	skb->tstamp = sockc->transmit_time;
skb               380 net/ipv4/raw.c 	skb_dst_set(skb, &rt->dst);
skb               383 net/ipv4/raw.c 	skb_reset_network_header(skb);
skb               384 net/ipv4/raw.c 	iph = ip_hdr(skb);
skb               385 net/ipv4/raw.c 	skb_put(skb, length);
skb               387 net/ipv4/raw.c 	skb->ip_summed = CHECKSUM_NONE;
skb               389 net/ipv4/raw.c 	skb_setup_tx_timestamp(skb, sockc->tsflags);
skb               392 net/ipv4/raw.c 		skb_set_dst_pending_confirm(skb, 1);
skb               394 net/ipv4/raw.c 	skb->transport_header = skb->network_header;
skb               418 net/ipv4/raw.c 			ip_select_ident(net, skb, NULL);
skb               421 net/ipv4/raw.c 		skb->transport_header += iphlen;
skb               425 net/ipv4/raw.c 				skb_transport_header(skb))->type);
skb               429 net/ipv4/raw.c 		      net, sk, skb, NULL, rt->dst.dev,
skb               439 net/ipv4/raw.c 	kfree_skb(skb);
skb               468 net/ipv4/raw.c 		       struct sk_buff *skb)
skb               475 net/ipv4/raw.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               478 net/ipv4/raw.c 			skb->csum = csum_block_add(
skb               479 net/ipv4/raw.c 				skb->csum,
skb               495 net/ipv4/raw.c 	return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
skb               757 net/ipv4/raw.c 	struct sk_buff *skb;
skb               767 net/ipv4/raw.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb               768 net/ipv4/raw.c 	if (!skb)
skb               771 net/ipv4/raw.c 	copied = skb->len;
skb               777 net/ipv4/raw.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               781 net/ipv4/raw.c 	sock_recv_ts_and_drops(msg, sk, skb);
skb               786 net/ipv4/raw.c 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
skb               792 net/ipv4/raw.c 		ip_cmsg_recv(msg, skb);
skb               794 net/ipv4/raw.c 		copied = skb->len;
skb               796 net/ipv4/raw.c 	skb_free_datagram(sk, skb);
skb               909 net/ipv4/raw.c 		struct sk_buff *skb;
skb               913 net/ipv4/raw.c 		skb = skb_peek(&sk->sk_receive_queue);
skb               914 net/ipv4/raw.c 		if (skb)
skb               915 net/ipv4/raw.c 			amount = skb->len;
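
raw_v4_input() above fans a datagram out to every raw socket bound to the protocol: each match gets its own clone via raw_rcv(), and the original skb continues to the normal protocol handler. A minimal sketch of that fan-out loop; next_raw_sock() and raw_match_sketch() are hypothetical stand-ins for the hash-chain walk and the protocol/interface/ICMP-filter checks in the excerpt:

	static void raw_fanout_sketch(struct sock *first, struct sk_buff *skb)
	{
		struct sock *sk;

		for (sk = first; sk; sk = next_raw_sock(sk)) {
			struct sk_buff *clone;

			if (!raw_match_sketch(sk, skb))
				continue;

			/* Each recipient gets a private clone; a delivery
			 * failure for one must not affect the others. */
			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				raw_rcv(sk, clone);
		}
	}
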
skb               132 net/ipv4/raw_diag.c static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
skb               140 net/ipv4/raw_diag.c 	return inet_sk_diag_fill(sk, NULL, skb, r,
skb               141 net/ipv4/raw_diag.c 				 sk_user_ns(NETLINK_CB(cb->skb).sk),
skb               142 net/ipv4/raw_diag.c 				 NETLINK_CB(cb->skb).portid,
skb               147 net/ipv4/raw_diag.c static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb               150 net/ipv4/raw_diag.c 	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
skb               152 net/ipv4/raw_diag.c 	struct net *net = sock_net(skb->sk);
skb               181 net/ipv4/raw_diag.c 			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0)
skb               140 net/ipv4/route.c static void		 ipv4_link_failure(struct sk_buff *skb);
skb               142 net/ipv4/route.c 					   struct sk_buff *skb, u32 mtu,
skb               145 net/ipv4/route.c 					struct sk_buff *skb);
skb               155 net/ipv4/route.c 					   struct sk_buff *skb,
skb               432 net/ipv4/route.c 					   struct sk_buff *skb,
skb               448 net/ipv4/route.c 		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
skb               545 net/ipv4/route.c static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
skb               548 net/ipv4/route.c 	const struct net *net = dev_net(skb->dev);
skb               549 net/ipv4/route.c 	const struct iphdr *iph = ip_hdr(skb);
skb               550 net/ipv4/route.c 	int oif = skb->dev->ifindex;
skb               553 net/ipv4/route.c 	u32 mark = skb->mark;
skb               577 net/ipv4/route.c 				 const struct sk_buff *skb)
skb               579 net/ipv4/route.c 	if (skb)
skb               580 net/ipv4/route.c 		build_skb_flow_key(fl4, skb, sk);
skb               735 net/ipv4/route.c static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
skb               738 net/ipv4/route.c 	__be32 new_gw = icmp_hdr(skb)->un.gateway;
skb               739 net/ipv4/route.c 	__be32 old_gw = ip_hdr(skb)->saddr;
skb               740 net/ipv4/route.c 	struct net_device *dev = skb->dev;
skb               746 net/ipv4/route.c 	switch (icmp_hdr(skb)->code & 7) {
skb               805 net/ipv4/route.c 		const struct iphdr *iph = (const struct iphdr *) skb->data;
skb               818 net/ipv4/route.c static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
skb               822 net/ipv4/route.c 	const struct iphdr *iph = (const struct iphdr *) skb->data;
skb               823 net/ipv4/route.c 	struct net *net = dev_net(skb->dev);
skb               824 net/ipv4/route.c 	int oif = skb->dev->ifindex;
skb               827 net/ipv4/route.c 	u32 mark = skb->mark;
skb               832 net/ipv4/route.c 	__ip_do_redirect(rt, skb, &fl4, true);
skb               869 net/ipv4/route.c void ip_rt_send_redirect(struct sk_buff *skb)
skb               871 net/ipv4/route.c 	struct rtable *rt = skb_rtable(skb);
skb               889 net/ipv4/route.c 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
skb               891 net/ipv4/route.c 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
skb               892 net/ipv4/route.c 			  rt_nexthop(rt, ip_hdr(skb)->daddr));
skb               919 net/ipv4/route.c 		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
skb               921 net/ipv4/route.c 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
skb               928 net/ipv4/route.c 					     &ip_hdr(skb)->saddr, inet_iif(skb),
skb               929 net/ipv4/route.c 					     &ip_hdr(skb)->daddr, &gw);
skb               936 net/ipv4/route.c static int ip_error(struct sk_buff *skb)
skb               938 net/ipv4/route.c 	struct rtable *rt = skb_rtable(skb);
skb               939 net/ipv4/route.c 	struct net_device *dev = skb->dev;
skb               947 net/ipv4/route.c 	if (netif_is_l3_master(skb->dev)) {
skb               948 net/ipv4/route.c 		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
skb               989 net/ipv4/route.c 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
skb               990 net/ipv4/route.c 			       l3mdev_master_ifindex(skb->dev), 1);
skb              1006 net/ipv4/route.c 		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
skb              1008 net/ipv4/route.c out:	kfree_skb(skb);
skb              1045 net/ipv4/route.c 			      struct sk_buff *skb, u32 mtu,
skb              1051 net/ipv4/route.c 	ip_rt_build_flow_key(&fl4, sk, skb);
skb              1055 net/ipv4/route.c void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
skb              1058 net/ipv4/route.c 	const struct iphdr *iph = (const struct iphdr *) skb->data;
skb              1061 net/ipv4/route.c 	u32 mark = IP4_REPLY_MARK(net, skb->mark);
skb              1073 net/ipv4/route.c static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
skb              1075 net/ipv4/route.c 	const struct iphdr *iph = (const struct iphdr *) skb->data;
skb              1082 net/ipv4/route.c 		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
skb              1091 net/ipv4/route.c void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
skb              1093 net/ipv4/route.c 	const struct iphdr *iph = (const struct iphdr *) skb->data;
skb              1108 net/ipv4/route.c 		__ipv4_sk_update_pmtu(skb, sk, mtu);
skb              1145 net/ipv4/route.c void ipv4_redirect(struct sk_buff *skb, struct net *net,
skb              1148 net/ipv4/route.c 	const struct iphdr *iph = (const struct iphdr *) skb->data;
skb              1156 net/ipv4/route.c 		__ip_do_redirect(rt, skb, &fl4, false);
skb              1162 net/ipv4/route.c void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
skb              1164 net/ipv4/route.c 	const struct iphdr *iph = (const struct iphdr *) skb->data;
skb              1172 net/ipv4/route.c 		__ip_do_redirect(rt, skb, &fl4, false);
skb              1195 net/ipv4/route.c static void ipv4_send_dest_unreach(struct sk_buff *skb)
skb              1203 net/ipv4/route.c 	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
skb              1204 net/ipv4/route.c 	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
skb              1208 net/ipv4/route.c 	if (ip_hdr(skb)->ihl > 5) {
skb              1209 net/ipv4/route.c 		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
skb              1211 net/ipv4/route.c 		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
skb              1214 net/ipv4/route.c 		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
skb              1220 net/ipv4/route.c 	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
skb              1223 net/ipv4/route.c static void ipv4_link_failure(struct sk_buff *skb)
skb              1227 net/ipv4/route.c 	ipv4_send_dest_unreach(skb);
skb              1229 net/ipv4/route.c 	rt = skb_rtable(skb);
skb              1234 net/ipv4/route.c static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
skb              1237 net/ipv4/route.c 		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
skb              1238 net/ipv4/route.c 		 skb->dev ? skb->dev->name : "?");
skb              1239 net/ipv4/route.c 	kfree_skb(skb);
skb              1253 net/ipv4/route.c void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
skb              1258 net/ipv4/route.c 		src = ip_hdr(skb)->saddr;
skb              1261 net/ipv4/route.c 		struct iphdr *iph = ip_hdr(skb);
skb              1267 net/ipv4/route.c 			.flowi4_iif = skb->dev->ifindex,
skb              1268 net/ipv4/route.c 			.flowi4_mark = skb->mark,
skb              1686 net/ipv4/route.c int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
skb              1697 net/ipv4/route.c 	    skb->protocol != htons(ETH_P_IP))
skb              1705 net/ipv4/route.c 		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
skb              1708 net/ipv4/route.c 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
skb              1717 net/ipv4/route.c static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
skb              1726 net/ipv4/route.c 	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
skb              1750 net/ipv4/route.c 	skb_dst_set(skb, &rth->dst);
skb              1757 net/ipv4/route.c 				     struct sk_buff *skb,
skb              1770 net/ipv4/route.c 		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
skb              1773 net/ipv4/route.c 				       skb_mac_header(skb),
skb              1781 net/ipv4/route.c static int __mkroute_input(struct sk_buff *skb,
skb              1802 net/ipv4/route.c 	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
skb              1805 net/ipv4/route.c 		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
skb              1813 net/ipv4/route.c 	    skb->protocol == htons(ETH_P_IP)) {
skb              1819 net/ipv4/route.c 			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
skb              1822 net/ipv4/route.c 	if (skb->protocol != htons(ETH_P_IP)) {
skb              1844 net/ipv4/route.c 			skb_dst_set_noref(skb, &rth->dst);
skb              1865 net/ipv4/route.c 	skb_dst_set(skb, &rth->dst);
skb              1876 net/ipv4/route.c static void ip_multipath_l3_keys(const struct sk_buff *skb,
skb              1879 net/ipv4/route.c 	const struct iphdr *outer_iph = ip_hdr(skb);
skb              1892 net/ipv4/route.c 	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
skb              1903 net/ipv4/route.c 	inner_iph = skb_header_pointer(skb,
skb              1917 net/ipv4/route.c 		       const struct sk_buff *skb, struct flow_keys *flkeys)
skb              1927 net/ipv4/route.c 		if (skb) {
skb              1928 net/ipv4/route.c 			ip_multipath_l3_keys(skb, &hash_keys);
skb              1936 net/ipv4/route.c 		if (skb) {
skb              1941 net/ipv4/route.c 			if (skb->l4_hash)
skb              1942 net/ipv4/route.c 				return skb_get_hash_raw(skb) >> 1;
skb              1947 net/ipv4/route.c 				skb_flow_dissect_flow_keys(skb, &keys, flag);
skb              1970 net/ipv4/route.c 		if (skb) {
skb              1973 net/ipv4/route.c 			skb_flow_dissect_flow_keys(skb, &keys, 0);
skb              1988 net/ipv4/route.c 				ip_multipath_l3_keys(skb, &hash_keys);
skb              2007 net/ipv4/route.c static int ip_mkroute_input(struct sk_buff *skb,
skb              2015 net/ipv4/route.c 		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
skb              2022 net/ipv4/route.c 	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
skb              2036 net/ipv4/route.c static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
skb              2060 net/ipv4/route.c 	tun_info = skb_tunnel_info(skb);
skb              2065 net/ipv4/route.c 	skb_dst_drop(skb);
skb              2100 net/ipv4/route.c 	fl4.flowi4_mark = skb->mark;
skb              2108 net/ipv4/route.c 	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
skb              2133 net/ipv4/route.c 		err = fib_validate_source(skb, saddr, daddr, tos,
skb              2148 net/ipv4/route.c 	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
skb              2152 net/ipv4/route.c 	if (skb->protocol != htons(ETH_P_IP))
skb              2156 net/ipv4/route.c 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
skb              2172 net/ipv4/route.c 			skb_dst_set_noref(skb, &rth->dst);
skb              2210 net/ipv4/route.c 	skb_dst_set(skb, &rth->dst);
skb              2241 net/ipv4/route.c 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
skb              2245 net/ipv4/route.c int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
skb              2253 net/ipv4/route.c 	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
skb              2261 net/ipv4/route.c int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
skb              2283 net/ipv4/route.c 				      ip_hdr(skb)->protocol);
skb              2289 net/ipv4/route.c 			l3_in_dev = __in_dev_get_rcu(skb->dev);
skb              2292 net/ipv4/route.c 						      ip_hdr(skb)->protocol);
skb              2302 net/ipv4/route.c 			err = ip_route_input_mc(skb, daddr, saddr,
skb              2308 net/ipv4/route.c 	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
skb              2441 net/ipv4/route.c 					const struct sk_buff *skb)
skb              2458 net/ipv4/route.c 	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
skb              2467 net/ipv4/route.c 					    const struct sk_buff *skb)
skb              2625 net/ipv4/route.c 	fib_select_path(net, res, fl4, skb);
skb              2651 net/ipv4/route.c 					  struct sk_buff *skb, u32 mtu,
skb              2657 net/ipv4/route.c 				       struct sk_buff *skb)
skb              2738 net/ipv4/route.c 			struct sk_buff *skb, u32 portid, u32 seq,
skb              2747 net/ipv4/route.c 	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
skb              2757 net/ipv4/route.c 	if (nla_put_u32(skb, RTA_TABLE, table_id))
skb              2765 net/ipv4/route.c 	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
skb              2768 net/ipv4/route.c 	if (nla_put_in_addr(skb, RTA_DST, dst))
skb              2772 net/ipv4/route.c 		if (nla_put_in_addr(skb, RTA_SRC, src))
skb              2776 net/ipv4/route.c 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
skb              2780 net/ipv4/route.c 	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
skb              2785 net/ipv4/route.c 		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
skb              2790 net/ipv4/route.c 		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
skb              2797 net/ipv4/route.c 			nla = nla_reserve(skb, RTA_VIA, alen + 2);
skb              2822 net/ipv4/route.c 	if (rtnetlink_put_metrics(skb, metrics) < 0)
skb              2827 net/ipv4/route.c 		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
skb              2831 net/ipv4/route.c 		    nla_put_u32(skb, RTA_UID,
skb              2841 net/ipv4/route.c 				int err = ipmr_get_route(net, skb,
skb              2852 net/ipv4/route.c 				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
skb              2859 net/ipv4/route.c 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
skb              2862 net/ipv4/route.c 	nlmsg_end(skb, nlh);
skb              2866 net/ipv4/route.c 	nlmsg_cancel(skb, nlh);
skb              2870 net/ipv4/route.c static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
skb              2902 net/ipv4/route.c 					   table_id, NULL, skb,
skb              2903 net/ipv4/route.c 					   NETLINK_CB(cb->skb).portid,
skb              2915 net/ipv4/route.c int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
skb              2919 net/ipv4/route.c 	struct net *net = sock_net(cb->skb->sk);
skb              2934 net/ipv4/route.c 			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
skb              2949 net/ipv4/route.c 	struct sk_buff *skb;
skb              2952 net/ipv4/route.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              2953 net/ipv4/route.c 	if (!skb)
skb              2959 net/ipv4/route.c 	skb_reset_mac_header(skb);
skb              2960 net/ipv4/route.c 	skb_reset_network_header(skb);
skb              2961 net/ipv4/route.c 	skb->protocol = htons(ETH_P_IP);
skb              2962 net/ipv4/route.c 	iph = skb_put(skb, sizeof(struct iphdr));
skb              2969 net/ipv4/route.c 	skb_set_transport_header(skb, skb->len);
skb              2975 net/ipv4/route.c 		udph = skb_put_zero(skb, sizeof(struct udphdr));
skb              2985 net/ipv4/route.c 		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
skb              2997 net/ipv4/route.c 		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
skb              3003 net/ipv4/route.c 	return skb;
skb              3006 net/ipv4/route.c static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
skb              3020 net/ipv4/route.c 	if (!netlink_strict_get_check(skb))
skb              3085 net/ipv4/route.c 	struct sk_buff *skb;
skb              3122 net/ipv4/route.c 	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
skb              3123 net/ipv4/route.c 	if (!skb)
skb              3150 net/ipv4/route.c 		skb->dev	= dev;
skb              3151 net/ipv4/route.c 		skb->mark	= mark;
skb              3152 net/ipv4/route.c 		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
skb              3155 net/ipv4/route.c 		rt = skb_rtable(skb);
skb              3160 net/ipv4/route.c 		skb->dev = net->loopback_dev;
skb              3161 net/ipv4/route.c 		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
skb              3166 net/ipv4/route.c 			skb_dst_set(skb, &rt->dst);
skb              3179 net/ipv4/route.c 	skb_trim(skb, 0);
skb              3180 net/ipv4/route.c 	skb_reset_network_header(skb);
skb              3181 net/ipv4/route.c 	skb_reset_transport_header(skb);
skb              3182 net/ipv4/route.c 	skb_reset_mac_header(skb);
skb              3191 net/ipv4/route.c 		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
skb              3196 net/ipv4/route.c 		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
skb              3205 net/ipv4/route.c 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
skb              3211 net/ipv4/route.c 	kfree_skb(skb);
skb               178 net/ipv4/syncookies.c __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mssp)
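
inet_rtm_getroute() builds a synthetic skb so that an RTM_GETROUTE request carrying ip_proto/sport/dport exercises the same input path as a real packet, including multipath hashing. A trimmed sketch of that construction (UDP case only; the TCP and ICMP branches from the excerpt are analogous, and the function name is illustrative):

	static struct sk_buff *build_probe_skb_sketch(__be32 src, __be32 dst,
						      __be16 sport, __be16 dport)
	{
		struct sk_buff *skb;
		struct iphdr *iph;
		struct udphdr *udph;

		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
		if (!skb)
			return NULL;

		/* No real device yet: just lay the headers out the way the
		 * receive path expects to find them. */
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = htons(ETH_P_IP);

		iph = skb_put(skb, sizeof(struct iphdr));
		iph->version  = 4;
		iph->ihl      = 5;
		iph->protocol = IPPROTO_UDP;
		iph->saddr    = src;
		iph->daddr    = dst;

		skb_set_transport_header(skb, skb->len);
		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->dest   = dport;
		udph->len    = htons(sizeof(struct udphdr));

		return skb;
	}
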
skb               180 net/ipv4/syncookies.c 	const struct iphdr *iph = ip_hdr(skb);
skb               181 net/ipv4/syncookies.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               201 net/ipv4/syncookies.c struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
skb               209 net/ipv4/syncookies.c 	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
skb               214 net/ipv4/syncookies.c 		sock_rps_save_rxhash(child, skb);
skb               283 net/ipv4/syncookies.c struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
skb               285 net/ipv4/syncookies.c 	struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
skb               290 net/ipv4/syncookies.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               306 net/ipv4/syncookies.c 	mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
skb               316 net/ipv4/syncookies.c 	tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);
skb               320 net/ipv4/syncookies.c 					  ip_hdr(skb)->daddr,
skb               321 net/ipv4/syncookies.c 					  ip_hdr(skb)->saddr);
skb               342 net/ipv4/syncookies.c 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
skb               343 net/ipv4/syncookies.c 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
skb               344 net/ipv4/syncookies.c 	ireq->ir_mark		= inet_request_mark(sk, skb);
skb               355 net/ipv4/syncookies.c 	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
skb               360 net/ipv4/syncookies.c 	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));
skb               362 net/ipv4/syncookies.c 	if (security_inet_conn_request(sk, skb, req)) {
skb               398 net/ipv4/syncookies.c 	ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff);
skb               463 net/ipv4/tcp.c 	struct sk_buff *skb = tcp_write_queue_tail(sk);
skb               465 net/ipv4/tcp.c 	if (tsflags && skb) {
skb               466 net/ipv4/tcp.c 		struct skb_shared_info *shinfo = skb_shinfo(skb);
skb               467 net/ipv4/tcp.c 		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
skb               473 net/ipv4/tcp.c 			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
skb               648 net/ipv4/tcp.c static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
skb               650 net/ipv4/tcp.c 	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
skb               659 net/ipv4/tcp.c static void skb_entail(struct sock *sk, struct sk_buff *skb)
skb               662 net/ipv4/tcp.c 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
skb               664 net/ipv4/tcp.c 	skb->csum    = 0;
skb               668 net/ipv4/tcp.c 	__skb_header_release(skb);
skb               669 net/ipv4/tcp.c 	tcp_add_write_queue_tail(sk, skb);
skb               670 net/ipv4/tcp.c 	sk_wmem_queued_add(sk, skb->truesize);
skb               671 net/ipv4/tcp.c 	sk_mem_charge(sk, skb->truesize);
skb               694 net/ipv4/tcp.c static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
skb               697 net/ipv4/tcp.c 	return skb->len < size_goal &&
skb               700 net/ipv4/tcp.c 	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
skb               707 net/ipv4/tcp.c 	struct sk_buff *skb;
skb               709 net/ipv4/tcp.c 	skb = tcp_write_queue_tail(sk);
skb               710 net/ipv4/tcp.c 	if (!skb)
skb               713 net/ipv4/tcp.c 		tcp_mark_push(tp, skb);
skb               717 net/ipv4/tcp.c 	if (tcp_should_autocork(sk, skb, size_goal)) {
skb               727 net/ipv4/tcp.c 		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
skb               737 net/ipv4/tcp.c static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
skb               743 net/ipv4/tcp.c 	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
skb               865 net/ipv4/tcp.c 	struct sk_buff *skb;
skb               868 net/ipv4/tcp.c 		skb = sk->sk_tx_skb_cache;
skb               869 net/ipv4/tcp.c 		if (skb) {
skb               870 net/ipv4/tcp.c 			skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
skb               872 net/ipv4/tcp.c 			pskb_trim(skb, 0);
skb               873 net/ipv4/tcp.c 			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
skb               874 net/ipv4/tcp.c 			skb_shinfo(skb)->tx_flags = 0;
skb               875 net/ipv4/tcp.c 			memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
skb               876 net/ipv4/tcp.c 			return skb;
skb               885 net/ipv4/tcp.c 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
skb               886 net/ipv4/tcp.c 	if (likely(skb)) {
skb               891 net/ipv4/tcp.c 			sk_forced_mem_schedule(sk, skb->truesize);
skb               893 net/ipv4/tcp.c 			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
skb               896 net/ipv4/tcp.c 			skb_reserve(skb, sk->sk_prot->max_header);
skb               901 net/ipv4/tcp.c 			skb->reserved_tailroom = skb->end - skb->tail - size;
skb               902 net/ipv4/tcp.c 			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
skb               903 net/ipv4/tcp.c 			return skb;
skb               905 net/ipv4/tcp.c 		__kfree_skb(skb);
skb               954 net/ipv4/tcp.c static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
skb               956 net/ipv4/tcp.c 	if (skb && !skb->len) {
skb               957 net/ipv4/tcp.c 		tcp_unlink_write_queue(skb, sk);
skb               960 net/ipv4/tcp.c 		sk_wmem_free_skb(sk, skb);
skb               998 net/ipv4/tcp.c 		struct sk_buff *skb = tcp_write_queue_tail(sk);
skb              1002 net/ipv4/tcp.c 		if (!skb || (copy = size_goal - skb->len) <= 0 ||
skb              1003 net/ipv4/tcp.c 		    !tcp_skb_can_collapse_to(skb)) {
skb              1008 net/ipv4/tcp.c 			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
skb              1010 net/ipv4/tcp.c 			if (!skb)
skb              1014 net/ipv4/tcp.c 			skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
skb              1016 net/ipv4/tcp.c 			skb_entail(sk, skb);
skb              1023 net/ipv4/tcp.c 		i = skb_shinfo(skb)->nr_frags;
skb              1024 net/ipv4/tcp.c 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
skb              1026 net/ipv4/tcp.c 			tcp_mark_push(tp, skb);
skb              1033 net/ipv4/tcp.c 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb              1036 net/ipv4/tcp.c 			skb_fill_page_desc(skb, i, page, offset, copy);
skb              1040 net/ipv4/tcp.c 			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
skb              1042 net/ipv4/tcp.c 		skb->len += copy;
skb              1043 net/ipv4/tcp.c 		skb->data_len += copy;
skb              1044 net/ipv4/tcp.c 		skb->truesize += copy;
skb              1047 net/ipv4/tcp.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb              1049 net/ipv4/tcp.c 		TCP_SKB_CB(skb)->end_seq += copy;
skb              1050 net/ipv4/tcp.c 		tcp_skb_pcount_set(skb, 0);
skb              1053 net/ipv4/tcp.c 			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
skb              1061 net/ipv4/tcp.c 		if (skb->len < size_goal || (flags & MSG_OOB))
skb              1065 net/ipv4/tcp.c 			tcp_mark_push(tp, skb);
skb              1067 net/ipv4/tcp.c 		} else if (skb == tcp_send_head(sk))
skb              1190 net/ipv4/tcp.c 	struct sk_buff *skb;
skb              1201 net/ipv4/tcp.c 		skb = tcp_write_queue_tail(sk);
skb              1202 net/ipv4/tcp.c 		uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
skb              1275 net/ipv4/tcp.c 		skb = tcp_write_queue_tail(sk);
skb              1276 net/ipv4/tcp.c 		if (skb)
skb              1277 net/ipv4/tcp.c 			copy = size_goal - skb->len;
skb              1279 net/ipv4/tcp.c 		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
skb              1292 net/ipv4/tcp.c 			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
skb              1294 net/ipv4/tcp.c 			if (!skb)
skb              1298 net/ipv4/tcp.c 			skb->ip_summed = CHECKSUM_PARTIAL;
skb              1300 net/ipv4/tcp.c 			skb_entail(sk, skb);
skb              1308 net/ipv4/tcp.c 				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
skb              1316 net/ipv4/tcp.c 		if (skb_availroom(skb) > 0 && !zc) {
skb              1318 net/ipv4/tcp.c 			copy = min_t(int, copy, skb_availroom(skb));
skb              1319 net/ipv4/tcp.c 			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
skb              1324 net/ipv4/tcp.c 			int i = skb_shinfo(skb)->nr_frags;
skb              1330 net/ipv4/tcp.c 			if (!skb_can_coalesce(skb, i, pfrag->page,
skb              1333 net/ipv4/tcp.c 					tcp_mark_push(tp, skb);
skb              1344 net/ipv4/tcp.c 			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
skb              1353 net/ipv4/tcp.c 				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb              1355 net/ipv4/tcp.c 				skb_fill_page_desc(skb, i, pfrag->page,
skb              1361 net/ipv4/tcp.c 			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
skb              1363 net/ipv4/tcp.c 				tcp_mark_push(tp, skb);
skb              1372 net/ipv4/tcp.c 			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
skb              1375 net/ipv4/tcp.c 		TCP_SKB_CB(skb)->end_seq += copy;
skb              1376 net/ipv4/tcp.c 		tcp_skb_pcount_set(skb, 0);
skb              1381 net/ipv4/tcp.c 				TCP_SKB_CB(skb)->eor = 1;
skb              1385 net/ipv4/tcp.c 		if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
skb              1389 net/ipv4/tcp.c 			tcp_mark_push(tp, skb);
skb              1391 net/ipv4/tcp.c 		} else if (skb == tcp_send_head(sk))
skb              1419 net/ipv4/tcp.c 	skb = tcp_write_queue_tail(sk);
skb              1421 net/ipv4/tcp.c 	tcp_remove_empty_skb(sk, skb);
skb              1500 net/ipv4/tcp.c 	struct sk_buff *skb;
skb              1505 net/ipv4/tcp.c 	skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
skb              1506 net/ipv4/tcp.c 		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
skb              1509 net/ipv4/tcp.c 		copied += skb->len;
skb              1512 net/ipv4/tcp.c 	skb_queue_walk(&sk->sk_write_queue, skb) {
skb              1513 net/ipv4/tcp.c 		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
skb              1517 net/ipv4/tcp.c 		copied += skb->len;
skb              1534 net/ipv4/tcp.c 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
skb              1536 net/ipv4/tcp.c 	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
skb              1538 net/ipv4/tcp.c 	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
skb              1589 net/ipv4/tcp.c 	struct sk_buff *skb;
skb              1592 net/ipv4/tcp.c 	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
skb              1593 net/ipv4/tcp.c 		offset = seq - TCP_SKB_CB(skb)->seq;
skb              1594 net/ipv4/tcp.c 		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
skb              1598 net/ipv4/tcp.c 		if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
skb              1600 net/ipv4/tcp.c 			return skb;
skb              1606 net/ipv4/tcp.c 		sk_eat_skb(sk, skb);
skb              1625 net/ipv4/tcp.c 	struct sk_buff *skb;
skb              1633 net/ipv4/tcp.c 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
skb              1634 net/ipv4/tcp.c 		if (offset < skb->len) {
skb              1638 net/ipv4/tcp.c 			len = skb->len - offset;
skb              1647 net/ipv4/tcp.c 			used = recv_actor(desc, skb, offset, len);
skb              1662 net/ipv4/tcp.c 			skb = tcp_recv_skb(sk, seq - 1, &offset);
skb              1663 net/ipv4/tcp.c 			if (!skb)
skb              1668 net/ipv4/tcp.c 			if (offset + 1 != skb->len)
skb              1671 net/ipv4/tcp.c 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
skb              1672 net/ipv4/tcp.c 			sk_eat_skb(sk, skb);
skb              1676 net/ipv4/tcp.c 		sk_eat_skb(sk, skb);
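
The tcp_read_sock() lines above drive a recv_actor callback that may consume only part of each buffer, in which case the walk must stop and later resume at the same offset; fully consumed buffers are "eaten". A self-contained userspace analogue of that contract follows, with hypothetical names (rec, read_stream, print_actor):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical record standing in for an in-order sk_buff. */
struct rec { const char *data; size_t len; };

/* The actor may consume fewer bytes than offered (e.g. a full pipe);
 * the caller must then stop and retry later from the saved offset. */
typedef size_t (*actor_t)(const char *p, size_t len);

static size_t read_stream(struct rec *q, size_t n, size_t *seq_off, actor_t actor)
{
	size_t copied = 0;

	for (size_t i = 0; i < n; i++) {
		size_t off = *seq_off;          /* resumes mid-record after a stall */

		while (off < q[i].len) {
			size_t used = actor(q[i].data + off, q[i].len - off);

			if (!used) {
				*seq_off = off; /* actor stalled: remember position */
				return copied;
			}
			off += used;
			copied += used;
		}
		*seq_off = 0;                   /* record fully consumed ("eaten") */
	}
	return copied;
}

static size_t print_actor(const char *p, size_t len)
{
	size_t take = len > 4 ? 4 : len;        /* deliberately partial consumer */

	printf("%.*s", (int)take, p);
	return take;
}

int main(void)
{
	struct rec q[] = { { "hello ", 6 }, { "world\n", 6 } };
	size_t off = 0;

	read_stream(q, 2, &off, print_actor);   /* prints "hello world" */
	return 0;
}
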
skb              1753 net/ipv4/tcp.c 	struct sk_buff *skb = NULL;
skb              1789 net/ipv4/tcp.c 			if (skb) {
skb              1790 net/ipv4/tcp.c 				skb = skb->next;
skb              1791 net/ipv4/tcp.c 				offset = seq - TCP_SKB_CB(skb)->seq;
skb              1793 net/ipv4/tcp.c 				skb = tcp_recv_skb(sk, seq, &offset);
skb              1796 net/ipv4/tcp.c 			zc->recv_skip_hint = skb->len - offset;
skb              1797 net/ipv4/tcp.c 			offset -= skb_headlen(skb);
skb              1798 net/ipv4/tcp.c 			if ((int)offset < 0 || skb_has_frag_list(skb))
skb              1800 net/ipv4/tcp.c 			frags = skb_shinfo(skb)->frags;
skb              1849 net/ipv4/tcp.c static void tcp_update_recv_tstamps(struct sk_buff *skb,
skb              1852 net/ipv4/tcp.c 	if (skb->tstamp)
skb              1853 net/ipv4/tcp.c 		tss->ts[0] = ktime_to_timespec64(skb->tstamp);
skb              1857 net/ipv4/tcp.c 	if (skb_hwtstamps(skb)->hwtstamp)
skb              1858 net/ipv4/tcp.c 		tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp);
skb              1965 net/ipv4/tcp.c 	struct sk_buff *skb, *last;
skb              2029 net/ipv4/tcp.c 		skb_queue_walk(&sk->sk_receive_queue, skb) {
skb              2030 net/ipv4/tcp.c 			last = skb;
skb              2034 net/ipv4/tcp.c 			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
skb              2036 net/ipv4/tcp.c 				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
skb              2040 net/ipv4/tcp.c 			offset = *seq - TCP_SKB_CB(skb)->seq;
skb              2041 net/ipv4/tcp.c 			if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
skb              2045 net/ipv4/tcp.c 			if (offset < skb->len)
skb              2047 net/ipv4/tcp.c 			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
skb              2051 net/ipv4/tcp.c 			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
skb              2118 net/ipv4/tcp.c 		used = skb->len - offset;
skb              2141 net/ipv4/tcp.c 			err = skb_copy_datagram_msg(skb, offset, msg, used);
skb              2162 net/ipv4/tcp.c 		if (TCP_SKB_CB(skb)->has_rxtstamp) {
skb              2163 net/ipv4/tcp.c 			tcp_update_recv_tstamps(skb, &tss);
skb              2167 net/ipv4/tcp.c 		if (used + offset < skb->len)
skb              2170 net/ipv4/tcp.c 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
skb              2173 net/ipv4/tcp.c 			sk_eat_skb(sk, skb);
skb              2180 net/ipv4/tcp.c 			sk_eat_skb(sk, skb);
skb              2348 net/ipv4/tcp.c 	struct sk_buff *skb;
skb              2368 net/ipv4/tcp.c 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
skb              2369 net/ipv4/tcp.c 		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
skb              2371 net/ipv4/tcp.c 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
skb              2374 net/ipv4/tcp.c 		__kfree_skb(skb);
skb              2536 net/ipv4/tcp.c 		struct sk_buff *skb = rb_to_skb(p);
skb              2542 net/ipv4/tcp.c 		tcp_rtx_queue_unlink(skb, sk);
skb              2543 net/ipv4/tcp.c 		sk_wmem_free_skb(sk, skb);
skb              2549 net/ipv4/tcp.c 	struct sk_buff *skb;
skb              2552 net/ipv4/tcp.c 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
skb              2553 net/ipv4/tcp.c 		tcp_skb_tsorted_anchor_cleanup(skb);
skb              2554 net/ipv4/tcp.c 		sk_wmem_free_skb(sk, skb);
skb              2557 net/ipv4/tcp.c 	skb = sk->sk_tx_skb_cache;
skb              2558 net/ipv4/tcp.c 	if (skb) {
skb              2559 net/ipv4/tcp.c 		__kfree_skb(skb);
skb              3807 net/ipv4/tcp.c 			  const struct sk_buff *skb, unsigned int header_len)
skb              3810 net/ipv4/tcp.c 	const struct tcphdr *tp = tcp_hdr(skb);
skb              3813 net/ipv4/tcp.c 	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
skb              3814 net/ipv4/tcp.c 					   skb_headlen(skb) - header_len : 0;
skb              3815 net/ipv4/tcp.c 	const struct skb_shared_info *shi = skb_shinfo(skb);
skb              3837 net/ipv4/tcp.c 	skb_walk_frags(skb, frag_iter)
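
Nearly every sequence comparison in these excerpts (before(), after(), !before(), ...) is wraparound-safe 32-bit arithmetic: the signed difference decides ordering, so the test stays correct across the 2^32 wrap where a plain < would not. A standalone restatement of those helpers:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* seq1 is "before" seq2 when the signed 32-bit difference is negative;
 * mirrors the kernel's before()/after() helpers. */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

int main(void)
{
	uint32_t a = 0xfffffff0u, b = 0x10u;    /* b is 32 bytes past a, across the wrap */

	printf("before(a, b) = %d\n", before(a, b));    /* 1: correct despite a > b */
	printf("a < b        = %d\n", a < b);           /* 0: naive compare is wrong */
	return 0;
}
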
skb                87 net/ipv4/tcp_bpf.c 					if (!msg_rx->skb)
skb               106 net/ipv4/tcp_bpf.c 			if (msg_rx->skb)
skb               107 net/ipv4/tcp_bpf.c 				consume_skb(msg_rx->skb);
skb                55 net/ipv4/tcp_diag.c static int tcp_diag_put_md5sig(struct sk_buff *skb,
skb                68 net/ipv4/tcp_diag.c 	attr = nla_reserve(skb, INET_DIAG_MD5SIG,
skb                85 net/ipv4/tcp_diag.c static int tcp_diag_put_ulp(struct sk_buff *skb, struct sock *sk,
skb                91 net/ipv4/tcp_diag.c 	nest = nla_nest_start_noflag(skb, INET_DIAG_ULP_INFO);
skb                95 net/ipv4/tcp_diag.c 	err = nla_put_string(skb, INET_ULP_INFO_NAME, ulp_ops->name);
skb               100 net/ipv4/tcp_diag.c 		err = ulp_ops->get_info(sk, skb);
skb               104 net/ipv4/tcp_diag.c 	nla_nest_end(skb, nest);
skb               108 net/ipv4/tcp_diag.c 	nla_nest_cancel(skb, nest);
skb               113 net/ipv4/tcp_diag.c 			    struct sk_buff *skb)
skb               125 net/ipv4/tcp_diag.c 			err = tcp_diag_put_md5sig(skb, md5sig);
skb               137 net/ipv4/tcp_diag.c 			err = tcp_diag_put_ulp(skb, sk, ulp_ops);
skb               181 net/ipv4/tcp_diag.c static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb               184 net/ipv4/tcp_diag.c 	inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
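
tcp_diag_put_ulp() above follows the standard netlink nesting discipline: open a nest, emit attributes, commit with nla_nest_end() on success, and roll the message back with nla_nest_cancel() on any failure so no half-built nest escapes to userspace. Below is a sketch of that begin/commit/rollback shape using hypothetical stubs, not the real nla_* API:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins; the kernel uses nla_nest_start_noflag(),
 * nla_nest_end() and nla_nest_cancel() on a struct sk_buff. */
struct msg  { size_t len; };
struct mark { size_t rollback_len; };

static struct mark nest_begin(struct msg *m)  { return (struct mark){ m->len }; }
static void nest_commit(struct msg *m)        { (void)m; /* patch nest length */ }
static void nest_cancel(struct msg *m, struct mark k) { m->len = k.rollback_len; }
static int  put_attr(struct msg *m, const char *s) { m->len += 4; return s ? 0 : -1; }

static int put_ulp_info(struct msg *m, const char *name)
{
	struct mark k = nest_begin(m);
	int err = put_attr(m, name);    /* INET_ULP_INFO_NAME analogue */

	if (err)
		goto cancel;
	nest_commit(m);                 /* nla_nest_end analogue */
	return 0;
cancel:
	nest_cancel(m, k);              /* nla_nest_cancel analogue: drop partial nest */
	return err;
}

int main(void)
{
	struct msg m = { 0 };

	printf("ok:   %d (len=%zu)\n", put_ulp_info(&m, "tls"), m.len);
	printf("fail: %d (len=%zu)\n", put_ulp_info(&m, NULL), m.len);
	return 0;
}
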
skb               163 net/ipv4/tcp_fastopen.c void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
skb               167 net/ipv4/tcp_fastopen.c 	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
skb               170 net/ipv4/tcp_fastopen.c 	skb = skb_clone(skb, GFP_ATOMIC);
skb               171 net/ipv4/tcp_fastopen.c 	if (!skb)
skb               174 net/ipv4/tcp_fastopen.c 	skb_dst_drop(skb);
skb               182 net/ipv4/tcp_fastopen.c 	tcp_segs_in(tp, skb);
skb               183 net/ipv4/tcp_fastopen.c 	__skb_pull(skb, tcp_hdrlen(skb));
skb               184 net/ipv4/tcp_fastopen.c 	sk_forced_mem_schedule(sk, skb->truesize);
skb               185 net/ipv4/tcp_fastopen.c 	skb_set_owner_r(skb, sk);
skb               187 net/ipv4/tcp_fastopen.c 	TCP_SKB_CB(skb)->seq++;
skb               188 net/ipv4/tcp_fastopen.c 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
skb               190 net/ipv4/tcp_fastopen.c 	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
skb               191 net/ipv4/tcp_fastopen.c 	__skb_queue_tail(&sk->sk_receive_queue, skb);
skb               197 net/ipv4/tcp_fastopen.c 	tp->bytes_received = skb->len;
skb               199 net/ipv4/tcp_fastopen.c 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
skb               233 net/ipv4/tcp_fastopen.c 					      struct sk_buff *skb,
skb               241 net/ipv4/tcp_fastopen.c 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
skb               262 net/ipv4/tcp_fastopen.c 	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
skb               277 net/ipv4/tcp_fastopen.c 	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
skb               279 net/ipv4/tcp_fastopen.c 	tcp_fastopen_add_skb(child, skb);
skb               338 net/ipv4/tcp_fastopen.c struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
skb               343 net/ipv4/tcp_fastopen.c 	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
skb               365 net/ipv4/tcp_fastopen.c 		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
skb               367 net/ipv4/tcp_fastopen.c 		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
skb               383 net/ipv4/tcp_fastopen.c 			child = tcp_fastopen_create_child(sk, skb, req);
skb               526 net/ipv4/tcp_fastopen.c 	struct sk_buff *skb;
skb               532 net/ipv4/tcp_fastopen.c 		skb = skb_rb_first(&tp->out_of_order_queue);
skb               533 net/ipv4/tcp_fastopen.c 		if (skb && !skb_rb_next(skb)) {
skb               534 net/ipv4/tcp_fastopen.c 			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
skb               140 net/ipv4/tcp_input.c static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
skb               151 net/ipv4/tcp_input.c 		dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
skb               162 net/ipv4/tcp_input.c static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
skb               173 net/ipv4/tcp_input.c 	len = skb_shinfo(skb)->gso_size ? : skb->len;
skb               180 net/ipv4/tcp_input.c 			tcp_gro_dev_warn(sk, skb, len);
skb               187 net/ipv4/tcp_input.c 		len += skb->data - skb_transport_header(skb);
skb               195 net/ipv4/tcp_input.c 		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
skb               254 net/ipv4/tcp_input.c static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
skb               256 net/ipv4/tcp_input.c 	if (tcp_hdr(skb)->cwr) {
skb               272 net/ipv4/tcp_input.c static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
skb               276 net/ipv4/tcp_input.c 	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
skb               304 net/ipv4/tcp_input.c static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
skb               307 net/ipv4/tcp_input.c 		__tcp_ecn_check_ce(sk, skb);
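
__tcp_ecn_check_ce() above switches on the two ECN bits kept in TCP_SKB_CB(skb)->ip_dsfield. The codepoints come from RFC 3168 and are worth spelling out:

#include <stdio.h>
#include <stdint.h>

#define INET_ECN_MASK 3   /* low two bits of the IPv4 TOS / IPv6 traffic class */

/* RFC 3168 codepoints. */
enum { ECN_NOT_ECT = 0, ECN_ECT_1 = 1, ECN_ECT_0 = 2, ECN_CE = 3 };

static const char *ecn_name(uint8_t dsfield)
{
	switch (dsfield & INET_ECN_MASK) {
	case ECN_NOT_ECT: return "Not-ECT";
	case ECN_ECT_1:   return "ECT(1)";
	case ECN_ECT_0:   return "ECT(0)";
	default:          return "CE";    /* congestion experienced */
	}
}

int main(void)
{
	for (uint8_t ds = 0; ds < 4; ds++)
		printf("dsfield %u -> %s\n", ds, ecn_name(ds));
	return 0;
}
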
skb               392 net/ipv4/tcp_input.c static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
skb               396 net/ipv4/tcp_input.c 	int truesize = tcp_win_from_space(sk, skb->truesize) >> 1;
skb               400 net/ipv4/tcp_input.c 		if (truesize <= skb->len)
skb               409 net/ipv4/tcp_input.c static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
skb               423 net/ipv4/tcp_input.c 		if (tcp_win_from_space(sk, skb->truesize) <= skb->len)
skb               426 net/ipv4/tcp_input.c 			incr = __tcp_grow_window(sk, skb);
skb               429 net/ipv4/tcp_input.c 			incr = max_t(int, incr, 2 * skb->len);
skb               577 net/ipv4/tcp_input.c 					  const struct sk_buff *skb)
skb               585 net/ipv4/tcp_input.c 	if (TCP_SKB_CB(skb)->end_seq -
skb               586 net/ipv4/tcp_input.c 	    TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
skb               676 net/ipv4/tcp_input.c static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
skb               684 net/ipv4/tcp_input.c 	tcp_measure_rcv_mss(sk, skb);
skb               716 net/ipv4/tcp_input.c 	tcp_ecn_check_ce(sk, skb);
skb               718 net/ipv4/tcp_input.c 	if (skb->len >= 128)
skb               719 net/ipv4/tcp_input.c 		tcp_grow_window(sk, skb);
skb               916 net/ipv4/tcp_input.c static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
skb               920 net/ipv4/tcp_input.c 	     before(TCP_SKB_CB(skb)->seq,
skb               922 net/ipv4/tcp_input.c 		tp->retransmit_skb_hint = skb;
skb               932 net/ipv4/tcp_input.c static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb)
skb               934 net/ipv4/tcp_input.c 	__u8 sacked = TCP_SKB_CB(skb)->sacked;
skb               938 net/ipv4/tcp_input.c 		tp->lost += tcp_skb_pcount(skb);
skb               941 net/ipv4/tcp_input.c static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
skb               943 net/ipv4/tcp_input.c 	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
skb               944 net/ipv4/tcp_input.c 		tcp_verify_retransmit_hint(tp, skb);
skb               946 net/ipv4/tcp_input.c 		tp->lost_out += tcp_skb_pcount(skb);
skb               947 net/ipv4/tcp_input.c 		tcp_sum_lost(tp, skb);
skb               948 net/ipv4/tcp_input.c 		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
skb               952 net/ipv4/tcp_input.c void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
skb               954 net/ipv4/tcp_input.c 	tcp_verify_retransmit_hint(tp, skb);
skb               956 net/ipv4/tcp_input.c 	tcp_sum_lost(tp, skb);
skb               957 net/ipv4/tcp_input.c 	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
skb               958 net/ipv4/tcp_input.c 		tp->lost_out += tcp_skb_pcount(skb);
skb               959 net/ipv4/tcp_input.c 		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
skb              1149 net/ipv4/tcp_input.c static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
skb              1157 net/ipv4/tcp_input.c 	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
skb              1158 net/ipv4/tcp_input.c 		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
skb              1160 net/ipv4/tcp_input.c 	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
skb              1161 net/ipv4/tcp_input.c 	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
skb              1162 net/ipv4/tcp_input.c 		mss = tcp_skb_mss(skb);
skb              1163 net/ipv4/tcp_input.c 		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
skb              1166 net/ipv4/tcp_input.c 			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
skb              1170 net/ipv4/tcp_input.c 			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
skb              1185 net/ipv4/tcp_input.c 		if (pkt_len >= skb->len && !in_sack)
skb              1188 net/ipv4/tcp_input.c 		err = tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
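
Between the pkt_len assignments and the tcp_fragment() call, tcp_match_skb_to_sack() rounds pkt_len to an MSS boundary so a partial SACK splits the skb on whole-MSS units: round down, except that the not-in-sack case rounds up rather than leaving a short head. A restatement of that rounding rule (matching the upstream logic elided from the excerpt, to the best of my reading):

#include <stdio.h>

static unsigned int round_to_mss(unsigned int pkt_len, unsigned int mss, int in_sack)
{
	if (pkt_len > mss) {
		unsigned int new_len = (pkt_len / mss) * mss;   /* round down */

		if (!in_sack && new_len < pkt_len)
			new_len += mss;                         /* round up instead */
		pkt_len = new_len;
	}
	return pkt_len;
}

int main(void)
{
	printf("%u\n", round_to_mss(3500, 1448, 1));    /* 2896: rounded down */
	printf("%u\n", round_to_mss(3500, 1448, 0));    /* 4344: rounded up   */
	return 0;
}
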
skb              1283 net/ipv4/tcp_input.c 			    struct sk_buff *skb,
skb              1289 net/ipv4/tcp_input.c 	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
skb              1300 net/ipv4/tcp_input.c 	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
skb              1302 net/ipv4/tcp_input.c 			tcp_skb_timestamp_us(skb));
skb              1303 net/ipv4/tcp_input.c 	tcp_rate_skb_delivered(sk, skb, state->rate);
skb              1305 net/ipv4/tcp_input.c 	if (skb == tp->lost_skb_hint)
skb              1309 net/ipv4/tcp_input.c 	TCP_SKB_CB(skb)->seq += shifted;
skb              1312 net/ipv4/tcp_input.c 	WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
skb              1313 net/ipv4/tcp_input.c 	tcp_skb_pcount_add(skb, -pcount);
skb              1324 net/ipv4/tcp_input.c 	if (tcp_skb_pcount(skb) <= 1)
skb              1325 net/ipv4/tcp_input.c 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
skb              1328 net/ipv4/tcp_input.c 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
skb              1330 net/ipv4/tcp_input.c 	if (skb->len > 0) {
skb              1331 net/ipv4/tcp_input.c 		BUG_ON(!tcp_skb_pcount(skb));
skb              1338 net/ipv4/tcp_input.c 	if (skb == tp->retransmit_skb_hint)
skb              1340 net/ipv4/tcp_input.c 	if (skb == tp->lost_skb_hint) {
skb              1345 net/ipv4/tcp_input.c 	TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
skb              1346 net/ipv4/tcp_input.c 	TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
skb              1347 net/ipv4/tcp_input.c 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
skb              1350 net/ipv4/tcp_input.c 	if (skb == tcp_highest_sack(sk))
skb              1351 net/ipv4/tcp_input.c 		tcp_advance_highest_sack(sk, skb);
skb              1353 net/ipv4/tcp_input.c 	tcp_skb_collapse_tstamp(prev, skb);
skb              1357 net/ipv4/tcp_input.c 	tcp_rtx_queue_unlink_and_free(skb, sk);
skb              1367 net/ipv4/tcp_input.c static int tcp_skb_seglen(const struct sk_buff *skb)
skb              1369 net/ipv4/tcp_input.c 	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
skb              1373 net/ipv4/tcp_input.c static int skb_can_shift(const struct sk_buff *skb)
skb              1375 net/ipv4/tcp_input.c 	return !skb_headlen(skb) && skb_is_nonlinear(skb);
skb              1396 net/ipv4/tcp_input.c static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
skb              1410 net/ipv4/tcp_input.c 	    (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
skb              1412 net/ipv4/tcp_input.c 	if (!skb_can_shift(skb))
skb              1415 net/ipv4/tcp_input.c 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
skb              1419 net/ipv4/tcp_input.c 	prev = skb_rb_prev(skb);
skb              1429 net/ipv4/tcp_input.c 	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
skb              1430 net/ipv4/tcp_input.c 		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
skb              1433 net/ipv4/tcp_input.c 		len = skb->len;
skb              1434 net/ipv4/tcp_input.c 		pcount = tcp_skb_pcount(skb);
skb              1435 net/ipv4/tcp_input.c 		mss = tcp_skb_seglen(skb);
skb              1443 net/ipv4/tcp_input.c 		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
skb              1449 net/ipv4/tcp_input.c 		if (tcp_skb_pcount(skb) <= 1)
skb              1452 net/ipv4/tcp_input.c 		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
skb              1468 net/ipv4/tcp_input.c 		len = end_seq - TCP_SKB_CB(skb)->seq;
skb              1470 net/ipv4/tcp_input.c 		BUG_ON(len > skb->len);
skb              1476 net/ipv4/tcp_input.c 		mss = tcp_skb_mss(skb);
skb              1495 net/ipv4/tcp_input.c 	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
skb              1498 net/ipv4/tcp_input.c 	if (!tcp_skb_shift(prev, skb, pcount, len))
skb              1500 net/ipv4/tcp_input.c 	if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
skb              1506 net/ipv4/tcp_input.c 	skb = skb_rb_next(prev);
skb              1507 net/ipv4/tcp_input.c 	if (!skb)
skb              1510 net/ipv4/tcp_input.c 	if (!skb_can_shift(skb) ||
skb              1511 net/ipv4/tcp_input.c 	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
skb              1512 net/ipv4/tcp_input.c 	    (mss != tcp_skb_seglen(skb)))
skb              1515 net/ipv4/tcp_input.c 	len = skb->len;
skb              1516 net/ipv4/tcp_input.c 	pcount = tcp_skb_pcount(skb);
skb              1517 net/ipv4/tcp_input.c 	if (tcp_skb_shift(prev, skb, pcount, len))
skb              1518 net/ipv4/tcp_input.c 		tcp_shifted_skb(sk, prev, skb, state, pcount,
skb              1525 net/ipv4/tcp_input.c 	return skb;
skb              1532 net/ipv4/tcp_input.c static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
skb              1541 net/ipv4/tcp_input.c 	skb_rbtree_walk_from(skb) {
skb              1546 net/ipv4/tcp_input.c 		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
skb              1550 net/ipv4/tcp_input.c 		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
skb              1551 net/ipv4/tcp_input.c 			in_sack = tcp_match_skb_to_sack(sk, skb,
skb              1563 net/ipv4/tcp_input.c 			tmp = tcp_shift_skb_data(sk, skb, state,
skb              1566 net/ipv4/tcp_input.c 				if (tmp != skb) {
skb              1567 net/ipv4/tcp_input.c 					skb = tmp;
skb              1573 net/ipv4/tcp_input.c 				in_sack = tcp_match_skb_to_sack(sk, skb,
skb              1583 net/ipv4/tcp_input.c 			TCP_SKB_CB(skb)->sacked =
skb              1586 net/ipv4/tcp_input.c 						TCP_SKB_CB(skb)->sacked,
skb              1587 net/ipv4/tcp_input.c 						TCP_SKB_CB(skb)->seq,
skb              1588 net/ipv4/tcp_input.c 						TCP_SKB_CB(skb)->end_seq,
skb              1590 net/ipv4/tcp_input.c 						tcp_skb_pcount(skb),
skb              1591 net/ipv4/tcp_input.c 						tcp_skb_timestamp_us(skb));
skb              1592 net/ipv4/tcp_input.c 			tcp_rate_skb_delivered(sk, skb, state->rate);
skb              1593 net/ipv4/tcp_input.c 			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
skb              1594 net/ipv4/tcp_input.c 				list_del_init(&skb->tcp_tsorted_anchor);
skb              1596 net/ipv4/tcp_input.c 			if (!before(TCP_SKB_CB(skb)->seq,
skb              1598 net/ipv4/tcp_input.c 				tcp_advance_highest_sack(sk, skb);
skb              1601 net/ipv4/tcp_input.c 	return skb;
skb              1607 net/ipv4/tcp_input.c 	struct sk_buff *skb;
skb              1611 net/ipv4/tcp_input.c 		skb = rb_to_skb(parent);
skb              1612 net/ipv4/tcp_input.c 		if (before(seq, TCP_SKB_CB(skb)->seq)) {
skb              1616 net/ipv4/tcp_input.c 		if (!before(seq, TCP_SKB_CB(skb)->end_seq)) {
skb              1620 net/ipv4/tcp_input.c 		return skb;
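
The bsearch above descends the retransmit rbtree by sequence number: go left when the target is before the node's seq, right when it is at or past end_seq, and stop when it falls inside the node's range. The array analogue, using the same wraparound-safe comparison:

#include <stdio.h>
#include <stdint.h>

static int before(uint32_t s1, uint32_t s2) { return (int32_t)(s1 - s2) < 0; }

struct seg { uint32_t seq, end_seq; };

/* Find the segment whose [seq, end_seq) contains `seq`; the flat-array
 * analogue of the rbtree descent shown above. */
static const struct seg *find_seg(const struct seg *s, int n, uint32_t seq)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (before(seq, s[mid].seq))
			hi = mid - 1;                   /* descend left  */
		else if (!before(seq, s[mid].end_seq))
			lo = mid + 1;                   /* descend right */
		else
			return &s[mid];                 /* seq falls inside */
	}
	return NULL;
}

int main(void)
{
	struct seg s[] = { {100, 200}, {200, 350}, {350, 400} };
	const struct seg *hit = find_seg(s, 3, 250);

	if (hit)
		printf("250 is in [%u, %u)\n", hit->seq, hit->end_seq);
	return 0;
}
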
skb              1625 net/ipv4/tcp_input.c static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
skb              1628 net/ipv4/tcp_input.c 	if (skb && after(TCP_SKB_CB(skb)->seq, skip_to_seq))
skb              1629 net/ipv4/tcp_input.c 		return skb;
skb              1634 net/ipv4/tcp_input.c static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
skb              1641 net/ipv4/tcp_input.c 		return skb;
skb              1644 net/ipv4/tcp_input.c 		skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
skb              1645 net/ipv4/tcp_input.c 		skb = tcp_sacktag_walk(skb, sk, NULL, state,
skb              1650 net/ipv4/tcp_input.c 	return skb;
skb              1668 net/ipv4/tcp_input.c 	struct sk_buff *skb;
skb              1754 net/ipv4/tcp_input.c 	skb = NULL;
skb              1788 net/ipv4/tcp_input.c 				skb = tcp_sacktag_skip(skb, sk, start_seq);
skb              1789 net/ipv4/tcp_input.c 				skb = tcp_sacktag_walk(skb, sk, next_dup,
skb              1800 net/ipv4/tcp_input.c 			skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
skb              1807 net/ipv4/tcp_input.c 				skb = tcp_highest_sack(sk);
skb              1808 net/ipv4/tcp_input.c 				if (!skb)
skb              1814 net/ipv4/tcp_input.c 			skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
skb              1821 net/ipv4/tcp_input.c 			skb = tcp_highest_sack(sk);
skb              1822 net/ipv4/tcp_input.c 			if (!skb)
skb              1825 net/ipv4/tcp_input.c 		skb = tcp_sacktag_skip(skb, sk, start_seq);
skb              1828 net/ipv4/tcp_input.c 		skb = tcp_sacktag_walk(skb, sk, next_dup, state,
skb              1961 net/ipv4/tcp_input.c 	struct sk_buff *skb, *head;
skb              1975 net/ipv4/tcp_input.c 	skb = head;
skb              1976 net/ipv4/tcp_input.c 	skb_rbtree_walk_from(skb) {
skb              1978 net/ipv4/tcp_input.c 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
skb              1979 net/ipv4/tcp_input.c 		else if (tcp_is_rack(sk) && skb != head &&
skb              1980 net/ipv4/tcp_input.c 			 tcp_rack_skb_timeout(tp, skb, 0) > 0)
skb              1982 net/ipv4/tcp_input.c 		tcp_mark_skb_lost(sk, skb);
skb              2193 net/ipv4/tcp_input.c 	struct sk_buff *skb;
skb              2200 net/ipv4/tcp_input.c 	skb = tp->lost_skb_hint;
skb              2201 net/ipv4/tcp_input.c 	if (skb) {
skb              2203 net/ipv4/tcp_input.c 		if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una))
skb              2207 net/ipv4/tcp_input.c 		skb = tcp_rtx_queue_head(sk);
skb              2211 net/ipv4/tcp_input.c 	skb_rbtree_walk_from(skb) {
skb              2214 net/ipv4/tcp_input.c 		tp->lost_skb_hint = skb;
skb              2217 net/ipv4/tcp_input.c 		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
skb              2222 net/ipv4/tcp_input.c 		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
skb              2223 net/ipv4/tcp_input.c 			cnt += tcp_skb_pcount(skb);
skb              2227 net/ipv4/tcp_input.c 			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
skb              2231 net/ipv4/tcp_input.c 			mss = tcp_skb_mss(skb);
skb              2234 net/ipv4/tcp_input.c 			if (lost < skb->len &&
skb              2235 net/ipv4/tcp_input.c 			    tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
skb              2241 net/ipv4/tcp_input.c 		tcp_skb_mark_lost(tp, skb);
skb              2274 net/ipv4/tcp_input.c 				     const struct sk_buff *skb)
skb              2276 net/ipv4/tcp_input.c 	return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
skb              2277 net/ipv4/tcp_input.c 	       tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
skb              2308 net/ipv4/tcp_input.c 	struct sk_buff *skb;
skb              2313 net/ipv4/tcp_input.c 	skb = tcp_rtx_queue_head(sk);
skb              2314 net/ipv4/tcp_input.c 	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
skb              2352 net/ipv4/tcp_input.c 		struct sk_buff *skb;
skb              2354 net/ipv4/tcp_input.c 		skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
skb              2355 net/ipv4/tcp_input.c 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
skb              2603 net/ipv4/tcp_input.c 	struct sk_buff *skb;
skb              2606 net/ipv4/tcp_input.c 	skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
skb              2607 net/ipv4/tcp_input.c 		if (tcp_skb_seglen(skb) > mss &&
skb              2608 net/ipv4/tcp_input.c 		    !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
skb              2609 net/ipv4/tcp_input.c 			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
skb              2610 net/ipv4/tcp_input.c 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
skb              2611 net/ipv4/tcp_input.c 				tp->retrans_out -= tcp_skb_pcount(skb);
skb              2613 net/ipv4/tcp_input.c 			tcp_skb_mark_lost_uncond_verify(tp, skb);
skb              3028 net/ipv4/tcp_input.c static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
skb              3033 net/ipv4/tcp_input.c 	BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));
skb              3035 net/ipv4/tcp_input.c 	packets_acked = tcp_skb_pcount(skb);
skb              3036 net/ipv4/tcp_input.c 	if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
skb              3038 net/ipv4/tcp_input.c 	packets_acked -= tcp_skb_pcount(skb);
skb              3041 net/ipv4/tcp_input.c 		BUG_ON(tcp_skb_pcount(skb) == 0);
skb              3042 net/ipv4/tcp_input.c 		BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
skb              3048 net/ipv4/tcp_input.c static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
skb              3054 net/ipv4/tcp_input.c 	if (likely(!TCP_SKB_CB(skb)->txstamp_ack))
skb              3057 net/ipv4/tcp_input.c 	shinfo = skb_shinfo(skb);
skb              3060 net/ipv4/tcp_input.c 		tcp_skb_tsorted_save(skb) {
skb              3061 net/ipv4/tcp_input.c 			__skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
skb              3062 net/ipv4/tcp_input.c 		} tcp_skb_tsorted_restore(skb);
skb              3079 net/ipv4/tcp_input.c 	struct sk_buff *skb, *next;
skb              3091 net/ipv4/tcp_input.c 	for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) {
skb              3092 net/ipv4/tcp_input.c 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
skb              3097 net/ipv4/tcp_input.c 		tcp_ack_tstamp(sk, skb, prior_snd_una);
skb              3101 net/ipv4/tcp_input.c 			if (tcp_skb_pcount(skb) == 1 ||
skb              3105 net/ipv4/tcp_input.c 			acked_pcount = tcp_tso_acked(sk, skb);
skb              3110 net/ipv4/tcp_input.c 			acked_pcount = tcp_skb_pcount(skb);
skb              3118 net/ipv4/tcp_input.c 			last_ackt = tcp_skb_timestamp_us(skb);
skb              3123 net/ipv4/tcp_input.c 			last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
skb              3134 net/ipv4/tcp_input.c 			if (!tcp_skb_spurious_retrans(tp, skb))
skb              3136 net/ipv4/tcp_input.c 						 tcp_skb_timestamp_us(skb));
skb              3143 net/ipv4/tcp_input.c 		tcp_rate_skb_delivered(sk, skb, sack->rate);
skb              3162 net/ipv4/tcp_input.c 		next = skb_rb_next(skb);
skb              3163 net/ipv4/tcp_input.c 		if (unlikely(skb == tp->retransmit_skb_hint))
skb              3165 net/ipv4/tcp_input.c 		if (unlikely(skb == tp->lost_skb_hint))
skb              3167 net/ipv4/tcp_input.c 		tcp_highest_sack_replace(sk, skb, next);
skb              3168 net/ipv4/tcp_input.c 		tcp_rtx_queue_unlink_and_free(skb, sk);
skb              3171 net/ipv4/tcp_input.c 	if (!skb)
skb              3177 net/ipv4/tcp_input.c 	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
skb              3230 net/ipv4/tcp_input.c 	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
skb              3232 net/ipv4/tcp_input.c 						    tcp_skb_timestamp_us(skb))) {
skb              3380 net/ipv4/tcp_input.c static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
skb              3385 net/ipv4/tcp_input.c 	u32 nwin = ntohs(tcp_hdr(skb)->window);
skb              3387 net/ipv4/tcp_input.c 	if (likely(!tcp_hdr(skb)->syn))
skb              3442 net/ipv4/tcp_input.c bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
skb              3446 net/ipv4/tcp_input.c 	if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
skb              3447 net/ipv4/tcp_input.c 	    !tcp_hdr(skb)->syn)
skb              3454 net/ipv4/tcp_input.c static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
skb              3585 net/ipv4/tcp_input.c static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
skb              3593 net/ipv4/tcp_input.c 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
skb              3594 net/ipv4/tcp_input.c 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
skb              3615 net/ipv4/tcp_input.c 				tcp_send_challenge_ack(sk, skb);
skb              3645 net/ipv4/tcp_input.c 		tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
skb              3663 net/ipv4/tcp_input.c 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
skb              3668 net/ipv4/tcp_input.c 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
skb              3670 net/ipv4/tcp_input.c 		if (TCP_SKB_CB(skb)->sacked)
skb              3671 net/ipv4/tcp_input.c 			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
skb              3674 net/ipv4/tcp_input.c 		if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
skb              3710 net/ipv4/tcp_input.c 				num_dupack = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
skb              3748 net/ipv4/tcp_input.c 	if (TCP_SKB_CB(skb)->sacked) {
skb              3749 net/ipv4/tcp_input.c 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
skb              3840 net/ipv4/tcp_input.c 		       const struct sk_buff *skb,
skb              3845 net/ipv4/tcp_input.c 	const struct tcphdr *th = tcp_hdr(skb);
skb              3917 net/ipv4/tcp_input.c 					TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
skb              3980 net/ipv4/tcp_input.c 				   const struct sk_buff *skb,
skb              3995 net/ipv4/tcp_input.c 	tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL);
skb              4060 net/ipv4/tcp_input.c static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
skb              4063 net/ipv4/tcp_input.c 	const struct tcphdr *th = tcp_hdr(skb);
skb              4064 net/ipv4/tcp_input.c 	u32 seq = TCP_SKB_CB(skb)->seq;
skb              4065 net/ipv4/tcp_input.c 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
skb              4068 net/ipv4/tcp_input.c 		(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
skb              4081 net/ipv4/tcp_input.c 				   const struct sk_buff *skb)
skb              4086 net/ipv4/tcp_input.c 	       !tcp_disordered_ack(sk, skb);
skb              4262 net/ipv4/tcp_input.c static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
skb              4269 net/ipv4/tcp_input.c 	if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq)
skb              4273 net/ipv4/tcp_input.c static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
skb              4277 net/ipv4/tcp_input.c 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
skb              4278 net/ipv4/tcp_input.c 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
skb              4283 net/ipv4/tcp_input.c 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
skb              4285 net/ipv4/tcp_input.c 			tcp_rcv_spurious_retrans(sk, skb);
skb              4286 net/ipv4/tcp_input.c 			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
skb              4288 net/ipv4/tcp_input.c 			tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
skb              4469 net/ipv4/tcp_input.c static void tcp_drop(struct sock *sk, struct sk_buff *skb)
skb              4471 net/ipv4/tcp_input.c 	sk_drops_add(sk, skb);
skb              4472 net/ipv4/tcp_input.c 	__kfree_skb(skb);
skb              4483 net/ipv4/tcp_input.c 	struct sk_buff *skb, *tail;
skb              4488 net/ipv4/tcp_input.c 		skb = rb_to_skb(p);
skb              4489 net/ipv4/tcp_input.c 		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
skb              4492 net/ipv4/tcp_input.c 		if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
skb              4494 net/ipv4/tcp_input.c 			if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
skb              4495 net/ipv4/tcp_input.c 				dsack_high = TCP_SKB_CB(skb)->end_seq;
skb              4496 net/ipv4/tcp_input.c 			tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
skb              4499 net/ipv4/tcp_input.c 		rb_erase(&skb->rbnode, &tp->out_of_order_queue);
skb              4501 net/ipv4/tcp_input.c 		if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
skb              4502 net/ipv4/tcp_input.c 			tcp_drop(sk, skb);
skb              4507 net/ipv4/tcp_input.c 		eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
skb              4508 net/ipv4/tcp_input.c 		tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
skb              4509 net/ipv4/tcp_input.c 		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
skb              4511 net/ipv4/tcp_input.c 			__skb_queue_tail(&sk->sk_receive_queue, skb);
skb              4513 net/ipv4/tcp_input.c 			kfree_skb_partial(skb, fragstolen);
skb              4528 net/ipv4/tcp_input.c static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
skb              4532 net/ipv4/tcp_input.c 	    !sk_rmem_schedule(sk, skb, size)) {
skb              4537 net/ipv4/tcp_input.c 		while (!sk_rmem_schedule(sk, skb, size)) {
skb              4545 net/ipv4/tcp_input.c static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
skb              4553 net/ipv4/tcp_input.c 	tcp_ecn_check_ce(sk, skb);
skb              4555 net/ipv4/tcp_input.c 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
skb              4557 net/ipv4/tcp_input.c 		tcp_drop(sk, skb);
skb              4565 net/ipv4/tcp_input.c 	tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
skb              4567 net/ipv4/tcp_input.c 	seq = TCP_SKB_CB(skb)->seq;
skb              4568 net/ipv4/tcp_input.c 	end_seq = TCP_SKB_CB(skb)->end_seq;
skb              4578 net/ipv4/tcp_input.c 		rb_link_node(&skb->rbnode, NULL, p);
skb              4579 net/ipv4/tcp_input.c 		rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
skb              4580 net/ipv4/tcp_input.c 		tp->ooo_last_skb = skb;
skb              4588 net/ipv4/tcp_input.c 				 skb, &fragstolen)) {
skb              4590 net/ipv4/tcp_input.c 		tcp_grow_window(sk, skb);
skb              4591 net/ipv4/tcp_input.c 		kfree_skb_partial(skb, fragstolen);
skb              4592 net/ipv4/tcp_input.c 		skb = NULL;
skb              4616 net/ipv4/tcp_input.c 				tcp_drop(sk, skb);
skb              4617 net/ipv4/tcp_input.c 				skb = NULL;
skb              4628 net/ipv4/tcp_input.c 				rb_replace_node(&skb1->rbnode, &skb->rbnode,
skb              4639 net/ipv4/tcp_input.c 						skb, &fragstolen)) {
skb              4646 net/ipv4/tcp_input.c 	rb_link_node(&skb->rbnode, parent, p);
skb              4647 net/ipv4/tcp_input.c 	rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
skb              4651 net/ipv4/tcp_input.c 	while ((skb1 = skb_rb_next(skb)) != NULL) {
skb              4667 net/ipv4/tcp_input.c 		tp->ooo_last_skb = skb;
skb              4673 net/ipv4/tcp_input.c 	if (skb) {
skb              4674 net/ipv4/tcp_input.c 		tcp_grow_window(sk, skb);
skb              4675 net/ipv4/tcp_input.c 		skb_condense(skb);
skb              4676 net/ipv4/tcp_input.c 		skb_set_owner_r(skb, sk);
skb              4680 net/ipv4/tcp_input.c static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
skb              4688 net/ipv4/tcp_input.c 				  skb, fragstolen)) ? 1 : 0;
skb              4689 net/ipv4/tcp_input.c 	tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
skb              4691 net/ipv4/tcp_input.c 		__skb_queue_tail(&sk->sk_receive_queue, skb);
skb              4692 net/ipv4/tcp_input.c 		skb_set_owner_r(skb, sk);
skb              4699 net/ipv4/tcp_input.c 	struct sk_buff *skb;
skb              4713 net/ipv4/tcp_input.c 	skb = alloc_skb_with_frags(size - data_len, data_len,
skb              4716 net/ipv4/tcp_input.c 	if (!skb)
skb              4719 net/ipv4/tcp_input.c 	skb_put(skb, size - data_len);
skb              4720 net/ipv4/tcp_input.c 	skb->data_len = data_len;
skb              4721 net/ipv4/tcp_input.c 	skb->len = size;
skb              4723 net/ipv4/tcp_input.c 	if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
skb              4728 net/ipv4/tcp_input.c 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
skb              4732 net/ipv4/tcp_input.c 	TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
skb              4733 net/ipv4/tcp_input.c 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
skb              4734 net/ipv4/tcp_input.c 	TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
skb              4736 net/ipv4/tcp_input.c 	if (tcp_queue_rcv(sk, skb, &fragstolen)) {
skb              4738 net/ipv4/tcp_input.c 		__kfree_skb(skb);
skb              4743 net/ipv4/tcp_input.c 	kfree_skb(skb);
skb              4761 net/ipv4/tcp_input.c static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
skb              4767 net/ipv4/tcp_input.c 	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
skb              4768 net/ipv4/tcp_input.c 		__kfree_skb(skb);
skb              4771 net/ipv4/tcp_input.c 	skb_dst_drop(skb);
skb              4772 net/ipv4/tcp_input.c 	__skb_pull(skb, tcp_hdr(skb)->doff * 4);
skb              4774 net/ipv4/tcp_input.c 	tcp_ecn_accept_cwr(sk, skb);
skb              4782 net/ipv4/tcp_input.c 	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
skb              4791 net/ipv4/tcp_input.c 			sk_forced_mem_schedule(sk, skb->truesize);
skb              4792 net/ipv4/tcp_input.c 		else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
skb              4797 net/ipv4/tcp_input.c 		eaten = tcp_queue_rcv(sk, skb, &fragstolen);
skb              4798 net/ipv4/tcp_input.c 		if (skb->len)
skb              4799 net/ipv4/tcp_input.c 			tcp_event_data_recv(sk, skb);
skb              4800 net/ipv4/tcp_input.c 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
skb              4819 net/ipv4/tcp_input.c 			kfree_skb_partial(skb, fragstolen);
skb              4825 net/ipv4/tcp_input.c 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
skb              4826 net/ipv4/tcp_input.c 		tcp_rcv_spurious_retrans(sk, skb);
skb              4829 net/ipv4/tcp_input.c 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
skb              4835 net/ipv4/tcp_input.c 		tcp_drop(sk, skb);
skb              4840 net/ipv4/tcp_input.c 	if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
skb              4843 net/ipv4/tcp_input.c 	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
skb              4845 net/ipv4/tcp_input.c 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
skb              4857 net/ipv4/tcp_input.c 	tcp_data_queue_ofo(sk, skb);
skb              4860 net/ipv4/tcp_input.c static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
skb              4863 net/ipv4/tcp_input.c 		return !skb_queue_is_last(list, skb) ? skb->next : NULL;
skb              4865 net/ipv4/tcp_input.c 	return skb_rb_next(skb);
skb              4868 net/ipv4/tcp_input.c static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
skb              4872 net/ipv4/tcp_input.c 	struct sk_buff *next = tcp_skb_next(skb, list);
skb              4875 net/ipv4/tcp_input.c 		__skb_unlink(skb, list);
skb              4877 net/ipv4/tcp_input.c 		rb_erase(&skb->rbnode, root);
skb              4879 net/ipv4/tcp_input.c 	__kfree_skb(skb);
skb              4886 net/ipv4/tcp_input.c void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
skb              4895 net/ipv4/tcp_input.c 		if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
skb              4900 net/ipv4/tcp_input.c 	rb_link_node(&skb->rbnode, parent, p);
skb              4901 net/ipv4/tcp_input.c 	rb_insert_color(&skb->rbnode, root);
skb              4916 net/ipv4/tcp_input.c 	struct sk_buff *skb = head, *n;
skb              4924 net/ipv4/tcp_input.c 	for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
skb              4925 net/ipv4/tcp_input.c 		n = tcp_skb_next(skb, list);
skb              4928 net/ipv4/tcp_input.c 		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
skb              4929 net/ipv4/tcp_input.c 			skb = tcp_collapse_one(sk, skb, list, root);
skb              4930 net/ipv4/tcp_input.c 			if (!skb)
skb              4940 net/ipv4/tcp_input.c 		if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) &&
skb              4941 net/ipv4/tcp_input.c 		    (tcp_win_from_space(sk, skb->truesize) > skb->len ||
skb              4942 net/ipv4/tcp_input.c 		     before(TCP_SKB_CB(skb)->seq, start))) {
skb              4948 net/ipv4/tcp_input.c 		    TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
skb              4954 net/ipv4/tcp_input.c 		start = TCP_SKB_CB(skb)->end_seq;
skb              4957 net/ipv4/tcp_input.c 	    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
skb              4970 net/ipv4/tcp_input.c 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
skb              4972 net/ipv4/tcp_input.c 		nskb->decrypted = skb->decrypted;
skb              4976 net/ipv4/tcp_input.c 			__skb_queue_before(list, skb, nskb);
skb              4983 net/ipv4/tcp_input.c 			int offset = start - TCP_SKB_CB(skb)->seq;
skb              4984 net/ipv4/tcp_input.c 			int size = TCP_SKB_CB(skb)->end_seq - start;
skb              4989 net/ipv4/tcp_input.c 				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
skb              4995 net/ipv4/tcp_input.c 			if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
skb              4996 net/ipv4/tcp_input.c 				skb = tcp_collapse_one(sk, skb, list, root);
skb              4997 net/ipv4/tcp_input.c 				if (!skb ||
skb              4998 net/ipv4/tcp_input.c 				    skb == tail ||
skb              4999 net/ipv4/tcp_input.c 				    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
skb              5002 net/ipv4/tcp_input.c 				if (skb->decrypted != nskb->decrypted)
skb              5009 net/ipv4/tcp_input.c 	skb_queue_walk_safe(&tmp, skb, n)
skb              5010 net/ipv4/tcp_input.c 		tcp_rbtree_insert(root, skb);
skb              5020 net/ipv4/tcp_input.c 	struct sk_buff *skb, *head;
skb              5023 net/ipv4/tcp_input.c 	skb = skb_rb_first(&tp->out_of_order_queue);
skb              5025 net/ipv4/tcp_input.c 	if (!skb) {
skb              5029 net/ipv4/tcp_input.c 	start = TCP_SKB_CB(skb)->seq;
skb              5030 net/ipv4/tcp_input.c 	end = TCP_SKB_CB(skb)->end_seq;
skb              5031 net/ipv4/tcp_input.c 	range_truesize = skb->truesize;
skb              5033 net/ipv4/tcp_input.c 	for (head = skb;;) {
skb              5034 net/ipv4/tcp_input.c 		skb = skb_rb_next(skb);
skb              5039 net/ipv4/tcp_input.c 		if (!skb ||
skb              5040 net/ipv4/tcp_input.c 		    after(TCP_SKB_CB(skb)->seq, end) ||
skb              5041 net/ipv4/tcp_input.c 		    before(TCP_SKB_CB(skb)->end_seq, start)) {
skb              5046 net/ipv4/tcp_input.c 					     head, skb, start, end);
skb              5055 net/ipv4/tcp_input.c 		range_truesize += skb->truesize;
skb              5056 net/ipv4/tcp_input.c 		if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
skb              5057 net/ipv4/tcp_input.c 			start = TCP_SKB_CB(skb)->seq;
skb              5058 net/ipv4/tcp_input.c 		if (after(TCP_SKB_CB(skb)->end_seq, end))
skb              5059 net/ipv4/tcp_input.c 			end = TCP_SKB_CB(skb)->end_seq;
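
The loop above grows a [start, end) range while successive out-of-order skbs overlap or abut it, then collapses each maximal range as a unit. A userspace sketch of that range-sweeping walk over sorted segments:

#include <stdio.h>
#include <stdint.h>

/* Wraparound-safe helpers as in the kernel. */
static int before(uint32_t s1, uint32_t s2) { return (int32_t)(s1 - s2) < 0; }
#define after(s2, s1) before(s1, s2)

struct seg { uint32_t seq, end_seq; };

/* Sweep sorted segments, growing [start, end) while the next segment
 * overlaps or abuts it; report each maximal range (the collapse unit). */
static void walk_ranges(const struct seg *s, int n)
{
	int i = 0;

	while (i < n) {
		uint32_t start = s[i].seq, end = s[i].end_seq;

		while (++i < n &&
		       !after(s[i].seq, end) &&         /* overlaps/abuts range */
		       !before(s[i].end_seq, start)) {
			if (before(s[i].seq, start))
				start = s[i].seq;
			if (after(s[i].end_seq, end))
				end = s[i].end_seq;
		}
		printf("range [%u, %u)\n", start, end);
	}
}

int main(void)
{
	struct seg s[] = { {100, 200}, {150, 300}, {300, 400}, {500, 600} };

	walk_ranges(s, 4);      /* -> [100,400) and [500,600) */
	return 0;
}
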
skb              5358 net/ipv4/tcp_input.c 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
skb              5360 net/ipv4/tcp_input.c 		if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
skb              5361 net/ipv4/tcp_input.c 			__skb_unlink(skb, &sk->sk_receive_queue);
skb              5362 net/ipv4/tcp_input.c 			__kfree_skb(skb);
skb              5374 net/ipv4/tcp_input.c static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
skb              5388 net/ipv4/tcp_input.c 		if (ptr < skb->len) {
skb              5390 net/ipv4/tcp_input.c 			if (skb_copy_bits(skb, ptr, &tmp, 1))
skb              5407 net/ipv4/tcp_input.c static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
skb              5411 net/ipv4/tcp_input.c 	return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
skb              5419 net/ipv4/tcp_input.c static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
skb              5426 net/ipv4/tcp_input.c 	if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) &&
skb              5428 net/ipv4/tcp_input.c 	    tcp_paws_discard(sk, skb)) {
skb              5431 net/ipv4/tcp_input.c 			if (!tcp_oow_rate_limited(sock_net(sk), skb,
skb              5434 net/ipv4/tcp_input.c 				tcp_send_dupack(sk, skb);
skb              5441 net/ipv4/tcp_input.c 	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
skb              5451 net/ipv4/tcp_input.c 			if (!tcp_oow_rate_limited(sock_net(sk), skb,
skb              5454 net/ipv4/tcp_input.c 				tcp_send_dupack(sk, skb);
skb              5455 net/ipv4/tcp_input.c 		} else if (tcp_reset_check(sk, skb)) {
skb              5472 net/ipv4/tcp_input.c 		if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
skb              5473 net/ipv4/tcp_input.c 		    tcp_reset_check(sk, skb)) {
skb              5487 net/ipv4/tcp_input.c 			if (TCP_SKB_CB(skb)->seq == max_sack)
skb              5501 net/ipv4/tcp_input.c 			tcp_send_challenge_ack(sk, skb);
skb              5516 net/ipv4/tcp_input.c 		tcp_send_challenge_ack(sk, skb);
skb              5523 net/ipv4/tcp_input.c 	tcp_drop(sk, skb);
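
tcp_validate_incoming() gates everything on the RFC 793 acceptability test: the segment must end at or after the left window edge (rcv_wup) and start at or before the right edge (rcv_nxt plus the receive window). A hedged restatement of that predicate, assuming the usual tcp_sequence() definition:

#include <stdio.h>
#include <stdint.h>

static int before(uint32_t s1, uint32_t s2) { return (int32_t)(s1 - s2) < 0; }
#define after(s2, s1) before(s1, s2)

/* In-window check in the style of tcp_sequence(). */
static int seq_ok(uint32_t seq, uint32_t end_seq,
		  uint32_t rcv_wup, uint32_t rcv_nxt, uint32_t rcv_wnd)
{
	return !before(end_seq, rcv_wup) && !after(seq, rcv_nxt + rcv_wnd);
}

int main(void)
{
	uint32_t rcv_wup = 1000, rcv_nxt = 1000, rcv_wnd = 500;

	printf("%d\n", seq_ok(1100, 1200, rcv_wup, rcv_nxt, rcv_wnd)); /* 1: inside */
	printf("%d\n", seq_ok(800,  900,  rcv_wup, rcv_nxt, rcv_wnd)); /* 0: old data */
	printf("%d\n", seq_ok(1600, 1700, rcv_wup, rcv_nxt, rcv_wnd)); /* 0: past window */
	return 0;
}
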
skb              5550 net/ipv4/tcp_input.c void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
skb              5552 net/ipv4/tcp_input.c 	const struct tcphdr *th = (const struct tcphdr *)skb->data;
skb              5554 net/ipv4/tcp_input.c 	unsigned int len = skb->len;
skb              5557 net/ipv4/tcp_input.c 	trace_tcp_probe(sk, skb);
skb              5561 net/ipv4/tcp_input.c 		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
skb              5589 net/ipv4/tcp_input.c 	    TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
skb              5590 net/ipv4/tcp_input.c 	    !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
skb              5630 net/ipv4/tcp_input.c 				tcp_ack(sk, skb, 0);
skb              5631 net/ipv4/tcp_input.c 				__kfree_skb(skb);
skb              5647 net/ipv4/tcp_input.c 			if (tcp_checksum_complete(skb))
skb              5650 net/ipv4/tcp_input.c 			if ((int)skb->truesize > sk->sk_forward_alloc)
skb              5662 net/ipv4/tcp_input.c 			tcp_rcv_rtt_measure_ts(sk, skb);
skb              5667 net/ipv4/tcp_input.c 			__skb_pull(skb, tcp_header_len);
skb              5668 net/ipv4/tcp_input.c 			eaten = tcp_queue_rcv(sk, skb, &fragstolen);
skb              5670 net/ipv4/tcp_input.c 			tcp_event_data_recv(sk, skb);
skb              5672 net/ipv4/tcp_input.c 			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
skb              5674 net/ipv4/tcp_input.c 				tcp_ack(sk, skb, FLAG_DATA);
skb              5683 net/ipv4/tcp_input.c 				kfree_skb_partial(skb, fragstolen);
skb              5690 net/ipv4/tcp_input.c 	if (len < (th->doff << 2) || tcp_checksum_complete(skb))
skb              5700 net/ipv4/tcp_input.c 	if (!tcp_validate_incoming(sk, skb, th, 1))
skb              5704 net/ipv4/tcp_input.c 	if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
skb              5707 net/ipv4/tcp_input.c 	tcp_rcv_rtt_measure_ts(sk, skb);
skb              5710 net/ipv4/tcp_input.c 	tcp_urg(sk, skb, th);
skb              5713 net/ipv4/tcp_input.c 	tcp_data_queue(sk, skb);
skb              5724 net/ipv4/tcp_input.c 	tcp_drop(sk, skb);
skb              5754 net/ipv4/tcp_input.c void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
skb              5762 net/ipv4/tcp_input.c 	if (skb) {
skb              5763 net/ipv4/tcp_input.c 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
skb              5764 net/ipv4/tcp_input.c 		security_inet_conn_established(sk, skb);
skb              5765 net/ipv4/tcp_input.c 		sk_mark_napi_id(sk, skb);
skb              5870 net/ipv4/tcp_input.c static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
skb              5879 net/ipv4/tcp_input.c 	tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
skb              5892 net/ipv4/tcp_input.c 		if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
skb              5893 net/ipv4/tcp_input.c 		    after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
skb              5936 net/ipv4/tcp_input.c 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
skb              5938 net/ipv4/tcp_input.c 		tcp_ack(sk, skb, FLAG_SLOWPATH);
skb              5943 net/ipv4/tcp_input.c 		WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
skb              5944 net/ipv4/tcp_input.c 		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
skb              5978 net/ipv4/tcp_input.c 		tcp_finish_connect(sk, skb);
skb              5981 net/ipv4/tcp_input.c 				tcp_rcv_fastopen_synack(sk, skb, &foc);
skb              6005 net/ipv4/tcp_input.c 			tcp_drop(sk, skb);
skb              6046 net/ipv4/tcp_input.c 		WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
skb              6048 net/ipv4/tcp_input.c 		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
skb              6054 net/ipv4/tcp_input.c 		tp->snd_wl1    = TCP_SKB_CB(skb)->seq;
skb              6135 net/ipv4/tcp_input.c int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
skb              6139 net/ipv4/tcp_input.c 	const struct tcphdr *th = tcp_hdr(skb);
skb              6163 net/ipv4/tcp_input.c 			acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
skb              6169 net/ipv4/tcp_input.c 			consume_skb(skb);
skb              6177 net/ipv4/tcp_input.c 		queued = tcp_rcv_synsent_state_process(sk, skb, th);
skb              6182 net/ipv4/tcp_input.c 		tcp_urg(sk, skb, th);
skb              6183 net/ipv4/tcp_input.c 		__kfree_skb(skb);
skb              6198 net/ipv4/tcp_input.c 		if (!tcp_check_req(sk, skb, req, true, &req_stolen))
skb              6205 net/ipv4/tcp_input.c 	if (!tcp_validate_incoming(sk, skb, th, 0))
skb              6209 net/ipv4/tcp_input.c 	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
skb              6216 net/ipv4/tcp_input.c 		tcp_send_challenge_ack(sk, skb);
skb              6244 net/ipv4/tcp_input.c 		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
skb              6246 net/ipv4/tcp_input.c 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
skb              6286 net/ipv4/tcp_input.c 		if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
skb              6287 net/ipv4/tcp_input.c 		    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
skb              6331 net/ipv4/tcp_input.c 	tcp_urg(sk, skb, th);
skb              6338 net/ipv4/tcp_input.c 		if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
skb              6348 net/ipv4/tcp_input.c 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
skb              6349 net/ipv4/tcp_input.c 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
skb              6357 net/ipv4/tcp_input.c 		tcp_data_queue(sk, skb);
skb              6370 net/ipv4/tcp_input.c 		tcp_drop(sk, skb);
skb              6408 net/ipv4/tcp_input.c 				   const struct sk_buff *skb,
skb              6412 net/ipv4/tcp_input.c 	const struct tcphdr *th = tcp_hdr(skb);
skb              6421 net/ipv4/tcp_input.c 	ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
skb              6433 net/ipv4/tcp_input.c 			     struct sk_buff *skb, const struct sock *sk)
skb              6439 net/ipv4/tcp_input.c 	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
skb              6440 net/ipv4/tcp_input.c 	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
skb              6451 net/ipv4/tcp_input.c 	ireq->ir_rmt_port = tcp_hdr(skb)->source;
skb              6452 net/ipv4/tcp_input.c 	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
skb              6453 net/ipv4/tcp_input.c 	ireq->ir_mark = inet_request_mark(sk, skb);
skb              6513 net/ipv4/tcp_input.c 				 const struct sk_buff *skb)
skb              6516 net/ipv4/tcp_input.c 		u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
skb              6522 net/ipv4/tcp_input.c 			memcpy(&copy[1], skb_network_header(skb), len);
skb              6560 net/ipv4/tcp_input.c 		     struct sock *sk, struct sk_buff *skb)
skb              6563 net/ipv4/tcp_input.c 	__u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
skb              6599 net/ipv4/tcp_input.c 	tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
skb              6609 net/ipv4/tcp_input.c 	tcp_openreq_init(req, &tmp_opt, skb, sk);
skb              6613 net/ipv4/tcp_input.c 	inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
skb              6615 net/ipv4/tcp_input.c 	af_ops->init_req(req, sk, skb);
skb              6617 net/ipv4/tcp_input.c 	if (security_inet_conn_request(sk, skb, req))
skb              6621 net/ipv4/tcp_input.c 		tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
skb              6640 net/ipv4/tcp_input.c 			pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
skb              6645 net/ipv4/tcp_input.c 		isn = af_ops->init_seq(skb);
skb              6648 net/ipv4/tcp_input.c 	tcp_ecn_create_request(req, skb, sk, dst);
skb              6651 net/ipv4/tcp_input.c 		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
skb              6660 net/ipv4/tcp_input.c 	sk_rx_queue_set(req_to_sk(req), skb);
skb              6662 net/ipv4/tcp_input.c 		tcp_reqsk_record_syn(sk, req, skb);
skb              6663 net/ipv4/tcp_input.c 		fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
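
    The tcp_input.c sites above lean heavily on the before()/after()
    sequence-number comparators. As a self-contained illustration (a
    userspace restatement of the helpers in include/net/tcp.h, not the
    kernel code itself), the comparison is done in signed 32-bit
    arithmetic so the ordering survives sequence-space wraparound:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Serial-number arithmetic: the subtraction is evaluated as signed,
     * so seq1 "before" seq2 remains correct across 32-bit wraparound. */
    static bool before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }

    static bool after(uint32_t seq1, uint32_t seq2)
    {
        return before(seq2, seq1);
    }

    int main(void)
    {
        /* 0x00000005 is "after" 0xfffffff0 once the sequence space wraps. */
        printf("%d\n", after(0x00000005u, 0xfffffff0u)); /* prints 1 */
        return 0;
    }
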
skb                93 net/ipv4/tcp_ipv4.c static u32 tcp_v4_init_seq(const struct sk_buff *skb)
skb                95 net/ipv4/tcp_ipv4.c 	return secure_tcp_seq(ip_hdr(skb)->daddr,
skb                96 net/ipv4/tcp_ipv4.c 			      ip_hdr(skb)->saddr,
skb                97 net/ipv4/tcp_ipv4.c 			      tcp_hdr(skb)->dest,
skb                98 net/ipv4/tcp_ipv4.c 			      tcp_hdr(skb)->source);
skb               101 net/ipv4/tcp_ipv4.c static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
skb               103 net/ipv4/tcp_ipv4.c 	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
skb               374 net/ipv4/tcp_ipv4.c static void do_redirect(struct sk_buff *skb, struct sock *sk)
skb               379 net/ipv4/tcp_ipv4.c 		dst->ops->redirect(dst, sk, skb);
skb               434 net/ipv4/tcp_ipv4.c 	struct sk_buff *skb;
skb               537 net/ipv4/tcp_ipv4.c 		skb = tcp_rtx_queue_head(sk);
skb               538 net/ipv4/tcp_ipv4.c 		if (WARN_ON_ONCE(!skb))
skb               548 net/ipv4/tcp_ipv4.c 		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
skb               620 net/ipv4/tcp_ipv4.c void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
skb               622 net/ipv4/tcp_ipv4.c 	struct tcphdr *th = tcp_hdr(skb);
skb               624 net/ipv4/tcp_ipv4.c 	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
skb               625 net/ipv4/tcp_ipv4.c 	skb->csum_start = skb_transport_header(skb) - skb->head;
skb               626 net/ipv4/tcp_ipv4.c 	skb->csum_offset = offsetof(struct tcphdr, check);
skb               630 net/ipv4/tcp_ipv4.c void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
skb               634 net/ipv4/tcp_ipv4.c 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
skb               651 net/ipv4/tcp_ipv4.c static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
skb               653 net/ipv4/tcp_ipv4.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               679 net/ipv4/tcp_ipv4.c 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
skb               694 net/ipv4/tcp_ipv4.c 				       skb->len - (th->doff << 2));
skb               701 net/ipv4/tcp_ipv4.c 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
skb               707 net/ipv4/tcp_ipv4.c 					&ip_hdr(skb)->saddr, AF_INET);
skb               717 net/ipv4/tcp_ipv4.c 					     ip_hdr(skb)->saddr,
skb               718 net/ipv4/tcp_ipv4.c 					     th->source, ip_hdr(skb)->daddr,
skb               719 net/ipv4/tcp_ipv4.c 					     ntohs(th->source), inet_iif(skb),
skb               720 net/ipv4/tcp_ipv4.c 					     tcp_v4_sdif(skb));
skb               726 net/ipv4/tcp_ipv4.c 					&ip_hdr(skb)->saddr, AF_INET);
skb               731 net/ipv4/tcp_ipv4.c 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
skb               747 net/ipv4/tcp_ipv4.c 				     key, ip_hdr(skb)->saddr,
skb               748 net/ipv4/tcp_ipv4.c 				     ip_hdr(skb)->daddr, &rep.th);
skb               751 net/ipv4/tcp_ipv4.c 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
skb               752 net/ipv4/tcp_ipv4.c 				      ip_hdr(skb)->saddr, /* XXX */
skb               764 net/ipv4/tcp_ipv4.c 			trace_tcp_send_reset(sk, skb);
skb               770 net/ipv4/tcp_ipv4.c 	arg.tos = ip_hdr(skb)->tos;
skb               782 net/ipv4/tcp_ipv4.c 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
skb               783 net/ipv4/tcp_ipv4.c 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
skb               803 net/ipv4/tcp_ipv4.c 			    struct sk_buff *skb, u32 seq, u32 ack,
skb               808 net/ipv4/tcp_ipv4.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               857 net/ipv4/tcp_ipv4.c 				    key, ip_hdr(skb)->saddr,
skb               858 net/ipv4/tcp_ipv4.c 				    ip_hdr(skb)->daddr, &rep.th);
skb               862 net/ipv4/tcp_ipv4.c 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
skb               863 net/ipv4/tcp_ipv4.c 				      ip_hdr(skb)->saddr, /* XXX */
skb               878 net/ipv4/tcp_ipv4.c 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
skb               879 net/ipv4/tcp_ipv4.c 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
skb               888 net/ipv4/tcp_ipv4.c static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
skb               893 net/ipv4/tcp_ipv4.c 	tcp_v4_send_ack(sk, skb,
skb               907 net/ipv4/tcp_ipv4.c static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
skb               921 net/ipv4/tcp_ipv4.c 	tcp_v4_send_ack(sk, skb, seq,
skb               927 net/ipv4/tcp_ipv4.c 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
skb               930 net/ipv4/tcp_ipv4.c 			ip_hdr(skb)->tos);
skb               947 net/ipv4/tcp_ipv4.c 	struct sk_buff *skb;
skb               953 net/ipv4/tcp_ipv4.c 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
skb               955 net/ipv4/tcp_ipv4.c 	if (skb) {
skb               956 net/ipv4/tcp_ipv4.c 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
skb               959 net/ipv4/tcp_ipv4.c 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
skb              1242 net/ipv4/tcp_ipv4.c 			const struct sk_buff *skb)
skb              1246 net/ipv4/tcp_ipv4.c 	const struct tcphdr *th = tcp_hdr(skb);
skb              1253 net/ipv4/tcp_ipv4.c 		const struct iphdr *iph = ip_hdr(skb);
skb              1266 net/ipv4/tcp_ipv4.c 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
skb              1268 net/ipv4/tcp_ipv4.c 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
skb              1291 net/ipv4/tcp_ipv4.c 				    const struct sk_buff *skb)
skb              1304 net/ipv4/tcp_ipv4.c 	const struct iphdr *iph = ip_hdr(skb);
skb              1305 net/ipv4/tcp_ipv4.c 	const struct tcphdr *th = tcp_hdr(skb);
skb              1332 net/ipv4/tcp_ipv4.c 				      NULL, skb);
skb              1350 net/ipv4/tcp_ipv4.c 			    struct sk_buff *skb)
skb              1355 net/ipv4/tcp_ipv4.c 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
skb              1356 net/ipv4/tcp_ipv4.c 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
skb              1357 net/ipv4/tcp_ipv4.c 	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
skb              1393 net/ipv4/tcp_ipv4.c int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
skb              1396 net/ipv4/tcp_ipv4.c 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
skb              1400 net/ipv4/tcp_ipv4.c 				&tcp_request_sock_ipv4_ops, sk, skb);
skb              1413 net/ipv4/tcp_ipv4.c struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
skb              1431 net/ipv4/tcp_ipv4.c 	newsk = tcp_create_openreq_child(sk, req, skb);
skb              1436 net/ipv4/tcp_ipv4.c 	inet_sk_rx_dst_set(newsk, skb);
skb              1447 net/ipv4/tcp_ipv4.c 	newinet->mc_index     = inet_iif(skb);
skb              1448 net/ipv4/tcp_ipv4.c 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
skb              1449 net/ipv4/tcp_ipv4.c 	newinet->rcv_tos      = ip_hdr(skb)->tos;
skb              1514 net/ipv4/tcp_ipv4.c static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
skb              1517 net/ipv4/tcp_ipv4.c 	const struct tcphdr *th = tcp_hdr(skb);
skb              1520 net/ipv4/tcp_ipv4.c 		sk = cookie_v4_check(sk, skb);
skb              1548 net/ipv4/tcp_ipv4.c int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
skb              1555 net/ipv4/tcp_ipv4.c 		sock_rps_save_rxhash(sk, skb);
skb              1556 net/ipv4/tcp_ipv4.c 		sk_mark_napi_id(sk, skb);
skb              1558 net/ipv4/tcp_ipv4.c 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
skb              1564 net/ipv4/tcp_ipv4.c 		tcp_rcv_established(sk, skb);
skb              1568 net/ipv4/tcp_ipv4.c 	if (tcp_checksum_complete(skb))
skb              1572 net/ipv4/tcp_ipv4.c 		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
skb              1577 net/ipv4/tcp_ipv4.c 			if (tcp_child_process(sk, nsk, skb)) {
skb              1584 net/ipv4/tcp_ipv4.c 		sock_rps_save_rxhash(sk, skb);
skb              1586 net/ipv4/tcp_ipv4.c 	if (tcp_rcv_state_process(sk, skb)) {
skb              1593 net/ipv4/tcp_ipv4.c 	tcp_v4_send_reset(rsk, skb);
skb              1595 net/ipv4/tcp_ipv4.c 	kfree_skb(skb);
skb              1610 net/ipv4/tcp_ipv4.c int tcp_v4_early_demux(struct sk_buff *skb)
skb              1616 net/ipv4/tcp_ipv4.c 	if (skb->pkt_type != PACKET_HOST)
skb              1619 net/ipv4/tcp_ipv4.c 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
skb              1622 net/ipv4/tcp_ipv4.c 	iph = ip_hdr(skb);
skb              1623 net/ipv4/tcp_ipv4.c 	th = tcp_hdr(skb);
skb              1628 net/ipv4/tcp_ipv4.c 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
skb              1631 net/ipv4/tcp_ipv4.c 				       skb->skb_iif, inet_sdif(skb));
skb              1633 net/ipv4/tcp_ipv4.c 		skb->sk = sk;
skb              1634 net/ipv4/tcp_ipv4.c 		skb->destructor = sock_edemux;
skb              1641 net/ipv4/tcp_ipv4.c 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
skb              1642 net/ipv4/tcp_ipv4.c 				skb_dst_set_noref(skb, dst);
skb              1648 net/ipv4/tcp_ipv4.c bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
skb              1666 net/ipv4/tcp_ipv4.c 	skb_condense(skb);
skb              1668 net/ipv4/tcp_ipv4.c 	skb_dst_drop(skb);
skb              1670 net/ipv4/tcp_ipv4.c 	if (unlikely(tcp_checksum_complete(skb))) {
skb              1681 net/ipv4/tcp_ipv4.c 	th = (const struct tcphdr *)skb->data;
skb              1683 net/ipv4/tcp_ipv4.c 	shinfo = skb_shinfo(skb);
skb              1686 net/ipv4/tcp_ipv4.c 		shinfo->gso_size = skb->len - hdrlen;
skb              1696 net/ipv4/tcp_ipv4.c 	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
skb              1697 net/ipv4/tcp_ipv4.c 	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
skb              1699 net/ipv4/tcp_ipv4.c 	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
skb              1701 net/ipv4/tcp_ipv4.c 	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
skb              1703 net/ipv4/tcp_ipv4.c 	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
skb              1705 net/ipv4/tcp_ipv4.c 	    tail->decrypted != skb->decrypted ||
skb              1711 net/ipv4/tcp_ipv4.c 	__skb_pull(skb, hdrlen);
skb              1712 net/ipv4/tcp_ipv4.c 	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
skb              1715 net/ipv4/tcp_ipv4.c 		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
skb              1717 net/ipv4/tcp_ipv4.c 		if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
skb              1718 net/ipv4/tcp_ipv4.c 			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
skb              1729 net/ipv4/tcp_ipv4.c 		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
skb              1731 net/ipv4/tcp_ipv4.c 		if (TCP_SKB_CB(skb)->has_rxtstamp) {
skb              1733 net/ipv4/tcp_ipv4.c 			tail->tstamp = skb->tstamp;
skb              1734 net/ipv4/tcp_ipv4.c 			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
skb              1747 net/ipv4/tcp_ipv4.c 		kfree_skb_partial(skb, fragstolen);
skb              1750 net/ipv4/tcp_ipv4.c 	__skb_push(skb, hdrlen);
skb              1759 net/ipv4/tcp_ipv4.c 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
skb              1768 net/ipv4/tcp_ipv4.c int tcp_filter(struct sock *sk, struct sk_buff *skb)
skb              1770 net/ipv4/tcp_ipv4.c 	struct tcphdr *th = (struct tcphdr *)skb->data;
skb              1772 net/ipv4/tcp_ipv4.c 	return sk_filter_trim_cap(sk, skb, th->doff * 4);
skb              1776 net/ipv4/tcp_ipv4.c static void tcp_v4_restore_cb(struct sk_buff *skb)
skb              1778 net/ipv4/tcp_ipv4.c 	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
skb              1782 net/ipv4/tcp_ipv4.c static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
skb              1788 net/ipv4/tcp_ipv4.c 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
skb              1792 net/ipv4/tcp_ipv4.c 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
skb              1793 net/ipv4/tcp_ipv4.c 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb              1794 net/ipv4/tcp_ipv4.c 				    skb->len - th->doff * 4);
skb              1795 net/ipv4/tcp_ipv4.c 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
skb              1796 net/ipv4/tcp_ipv4.c 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
skb              1797 net/ipv4/tcp_ipv4.c 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
skb              1798 net/ipv4/tcp_ipv4.c 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
skb              1799 net/ipv4/tcp_ipv4.c 	TCP_SKB_CB(skb)->sacked	 = 0;
skb              1800 net/ipv4/tcp_ipv4.c 	TCP_SKB_CB(skb)->has_rxtstamp =
skb              1801 net/ipv4/tcp_ipv4.c 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
skb              1808 net/ipv4/tcp_ipv4.c int tcp_v4_rcv(struct sk_buff *skb)
skb              1810 net/ipv4/tcp_ipv4.c 	struct net *net = dev_net(skb->dev);
skb              1812 net/ipv4/tcp_ipv4.c 	int sdif = inet_sdif(skb);
skb              1819 net/ipv4/tcp_ipv4.c 	if (skb->pkt_type != PACKET_HOST)
skb              1825 net/ipv4/tcp_ipv4.c 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
skb              1828 net/ipv4/tcp_ipv4.c 	th = (const struct tcphdr *)skb->data;
skb              1832 net/ipv4/tcp_ipv4.c 	if (!pskb_may_pull(skb, th->doff * 4))
skb              1840 net/ipv4/tcp_ipv4.c 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
skb              1843 net/ipv4/tcp_ipv4.c 	th = (const struct tcphdr *)skb->data;
skb              1844 net/ipv4/tcp_ipv4.c 	iph = ip_hdr(skb);
skb              1846 net/ipv4/tcp_ipv4.c 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
skb              1861 net/ipv4/tcp_ipv4.c 		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
skb              1862 net/ipv4/tcp_ipv4.c 			sk_drops_add(sk, skb);
skb              1866 net/ipv4/tcp_ipv4.c 		if (tcp_checksum_complete(skb)) {
skb              1880 net/ipv4/tcp_ipv4.c 		if (!tcp_filter(sk, skb)) {
skb              1881 net/ipv4/tcp_ipv4.c 			th = (const struct tcphdr *)skb->data;
skb              1882 net/ipv4/tcp_ipv4.c 			iph = ip_hdr(skb);
skb              1883 net/ipv4/tcp_ipv4.c 			tcp_v4_fill_cb(skb, iph, th);
skb              1884 net/ipv4/tcp_ipv4.c 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
skb              1894 net/ipv4/tcp_ipv4.c 				tcp_v4_restore_cb(skb);
skb              1902 net/ipv4/tcp_ipv4.c 			tcp_v4_restore_cb(skb);
skb              1903 net/ipv4/tcp_ipv4.c 		} else if (tcp_child_process(sk, nsk, skb)) {
skb              1904 net/ipv4/tcp_ipv4.c 			tcp_v4_send_reset(nsk, skb);
skb              1916 net/ipv4/tcp_ipv4.c 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
skb              1919 net/ipv4/tcp_ipv4.c 	if (tcp_v4_inbound_md5_hash(sk, skb))
skb              1922 net/ipv4/tcp_ipv4.c 	nf_reset_ct(skb);
skb              1924 net/ipv4/tcp_ipv4.c 	if (tcp_filter(sk, skb))
skb              1926 net/ipv4/tcp_ipv4.c 	th = (const struct tcphdr *)skb->data;
skb              1927 net/ipv4/tcp_ipv4.c 	iph = ip_hdr(skb);
skb              1928 net/ipv4/tcp_ipv4.c 	tcp_v4_fill_cb(skb, iph, th);
skb              1930 net/ipv4/tcp_ipv4.c 	skb->dev = NULL;
skb              1933 net/ipv4/tcp_ipv4.c 		ret = tcp_v4_do_rcv(sk, skb);
skb              1940 net/ipv4/tcp_ipv4.c 	tcp_segs_in(tcp_sk(sk), skb);
skb              1945 net/ipv4/tcp_ipv4.c 		ret = tcp_v4_do_rcv(sk, skb);
skb              1947 net/ipv4/tcp_ipv4.c 		if (tcp_add_backlog(sk, skb))
skb              1962 net/ipv4/tcp_ipv4.c 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
skb              1965 net/ipv4/tcp_ipv4.c 	tcp_v4_fill_cb(skb, iph, th);
skb              1967 net/ipv4/tcp_ipv4.c 	if (tcp_checksum_complete(skb)) {
skb              1973 net/ipv4/tcp_ipv4.c 		tcp_v4_send_reset(NULL, skb);
skb              1978 net/ipv4/tcp_ipv4.c 	kfree_skb(skb);
skb              1982 net/ipv4/tcp_ipv4.c 	sk_drops_add(sk, skb);
skb              1988 net/ipv4/tcp_ipv4.c 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
skb              1993 net/ipv4/tcp_ipv4.c 	tcp_v4_fill_cb(skb, iph, th);
skb              1995 net/ipv4/tcp_ipv4.c 	if (tcp_checksum_complete(skb)) {
skb              1999 net/ipv4/tcp_ipv4.c 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
skb              2001 net/ipv4/tcp_ipv4.c 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
skb              2002 net/ipv4/tcp_ipv4.c 							&tcp_hashinfo, skb,
skb              2006 net/ipv4/tcp_ipv4.c 							inet_iif(skb),
skb              2011 net/ipv4/tcp_ipv4.c 			tcp_v4_restore_cb(skb);
skb              2019 net/ipv4/tcp_ipv4.c 		tcp_v4_timewait_ack(sk, skb);
skb              2022 net/ipv4/tcp_ipv4.c 		tcp_v4_send_reset(sk, skb);
skb              2036 net/ipv4/tcp_ipv4.c void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
skb              2038 net/ipv4/tcp_ipv4.c 	struct dst_entry *dst = skb_dst(skb);
skb              2042 net/ipv4/tcp_ipv4.c 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
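
    tcp_v4_fill_cb() above derives end_seq from seq plus the SYN and FIN
    flags plus the payload length, where th->doff counts 32-bit header
    words (so doff * 4 is the header length in bytes). A minimal sketch
    of that arithmetic, with illustrative names rather than the kernel's
    structures:

    #include <stdint.h>
    #include <stdio.h>

    /* SYN and FIN each occupy one unit of sequence space; payload bytes
     * account for the rest. Mirrors the computation in tcp_v4_fill_cb(). */
    static uint32_t tcp_end_seq(uint32_t seq, unsigned int skb_len,
                                unsigned int doff, int syn, int fin)
    {
        return seq + syn + fin + skb_len - doff * 4;
    }

    int main(void)
    {
        /* 40-byte segment, 20-byte header, FIN set: 20 payload bytes + 1. */
        printf("%u\n", tcp_end_seq(1000, 40, 5, 0, 1)); /* prints 1021 */
        return 0;
    }
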
skb               716 net/ipv4/tcp_metrics.c static int tcp_metrics_dump_info(struct sk_buff *skb,
skb               722 net/ipv4/tcp_metrics.c 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb               728 net/ipv4/tcp_metrics.c 	if (tcp_metrics_fill_info(skb, tm) < 0)
skb               731 net/ipv4/tcp_metrics.c 	genlmsg_end(skb, hdr);
skb               735 net/ipv4/tcp_metrics.c 	genlmsg_cancel(skb, hdr);
skb               739 net/ipv4/tcp_metrics.c static int tcp_metrics_nl_dump(struct sk_buff *skb,
skb               742 net/ipv4/tcp_metrics.c 	struct net *net = sock_net(skb->sk);
skb               758 net/ipv4/tcp_metrics.c 			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
skb               769 net/ipv4/tcp_metrics.c 	return skb->len;
skb               814 net/ipv4/tcp_metrics.c static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
skb               897 net/ipv4/tcp_metrics.c static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
skb                44 net/ipv4/tcp_minisocks.c 				  const struct sk_buff *skb, int mib_idx)
skb                48 net/ipv4/tcp_minisocks.c 	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
skb                92 net/ipv4/tcp_minisocks.c tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
skb               101 net/ipv4/tcp_minisocks.c 		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
skb               117 net/ipv4/tcp_minisocks.c 		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
skb               121 net/ipv4/tcp_minisocks.c 				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
skb               126 net/ipv4/tcp_minisocks.c 		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
skb               131 net/ipv4/tcp_minisocks.c 		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
skb               132 net/ipv4/tcp_minisocks.c 		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
skb               141 net/ipv4/tcp_minisocks.c 		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
skb               146 net/ipv4/tcp_minisocks.c 		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
skb               174 net/ipv4/tcp_minisocks.c 	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
skb               175 net/ipv4/tcp_minisocks.c 	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
skb               219 net/ipv4/tcp_minisocks.c 	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
skb               225 net/ipv4/tcp_minisocks.c 		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
skb               243 net/ipv4/tcp_minisocks.c 			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
skb               458 net/ipv4/tcp_minisocks.c 				      struct sk_buff *skb)
skb               522 net/ipv4/tcp_minisocks.c 	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
skb               544 net/ipv4/tcp_minisocks.c 	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
skb               545 net/ipv4/tcp_minisocks.c 		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
skb               568 net/ipv4/tcp_minisocks.c struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
skb               574 net/ipv4/tcp_minisocks.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               581 net/ipv4/tcp_minisocks.c 		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
skb               597 net/ipv4/tcp_minisocks.c 	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
skb               623 net/ipv4/tcp_minisocks.c 		if (!tcp_oow_rate_limited(sock_net(sk), skb,
skb               698 net/ipv4/tcp_minisocks.c 	    (TCP_SKB_CB(skb)->ack_seq !=
skb               709 net/ipv4/tcp_minisocks.c 	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
skb               713 net/ipv4/tcp_minisocks.c 		    !tcp_oow_rate_limited(sock_net(sk), skb,
skb               716 net/ipv4/tcp_minisocks.c 			req->rsk_ops->send_ack(sk, skb, req);
skb               724 net/ipv4/tcp_minisocks.c 	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
skb               727 net/ipv4/tcp_minisocks.c 	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
skb               758 net/ipv4/tcp_minisocks.c 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
skb               770 net/ipv4/tcp_minisocks.c 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
skb               775 net/ipv4/tcp_minisocks.c 	sock_rps_save_rxhash(child, skb);
skb               793 net/ipv4/tcp_minisocks.c 		req->rsk_ops->send_reset(sk, skb);
skb               819 net/ipv4/tcp_minisocks.c 		      struct sk_buff *skb)
skb               825 net/ipv4/tcp_minisocks.c 	sk_mark_napi_id(child, skb);
skb               827 net/ipv4/tcp_minisocks.c 	tcp_segs_in(tcp_sk(child), skb);
skb               829 net/ipv4/tcp_minisocks.c 		ret = tcp_rcv_state_process(child, skb);
skb               838 net/ipv4/tcp_minisocks.c 		__sk_add_backlog(child, skb);
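
    Both tcp_timewait_state_process() and tcp_check_req() above gate
    incoming segments with tcp_in_window(). A hedged userspace
    restatement of that acceptability test (reconstructed from the
    static helper in net/ipv4/tcp_minisocks.c; before()/after() are the
    same serial-number comparators sketched earlier):

    #include <stdbool.h>
    #include <stdint.h>

    static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    static bool after(uint32_t a, uint32_t b)  { return before(b, a); }

    /* A segment [seq, end_seq) is acceptable if it overlaps the receive
     * window [s_win, e_win); zero-length segments are accepted at either
     * window edge. */
    static bool tcp_in_window(uint32_t seq, uint32_t end_seq,
                              uint32_t s_win, uint32_t e_win)
    {
        if (seq == s_win)
            return true;
        if (after(end_seq, s_win) && before(seq, e_win))
            return true;
        return seq == e_win && end_seq == e_win;
    }

    int main(void)
    {
        /* Window [2000, 3000): segment [1500, 2100) overlaps and passes. */
        return !tcp_in_window(1500, 2100, 2000, 3000);
    }
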
skb                14 net/ipv4/tcp_offload.c static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
skb                17 net/ipv4/tcp_offload.c 	while (skb) {
skb                19 net/ipv4/tcp_offload.c 			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
skb                20 net/ipv4/tcp_offload.c 			skb_shinfo(skb)->tskey = ts_seq;
skb                24 net/ipv4/tcp_offload.c 		skb = skb->next;
skb                29 net/ipv4/tcp_offload.c static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
skb                32 net/ipv4/tcp_offload.c 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
skb                35 net/ipv4/tcp_offload.c 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
skb                38 net/ipv4/tcp_offload.c 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
skb                39 net/ipv4/tcp_offload.c 		const struct iphdr *iph = ip_hdr(skb);
skb                40 net/ipv4/tcp_offload.c 		struct tcphdr *th = tcp_hdr(skb);
skb                47 net/ipv4/tcp_offload.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb                48 net/ipv4/tcp_offload.c 		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
skb                51 net/ipv4/tcp_offload.c 	return tcp_gso_segment(skb, features);
skb                54 net/ipv4/tcp_offload.c struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
skb                65 net/ipv4/tcp_offload.c 	struct sk_buff *gso_skb = skb;
skb                69 net/ipv4/tcp_offload.c 	th = tcp_hdr(skb);
skb                74 net/ipv4/tcp_offload.c 	if (!pskb_may_pull(skb, thlen))
skb                77 net/ipv4/tcp_offload.c 	oldlen = (u16)~skb->len;
skb                78 net/ipv4/tcp_offload.c 	__skb_pull(skb, thlen);
skb                80 net/ipv4/tcp_offload.c 	mss = skb_shinfo(skb)->gso_size;
skb                81 net/ipv4/tcp_offload.c 	if (unlikely(skb->len <= mss))
skb                84 net/ipv4/tcp_offload.c 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
skb                87 net/ipv4/tcp_offload.c 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
skb                96 net/ipv4/tcp_offload.c 	skb->ooo_okay = 0;
skb                98 net/ipv4/tcp_offload.c 	segs = skb_segment(skb, features);
skb               114 net/ipv4/tcp_offload.c 	skb = segs;
skb               115 net/ipv4/tcp_offload.c 	th = tcp_hdr(skb);
skb               124 net/ipv4/tcp_offload.c 	while (skb->next) {
skb               128 net/ipv4/tcp_offload.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               129 net/ipv4/tcp_offload.c 			gso_reset_checksum(skb, ~th->check);
skb               131 net/ipv4/tcp_offload.c 			th->check = gso_make_checksum(skb, ~th->check);
skb               135 net/ipv4/tcp_offload.c 			skb->destructor = gso_skb->destructor;
skb               136 net/ipv4/tcp_offload.c 			skb->sk = gso_skb->sk;
skb               137 net/ipv4/tcp_offload.c 			sum_truesize += skb->truesize;
skb               139 net/ipv4/tcp_offload.c 		skb = skb->next;
skb               140 net/ipv4/tcp_offload.c 		th = tcp_hdr(skb);
skb               154 net/ipv4/tcp_offload.c 		swap(gso_skb->sk, skb->sk);
skb               155 net/ipv4/tcp_offload.c 		swap(gso_skb->destructor, skb->destructor);
skb               156 net/ipv4/tcp_offload.c 		sum_truesize += skb->truesize;
skb               162 net/ipv4/tcp_offload.c 			refcount_add(delta, &skb->sk->sk_wmem_alloc);
skb               164 net/ipv4/tcp_offload.c 			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
skb               167 net/ipv4/tcp_offload.c 	delta = htonl(oldlen + (skb_tail_pointer(skb) -
skb               168 net/ipv4/tcp_offload.c 				skb_transport_header(skb)) +
skb               169 net/ipv4/tcp_offload.c 		      skb->data_len);
skb               172 net/ipv4/tcp_offload.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               173 net/ipv4/tcp_offload.c 		gso_reset_checksum(skb, ~th->check);
skb               175 net/ipv4/tcp_offload.c 		th->check = gso_make_checksum(skb, ~th->check);
skb               180 net/ipv4/tcp_offload.c struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
skb               195 net/ipv4/tcp_offload.c 	off = skb_gro_offset(skb);
skb               197 net/ipv4/tcp_offload.c 	th = skb_gro_header_fast(skb, off);
skb               198 net/ipv4/tcp_offload.c 	if (skb_gro_header_hard(skb, hlen)) {
skb               199 net/ipv4/tcp_offload.c 		th = skb_gro_header_slow(skb, hlen, off);
skb               209 net/ipv4/tcp_offload.c 	if (skb_gro_header_hard(skb, hlen)) {
skb               210 net/ipv4/tcp_offload.c 		th = skb_gro_header_slow(skb, hlen, off);
skb               215 net/ipv4/tcp_offload.c 	skb_gro_pull(skb, thlen);
skb               217 net/ipv4/tcp_offload.c 	len = skb_gro_len(skb);
skb               263 net/ipv4/tcp_offload.c 	flush |= p->decrypted ^ skb->decrypted;
skb               266 net/ipv4/tcp_offload.c 	if (flush || skb_gro_receive(p, skb)) {
skb               279 net/ipv4/tcp_offload.c 	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
skb               283 net/ipv4/tcp_offload.c 	NAPI_GRO_CB(skb)->flush |= (flush != 0);
skb               288 net/ipv4/tcp_offload.c int tcp_gro_complete(struct sk_buff *skb)
skb               290 net/ipv4/tcp_offload.c 	struct tcphdr *th = tcp_hdr(skb);
skb               292 net/ipv4/tcp_offload.c 	skb->csum_start = (unsigned char *)th - skb->head;
skb               293 net/ipv4/tcp_offload.c 	skb->csum_offset = offsetof(struct tcphdr, check);
skb               294 net/ipv4/tcp_offload.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb               296 net/ipv4/tcp_offload.c 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
skb               299 net/ipv4/tcp_offload.c 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
skb               306 net/ipv4/tcp_offload.c struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
skb               309 net/ipv4/tcp_offload.c 	if (!NAPI_GRO_CB(skb)->flush &&
skb               310 net/ipv4/tcp_offload.c 	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
skb               312 net/ipv4/tcp_offload.c 		NAPI_GRO_CB(skb)->flush = 1;
skb               316 net/ipv4/tcp_offload.c 	return tcp_gro_receive(head, skb);
skb               319 net/ipv4/tcp_offload.c INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
skb               321 net/ipv4/tcp_offload.c 	const struct iphdr *iph = ip_hdr(skb);
skb               322 net/ipv4/tcp_offload.c 	struct tcphdr *th = tcp_hdr(skb);
skb               324 net/ipv4/tcp_offload.c 	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
skb               326 net/ipv4/tcp_offload.c 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
skb               328 net/ipv4/tcp_offload.c 	if (NAPI_GRO_CB(skb)->is_atomic)
skb               329 net/ipv4/tcp_offload.c 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
skb               331 net/ipv4/tcp_offload.c 	return tcp_gro_complete(skb);
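
    tcp_gso_segment() above reads the MSS from skb_shinfo(skb)->gso_size
    and, when the device can transmit the skb as-is, simply records
    DIV_ROUND_UP(skb->len, mss) in gso_segs. The per-segment arithmetic
    it relies on is simple enough to show standalone (illustrative
    values only):

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* A payload of len bytes at MSS mss yields DIV_ROUND_UP(len, mss)
     * segments, and the i-th segment starts at seq + i * mss. */
    int main(void)
    {
        uint32_t seq = 5000;
        unsigned int len = 4500, mss = 1448;
        unsigned int segs = DIV_ROUND_UP(len, mss); /* 4 */
        unsigned int i;

        for (i = 0; i < segs; i++)
            printf("segment %u: seq=%u\n", i, seq + i * mss);
        return 0;
    }
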
skb                64 net/ipv4/tcp_output.c static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
skb                70 net/ipv4/tcp_output.c 	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
skb                72 net/ipv4/tcp_output.c 	__skb_unlink(skb, &sk->sk_write_queue);
skb                73 net/ipv4/tcp_output.c 	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
skb                76 net/ipv4/tcp_output.c 		tp->highest_sack = skb;
skb                78 net/ipv4/tcp_output.c 	tp->packets_out += tcp_skb_pcount(skb);
skb                83 net/ipv4/tcp_output.c 		      tcp_skb_pcount(skb));
skb               308 net/ipv4/tcp_output.c static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
skb               312 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
skb               314 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
skb               321 net/ipv4/tcp_output.c static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
skb               338 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
skb               345 net/ipv4/tcp_output.c static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
skb               351 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
skb               364 net/ipv4/tcp_output.c static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
skb               371 net/ipv4/tcp_output.c 		if (skb->len != tcp_header_len &&
skb               372 net/ipv4/tcp_output.c 		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
skb               377 net/ipv4/tcp_output.c 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
skb               391 net/ipv4/tcp_output.c static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
skb               393 net/ipv4/tcp_output.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb               395 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->tcp_flags = flags;
skb               396 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->sacked = 0;
skb               398 net/ipv4/tcp_output.c 	tcp_skb_pcount_set(skb, 1);
skb               400 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->seq = seq;
skb               403 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->end_seq = seq;
skb               590 net/ipv4/tcp_output.c static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
skb               624 net/ipv4/tcp_output.c 		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
skb               662 net/ipv4/tcp_output.c 				       unsigned int mss, struct sk_buff *skb,
skb               695 net/ipv4/tcp_output.c 		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
skb               725 net/ipv4/tcp_output.c static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
skb               749 net/ipv4/tcp_output.c 		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
skb               921 net/ipv4/tcp_output.c void tcp_wfree(struct sk_buff *skb)
skb               923 net/ipv4/tcp_output.c 	struct sock *sk = skb->sk;
skb               930 net/ipv4/tcp_output.c 	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
skb               982 net/ipv4/tcp_output.c static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
skb               995 net/ipv4/tcp_output.c 			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
skb              1003 net/ipv4/tcp_output.c 	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
skb              1017 net/ipv4/tcp_output.c static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
skb              1032 net/ipv4/tcp_output.c 	BUG_ON(!skb || !tcp_skb_pcount(skb));
skb              1036 net/ipv4/tcp_output.c 	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
skb              1038 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
skb              1040 net/ipv4/tcp_output.c 		oskb = skb;
skb              1044 net/ipv4/tcp_output.c 				skb = pskb_copy(oskb, gfp_mask);
skb              1046 net/ipv4/tcp_output.c 				skb = skb_clone(oskb, gfp_mask);
skb              1049 net/ipv4/tcp_output.c 		if (unlikely(!skb))
skb              1054 net/ipv4/tcp_output.c 		skb->dev = NULL;
skb              1058 net/ipv4/tcp_output.c 	tcb = TCP_SKB_CB(skb);
skb              1062 net/ipv4/tcp_output.c 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
skb              1064 net/ipv4/tcp_output.c 		tcp_options_size = tcp_established_options(sk, skb, &opts,
skb              1074 net/ipv4/tcp_output.c 		if (tcp_skb_pcount(skb) > 1)
skb              1086 net/ipv4/tcp_output.c 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
skb              1093 net/ipv4/tcp_output.c 	skb->pfmemalloc = 0;
skb              1095 net/ipv4/tcp_output.c 	skb_push(skb, tcp_header_size);
skb              1096 net/ipv4/tcp_output.c 	skb_reset_transport_header(skb);
skb              1098 net/ipv4/tcp_output.c 	skb_orphan(skb);
skb              1099 net/ipv4/tcp_output.c 	skb->sk = sk;
skb              1100 net/ipv4/tcp_output.c 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
skb              1101 net/ipv4/tcp_output.c 	skb_set_hash_from_sk(skb, sk);
skb              1102 net/ipv4/tcp_output.c 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
skb              1104 net/ipv4/tcp_output.c 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
skb              1107 net/ipv4/tcp_output.c 	th = (struct tcphdr *)skb->data;
skb              1130 net/ipv4/tcp_output.c 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
skb              1133 net/ipv4/tcp_output.c 		tcp_ecn_send(sk, skb, th, tcp_header_size);
skb              1145 net/ipv4/tcp_output.c 					       md5, sk, skb);
skb              1149 net/ipv4/tcp_output.c 	icsk->icsk_af_ops->send_check(sk, skb);
skb              1152 net/ipv4/tcp_output.c 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
skb              1154 net/ipv4/tcp_output.c 	if (skb->len != tcp_header_size) {
skb              1156 net/ipv4/tcp_output.c 		tp->data_segs_out += tcp_skb_pcount(skb);
skb              1157 net/ipv4/tcp_output.c 		tp->bytes_sent += skb->len - tcp_header_size;
skb              1162 net/ipv4/tcp_output.c 			      tcp_skb_pcount(skb));
skb              1164 net/ipv4/tcp_output.c 	tp->segs_out += tcp_skb_pcount(skb);
skb              1166 net/ipv4/tcp_output.c 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
skb              1167 net/ipv4/tcp_output.c 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
skb              1172 net/ipv4/tcp_output.c 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
skb              1175 net/ipv4/tcp_output.c 	tcp_add_tx_delay(skb, tp);
skb              1177 net/ipv4/tcp_output.c 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
skb              1190 net/ipv4/tcp_output.c static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
skb              1193 net/ipv4/tcp_output.c 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
skb              1202 net/ipv4/tcp_output.c static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
skb              1207 net/ipv4/tcp_output.c 	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
skb              1208 net/ipv4/tcp_output.c 	__skb_header_release(skb);
skb              1209 net/ipv4/tcp_output.c 	tcp_add_write_queue_tail(sk, skb);
skb              1210 net/ipv4/tcp_output.c 	sk_wmem_queued_add(sk, skb->truesize);
skb              1211 net/ipv4/tcp_output.c 	sk_mem_charge(sk, skb->truesize);
skb              1215 net/ipv4/tcp_output.c static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
skb              1217 net/ipv4/tcp_output.c 	if (skb->len <= mss_now) {
skb              1221 net/ipv4/tcp_output.c 		tcp_skb_pcount_set(skb, 1);
skb              1222 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
skb              1224 net/ipv4/tcp_output.c 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
skb              1225 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
skb              1232 net/ipv4/tcp_output.c static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
skb              1238 net/ipv4/tcp_output.c 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
skb              1240 net/ipv4/tcp_output.c 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
skb              1242 net/ipv4/tcp_output.c 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
skb              1250 net/ipv4/tcp_output.c 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
skb              1251 net/ipv4/tcp_output.c 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
skb              1257 net/ipv4/tcp_output.c static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
skb              1259 net/ipv4/tcp_output.c 	return TCP_SKB_CB(skb)->txstamp_ack ||
skb              1260 net/ipv4/tcp_output.c 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
skb              1263 net/ipv4/tcp_output.c static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
skb              1265 net/ipv4/tcp_output.c 	struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              1267 net/ipv4/tcp_output.c 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
skb              1275 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
skb              1276 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->txstamp_ack = 0;
skb              1280 net/ipv4/tcp_output.c static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
skb              1282 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
skb              1283 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->eor = 0;
skb              1287 net/ipv4/tcp_output.c static void tcp_insert_write_queue_after(struct sk_buff *skb,
skb              1293 net/ipv4/tcp_output.c 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
skb              1304 net/ipv4/tcp_output.c 		 struct sk_buff *skb, u32 len,
skb              1314 net/ipv4/tcp_output.c 	if (WARN_ON(len > skb->len))
skb              1317 net/ipv4/tcp_output.c 	nsize = skb_headlen(skb) - len;
skb              1329 net/ipv4/tcp_output.c 		     skb != tcp_rtx_queue_head(sk) &&
skb              1330 net/ipv4/tcp_output.c 		     skb != tcp_rtx_queue_tail(sk))) {
skb              1335 net/ipv4/tcp_output.c 	if (skb_unclone(skb, gfp))
skb              1342 net/ipv4/tcp_output.c 	skb_copy_decrypted(buff, skb);
skb              1346 net/ipv4/tcp_output.c 	nlen = skb->len - len - nsize;
skb              1348 net/ipv4/tcp_output.c 	skb->truesize -= nlen;
skb              1351 net/ipv4/tcp_output.c 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
skb              1352 net/ipv4/tcp_output.c 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
skb              1353 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
skb              1356 net/ipv4/tcp_output.c 	flags = TCP_SKB_CB(skb)->tcp_flags;
skb              1357 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
skb              1359 net/ipv4/tcp_output.c 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
skb              1360 net/ipv4/tcp_output.c 	tcp_skb_fragment_eor(skb, buff);
skb              1362 net/ipv4/tcp_output.c 	skb_split(skb, buff, len);
skb              1366 net/ipv4/tcp_output.c 	buff->tstamp = skb->tstamp;
skb              1367 net/ipv4/tcp_output.c 	tcp_fragment_tstamp(skb, buff);
skb              1369 net/ipv4/tcp_output.c 	old_factor = tcp_skb_pcount(skb);
skb              1372 net/ipv4/tcp_output.c 	tcp_set_skb_tso_segs(skb, mss_now);
skb              1376 net/ipv4/tcp_output.c 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
skb              1382 net/ipv4/tcp_output.c 		int diff = old_factor - tcp_skb_pcount(skb) -
skb              1386 net/ipv4/tcp_output.c 			tcp_adjust_pcount(sk, skb, diff);
skb              1391 net/ipv4/tcp_output.c 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
skb              1393 net/ipv4/tcp_output.c 		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
skb              1401 net/ipv4/tcp_output.c static int __pskb_trim_head(struct sk_buff *skb, int len)
skb              1406 net/ipv4/tcp_output.c 	eat = min_t(int, len, skb_headlen(skb));
skb              1408 net/ipv4/tcp_output.c 		__skb_pull(skb, eat);
skb              1415 net/ipv4/tcp_output.c 	shinfo = skb_shinfo(skb);
skb              1420 net/ipv4/tcp_output.c 			skb_frag_unref(skb, i);
skb              1434 net/ipv4/tcp_output.c 	skb->data_len -= len;
skb              1435 net/ipv4/tcp_output.c 	skb->len = skb->data_len;
skb              1440 net/ipv4/tcp_output.c int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
skb              1444 net/ipv4/tcp_output.c 	if (skb_unclone(skb, GFP_ATOMIC))
skb              1447 net/ipv4/tcp_output.c 	delta_truesize = __pskb_trim_head(skb, len);
skb              1449 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->seq += len;
skb              1450 net/ipv4/tcp_output.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb              1453 net/ipv4/tcp_output.c 		skb->truesize	   -= delta_truesize;
skb              1460 net/ipv4/tcp_output.c 	if (tcp_skb_pcount(skb) > 1)
skb              1461 net/ipv4/tcp_output.c 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
skb              1705 net/ipv4/tcp_output.c 				const struct sk_buff *skb)
skb              1707 net/ipv4/tcp_output.c 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
skb              1708 net/ipv4/tcp_output.c 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
skb              1766 net/ipv4/tcp_output.c 					const struct sk_buff *skb,
skb              1774 net/ipv4/tcp_output.c 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
skb              1777 net/ipv4/tcp_output.c 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
skb              1780 net/ipv4/tcp_output.c 	needed = min(skb->len, window);
skb              1800 net/ipv4/tcp_output.c 					 const struct sk_buff *skb)
skb              1805 net/ipv4/tcp_output.c 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
skb              1806 net/ipv4/tcp_output.c 	    tcp_skb_pcount(skb) == 1)
skb              1825 net/ipv4/tcp_output.c static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
skb              1827 net/ipv4/tcp_output.c 	int tso_segs = tcp_skb_pcount(skb);
skb              1829 net/ipv4/tcp_output.c 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
skb              1830 net/ipv4/tcp_output.c 		tcp_set_skb_tso_segs(skb, mss_now);
skb              1831 net/ipv4/tcp_output.c 		tso_segs = tcp_skb_pcount(skb);
skb              1840 net/ipv4/tcp_output.c static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
skb              1853 net/ipv4/tcp_output.c 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
skb              1856 net/ipv4/tcp_output.c 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
skb              1864 net/ipv4/tcp_output.c 			     const struct sk_buff *skb,
skb              1867 net/ipv4/tcp_output.c 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
skb              1869 net/ipv4/tcp_output.c 	if (skb->len > cur_mss)
skb              1870 net/ipv4/tcp_output.c 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
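
    tcp_nagle_test() and tcp_minshall_update() above implement the
    Nagle/Minshall rule: at most one sub-MSS segment may be in flight
    unacknowledged at a time. A deliberately simplified stand-in for
    that decision (toy_tp and small_segment_allowed are illustrative
    names, not kernel API; the kernel's tcp_minshall_check() is the real
    test):

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_tp {
        uint32_t snd_sml; /* end_seq of the last small segment sent */
        uint32_t snd_una; /* oldest unacknowledged sequence number */
    };

    /* Full-sized segments always go out; a sub-MSS segment is held back
     * while an earlier small segment is still unacknowledged. */
    static bool small_segment_allowed(const struct toy_tp *tp,
                                      unsigned int len, unsigned int mss)
    {
        if (len >= mss)
            return true;
        return !((int32_t)(tp->snd_sml - tp->snd_una) > 0);
    }

    int main(void)
    {
        struct toy_tp tp = { .snd_sml = 1200, .snd_una = 1000 };
        /* A 100-byte write is deferred: the small segment ending at
         * 1200 is still unacknowledged. Returns 0 (deferred). */
        return small_segment_allowed(&tp, 100, 1448);
    }
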
skb              1882 net/ipv4/tcp_output.c static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
skb              1885 net/ipv4/tcp_output.c 	int nlen = skb->len - len;
skb              1890 net/ipv4/tcp_output.c 	if (skb->len != skb->data_len)
skb              1892 net/ipv4/tcp_output.c 				    skb, len, mss_now, gfp);
skb              1897 net/ipv4/tcp_output.c 	skb_copy_decrypted(buff, skb);
skb              1902 net/ipv4/tcp_output.c 	skb->truesize -= nlen;
skb              1905 net/ipv4/tcp_output.c 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
skb              1906 net/ipv4/tcp_output.c 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
skb              1907 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
skb              1910 net/ipv4/tcp_output.c 	flags = TCP_SKB_CB(skb)->tcp_flags;
skb              1911 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
skb              1917 net/ipv4/tcp_output.c 	tcp_skb_fragment_eor(skb, buff);
skb              1920 net/ipv4/tcp_output.c 	skb_split(skb, buff, len);
skb              1921 net/ipv4/tcp_output.c 	tcp_fragment_tstamp(skb, buff);
skb              1924 net/ipv4/tcp_output.c 	tcp_set_skb_tso_segs(skb, mss_now);
skb              1929 net/ipv4/tcp_output.c 	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
skb              1939 net/ipv4/tcp_output.c static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
skb              1965 net/ipv4/tcp_output.c 	BUG_ON(tcp_skb_pcount(skb) <= 1);
skb              1968 net/ipv4/tcp_output.c 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
skb              1980 net/ipv4/tcp_output.c 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
skb              2019 net/ipv4/tcp_output.c 		if (cong_win <= skb->len) {
skb              2024 net/ipv4/tcp_output.c 		if (send_win <= skb->len) {
skb              2031 net/ipv4/tcp_output.c 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
skb              2032 net/ipv4/tcp_output.c 	    TCP_SKB_CB(skb)->eor)
skb              2068 net/ipv4/tcp_output.c 	struct sk_buff *skb, *next;
skb              2070 net/ipv4/tcp_output.c 	skb = tcp_send_head(sk);
skb              2071 net/ipv4/tcp_output.c 	tcp_for_write_queue_from_safe(skb, next, sk) {
skb              2072 net/ipv4/tcp_output.c 		if (len <= skb->len)
skb              2075 net/ipv4/tcp_output.c 		if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
skb              2078 net/ipv4/tcp_output.c 		len -= skb->len;
skb              2097 net/ipv4/tcp_output.c 	struct sk_buff *skb, *nskb, *next;
skb              2166 net/ipv4/tcp_output.c 	skb = tcp_send_head(sk);
skb              2167 net/ipv4/tcp_output.c 	skb_copy_decrypted(nskb, skb);
skb              2169 net/ipv4/tcp_output.c 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
skb              2170 net/ipv4/tcp_output.c 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
skb              2176 net/ipv4/tcp_output.c 	tcp_insert_write_queue_before(nskb, skb, sk);
skb              2177 net/ipv4/tcp_output.c 	tcp_highest_sack_replace(sk, skb, nskb);
skb              2180 net/ipv4/tcp_output.c 	tcp_for_write_queue_from_safe(skb, next, sk) {
skb              2181 net/ipv4/tcp_output.c 		copy = min_t(int, skb->len, probe_size - len);
skb              2182 net/ipv4/tcp_output.c 		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
skb              2184 net/ipv4/tcp_output.c 		if (skb->len <= copy) {
skb              2187 net/ipv4/tcp_output.c 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
skb              2191 net/ipv4/tcp_output.c 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
skb              2192 net/ipv4/tcp_output.c 			tcp_skb_collapse_tstamp(nskb, skb);
skb              2193 net/ipv4/tcp_output.c 			tcp_unlink_write_queue(skb, sk);
skb              2194 net/ipv4/tcp_output.c 			sk_wmem_free_skb(sk, skb);
skb              2196 net/ipv4/tcp_output.c 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
skb              2198 net/ipv4/tcp_output.c 			if (!skb_shinfo(skb)->nr_frags) {
skb              2199 net/ipv4/tcp_output.c 				skb_pull(skb, copy);
skb              2201 net/ipv4/tcp_output.c 				__pskb_trim_head(skb, copy);
skb              2202 net/ipv4/tcp_output.c 				tcp_set_skb_tso_segs(skb, mss_now);
skb              2204 net/ipv4/tcp_output.c 			TCP_SKB_CB(skb)->seq += copy;
skb              2263 net/ipv4/tcp_output.c static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
skb              2269 net/ipv4/tcp_output.c 		      2 * skb->truesize,
skb              2369 net/ipv4/tcp_output.c 	struct sk_buff *skb;
skb              2390 net/ipv4/tcp_output.c 	while ((skb = tcp_send_head(sk))) {
skb              2395 net/ipv4/tcp_output.c 			skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
skb              2396 net/ipv4/tcp_output.c 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
skb              2397 net/ipv4/tcp_output.c 			tcp_init_tso_segs(skb, mss_now);
skb              2404 net/ipv4/tcp_output.c 		tso_segs = tcp_init_tso_segs(skb, mss_now);
skb              2407 net/ipv4/tcp_output.c 		cwnd_quota = tcp_cwnd_test(tp, skb);
skb              2416 net/ipv4/tcp_output.c 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
skb              2422 net/ipv4/tcp_output.c 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
skb              2423 net/ipv4/tcp_output.c 						     (tcp_skb_is_last(sk, skb) ?
skb              2428 net/ipv4/tcp_output.c 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
skb              2435 net/ipv4/tcp_output.c 			limit = tcp_mss_split_point(sk, skb, mss_now,
skb              2441 net/ipv4/tcp_output.c 		if (skb->len > limit &&
skb              2442 net/ipv4/tcp_output.c 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
skb              2445 net/ipv4/tcp_output.c 		if (tcp_small_queue_check(sk, skb, 0))
skb              2453 net/ipv4/tcp_output.c 		if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
skb              2456 net/ipv4/tcp_output.c 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
skb              2463 net/ipv4/tcp_output.c 		tcp_event_new_data_sent(sk, skb);
skb              2465 net/ipv4/tcp_output.c 		tcp_minshall_update(tp, mss_now, skb);
skb              2466 net/ipv4/tcp_output.c 		sent_pkts += tcp_skb_pcount(skb);
skb              2545 net/ipv4/tcp_output.c 				    const struct sk_buff *skb)
skb              2547 net/ipv4/tcp_output.c 	if (unlikely(skb_fclone_busy(sk, skb))) {
skb              2561 net/ipv4/tcp_output.c 	struct sk_buff *skb;
skb              2565 net/ipv4/tcp_output.c 	skb = tcp_send_head(sk);
skb              2566 net/ipv4/tcp_output.c 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
skb              2573 net/ipv4/tcp_output.c 	skb = skb_rb_last(&sk->tcp_rtx_queue);
skb              2574 net/ipv4/tcp_output.c 	if (unlikely(!skb)) {
skb              2586 net/ipv4/tcp_output.c 	if (skb_still_in_host_queue(sk, skb))
skb              2589 net/ipv4/tcp_output.c 	pcount = tcp_skb_pcount(skb);
skb              2593 net/ipv4/tcp_output.c 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
skb              2594 net/ipv4/tcp_output.c 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
skb              2598 net/ipv4/tcp_output.c 		skb = skb_rb_next(skb);
skb              2601 net/ipv4/tcp_output.c 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
skb              2604 net/ipv4/tcp_output.c 	if (__tcp_retransmit_skb(sk, skb, 1))
skb              2642 net/ipv4/tcp_output.c 	struct sk_buff *skb = tcp_send_head(sk);
skb              2644 net/ipv4/tcp_output.c 	BUG_ON(!skb || skb->len < mss_now);
skb              2779 net/ipv4/tcp_output.c void tcp_skb_collapse_tstamp(struct sk_buff *skb,
skb              2785 net/ipv4/tcp_output.c 		struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              2789 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->txstamp_ack |=
skb              2795 net/ipv4/tcp_output.c static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
skb              2798 net/ipv4/tcp_output.c 	struct sk_buff *next_skb = skb_rb_next(skb);
skb              2803 net/ipv4/tcp_output.c 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
skb              2806 net/ipv4/tcp_output.c 		if (next_skb_size <= skb_availroom(skb))
skb              2807 net/ipv4/tcp_output.c 			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
skb              2809 net/ipv4/tcp_output.c 		else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
skb              2812 net/ipv4/tcp_output.c 	tcp_highest_sack_replace(sk, next_skb, skb);
skb              2815 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
skb              2818 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
skb              2823 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
skb              2824 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
skb              2829 net/ipv4/tcp_output.c 		tp->retransmit_skb_hint = skb;
skb              2833 net/ipv4/tcp_output.c 	tcp_skb_collapse_tstamp(skb, next_skb);
skb              2840 net/ipv4/tcp_output.c static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
skb              2842 net/ipv4/tcp_output.c 	if (tcp_skb_pcount(skb) > 1)
skb              2844 net/ipv4/tcp_output.c 	if (skb_cloned(skb))
skb              2847 net/ipv4/tcp_output.c 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
skb              2860 net/ipv4/tcp_output.c 	struct sk_buff *skb = to, *tmp;
skb              2865 net/ipv4/tcp_output.c 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
skb              2868 net/ipv4/tcp_output.c 	skb_rbtree_walk_from_safe(skb, tmp) {
skb              2869 net/ipv4/tcp_output.c 		if (!tcp_can_collapse(sk, skb))
skb              2875 net/ipv4/tcp_output.c 		space -= skb->len;
skb              2885 net/ipv4/tcp_output.c 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
skb              2897 net/ipv4/tcp_output.c int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
skb              2917 net/ipv4/tcp_output.c 	if (skb_still_in_host_queue(sk, skb))
skb              2920 net/ipv4/tcp_output.c 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
skb              2921 net/ipv4/tcp_output.c 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
skb              2925 net/ipv4/tcp_output.c 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
skb              2939 net/ipv4/tcp_output.c 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
skb              2940 net/ipv4/tcp_output.c 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
skb              2944 net/ipv4/tcp_output.c 	if (skb->len > len) {
skb              2945 net/ipv4/tcp_output.c 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
skb              2949 net/ipv4/tcp_output.c 		if (skb_unclone(skb, GFP_ATOMIC))
skb              2952 net/ipv4/tcp_output.c 		diff = tcp_skb_pcount(skb);
skb              2953 net/ipv4/tcp_output.c 		tcp_set_skb_tso_segs(skb, cur_mss);
skb              2954 net/ipv4/tcp_output.c 		diff -= tcp_skb_pcount(skb);
skb              2956 net/ipv4/tcp_output.c 			tcp_adjust_pcount(sk, skb, diff);
skb              2957 net/ipv4/tcp_output.c 		if (skb->len < cur_mss)
skb              2958 net/ipv4/tcp_output.c 			tcp_retrans_try_collapse(sk, skb, cur_mss);
skb              2962 net/ipv4/tcp_output.c 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
skb              2963 net/ipv4/tcp_output.c 		tcp_ecn_clear_syn(sk, skb);
skb              2966 net/ipv4/tcp_output.c 	segs = tcp_skb_pcount(skb);
skb              2968 net/ipv4/tcp_output.c 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
skb              2971 net/ipv4/tcp_output.c 	tp->bytes_retrans += skb->len;
skb              2977 net/ipv4/tcp_output.c 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
skb              2978 net/ipv4/tcp_output.c 		     skb_headroom(skb) >= 0xFFFF)) {
skb              2981 net/ipv4/tcp_output.c 		tcp_skb_tsorted_save(skb) {
skb              2982 net/ipv4/tcp_output.c 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
skb              2989 net/ipv4/tcp_output.c 		} tcp_skb_tsorted_restore(skb);
skb              2992 net/ipv4/tcp_output.c 			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
skb              2993 net/ipv4/tcp_output.c 			tcp_rate_skb_sent(sk, skb);
skb              2996 net/ipv4/tcp_output.c 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
skb              3002 net/ipv4/tcp_output.c 	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
skb              3006 net/ipv4/tcp_output.c 				  TCP_SKB_CB(skb)->seq, segs, err);
skb              3009 net/ipv4/tcp_output.c 		trace_tcp_retransmit_skb(sk, skb);
skb              3016 net/ipv4/tcp_output.c int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
skb              3019 net/ipv4/tcp_output.c 	int err = __tcp_retransmit_skb(sk, skb, segs);
skb              3023 net/ipv4/tcp_output.c 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
skb              3027 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
skb              3028 net/ipv4/tcp_output.c 		tp->retrans_out += tcp_skb_pcount(skb);
skb              3033 net/ipv4/tcp_output.c 		tp->retrans_stamp = tcp_skb_timestamp(skb);
skb              3037 net/ipv4/tcp_output.c 	tp->undo_retrans += tcp_skb_pcount(skb);
skb              3049 net/ipv4/tcp_output.c 	struct sk_buff *skb, *rtx_head, *hole = NULL;
skb              3058 net/ipv4/tcp_output.c 	skb = tp->retransmit_skb_hint ?: rtx_head;
skb              3060 net/ipv4/tcp_output.c 	skb_rbtree_walk_from(skb) {
skb              3069 net/ipv4/tcp_output.c 			tp->retransmit_skb_hint = skb;
skb              3074 net/ipv4/tcp_output.c 		sacked = TCP_SKB_CB(skb)->sacked;
skb              3084 net/ipv4/tcp_output.c 				hole = skb;
skb              3097 net/ipv4/tcp_output.c 		if (tcp_small_queue_check(sk, skb, 1))
skb              3100 net/ipv4/tcp_output.c 		if (tcp_retransmit_skb(sk, skb, segs))
skb              3103 net/ipv4/tcp_output.c 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
skb              3106 net/ipv4/tcp_output.c 			tp->prr_out += tcp_skb_pcount(skb);
skb              3108 net/ipv4/tcp_output.c 		if (skb == rtx_head &&
skb              3113 net/ipv4/tcp_output.c 					     skb);
skb              3143 net/ipv4/tcp_output.c 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
skb              3169 net/ipv4/tcp_output.c 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
skb              3170 net/ipv4/tcp_output.c 		if (unlikely(!skb))
skb              3173 net/ipv4/tcp_output.c 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
skb              3174 net/ipv4/tcp_output.c 		skb_reserve(skb, MAX_TCP_HEADER);
skb              3175 net/ipv4/tcp_output.c 		sk_forced_mem_schedule(sk, skb->truesize);
skb              3177 net/ipv4/tcp_output.c 		tcp_init_nondata_skb(skb, tp->write_seq,
skb              3179 net/ipv4/tcp_output.c 		tcp_queue_skb(sk, skb);
skb              3191 net/ipv4/tcp_output.c 	struct sk_buff *skb;
skb              3196 net/ipv4/tcp_output.c 	skb = alloc_skb(MAX_TCP_HEADER, priority);
skb              3197 net/ipv4/tcp_output.c 	if (!skb) {
skb              3203 net/ipv4/tcp_output.c 	skb_reserve(skb, MAX_TCP_HEADER);
skb              3204 net/ipv4/tcp_output.c 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
skb              3208 net/ipv4/tcp_output.c 	if (tcp_transmit_skb(sk, skb, 0, priority))
skb              3225 net/ipv4/tcp_output.c 	struct sk_buff *skb;
skb              3227 net/ipv4/tcp_output.c 	skb = tcp_rtx_queue_head(sk);
skb              3228 net/ipv4/tcp_output.c 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
skb              3232 net/ipv4/tcp_output.c 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
skb              3233 net/ipv4/tcp_output.c 		if (skb_cloned(skb)) {
skb              3236 net/ipv4/tcp_output.c 			tcp_skb_tsorted_save(skb) {
skb              3237 net/ipv4/tcp_output.c 				nskb = skb_copy(skb, GFP_ATOMIC);
skb              3238 net/ipv4/tcp_output.c 			} tcp_skb_tsorted_restore(skb);
skb              3242 net/ipv4/tcp_output.c 			tcp_highest_sack_replace(sk, skb, nskb);
skb              3243 net/ipv4/tcp_output.c 			tcp_rtx_queue_unlink_and_free(skb, sk);
skb              3248 net/ipv4/tcp_output.c 			skb = nskb;
skb              3251 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
skb              3252 net/ipv4/tcp_output.c 		tcp_ecn_send_synack(sk, skb);
skb              3254 net/ipv4/tcp_output.c 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
skb              3275 net/ipv4/tcp_output.c 	struct sk_buff *skb;
skb              3281 net/ipv4/tcp_output.c 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
skb              3282 net/ipv4/tcp_output.c 	if (unlikely(!skb)) {
skb              3287 net/ipv4/tcp_output.c 	skb_reserve(skb, MAX_TCP_HEADER);
skb              3291 net/ipv4/tcp_output.c 		skb_set_owner_w(skb, req_to_sk(req));
skb              3303 net/ipv4/tcp_output.c 		skb_set_owner_w(skb, (struct sock *)sk);
skb              3306 net/ipv4/tcp_output.c 	skb_dst_set(skb, dst);
skb              3314 net/ipv4/tcp_output.c 		skb->skb_mstamp_ns = cookie_init_timestamp(req);
skb              3318 net/ipv4/tcp_output.c 		skb->skb_mstamp_ns = now;
skb              3320 net/ipv4/tcp_output.c 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
skb              3327 net/ipv4/tcp_output.c 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
skb              3328 net/ipv4/tcp_output.c 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
skb              3331 net/ipv4/tcp_output.c 	skb_push(skb, tcp_header_size);
skb              3332 net/ipv4/tcp_output.c 	skb_reset_transport_header(skb);
skb              3334 net/ipv4/tcp_output.c 	th = (struct tcphdr *)skb->data;
skb              3341 net/ipv4/tcp_output.c 	skb->mark = ireq->ir_mark;
skb              3342 net/ipv4/tcp_output.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb              3357 net/ipv4/tcp_output.c 					       md5, req_to_sk(req), skb);
skb              3361 net/ipv4/tcp_output.c 	skb->skb_mstamp_ns = now;
skb              3362 net/ipv4/tcp_output.c 	tcp_add_tx_delay(skb, tp);
skb              3364 net/ipv4/tcp_output.c 	return skb;
skb              3464 net/ipv4/tcp_output.c static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
skb              3467 net/ipv4/tcp_output.c 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
skb              3469 net/ipv4/tcp_output.c 	tcb->end_seq += skb->len;
skb              3470 net/ipv4/tcp_output.c 	__skb_header_release(skb);
skb              3471 net/ipv4/tcp_output.c 	sk_wmem_queued_add(sk, skb->truesize);
skb              3472 net/ipv4/tcp_output.c 	sk_mem_charge(sk, skb->truesize);
skb              3474 net/ipv4/tcp_output.c 	tp->packets_out += tcp_skb_pcount(skb);
skb              3740 net/ipv4/tcp_output.c 	struct sk_buff *skb;
skb              3743 net/ipv4/tcp_output.c 	skb = alloc_skb(MAX_TCP_HEADER,
skb              3745 net/ipv4/tcp_output.c 	if (!skb)
skb              3749 net/ipv4/tcp_output.c 	skb_reserve(skb, MAX_TCP_HEADER);
skb              3754 net/ipv4/tcp_output.c 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
skb              3756 net/ipv4/tcp_output.c 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
skb              3773 net/ipv4/tcp_output.c 	struct sk_buff *skb;
skb              3778 net/ipv4/tcp_output.c 	skb = tcp_send_head(sk);
skb              3779 net/ipv4/tcp_output.c 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
skb              3782 net/ipv4/tcp_output.c 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
skb              3784 net/ipv4/tcp_output.c 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
skb              3785 net/ipv4/tcp_output.c 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
skb              3791 net/ipv4/tcp_output.c 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
skb              3792 net/ipv4/tcp_output.c 		    skb->len > mss) {
skb              3794 net/ipv4/tcp_output.c 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
skb              3796 net/ipv4/tcp_output.c 					 skb, seg_size, mss, GFP_ATOMIC))
skb              3798 net/ipv4/tcp_output.c 		} else if (!tcp_skb_pcount(skb))
skb              3799 net/ipv4/tcp_output.c 			tcp_set_skb_tso_segs(skb, mss);
skb              3801 net/ipv4/tcp_output.c 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
skb              3802 net/ipv4/tcp_output.c 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
skb              3804 net/ipv4/tcp_output.c 			tcp_event_new_data_sent(sk, skb);
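
The tcp_output.c entries above center on the retransmit path: __tcp_retransmit_skb() trims or fragments an skb to the current MSS, and tcp_retrans_try_collapse()/tcp_collapse_retrans() merge an undersized segment with its successor so one retransmit covers both. A minimal userspace sketch of that collapse rule, assuming a toy struct seg in place of the retransmit rbtree:

/*
 * Illustrative sketch only, not kernel code: merge a small segment
 * with its successor, as tcp_collapse_retrans() does, whenever the
 * merged payload still fits in the current MSS.
 */
#include <stdio.h>

struct seg { unsigned int seq, len; };	/* hypothetical stand-in */

/* Merge s[i] with s[i+1] when the result fits in mss; 1 on success. */
static int try_collapse(struct seg *s, int n, int i, unsigned int mss)
{
	if (i + 1 >= n || s[i].len + s[i + 1].len > mss)
		return 0;
	s[i].len += s[i + 1].len;		/* absorb successor's payload */
	for (int j = i + 1; j < n - 1; j++)	/* close the gap */
		s[j] = s[j + 1];
	return 1;
}

int main(void)
{
	struct seg q[] = { {1000, 300}, {1300, 400}, {1700, 900} };
	int n = 3;

	while (try_collapse(q, n, 0, 1460))	/* mss = 1460 */
		n--;
	for (int i = 0; i < n; i++)		/* 700-byte merge, then 900 */
		printf("seq=%u len=%u\n", q[i].seq, q[i].len);
	return 0;
}
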
skb                40 net/ipv4/tcp_rate.c void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
skb                59 net/ipv4/tcp_rate.c 		u64 tstamp_us = tcp_skb_timestamp_us(skb);
skb                65 net/ipv4/tcp_rate.c 	TCP_SKB_CB(skb)->tx.first_tx_mstamp	= tp->first_tx_mstamp;
skb                66 net/ipv4/tcp_rate.c 	TCP_SKB_CB(skb)->tx.delivered_mstamp	= tp->delivered_mstamp;
skb                67 net/ipv4/tcp_rate.c 	TCP_SKB_CB(skb)->tx.delivered		= tp->delivered;
skb                68 net/ipv4/tcp_rate.c 	TCP_SKB_CB(skb)->tx.is_app_limited	= tp->app_limited ? 1 : 0;
skb                78 net/ipv4/tcp_rate.c void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
skb                82 net/ipv4/tcp_rate.c 	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
skb                95 net/ipv4/tcp_rate.c 		tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
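
tcp_rate_skb_sent() above snapshots the connection's delivery state (tp->delivered and its timestamp) into the skb at transmit time; when the skb is later ACKed, tcp_rate_skb_delivered() can turn the two snapshots into a delivery rate. A plain-C sketch of that bookkeeping, with hypothetical names:

/*
 * Minimal sketch (not the kernel structures): each packet carries the
 * delivered count and timestamp observed at send time, and an ACK for
 * it yields packets-delivered-per-interval, i.e. a delivery rate.
 */
#include <stdio.h>

struct tx_sample {			/* what TCP_SKB_CB(skb)->tx records */
	unsigned long delivered;	/* tp->delivered at send */
	double        delivered_us;	/* tp->delivered_mstamp at send */
};

static double delivery_rate(struct tx_sample s,
			    unsigned long delivered_now, double now_us)
{
	double interval = now_us - s.delivered_us;

	return interval > 0 ? (delivered_now - s.delivered) / interval : 0;
}

int main(void)
{
	struct tx_sample s = { .delivered = 100, .delivered_us = 0.0 };

	/* 20 more packets delivered over 10000 us -> 0.002 pkts/us */
	printf("%.6f packets/us\n", delivery_rate(s, 120, 10000.0));
	return 0;
}
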
skb                 5 net/ipv4/tcp_recovery.c void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
skb                 9 net/ipv4/tcp_recovery.c 	tcp_skb_mark_lost_uncond_verify(tp, skb);
skb                10 net/ipv4/tcp_recovery.c 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
skb                12 net/ipv4/tcp_recovery.c 		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
skb                13 net/ipv4/tcp_recovery.c 		tp->retrans_out -= tcp_skb_pcount(skb);
skb                15 net/ipv4/tcp_recovery.c 			      tcp_skb_pcount(skb));
skb                50 net/ipv4/tcp_recovery.c s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
skb                53 net/ipv4/tcp_recovery.c 	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
skb                79 net/ipv4/tcp_recovery.c 	struct sk_buff *skb, *n;
skb                84 net/ipv4/tcp_recovery.c 	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
skb                86 net/ipv4/tcp_recovery.c 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
skb                95 net/ipv4/tcp_recovery.c 					 tcp_skb_timestamp_us(skb),
skb               102 net/ipv4/tcp_recovery.c 		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
skb               104 net/ipv4/tcp_recovery.c 			tcp_mark_skb_lost(sk, skb);
skb               105 net/ipv4/tcp_recovery.c 			list_del_init(&skb->tcp_tsorted_anchor);
skb               238 net/ipv4/tcp_recovery.c 		struct sk_buff *skb = tcp_rtx_queue_head(sk);
skb               241 net/ipv4/tcp_recovery.c 		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
skb               244 net/ipv4/tcp_recovery.c 		mss = tcp_skb_mss(skb);
skb               245 net/ipv4/tcp_recovery.c 		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
skb               246 net/ipv4/tcp_recovery.c 			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
skb               249 net/ipv4/tcp_recovery.c 		tcp_skb_mark_lost_uncond_verify(tp, skb);
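
The tcp_recovery.c entries implement RACK: tcp_rack_skb_timeout() above returns how long until a packet should be considered lost, namely the RTT plus a reordering window minus the packet's age, and tcp_mark_skb_lost() fires once that remainder is non-positive. The arithmetic, as a standalone sketch in microseconds:

/*
 * Sketch of the RACK loss-timeout arithmetic: a packet is declared
 * lost once it was sent more than (rtt + reordering window) ago.
 * Plain C, no kernel types.
 */
#include <stdio.h>

static long rack_skb_timeout(long rtt_us, long reo_wnd_us,
			     long now_us, long skb_tx_us)
{
	/* time remaining before this packet should be marked lost */
	return rtt_us + reo_wnd_us - (now_us - skb_tx_us);
}

int main(void)
{
	long rtt = 40000, reo = 10000;	/* 40 ms RTT, 10 ms reo window */

	/* sent 55 ms ago: remainder is negative -> mark lost now */
	printf("remaining=%ld us\n", rack_skb_timeout(rtt, reo, 60000, 5000));
	/* sent 30 ms ago: 20 ms left before it counts as lost */
	printf("remaining=%ld us\n", rack_skb_timeout(rtt, reo, 60000, 30000));
	return 0;
}
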
skb               341 net/ipv4/tcp_timer.c 	struct sk_buff *skb = tcp_send_head(sk);
skb               345 net/ipv4/tcp_timer.c 	if (tp->packets_out || !skb) {
skb                95 net/ipv4/tunnel4.c static int tunnel4_rcv(struct sk_buff *skb)
skb                99 net/ipv4/tunnel4.c 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
skb               103 net/ipv4/tunnel4.c 		if (!handler->handler(skb))
skb               106 net/ipv4/tunnel4.c 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
skb               109 net/ipv4/tunnel4.c 	kfree_skb(skb);
skb               114 net/ipv4/tunnel4.c static int tunnel64_rcv(struct sk_buff *skb)
skb               118 net/ipv4/tunnel4.c 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
skb               122 net/ipv4/tunnel4.c 		if (!handler->handler(skb))
skb               125 net/ipv4/tunnel4.c 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
skb               128 net/ipv4/tunnel4.c 	kfree_skb(skb);
skb               134 net/ipv4/tunnel4.c static int tunnelmpls4_rcv(struct sk_buff *skb)
skb               138 net/ipv4/tunnel4.c 	if (!pskb_may_pull(skb, sizeof(struct mpls_label)))
skb               142 net/ipv4/tunnel4.c 		if (!handler->handler(skb))
skb               145 net/ipv4/tunnel4.c 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
skb               148 net/ipv4/tunnel4.c 	kfree_skb(skb);
skb               153 net/ipv4/tunnel4.c static int tunnel4_err(struct sk_buff *skb, u32 info)
skb               158 net/ipv4/tunnel4.c 		if (!handler->err_handler(skb, info))
skb               165 net/ipv4/tunnel4.c static int tunnel64_err(struct sk_buff *skb, u32 info)
skb               170 net/ipv4/tunnel4.c 		if (!handler->err_handler(skb, info))
skb               178 net/ipv4/tunnel4.c static int tunnelmpls4_err(struct sk_buff *skb, u32 info)
skb               183 net/ipv4/tunnel4.c 		if (!handler->err_handler(skb, info))
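
tunnel4_rcv(), tunnel64_rcv() and tunnelmpls4_rcv() above all follow one dispatch pattern: walk a priority-ordered chain of registered handlers, stop at the first one that returns 0 (packet consumed), and fall back to an ICMP port-unreachable plus kfree_skb() when the chain is exhausted. A self-contained userspace sketch of that chain, with a hypothetical struct pkt standing in for the sk_buff:

#include <stdio.h>
#include <stddef.h>

struct pkt { int proto; };		/* hypothetical packet stand-in */

struct tunnel_handler {
	int (*handler)(struct pkt *p);	/* 0 = consumed */
	struct tunnel_handler *next;
};

static int ipip_handler(struct pkt *p) { return p->proto == 4  ? 0 : -1; }
static int sit_handler(struct pkt *p)  { return p->proto == 41 ? 0 : -1; }

static int tunnel_rcv(struct tunnel_handler *chain, struct pkt *p)
{
	for (struct tunnel_handler *h = chain; h; h = h->next)
		if (!h->handler(p))
			return 0;	/* a handler took the packet */
	return -1;			/* no handler: caller drops + ICMP */
}

int main(void)
{
	struct tunnel_handler sit  = { sit_handler,  NULL };
	struct tunnel_handler ipip = { ipip_handler, &sit };
	struct pkt p = { .proto = 41 };

	printf("proto 41: %s\n", tunnel_rcv(&ipip, &p) ? "unreach" : "delivered");
	p.proto = 99;
	printf("proto 99: %s\n", tunnel_rcv(&ipip, &p) ? "unreach" : "delivered");
	return 0;
}
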
skb               414 net/ipv4/udp.c 				     struct sk_buff *skb)
skb               430 net/ipv4/udp.c 				result = reuseport_select_sock(sk, hash, skb,
skb               447 net/ipv4/udp.c 		int sdif, struct udp_table *udptable, struct sk_buff *skb)
skb               460 net/ipv4/udp.c 				  hslot2, skb);
skb               468 net/ipv4/udp.c 					  hslot2, skb);
skb               476 net/ipv4/udp.c static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
skb               480 net/ipv4/udp.c 	const struct iphdr *iph = ip_hdr(skb);
skb               482 net/ipv4/udp.c 	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
skb               483 net/ipv4/udp.c 				 iph->daddr, dport, inet_iif(skb),
skb               484 net/ipv4/udp.c 				 inet_sdif(skb), udptable, skb);
skb               487 net/ipv4/udp.c struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
skb               490 net/ipv4/udp.c 	const struct iphdr *iph = ip_hdr(skb);
skb               492 net/ipv4/udp.c 	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
skb               493 net/ipv4/udp.c 				 iph->daddr, dport, inet_iif(skb),
skb               494 net/ipv4/udp.c 				 inet_sdif(skb), &udp_table, NULL);
skb               546 net/ipv4/udp.c static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
skb               551 net/ipv4/udp.c 		int (*handler)(struct sk_buff *skb, u32 info);
skb               558 net/ipv4/udp.c 		if (handler && !handler(skb, info))
skb               586 net/ipv4/udp.c 					 struct sk_buff *skb, u32 info)
skb               591 net/ipv4/udp.c 	network_offset = skb_network_offset(skb);
skb               592 net/ipv4/udp.c 	transport_offset = skb_transport_offset(skb);
skb               595 net/ipv4/udp.c 	skb_reset_network_header(skb);
skb               598 net/ipv4/udp.c 	skb_set_transport_header(skb, iph->ihl << 2);
skb               601 net/ipv4/udp.c 			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
skb               604 net/ipv4/udp.c 		int (*lookup)(struct sock *sk, struct sk_buff *skb);
skb               608 net/ipv4/udp.c 		if (!lookup || lookup(sk, skb))
skb               613 net/ipv4/udp.c 		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));
skb               615 net/ipv4/udp.c 	skb_set_transport_header(skb, transport_offset);
skb               616 net/ipv4/udp.c 	skb_set_network_header(skb, network_offset);
skb               632 net/ipv4/udp.c int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
skb               635 net/ipv4/udp.c 	const struct iphdr *iph = (const struct iphdr *)skb->data;
skb               636 net/ipv4/udp.c 	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
skb               637 net/ipv4/udp.c 	const int type = icmp_hdr(skb)->type;
skb               638 net/ipv4/udp.c 	const int code = icmp_hdr(skb)->code;
skb               643 net/ipv4/udp.c 	struct net *net = dev_net(skb->dev);
skb               646 net/ipv4/udp.c 			       iph->saddr, uh->source, skb->dev->ifindex,
skb               647 net/ipv4/udp.c 			       inet_sdif(skb), udptable, NULL);
skb               652 net/ipv4/udp.c 			sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
skb               683 net/ipv4/udp.c 			ipv4_sk_update_pmtu(skb, sk, info);
skb               698 net/ipv4/udp.c 		ipv4_sk_redirect(skb, sk);
skb               714 net/ipv4/udp.c 		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
skb               722 net/ipv4/udp.c int udp_err(struct sk_buff *skb, u32 info)
skb               724 net/ipv4/udp.c 	return __udp4_lib_err(skb, info, &udp_table);
skb               749 net/ipv4/udp.c void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
skb               751 net/ipv4/udp.c 	struct udphdr *uh = udp_hdr(skb);
skb               752 net/ipv4/udp.c 	int offset = skb_transport_offset(skb);
skb               753 net/ipv4/udp.c 	int len = skb->len - offset;
skb               757 net/ipv4/udp.c 	if (!skb_has_frag_list(skb)) {
skb               761 net/ipv4/udp.c 		skb->csum_start = skb_transport_header(skb) - skb->head;
skb               762 net/ipv4/udp.c 		skb->csum_offset = offsetof(struct udphdr, check);
skb               773 net/ipv4/udp.c 		skb_walk_frags(skb, frags) {
skb               778 net/ipv4/udp.c 		csum = skb_checksum(skb, offset, hlen, csum);
skb               779 net/ipv4/udp.c 		skb->ip_summed = CHECKSUM_NONE;
skb               791 net/ipv4/udp.c void udp_set_csum(bool nocheck, struct sk_buff *skb,
skb               794 net/ipv4/udp.c 	struct udphdr *uh = udp_hdr(skb);
skb               798 net/ipv4/udp.c 	} else if (skb_is_gso(skb)) {
skb               800 net/ipv4/udp.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               802 net/ipv4/udp.c 		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
skb               806 net/ipv4/udp.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb               807 net/ipv4/udp.c 		skb->csum_start = skb_transport_header(skb) - skb->head;
skb               808 net/ipv4/udp.c 		skb->csum_offset = offsetof(struct udphdr, check);
skb               814 net/ipv4/udp.c static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
skb               817 net/ipv4/udp.c 	struct sock *sk = skb->sk;
skb               822 net/ipv4/udp.c 	int offset = skb_transport_offset(skb);
skb               823 net/ipv4/udp.c 	int len = skb->len - offset;
skb               830 net/ipv4/udp.c 	uh = udp_hdr(skb);
skb               837 net/ipv4/udp.c 		const int hlen = skb_network_header_len(skb) +
skb               841 net/ipv4/udp.c 			kfree_skb(skb);
skb               844 net/ipv4/udp.c 		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
skb               845 net/ipv4/udp.c 			kfree_skb(skb);
skb               849 net/ipv4/udp.c 			kfree_skb(skb);
skb               852 net/ipv4/udp.c 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
skb               853 net/ipv4/udp.c 		    dst_xfrm(skb_dst(skb))) {
skb               854 net/ipv4/udp.c 			kfree_skb(skb);
skb               859 net/ipv4/udp.c 			skb_shinfo(skb)->gso_size = cork->gso_size;
skb               860 net/ipv4/udp.c 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
skb               861 net/ipv4/udp.c 			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
skb               868 net/ipv4/udp.c 		csum = udplite_csum(skb);
skb               872 net/ipv4/udp.c 		skb->ip_summed = CHECKSUM_NONE;
skb               875 net/ipv4/udp.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
skb               878 net/ipv4/udp.c 		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
skb               882 net/ipv4/udp.c 		csum = udp_csum(skb);
skb               891 net/ipv4/udp.c 	err = ip_send_skb(sock_net(sk), skb);
skb               912 net/ipv4/udp.c 	struct sk_buff *skb;
skb               915 net/ipv4/udp.c 	skb = ip_finish_skb(sk, fl4);
skb               916 net/ipv4/udp.c 	if (!skb)
skb               919 net/ipv4/udp.c 	err = udp_send_skb(skb, fl4, &inet->cork.base);
skb               983 net/ipv4/udp.c 	struct sk_buff *skb;
skb              1173 net/ipv4/udp.c 		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
skb              1176 net/ipv4/udp.c 		err = PTR_ERR(skb);
skb              1177 net/ipv4/udp.c 		if (!IS_ERR_OR_NULL(skb))
skb              1178 net/ipv4/udp.c 			err = udp_send_skb(skb, fl4, &cork);
skb              1308 net/ipv4/udp.c static bool udp_try_make_stateless(struct sk_buff *skb)
skb              1310 net/ipv4/udp.c 	if (!skb_has_extensions(skb))
skb              1313 net/ipv4/udp.c 	if (!secpath_exists(skb)) {
skb              1314 net/ipv4/udp.c 		skb_ext_reset(skb);
skb              1321 net/ipv4/udp.c static void udp_set_dev_scratch(struct sk_buff *skb)
skb              1323 net/ipv4/udp.c 	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
skb              1326 net/ipv4/udp.c 	scratch->_tsize_state = skb->truesize;
skb              1328 net/ipv4/udp.c 	scratch->len = skb->len;
skb              1329 net/ipv4/udp.c 	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
skb              1330 net/ipv4/udp.c 	scratch->is_linear = !skb_is_nonlinear(skb);
skb              1332 net/ipv4/udp.c 	if (udp_try_make_stateless(skb))
skb              1336 net/ipv4/udp.c static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
skb              1345 net/ipv4/udp.c 	if (!skb_shared(skb))
skb              1346 net/ipv4/udp.c 		udp_skb_scratch(skb)->csum_unnecessary = true;
skb              1350 net/ipv4/udp.c static int udp_skb_truesize(struct sk_buff *skb)
skb              1352 net/ipv4/udp.c 	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
skb              1355 net/ipv4/udp.c static bool udp_skb_has_head_state(struct sk_buff *skb)
skb              1357 net/ipv4/udp.c 	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
skb              1408 net/ipv4/udp.c void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
skb              1410 net/ipv4/udp.c 	prefetch(&skb->data);
skb              1411 net/ipv4/udp.c 	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
skb              1416 net/ipv4/udp.c static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
skb              1418 net/ipv4/udp.c 	prefetch(&skb->data);
skb              1419 net/ipv4/udp.c 	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
skb              1447 net/ipv4/udp.c int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
skb              1468 net/ipv4/udp.c 		skb_condense(skb);
skb              1472 net/ipv4/udp.c 	size = skb->truesize;
skb              1473 net/ipv4/udp.c 	udp_set_dev_scratch(skb);
skb              1500 net/ipv4/udp.c 	sock_skb_set_dropcount(sk, skb);
skb              1502 net/ipv4/udp.c 	__skb_queue_tail(list, skb);
skb              1512 net/ipv4/udp.c 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
skb              1526 net/ipv4/udp.c 	struct sk_buff *skb;
skb              1529 net/ipv4/udp.c 	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
skb              1530 net/ipv4/udp.c 		total += skb->truesize;
skb              1531 net/ipv4/udp.c 		kfree_skb(skb);
skb              1547 net/ipv4/udp.c void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
skb              1556 net/ipv4/udp.c 	if (!skb_unref(skb))
skb              1562 net/ipv4/udp.c 	if (unlikely(udp_skb_has_head_state(skb)))
skb              1563 net/ipv4/udp.c 		skb_release_head_state(skb);
skb              1564 net/ipv4/udp.c 	__consume_stateless_skb(skb);
skb              1572 net/ipv4/udp.c 	struct sk_buff *skb;
skb              1574 net/ipv4/udp.c 	while ((skb = skb_peek(rcvq)) != NULL) {
skb              1575 net/ipv4/udp.c 		if (udp_lib_checksum_complete(skb)) {
skb              1581 net/ipv4/udp.c 			__skb_unlink(skb, rcvq);
skb              1582 net/ipv4/udp.c 			*total += skb->truesize;
skb              1583 net/ipv4/udp.c 			kfree_skb(skb);
skb              1585 net/ipv4/udp.c 			udp_skb_csum_unnecessary_set(skb);
skb              1589 net/ipv4/udp.c 	return skb;
skb              1603 net/ipv4/udp.c 	struct sk_buff *skb;
skb              1608 net/ipv4/udp.c 	skb = __first_packet_length(sk, rcvq, &total);
skb              1609 net/ipv4/udp.c 	if (!skb && !skb_queue_empty_lockless(sk_queue)) {
skb              1614 net/ipv4/udp.c 		skb = __first_packet_length(sk, rcvq, &total);
skb              1616 net/ipv4/udp.c 	res = skb ? skb->len : -1;
skb              1665 net/ipv4/udp.c 		struct sk_buff *skb;
skb              1674 net/ipv4/udp.c 			skb = __skb_try_recv_from_queue(sk, queue, flags,
skb              1677 net/ipv4/udp.c 			if (skb) {
skb              1679 net/ipv4/udp.c 				return skb;
skb              1695 net/ipv4/udp.c 			skb = __skb_try_recv_from_queue(sk, queue, flags,
skb              1700 net/ipv4/udp.c 			if (skb)
skb              1701 net/ipv4/udp.c 				return skb;
skb              1730 net/ipv4/udp.c 	struct sk_buff *skb;
skb              1741 net/ipv4/udp.c 	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
skb              1742 net/ipv4/udp.c 	if (!skb)
skb              1745 net/ipv4/udp.c 	ulen = udp_skb_len(skb);
skb              1759 net/ipv4/udp.c 	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
skb              1760 net/ipv4/udp.c 		checksum_valid = udp_skb_csum_unnecessary(skb) ||
skb              1761 net/ipv4/udp.c 				!__udp_lib_checksum_complete(skb);
skb              1766 net/ipv4/udp.c 	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
skb              1767 net/ipv4/udp.c 		if (udp_skb_is_linear(skb))
skb              1768 net/ipv4/udp.c 			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
skb              1770 net/ipv4/udp.c 			err = skb_copy_datagram_msg(skb, off, msg, copied);
skb              1772 net/ipv4/udp.c 		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
skb              1784 net/ipv4/udp.c 		kfree_skb(skb);
skb              1792 net/ipv4/udp.c 	sock_recv_ts_and_drops(msg, sk, skb);
skb              1797 net/ipv4/udp.c 		sin->sin_port = udp_hdr(skb)->source;
skb              1798 net/ipv4/udp.c 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
skb              1808 net/ipv4/udp.c 		udp_cmsg_recv(msg, sk, skb);
skb              1811 net/ipv4/udp.c 		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
skb              1817 net/ipv4/udp.c 	skb_consume_udp(sk, skb, peeking ? -err : err);
skb              1821 net/ipv4/udp.c 	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
skb              1826 net/ipv4/udp.c 	kfree_skb(skb);
skb              1961 net/ipv4/udp.c static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb              1966 net/ipv4/udp.c 		sock_rps_save_rxhash(sk, skb);
skb              1967 net/ipv4/udp.c 		sk_mark_napi_id(sk, skb);
skb              1970 net/ipv4/udp.c 		sk_mark_napi_id_once(sk, skb);
skb              1973 net/ipv4/udp.c 	rc = __udp_enqueue_schedule_skb(sk, skb);
skb              1982 net/ipv4/udp.c 		kfree_skb(skb);
skb              1998 net/ipv4/udp.c static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
skb              2006 net/ipv4/udp.c 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
skb              2008 net/ipv4/udp.c 	nf_reset_ct(skb);
skb              2011 net/ipv4/udp.c 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
skb              2030 net/ipv4/udp.c 			if (udp_lib_checksum_complete(skb))
skb              2033 net/ipv4/udp.c 			ret = encap_rcv(sk, skb);
skb              2048 net/ipv4/udp.c 	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
skb              2063 net/ipv4/udp.c 					    UDP_SKB_CB(skb)->cscov, skb->len);
skb              2072 net/ipv4/udp.c 		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
skb              2074 net/ipv4/udp.c 					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
skb              2081 net/ipv4/udp.c 	    udp_lib_checksum_complete(skb))
skb              2084 net/ipv4/udp.c 	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
skb              2087 net/ipv4/udp.c 	udp_csum_pull_header(skb);
skb              2089 net/ipv4/udp.c 	ipv4_pktinfo_prepare(sk, skb);
skb              2090 net/ipv4/udp.c 	return __udp_queue_rcv_skb(sk, skb);
skb              2097 net/ipv4/udp.c 	kfree_skb(skb);
skb              2101 net/ipv4/udp.c static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb              2106 net/ipv4/udp.c 	if (likely(!udp_unexpected_gso(sk, skb)))
skb              2107 net/ipv4/udp.c 		return udp_queue_rcv_one_skb(sk, skb);
skb              2110 net/ipv4/udp.c 	__skb_push(skb, -skb_mac_offset(skb));
skb              2111 net/ipv4/udp.c 	segs = udp_rcv_segment(sk, skb, true);
skb              2112 net/ipv4/udp.c 	for (skb = segs; skb; skb = next) {
skb              2113 net/ipv4/udp.c 		next = skb->next;
skb              2114 net/ipv4/udp.c 		__skb_pull(skb, skb_transport_offset(skb));
skb              2115 net/ipv4/udp.c 		ret = udp_queue_rcv_one_skb(sk, skb);
skb              2117 net/ipv4/udp.c 			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
skb              2143 net/ipv4/udp.c static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
skb              2154 net/ipv4/udp.c 	int dif = skb->dev->ifindex;
skb              2155 net/ipv4/udp.c 	int sdif = inet_sdif(skb);
skb              2177 net/ipv4/udp.c 		nskb = skb_clone(skb, GFP_ATOMIC);
skb              2198 net/ipv4/udp.c 		if (udp_queue_rcv_skb(first, skb) > 0)
skb              2199 net/ipv4/udp.c 			consume_skb(skb);
skb              2201 net/ipv4/udp.c 		kfree_skb(skb);
skb              2213 net/ipv4/udp.c static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
skb              2218 net/ipv4/udp.c 	UDP_SKB_CB(skb)->partial_cov = 0;
skb              2219 net/ipv4/udp.c 	UDP_SKB_CB(skb)->cscov = skb->len;
skb              2222 net/ipv4/udp.c 		err = udplite_checksum_init(skb, uh);
skb              2226 net/ipv4/udp.c 		if (UDP_SKB_CB(skb)->partial_cov) {
skb              2227 net/ipv4/udp.c 			skb->csum = inet_compute_pseudo(skb, proto);
skb              2235 net/ipv4/udp.c 	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
skb              2240 net/ipv4/udp.c 	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
skb              2242 net/ipv4/udp.c 		if (skb->csum_complete_sw)
skb              2249 net/ipv4/udp.c 		skb_checksum_complete_unset(skb);
skb              2258 net/ipv4/udp.c static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
skb              2264 net/ipv4/udp.c 		skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);
skb              2266 net/ipv4/udp.c 	ret = udp_queue_rcv_skb(sk, skb);
skb              2280 net/ipv4/udp.c int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
skb              2286 net/ipv4/udp.c 	struct rtable *rt = skb_rtable(skb);
skb              2288 net/ipv4/udp.c 	struct net *net = dev_net(skb->dev);
skb              2293 net/ipv4/udp.c 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
skb              2296 net/ipv4/udp.c 	uh   = udp_hdr(skb);
skb              2298 net/ipv4/udp.c 	saddr = ip_hdr(skb)->saddr;
skb              2299 net/ipv4/udp.c 	daddr = ip_hdr(skb)->daddr;
skb              2301 net/ipv4/udp.c 	if (ulen > skb->len)
skb              2306 net/ipv4/udp.c 		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
skb              2308 net/ipv4/udp.c 		uh = udp_hdr(skb);
skb              2311 net/ipv4/udp.c 	if (udp4_csum_init(skb, uh, proto))
skb              2314 net/ipv4/udp.c 	sk = skb_steal_sock(skb);
skb              2316 net/ipv4/udp.c 		struct dst_entry *dst = skb_dst(skb);
skb              2322 net/ipv4/udp.c 		ret = udp_unicast_rcv_skb(sk, skb, uh);
skb              2328 net/ipv4/udp.c 		return __udp4_lib_mcast_deliver(net, skb, uh,
skb              2331 net/ipv4/udp.c 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
skb              2333 net/ipv4/udp.c 		return udp_unicast_rcv_skb(sk, skb, uh);
skb              2335 net/ipv4/udp.c 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
skb              2337 net/ipv4/udp.c 	nf_reset_ct(skb);
skb              2340 net/ipv4/udp.c 	if (udp_lib_checksum_complete(skb))
skb              2344 net/ipv4/udp.c 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
skb              2350 net/ipv4/udp.c 	kfree_skb(skb);
skb              2357 net/ipv4/udp.c 			    ulen, skb->len,
skb              2373 net/ipv4/udp.c 	kfree_skb(skb);
skb              2434 net/ipv4/udp.c int udp_v4_early_demux(struct sk_buff *skb)
skb              2436 net/ipv4/udp.c 	struct net *net = dev_net(skb->dev);
skb              2442 net/ipv4/udp.c 	int dif = skb->dev->ifindex;
skb              2443 net/ipv4/udp.c 	int sdif = inet_sdif(skb);
skb              2447 net/ipv4/udp.c 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
skb              2450 net/ipv4/udp.c 	iph = ip_hdr(skb);
skb              2451 net/ipv4/udp.c 	uh = udp_hdr(skb);
skb              2453 net/ipv4/udp.c 	if (skb->pkt_type == PACKET_MULTICAST) {
skb              2454 net/ipv4/udp.c 		in_dev = __in_dev_get_rcu(skb->dev);
skb              2467 net/ipv4/udp.c 	} else if (skb->pkt_type == PACKET_HOST) {
skb              2475 net/ipv4/udp.c 	skb->sk = sk;
skb              2476 net/ipv4/udp.c 	skb->destructor = sock_efree;
skb              2488 net/ipv4/udp.c 		skb_dst_set_noref(skb, dst);
skb              2494 net/ipv4/udp.c 			return ip_mc_validate_source(skb, iph->daddr,
skb              2496 net/ipv4/udp.c 						     skb->dev, in_dev, &itag);
skb              2501 net/ipv4/udp.c int udp_rcv(struct sk_buff *skb)
skb              2503 net/ipv4/udp.c 	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
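
Much of the udp.c checksum plumbing above (udp4_hwcsum(), udp_set_csum(), udp4_csum_init()) ultimately serves the RFC 768 checksum: a ones'-complement sum over an IPv4 pseudo-header, the UDP header and the payload, inverted, with 0 reserved to mean "no checksum". A self-contained sketch of that computation; it deliberately omits the kernel's incremental and hardware-offloaded variants:

#include <stdio.h>
#include <stdint.h>

/* Sum 16-bit big-endian words; the odd trailing byte is zero-padded. */
static uint32_t sum16(const void *data, size_t len, uint32_t sum)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += (p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += p[0] << 8;
	return sum;
}

static uint16_t udp_checksum(uint32_t saddr, uint32_t daddr,
			     const uint8_t *udp, size_t len)
{
	uint32_t sum = 0;

	/* pseudo-header: src, dst, zero+proto(17), UDP length */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 17 + len;
	sum = sum16(udp, len, sum);
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	sum = ~sum & 0xffff;
	return sum ? sum : 0xffff;		/* 0 is reserved for "no csum" */
}

int main(void)
{
	/* 8-byte header (check field zeroed) + 4-byte payload */
	uint8_t dgram[12] = { 0x30, 0x39, 0x00, 0x35, 0x00, 0x0c, 0, 0,
			      'p', 'i', 'n', 'g' };

	printf("check=0x%04x\n",
	       udp_checksum(0xc0a80001, 0xc0a80002, dgram, sizeof(dgram)));
	return 0;
}
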
skb                16 net/ipv4/udp_diag.c static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
skb                24 net/ipv4/udp_diag.c 	return inet_sk_diag_fill(sk, NULL, skb, req,
skb                25 net/ipv4/udp_diag.c 			sk_user_ns(NETLINK_CB(cb->skb).sk),
skb                26 net/ipv4/udp_diag.c 			NETLINK_CB(cb->skb).portid,
skb                95 net/ipv4/udp_diag.c static void udp_dump(struct udp_table *table, struct sk_buff *skb,
skb                99 net/ipv4/udp_diag.c 	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
skb               100 net/ipv4/udp_diag.c 	struct net *net = sock_net(skb->sk);
skb               135 net/ipv4/udp_diag.c 			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
skb               149 net/ipv4/udp_diag.c static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb               152 net/ipv4/udp_diag.c 	udp_dump(&udp_table, skb, cb, r, bc);
skb               252 net/ipv4/udp_diag.c static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb               256 net/ipv4/udp_diag.c 	udp_dump(&udplite_table, skb, cb, r, bc);
skb                14 net/ipv4/udp_offload.c static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
skb                16 net/ipv4/udp_offload.c 	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
skb                20 net/ipv4/udp_offload.c 	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
skb                23 net/ipv4/udp_offload.c 	struct udphdr *uh = udp_hdr(skb);
skb                24 net/ipv4/udp_offload.c 	u16 mac_offset = skb->mac_header;
skb                25 net/ipv4/udp_offload.c 	__be16 protocol = skb->protocol;
skb                26 net/ipv4/udp_offload.c 	u16 mac_len = skb->mac_len;
skb                31 net/ipv4/udp_offload.c 	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
skb                40 net/ipv4/udp_offload.c 	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
skb                43 net/ipv4/udp_offload.c 		partial = (__force __wsum)htonl(skb->len);
skb                47 net/ipv4/udp_offload.c 	skb->encapsulation = 0;
skb                48 net/ipv4/udp_offload.c 	SKB_GSO_CB(skb)->encap_level = 0;
skb                49 net/ipv4/udp_offload.c 	__skb_pull(skb, tnl_hlen);
skb                50 net/ipv4/udp_offload.c 	skb_reset_mac_header(skb);
skb                51 net/ipv4/udp_offload.c 	skb_set_network_header(skb, skb_inner_network_offset(skb));
skb                52 net/ipv4/udp_offload.c 	skb->mac_len = skb_inner_network_offset(skb);
skb                53 net/ipv4/udp_offload.c 	skb->protocol = new_protocol;
skb                55 net/ipv4/udp_offload.c 	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
skb                56 net/ipv4/udp_offload.c 	skb->encap_hdr_csum = need_csum;
skb                58 net/ipv4/udp_offload.c 	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
skb                59 net/ipv4/udp_offload.c 	skb->remcsum_offload = remcsum;
skb                61 net/ipv4/udp_offload.c 	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
skb                65 net/ipv4/udp_offload.c 			  (skb->dev->features &
skb                69 net/ipv4/udp_offload.c 	features &= skb->dev->hw_enc_features;
skb                82 net/ipv4/udp_offload.c 	segs = gso_inner_segment(skb, features);
skb                84 net/ipv4/udp_offload.c 		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
skb                91 net/ipv4/udp_offload.c 	outer_hlen = skb_tnl_header_len(skb);
skb                93 net/ipv4/udp_offload.c 	skb = segs;
skb                98 net/ipv4/udp_offload.c 			skb->ip_summed = CHECKSUM_NONE;
skb               101 net/ipv4/udp_offload.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               102 net/ipv4/udp_offload.c 			skb_reset_inner_headers(skb);
skb               103 net/ipv4/udp_offload.c 			skb->encapsulation = 1;
skb               106 net/ipv4/udp_offload.c 		skb->mac_len = mac_len;
skb               107 net/ipv4/udp_offload.c 		skb->protocol = protocol;
skb               109 net/ipv4/udp_offload.c 		__skb_push(skb, outer_hlen);
skb               110 net/ipv4/udp_offload.c 		skb_reset_mac_header(skb);
skb               111 net/ipv4/udp_offload.c 		skb_set_network_header(skb, mac_len);
skb               112 net/ipv4/udp_offload.c 		skb_set_transport_header(skb, udp_offset);
skb               113 net/ipv4/udp_offload.c 		len = skb->len - udp_offset;
skb               114 net/ipv4/udp_offload.c 		uh = udp_hdr(skb);
skb               120 net/ipv4/udp_offload.c 		if (gso_partial && skb_is_gso(skb)) {
skb               121 net/ipv4/udp_offload.c 			uh->len = htons(skb_shinfo(skb)->gso_size +
skb               122 net/ipv4/udp_offload.c 					SKB_GSO_CB(skb)->data_offset +
skb               123 net/ipv4/udp_offload.c 					skb->head - (unsigned char *)uh);
skb               134 net/ipv4/udp_offload.c 		if (skb->encapsulation || !offload_csum) {
skb               135 net/ipv4/udp_offload.c 			uh->check = gso_make_checksum(skb, ~uh->check);
skb               139 net/ipv4/udp_offload.c 			skb->ip_summed = CHECKSUM_PARTIAL;
skb               140 net/ipv4/udp_offload.c 			skb->csum_start = skb_transport_header(skb) - skb->head;
skb               141 net/ipv4/udp_offload.c 			skb->csum_offset = offsetof(struct udphdr, check);
skb               143 net/ipv4/udp_offload.c 	} while ((skb = skb->next));
skb               148 net/ipv4/udp_offload.c struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
skb               152 net/ipv4/udp_offload.c 	__be16 protocol = skb->protocol;
skb               156 net/ipv4/udp_offload.c 	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
skb               161 net/ipv4/udp_offload.c 	switch (skb->inner_protocol_type) {
skb               163 net/ipv4/udp_offload.c 		protocol = skb->inner_protocol;
skb               168 net/ipv4/udp_offload.c 		ops = rcu_dereference(offloads[skb->inner_ipproto]);
skb               177 net/ipv4/udp_offload.c 	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
skb               288 net/ipv4/udp_offload.c static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
skb               297 net/ipv4/udp_offload.c 	if (skb->encapsulation &&
skb               298 net/ipv4/udp_offload.c 	    (skb_shinfo(skb)->gso_type &
skb               300 net/ipv4/udp_offload.c 		segs = skb_udp_tunnel_segment(skb, features, false);
skb               304 net/ipv4/udp_offload.c 	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
skb               307 net/ipv4/udp_offload.c 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
skb               310 net/ipv4/udp_offload.c 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
skb               311 net/ipv4/udp_offload.c 		return __udp_gso_segment(skb, features);
skb               313 net/ipv4/udp_offload.c 	mss = skb_shinfo(skb)->gso_size;
skb               314 net/ipv4/udp_offload.c 	if (unlikely(skb->len <= mss))
skb               322 net/ipv4/udp_offload.c 	uh = udp_hdr(skb);
skb               323 net/ipv4/udp_offload.c 	iph = ip_hdr(skb);
skb               326 net/ipv4/udp_offload.c 	csum = skb_checksum(skb, 0, skb->len, 0);
skb               327 net/ipv4/udp_offload.c 	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
skb               331 net/ipv4/udp_offload.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               337 net/ipv4/udp_offload.c 	if (!skb->encap_hdr_csum)
skb               343 net/ipv4/udp_offload.c 	segs = skb_segment(skb, features);
skb               350 net/ipv4/udp_offload.c 					       struct sk_buff *skb)
skb               352 net/ipv4/udp_offload.c 	struct udphdr *uh = udp_hdr(skb);
skb               360 net/ipv4/udp_offload.c 		NAPI_GRO_CB(skb)->flush = 1;
skb               366 net/ipv4/udp_offload.c 	if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
skb               367 net/ipv4/udp_offload.c 		NAPI_GRO_CB(skb)->flush = 1;
skb               371 net/ipv4/udp_offload.c 	skb_gro_pull(skb, sizeof(struct udphdr));
skb               372 net/ipv4/udp_offload.c 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
skb               392 net/ipv4/udp_offload.c 		if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
skb               404 net/ipv4/udp_offload.c INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
skb               406 net/ipv4/udp_offload.c struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
skb               412 net/ipv4/udp_offload.c 	unsigned int off = skb_gro_offset(skb);
skb               418 net/ipv4/udp_offload.c 				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
skb               423 net/ipv4/udp_offload.c 		pp = call_gro_receive(udp_gro_receive_segment, head, skb);
skb               428 net/ipv4/udp_offload.c 	if (NAPI_GRO_CB(skb)->encap_mark ||
skb               429 net/ipv4/udp_offload.c 	    (skb->ip_summed != CHECKSUM_PARTIAL &&
skb               430 net/ipv4/udp_offload.c 	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
skb               431 net/ipv4/udp_offload.c 	     !NAPI_GRO_CB(skb)->csum_valid) ||
skb               436 net/ipv4/udp_offload.c 	NAPI_GRO_CB(skb)->encap_mark = 1;
skb               456 net/ipv4/udp_offload.c 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb               457 net/ipv4/udp_offload.c 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
skb               458 net/ipv4/udp_offload.c 	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
skb               462 net/ipv4/udp_offload.c 	skb_gro_flush_final(skb, pp, flush);
skb               468 net/ipv4/udp_offload.c struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
skb               470 net/ipv4/udp_offload.c 	struct udphdr *uh = udp_gro_udphdr(skb);
skb               476 net/ipv4/udp_offload.c 	if (NAPI_GRO_CB(skb)->flush)
skb               479 net/ipv4/udp_offload.c 	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
skb               483 net/ipv4/udp_offload.c 		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
skb               486 net/ipv4/udp_offload.c 	NAPI_GRO_CB(skb)->is_ipv6 = 0;
skb               487 net/ipv4/udp_offload.c 	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
skb               490 net/ipv4/udp_offload.c 	NAPI_GRO_CB(skb)->flush = 1;
skb               494 net/ipv4/udp_offload.c static int udp_gro_complete_segment(struct sk_buff *skb)
skb               496 net/ipv4/udp_offload.c 	struct udphdr *uh = udp_hdr(skb);
skb               498 net/ipv4/udp_offload.c 	skb->csum_start = (unsigned char *)uh - skb->head;
skb               499 net/ipv4/udp_offload.c 	skb->csum_offset = offsetof(struct udphdr, check);
skb               500 net/ipv4/udp_offload.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb               502 net/ipv4/udp_offload.c 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
skb               503 net/ipv4/udp_offload.c 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
skb               507 net/ipv4/udp_offload.c int udp_gro_complete(struct sk_buff *skb, int nhoff,
skb               510 net/ipv4/udp_offload.c 	__be16 newlen = htons(skb->len - nhoff);
skb               511 net/ipv4/udp_offload.c 	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
skb               519 net/ipv4/udp_offload.c 				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
skb               521 net/ipv4/udp_offload.c 		err = udp_gro_complete_segment(skb);
skb               523 net/ipv4/udp_offload.c 		skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
skb               529 net/ipv4/udp_offload.c 		skb->encapsulation = 1;
skb               530 net/ipv4/udp_offload.c 		err = udp_sk(sk)->gro_complete(sk, skb,
skb               535 net/ipv4/udp_offload.c 	if (skb->remcsum_offload)
skb               536 net/ipv4/udp_offload.c 		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
skb               542 net/ipv4/udp_offload.c INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
skb               544 net/ipv4/udp_offload.c 	const struct iphdr *iph = ip_hdr(skb);
skb               545 net/ipv4/udp_offload.c 	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
skb               548 net/ipv4/udp_offload.c 		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
skb               551 net/ipv4/udp_offload.c 	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
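
For SKB_GSO_UDP_L4, the entries above show udp_send_skb() computing gso_segs with DIV_ROUND_UP and the segmentation path rewriting each resulting UDP header's length field. The per-segment arithmetic in isolation:

/*
 * Sketch of the split: a large datagram payload is cut into
 * gso_size-byte chunks and every chunk gets its own UDP header with
 * an updated length field.  Pure arithmetic, no kernel structures.
 */
#include <stdio.h>

#define UDP_HDR_LEN 8

int main(void)
{
	unsigned int payload = 3000, gso_size = 1400;
	unsigned int segs = (payload + gso_size - 1) / gso_size; /* DIV_ROUND_UP */

	printf("%u segments\n", segs);
	for (unsigned int i = 0, off = 0; i < segs; i++, off += gso_size) {
		unsigned int chunk = payload - off < gso_size ?
				     payload - off : gso_size;
		/* uh->len covers the header plus this chunk's payload */
		printf("seg %u: payload=%u uh->len=%u\n",
		       i, chunk, UDP_HDR_LEN + chunk);
	}
	return 0;
}
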
skb               173 net/ipv4/udp_tunnel.c void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
skb               180 net/ipv4/udp_tunnel.c 	__skb_push(skb, sizeof(*uh));
skb               181 net/ipv4/udp_tunnel.c 	skb_reset_transport_header(skb);
skb               182 net/ipv4/udp_tunnel.c 	uh = udp_hdr(skb);
skb               186 net/ipv4/udp_tunnel.c 	uh->len = htons(skb->len);
skb               188 net/ipv4/udp_tunnel.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb               190 net/ipv4/udp_tunnel.c 	udp_set_csum(nocheck, skb, src, dst, skb->len);
skb               192 net/ipv4/udp_tunnel.c 	iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
skb               204 net/ipv4/udp_tunnel.c struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb,  unsigned short family,
skb               211 net/ipv4/udp_tunnel.c 		tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size);
skb               213 net/ipv4/udp_tunnel.c 		tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size);
skb               218 net/ipv4/udp_tunnel.c 	info->key.tp_src = udp_hdr(skb)->source;
skb               219 net/ipv4/udp_tunnel.c 	info->key.tp_dst = udp_hdr(skb)->dest;
skb               220 net/ipv4/udp_tunnel.c 	if (udp_hdr(skb)->check)
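
udp_tunnel_xmit_skb() above encapsulates by pushing eight bytes of UDP header in front of the inner packet (__skb_push, then filling source, dest, len and check). A buffer-level sketch of that push, with the headroom layout standing in for an skb and VXLAN's port 4789 used only as an example value:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void put_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

int main(void)
{
	uint8_t buf[64];
	size_t headroom = 16, inner_len = 20;
	uint8_t *data = buf + headroom;

	memset(data, 0xab, inner_len);	/* the already-built inner packet */

	data -= 8;			/* the "__skb_push(skb, sizeof(*uh))" step */
	put_be16(data + 0, 4789);	/* source port (VXLAN, as an example) */
	put_be16(data + 2, 4789);	/* dest port */
	put_be16(data + 4, (uint16_t)(8 + inner_len)); /* uh->len: hdr + payload */
	put_be16(data + 6, 0);		/* uh->check: 0 = no checksum over IPv4 */

	printf("outer UDP len=%u\n", (unsigned)((data[4] << 8) | data[5]));
	return 0;
}
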
skb                20 net/ipv4/udplite.c static int udplite_rcv(struct sk_buff *skb)
skb                22 net/ipv4/udplite.c 	return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
skb                25 net/ipv4/udplite.c static int udplite_err(struct sk_buff *skb, u32 info)
skb                27 net/ipv4/udplite.c 	return __udp4_lib_err(skb, info, &udplite_table);
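
UDP-Lite differs from UDP only in checksum coverage (RFC 3828): the cscov/pcrlen fields seen earlier in udp.c limit the checksum to the first N bytes, so corruption beyond the coverage is deliberately tolerated. A sketch of partial coverage over a raw payload; the pseudo-header and header bytes that the real checksum also covers are omitted for brevity:

#include <stdio.h>
#include <stdint.h>

/* Internet checksum restricted to the first cov bytes of p. */
static uint16_t csum_over(const uint8_t *p, size_t cov)
{
	uint32_t sum = 0;

	for (size_t i = 0; i + 1 < cov; i += 2)
		sum += (p[i] << 8) | p[i + 1];
	if (cov & 1)
		sum += p[cov - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ~sum & 0xffff;
}

int main(void)
{
	uint8_t dgram[16] = { 1, 2, 3, 4, 5, 6, 7, 8,
			      9, 10, 11, 12, 13, 14, 15, 16 };
	size_t cscov = 8;		/* only bytes 0..7 are covered */
	uint16_t before = csum_over(dgram, cscov);

	dgram[12] ^= 0xff;		/* corrupt a byte outside the coverage */
	printf("checksum still matches: %s\n",
	       csum_over(dgram, cscov) == before ? "yes" : "no");
	return 0;
}
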
skb                21 net/ipv4/xfrm4_input.c int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
skb                23 net/ipv4/xfrm4_input.c 	return xfrm4_extract_header(skb);
skb                27 net/ipv4/xfrm4_input.c 				   struct sk_buff *skb)
skb                29 net/ipv4/xfrm4_input.c 	return dst_input(skb);
skb                33 net/ipv4/xfrm4_input.c 					 struct sk_buff *skb)
skb                35 net/ipv4/xfrm4_input.c 	if (!skb_dst(skb)) {
skb                36 net/ipv4/xfrm4_input.c 		const struct iphdr *iph = ip_hdr(skb);
skb                38 net/ipv4/xfrm4_input.c 		if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
skb                39 net/ipv4/xfrm4_input.c 					 iph->tos, skb->dev))
skb                43 net/ipv4/xfrm4_input.c 	if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish2))
skb                48 net/ipv4/xfrm4_input.c 	kfree_skb(skb);
skb                52 net/ipv4/xfrm4_input.c int xfrm4_transport_finish(struct sk_buff *skb, int async)
skb                54 net/ipv4/xfrm4_input.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb                55 net/ipv4/xfrm4_input.c 	struct iphdr *iph = ip_hdr(skb);
skb                57 net/ipv4/xfrm4_input.c 	iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol;
skb                64 net/ipv4/xfrm4_input.c 	__skb_push(skb, skb->data - skb_network_header(skb));
skb                65 net/ipv4/xfrm4_input.c 	iph->tot_len = htons(skb->len);
skb                69 net/ipv4/xfrm4_input.c 		skb_mac_header_rebuild(skb);
skb                70 net/ipv4/xfrm4_input.c 		skb_reset_transport_header(skb);
skb                75 net/ipv4/xfrm4_input.c 		dev_net(skb->dev), NULL, skb, skb->dev, NULL,
skb                87 net/ipv4/xfrm4_input.c int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
skb               104 net/ipv4/xfrm4_input.c 	len = skb->len - sizeof(struct udphdr);
skb               105 net/ipv4/xfrm4_input.c 	if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
skb               109 net/ipv4/xfrm4_input.c 	uh = udp_hdr(skb);
skb               146 net/ipv4/xfrm4_input.c 	if (skb_unclone(skb, GFP_ATOMIC))
skb               150 net/ipv4/xfrm4_input.c 	iph = ip_hdr(skb);
skb               153 net/ipv4/xfrm4_input.c 	if (skb->len < iphlen + len) {
skb               162 net/ipv4/xfrm4_input.c 	__skb_pull(skb, len);
skb               163 net/ipv4/xfrm4_input.c 	skb_reset_transport_header(skb);
skb               166 net/ipv4/xfrm4_input.c 	return xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, encap_type);
skb               169 net/ipv4/xfrm4_input.c 	kfree_skb(skb);
skb               173 net/ipv4/xfrm4_input.c int xfrm4_rcv(struct sk_buff *skb)
skb               175 net/ipv4/xfrm4_input.c 	return xfrm4_rcv_spi(skb, ip_hdr(skb)->protocol, 0);
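
xfrm4_udp_encap_rcv() above demultiplexes UDP-encapsulated IPsec traffic per RFC 3948: a one-byte 0xff datagram is a NAT keepalive, a zero SPI field is the non-ESP marker for IKE, and anything else is ESP. A hypothetical classifier capturing just that decision, not the kernel's full header surgery:

#include <stdio.h>
#include <stdint.h>

enum kind { KEEPALIVE, IKE, ESP };

static enum kind classify_espinudp(const uint8_t *payload, size_t len)
{
	if (len == 1 && payload[0] == 0xff)
		return KEEPALIVE;	/* NAT-keepalive: single 0xff octet */
	if (len >= 4 && !(payload[0] | payload[1] | payload[2] | payload[3]))
		return IKE;		/* SPI field of zero = non-ESP marker */
	return ESP;			/* hand to the ESP input path */
}

int main(void)
{
	uint8_t ka[]  = { 0xff };
	uint8_t ike[] = { 0, 0, 0, 0, 0x22, 0x20 };
	uint8_t esp[] = { 0x00, 0x00, 0x10, 0x01, 1, 2, 3, 4 };
	static const char *n[] = { "keepalive", "ike", "esp" };

	printf("%s %s %s\n",
	       n[classify_espinudp(ka, sizeof(ka))],
	       n[classify_espinudp(ike, sizeof(ike))],
	       n[classify_espinudp(esp, sizeof(esp))]);
	return 0;
}
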
skb                17 net/ipv4/xfrm4_output.c static int xfrm4_tunnel_check_size(struct sk_buff *skb)
skb                21 net/ipv4/xfrm4_output.c 	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
skb                24 net/ipv4/xfrm4_output.c 	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
skb                27 net/ipv4/xfrm4_output.c 	mtu = dst_mtu(skb_dst(skb));
skb                28 net/ipv4/xfrm4_output.c 	if ((!skb_is_gso(skb) && skb->len > mtu) ||
skb                29 net/ipv4/xfrm4_output.c 	    (skb_is_gso(skb) &&
skb                30 net/ipv4/xfrm4_output.c 	     !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
skb                31 net/ipv4/xfrm4_output.c 		skb->protocol = htons(ETH_P_IP);
skb                33 net/ipv4/xfrm4_output.c 		if (skb->sk)
skb                34 net/ipv4/xfrm4_output.c 			xfrm_local_error(skb, mtu);
skb                36 net/ipv4/xfrm4_output.c 			icmp_send(skb, ICMP_DEST_UNREACH,
skb                44 net/ipv4/xfrm4_output.c int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
skb                48 net/ipv4/xfrm4_output.c 	err = xfrm4_tunnel_check_size(skb);
skb                52 net/ipv4/xfrm4_output.c 	XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;
skb                54 net/ipv4/xfrm4_output.c 	return xfrm4_extract_header(skb);
skb                57 net/ipv4/xfrm4_output.c int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
skb                59 net/ipv4/xfrm4_output.c 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
skb                61 net/ipv4/xfrm4_output.c 	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
skb                63 net/ipv4/xfrm4_output.c 	return xfrm_output(sk, skb);
skb                66 net/ipv4/xfrm4_output.c static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                68 net/ipv4/xfrm4_output.c 	struct xfrm_state *x = skb_dst(skb)->xfrm;
skb                74 net/ipv4/xfrm4_output.c 		IPCB(skb)->flags |= IPSKB_REROUTED;
skb                75 net/ipv4/xfrm4_output.c 		return dst_output(net, sk, skb);
skb                82 net/ipv4/xfrm4_output.c 		ret = afinfo->output_finish(sk, skb);
skb                84 net/ipv4/xfrm4_output.c 		kfree_skb(skb);
skb                90 net/ipv4/xfrm4_output.c int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                93 net/ipv4/xfrm4_output.c 			    net, sk, skb, NULL, skb_dst(skb)->dev,
skb                95 net/ipv4/xfrm4_output.c 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
skb                98 net/ipv4/xfrm4_output.c void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
skb               102 net/ipv4/xfrm4_output.c 	hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
skb               103 net/ipv4/xfrm4_output.c 	ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
skb               104 net/ipv4/xfrm4_output.c 		       inet_sk(skb->sk)->inet_dport, mtu);
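
xfrm4_tunnel_check_size() above guards the encapsulation path: a DF-marked packet that will not fit the tunnel MTU cannot be fragmented, so locally generated traffic gets xfrm_local_error() while forwarded traffic gets an ICMP fragmentation-needed. The decision, sketched with illustrative field names:

#include <stdio.h>
#include <stdbool.h>

enum verdict { PASS, LOCAL_ERROR, ICMP_FRAG_NEEDED };

static enum verdict tunnel_check_size(unsigned int len, bool df_set,
				      bool locally_generated, unsigned int mtu)
{
	if (!df_set || len <= mtu)
		return PASS;		/* fragmentable, or small enough */
	return locally_generated ? LOCAL_ERROR : ICMP_FRAG_NEEDED;
}

int main(void)
{
	static const char *name[] = { "PASS", "LOCAL_ERROR", "ICMP_FRAG_NEEDED" };

	printf("%s\n", name[tunnel_check_size(1400, true, true, 1500)]);
	printf("%s\n", name[tunnel_check_size(1600, true, false, 1500)]);
	return 0;
}
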
skb               103 net/ipv4/xfrm4_policy.c 			      struct sk_buff *skb, u32 mtu,
skb               109 net/ipv4/xfrm4_policy.c 	path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
skb               113 net/ipv4/xfrm4_policy.c 			   struct sk_buff *skb)
skb               118 net/ipv4/xfrm4_policy.c 	path->ops->redirect(path, sk, skb);
skb                45 net/ipv4/xfrm4_protocol.c static int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
skb                55 net/ipv4/xfrm4_protocol.c 		if ((ret = handler->cb_handler(skb, err)) <= 0)
skb                61 net/ipv4/xfrm4_protocol.c int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
skb                68 net/ipv4/xfrm4_protocol.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
skb                69 net/ipv4/xfrm4_protocol.c 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
skb                70 net/ipv4/xfrm4_protocol.c 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
skb                76 net/ipv4/xfrm4_protocol.c 		if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
skb                80 net/ipv4/xfrm4_protocol.c 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
skb                82 net/ipv4/xfrm4_protocol.c 	kfree_skb(skb);
skb                87 net/ipv4/xfrm4_protocol.c static int xfrm4_esp_rcv(struct sk_buff *skb)
skb                92 net/ipv4/xfrm4_protocol.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
skb                95 net/ipv4/xfrm4_protocol.c 		if ((ret = handler->handler(skb)) != -EINVAL)
skb                98 net/ipv4/xfrm4_protocol.c 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
skb               100 net/ipv4/xfrm4_protocol.c 	kfree_skb(skb);
skb               104 net/ipv4/xfrm4_protocol.c static int xfrm4_esp_err(struct sk_buff *skb, u32 info)
skb               109 net/ipv4/xfrm4_protocol.c 		if (!handler->err_handler(skb, info))
skb               115 net/ipv4/xfrm4_protocol.c static int xfrm4_ah_rcv(struct sk_buff *skb)
skb               120 net/ipv4/xfrm4_protocol.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
skb               123 net/ipv4/xfrm4_protocol.c 		if ((ret = handler->handler(skb)) != -EINVAL)
skb               126 net/ipv4/xfrm4_protocol.c 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
skb               128 net/ipv4/xfrm4_protocol.c 	kfree_skb(skb);
skb               132 net/ipv4/xfrm4_protocol.c static int xfrm4_ah_err(struct sk_buff *skb, u32 info)
skb               137 net/ipv4/xfrm4_protocol.c 		if (!handler->err_handler(skb, info))
skb               143 net/ipv4/xfrm4_protocol.c static int xfrm4_ipcomp_rcv(struct sk_buff *skb)
skb               148 net/ipv4/xfrm4_protocol.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
skb               151 net/ipv4/xfrm4_protocol.c 		if ((ret = handler->handler(skb)) != -EINVAL)
skb               154 net/ipv4/xfrm4_protocol.c 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
skb               156 net/ipv4/xfrm4_protocol.c 	kfree_skb(skb);
skb               160 net/ipv4/xfrm4_protocol.c static int xfrm4_ipcomp_err(struct sk_buff *skb, u32 info)
skb               165 net/ipv4/xfrm4_protocol.c 		if (!handler->err_handler(skb, info))
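
xfrm4_rcv_encap() and the xfrm4_{esp,ah,ipcomp}_rcv() stubs above all share one dispatch shape: walk the registered handlers in priority order, where -EINVAL means "not mine, try the next one", and only when every handler declines does the caller icmp_send() a port-unreachable and free the skb. A minimal userspace model of that fall-through chain (the list and handler names are invented, not the kernel's RCU-protected ones):

    #include <errno.h>
    #include <stdio.h>

    struct handler {
        int (*fn)(int pkt);
        struct handler *next;
    };

    static int esp_handler(int pkt) { return pkt == 50 ? 0 : -EINVAL; }
    static int vti_handler(int pkt) { return pkt == 51 ? 0 : -EINVAL; }

    static int dispatch(struct handler *head, int pkt)
    {
        for (struct handler *h = head; h; h = h->next) {
            int ret = h->fn(pkt);

            if (ret != -EINVAL)     /* handled, or a hard error */
                return ret;
        }
        /* nobody claimed it: the kernel icmp_send()s and frees the skb */
        fprintf(stderr, "no handler for packet %d, drop\n", pkt);
        return 0;
    }

    int main(void)
    {
        struct handler vti = { vti_handler, NULL };
        struct handler esp = { esp_handler, &vti };

        dispatch(&esp, 51);     /* esp declines, vti takes it */
        dispatch(&esp, 99);     /* nobody claims it */
        return 0;
    }
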
skb                18 net/ipv4/xfrm4_state.c int xfrm4_extract_header(struct sk_buff *skb)
skb                20 net/ipv4/xfrm4_state.c 	const struct iphdr *iph = ip_hdr(skb);
skb                22 net/ipv4/xfrm4_state.c 	XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph);
skb                23 net/ipv4/xfrm4_state.c 	XFRM_MODE_SKB_CB(skb)->id = iph->id;
skb                24 net/ipv4/xfrm4_state.c 	XFRM_MODE_SKB_CB(skb)->frag_off = iph->frag_off;
skb                25 net/ipv4/xfrm4_state.c 	XFRM_MODE_SKB_CB(skb)->tos = iph->tos;
skb                26 net/ipv4/xfrm4_state.c 	XFRM_MODE_SKB_CB(skb)->ttl = iph->ttl;
skb                27 net/ipv4/xfrm4_state.c 	XFRM_MODE_SKB_CB(skb)->optlen = iph->ihl * 4 - sizeof(*iph);
skb                28 net/ipv4/xfrm4_state.c 	memset(XFRM_MODE_SKB_CB(skb)->flow_lbl, 0,
skb                29 net/ipv4/xfrm4_state.c 	       sizeof(XFRM_MODE_SKB_CB(skb)->flow_lbl));
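
xfrm4_extract_header() above snapshots the IPv4 header fields that the mode code must re-materialize after encapsulation rewrites the header, zeroing the IPv6-only flow label. A standalone model of that snapshot; both structs are invented stand-ins for struct iphdr and XFRM_MODE_SKB_CB:

    #include <stdint.h>
    #include <string.h>

    struct mode_cb {
        uint8_t  ihl;           /* fixed header length, in bytes */
        uint16_t id;
        uint16_t frag_off;
        uint8_t  tos, ttl;
        uint8_t  optlen;        /* IPv4 options beyond the fixed 20 */
        uint8_t  flow_lbl[3];   /* IPv6-only; zeroed for IPv4 */
    };

    struct v4hdr {
        uint8_t  ihl;           /* header length in 32-bit words */
        uint8_t  tos, ttl;
        uint16_t id, frag_off;
    };

    static void extract_header(struct mode_cb *cb, const struct v4hdr *iph)
    {
        cb->ihl      = 20;              /* sizeof the fixed IPv4 header */
        cb->id       = iph->id;
        cb->frag_off = iph->frag_off;
        cb->tos      = iph->tos;
        cb->ttl      = iph->ttl;
        cb->optlen   = iph->ihl * 4 - 20;
        memset(cb->flow_lbl, 0, sizeof(cb->flow_lbl));
    }
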
skb                16 net/ipv4/xfrm4_tunnel.c static int ipip_output(struct xfrm_state *x, struct sk_buff *skb)
skb                18 net/ipv4/xfrm4_tunnel.c 	skb_push(skb, -skb_network_offset(skb));
skb                22 net/ipv4/xfrm4_tunnel.c static int ipip_xfrm_rcv(struct xfrm_state *x, struct sk_buff *skb)
skb                24 net/ipv4/xfrm4_tunnel.c 	return ip_hdr(skb)->protocol;
skb                54 net/ipv4/xfrm4_tunnel.c static int xfrm_tunnel_rcv(struct sk_buff *skb)
skb                56 net/ipv4/xfrm4_tunnel.c 	return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr);
skb                59 net/ipv4/xfrm4_tunnel.c static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
skb               513 net/ipv6/addrconf.c static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
skb               522 net/ipv6/addrconf.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
skb               533 net/ipv6/addrconf.c 	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
skb               540 net/ipv6/addrconf.c 	    nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
skb               544 net/ipv6/addrconf.c 	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
skb               549 net/ipv6/addrconf.c 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
skb               553 net/ipv6/addrconf.c 	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
skb               558 net/ipv6/addrconf.c 	nlmsg_end(skb, nlh);
skb               562 net/ipv6/addrconf.c 	nlmsg_cancel(skb, nlh);
skb               569 net/ipv6/addrconf.c 	struct sk_buff *skb;
skb               572 net/ipv6/addrconf.c 	skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
skb               573 net/ipv6/addrconf.c 	if (!skb)
skb               576 net/ipv6/addrconf.c 	err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
skb               581 net/ipv6/addrconf.c 		kfree_skb(skb);
skb               584 net/ipv6/addrconf.c 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
skb               597 net/ipv6/addrconf.c static int inet6_netconf_valid_get_req(struct sk_buff *skb,
skb               609 net/ipv6/addrconf.c 	if (!netlink_strict_get_check(skb))
skb               644 net/ipv6/addrconf.c 	struct sk_buff *skb;
skb               677 net/ipv6/addrconf.c 	skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
skb               678 net/ipv6/addrconf.c 	if (!skb)
skb               681 net/ipv6/addrconf.c 	err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
skb               688 net/ipv6/addrconf.c 		kfree_skb(skb);
skb               691 net/ipv6/addrconf.c 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
skb               700 net/ipv6/addrconf.c static int inet6_netconf_dump_devconf(struct sk_buff *skb,
skb               704 net/ipv6/addrconf.c 	struct net *net = sock_net(skb->sk);
skb               742 net/ipv6/addrconf.c 			if (inet6_netconf_fill_devconf(skb, dev->ifindex,
skb               744 net/ipv6/addrconf.c 						       NETLINK_CB(cb->skb).portid,
skb               752 net/ipv6/addrconf.c 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb               759 net/ipv6/addrconf.c 		if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
skb               761 net/ipv6/addrconf.c 					       NETLINK_CB(cb->skb).portid,
skb               770 net/ipv6/addrconf.c 		if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
skb               772 net/ipv6/addrconf.c 					       NETLINK_CB(cb->skb).portid,
skb               784 net/ipv6/addrconf.c 	return skb->len;
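
inet6_netconf_notify_devconf() above shows the kernel's notify discipline: allocate a message sized by a companion *_msgsize() helper, run the fill routine, and treat -EMSGSIZE as a bug (the size estimator and the fill routine disagree) rather than a runtime condition, before broadcasting with rtnl_notify(). A buffer-based sketch of that contract, not real netlink:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int fill(char *buf, size_t room)
    {
        const char payload[] = "ifindex=2 forwarding=1";

        if (sizeof(payload) > room)
            return -1;          /* -EMSGSIZE analogue */
        memcpy(buf, payload, sizeof(payload));
        return 0;
    }

    static void notify(void)
    {
        size_t room = 64;       /* *_msgsize() estimate */
        char *buf = malloc(room);

        if (!buf)
            return;
        if (fill(buf, room) < 0) {  /* estimate disagreed: a bug */
            free(buf);
            return;
        }
        puts(buf);              /* rtnl_notify() analogue */
        free(buf);
    }

    int main(void)
    {
        notify();
        return 0;
    }
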
skb              2068 net/ipv6/addrconf.c void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
skb              2079 net/ipv6/addrconf.c 			     ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
skb              4569 net/ipv6/addrconf.c inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              4572 net/ipv6/addrconf.c 	struct net *net = sock_net(skb->sk);
skb              4753 net/ipv6/addrconf.c inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              4756 net/ipv6/addrconf.c 	struct net *net = sock_net(skb->sk);
skb              4854 net/ipv6/addrconf.c static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
skb              4864 net/ipv6/addrconf.c 	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
skb              4905 net/ipv6/addrconf.c static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
skb              4911 net/ipv6/addrconf.c 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
skb              4920 net/ipv6/addrconf.c 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
skb              4946 net/ipv6/addrconf.c 		if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
skb              4947 net/ipv6/addrconf.c 		    nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
skb              4950 net/ipv6/addrconf.c 		if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
skb              4954 net/ipv6/addrconf.c 	    nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
skb              4957 net/ipv6/addrconf.c 	if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
skb              4960 net/ipv6/addrconf.c 	if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
skb              4963 net/ipv6/addrconf.c 	nlmsg_end(skb, nlh);
skb              4967 net/ipv6/addrconf.c 	nlmsg_cancel(skb, nlh);
skb              4971 net/ipv6/addrconf.c static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
skb              4981 net/ipv6/addrconf.c 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
skb              4987 net/ipv6/addrconf.c 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
skb              4991 net/ipv6/addrconf.c 	if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
skb              4992 net/ipv6/addrconf.c 	    put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
skb              4994 net/ipv6/addrconf.c 		nlmsg_cancel(skb, nlh);
skb              4998 net/ipv6/addrconf.c 	nlmsg_end(skb, nlh);
skb              5002 net/ipv6/addrconf.c static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
skb              5013 net/ipv6/addrconf.c 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
skb              5019 net/ipv6/addrconf.c 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
skb              5023 net/ipv6/addrconf.c 	if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
skb              5024 net/ipv6/addrconf.c 	    put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
skb              5026 net/ipv6/addrconf.c 		nlmsg_cancel(skb, nlh);
skb              5030 net/ipv6/addrconf.c 	nlmsg_end(skb, nlh);
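
put_cacheinfo() and the inet6_fill_if*addr() helpers above all follow the same shape: reserve a header with nlmsg_put(), append attributes with nla_put_*(), commit with nlmsg_end(), and unwind with nlmsg_cancel() on any failure so a partially built record never reaches userspace. A hedged commit/rollback model over a plain buffer:

    #include <string.h>

    struct msg {
        char buf[64];
        size_t len;
    };

    static int put_attr(struct msg *m, const char *data, size_t n)
    {
        if (m->len + n > sizeof(m->buf))
            return -1;          /* -EMSGSIZE analogue */
        memcpy(m->buf + m->len, data, n);
        m->len += n;
        return 0;
    }

    static int fill_record(struct msg *m)
    {
        size_t start = m->len;  /* nlmsg_put() remembers this point */

        if (put_attr(m, "addr", 4) < 0 ||
            put_attr(m, "flags", 5) < 0) {
            m->len = start;     /* nlmsg_cancel(): roll back whole record */
            return -1;
        }
        return 0;               /* nlmsg_end() */
    }

    int main(void)
    {
        struct msg m = { .len = 0 };

        return fill_record(&m) ? 1 : 0;
    }
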
skb              5035 net/ipv6/addrconf.c static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
skb              5054 net/ipv6/addrconf.c 			err = inet6_fill_ifaddr(skb, ifa, fillargs);
skb              5057 net/ipv6/addrconf.c 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              5071 net/ipv6/addrconf.c 			err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
skb              5083 net/ipv6/addrconf.c 			err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
skb              5152 net/ipv6/addrconf.c static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
skb              5157 net/ipv6/addrconf.c 		.portid = NETLINK_CB(cb->skb).portid,
skb              5163 net/ipv6/addrconf.c 	struct net *net = sock_net(skb->sk);
skb              5178 net/ipv6/addrconf.c 						  skb->sk, cb);
skb              5191 net/ipv6/addrconf.c 				err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
skb              5214 net/ipv6/addrconf.c 			if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
skb              5229 net/ipv6/addrconf.c 	return skb->len ? : err;
skb              5232 net/ipv6/addrconf.c static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
skb              5236 net/ipv6/addrconf.c 	return inet6_dump_addr(skb, cb, type);
skb              5239 net/ipv6/addrconf.c static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
skb              5243 net/ipv6/addrconf.c 	return inet6_dump_addr(skb, cb, type);
skb              5247 net/ipv6/addrconf.c static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
skb              5251 net/ipv6/addrconf.c 	return inet6_dump_addr(skb, cb, type);
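
in6_dump_addrs() and the inet6_dump_*addr() wrappers above implement netlink's resumable dump contract: fill until the buffer is full, stash the position in the callback's scratch space, and return a positive length (skb->len) so the core calls back for the next page; return 0 or an error only when done, which is what the `return skb->len ? : err;` earlier expresses. A small model of that resumable pagination, with invented names:

    #include <stdio.h>

    struct dump_cb { int next_idx; };   /* cb->args[] analogue */

    /* Emit up to `room` items per call; return count emitted, 0 when done. */
    static int dump_page(struct dump_cb *cb, int total, int room)
    {
        int emitted = 0;

        while (cb->next_idx < total && emitted < room) {
            printf("item %d\n", cb->next_idx);
            cb->next_idx++;
            emitted++;
        }
        return emitted;         /* >0 means "call me again" */
    }

    int main(void)
    {
        struct dump_cb cb = { 0 };

        while (dump_page(&cb, 7, 3) > 0)
            ;                   /* the netlink core loops like this */
        return 0;
    }
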
skb              5254 net/ipv6/addrconf.c static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
skb              5267 net/ipv6/addrconf.c 	if (!netlink_strict_get_check(skb))
skb              5317 net/ipv6/addrconf.c 	struct sk_buff *skb;
skb              5347 net/ipv6/addrconf.c 	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
skb              5348 net/ipv6/addrconf.c 	if (!skb) {
skb              5353 net/ipv6/addrconf.c 	err = inet6_fill_ifaddr(skb, ifa, &fillargs);
skb              5357 net/ipv6/addrconf.c 		kfree_skb(skb);
skb              5360 net/ipv6/addrconf.c 	err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
skb              5374 net/ipv6/addrconf.c 	struct sk_buff *skb;
skb              5385 net/ipv6/addrconf.c 	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
skb              5386 net/ipv6/addrconf.c 	if (!skb)
skb              5389 net/ipv6/addrconf.c 	err = inet6_fill_ifaddr(skb, ifa, &fillargs);
skb              5393 net/ipv6/addrconf.c 		kfree_skb(skb);
skb              5396 net/ipv6/addrconf.c 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
skb              5551 net/ipv6/addrconf.c static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
skb              5557 net/ipv6/addrconf.c 	if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
skb              5563 net/ipv6/addrconf.c 	if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
skb              5565 net/ipv6/addrconf.c 	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
skb              5575 net/ipv6/addrconf.c 	nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
skb              5580 net/ipv6/addrconf.c 	nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
skb              5585 net/ipv6/addrconf.c 	nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
skb              5589 net/ipv6/addrconf.c 	if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
skb              5611 net/ipv6/addrconf.c static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
skb              5619 net/ipv6/addrconf.c 	if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
skb              5776 net/ipv6/addrconf.c static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
skb              5784 net/ipv6/addrconf.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
skb              5796 net/ipv6/addrconf.c 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
skb              5798 net/ipv6/addrconf.c 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
skb              5799 net/ipv6/addrconf.c 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
skb              5801 net/ipv6/addrconf.c 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
skb              5802 net/ipv6/addrconf.c 	    nla_put_u8(skb, IFLA_OPERSTATE,
skb              5805 net/ipv6/addrconf.c 	protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
skb              5809 net/ipv6/addrconf.c 	if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
skb              5812 net/ipv6/addrconf.c 	nla_nest_end(skb, protoinfo);
skb              5813 net/ipv6/addrconf.c 	nlmsg_end(skb, nlh);
skb              5817 net/ipv6/addrconf.c 	nlmsg_cancel(skb, nlh);
skb              5846 net/ipv6/addrconf.c static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
skb              5848 net/ipv6/addrconf.c 	struct net *net = sock_net(skb->sk);
skb              5878 net/ipv6/addrconf.c 			if (inet6_fill_ifinfo(skb, idev,
skb              5879 net/ipv6/addrconf.c 					      NETLINK_CB(cb->skb).portid,
skb              5892 net/ipv6/addrconf.c 	return skb->len;
skb              5897 net/ipv6/addrconf.c 	struct sk_buff *skb;
skb              5901 net/ipv6/addrconf.c 	skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
skb              5902 net/ipv6/addrconf.c 	if (!skb)
skb              5905 net/ipv6/addrconf.c 	err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
skb              5909 net/ipv6/addrconf.c 		kfree_skb(skb);
skb              5912 net/ipv6/addrconf.c 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
skb              5926 net/ipv6/addrconf.c static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
skb              5934 net/ipv6/addrconf.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
skb              5952 net/ipv6/addrconf.c 	if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
skb              5956 net/ipv6/addrconf.c 	if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
skb              5958 net/ipv6/addrconf.c 	nlmsg_end(skb, nlh);
skb              5962 net/ipv6/addrconf.c 	nlmsg_cancel(skb, nlh);
skb              5969 net/ipv6/addrconf.c 	struct sk_buff *skb;
skb              5973 net/ipv6/addrconf.c 	skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
skb              5974 net/ipv6/addrconf.c 	if (!skb)
skb              5977 net/ipv6/addrconf.c 	err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
skb              5981 net/ipv6/addrconf.c 		kfree_skb(skb);
skb              5984 net/ipv6/addrconf.c 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
skb               140 net/ipv6/addrconf_core.c static int eafnosupport_ipv6_route_input(struct sk_buff *skb)
skb               168 net/ipv6/addrconf_core.c 			      const struct sk_buff *skb, int strict)
skb               376 net/ipv6/addrlabel.c static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               379 net/ipv6/addrlabel.c 	struct net *net = sock_net(skb->sk);
skb               438 net/ipv6/addrlabel.c static int ip6addrlbl_fill(struct sk_buff *skb,
skb               444 net/ipv6/addrlabel.c 	struct nlmsghdr *nlh = nlmsg_put(skb, portid, seq, event,
skb               451 net/ipv6/addrlabel.c 	if (nla_put_in6_addr(skb, IFAL_ADDRESS, &p->prefix) < 0 ||
skb               452 net/ipv6/addrlabel.c 	    nla_put_u32(skb, IFAL_LABEL, p->label) < 0) {
skb               453 net/ipv6/addrlabel.c 		nlmsg_cancel(skb, nlh);
skb               457 net/ipv6/addrlabel.c 	nlmsg_end(skb, nlh);
skb               486 net/ipv6/addrlabel.c static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               489 net/ipv6/addrlabel.c 	struct net *net = sock_net(skb->sk);
skb               503 net/ipv6/addrlabel.c 			err = ip6addrlbl_fill(skb, p,
skb               505 net/ipv6/addrlabel.c 					      NETLINK_CB(cb->skb).portid,
skb               516 net/ipv6/addrlabel.c 	return skb->len;
skb               526 net/ipv6/addrlabel.c static int ip6addrlbl_valid_get_req(struct sk_buff *skb,
skb               539 net/ipv6/addrlabel.c 	if (!netlink_strict_get_check(skb))
skb               580 net/ipv6/addrlabel.c 	struct sk_buff *skb;
skb               600 net/ipv6/addrlabel.c 	skb = nlmsg_new(ip6addrlbl_msgsize(), GFP_KERNEL);
skb               601 net/ipv6/addrlabel.c 	if (!skb)
skb               610 net/ipv6/addrlabel.c 		err = ip6addrlbl_fill(skb, p, lseq,
skb               618 net/ipv6/addrlabel.c 		kfree_skb(skb);
skb               620 net/ipv6/addrlabel.c 		err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
skb               477 net/ipv6/af_inet6.c 	struct sk_buff *skb;
skb               482 net/ipv6/af_inet6.c 	skb = xchg(&np->pktoptions, NULL);
skb               483 net/ipv6/af_inet6.c 	kfree_skb(skb);
skb               485 net/ipv6/af_inet6.c 	skb = xchg(&np->rxpmtu, NULL);
skb               486 net/ipv6/af_inet6.c 	kfree_skb(skb);
skb               782 net/ipv6/af_inet6.c bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
skb               790 net/ipv6/af_inet6.c 		    (ip6_flowinfo((struct ipv6hdr *) skb_network_header(skb)) &&
skb               940 net/ipv6/af_inet6.c static int ipv6_route_input(struct sk_buff *skb)
skb               942 net/ipv6/af_inet6.c 	ip6_route_input(skb);
skb               943 net/ipv6/af_inet6.c 	return skb_dst(skb)->error;
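
The xchg(&np->pktoptions, NULL) idiom in the af_inet6.c teardown above detaches a queued buffer atomically, so the destructor and a concurrent producer can never both end up freeing the same skb (kfree_skb(NULL), like free(NULL), is a no-op). The C11 shape of that handoff, as a sketch:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct buf { int data; };

    static _Atomic(struct buf *) slot;

    static void destroy_slot(void)
    {
        /* atomically take ownership; free(NULL) is harmless */
        struct buf *b = atomic_exchange(&slot, NULL);

        free(b);
    }

    int main(void)
    {
        atomic_store(&slot, malloc(sizeof(struct buf)));
        destroy_slot();     /* takes and frees the buffer */
        destroy_slot();     /* second caller sees NULL: no double free */
        return 0;
    }
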
skb               292 net/ipv6/ah6.c 	struct sk_buff *skb = base->data;
skb               293 net/ipv6/ah6.c 	struct xfrm_state *x = skb_dst(skb)->xfrm;
skb               295 net/ipv6/ah6.c 	struct ipv6hdr *top_iph = ipv6_hdr(skb);
skb               296 net/ipv6/ah6.c 	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
skb               299 net/ipv6/ah6.c 	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
skb               303 net/ipv6/ah6.c 	iph_base = AH_SKB_CB(skb)->tmp;
skb               318 net/ipv6/ah6.c 	kfree(AH_SKB_CB(skb)->tmp);
skb               319 net/ipv6/ah6.c 	xfrm_output_resume(skb, err);
skb               322 net/ipv6/ah6.c static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
skb               346 net/ipv6/ah6.c 	err = skb_cow_data(skb, 0, &trailer);
skb               351 net/ipv6/ah6.c 	skb_push(skb, -skb_network_offset(skb));
skb               352 net/ipv6/ah6.c 	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
skb               373 net/ipv6/ah6.c 	ah = ip_auth_hdr(skb);
skb               376 net/ipv6/ah6.c 	top_iph = ipv6_hdr(skb);
skb               377 net/ipv6/ah6.c 	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));
skb               379 net/ipv6/ah6.c 	nexthdr = *skb_mac_header(skb);
skb               380 net/ipv6/ah6.c 	*skb_mac_header(skb) = IPPROTO_AH;
skb               413 net/ipv6/ah6.c 	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
skb               416 net/ipv6/ah6.c 	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
skb               422 net/ipv6/ah6.c 		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
skb               425 net/ipv6/ah6.c 	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
skb               426 net/ipv6/ah6.c 	ahash_request_set_callback(req, 0, ah6_output_done, skb);
skb               428 net/ipv6/ah6.c 	AH_SKB_CB(skb)->tmp = iph_base;
skb               462 net/ipv6/ah6.c 	struct sk_buff *skb = base->data;
skb               463 net/ipv6/ah6.c 	struct xfrm_state *x = xfrm_input_state(skb);
skb               465 net/ipv6/ah6.c 	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
skb               466 net/ipv6/ah6.c 	int hdr_len = skb_network_header_len(skb);
skb               472 net/ipv6/ah6.c 	work_iph = AH_SKB_CB(skb)->tmp;
skb               482 net/ipv6/ah6.c 	skb->network_header += ah_hlen;
skb               483 net/ipv6/ah6.c 	memcpy(skb_network_header(skb), work_iph, hdr_len);
skb               484 net/ipv6/ah6.c 	__skb_pull(skb, ah_hlen + hdr_len);
skb               486 net/ipv6/ah6.c 		skb_reset_transport_header(skb);
skb               488 net/ipv6/ah6.c 		skb_set_transport_header(skb, -hdr_len);
skb               490 net/ipv6/ah6.c 	kfree(AH_SKB_CB(skb)->tmp);
skb               491 net/ipv6/ah6.c 	xfrm_input_resume(skb, err);
skb               496 net/ipv6/ah6.c static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
skb               533 net/ipv6/ah6.c 	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
skb               538 net/ipv6/ah6.c 	if (skb_unclone(skb, GFP_ATOMIC))
skb               541 net/ipv6/ah6.c 	skb->ip_summed = CHECKSUM_NONE;
skb               543 net/ipv6/ah6.c 	hdr_len = skb_network_header_len(skb);
skb               544 net/ipv6/ah6.c 	ah = (struct ip_auth_hdr *)skb->data;
skb               555 net/ipv6/ah6.c 	if (!pskb_may_pull(skb, ah_hlen))
skb               558 net/ipv6/ah6.c 	err = skb_cow_data(skb, 0, &trailer);
skb               563 net/ipv6/ah6.c 	ah = (struct ip_auth_hdr *)skb->data;
skb               564 net/ipv6/ah6.c 	ip6h = ipv6_hdr(skb);
skb               566 net/ipv6/ah6.c 	skb_push(skb, hdr_len);
skb               601 net/ipv6/ah6.c 	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
skb               607 net/ipv6/ah6.c 		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
skb               611 net/ipv6/ah6.c 	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
skb               612 net/ipv6/ah6.c 	ahash_request_set_callback(req, 0, ah6_input_done, skb);
skb               614 net/ipv6/ah6.c 	AH_SKB_CB(skb)->tmp = work_iph;
skb               628 net/ipv6/ah6.c 	skb->network_header += ah_hlen;
skb               629 net/ipv6/ah6.c 	memcpy(skb_network_header(skb), work_iph, hdr_len);
skb               630 net/ipv6/ah6.c 	__skb_pull(skb, ah_hlen + hdr_len);
skb               633 net/ipv6/ah6.c 		skb_reset_transport_header(skb);
skb               635 net/ipv6/ah6.c 		skb_set_transport_header(skb, -hdr_len);
skb               645 net/ipv6/ah6.c static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               648 net/ipv6/ah6.c 	struct net *net = dev_net(skb->dev);
skb               649 net/ipv6/ah6.c 	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
skb               650 net/ipv6/ah6.c 	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset);
skb               657 net/ipv6/ah6.c 	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
skb               662 net/ipv6/ah6.c 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
skb               665 net/ipv6/ah6.c 		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
skb               751 net/ipv6/ah6.c static int ah6_rcv_cb(struct sk_buff *skb, int err)
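
ah6_output() and ah6_input() above run the hash asynchronously: the header bytes that must be restored are parked in AH_SKB_CB(skb)->tmp, the ahash request is fired with ah6_output_done()/ah6_input_done() as the completion, and an -EINPROGRESS return means the callback now owns restoration, the kfree() of tmp, and the xfrm_*_resume() call. A hedged model of that ownership handoff (all names invented):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct req {
        void (*done)(struct req *, int err);
        void *tmp;              /* saved state to restore and free */
    };

    static void output_done(struct req *r, int err)
    {
        /* restoring the saved headers from r->tmp would happen here */
        free(r->tmp);           /* kfree(AH_SKB_CB(skb)->tmp) analogue */
        printf("resumed with err=%d\n", err);
    }

    static int submit(struct req *r, int async)
    {
        if (async)
            return -EINPROGRESS;    /* r->done fires later, owns cleanup */
        r->done(r, 0);              /* sync path completes inline */
        return 0;
    }

    int main(void)
    {
        struct req r = { output_done, malloc(16) };

        submit(&r, 0);          /* sync: done() already ran and freed tmp */
        return 0;
    }
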
skb               574 net/ipv6/calipso.c bool calipso_validate(const struct sk_buff *skb, const unsigned char *option)
skb              1272 net/ipv6/calipso.c static unsigned char *calipso_skbuff_optptr(const struct sk_buff *skb)
skb              1274 net/ipv6/calipso.c 	const struct ipv6hdr *ip6_hdr = ipv6_hdr(skb);
skb              1280 net/ipv6/calipso.c 	offset = ipv6_find_tlv(skb, sizeof(*ip6_hdr), IPV6_TLV_CALIPSO);
skb              1298 net/ipv6/calipso.c static int calipso_skbuff_setattr(struct sk_buff *skb,
skb              1309 net/ipv6/calipso.c 	ip6_hdr = ipv6_hdr(skb);
skb              1329 net/ipv6/calipso.c 	ret_val = skb_cow(skb, skb_headroom(skb) + len_delta);
skb              1333 net/ipv6/calipso.c 	ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
skb              1337 net/ipv6/calipso.c 			skb_push(skb, len_delta);
skb              1339 net/ipv6/calipso.c 			skb_pull(skb, -len_delta);
skb              1342 net/ipv6/calipso.c 		skb_reset_network_header(skb);
skb              1343 net/ipv6/calipso.c 		ip6_hdr = ipv6_hdr(skb);
skb              1373 net/ipv6/calipso.c static int calipso_skbuff_delattr(struct sk_buff *skb)
skb              1380 net/ipv6/calipso.c 	if (!calipso_skbuff_optptr(skb))
skb              1384 net/ipv6/calipso.c 	ret_val = skb_cow(skb, skb_headroom(skb));
skb              1388 net/ipv6/calipso.c 	ip6_hdr = ipv6_hdr(skb);
skb              1412 net/ipv6/calipso.c 		skb_pull(skb, delta);
skb              1414 net/ipv6/calipso.c 		skb_reset_network_header(skb);
skb               287 net/ipv6/datagram.c void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
skb               291 net/ipv6/datagram.c 	struct icmp6hdr *icmph = icmp6_hdr(skb);
skb               297 net/ipv6/datagram.c 	skb = skb_clone(skb, GFP_ATOMIC);
skb               298 net/ipv6/datagram.c 	if (!skb)
skb               301 net/ipv6/datagram.c 	skb->protocol = htons(ETH_P_IPV6);
skb               303 net/ipv6/datagram.c 	serr = SKB_EXT_ERR(skb);
skb               312 net/ipv6/datagram.c 				  skb_network_header(skb);
skb               315 net/ipv6/datagram.c 	__skb_pull(skb, payload - skb->data);
skb               316 net/ipv6/datagram.c 	skb_reset_transport_header(skb);
skb               318 net/ipv6/datagram.c 	if (sock_queue_err_skb(sk, skb))
skb               319 net/ipv6/datagram.c 		kfree_skb(skb);
skb               327 net/ipv6/datagram.c 	struct sk_buff *skb;
skb               332 net/ipv6/datagram.c 	skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
skb               333 net/ipv6/datagram.c 	if (!skb)
skb               336 net/ipv6/datagram.c 	skb->protocol = htons(ETH_P_IPV6);
skb               338 net/ipv6/datagram.c 	skb_put(skb, sizeof(struct ipv6hdr));
skb               339 net/ipv6/datagram.c 	skb_reset_network_header(skb);
skb               340 net/ipv6/datagram.c 	iph = ipv6_hdr(skb);
skb               344 net/ipv6/datagram.c 	serr = SKB_EXT_ERR(skb);
skb               352 net/ipv6/datagram.c 	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
skb               355 net/ipv6/datagram.c 	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
skb               356 net/ipv6/datagram.c 	skb_reset_transport_header(skb);
skb               358 net/ipv6/datagram.c 	if (sock_queue_err_skb(sk, skb))
skb               359 net/ipv6/datagram.c 		kfree_skb(skb);
skb               366 net/ipv6/datagram.c 	struct sk_buff *skb;
skb               372 net/ipv6/datagram.c 	skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
skb               373 net/ipv6/datagram.c 	if (!skb)
skb               376 net/ipv6/datagram.c 	skb_put(skb, sizeof(struct ipv6hdr));
skb               377 net/ipv6/datagram.c 	skb_reset_network_header(skb);
skb               378 net/ipv6/datagram.c 	iph = ipv6_hdr(skb);
skb               381 net/ipv6/datagram.c 	mtu_info = IP6CBMTU(skb);
skb               388 net/ipv6/datagram.c 	mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr;
skb               390 net/ipv6/datagram.c 	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
skb               391 net/ipv6/datagram.c 	skb_reset_transport_header(skb);
skb               393 net/ipv6/datagram.c 	skb = xchg(&np->rxpmtu, skb);
skb               394 net/ipv6/datagram.c 	kfree_skb(skb);
skb               413 net/ipv6/datagram.c static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
skb               423 net/ipv6/datagram.c 	if (!IP6CB(skb)->iif)
skb               436 net/ipv6/datagram.c 	struct sk_buff *skb;
skb               446 net/ipv6/datagram.c 	skb = sock_dequeue_err_skb(sk);
skb               447 net/ipv6/datagram.c 	if (!skb)
skb               450 net/ipv6/datagram.c 	copied = skb->len;
skb               455 net/ipv6/datagram.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               457 net/ipv6/datagram.c 		kfree_skb(skb);
skb               460 net/ipv6/datagram.c 	sock_recv_timestamp(msg, sk, skb);
skb               462 net/ipv6/datagram.c 	serr = SKB_EXT_ERR(skb);
skb               465 net/ipv6/datagram.c 		const unsigned char *nh = skb_network_header(skb);
skb               469 net/ipv6/datagram.c 		if (skb->protocol == htons(ETH_P_IPV6)) {
skb               477 net/ipv6/datagram.c 						    IP6CB(skb)->iif);
skb               490 net/ipv6/datagram.c 	if (ip6_datagram_support_cmsg(skb, serr)) {
skb               493 net/ipv6/datagram.c 			ip6_datagram_recv_common_ctl(sk, msg, skb);
skb               494 net/ipv6/datagram.c 		if (skb->protocol == htons(ETH_P_IPV6)) {
skb               495 net/ipv6/datagram.c 			sin->sin6_addr = ipv6_hdr(skb)->saddr;
skb               497 net/ipv6/datagram.c 				ip6_datagram_recv_specific_ctl(sk, msg, skb);
skb               500 net/ipv6/datagram.c 						    IP6CB(skb)->iif);
skb               502 net/ipv6/datagram.c 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
skb               505 net/ipv6/datagram.c 				ip_cmsg_recv(msg, skb);
skb               516 net/ipv6/datagram.c 	consume_skb(skb);
skb               529 net/ipv6/datagram.c 	struct sk_buff *skb;
skb               536 net/ipv6/datagram.c 	skb = xchg(&np->rxpmtu, NULL);
skb               537 net/ipv6/datagram.c 	if (!skb)
skb               540 net/ipv6/datagram.c 	copied = skb->len;
skb               545 net/ipv6/datagram.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               549 net/ipv6/datagram.c 	sock_recv_timestamp(msg, sk, skb);
skb               551 net/ipv6/datagram.c 	memcpy(&mtu_info, IP6CBMTU(skb), sizeof(mtu_info));
skb               567 net/ipv6/datagram.c 	kfree_skb(skb);
skb               574 net/ipv6/datagram.c 				 struct sk_buff *skb)
skb               577 net/ipv6/datagram.c 	bool is_ipv6 = skb->protocol == htons(ETH_P_IPV6);
skb               583 net/ipv6/datagram.c 			src_info.ipi6_ifindex = IP6CB(skb)->iif;
skb               584 net/ipv6/datagram.c 			src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
skb               587 net/ipv6/datagram.c 				PKTINFO_SKB_CB(skb)->ipi_ifindex;
skb               588 net/ipv6/datagram.c 			ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
skb               599 net/ipv6/datagram.c 				    struct sk_buff *skb)
skb               602 net/ipv6/datagram.c 	struct inet6_skb_parm *opt = IP6CB(skb);
skb               603 net/ipv6/datagram.c 	unsigned char *nh = skb_network_header(skb);
skb               606 net/ipv6/datagram.c 		int hlim = ipv6_hdr(skb)->hop_limit;
skb               611 net/ipv6/datagram.c 		int tclass = ipv6_get_dsfield(ipv6_hdr(skb));
skb               639 net/ipv6/datagram.c 		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
skb               677 net/ipv6/datagram.c 		src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
skb               681 net/ipv6/datagram.c 		int hlim = ipv6_hdr(skb)->hop_limit;
skb               704 net/ipv6/datagram.c 		ports = skb_header_pointer(skb, skb_transport_offset(skb),
skb               712 net/ipv6/datagram.c 			sin6.sin6_addr = ipv6_hdr(skb)->daddr;
skb               716 net/ipv6/datagram.c 				ipv6_iface_scope_id(&ipv6_hdr(skb)->daddr,
skb               730 net/ipv6/datagram.c 			  struct sk_buff *skb)
skb               732 net/ipv6/datagram.c 	ip6_datagram_recv_common_ctl(sk, msg, skb);
skb               733 net/ipv6/datagram.c 	ip6_datagram_recv_specific_ctl(sk, msg, skb);
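
ipv6_icmp_error() and ipv6_local_error() above clone or build an skb, attach a sock_extended_err, and park it on the socket's error queue with sock_queue_err_skb(); ipv6_recv_error() is the in-kernel drain that recvmsg(MSG_ERRQUEUE) reaches once IPV6_RECVERR is enabled. A minimal userspace consumer sketch (socket setup and error handling trimmed):

    #include <linux/errqueue.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    void drain_errqueue(int fd)
    {
        char cbuf[512], payload[1280];
        struct iovec iov = { payload, sizeof(payload) };
        struct msghdr msg = { 0 };
        struct cmsghdr *cm;

        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = cbuf;
        msg.msg_controllen = sizeof(cbuf);

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
            return;
        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
            if (cm->cmsg_level == SOL_IPV6 &&
                cm->cmsg_type == IPV6_RECVERR) {
                struct sock_extended_err ee;

                memcpy(&ee, CMSG_DATA(cm), sizeof(ee));
                /* ee.ee_errno / ee.ee_origin / ee.ee_info (path MTU) */
            }
        }
    }
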
skb               129 net/ipv6/esp6.c 	struct sk_buff *skb = base->data;
skb               130 net/ipv6/esp6.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               135 net/ipv6/esp6.c 		struct sec_path *sp = skb_sec_path(skb);
skb               139 net/ipv6/esp6.c 		x = skb_dst(skb)->xfrm;
skb               142 net/ipv6/esp6.c 	tmp = ESP_SKB_CB(skb)->tmp;
skb               149 net/ipv6/esp6.c 			kfree_skb(skb);
skb               153 net/ipv6/esp6.c 		skb_push(skb, skb->data - skb_mac_header(skb));
skb               154 net/ipv6/esp6.c 		secpath_reset(skb);
skb               155 net/ipv6/esp6.c 		xfrm_dev_resume(skb);
skb               157 net/ipv6/esp6.c 		xfrm_output_resume(skb, err);
skb               162 net/ipv6/esp6.c static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
skb               164 net/ipv6/esp6.c 	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
skb               165 net/ipv6/esp6.c 	void *tmp = ESP_SKB_CB(skb)->tmp;
skb               172 net/ipv6/esp6.c static void esp_output_restore_header(struct sk_buff *skb)
skb               174 net/ipv6/esp6.c 	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
skb               177 net/ipv6/esp6.c static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
skb               187 net/ipv6/esp6.c 		struct xfrm_offload *xo = xfrm_offload(skb);
skb               189 net/ipv6/esp6.c 		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
skb               194 net/ipv6/esp6.c 			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
skb               204 net/ipv6/esp6.c 	struct sk_buff *skb = base->data;
skb               206 net/ipv6/esp6.c 	esp_output_restore_header(skb);
skb               226 net/ipv6/esp6.c int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
skb               235 net/ipv6/esp6.c 	if (!skb_cloned(skb)) {
skb               236 net/ipv6/esp6.c 		if (tailen <= skb_tailroom(skb)) {
skb               238 net/ipv6/esp6.c 			trailer = skb;
skb               242 net/ipv6/esp6.c 		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
skb               243 net/ipv6/esp6.c 			   && !skb_has_frag_list(skb)) {
skb               245 net/ipv6/esp6.c 			struct sock *sk = skb->sk;
skb               270 net/ipv6/esp6.c 			nfrags = skb_shinfo(skb)->nr_frags;
skb               272 net/ipv6/esp6.c 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
skb               274 net/ipv6/esp6.c 			skb_shinfo(skb)->nr_frags = ++nfrags;
skb               282 net/ipv6/esp6.c 			skb->len += tailen;
skb               283 net/ipv6/esp6.c 			skb->data_len += tailen;
skb               284 net/ipv6/esp6.c 			skb->truesize += tailen;
skb               293 net/ipv6/esp6.c 	nfrags = skb_cow_data(skb, tailen, &trailer);
skb               300 net/ipv6/esp6.c 	pskb_put(skb, trailer, tailen);
skb               307 net/ipv6/esp6.c int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
skb               349 net/ipv6/esp6.c 	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);
skb               352 net/ipv6/esp6.c 	err = skb_to_sgvec(skb, sg,
skb               353 net/ipv6/esp6.c 		           (unsigned char *)esph - skb->data,
skb               362 net/ipv6/esp6.c 		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
skb               370 net/ipv6/esp6.c 		skb_shinfo(skb)->nr_frags = 1;
skb               375 net/ipv6/esp6.c 		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
skb               379 net/ipv6/esp6.c 		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
skb               380 net/ipv6/esp6.c 		err = skb_to_sgvec(skb, dsg,
skb               381 net/ipv6/esp6.c 			           (unsigned char *)esph - skb->data,
skb               388 net/ipv6/esp6.c 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
skb               390 net/ipv6/esp6.c 		aead_request_set_callback(req, 0, esp_output_done, skb);
skb               399 net/ipv6/esp6.c 	ESP_SKB_CB(skb)->tmp = tmp;
skb               412 net/ipv6/esp6.c 			esp_output_restore_header(skb);
skb               425 net/ipv6/esp6.c static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
skb               435 net/ipv6/esp6.c 	esp.proto = *skb_mac_header(skb);
skb               436 net/ipv6/esp6.c 	*skb_mac_header(skb) = IPPROTO_ESP;
skb               445 net/ipv6/esp6.c 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
skb               449 net/ipv6/esp6.c 		if (skb->len < padto)
skb               450 net/ipv6/esp6.c 			esp.tfclen = padto - skb->len;
skb               453 net/ipv6/esp6.c 	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
skb               454 net/ipv6/esp6.c 	esp.plen = esp.clen - skb->len - esp.tfclen;
skb               457 net/ipv6/esp6.c 	esp.nfrags = esp6_output_head(x, skb, &esp);
skb               461 net/ipv6/esp6.c 	esph = ip_esp_hdr(skb);
skb               464 net/ipv6/esp6.c 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
skb               465 net/ipv6/esp6.c 	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
skb               466 net/ipv6/esp6.c 			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
skb               468 net/ipv6/esp6.c 	skb_push(skb, -skb_network_offset(skb));
skb               470 net/ipv6/esp6.c 	return esp6_output_tail(x, skb, &esp);
skb               473 net/ipv6/esp6.c static inline int esp_remove_trailer(struct sk_buff *skb)
skb               475 net/ipv6/esp6.c 	struct xfrm_state *x = xfrm_input_state(skb);
skb               476 net/ipv6/esp6.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               486 net/ipv6/esp6.c 	elen = skb->len - hlen;
skb               493 net/ipv6/esp6.c 	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
skb               505 net/ipv6/esp6.c 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb               506 net/ipv6/esp6.c 		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
skb               507 net/ipv6/esp6.c 		skb->csum = csum_block_sub(skb->csum, csumdiff,
skb               508 net/ipv6/esp6.c 					   skb->len - trimlen);
skb               510 net/ipv6/esp6.c 	pskb_trim(skb, skb->len - trimlen);
skb               518 net/ipv6/esp6.c int esp6_input_done2(struct sk_buff *skb, int err)
skb               520 net/ipv6/esp6.c 	struct xfrm_state *x = xfrm_input_state(skb);
skb               521 net/ipv6/esp6.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               524 net/ipv6/esp6.c 	int hdr_len = skb_network_header_len(skb);
skb               527 net/ipv6/esp6.c 		kfree(ESP_SKB_CB(skb)->tmp);
skb               532 net/ipv6/esp6.c 	err = esp_remove_trailer(skb);
skb               536 net/ipv6/esp6.c 	skb_postpull_rcsum(skb, skb_network_header(skb),
skb               537 net/ipv6/esp6.c 			   skb_network_header_len(skb));
skb               538 net/ipv6/esp6.c 	skb_pull_rcsum(skb, hlen);
skb               540 net/ipv6/esp6.c 		skb_reset_transport_header(skb);
skb               542 net/ipv6/esp6.c 		skb_set_transport_header(skb, -hdr_len);
skb               555 net/ipv6/esp6.c 	struct sk_buff *skb = base->data;
skb               557 net/ipv6/esp6.c 	xfrm_input_resume(skb, esp6_input_done2(skb, err));
skb               560 net/ipv6/esp6.c static void esp_input_restore_header(struct sk_buff *skb)
skb               562 net/ipv6/esp6.c 	esp_restore_header(skb, 0);
skb               563 net/ipv6/esp6.c 	__skb_pull(skb, 4);
skb               566 net/ipv6/esp6.c static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
skb               568 net/ipv6/esp6.c 	struct xfrm_state *x = xfrm_input_state(skb);
skb               575 net/ipv6/esp6.c 		struct ip_esp_hdr *esph = skb_push(skb, 4);
skb               579 net/ipv6/esp6.c 		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
skb               585 net/ipv6/esp6.c 	struct sk_buff *skb = base->data;
skb               587 net/ipv6/esp6.c 	esp_input_restore_header(skb);
skb               591 net/ipv6/esp6.c static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
skb               597 net/ipv6/esp6.c 	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
skb               607 net/ipv6/esp6.c 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
skb               625 net/ipv6/esp6.c 	if (!skb_cloned(skb)) {
skb               626 net/ipv6/esp6.c 		if (!skb_is_nonlinear(skb)) {
skb               630 net/ipv6/esp6.c 		} else if (!skb_has_frag_list(skb)) {
skb               631 net/ipv6/esp6.c 			nfrags = skb_shinfo(skb)->nr_frags;
skb               638 net/ipv6/esp6.c 	nfrags = skb_cow_data(skb, 0, &trailer);
skb               650 net/ipv6/esp6.c 	ESP_SKB_CB(skb)->tmp = tmp;
skb               656 net/ipv6/esp6.c 	esp_input_set_header(skb, seqhi);
skb               659 net/ipv6/esp6.c 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
skb               665 net/ipv6/esp6.c 	skb->ip_summed = CHECKSUM_NONE;
skb               668 net/ipv6/esp6.c 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
skb               670 net/ipv6/esp6.c 		aead_request_set_callback(req, 0, esp_input_done, skb);
skb               680 net/ipv6/esp6.c 		esp_input_restore_header(skb);
skb               682 net/ipv6/esp6.c 	ret = esp6_input_done2(skb, ret);
skb               688 net/ipv6/esp6.c static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               691 net/ipv6/esp6.c 	struct net *net = dev_net(skb->dev);
skb               692 net/ipv6/esp6.c 	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
skb               693 net/ipv6/esp6.c 	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
skb               700 net/ipv6/esp6.c 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
skb               706 net/ipv6/esp6.c 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
skb               709 net/ipv6/esp6.c 		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
skb               893 net/ipv6/esp6.c static int esp6_rcv_cb(struct sk_buff *skb, int err)
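
esp6_output() above sizes the ESP trailer as clen = ALIGN(len + 2 + tfclen, blksize) and plen = clen - len - tfclen: TFC padding first, then enough pad bytes that payload plus the two trailer bytes (pad length, next header) land on a cipher block boundary. The same arithmetic, standalone:

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
        unsigned int len = 40, tfclen = 0, blksize = 16;
        unsigned int clen = ALIGN_UP(len + 2 + tfclen, blksize);
        unsigned int plen = clen - len - tfclen;

        /* trailer = plen bytes: (plen - 2) pad, pad-length, next-header */
        printf("clen=%u plen=%u pad=%u\n", clen, plen, plen - 2);
        return 0;
    }
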
skb                50 net/ipv6/esp6_offload.c 					struct sk_buff *skb)
skb                52 net/ipv6/esp6_offload.c 	int offset = skb_gro_offset(skb);
skb                60 net/ipv6/esp6_offload.c 	if (!pskb_pull(skb, offset))
skb                63 net/ipv6/esp6_offload.c 	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
skb                66 net/ipv6/esp6_offload.c 	xo = xfrm_offload(skb);
skb                68 net/ipv6/esp6_offload.c 		struct sec_path *sp = secpath_set(skb);
skb                76 net/ipv6/esp6_offload.c 		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
skb                77 net/ipv6/esp6_offload.c 				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
skb                82 net/ipv6/esp6_offload.c 		skb->mark = xfrm_smark_get(skb->mark, x);
skb                87 net/ipv6/esp6_offload.c 		xo = xfrm_offload(skb);
skb                94 net/ipv6/esp6_offload.c 	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
skb                98 net/ipv6/esp6_offload.c 	IP6CB(skb)->nhoff = nhoff;
skb                99 net/ipv6/esp6_offload.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
skb               100 net/ipv6/esp6_offload.c 	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
skb               101 net/ipv6/esp6_offload.c 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
skb               102 net/ipv6/esp6_offload.c 	XFRM_SPI_SKB_CB(skb)->seq = seq;
skb               106 net/ipv6/esp6_offload.c 	xfrm_input(skb, IPPROTO_ESP, spi, -2);
skb               110 net/ipv6/esp6_offload.c 	secpath_reset(skb);
skb               112 net/ipv6/esp6_offload.c 	skb_push(skb, offset);
skb               113 net/ipv6/esp6_offload.c 	NAPI_GRO_CB(skb)->same_flow = 0;
skb               114 net/ipv6/esp6_offload.c 	NAPI_GRO_CB(skb)->flush = 1;
skb               119 net/ipv6/esp6_offload.c static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
skb               122 net/ipv6/esp6_offload.c 	struct ipv6hdr *iph = ipv6_hdr(skb);
skb               123 net/ipv6/esp6_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               126 net/ipv6/esp6_offload.c 	skb_push(skb, -skb_network_offset(skb));
skb               131 net/ipv6/esp6_offload.c 		ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
skb               134 net/ipv6/esp6_offload.c 	esph = ip_esp_hdr(skb);
skb               135 net/ipv6/esp6_offload.c 	*skb_mac_header(skb) = IPPROTO_ESP;
skb               138 net/ipv6/esp6_offload.c 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
skb               144 net/ipv6/esp6_offload.c 						struct sk_buff *skb,
skb               147 net/ipv6/esp6_offload.c 	__skb_push(skb, skb->mac_len);
skb               148 net/ipv6/esp6_offload.c 	return skb_mac_gso_segment(skb, features);
skb               152 net/ipv6/esp6_offload.c 						   struct sk_buff *skb,
skb               157 net/ipv6/esp6_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               159 net/ipv6/esp6_offload.c 	skb->transport_header += x->props.header_len;
skb               162 net/ipv6/esp6_offload.c 		segs = ops->callbacks.gso_segment(skb, features);
skb               168 net/ipv6/esp6_offload.c 						    struct sk_buff *skb,
skb               173 net/ipv6/esp6_offload.c 		return xfrm6_tunnel_gso_segment(x, skb, features);
skb               175 net/ipv6/esp6_offload.c 		return xfrm6_transport_gso_segment(x, skb, features);
skb               181 net/ipv6/esp6_offload.c static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
skb               188 net/ipv6/esp6_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               194 net/ipv6/esp6_offload.c 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
skb               197 net/ipv6/esp6_offload.c 	sp = skb_sec_path(skb);
skb               200 net/ipv6/esp6_offload.c 	esph = ip_esp_hdr(skb);
skb               205 net/ipv6/esp6_offload.c 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
skb               208 net/ipv6/esp6_offload.c 	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
skb               210 net/ipv6/esp6_offload.c 	skb->encap_hdr_csum = 1;
skb               212 net/ipv6/esp6_offload.c 	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
skb               219 net/ipv6/esp6_offload.c 	return xfrm6_outer_mode_gso_segment(x, skb, esp_features);
skb               222 net/ipv6/esp6_offload.c static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
skb               225 net/ipv6/esp6_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               227 net/ipv6/esp6_offload.c 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
skb               231 net/ipv6/esp6_offload.c 		skb->ip_summed = CHECKSUM_NONE;
skb               233 net/ipv6/esp6_offload.c 	return esp6_input_done2(skb, 0);
skb               236 net/ipv6/esp6_offload.c static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
skb               251 net/ipv6/esp6_offload.c 	xo = xfrm_offload(skb);
skb               256 net/ipv6/esp6_offload.c 	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
skb               272 net/ipv6/esp6_offload.c 	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
skb               273 net/ipv6/esp6_offload.c 	esp.plen = esp.clen - skb->len - esp.tfclen;
skb               276 net/ipv6/esp6_offload.c 	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
skb               277 net/ipv6/esp6_offload.c 		esp.nfrags = esp6_output_head(x, skb, &esp);
skb               284 net/ipv6/esp6_offload.c 	esph = ip_esp_hdr(skb);
skb               287 net/ipv6/esp6_offload.c 	skb_push(skb, -skb_network_offset(skb));
skb               292 net/ipv6/esp6_offload.c 		if (!skb_is_gso(skb))
skb               295 net/ipv6/esp6_offload.c 			xo->seq.low += skb_shinfo(skb)->gso_segs;
skb               300 net/ipv6/esp6_offload.c 	len = skb->len - sizeof(struct ipv6hdr);
skb               304 net/ipv6/esp6_offload.c 	ipv6_hdr(skb)->payload_len = htons(len);
skb               309 net/ipv6/esp6_offload.c 	err = esp6_output_tail(x, skb, &esp);
skb               313 net/ipv6/esp6_offload.c 	secpath_reset(skb);
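
esp6_gso_segment() above keeps a packet on the hardware path only when the device advertises ESP offload and the xfrm state is bound to that same device; otherwise it masks out the features that assumed hardware help before handing off to xfrm6_outer_mode_gso_segment(), which in turn dispatches tunnel-mode packets to re-segmentation from the MAC header and transport-mode ones past the ESP header. A sketch of the feature decision with invented flag names:

    #include <stdbool.h>

    #define F_HW_ESP    0x1
    #define F_SG        0x2
    #define F_CSUM      0x4

    /* Keep offload features only if the device can do ESP and the
     * state was bound to this very device; else fall back to software
     * crypto, which cannot rely on SG or checksum offload. */
    static unsigned int esp_features(unsigned int features, bool same_dev)
    {
        if (!(features & F_HW_ESP) || !same_dev)
            features &= ~(F_SG | F_CSUM);
        return features;
    }
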
skb                64 net/ipv6/exthdrs.c 	bool	(*func)(struct sk_buff *skb, int offset);
skb                73 net/ipv6/exthdrs.c static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
skb                87 net/ipv6/exthdrs.c 	switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
skb                98 net/ipv6/exthdrs.c 		if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
skb               102 net/ipv6/exthdrs.c 		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
skb               107 net/ipv6/exthdrs.c 	kfree_skb(skb);
skb               114 net/ipv6/exthdrs.c 			  struct sk_buff *skb,
skb               117 net/ipv6/exthdrs.c 	int len = (skb_transport_header(skb)[1] + 1) << 3;
skb               118 net/ipv6/exthdrs.c 	const unsigned char *nh = skb_network_header(skb);
skb               119 net/ipv6/exthdrs.c 	int off = skb_network_header_len(skb);
skb               130 net/ipv6/exthdrs.c 	if (skb_transport_offset(skb) + len > skb_headlen(skb))
skb               180 net/ipv6/exthdrs.c 					if (curr->func(skb, off) == false)
skb               186 net/ipv6/exthdrs.c 			    !ip6_tlvopt_unknown(skb, off, disallow_unknowns))
skb               199 net/ipv6/exthdrs.c 	kfree_skb(skb);
skb               208 net/ipv6/exthdrs.c static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
skb               211 net/ipv6/exthdrs.c 	struct inet6_skb_parm *opt = IP6CB(skb);
skb               212 net/ipv6/exthdrs.c 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               222 net/ipv6/exthdrs.c 	hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);
skb               236 net/ipv6/exthdrs.c 	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
skb               241 net/ipv6/exthdrs.c 	if (skb_cloned(skb)) {
skb               242 net/ipv6/exthdrs.c 		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
skb               246 net/ipv6/exthdrs.c 		hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
skb               248 net/ipv6/exthdrs.c 		ipv6h = ipv6_hdr(skb);
skb               251 net/ipv6/exthdrs.c 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               252 net/ipv6/exthdrs.c 		skb->ip_summed = CHECKSUM_NONE;
skb               256 net/ipv6/exthdrs.c 	if (skb->tstamp == 0)
skb               257 net/ipv6/exthdrs.c 		__net_timestamp(skb);
skb               262 net/ipv6/exthdrs.c 	kfree_skb(skb);
skb               277 net/ipv6/exthdrs.c static int ipv6_destopt_rcv(struct sk_buff *skb)
skb               279 net/ipv6/exthdrs.c 	struct inet6_dev *idev = __in6_dev_get(skb->dev);
skb               280 net/ipv6/exthdrs.c 	struct inet6_skb_parm *opt = IP6CB(skb);
skb               284 net/ipv6/exthdrs.c 	struct dst_entry *dst = skb_dst(skb);
skb               285 net/ipv6/exthdrs.c 	struct net *net = dev_net(skb->dev);
skb               288 net/ipv6/exthdrs.c 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
skb               289 net/ipv6/exthdrs.c 	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
skb               290 net/ipv6/exthdrs.c 				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
skb               294 net/ipv6/exthdrs.c 		kfree_skb(skb);
skb               298 net/ipv6/exthdrs.c 	extlen = (skb_transport_header(skb)[1] + 1) << 3;
skb               302 net/ipv6/exthdrs.c 	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
skb               307 net/ipv6/exthdrs.c 	if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
skb               309 net/ipv6/exthdrs.c 		skb->transport_header += extlen;
skb               310 net/ipv6/exthdrs.c 		opt = IP6CB(skb);
skb               323 net/ipv6/exthdrs.c static void seg6_update_csum(struct sk_buff *skb)
skb               333 net/ipv6/exthdrs.c 	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
skb               344 net/ipv6/exthdrs.c 	update_csum_diff4(skb, from, to);
skb               348 net/ipv6/exthdrs.c 	update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr),
skb               352 net/ipv6/exthdrs.c static int ipv6_srh_rcv(struct sk_buff *skb)
skb               354 net/ipv6/exthdrs.c 	struct inet6_skb_parm *opt = IP6CB(skb);
skb               355 net/ipv6/exthdrs.c 	struct net *net = dev_net(skb->dev);
skb               361 net/ipv6/exthdrs.c 	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
skb               363 net/ipv6/exthdrs.c 	idev = __in6_dev_get(skb->dev);
skb               370 net/ipv6/exthdrs.c 		kfree_skb(skb);
skb               375 net/ipv6/exthdrs.c 	if (!seg6_hmac_validate_skb(skb)) {
skb               376 net/ipv6/exthdrs.c 		kfree_skb(skb);
skb               386 net/ipv6/exthdrs.c 			skb_postpull_rcsum(skb, skb_network_header(skb),
skb               387 net/ipv6/exthdrs.c 					   skb_network_header_len(skb));
skb               389 net/ipv6/exthdrs.c 			if (!pskb_pull(skb, offset)) {
skb               390 net/ipv6/exthdrs.c 				kfree_skb(skb);
skb               393 net/ipv6/exthdrs.c 			skb_postpull_rcsum(skb, skb_transport_header(skb),
skb               396 net/ipv6/exthdrs.c 			skb_reset_network_header(skb);
skb               397 net/ipv6/exthdrs.c 			skb_reset_transport_header(skb);
skb               398 net/ipv6/exthdrs.c 			skb->encapsulation = 0;
skb               400 net/ipv6/exthdrs.c 			__skb_tunnel_rx(skb, skb->dev, net);
skb               402 net/ipv6/exthdrs.c 			netif_rx(skb);
skb               406 net/ipv6/exthdrs.c 		opt->srcrt = skb_network_header_len(skb);
skb               408 net/ipv6/exthdrs.c 		skb->transport_header += (hdr->hdrlen + 1) << 3;
skb               409 net/ipv6/exthdrs.c 		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
skb               416 net/ipv6/exthdrs.c 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
skb               418 net/ipv6/exthdrs.c 				   skb_network_header(skb)));
skb               422 net/ipv6/exthdrs.c 	if (skb_cloned(skb)) {
skb               423 net/ipv6/exthdrs.c 		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
skb               424 net/ipv6/exthdrs.c 			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb               426 net/ipv6/exthdrs.c 			kfree_skb(skb);
skb               431 net/ipv6/exthdrs.c 	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
skb               436 net/ipv6/exthdrs.c 	skb_push(skb, sizeof(struct ipv6hdr));
skb               438 net/ipv6/exthdrs.c 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               439 net/ipv6/exthdrs.c 		seg6_update_csum(skb);
skb               441 net/ipv6/exthdrs.c 	ipv6_hdr(skb)->daddr = *addr;
skb               443 net/ipv6/exthdrs.c 	skb_dst_drop(skb);
skb               445 net/ipv6/exthdrs.c 	ip6_route_input(skb);
skb               447 net/ipv6/exthdrs.c 	if (skb_dst(skb)->error) {
skb               448 net/ipv6/exthdrs.c 		dst_input(skb);
skb               452 net/ipv6/exthdrs.c 	if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
skb               453 net/ipv6/exthdrs.c 		if (ipv6_hdr(skb)->hop_limit <= 1) {
skb               455 net/ipv6/exthdrs.c 			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
skb               457 net/ipv6/exthdrs.c 			kfree_skb(skb);
skb               460 net/ipv6/exthdrs.c 		ipv6_hdr(skb)->hop_limit--;
skb               462 net/ipv6/exthdrs.c 		skb_pull(skb, sizeof(struct ipv6hdr));
skb               466 net/ipv6/exthdrs.c 	dst_input(skb);
skb               476 net/ipv6/exthdrs.c static int ipv6_rthdr_rcv(struct sk_buff *skb)
skb               478 net/ipv6/exthdrs.c 	struct inet6_dev *idev = __in6_dev_get(skb->dev);
skb               479 net/ipv6/exthdrs.c 	struct inet6_skb_parm *opt = IP6CB(skb);
skb               485 net/ipv6/exthdrs.c 	struct net *net = dev_net(skb->dev);
skb               488 net/ipv6/exthdrs.c 	idev = __in6_dev_get(skb->dev);
skb               492 net/ipv6/exthdrs.c 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
skb               493 net/ipv6/exthdrs.c 	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
skb               494 net/ipv6/exthdrs.c 				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
skb               496 net/ipv6/exthdrs.c 		kfree_skb(skb);
skb               500 net/ipv6/exthdrs.c 	hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
skb               502 net/ipv6/exthdrs.c 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
skb               503 net/ipv6/exthdrs.c 	    skb->pkt_type != PACKET_HOST) {
skb               505 net/ipv6/exthdrs.c 		kfree_skb(skb);
skb               511 net/ipv6/exthdrs.c 		return ipv6_srh_rcv(skb);
skb               524 net/ipv6/exthdrs.c 				kfree_skb(skb);
skb               533 net/ipv6/exthdrs.c 		opt->lastopt = opt->srcrt = skb_network_header_len(skb);
skb               534 net/ipv6/exthdrs.c 		skb->transport_header += (hdr->hdrlen + 1) << 3;
skb               537 net/ipv6/exthdrs.c 		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
skb               549 net/ipv6/exthdrs.c 			kfree_skb(skb);
skb               567 net/ipv6/exthdrs.c 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
skb               569 net/ipv6/exthdrs.c 				   skb_network_header(skb)));
skb               576 net/ipv6/exthdrs.c 	if (skb_cloned(skb)) {
skb               578 net/ipv6/exthdrs.c 		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
skb               579 net/ipv6/exthdrs.c 			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb               581 net/ipv6/exthdrs.c 			kfree_skb(skb);
skb               584 net/ipv6/exthdrs.c 		hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
skb               587 net/ipv6/exthdrs.c 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               588 net/ipv6/exthdrs.c 		skb->ip_summed = CHECKSUM_NONE;
skb               599 net/ipv6/exthdrs.c 		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
skb               600 net/ipv6/exthdrs.c 				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
skb               603 net/ipv6/exthdrs.c 			kfree_skb(skb);
skb               606 net/ipv6/exthdrs.c 		if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
skb               608 net/ipv6/exthdrs.c 			kfree_skb(skb);
skb               619 net/ipv6/exthdrs.c 		kfree_skb(skb);
skb               624 net/ipv6/exthdrs.c 	*addr = ipv6_hdr(skb)->daddr;
skb               625 net/ipv6/exthdrs.c 	ipv6_hdr(skb)->daddr = daddr;
skb               627 net/ipv6/exthdrs.c 	skb_dst_drop(skb);
skb               628 net/ipv6/exthdrs.c 	ip6_route_input(skb);
skb               629 net/ipv6/exthdrs.c 	if (skb_dst(skb)->error) {
skb               630 net/ipv6/exthdrs.c 		skb_push(skb, skb->data - skb_network_header(skb));
skb               631 net/ipv6/exthdrs.c 		dst_input(skb);
skb               635 net/ipv6/exthdrs.c 	if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
skb               636 net/ipv6/exthdrs.c 		if (ipv6_hdr(skb)->hop_limit <= 1) {
skb               638 net/ipv6/exthdrs.c 			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
skb               640 net/ipv6/exthdrs.c 			kfree_skb(skb);
skb               643 net/ipv6/exthdrs.c 		ipv6_hdr(skb)->hop_limit--;
skb               647 net/ipv6/exthdrs.c 	skb_push(skb, skb->data - skb_network_header(skb));
skb               648 net/ipv6/exthdrs.c 	dst_input(skb);
skb               653 net/ipv6/exthdrs.c 	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
skb               654 net/ipv6/exthdrs.c 			  (&hdr->type) - skb_network_header(skb));
skb               712 net/ipv6/exthdrs.c static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
skb               714 net/ipv6/exthdrs.c 	return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
skb               717 net/ipv6/exthdrs.c static inline struct net *ipv6_skb_net(struct sk_buff *skb)
skb               719 net/ipv6/exthdrs.c 	return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
skb               724 net/ipv6/exthdrs.c static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
skb               726 net/ipv6/exthdrs.c 	const unsigned char *nh = skb_network_header(skb);
skb               729 net/ipv6/exthdrs.c 		IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
skb               730 net/ipv6/exthdrs.c 		memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
skb               735 net/ipv6/exthdrs.c 	kfree_skb(skb);
skb               741 net/ipv6/exthdrs.c static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
skb               743 net/ipv6/exthdrs.c 	const unsigned char *nh = skb_network_header(skb);
skb               744 net/ipv6/exthdrs.c 	struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
skb               745 net/ipv6/exthdrs.c 	struct net *net = ipv6_skb_net(skb);
skb               758 net/ipv6/exthdrs.c 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
skb               761 net/ipv6/exthdrs.c 	if (ipv6_hdr(skb)->payload_len) {
skb               763 net/ipv6/exthdrs.c 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
skb               767 net/ipv6/exthdrs.c 	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
skb               772 net/ipv6/exthdrs.c 	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
skb               775 net/ipv6/exthdrs.c 	IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM;
skb               779 net/ipv6/exthdrs.c 	kfree_skb(skb);
skb               785 net/ipv6/exthdrs.c static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff)
skb               787 net/ipv6/exthdrs.c 	const unsigned char *nh = skb_network_header(skb);
skb               795 net/ipv6/exthdrs.c 	if (!calipso_validate(skb, nh + optoff))
skb               801 net/ipv6/exthdrs.c 	kfree_skb(skb);
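ipv6_hop_jumbo() above is an RFC 2675 jumbogram check: the option must carry a 4-byte value at 4n+2 alignment, the fixed header's payload_len must be zero, and the jumbo length must both exceed 65535 and fit the bytes actually received. A hedged standalone version over a flat buffer; names are mine, and the kernel works on an skb and trims with pskb_trim_rcsum():

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

/* Validate an RFC 2675 Jumbo Payload option whose type byte sits at
 * 'optoff' bytes into the packet.  'pktlen' is the number of bytes
 * actually received, including the 40-byte fixed header;
 * 'payload_len' is the fixed header's payload length (host order).
 * Returns the jumbo length, or 0 if the option must be rejected. */
static uint32_t check_jumbo_opt(const uint8_t *pkt, size_t pktlen,
				size_t optoff, uint16_t payload_len)
{
	uint32_t jumbo;

	if (pktlen < 40 || optoff + 6 > pktlen)
		return 0;
	if (pkt[optoff + 1] != 4 || (optoff & 3) != 2)
		return 0;		/* 4-byte value, 4n+2 alignment */
	if (payload_len != 0)
		return 0;		/* jumbograms carry plen == 0 */
	memcpy(&jumbo, pkt + optoff + 2, sizeof(jumbo));
	jumbo = ntohl(jumbo);
	if (jumbo <= 65535)
		return 0;		/* must not fit a normal plen */
	if (jumbo > pktlen - 40)
		return 0;		/* must fit what we received */
	return jumbo;
}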
skb               821 net/ipv6/exthdrs.c int ipv6_parse_hopopts(struct sk_buff *skb)
skb               823 net/ipv6/exthdrs.c 	struct inet6_skb_parm *opt = IP6CB(skb);
skb               824 net/ipv6/exthdrs.c 	struct net *net = dev_net(skb->dev);
skb               833 net/ipv6/exthdrs.c 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
skb               834 net/ipv6/exthdrs.c 	    !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
skb               835 net/ipv6/exthdrs.c 				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
skb               837 net/ipv6/exthdrs.c 		kfree_skb(skb);
skb               841 net/ipv6/exthdrs.c 	extlen = (skb_transport_header(skb)[1] + 1) << 3;
skb               846 net/ipv6/exthdrs.c 	if (ip6_parse_tlv(tlvprochopopt_lst, skb,
skb               848 net/ipv6/exthdrs.c 		skb->transport_header += extlen;
skb               849 net/ipv6/exthdrs.c 		opt = IP6CB(skb);
skb               866 net/ipv6/exthdrs.c static void ipv6_push_rthdr0(struct sk_buff *skb, u8 *proto,
skb               875 net/ipv6/exthdrs.c 	phdr = skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
skb               891 net/ipv6/exthdrs.c static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
skb               901 net/ipv6/exthdrs.c 	sr_phdr = skb_push(skb, plen);
skb               924 net/ipv6/exthdrs.c 		if (skb->dev)
skb               925 net/ipv6/exthdrs.c 			net = dev_net(skb->dev);
skb               926 net/ipv6/exthdrs.c 		else if (skb->sk)
skb               927 net/ipv6/exthdrs.c 			net = sock_net(skb->sk);
skb               940 net/ipv6/exthdrs.c static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
skb               948 net/ipv6/exthdrs.c 		ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
skb               951 net/ipv6/exthdrs.c 		ipv6_push_rthdr4(skb, proto, opt, addr_p, saddr);
skb               958 net/ipv6/exthdrs.c static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
skb               960 net/ipv6/exthdrs.c 	struct ipv6_opt_hdr *h = skb_push(skb, ipv6_optlen(opt));
skb               967 net/ipv6/exthdrs.c void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
skb               972 net/ipv6/exthdrs.c 		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr, saddr);
skb               978 net/ipv6/exthdrs.c 			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
skb               981 net/ipv6/exthdrs.c 		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
skb               984 net/ipv6/exthdrs.c void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
skb               987 net/ipv6/exthdrs.c 		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
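ipv6_push_exthdr() shows how extension headers are chained back-to-front at transmit time: the freshly pushed header inherits the old next-header value, and the caller's protocol slot is rewritten to the new header's type. A sketch of that splice over a plain buffer, with an assumed layout and no skb headroom management:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct opt_hdr {		/* generic extension header prefix */
	uint8_t nexthdr;
	uint8_t hdrlen;		/* (total length / 8) - 1 */
};

/* Prepend an extension header in front of 'payload'.  'opt' points
 * at a fully built option block of (hdrlen + 1) * 8 bytes, and 'buf'
 * headroom in front of the payload is the caller's problem.  The new
 * header points at what *proto used to be; *proto now names the new
 * header's type, keeping the next-header chain intact. */
static uint8_t *push_exthdr(uint8_t *payload, const struct opt_hdr *opt,
			    uint8_t type, uint8_t *proto)
{
	size_t len = (size_t)(opt->hdrlen + 1) * 8;
	uint8_t *h = payload - len;

	memcpy(h, opt, len);
	h[0] = *proto;		/* new header -> old next header */
	*proto = type;		/* chain head now names the new header */
	return h;
}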
skb                72 net/ipv6/exthdrs_core.c int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
skb                85 net/ipv6/exthdrs_core.c 		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
skb                90 net/ipv6/exthdrs_core.c 			fp = skb_header_pointer(skb,
skb               116 net/ipv6/exthdrs_core.c int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type)
skb               118 net/ipv6/exthdrs_core.c 	const unsigned char *nh = skb_network_header(skb);
skb               119 net/ipv6/exthdrs_core.c 	int packet_len = skb_tail_pointer(skb) - skb_network_header(skb);
skb               186 net/ipv6/exthdrs_core.c int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
skb               189 net/ipv6/exthdrs_core.c 	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
skb               190 net/ipv6/exthdrs_core.c 	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
skb               199 net/ipv6/exthdrs_core.c 		ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
skb               217 net/ipv6/exthdrs_core.c 		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
skb               224 net/ipv6/exthdrs_core.c 			rh = skb_header_pointer(skb, start, sizeof(_rh),
skb               240 net/ipv6/exthdrs_core.c 			fp = skb_header_pointer(skb,
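ipv6_skip_exthdr() and ipv6_find_hdr() above walk the chain the same way: read the {nexthdr, hdrlen} prefix, size the header (fragment headers are a fixed 8 bytes, AH scales by 4, everything else by 8), and hop forward until a non-extension protocol appears. A self-contained walker over a linear buffer, simplified from that logic; no fragment short-circuit and no skb_header_pointer() copies:

#include <stdint.h>
#include <stddef.h>

#define NEXTHDR_HOP		0
#define NEXTHDR_ROUTING		43
#define NEXTHDR_FRAGMENT	44
#define NEXTHDR_AUTH		51
#define NEXTHDR_DEST		60

static int is_ext(uint8_t nh)
{
	return nh == NEXTHDR_HOP || nh == NEXTHDR_ROUTING ||
	       nh == NEXTHDR_FRAGMENT || nh == NEXTHDR_AUTH ||
	       nh == NEXTHDR_DEST;
}

/* Walk extension headers starting at 'off' (just past the fixed IPv6
 * header).  On success returns the offset of the upper-layer header
 * and stores its protocol in *nexthdrp; returns -1 on truncation. */
static int skip_exthdr(const uint8_t *pkt, size_t len, int off,
		       uint8_t *nexthdrp)
{
	uint8_t nh = *nexthdrp;

	while (is_ext(nh)) {
		int hdrlen;

		if ((size_t)off + 2 > len)
			return -1;
		if (nh == NEXTHDR_FRAGMENT)
			hdrlen = 8;			/* fixed size */
		else if (nh == NEXTHDR_AUTH)
			hdrlen = (pkt[off + 1] + 2) * 4;
		else
			hdrlen = (pkt[off + 1] + 1) * 8;
		nh = pkt[off];				/* next header */
		off += hdrlen;
	}
	*nexthdrp = nh;
	return off;
}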
skb                90 net/ipv6/fib6_rules.c 				   const struct sk_buff *skb,
skb                97 net/ipv6/fib6_rules.c 			.lookup_data = skb,
skb               113 net/ipv6/fib6_rules.c 		rt = lookup(net, net->ipv6.fib6_local_tbl, fl6, skb, flags);
skb               117 net/ipv6/fib6_rules.c 		rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags);
skb               339 net/ipv6/fib6_rules.c static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
skb               345 net/ipv6/fib6_rules.c 	struct net *net = sock_net(skb->sk);
skb               415 net/ipv6/fib6_rules.c static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
skb               425 net/ipv6/fib6_rules.c 	     nla_put_in6_addr(skb, FRA_DST, &rule6->dst.addr)) ||
skb               427 net/ipv6/fib6_rules.c 	     nla_put_in6_addr(skb, FRA_SRC, &rule6->src.addr)))
skb                21 net/ipv6/fou6.c static void fou6_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb                26 net/ipv6/fou6.c 	skb_push(skb, sizeof(struct udphdr));
skb                27 net/ipv6/fou6.c 	skb_reset_transport_header(skb);
skb                29 net/ipv6/fou6.c 	uh = udp_hdr(skb);
skb                33 net/ipv6/fou6.c 	uh->len = htons(skb->len);
skb                34 net/ipv6/fou6.c 	udp6_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM6), skb,
skb                35 net/ipv6/fou6.c 		      &fl6->saddr, &fl6->daddr, skb->len);
skb                40 net/ipv6/fou6.c static int fou6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb                48 net/ipv6/fou6.c 	err = __fou_build_header(skb, e, protocol, &sport, type);
skb                52 net/ipv6/fou6.c 	fou6_build_udp(skb, e, fl6, protocol, sport);
skb                57 net/ipv6/fou6.c static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
skb                65 net/ipv6/fou6.c 	err = __gue_build_header(skb, e, protocol, &sport, type);
skb                69 net/ipv6/fou6.c 	fou6_build_udp(skb, e, fl6, protocol, sport);
skb                74 net/ipv6/fou6.c static int gue6_err_proto_handler(int proto, struct sk_buff *skb,
skb                82 net/ipv6/fou6.c 		if (!ipprot->err_handler(skb, opt, type, code, offset, info))
skb                89 net/ipv6/fou6.c static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb                92 net/ipv6/fou6.c 	int transport_offset = skb_transport_offset(skb);
skb                98 net/ipv6/fou6.c 	if (!pskb_may_pull(skb, transport_offset + len))
skb               101 net/ipv6/fou6.c 	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
skb               108 net/ipv6/fou6.c 		skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr));
skb               112 net/ipv6/fou6.c 			ret = gue6_err_proto_handler(IPPROTO_IPIP, skb, opt,
skb               116 net/ipv6/fou6.c 			ret = gue6_err_proto_handler(IPPROTO_IPV6, skb, opt,
skb               133 net/ipv6/fou6.c 	if (!pskb_may_pull(skb, transport_offset + len + optlen))
skb               136 net/ipv6/fou6.c 	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
skb               148 net/ipv6/fou6.c 	skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr));
skb               149 net/ipv6/fou6.c 	ret = gue6_err_proto_handler(guehdr->proto_ctype, skb,
skb               153 net/ipv6/fou6.c 	skb_set_transport_header(skb, transport_offset);
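fou6_build_udp() prepends a UDP header to the already-built inner packet: source port chosen by the encap layer, destination port from the tunnel config, length covering everything pushed so far, checksum set per the encap flags. A buffer-level sketch; struct and helper names are mine, and the checksum is left as a placeholder (IPv6 requires a real one, computed over the pseudo-header as in the udp6 checksum sketch further down):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct udp_hdr {
	uint16_t source;
	uint16_t dest;
	uint16_t len;
	uint16_t check;
};

/* Prepend a UDP header for a FOU-style encapsulation.  'inner'
 * points at the encapsulated packet of 'inner_len' bytes and must
 * have 8 bytes of headroom in front of it. */
static uint8_t *fou_push_udp(uint8_t *inner, size_t inner_len,
			     uint16_t sport, uint16_t dport)
{
	struct udp_hdr uh = {
		.source = htons(sport),
		.dest	= htons(dport),
		.len	= htons((uint16_t)(inner_len +
					   sizeof(struct udp_hdr))),
		.check	= 0,	/* placeholder; must be filled for v6 */
	};
	uint8_t *p = inner - sizeof(struct udp_hdr);

	memcpy(p, &uh, sizeof(uh));
	return p;
}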
skb                83 net/ipv6/icmp.c static int icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb                87 net/ipv6/icmp.c 	struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
skb                88 net/ipv6/icmp.c 	struct net *net = dev_net(skb->dev);
skb                91 net/ipv6/icmp.c 		ip6_update_pmtu(skb, net, info, skb->dev->ifindex, 0, sock_net_uid(net, NULL));
skb                93 net/ipv6/icmp.c 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
skb                98 net/ipv6/icmp.c 			ping_err(skb, offset, ntohl(info));
skb               103 net/ipv6/icmp.c static int icmpv6_rcv(struct sk_buff *skb);
skb               143 net/ipv6/icmp.c static bool is_ineligible(const struct sk_buff *skb)
skb               145 net/ipv6/icmp.c 	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
skb               146 net/ipv6/icmp.c 	int len = skb->len - ptr;
skb               147 net/ipv6/icmp.c 	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;
skb               153 net/ipv6/icmp.c 	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
skb               158 net/ipv6/icmp.c 		tp = skb_header_pointer(skb,
skb               239 net/ipv6/icmp.c static bool opt_unrec(struct sk_buff *skb, __u32 offset)
skb               243 net/ipv6/icmp.c 	offset += skb_network_offset(skb);
skb               244 net/ipv6/icmp.c 	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
skb               253 net/ipv6/icmp.c 	struct sk_buff *skb;
skb               256 net/ipv6/icmp.c 	skb = skb_peek(&sk->sk_write_queue);
skb               257 net/ipv6/icmp.c 	if (!skb)
skb               260 net/ipv6/icmp.c 	icmp6h = icmp6_hdr(skb);
skb               265 net/ipv6/icmp.c 		skb->csum = csum_partial(icmp6h,
skb               266 net/ipv6/icmp.c 					sizeof(struct icmp6hdr), skb->csum);
skb               270 net/ipv6/icmp.c 						      skb->csum);
skb               274 net/ipv6/icmp.c 		skb_queue_walk(&sk->sk_write_queue, skb) {
skb               275 net/ipv6/icmp.c 			tmp_csum = csum_add(tmp_csum, skb->csum);
skb               289 net/ipv6/icmp.c 	struct sk_buff	*skb;
skb               294 net/ipv6/icmp.c static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
skb               297 net/ipv6/icmp.c 	struct sk_buff *org_skb = msg->skb;
skb               302 net/ipv6/icmp.c 	skb->csum = csum_block_add(skb->csum, csum, odd);
skb               304 net/ipv6/icmp.c 		nf_ct_attach(skb, org_skb);
skb               309 net/ipv6/icmp.c static void mip6_addr_swap(struct sk_buff *skb)
skb               311 net/ipv6/icmp.c 	struct ipv6hdr *iph = ipv6_hdr(skb);
skb               312 net/ipv6/icmp.c 	struct inet6_skb_parm *opt = IP6CB(skb);
skb               318 net/ipv6/icmp.c 		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
skb               321 net/ipv6/icmp.c 					(skb_network_header(skb) + off);
skb               329 net/ipv6/icmp.c static inline void mip6_addr_swap(struct sk_buff *skb) {}
skb               333 net/ipv6/icmp.c 					     struct sk_buff *skb,
skb               369 net/ipv6/icmp.c 	err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
skb               396 net/ipv6/icmp.c static struct net_device *icmp6_dev(const struct sk_buff *skb)
skb               398 net/ipv6/icmp.c 	struct net_device *dev = skb->dev;
skb               405 net/ipv6/icmp.c 	if (unlikely(dev->ifindex == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
skb               406 net/ipv6/icmp.c 		const struct rt6_info *rt6 = skb_rt6_info(skb);
skb               415 net/ipv6/icmp.c static int icmp6_iif(const struct sk_buff *skb)
skb               417 net/ipv6/icmp.c 	return icmp6_dev(skb)->ifindex;
skb               423 net/ipv6/icmp.c static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
skb               427 net/ipv6/icmp.c 	struct ipv6hdr *hdr = ipv6_hdr(skb);
skb               442 net/ipv6/icmp.c 	if ((u8 *)hdr < skb->head ||
skb               443 net/ipv6/icmp.c 	    (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
skb               446 net/ipv6/icmp.c 	if (!skb->dev)
skb               448 net/ipv6/icmp.c 	net = dev_net(skb->dev);
skb               449 net/ipv6/icmp.c 	mark = IP6_REPLY_MARK(net, skb->mark);
skb               458 net/ipv6/icmp.c 	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
skb               459 net/ipv6/icmp.c 	    ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
skb               466 net/ipv6/icmp.c 	if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) {
skb               470 net/ipv6/icmp.c 		      (opt_unrec(skb, info))))
skb               483 net/ipv6/icmp.c 		iif = icmp6_iif(skb);
skb               485 net/ipv6/icmp.c 		dst = skb_dst(skb);
skb               486 net/ipv6/icmp.c 		iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev);
skb               504 net/ipv6/icmp.c 	if (is_ineligible(skb)) {
skb               514 net/ipv6/icmp.c 	if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type))
skb               517 net/ipv6/icmp.c 	mip6_addr_swap(skb);
skb               531 net/ipv6/icmp.c 	fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
skb               532 net/ipv6/icmp.c 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
skb               557 net/ipv6/icmp.c 	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
skb               563 net/ipv6/icmp.c 	msg.skb = skb;
skb               564 net/ipv6/icmp.c 	msg.offset = skb_network_offset(skb);
skb               567 net/ipv6/icmp.c 	len = skb->len - msg.offset;
skb               576 net/ipv6/icmp.c 	idev = __in6_dev_get(skb->dev);
skb               600 net/ipv6/icmp.c void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
skb               602 net/ipv6/icmp.c 	icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL);
skb               603 net/ipv6/icmp.c 	kfree_skb(skb);
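icmp6_send() refuses to emit errors for multicast destinations, for errors about errors (is_ineligible()), and when rate limiting says no (icmpv6_global_allow() plus the per-destination limit). The kernel's limiter is token-bucket shaped; the sketch below is a generic token bucket standing in for it, purely illustrative and not the kernel's data structure:

#include <time.h>

/* 'rate' tokens accrue per second up to 'burst'; each emitted error
 * costs one token.  Not thread-safe; illustration only. */
struct err_ratelimit {
	double tokens, rate, burst;
	struct timespec last;
};

static int err_ratelimit_allow(struct err_ratelimit *rl)
{
	struct timespec now;
	double dt;

	clock_gettime(CLOCK_MONOTONIC, &now);
	dt = (double)(now.tv_sec - rl->last.tv_sec) +
	     (double)(now.tv_nsec - rl->last.tv_nsec) / 1e9;
	rl->last = now;
	rl->tokens += dt * rl->rate;
	if (rl->tokens > rl->burst)
		rl->tokens = rl->burst;
	if (rl->tokens < 1.0)
		return 0;		/* suppress this error */
	rl->tokens -= 1.0;
	return 1;
}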
skb               612 net/ipv6/icmp.c int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
skb               620 net/ipv6/icmp.c 	if (!pskb_may_pull(skb, nhs + sizeof(struct ipv6hdr) + 8))
skb               624 net/ipv6/icmp.c 	if (data_len < 128 || (data_len & 7) || skb->len < data_len)
skb               627 net/ipv6/icmp.c 	skb2 = data_len ? skb_copy(skb, GFP_ATOMIC) : skb_clone(skb, GFP_ATOMIC);
skb               636 net/ipv6/icmp.c 	rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0,
skb               637 net/ipv6/icmp.c 			skb, 0);
skb               642 net/ipv6/icmp.c 	ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &temp_saddr);
skb               672 net/ipv6/icmp.c static void icmpv6_echo_reply(struct sk_buff *skb)
skb               674 net/ipv6/icmp.c 	struct net *net = dev_net(skb->dev);
skb               679 net/ipv6/icmp.c 	struct icmp6hdr *icmph = icmp6_hdr(skb);
skb               685 net/ipv6/icmp.c 	u32 mark = IP6_REPLY_MARK(net, skb->mark);
skb               688 net/ipv6/icmp.c 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
skb               692 net/ipv6/icmp.c 	saddr = &ipv6_hdr(skb)->daddr;
skb               694 net/ipv6/icmp.c 	acast = ipv6_anycast_destination(skb_dst(skb), saddr);
skb               698 net/ipv6/icmp.c 	if (!ipv6_unicast_destination(skb) &&
skb               707 net/ipv6/icmp.c 		fl6.flowlabel = ip6_flowlabel(ipv6_hdr(skb));
skb               710 net/ipv6/icmp.c 	fl6.daddr = ipv6_hdr(skb)->saddr;
skb               713 net/ipv6/icmp.c 	fl6.flowi6_oif = icmp6_iif(skb);
skb               717 net/ipv6/icmp.c 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
skb               738 net/ipv6/icmp.c 	if ((!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY)) ||
skb               742 net/ipv6/icmp.c 	idev = __in6_dev_get(skb->dev);
skb               744 net/ipv6/icmp.c 	msg.skb = skb;
skb               750 net/ipv6/icmp.c 	ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
skb               753 net/ipv6/icmp.c 			    skb->len + sizeof(struct icmp6hdr),
skb               760 net/ipv6/icmp.c 					   skb->len + sizeof(struct icmp6hdr));
skb               770 net/ipv6/icmp.c void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
skb               776 net/ipv6/icmp.c 	struct net *net = dev_net(skb->dev);
skb               778 net/ipv6/icmp.c 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
skb               781 net/ipv6/icmp.c 	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
skb               784 net/ipv6/icmp.c 		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
skb               793 net/ipv6/icmp.c 	if (!pskb_may_pull(skb, inner_offset+8))
skb               805 net/ipv6/icmp.c 		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
skb               807 net/ipv6/icmp.c 	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
skb               811 net/ipv6/icmp.c 	__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
skb               818 net/ipv6/icmp.c static int icmpv6_rcv(struct sk_buff *skb)
skb               820 net/ipv6/icmp.c 	struct net *net = dev_net(skb->dev);
skb               821 net/ipv6/icmp.c 	struct net_device *dev = icmp6_dev(skb);
skb               828 net/ipv6/icmp.c 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
skb               829 net/ipv6/icmp.c 		struct sec_path *sp = skb_sec_path(skb);
skb               836 net/ipv6/icmp.c 		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
skb               839 net/ipv6/icmp.c 		nh = skb_network_offset(skb);
skb               840 net/ipv6/icmp.c 		skb_set_network_header(skb, sizeof(*hdr));
skb               842 net/ipv6/icmp.c 		if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
skb               845 net/ipv6/icmp.c 		skb_set_network_header(skb, nh);
skb               850 net/ipv6/icmp.c 	saddr = &ipv6_hdr(skb)->saddr;
skb               851 net/ipv6/icmp.c 	daddr = &ipv6_hdr(skb)->daddr;
skb               853 net/ipv6/icmp.c 	if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) {
skb               859 net/ipv6/icmp.c 	if (!pskb_pull(skb, sizeof(*hdr)))
skb               862 net/ipv6/icmp.c 	hdr = icmp6_hdr(skb);
skb               871 net/ipv6/icmp.c 			icmpv6_echo_reply(skb);
skb               875 net/ipv6/icmp.c 		success = ping_rcv(skb);
skb               884 net/ipv6/icmp.c 		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
skb               886 net/ipv6/icmp.c 		hdr = icmp6_hdr(skb);
skb               893 net/ipv6/icmp.c 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
skb               901 net/ipv6/icmp.c 		ndisc_rcv(skb);
skb               905 net/ipv6/icmp.c 		igmp6_event_query(skb);
skb               909 net/ipv6/icmp.c 		igmp6_event_report(skb);
skb               935 net/ipv6/icmp.c 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
skb               942 net/ipv6/icmp.c 		consume_skb(skb);
skb               944 net/ipv6/icmp.c 		kfree_skb(skb);
skb               953 net/ipv6/icmp.c 	kfree_skb(skb);
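icmpv6_rcv() above fans out on the message type: echo requests are answered on the spot, echo replies are matched to pending pings, and everything in the error range (types below 128) is reflected to the embedded packet's protocol via icmpv6_notify(). A stripped-down classifier capturing just that split; constants are inlined, handlers omitted, and the NDISC/MLD informational types elided:

#include <stdint.h>

enum icmp6_action {
	ACT_ECHO_REPLY,		/* answer immediately */
	ACT_PING_MATCH,		/* hand to the ping socket layer */
	ACT_NOTIFY_INNER,	/* error: wake the inner protocol */
	ACT_DROP,
};

/* Types 0..127 are errors and get reflected inward; informational
 * types are handled (or silently dropped) locally. */
static enum icmp6_action icmp6_classify(uint8_t type)
{
	if (type < 128)
		return ACT_NOTIFY_INNER;
	switch (type) {
	case 128:			/* ICMPV6_ECHO_REQUEST */
		return ACT_ECHO_REPLY;
	case 129:			/* ICMPV6_ECHO_REPLY */
		return ACT_PING_MATCH;
	default:			/* NDISC, MLD, unknown info */
		return ACT_DROP;
	}
}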
skb                98 net/ipv6/ila/ila.h void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
skb               118 net/ipv6/ila/ila.h int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);
skb               119 net/ipv6/ila/ila.h int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info);
skb               120 net/ipv6/ila/ila.h int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info);
skb               121 net/ipv6/ila/ila.h int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info);
skb               124 net/ipv6/ila/ila.h int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb);
skb                74 net/ipv6/ila/ila_common.c static void ila_csum_adjust_transport(struct sk_buff *skb,
skb                78 net/ipv6/ila/ila_common.c 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb                83 net/ipv6/ila/ila_common.c 		if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) {
skb                85 net/ipv6/ila/ila_common.c 					(skb_network_header(skb) + nhoff);
skb                88 net/ipv6/ila/ila_common.c 			inet_proto_csum_replace_by_diff(&th->check, skb,
skb                93 net/ipv6/ila/ila_common.c 		if (likely(pskb_may_pull(skb, nhoff + sizeof(struct udphdr)))) {
skb                95 net/ipv6/ila/ila_common.c 					(skb_network_header(skb) + nhoff);
skb                97 net/ipv6/ila/ila_common.c 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
skb                99 net/ipv6/ila/ila_common.c 				inet_proto_csum_replace_by_diff(&uh->check, skb,
skb               107 net/ipv6/ila/ila_common.c 		if (likely(pskb_may_pull(skb,
skb               110 net/ipv6/ila/ila_common.c 					(skb_network_header(skb) + nhoff);
skb               113 net/ipv6/ila/ila_common.c 			inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb,
skb               120 net/ipv6/ila/ila_common.c void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
skb               123 net/ipv6/ila/ila_common.c 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               128 net/ipv6/ila/ila_common.c 		ila_csum_adjust_transport(skb, p);
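ila_csum_adjust_transport() keeps the translation checksum-neutral from the transport protocol's point of view: only the locator words of the address change, so the TCP/UDP/ICMPv6 checksum is patched by the difference instead of being recomputed. A standalone version of that RFC 1624 incremental update; helper names are mine, standing in for inet_proto_csum_replace_by_diff():

#include <stdint.h>

/* RFC 1624 update: recompute a checksum field after one 16-bit word
 * of covered data changes from 'oldw' to 'neww'.  Words are raw
 * wire-order values, so the math is endianness-agnostic. */
static uint16_t csum_replace16(uint16_t check, uint16_t oldw,
			       uint16_t neww)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~oldw + neww;
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* ILA-style locator rewrite: the top 64 bits of the address change,
 * so fold all four affected words into the transport checksum. */
static uint16_t ila_fix_csum(uint16_t check, const uint16_t old_loc[4],
			     const uint16_t new_loc[4])
{
	int i;

	for (i = 0; i < 4; i++)
		check = csum_replace16(check, old_loc[i], new_loc[i]);
	return check;
}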
skb                38 net/ipv6/ila/ila_lwt.c static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                40 net/ipv6/ila/ila_lwt.c 	struct dst_entry *orig_dst = skb_dst(skb);
skb                46 net/ipv6/ila/ila_lwt.c 	if (skb->protocol != htons(ETH_P_IPV6))
skb                50 net/ipv6/ila/ila_lwt.c 		ila_update_ipv6_locator(skb,
skb                58 net/ipv6/ila/ila_lwt.c 		return orig_dst->lwtstate->orig_output(net, sk, skb);
skb                63 net/ipv6/ila/ila_lwt.c 		struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb                93 net/ipv6/ila/ila_lwt.c 	skb_dst_set(skb, dst);
skb                94 net/ipv6/ila/ila_lwt.c 	return dst_output(net, sk, skb);
skb                97 net/ipv6/ila/ila_lwt.c 	kfree_skb(skb);
skb               101 net/ipv6/ila/ila_lwt.c static int ila_input(struct sk_buff *skb)
skb               103 net/ipv6/ila/ila_lwt.c 	struct dst_entry *dst = skb_dst(skb);
skb               106 net/ipv6/ila/ila_lwt.c 	if (skb->protocol != htons(ETH_P_IPV6))
skb               110 net/ipv6/ila/ila_lwt.c 		ila_update_ipv6_locator(skb,
skb               114 net/ipv6/ila/ila_lwt.c 	return dst->lwtstate->orig_input(skb);
skb               117 net/ipv6/ila/ila_lwt.c 	kfree_skb(skb);
skb               262 net/ipv6/ila/ila_lwt.c static int ila_fill_encap_info(struct sk_buff *skb,
skb               268 net/ipv6/ila/ila_lwt.c 	if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator.v64,
skb               272 net/ipv6/ila/ila_lwt.c 	if (nla_put_u8(skb, ILA_ATTR_CSUM_MODE, (__force u8)p->csum_mode))
skb               275 net/ipv6/ila/ila_lwt.c 	if (nla_put_u8(skb, ILA_ATTR_IDENT_TYPE, (__force u8)p->ident_type))
skb               278 net/ipv6/ila/ila_lwt.c 	if (nla_put_u8(skb, ILA_ATTR_HOOK_TYPE,
skb               184 net/ipv6/ila/ila_xlat.c static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila);
skb               188 net/ipv6/ila/ila_xlat.c 	     struct sk_buff *skb,
skb               191 net/ipv6/ila/ila_xlat.c 	ila_xlat_addr(skb, false);
skb               345 net/ipv6/ila/ila_xlat.c int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
skb               358 net/ipv6/ila/ila_xlat.c int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
skb               379 net/ipv6/ila/ila_xlat.c int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
skb               442 net/ipv6/ila/ila_xlat.c 			 struct sk_buff *skb, u8 cmd)
skb               446 net/ipv6/ila/ila_xlat.c 	hdr = genlmsg_put(skb, portid, seq, &ila_nl_family, flags, cmd);
skb               450 net/ipv6/ila/ila_xlat.c 	if (ila_fill_info(ila, skb) < 0)
skb               453 net/ipv6/ila/ila_xlat.c 	genlmsg_end(skb, hdr);
skb               457 net/ipv6/ila/ila_xlat.c 	genlmsg_cancel(skb, hdr);
skb               461 net/ipv6/ila/ila_xlat.c int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
skb               507 net/ipv6/ila/ila_xlat.c 	struct net *net = sock_net(cb->skb->sk);
skb               534 net/ipv6/ila/ila_xlat.c int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               581 net/ipv6/ila/ila_xlat.c 			ret =  ila_dump_info(ila, NETLINK_CB(cb->skb).portid,
skb               583 net/ipv6/ila/ila_xlat.c 					     skb, ILA_CMD_GET);
skb               597 net/ipv6/ila/ila_xlat.c 	ret = (skb->len ? : ret);
skb               633 net/ipv6/ila/ila_xlat.c static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
skb               636 net/ipv6/ila/ila_xlat.c 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               637 net/ipv6/ila/ila_xlat.c 	struct net *net = dev_net(skb->dev);
skb               650 net/ipv6/ila/ila_xlat.c 	ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
skb               652 net/ipv6/ila/ila_xlat.c 		ila_update_ipv6_locator(skb, &ila->xp.ip, sir2ila);
skb               114 net/ipv6/inet6_connection_sock.c int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused)
skb               125 net/ipv6/inet6_connection_sock.c 		kfree_skb(skb);
skb               130 net/ipv6/inet6_connection_sock.c 	skb_dst_set_noref(skb, dst);
skb               135 net/ipv6/inet6_connection_sock.c 	res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
skb               117 net/ipv6/inet6_hashtables.c 		struct sk_buff *skb, int doff,
skb               122 net/ipv6/inet6_hashtables.c 	bool exact_dif = inet6_exact_dif_match(net, skb);
skb               137 net/ipv6/inet6_hashtables.c 							       skb, doff);
skb               151 net/ipv6/inet6_hashtables.c 		struct sk_buff *skb, int doff,
skb               163 net/ipv6/inet6_hashtables.c 	result = inet6_lhash2_lookup(net, ilb2, skb, doff,
skb               173 net/ipv6/inet6_hashtables.c 	result = inet6_lhash2_lookup(net, ilb2, skb, doff,
skb               184 net/ipv6/inet6_hashtables.c 			  struct sk_buff *skb, int doff,
skb               192 net/ipv6/inet6_hashtables.c 	sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
skb                65 net/ipv6/ip6_checksum.c int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
skb                69 net/ipv6/ip6_checksum.c 	UDP_SKB_CB(skb)->partial_cov = 0;
skb                70 net/ipv6/ip6_checksum.c 	UDP_SKB_CB(skb)->cscov = skb->len;
skb                73 net/ipv6/ip6_checksum.c 		err = udplite_checksum_init(skb, uh);
skb                77 net/ipv6/ip6_checksum.c 		if (UDP_SKB_CB(skb)->partial_cov) {
skb                78 net/ipv6/ip6_checksum.c 			skb->csum = ip6_compute_pseudo(skb, proto);
skb                91 net/ipv6/ip6_checksum.c 	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
skb                96 net/ipv6/ip6_checksum.c 	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
skb                98 net/ipv6/ip6_checksum.c 		if (skb->csum_complete_sw)
skb               105 net/ipv6/ip6_checksum.c 		skb_checksum_complete_unset(skb);
skb               115 net/ipv6/ip6_checksum.c void udp6_set_csum(bool nocheck, struct sk_buff *skb,
skb               119 net/ipv6/ip6_checksum.c 	struct udphdr *uh = udp_hdr(skb);
skb               123 net/ipv6/ip6_checksum.c 	else if (skb_is_gso(skb))
skb               125 net/ipv6/ip6_checksum.c 	else if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               127 net/ipv6/ip6_checksum.c 		uh->check = udp_v6_check(len, saddr, daddr, lco_csum(skb));
skb               131 net/ipv6/ip6_checksum.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb               132 net/ipv6/ip6_checksum.c 		skb->csum_start = skb_transport_header(skb) - skb->head;
skb               133 net/ipv6/ip6_checksum.c 		skb->csum_offset = offsetof(struct udphdr, check);
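udp6_set_csum() either arms checksum offload (CHECKSUM_PARTIAL) or computes the full checksum in software: IPv6 pseudo-header, then the UDP header and payload, with a zero result transmitted as 0xffff because zero means "no checksum" and that is forbidden over IPv6. A software-only sketch of the full computation, byte-at-a-time; the UDP header's check field must be zero on input, and the result should be stored with htons():

#include <stdint.h>
#include <stddef.h>
#include <netinet/ip6.h>

static uint32_t sum16(const void *data, size_t len, uint32_t sum)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* Pseudo-header (src, dst, upper-layer length, next header = 17),
 * then UDP header plus payload of 'len' bytes starting at 'udp'. */
static uint16_t udp6_checksum(const struct in6_addr *src,
			      const struct in6_addr *dst,
			      const uint8_t *udp, size_t len)
{
	uint32_t sum = 0;

	sum = sum16(src, sizeof(*src), sum);
	sum = sum16(dst, sizeof(*dst), sum);
	sum += (uint32_t)len;		/* 32-bit length field */
	sum += 17;			/* next header = UDP */
	sum = sum16(udp, len, sum);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	sum = ~sum & 0xffff;
	return sum ? (uint16_t)sum : 0xffff;	/* 0 is reserved */
}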
skb               312 net/ipv6/ip6_fib.c 				   const struct sk_buff *skb,
skb               317 net/ipv6/ip6_fib.c 	rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags);
skb               497 net/ipv6/ip6_fib.c 	struct net *net = sock_net(cb->skb->sk);
skb               518 net/ipv6/ip6_fib.c static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
skb               521 net/ipv6/ip6_fib.c 	struct net *net = sock_net(skb->sk);
skb               563 net/ipv6/ip6_fib.c static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
skb               568 net/ipv6/ip6_fib.c 	struct net *net = sock_net(skb->sk);
skb               608 net/ipv6/ip6_fib.c 	arg.skb = skb;
skb               624 net/ipv6/ip6_fib.c 			res = fib6_dump_table(tb, skb, cb);
skb               641 net/ipv6/ip6_fib.c 			res = fib6_dump_table(tb, skb, cb);
skb               653 net/ipv6/ip6_fib.c 	res = res < 0 ? res : skb->len;
skb               421 net/ipv6/ip6_gre.c static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               424 net/ipv6/ip6_gre.c 	struct net *net = dev_net(skb->dev);
skb               429 net/ipv6/ip6_gre.c 	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
skb               433 net/ipv6/ip6_gre.c 	ipv6h = (const struct ipv6hdr *)skb->data;
skb               434 net/ipv6/ip6_gre.c 	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
skb               458 net/ipv6/ip6_gre.c 			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
skb               461 net/ipv6/ip6_gre.c 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
skb               472 net/ipv6/ip6_gre.c 		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
skb               475 net/ipv6/ip6_gre.c 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
skb               489 net/ipv6/ip6_gre.c static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
skb               494 net/ipv6/ip6_gre.c 	ipv6h = ipv6_hdr(skb);
skb               495 net/ipv6/ip6_gre.c 	tunnel = ip6gre_tunnel_lookup(skb->dev,
skb               507 net/ipv6/ip6_gre.c 			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
skb               511 net/ipv6/ip6_gre.c 			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
skb               513 net/ipv6/ip6_gre.c 			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
skb               522 net/ipv6/ip6_gre.c static int ip6erspan_rcv(struct sk_buff *skb,
skb               532 net/ipv6/ip6_gre.c 	ipv6h = ipv6_hdr(skb);
skb               533 net/ipv6/ip6_gre.c 	ershdr = (struct erspan_base_hdr *)skb->data;
skb               536 net/ipv6/ip6_gre.c 	tunnel = ip6gre_tunnel_lookup(skb->dev,
skb               542 net/ipv6/ip6_gre.c 		if (unlikely(!pskb_may_pull(skb, len)))
skb               545 net/ipv6/ip6_gre.c 		if (__iptunnel_pull_header(skb, len,
skb               562 net/ipv6/ip6_gre.c 			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
skb               571 net/ipv6/ip6_gre.c 			gh = skb_network_header(skb) +
skb               572 net/ipv6/ip6_gre.c 			     skb_network_header_len(skb);
skb               584 net/ipv6/ip6_gre.c 			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
skb               587 net/ipv6/ip6_gre.c 			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
skb               596 net/ipv6/ip6_gre.c static int gre_rcv(struct sk_buff *skb)
skb               602 net/ipv6/ip6_gre.c 	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
skb               606 net/ipv6/ip6_gre.c 	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
skb               611 net/ipv6/ip6_gre.c 		if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
skb               616 net/ipv6/ip6_gre.c 	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
skb               620 net/ipv6/ip6_gre.c 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
skb               622 net/ipv6/ip6_gre.c 	kfree_skb(skb);
skb               626 net/ipv6/ip6_gre.c static int gre_handle_offloads(struct sk_buff *skb, bool csum)
skb               628 net/ipv6/ip6_gre.c 	return iptunnel_handle_offloads(skb,
skb               632 net/ipv6/ip6_gre.c static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
skb               637 net/ipv6/ip6_gre.c 	const struct iphdr *iph = ip_hdr(skb);
skb               651 net/ipv6/ip6_gre.c 		fl6->flowi6_mark = skb->mark;
skb               658 net/ipv6/ip6_gre.c static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
skb               667 net/ipv6/ip6_gre.c 	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
skb               669 net/ipv6/ip6_gre.c 	ipv6h = ipv6_hdr(skb);
skb               674 net/ipv6/ip6_gre.c 		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
skb               676 net/ipv6/ip6_gre.c 			icmpv6_send(skb, ICMPV6_PARAMPROB,
skb               696 net/ipv6/ip6_gre.c 		fl6->flowi6_mark = skb->mark;
skb               705 net/ipv6/ip6_gre.c static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
skb               714 net/ipv6/ip6_gre.c 		IPCB(skb)->flags = 0;
skb               717 net/ipv6/ip6_gre.c 		fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
skb               721 net/ipv6/ip6_gre.c 	if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
skb               732 net/ipv6/ip6_gre.c 		tun_info = skb_tunnel_info(skb);
skb               750 net/ipv6/ip6_gre.c 		gre_build_header(skb, tunnel->tun_hlen,
skb               760 net/ipv6/ip6_gre.c 		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
skb               765 net/ipv6/ip6_gre.c 	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
skb               769 net/ipv6/ip6_gre.c static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
skb               778 net/ipv6/ip6_gre.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb               781 net/ipv6/ip6_gre.c 		prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
skb               784 net/ipv6/ip6_gre.c 	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
skb               788 net/ipv6/ip6_gre.c 	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
skb               789 net/ipv6/ip6_gre.c 			  skb->protocol);
skb               793 net/ipv6/ip6_gre.c 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
skb               801 net/ipv6/ip6_gre.c static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
skb               804 net/ipv6/ip6_gre.c 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               815 net/ipv6/ip6_gre.c 	    prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
skb               818 net/ipv6/ip6_gre.c 	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
skb               821 net/ipv6/ip6_gre.c 	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
skb               822 net/ipv6/ip6_gre.c 			  &mtu, skb->protocol);
skb               825 net/ipv6/ip6_gre.c 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb               852 net/ipv6/ip6_gre.c static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
skb               866 net/ipv6/ip6_gre.c 	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
skb               870 net/ipv6/ip6_gre.c 	err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);
skb               875 net/ipv6/ip6_gre.c static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
skb               882 net/ipv6/ip6_gre.c 	if (!pskb_inet_may_pull(skb))
skb               888 net/ipv6/ip6_gre.c 	switch (skb->protocol) {
skb               890 net/ipv6/ip6_gre.c 		ret = ip6gre_xmit_ipv4(skb, dev);
skb               893 net/ipv6/ip6_gre.c 		ret = ip6gre_xmit_ipv6(skb, dev);
skb               896 net/ipv6/ip6_gre.c 		ret = ip6gre_xmit_other(skb, dev);
skb               908 net/ipv6/ip6_gre.c 	kfree_skb(skb);
skb               912 net/ipv6/ip6_gre.c static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
skb               916 net/ipv6/ip6_gre.c 	struct dst_entry *dst = skb_dst(skb);
skb               928 net/ipv6/ip6_gre.c 	if (!pskb_inet_may_pull(skb))
skb               934 net/ipv6/ip6_gre.c 	if (gre_handle_offloads(skb, false))
skb               937 net/ipv6/ip6_gre.c 	if (skb->len > dev->mtu + dev->hard_header_len) {
skb               938 net/ipv6/ip6_gre.c 		pskb_trim(skb, dev->mtu + dev->hard_header_len);
skb               942 net/ipv6/ip6_gre.c 	nhoff = skb_network_header(skb) - skb_mac_header(skb);
skb               943 net/ipv6/ip6_gre.c 	if (skb->protocol == htons(ETH_P_IP) &&
skb               944 net/ipv6/ip6_gre.c 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
skb               947 net/ipv6/ip6_gre.c 	thoff = skb_transport_header(skb) - skb_mac_header(skb);
skb               948 net/ipv6/ip6_gre.c 	if (skb->protocol == htons(ETH_P_IPV6) &&
skb               949 net/ipv6/ip6_gre.c 	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
skb               952 net/ipv6/ip6_gre.c 	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
skb               956 net/ipv6/ip6_gre.c 	IPCB(skb)->flags = 0;
skb               967 net/ipv6/ip6_gre.c 		tun_info = skb_tunnel_info(skb);
skb               989 net/ipv6/ip6_gre.c 			erspan_build_header(skb,
skb               994 net/ipv6/ip6_gre.c 			erspan_build_header_v2(skb,
skb              1003 net/ipv6/ip6_gre.c 		switch (skb->protocol) {
skb              1005 net/ipv6/ip6_gre.c 			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb              1006 net/ipv6/ip6_gre.c 			prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
skb              1010 net/ipv6/ip6_gre.c 			if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
skb              1012 net/ipv6/ip6_gre.c 			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
skb              1022 net/ipv6/ip6_gre.c 			erspan_build_header(skb, ntohl(t->parms.o_key),
skb              1026 net/ipv6/ip6_gre.c 			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
skb              1039 net/ipv6/ip6_gre.c 	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
skb              1043 net/ipv6/ip6_gre.c 		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
skb              1045 net/ipv6/ip6_gre.c 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
skb              1050 net/ipv6/ip6_gre.c 			if (skb->protocol == htons(ETH_P_IP))
skb              1051 net/ipv6/ip6_gre.c 				icmp_send(skb, ICMP_DEST_UNREACH,
skb              1054 net/ipv6/ip6_gre.c 				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb              1065 net/ipv6/ip6_gre.c 	kfree_skb(skb);
skb              1333 net/ipv6/ip6_gre.c static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
skb              1341 net/ipv6/ip6_gre.c 	ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
skb              1342 net/ipv6/ip6_gre.c 	ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
skb              2104 net/ipv6/ip6_gre.c static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              2114 net/ipv6/ip6_gre.c 		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
skb              2118 net/ipv6/ip6_gre.c 			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
skb              2121 net/ipv6/ip6_gre.c 			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
skb              2123 net/ipv6/ip6_gre.c 			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
skb              2128 net/ipv6/ip6_gre.c 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
skb              2129 net/ipv6/ip6_gre.c 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
skb              2131 net/ipv6/ip6_gre.c 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
skb              2133 net/ipv6/ip6_gre.c 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
skb              2134 net/ipv6/ip6_gre.c 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
skb              2135 net/ipv6/ip6_gre.c 	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
skb              2136 net/ipv6/ip6_gre.c 	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
skb              2137 net/ipv6/ip6_gre.c 	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
skb              2138 net/ipv6/ip6_gre.c 	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
skb              2139 net/ipv6/ip6_gre.c 	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
skb              2140 net/ipv6/ip6_gre.c 	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
skb              2141 net/ipv6/ip6_gre.c 	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
skb              2144 net/ipv6/ip6_gre.c 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
skb              2146 net/ipv6/ip6_gre.c 	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
skb              2148 net/ipv6/ip6_gre.c 	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
skb              2150 net/ipv6/ip6_gre.c 	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
skb              2155 net/ipv6/ip6_gre.c 		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
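gre_build_header(), called throughout the ip6_gre.c transmit paths above, writes the GRE header in front of the payload: a flags word, the inner protocol, then optional checksum, key, and sequence fields in RFC 2890 order. A standalone builder under those assumptions; flag constants are inlined host-order values, and the checksum field is left zero because real code computes it over the header plus payload:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

#define GRE_F_CSUM	0x8000
#define GRE_F_KEY	0x2000
#define GRE_F_SEQ	0x1000

/* 'buf' must hold 4 bytes plus 4 per optional field.  Returns the
 * header length written. */
static size_t gre_build(uint8_t *buf, uint16_t flags, uint16_t proto,
			uint32_t key, uint32_t seq)
{
	uint8_t *p = buf;
	uint16_t w;

	w = htons(flags);
	memcpy(p, &w, 2); p += 2;
	w = htons(proto);		/* e.g. 0x86DD for IPv6 inside */
	memcpy(p, &w, 2); p += 2;
	if (flags & GRE_F_CSUM) {	/* checksum + reserved word */
		memset(p, 0, 4); p += 4;
	}
	if (flags & GRE_F_KEY) {
		uint32_t k = htonl(key);
		memcpy(p, &k, 4); p += 4;
	}
	if (flags & GRE_F_SEQ) {
		uint32_t s = htonl(seq);
		memcpy(p, &s, 4); p += 4;
	}
	return (size_t)(p - buf);
}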
skb                34 net/ipv6/ip6_icmp.c void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
skb                43 net/ipv6/ip6_icmp.c 	send(skb, type, code, info, NULL);
skb                50 net/ipv6/ip6_input.c 				struct sk_buff *skb)
skb                52 net/ipv6/ip6_input.c 	void (*edemux)(struct sk_buff *skb);
skb                54 net/ipv6/ip6_input.c 	if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
skb                57 net/ipv6/ip6_input.c 		ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
skb                60 net/ipv6/ip6_input.c 					udp_v6_early_demux, skb);
skb                62 net/ipv6/ip6_input.c 	if (!skb_valid_dst(skb))
skb                63 net/ipv6/ip6_input.c 		ip6_route_input(skb);
skb                66 net/ipv6/ip6_input.c int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                71 net/ipv6/ip6_input.c 	skb = l3mdev_ip6_rcv(skb);
skb                72 net/ipv6/ip6_input.c 	if (!skb)
skb                74 net/ipv6/ip6_input.c 	ip6_rcv_finish_core(net, sk, skb);
skb                76 net/ipv6/ip6_input.c 	return dst_input(skb);
skb                81 net/ipv6/ip6_input.c 	struct sk_buff *skb, *next;
skb                83 net/ipv6/ip6_input.c 	list_for_each_entry_safe(skb, next, head, list) {
skb                84 net/ipv6/ip6_input.c 		skb_list_del_init(skb);
skb                85 net/ipv6/ip6_input.c 		dst_input(skb);
skb                93 net/ipv6/ip6_input.c 	struct sk_buff *skb, *next;
skb                97 net/ipv6/ip6_input.c 	list_for_each_entry_safe(skb, next, head, list) {
skb               100 net/ipv6/ip6_input.c 		skb_list_del_init(skb);
skb               104 net/ipv6/ip6_input.c 		skb = l3mdev_ip6_rcv(skb);
skb               105 net/ipv6/ip6_input.c 		if (!skb)
skb               107 net/ipv6/ip6_input.c 		ip6_rcv_finish_core(net, sk, skb);
skb               108 net/ipv6/ip6_input.c 		dst = skb_dst(skb);
skb               117 net/ipv6/ip6_input.c 		list_add_tail(&skb->list, &sublist);
skb               123 net/ipv6/ip6_input.c static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
skb               130 net/ipv6/ip6_input.c 	if (skb->pkt_type == PACKET_OTHERHOST) {
skb               131 net/ipv6/ip6_input.c 		kfree_skb(skb);
skb               137 net/ipv6/ip6_input.c 	idev = __in6_dev_get(skb->dev);
skb               139 net/ipv6/ip6_input.c 	__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);
skb               141 net/ipv6/ip6_input.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
skb               147 net/ipv6/ip6_input.c 	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
skb               160 net/ipv6/ip6_input.c 	IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
skb               162 net/ipv6/ip6_input.c 	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
skb               165 net/ipv6/ip6_input.c 	hdr = ipv6_hdr(skb);
skb               173 net/ipv6/ip6_input.c 			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
skb               193 net/ipv6/ip6_input.c 	if (!(skb->pkt_type == PACKET_LOOPBACK ||
skb               204 net/ipv6/ip6_input.c 	    (skb->pkt_type == PACKET_BROADCAST ||
skb               205 net/ipv6/ip6_input.c 	     skb->pkt_type == PACKET_MULTICAST) &&
skb               236 net/ipv6/ip6_input.c 	skb->transport_header = skb->network_header + sizeof(*hdr);
skb               237 net/ipv6/ip6_input.c 	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
skb               243 net/ipv6/ip6_input.c 		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
skb               248 net/ipv6/ip6_input.c 		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
skb               252 net/ipv6/ip6_input.c 		hdr = ipv6_hdr(skb);
skb               256 net/ipv6/ip6_input.c 		if (ipv6_parse_hopopts(skb) < 0) {
skb               266 net/ipv6/ip6_input.c 	skb_orphan(skb);
skb               268 net/ipv6/ip6_input.c 	return skb;
skb               273 net/ipv6/ip6_input.c 	kfree_skb(skb);
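ip6_rcv_core() gates everything on the fixed header: the version nibble must be 6, payload_len must be consistent with the bytes actually received (payload_len == 0 is deferred to the hop-by-hop parser, since it may be a jumbogram), and the packet is trimmed to its declared length. The same checks over a flat buffer; unaligned access is glossed over here, where the kernel uses pskb_may_pull() and pskb_trim_rcsum():

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>
#include <netinet/ip6.h>

/* Returns the true packet length the caller should trim to, or -1
 * to drop. */
static long ip6_check_hdr(const uint8_t *pkt, size_t len)
{
	const struct ip6_hdr *h = (const struct ip6_hdr *)pkt;
	size_t pkt_len;

	if (len < sizeof(*h))
		return -1;
	if ((pkt[0] >> 4) != 6)			/* version field */
		return -1;
	pkt_len = ntohs(h->ip6_plen);
	if (pkt_len == 0)			/* maybe a jumbogram;
						   defer to hop-by-hop */
		return (long)len;
	if (pkt_len + sizeof(*h) > len)		/* truncated */
		return -1;
	return (long)(pkt_len + sizeof(*h));	/* trim padding */
}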
skb               277 net/ipv6/ip6_input.c int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
skb               279 net/ipv6/ip6_input.c 	struct net *net = dev_net(skb->dev);
skb               281 net/ipv6/ip6_input.c 	skb = ip6_rcv_core(skb, dev, net);
skb               282 net/ipv6/ip6_input.c 	if (skb == NULL)
skb               285 net/ipv6/ip6_input.c 		       net, NULL, skb, dev, NULL,
skb               303 net/ipv6/ip6_input.c 	struct sk_buff *skb, *next;
skb               307 net/ipv6/ip6_input.c 	list_for_each_entry_safe(skb, next, head, list) {
skb               308 net/ipv6/ip6_input.c 		struct net_device *dev = skb->dev;
skb               311 net/ipv6/ip6_input.c 		skb_list_del_init(skb);
skb               312 net/ipv6/ip6_input.c 		skb = ip6_rcv_core(skb, dev, net);
skb               313 net/ipv6/ip6_input.c 		if (skb == NULL)
skb               325 net/ipv6/ip6_input.c 		list_add_tail(&skb->list, &sublist);
skb               337 net/ipv6/ip6_input.c void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
skb               350 net/ipv6/ip6_input.c 	idev = ip6_dst_idev(skb_dst(skb));
skb               351 net/ipv6/ip6_input.c 	nhoff = IP6CB(skb)->nhoff;
skb               353 net/ipv6/ip6_input.c 		if (!pskb_pull(skb, skb_transport_offset(skb)))
skb               355 net/ipv6/ip6_input.c 		nexthdr = skb_network_header(skb)[nhoff];
skb               359 net/ipv6/ip6_input.c 	raw = raw6_local_deliver(skb, nexthdr);
skb               375 net/ipv6/ip6_input.c 			int sdif = inet6_sdif(skb);
skb               384 net/ipv6/ip6_input.c 			nf_reset_ct(skb);
skb               386 net/ipv6/ip6_input.c 			skb_postpull_rcsum(skb, skb_network_header(skb),
skb               387 net/ipv6/ip6_input.c 					   skb_network_header_len(skb));
skb               388 net/ipv6/ip6_input.c 			hdr = ipv6_hdr(skb);
skb               396 net/ipv6/ip6_input.c 				dev = skb->dev;
skb               402 net/ipv6/ip6_input.c 			    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
skb               406 net/ipv6/ip6_input.c 		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
skb               410 net/ipv6/ip6_input.c 				      skb);
skb               428 net/ipv6/ip6_input.c 			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
skb               431 net/ipv6/ip6_input.c 				icmpv6_send(skb, ICMPV6_PARAMPROB,
skb               434 net/ipv6/ip6_input.c 			kfree_skb(skb);
skb               437 net/ipv6/ip6_input.c 			consume_skb(skb);
skb               444 net/ipv6/ip6_input.c 	kfree_skb(skb);
skb               447 net/ipv6/ip6_input.c static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               450 net/ipv6/ip6_input.c 	ip6_protocol_deliver_rcu(net, skb, 0, false);
skb               457 net/ipv6/ip6_input.c int ip6_input(struct sk_buff *skb)
skb               460 net/ipv6/ip6_input.c 		       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
skb               465 net/ipv6/ip6_input.c int ip6_mc_input(struct sk_buff *skb)
skb               467 net/ipv6/ip6_input.c 	int sdif = inet6_sdif(skb);
skb               472 net/ipv6/ip6_input.c 	__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
skb               473 net/ipv6/ip6_input.c 			 __in6_dev_get_safely(skb->dev), IPSTATS_MIB_INMCAST,
skb               474 net/ipv6/ip6_input.c 			 skb->len);
skb               479 net/ipv6/ip6_input.c 		dev = dev_get_by_index_rcu(dev_net(skb->dev), sdif);
skb               482 net/ipv6/ip6_input.c 			kfree_skb(skb);
skb               486 net/ipv6/ip6_input.c 		dev = skb->dev;
skb               489 net/ipv6/ip6_input.c 	hdr = ipv6_hdr(skb);
skb               498 net/ipv6/ip6_input.c 	if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
skb               501 net/ipv6/ip6_input.c 	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
skb               507 net/ipv6/ip6_input.c 		struct inet6_skb_parm *opt = IP6CB(skb);
skb               526 net/ipv6/ip6_input.c 				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
skb               531 net/ipv6/ip6_input.c 				if (ipv6_is_mld(skb, nexthdr, offset))
skb               540 net/ipv6/ip6_input.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
skb               542 net/ipv6/ip6_input.c 			skb2 = skb;
skb               543 net/ipv6/ip6_input.c 			skb = NULL;
skb               553 net/ipv6/ip6_input.c 		ip6_input(skb);
skb               556 net/ipv6/ip6_input.c 		kfree_skb(skb);
skb                29 net/ipv6/ip6_offload.c #define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)	\
skb                31 net/ipv6/ip6_offload.c 	unlikely(gro_recursion_inc_test(skb)) ?			\
skb                32 net/ipv6/ip6_offload.c 		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
skb                33 net/ipv6/ip6_offload.c 		INDIRECT_CALL_L4(cb, f2, f1, head, skb);	\
skb                36 net/ipv6/ip6_offload.c static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
skb                54 net/ipv6/ip6_offload.c 		if (unlikely(!pskb_may_pull(skb, 8)))
skb                57 net/ipv6/ip6_offload.c 		opth = (void *)skb->data;
skb                60 net/ipv6/ip6_offload.c 		if (unlikely(!pskb_may_pull(skb, len)))
skb                63 net/ipv6/ip6_offload.c 		opth = (void *)skb->data;
skb                65 net/ipv6/ip6_offload.c 		__skb_pull(skb, len);
skb                71 net/ipv6/ip6_offload.c static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
skb                86 net/ipv6/ip6_offload.c 	skb_reset_network_header(skb);
skb                87 net/ipv6/ip6_offload.c 	nhoff = skb_network_header(skb) - skb_mac_header(skb);
skb                88 net/ipv6/ip6_offload.c 	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
skb                91 net/ipv6/ip6_offload.c 	encap = SKB_GSO_CB(skb)->encap_level > 0;
skb                93 net/ipv6/ip6_offload.c 		features &= skb->dev->hw_enc_features;
skb                94 net/ipv6/ip6_offload.c 	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
skb                96 net/ipv6/ip6_offload.c 	ipv6h = ipv6_hdr(skb);
skb                97 net/ipv6/ip6_offload.c 	__skb_pull(skb, sizeof(*ipv6h));
skb               100 net/ipv6/ip6_offload.c 	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
skb               102 net/ipv6/ip6_offload.c 	if (skb->encapsulation &&
skb               103 net/ipv6/ip6_offload.c 	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
skb               105 net/ipv6/ip6_offload.c 			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
skb               107 net/ipv6/ip6_offload.c 		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
skb               108 net/ipv6/ip6_offload.c 			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
skb               112 net/ipv6/ip6_offload.c 		skb_reset_transport_header(skb);
skb               113 net/ipv6/ip6_offload.c 		segs = ops->callbacks.gso_segment(skb, features);
skb               121 net/ipv6/ip6_offload.c 	for (skb = segs; skb; skb = skb->next) {
skb               122 net/ipv6/ip6_offload.c 		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
skb               123 net/ipv6/ip6_offload.c 		if (gso_partial && skb_is_gso(skb))
skb               124 net/ipv6/ip6_offload.c 			payload_len = skb_shinfo(skb)->gso_size +
skb               125 net/ipv6/ip6_offload.c 				      SKB_GSO_CB(skb)->data_offset +
skb               126 net/ipv6/ip6_offload.c 				      skb->head - (unsigned char *)(ipv6h + 1);
skb               128 net/ipv6/ip6_offload.c 			payload_len = skb->len - nhoff - sizeof(*ipv6h);
skb               130 net/ipv6/ip6_offload.c 		skb->network_header = (u8 *)ipv6h - skb->head;
skb               131 net/ipv6/ip6_offload.c 		skb_reset_mac_len(skb);
skb               134 net/ipv6/ip6_offload.c 			int err = ip6_find_1stfragopt(skb, &prevhdr);
skb               141 net/ipv6/ip6_offload.c 			if (skb->next)
skb               147 net/ipv6/ip6_offload.c 			skb_reset_inner_headers(skb);
skb               185 net/ipv6/ip6_offload.c 							 struct sk_buff *skb)
skb               197 net/ipv6/ip6_offload.c 	off = skb_gro_offset(skb);
skb               199 net/ipv6/ip6_offload.c 	iph = skb_gro_header_fast(skb, off);
skb               200 net/ipv6/ip6_offload.c 	if (skb_gro_header_hard(skb, hlen)) {
skb               201 net/ipv6/ip6_offload.c 		iph = skb_gro_header_slow(skb, hlen, off);
skb               206 net/ipv6/ip6_offload.c 	skb_set_network_header(skb, off);
skb               207 net/ipv6/ip6_offload.c 	skb_gro_pull(skb, sizeof(*iph));
skb               208 net/ipv6/ip6_offload.c 	skb_set_transport_header(skb, skb_gro_offset(skb));
skb               210 net/ipv6/ip6_offload.c 	flush += ntohs(iph->payload_len) != skb_gro_len(skb);
skb               216 net/ipv6/ip6_offload.c 		__pskb_pull(skb, skb_gro_offset(skb));
skb               217 net/ipv6/ip6_offload.c 		skb_gro_frag0_invalidate(skb);
skb               218 net/ipv6/ip6_offload.c 		proto = ipv6_gso_pull_exthdrs(skb, proto);
skb               219 net/ipv6/ip6_offload.c 		skb_gro_pull(skb, -skb_transport_offset(skb));
skb               220 net/ipv6/ip6_offload.c 		skb_reset_transport_header(skb);
skb               221 net/ipv6/ip6_offload.c 		__skb_push(skb, skb_gro_offset(skb));
skb               227 net/ipv6/ip6_offload.c 		iph = ipv6_hdr(skb);
skb               230 net/ipv6/ip6_offload.c 	NAPI_GRO_CB(skb)->proto = proto;
skb               233 net/ipv6/ip6_offload.c 	nlen = skb_network_header_len(skb);
skb               271 net/ipv6/ip6_offload.c 		if (NAPI_GRO_CB(skb)->is_atomic)
skb               275 net/ipv6/ip6_offload.c 	NAPI_GRO_CB(skb)->is_atomic = true;
skb               276 net/ipv6/ip6_offload.c 	NAPI_GRO_CB(skb)->flush |= flush;
skb               278 net/ipv6/ip6_offload.c 	skb_gro_postpull_rcsum(skb, iph, nlen);
skb               281 net/ipv6/ip6_offload.c 					 ops->callbacks.gro_receive, head, skb);
skb               287 net/ipv6/ip6_offload.c 	skb_gro_flush_final(skb, pp, flush);
skb               293 net/ipv6/ip6_offload.c 					      struct sk_buff *skb)
skb               297 net/ipv6/ip6_offload.c 	if (NAPI_GRO_CB(skb)->encap_mark) {
skb               298 net/ipv6/ip6_offload.c 		NAPI_GRO_CB(skb)->flush = 1;
skb               302 net/ipv6/ip6_offload.c 	NAPI_GRO_CB(skb)->encap_mark = 1;
skb               304 net/ipv6/ip6_offload.c 	return ipv6_gro_receive(head, skb);
skb               308 net/ipv6/ip6_offload.c 					  struct sk_buff *skb)
skb               312 net/ipv6/ip6_offload.c 	if (NAPI_GRO_CB(skb)->encap_mark) {
skb               313 net/ipv6/ip6_offload.c 		NAPI_GRO_CB(skb)->flush = 1;
skb               317 net/ipv6/ip6_offload.c 	NAPI_GRO_CB(skb)->encap_mark = 1;
skb               319 net/ipv6/ip6_offload.c 	return inet_gro_receive(head, skb);
skb               324 net/ipv6/ip6_offload.c INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
skb               327 net/ipv6/ip6_offload.c 	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
skb               330 net/ipv6/ip6_offload.c 	if (skb->encapsulation) {
skb               331 net/ipv6/ip6_offload.c 		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
skb               332 net/ipv6/ip6_offload.c 		skb_set_inner_network_header(skb, nhoff);
skb               335 net/ipv6/ip6_offload.c 	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
skb               344 net/ipv6/ip6_offload.c 			       udp6_gro_complete, skb, nhoff);
skb               352 net/ipv6/ip6_offload.c static int sit_gro_complete(struct sk_buff *skb, int nhoff)
skb               354 net/ipv6/ip6_offload.c 	skb->encapsulation = 1;
skb               355 net/ipv6/ip6_offload.c 	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
skb               356 net/ipv6/ip6_offload.c 	return ipv6_gro_complete(skb, nhoff);
skb               359 net/ipv6/ip6_offload.c static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
skb               361 net/ipv6/ip6_offload.c 	skb->encapsulation = 1;
skb               362 net/ipv6/ip6_offload.c 	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
skb               363 net/ipv6/ip6_offload.c 	return ipv6_gro_complete(skb, nhoff);
skb               366 net/ipv6/ip6_offload.c static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
skb               368 net/ipv6/ip6_offload.c 	skb->encapsulation = 1;
skb               369 net/ipv6/ip6_offload.c 	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
skb               370 net/ipv6/ip6_offload.c 	return inet_gro_complete(skb, nhoff);
skb               382 net/ipv6/ip6_offload.c static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
skb               385 net/ipv6/ip6_offload.c 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
skb               388 net/ipv6/ip6_offload.c 	return ipv6_gso_segment(skb, features);
skb               391 net/ipv6/ip6_offload.c static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
skb               394 net/ipv6/ip6_offload.c 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
skb               397 net/ipv6/ip6_offload.c 	return inet_gso_segment(skb, features);
skb               400 net/ipv6/ip6_offload.c static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
skb               403 net/ipv6/ip6_offload.c 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
skb               406 net/ipv6/ip6_offload.c 	return ipv6_gso_segment(skb, features);
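
ipv6_gso_pull_exthdrs() above repeatedly pulls an 8-byte header prefix, derives the option length from it, and advances until it reaches a non-extension nexthdr. A hedged stand-alone sketch of that walk over a plain byte buffer, covering only the common RFC 8200 (hdrlen + 1) * 8 length rule; AH and the fixed-size Fragment header, which encode length differently, are deliberately omitted:

#include <stdint.h>
#include <stdio.h>

static int is_ext(uint8_t nexthdr)
{
	return nexthdr == 0 /* hop-by-hop */ ||
	       nexthdr == 43 /* routing */ ||
	       nexthdr == 60 /* dest opts */;
}

/* Returns the upper-layer protocol and sets *off past all extension
 * headers, or -1 if the buffer is truncated (the pskb_may_pull()
 * failure case in the fragment above). */
static int pull_exthdrs(const uint8_t *buf, size_t len, size_t *off,
			uint8_t nexthdr)
{
	while (is_ext(nexthdr)) {
		size_t optlen;

		if (*off + 8 > len)
			return -1;
		optlen = ((size_t)buf[*off + 1] + 1) * 8;
		if (*off + optlen > len)
			return -1;
		nexthdr = buf[*off];	/* first byte: next header */
		*off += optlen;
	}
	return nexthdr;
}

int main(void)
{
	/* one dest-opts header: nexthdr = 6 (TCP), hdrlen = 0 => 8 bytes */
	uint8_t pkt[8] = { 6, 0 };
	size_t off = 0;
	int proto = pull_exthdrs(pkt, sizeof(pkt), &off, 60);

	printf("proto=%d payload off=%zu\n", proto, off);	/* 6, 8 */
	return 0;
}
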
skb                58 net/ipv6/ip6_output.c static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
skb                60 net/ipv6/ip6_output.c 	struct dst_entry *dst = skb_dst(skb);
skb                66 net/ipv6/ip6_output.c 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
skb                67 net/ipv6/ip6_output.c 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
skb                70 net/ipv6/ip6_output.c 		    ((mroute6_is_socket(net, skb) &&
skb                71 net/ipv6/ip6_output.c 		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
skb                72 net/ipv6/ip6_output.c 		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
skb                73 net/ipv6/ip6_output.c 					 &ipv6_hdr(skb)->saddr))) {
skb                74 net/ipv6/ip6_output.c 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
skb                84 net/ipv6/ip6_output.c 			if (ipv6_hdr(skb)->hop_limit == 0) {
skb                87 net/ipv6/ip6_output.c 				kfree_skb(skb);
skb                92 net/ipv6/ip6_output.c 		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
skb                94 net/ipv6/ip6_output.c 		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
skb                97 net/ipv6/ip6_output.c 			kfree_skb(skb);
skb               103 net/ipv6/ip6_output.c 		int res = lwtunnel_xmit(skb);
skb               110 net/ipv6/ip6_output.c 	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
skb               115 net/ipv6/ip6_output.c 		sock_confirm_neigh(skb, neigh);
skb               116 net/ipv6/ip6_output.c 		ret = neigh_output(neigh, skb, false);
skb               123 net/ipv6/ip6_output.c 	kfree_skb(skb);
skb               127 net/ipv6/ip6_output.c static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               131 net/ipv6/ip6_output.c 	if (skb_dst(skb)->xfrm) {
skb               132 net/ipv6/ip6_output.c 		IPCB(skb)->flags |= IPSKB_REROUTED;
skb               133 net/ipv6/ip6_output.c 		return dst_output(net, sk, skb);
skb               137 net/ipv6/ip6_output.c 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
skb               138 net/ipv6/ip6_output.c 	    dst_allfrag(skb_dst(skb)) ||
skb               139 net/ipv6/ip6_output.c 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
skb               140 net/ipv6/ip6_output.c 		return ip6_fragment(net, sk, skb, ip6_finish_output2);
skb               142 net/ipv6/ip6_output.c 		return ip6_finish_output2(net, sk, skb);
skb               145 net/ipv6/ip6_output.c static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               149 net/ipv6/ip6_output.c 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
skb               152 net/ipv6/ip6_output.c 		return __ip6_finish_output(net, sk, skb);
skb               154 net/ipv6/ip6_output.c 		return __ip6_finish_output(net, sk, skb) ? : ret;
skb               156 net/ipv6/ip6_output.c 		kfree_skb(skb);
skb               161 net/ipv6/ip6_output.c int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               163 net/ipv6/ip6_output.c 	struct net_device *dev = skb_dst(skb)->dev;
skb               164 net/ipv6/ip6_output.c 	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
skb               166 net/ipv6/ip6_output.c 	skb->protocol = htons(ETH_P_IPV6);
skb               167 net/ipv6/ip6_output.c 	skb->dev = dev;
skb               171 net/ipv6/ip6_output.c 		kfree_skb(skb);
skb               176 net/ipv6/ip6_output.c 			    net, sk, skb, NULL, dev,
skb               178 net/ipv6/ip6_output.c 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
skb               195 net/ipv6/ip6_output.c int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
skb               201 net/ipv6/ip6_output.c 	struct dst_entry *dst = skb_dst(skb);
skb               205 net/ipv6/ip6_output.c 	int seg_len = skb->len;
skb               213 net/ipv6/ip6_output.c 	if (unlikely(skb_headroom(skb) < head_room)) {
skb               214 net/ipv6/ip6_output.c 		struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
skb               216 net/ipv6/ip6_output.c 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb               218 net/ipv6/ip6_output.c 			kfree_skb(skb);
skb               221 net/ipv6/ip6_output.c 		if (skb->sk)
skb               222 net/ipv6/ip6_output.c 			skb_set_owner_w(skb2, skb->sk);
skb               223 net/ipv6/ip6_output.c 		consume_skb(skb);
skb               224 net/ipv6/ip6_output.c 		skb = skb2;
skb               231 net/ipv6/ip6_output.c 			ipv6_push_frag_opts(skb, opt, &proto);
skb               234 net/ipv6/ip6_output.c 			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
skb               238 net/ipv6/ip6_output.c 	skb_push(skb, sizeof(struct ipv6hdr));
skb               239 net/ipv6/ip6_output.c 	skb_reset_network_header(skb);
skb               240 net/ipv6/ip6_output.c 	hdr = ipv6_hdr(skb);
skb               250 net/ipv6/ip6_output.c 	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
skb               260 net/ipv6/ip6_output.c 	skb->protocol = htons(ETH_P_IPV6);
skb               261 net/ipv6/ip6_output.c 	skb->priority = priority;
skb               262 net/ipv6/ip6_output.c 	skb->mark = mark;
skb               265 net/ipv6/ip6_output.c 	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
skb               266 net/ipv6/ip6_output.c 		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb               267 net/ipv6/ip6_output.c 			      IPSTATS_MIB_OUT, skb->len);
skb               272 net/ipv6/ip6_output.c 		skb = l3mdev_ip6_out((struct sock *)sk, skb);
skb               273 net/ipv6/ip6_output.c 		if (unlikely(!skb))
skb               280 net/ipv6/ip6_output.c 			       net, (struct sock *)sk, skb, NULL, dst->dev,
skb               284 net/ipv6/ip6_output.c 	skb->dev = dst->dev;
skb               290 net/ipv6/ip6_output.c 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
skb               291 net/ipv6/ip6_output.c 	kfree_skb(skb);
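
The ip6_xmit() fragment above checks skb_headroom() against the space the IPv6 header, any options, and the link-layer header will need, and falls back to skb_realloc_headroom() plus ownership transfer when the buffer is too tight. A simplified userspace model of that check, using a hypothetical buf type with the same head/data split:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct buf {
	unsigned char *head;	/* start of allocation */
	unsigned char *data;	/* start of payload */
	size_t len;		/* payload length */
};

static size_t headroom(const struct buf *b)
{
	return (size_t)(b->data - b->head);
}

/* skb_realloc_headroom() analog: copy payload into a roomier buffer
 * and release the original (the consume_skb() step in the fragment). */
static struct buf *realloc_headroom(struct buf *b, size_t head_room)
{
	struct buf *n = malloc(sizeof(*n));

	if (!n)
		return NULL;
	n->head = malloc(head_room + b->len);
	if (!n->head) {
		free(n);
		return NULL;
	}
	n->data = n->head + head_room;
	n->len = b->len;
	memcpy(n->data, b->data, b->len);
	free(b->head);
	free(b);
	return n;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));
	size_t need = 40 /* ipv6hdr */ + 14 /* ethernet */;

	b->head = malloc(4 + 5);
	b->data = b->head + 4;		/* only 4 bytes of headroom */
	b->len = 5;
	memcpy(b->data, "hello", 5);

	if (headroom(b) < need)
		b = realloc_headroom(b, need);
	if (!b)
		return 1;
	printf("headroom now %zu\n", headroom(b));	/* 54 */
	free(b->head);
	free(b);
	return 0;
}
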
skb               296 net/ipv6/ip6_output.c static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
skb               306 net/ipv6/ip6_output.c 		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
skb               310 net/ipv6/ip6_output.c 			    !net_eq(sock_net(sk), dev_net(skb->dev))) {
skb               314 net/ipv6/ip6_output.c 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
skb               323 net/ipv6/ip6_output.c 		rawv6_rcv(last, skb);
skb               331 net/ipv6/ip6_output.c static int ip6_forward_proxy_check(struct sk_buff *skb)
skb               333 net/ipv6/ip6_output.c 	struct ipv6hdr *hdr = ipv6_hdr(skb);
skb               339 net/ipv6/ip6_output.c 		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
skb               348 net/ipv6/ip6_output.c 		if (!pskb_may_pull(skb, (skb_network_header(skb) +
skb               349 net/ipv6/ip6_output.c 					 offset + 1 - skb->data)))
skb               352 net/ipv6/ip6_output.c 		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
skb               376 net/ipv6/ip6_output.c 		dst_link_failure(skb);
skb               384 net/ipv6/ip6_output.c 				     struct sk_buff *skb)
skb               386 net/ipv6/ip6_output.c 	struct dst_entry *dst = skb_dst(skb);
skb               389 net/ipv6/ip6_output.c 	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
skb               392 net/ipv6/ip6_output.c 	if (skb->offload_l3_fwd_mark) {
skb               393 net/ipv6/ip6_output.c 		consume_skb(skb);
skb               398 net/ipv6/ip6_output.c 	skb->tstamp = 0;
skb               399 net/ipv6/ip6_output.c 	return dst_output(net, sk, skb);
skb               402 net/ipv6/ip6_output.c static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
skb               404 net/ipv6/ip6_output.c 	if (skb->len <= mtu)
skb               408 net/ipv6/ip6_output.c 	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
skb               411 net/ipv6/ip6_output.c 	if (skb->ignore_df)
skb               414 net/ipv6/ip6_output.c 	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
skb               420 net/ipv6/ip6_output.c int ip6_forward(struct sk_buff *skb)
skb               422 net/ipv6/ip6_output.c 	struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
skb               423 net/ipv6/ip6_output.c 	struct dst_entry *dst = skb_dst(skb);
skb               424 net/ipv6/ip6_output.c 	struct ipv6hdr *hdr = ipv6_hdr(skb);
skb               425 net/ipv6/ip6_output.c 	struct inet6_skb_parm *opt = IP6CB(skb);
skb               432 net/ipv6/ip6_output.c 	if (skb->pkt_type != PACKET_HOST)
skb               435 net/ipv6/ip6_output.c 	if (unlikely(skb->sk))
skb               438 net/ipv6/ip6_output.c 	if (skb_warn_if_lro(skb))
skb               441 net/ipv6/ip6_output.c 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
skb               446 net/ipv6/ip6_output.c 	skb_forward_csum(skb);
skb               462 net/ipv6/ip6_output.c 		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
skb               471 net/ipv6/ip6_output.c 		skb->dev = dst->dev;
skb               472 net/ipv6/ip6_output.c 		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
skb               475 net/ipv6/ip6_output.c 		kfree_skb(skb);
skb               481 net/ipv6/ip6_output.c 	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
skb               482 net/ipv6/ip6_output.c 		int proxied = ip6_forward_proxy_check(skb);
skb               484 net/ipv6/ip6_output.c 			return ip6_input(skb);
skb               491 net/ipv6/ip6_output.c 	if (!xfrm6_route_forward(skb)) {
skb               495 net/ipv6/ip6_output.c 	dst = skb_dst(skb);
skb               501 net/ipv6/ip6_output.c 	if (IP6CB(skb)->iif == dst->dev->ifindex &&
skb               502 net/ipv6/ip6_output.c 	    opt->srcrt == 0 && !skb_sec_path(skb)) {
skb               524 net/ipv6/ip6_output.c 			ndisc_send_redirect(skb, target);
skb               535 net/ipv6/ip6_output.c 			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
skb               545 net/ipv6/ip6_output.c 	if (ip6_pkt_too_big(skb, mtu)) {
skb               547 net/ipv6/ip6_output.c 		skb->dev = dst->dev;
skb               548 net/ipv6/ip6_output.c 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb               552 net/ipv6/ip6_output.c 		kfree_skb(skb);
skb               556 net/ipv6/ip6_output.c 	if (skb_cow(skb, dst->dev->hard_header_len)) {
skb               562 net/ipv6/ip6_output.c 	hdr = ipv6_hdr(skb);
skb               569 net/ipv6/ip6_output.c 		       net, NULL, skb, skb->dev, dst->dev,
skb               575 net/ipv6/ip6_output.c 	kfree_skb(skb);
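
ip6_pkt_too_big() above decides whether a forwarded packet must trigger ICMPV6_PKT_TOOBIG. A direct userspace restatement of its four checks, with the relevant skb and IP6CB fields flattened into a hypothetical struct:

#include <stdbool.h>
#include <stdio.h>

struct fwd_pkt {
	unsigned int len;
	unsigned int frag_max_size;	/* 0 if unset */
	bool ignore_df;
	bool is_gso;
	bool gso_fits_mtu;	/* skb_gso_validate_network_len() result */
};

static bool pkt_too_big(const struct fwd_pkt *p, unsigned int mtu)
{
	if (p->len <= mtu)
		return false;
	/* original fragments were already larger than this mtu */
	if (p->frag_max_size && p->frag_max_size > mtu)
		return true;
	if (p->ignore_df)
		return false;
	/* GSO that will be segmented below the mtu is fine as-is */
	if (p->is_gso && p->gso_fits_mtu)
		return false;
	return true;
}

int main(void)
{
	struct fwd_pkt p = { .len = 1500 };

	printf("too big at mtu 1280? %d\n", pkt_too_big(&p, 1280)); /* 1 */
	printf("too big at mtu 1500? %d\n", pkt_too_big(&p, 1500)); /* 0 */
	return 0;
}
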
skb               599 net/ipv6/ip6_output.c int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr,
skb               608 net/ipv6/ip6_output.c 	iter->tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
skb               612 net/ipv6/ip6_output.c 	iter->frag = skb_shinfo(skb)->frag_list;
skb               613 net/ipv6/ip6_output.c 	skb_frag_list_init(skb);
skb               620 net/ipv6/ip6_output.c 	__skb_pull(skb, hlen);
skb               621 net/ipv6/ip6_output.c 	fh = __skb_push(skb, sizeof(struct frag_hdr));
skb               622 net/ipv6/ip6_output.c 	__skb_push(skb, hlen);
skb               623 net/ipv6/ip6_output.c 	skb_reset_network_header(skb);
skb               624 net/ipv6/ip6_output.c 	memcpy(skb_network_header(skb), iter->tmp_hdr, hlen);
skb               631 net/ipv6/ip6_output.c 	first_len = skb_pagelen(skb);
skb               632 net/ipv6/ip6_output.c 	skb->data_len = first_len - skb_headlen(skb);
skb               633 net/ipv6/ip6_output.c 	skb->len = first_len;
skb               634 net/ipv6/ip6_output.c 	ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr));
skb               640 net/ipv6/ip6_output.c void ip6_fraglist_prepare(struct sk_buff *skb,
skb               653 net/ipv6/ip6_output.c 	iter->offset += skb->len - hlen - sizeof(struct frag_hdr);
skb               661 net/ipv6/ip6_output.c 	ip6_copy_metadata(frag, skb);
skb               665 net/ipv6/ip6_output.c void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu,
skb               676 net/ipv6/ip6_output.c 	state->left = skb->len - hlen;	/* Space per frame */
skb               686 net/ipv6/ip6_output.c struct sk_buff *ip6_frag_next(struct sk_buff *skb, struct ip6_frag_state *state)
skb               712 net/ipv6/ip6_output.c 	ip6_copy_metadata(frag, skb);
skb               724 net/ipv6/ip6_output.c 	if (skb->sk)
skb               725 net/ipv6/ip6_output.c 		skb_set_owner_w(frag, skb->sk);
skb               730 net/ipv6/ip6_output.c 	skb_copy_from_linear_data(skb, skb_network_header(frag), state->hlen);
skb               733 net/ipv6/ip6_output.c 	fragnexthdr_offset += prevhdr - skb_network_header(skb);
skb               746 net/ipv6/ip6_output.c 	BUG_ON(skb_copy_bits(skb, state->ptr, skb_transport_header(frag),
skb               762 net/ipv6/ip6_output.c int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb               766 net/ipv6/ip6_output.c 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
skb               767 net/ipv6/ip6_output.c 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
skb               768 net/ipv6/ip6_output.c 				inet6_sk(skb->sk) : NULL;
skb               771 net/ipv6/ip6_output.c 	ktime_t tstamp = skb->tstamp;
skb               776 net/ipv6/ip6_output.c 	err = ip6_find_1stfragopt(skb, &prevhdr);
skb               781 net/ipv6/ip6_output.c 	nexthdr_offset = prevhdr - skb_network_header(skb);
skb               783 net/ipv6/ip6_output.c 	mtu = ip6_skb_dst_mtu(skb);
skb               788 net/ipv6/ip6_output.c 	if (unlikely(!skb->ignore_df && skb->len > mtu))
skb               791 net/ipv6/ip6_output.c 	if (IP6CB(skb)->frag_max_size) {
skb               792 net/ipv6/ip6_output.c 		if (IP6CB(skb)->frag_max_size > mtu)
skb               796 net/ipv6/ip6_output.c 		mtu = IP6CB(skb)->frag_max_size;
skb               809 net/ipv6/ip6_output.c 	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
skb               810 net/ipv6/ip6_output.c 				    &ipv6_hdr(skb)->saddr);
skb               812 net/ipv6/ip6_output.c 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb               813 net/ipv6/ip6_output.c 	    (err = skb_checksum_help(skb)))
skb               816 net/ipv6/ip6_output.c 	prevhdr = skb_network_header(skb) + nexthdr_offset;
skb               818 net/ipv6/ip6_output.c 	if (skb_has_frag_list(skb)) {
skb               819 net/ipv6/ip6_output.c 		unsigned int first_len = skb_pagelen(skb);
skb               825 net/ipv6/ip6_output.c 		    skb_cloned(skb) ||
skb               826 net/ipv6/ip6_output.c 		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
skb               829 net/ipv6/ip6_output.c 		skb_walk_frags(skb, frag) {
skb               841 net/ipv6/ip6_output.c 			if (skb->sk) {
skb               842 net/ipv6/ip6_output.c 				frag->sk = skb->sk;
skb               845 net/ipv6/ip6_output.c 			skb->truesize -= frag->truesize;
skb               848 net/ipv6/ip6_output.c 		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
skb               857 net/ipv6/ip6_output.c 				ip6_fraglist_prepare(skb, &iter);
skb               859 net/ipv6/ip6_output.c 			skb->tstamp = tstamp;
skb               860 net/ipv6/ip6_output.c 			err = output(net, sk, skb);
skb               868 net/ipv6/ip6_output.c 			skb = ip6_fraglist_next(&iter);
skb               886 net/ipv6/ip6_output.c 		skb_walk_frags(skb, frag2) {
skb               891 net/ipv6/ip6_output.c 			skb->truesize += frag2->truesize;
skb               900 net/ipv6/ip6_output.c 	ip6_frag_init(skb, hlen, mtu, rt->dst.dev->needed_tailroom,
skb               909 net/ipv6/ip6_output.c 		frag = ip6_frag_next(skb, &state);
skb               923 net/ipv6/ip6_output.c 		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb               926 net/ipv6/ip6_output.c 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb               928 net/ipv6/ip6_output.c 	consume_skb(skb);
skb               932 net/ipv6/ip6_output.c 	if (skb->sk && dst_allfrag(skb_dst(skb)))
skb               933 net/ipv6/ip6_output.c 		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
skb               935 net/ipv6/ip6_output.c 	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb               939 net/ipv6/ip6_output.c 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb               941 net/ipv6/ip6_output.c 	kfree_skb(skb);
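
In the ip6_fragment() slow path above, every fragment repeats the unfragmentable part (hlen) plus an 8-byte Fragment header, and all fragments except the last must carry a data chunk that is a multiple of 8 bytes (RFC 8200). A small arithmetic sketch of the resulting offset/length plan, assuming an mtu of 1280 and a 3000-byte payload past the unfragmentable part:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1280, hlen = 40, fraghdr = 8;
	unsigned int left = 3000;	/* payload past hlen */
	unsigned int max_data = (mtu - hlen - fraghdr) & ~7u;
	unsigned int offset = 0;

	while (left > 0) {
		unsigned int data = left > max_data ? max_data : left;
		int more = left > data;	/* the M (more fragments) flag */

		printf("frag: offset=%u len=%u M=%d\n",
		       offset, hlen + fraghdr + data, more);
		offset += data;
		left -= data;
	}
	return 0;
}

With these inputs the plan is two 1280-byte fragments carrying 1232 data bytes each and a final fragment carrying the remaining 536, matching the per-frame budget that ip6_frag_init() computes in the fragment above.
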
skb              1214 net/ipv6/ip6_output.c 				struct sk_buff *skb,
skb              1219 net/ipv6/ip6_output.c 		if (!skb) {
skb              1319 net/ipv6/ip6_output.c 					 int len, int odd, struct sk_buff *skb),
skb              1323 net/ipv6/ip6_output.c 	struct sk_buff *skb, *skb_prev = NULL;
skb              1340 net/ipv6/ip6_output.c 	skb = skb_peek_tail(queue);
skb              1341 net/ipv6/ip6_output.c 	if (!skb) {
skb              1404 net/ipv6/ip6_output.c 		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
skb              1407 net/ipv6/ip6_output.c 		extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
skb              1413 net/ipv6/ip6_output.c 			skb_zcopy_set(skb, uarg, &extra_uref);
skb              1434 net/ipv6/ip6_output.c 	if (!skb)
skb              1439 net/ipv6/ip6_output.c 		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
skb              1441 net/ipv6/ip6_output.c 			copy = maxfraglen - skb->len;
skb              1452 net/ipv6/ip6_output.c 			if (skb)
skb              1453 net/ipv6/ip6_output.c 				fraggap = skb->len - maxfraglen;
skb              1457 net/ipv6/ip6_output.c 			if (!skb || !skb_prev)
skb              1459 net/ipv6/ip6_output.c 						    fragheaderlen, skb, rt,
skb              1462 net/ipv6/ip6_output.c 			skb_prev = skb;
skb              1511 net/ipv6/ip6_output.c 				skb = sock_alloc_send_skb(sk,
skb              1515 net/ipv6/ip6_output.c 				skb = NULL;
skb              1518 net/ipv6/ip6_output.c 					skb = alloc_skb(alloclen + hh_len,
skb              1520 net/ipv6/ip6_output.c 				if (unlikely(!skb))
skb              1523 net/ipv6/ip6_output.c 			if (!skb)
skb              1528 net/ipv6/ip6_output.c 			skb->protocol = htons(ETH_P_IPV6);
skb              1529 net/ipv6/ip6_output.c 			skb->ip_summed = csummode;
skb              1530 net/ipv6/ip6_output.c 			skb->csum = 0;
skb              1532 net/ipv6/ip6_output.c 			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
skb              1538 net/ipv6/ip6_output.c 			data = skb_put(skb, fraglen - pagedlen);
skb              1539 net/ipv6/ip6_output.c 			skb_set_network_header(skb, exthdrlen);
skb              1541 net/ipv6/ip6_output.c 			skb->transport_header = (skb->network_header +
skb              1544 net/ipv6/ip6_output.c 				skb->csum = skb_copy_and_csum_bits(
skb              1548 net/ipv6/ip6_output.c 							  skb->csum);
skb              1554 net/ipv6/ip6_output.c 				    copy, fraggap, skb) < 0) {
skb              1556 net/ipv6/ip6_output.c 				kfree_skb(skb);
skb              1567 net/ipv6/ip6_output.c 			skb_shinfo(skb)->tx_flags = cork->tx_flags;
skb              1569 net/ipv6/ip6_output.c 			skb_shinfo(skb)->tskey = tskey;
skb              1571 net/ipv6/ip6_output.c 			skb_zcopy_set(skb, uarg, &extra_uref);
skb              1574 net/ipv6/ip6_output.c 				skb_set_dst_pending_confirm(skb, 1);
skb              1579 net/ipv6/ip6_output.c 			if (!skb->destructor) {
skb              1580 net/ipv6/ip6_output.c 				skb->destructor = sock_wfree;
skb              1581 net/ipv6/ip6_output.c 				skb->sk = sk;
skb              1582 net/ipv6/ip6_output.c 				wmem_alloc_delta += skb->truesize;
skb              1584 net/ipv6/ip6_output.c 			__skb_queue_tail(queue, skb);
skb              1592 net/ipv6/ip6_output.c 		    skb_tailroom(skb) >= copy) {
skb              1595 net/ipv6/ip6_output.c 			off = skb->len;
skb              1596 net/ipv6/ip6_output.c 			if (getfrag(from, skb_put(skb, copy),
skb              1597 net/ipv6/ip6_output.c 						offset, copy, off, skb) < 0) {
skb              1598 net/ipv6/ip6_output.c 				__skb_trim(skb, off);
skb              1603 net/ipv6/ip6_output.c 			int i = skb_shinfo(skb)->nr_frags;
skb              1609 net/ipv6/ip6_output.c 			if (!skb_can_coalesce(skb, i, pfrag->page,
skb              1615 net/ipv6/ip6_output.c 				__skb_fill_page_desc(skb, i, pfrag->page,
skb              1617 net/ipv6/ip6_output.c 				skb_shinfo(skb)->nr_frags = ++i;
skb              1623 net/ipv6/ip6_output.c 				    offset, copy, skb->len, skb) < 0)
skb              1627 net/ipv6/ip6_output.c 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb              1628 net/ipv6/ip6_output.c 			skb->len += copy;
skb              1629 net/ipv6/ip6_output.c 			skb->data_len += copy;
skb              1630 net/ipv6/ip6_output.c 			skb->truesize += copy;
skb              1633 net/ipv6/ip6_output.c 			err = skb_zerocopy_iter_dgram(skb, from, copy);
skb              1658 net/ipv6/ip6_output.c 				int odd, struct sk_buff *skb),
skb              1718 net/ipv6/ip6_output.c 	struct sk_buff *skb, *tmp_skb;
skb              1729 net/ipv6/ip6_output.c 	skb = __skb_dequeue(queue);
skb              1730 net/ipv6/ip6_output.c 	if (!skb)
skb              1732 net/ipv6/ip6_output.c 	tail_skb = &(skb_shinfo(skb)->frag_list);
skb              1735 net/ipv6/ip6_output.c 	if (skb->data < skb_network_header(skb))
skb              1736 net/ipv6/ip6_output.c 		__skb_pull(skb, skb_network_offset(skb));
skb              1738 net/ipv6/ip6_output.c 		__skb_pull(tmp_skb, skb_network_header_len(skb));
skb              1741 net/ipv6/ip6_output.c 		skb->len += tmp_skb->len;
skb              1742 net/ipv6/ip6_output.c 		skb->data_len += tmp_skb->len;
skb              1743 net/ipv6/ip6_output.c 		skb->truesize += tmp_skb->truesize;
skb              1749 net/ipv6/ip6_output.c 	skb->ignore_df = ip6_sk_ignore_df(sk);
skb              1752 net/ipv6/ip6_output.c 	__skb_pull(skb, skb_network_header_len(skb));
skb              1754 net/ipv6/ip6_output.c 		ipv6_push_frag_opts(skb, opt, &proto);
skb              1756 net/ipv6/ip6_output.c 		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr);
skb              1758 net/ipv6/ip6_output.c 	skb_push(skb, sizeof(struct ipv6hdr));
skb              1759 net/ipv6/ip6_output.c 	skb_reset_network_header(skb);
skb              1760 net/ipv6/ip6_output.c 	hdr = ipv6_hdr(skb);
skb              1763 net/ipv6/ip6_output.c 		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
skb              1770 net/ipv6/ip6_output.c 	skb->priority = sk->sk_priority;
skb              1771 net/ipv6/ip6_output.c 	skb->mark = cork->base.mark;
skb              1773 net/ipv6/ip6_output.c 	skb->tstamp = cork->base.transmit_time;
skb              1775 net/ipv6/ip6_output.c 	skb_dst_set(skb, dst_clone(&rt->dst));
skb              1776 net/ipv6/ip6_output.c 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
skb              1778 net/ipv6/ip6_output.c 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
skb              1780 net/ipv6/ip6_output.c 		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
skb              1786 net/ipv6/ip6_output.c 	return skb;
skb              1789 net/ipv6/ip6_output.c int ip6_send_skb(struct sk_buff *skb)
skb              1791 net/ipv6/ip6_output.c 	struct net *net = sock_net(skb->sk);
skb              1792 net/ipv6/ip6_output.c 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
skb              1795 net/ipv6/ip6_output.c 	err = ip6_local_out(net, skb->sk, skb);
skb              1809 net/ipv6/ip6_output.c 	struct sk_buff *skb;
skb              1811 net/ipv6/ip6_output.c 	skb = ip6_finish_skb(sk);
skb              1812 net/ipv6/ip6_output.c 	if (!skb)
skb              1815 net/ipv6/ip6_output.c 	return ip6_send_skb(skb);
skb              1824 net/ipv6/ip6_output.c 	struct sk_buff *skb;
skb              1826 net/ipv6/ip6_output.c 	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
skb              1827 net/ipv6/ip6_output.c 		if (skb_dst(skb))
skb              1828 net/ipv6/ip6_output.c 			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
skb              1830 net/ipv6/ip6_output.c 		kfree_skb(skb);
skb              1845 net/ipv6/ip6_output.c 					 int len, int odd, struct sk_buff *skb),
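
The __ip6_make_skb() lines above collapse the corked queue into a single skb: the first queued buffer becomes the head, every later buffer is chained onto its frag_list, and the head's len/data_len/truesize absorb the tails. A toy singly-linked-list version of that collapse, with a hypothetical mbuf standing in for sk_buff:

#include <stdio.h>
#include <stddef.h>

struct mbuf {
	struct mbuf *next;
	struct mbuf *frag_list;
	unsigned int len, data_len, truesize;
};

static struct mbuf *make_one(struct mbuf *queue)
{
	struct mbuf *head = queue, *tmp;
	struct mbuf **tail;

	if (!head)
		return NULL;
	queue = head->next;		/* __skb_dequeue() analog */
	head->next = NULL;
	tail = &head->frag_list;

	while ((tmp = queue) != NULL) {
		queue = tmp->next;
		tmp->next = NULL;
		*tail = tmp;		/* chain onto the frag_list */
		tail = &tmp->next;
		head->len += tmp->len;	/* absorb the accounting */
		head->data_len += tmp->len;
		head->truesize += tmp->truesize;
	}
	return head;
}

int main(void)
{
	struct mbuf a = { .len = 1000, .truesize = 2048 };
	struct mbuf b = { .len = 500, .truesize = 1024 };
	struct mbuf *head;

	a.next = &b;
	head = make_one(&a);
	printf("len=%u data_len=%u truesize=%u\n",
	       head->len, head->data_len, head->truesize);
	/* expect len=1500 data_len=500 truesize=3072 */
	return 0;
}
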
skb               398 net/ipv6/ip6_tunnel.c __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
skb               401 net/ipv6/ip6_tunnel.c 	unsigned int nhoff = raw - skb->data;
skb               409 net/ipv6/ip6_tunnel.c 		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
skb               412 net/ipv6/ip6_tunnel.c 		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
skb               431 net/ipv6/ip6_tunnel.c 			if (!pskb_may_pull(skb, off + optlen))
skb               441 net/ipv6/ip6_tunnel.c 				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
skb               469 net/ipv6/ip6_tunnel.c ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
skb               472 net/ipv6/ip6_tunnel.c 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
skb               473 net/ipv6/ip6_tunnel.c 	struct net *net = dev_net(skb->dev);
skb               488 net/ipv6/ip6_tunnel.c 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
skb               516 net/ipv6/ip6_tunnel.c 			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
skb               519 net/ipv6/ip6_tunnel.c 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
skb               531 net/ipv6/ip6_tunnel.c 		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
skb               545 net/ipv6/ip6_tunnel.c 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
skb               561 net/ipv6/ip6_tunnel.c ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               573 net/ipv6/ip6_tunnel.c 	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
skb               598 net/ipv6/ip6_tunnel.c 	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
skb               601 net/ipv6/ip6_tunnel.c 	skb2 = skb_clone(skb, GFP_ATOMIC);
skb               612 net/ipv6/ip6_tunnel.c 	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
skb               622 net/ipv6/ip6_tunnel.c 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
skb               654 net/ipv6/ip6_tunnel.c ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               662 net/ipv6/ip6_tunnel.c 	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
skb               667 net/ipv6/ip6_tunnel.c 	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
skb               669 net/ipv6/ip6_tunnel.c 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
skb               679 net/ipv6/ip6_tunnel.c 		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
skb               697 net/ipv6/ip6_tunnel.c 				       struct sk_buff *skb)
skb               702 net/ipv6/ip6_tunnel.c 		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
skb               704 net/ipv6/ip6_tunnel.c 	return IP6_ECN_decapsulate(ipv6h, skb);
skb               709 net/ipv6/ip6_tunnel.c 				       struct sk_buff *skb)
skb               712 net/ipv6/ip6_tunnel.c 		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
skb               714 net/ipv6/ip6_tunnel.c 	return IP6_ECN_decapsulate(ipv6h, skb);
skb               770 net/ipv6/ip6_tunnel.c static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
skb               775 net/ipv6/ip6_tunnel.c 						struct sk_buff *skb),
skb               779 net/ipv6/ip6_tunnel.c 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               802 net/ipv6/ip6_tunnel.c 	skb->protocol = tpi->proto;
skb               806 net/ipv6/ip6_tunnel.c 		if (!pskb_may_pull(skb, ETH_HLEN)) {
skb               812 net/ipv6/ip6_tunnel.c 		ipv6h = ipv6_hdr(skb);
skb               813 net/ipv6/ip6_tunnel.c 		skb->protocol = eth_type_trans(skb, tunnel->dev);
skb               814 net/ipv6/ip6_tunnel.c 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
skb               816 net/ipv6/ip6_tunnel.c 		skb->dev = tunnel->dev;
skb               819 net/ipv6/ip6_tunnel.c 	skb_reset_network_header(skb);
skb               820 net/ipv6/ip6_tunnel.c 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
skb               822 net/ipv6/ip6_tunnel.c 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
skb               824 net/ipv6/ip6_tunnel.c 	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
skb               840 net/ipv6/ip6_tunnel.c 	tstats->rx_bytes += skb->len;
skb               843 net/ipv6/ip6_tunnel.c 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
skb               846 net/ipv6/ip6_tunnel.c 		skb_dst_set(skb, (struct dst_entry *)tun_dst);
skb               848 net/ipv6/ip6_tunnel.c 	gro_cells_receive(&tunnel->gro_cells, skb);
skb               854 net/ipv6/ip6_tunnel.c 	kfree_skb(skb);
skb               858 net/ipv6/ip6_tunnel.c int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
skb               863 net/ipv6/ip6_tunnel.c 	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
skb               878 net/ipv6/ip6_tunnel.c static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
skb               882 net/ipv6/ip6_tunnel.c 						  struct sk_buff *skb))
skb               885 net/ipv6/ip6_tunnel.c 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               890 net/ipv6/ip6_tunnel.c 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
skb               897 net/ipv6/ip6_tunnel.c 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
skb               899 net/ipv6/ip6_tunnel.c 		ipv6h = ipv6_hdr(skb);
skb               902 net/ipv6/ip6_tunnel.c 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
skb               905 net/ipv6/ip6_tunnel.c 			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
skb               909 net/ipv6/ip6_tunnel.c 		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
skb               919 net/ipv6/ip6_tunnel.c 	kfree_skb(skb);
skb               923 net/ipv6/ip6_tunnel.c static int ip4ip6_rcv(struct sk_buff *skb)
skb               925 net/ipv6/ip6_tunnel.c 	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
skb               929 net/ipv6/ip6_tunnel.c static int ip6ip6_rcv(struct sk_buff *skb)
skb               931 net/ipv6/ip6_tunnel.c 	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
skb              1032 net/ipv6/ip6_tunnel.c int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
skb              1052 net/ipv6/ip6_tunnel.c 		hop_limit = skb_tunnel_info(skb)->key.ttl;
skb              1060 net/ipv6/ip6_tunnel.c 		if (skb->protocol == htons(ETH_P_IPV6)) {
skb              1065 net/ipv6/ip6_tunnel.c 			if (!skb_dst(skb))
skb              1068 net/ipv6/ip6_tunnel.c 			neigh = dst_neigh_lookup(skb_dst(skb),
skb              1069 net/ipv6/ip6_tunnel.c 						 &ipv6_hdr(skb)->daddr);
skb              1077 net/ipv6/ip6_tunnel.c 				addr6 = &ipv6_hdr(skb)->daddr;
skb              1132 net/ipv6/ip6_tunnel.c 	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
skb              1135 net/ipv6/ip6_tunnel.c 	skb_dst_update_pmtu_no_confirm(skb, mtu);
skb              1136 net/ipv6/ip6_tunnel.c 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
skb              1147 net/ipv6/ip6_tunnel.c 			dst_link_failure(skb);
skb              1153 net/ipv6/ip6_tunnel.c 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
skb              1160 net/ipv6/ip6_tunnel.c 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
skb              1161 net/ipv6/ip6_tunnel.c 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
skb              1164 net/ipv6/ip6_tunnel.c 		new_skb = skb_realloc_headroom(skb, max_headroom);
skb              1168 net/ipv6/ip6_tunnel.c 		if (skb->sk)
skb              1169 net/ipv6/ip6_tunnel.c 			skb_set_owner_w(new_skb, skb->sk);
skb              1170 net/ipv6/ip6_tunnel.c 		consume_skb(skb);
skb              1171 net/ipv6/ip6_tunnel.c 		skb = new_skb;
skb              1181 net/ipv6/ip6_tunnel.c 	skb_dst_set(skb, dst);
skb              1184 net/ipv6/ip6_tunnel.c 		if (skb->protocol == htons(ETH_P_IP))
skb              1185 net/ipv6/ip6_tunnel.c 			hop_limit = ip_hdr(skb)->ttl;
skb              1186 net/ipv6/ip6_tunnel.c 		else if (skb->protocol == htons(ETH_P_IPV6))
skb              1187 net/ipv6/ip6_tunnel.c 			hop_limit = ipv6_hdr(skb)->hop_limit;
skb              1200 net/ipv6/ip6_tunnel.c 	err = ip6_tnl_encap(skb, t, &proto, fl6);
skb              1206 net/ipv6/ip6_tunnel.c 		ipv6_push_frag_opts(skb, &opt.ops, &proto);
skb              1209 net/ipv6/ip6_tunnel.c 	skb_push(skb, sizeof(struct ipv6hdr));
skb              1210 net/ipv6/ip6_tunnel.c 	skb_reset_network_header(skb);
skb              1211 net/ipv6/ip6_tunnel.c 	ipv6h = ipv6_hdr(skb);
skb              1213 net/ipv6/ip6_tunnel.c 		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
skb              1218 net/ipv6/ip6_tunnel.c 	ip6tunnel_xmit(NULL, skb, dev);
skb              1222 net/ipv6/ip6_tunnel.c 	dst_link_failure(skb);
skb              1230 net/ipv6/ip6_tunnel.c ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1241 net/ipv6/ip6_tunnel.c 	iph = ip_hdr(skb);
skb              1242 net/ipv6/ip6_tunnel.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb              1252 net/ipv6/ip6_tunnel.c 		tun_info = skb_tunnel_info(skb);
skb              1275 net/ipv6/ip6_tunnel.c 			fl6.flowi6_mark = skb->mark;
skb              1283 net/ipv6/ip6_tunnel.c 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
skb              1286 net/ipv6/ip6_tunnel.c 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
skb              1288 net/ipv6/ip6_tunnel.c 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
skb              1293 net/ipv6/ip6_tunnel.c 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
skb              1302 net/ipv6/ip6_tunnel.c ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1314 net/ipv6/ip6_tunnel.c 	ipv6h = ipv6_hdr(skb);
skb              1324 net/ipv6/ip6_tunnel.c 		tun_info = skb_tunnel_info(skb);
skb              1336 net/ipv6/ip6_tunnel.c 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
skb              1338 net/ipv6/ip6_tunnel.c 		ipv6h = ipv6_hdr(skb);
skb              1342 net/ipv6/ip6_tunnel.c 			tel = (void *)&skb_network_header(skb)[offset];
skb              1344 net/ipv6/ip6_tunnel.c 				icmpv6_send(skb, ICMPV6_PARAMPROB,
skb              1363 net/ipv6/ip6_tunnel.c 			fl6.flowi6_mark = skb->mark;
skb              1371 net/ipv6/ip6_tunnel.c 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
skb              1374 net/ipv6/ip6_tunnel.c 	skb_set_inner_ipproto(skb, IPPROTO_IPV6);
skb              1376 net/ipv6/ip6_tunnel.c 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
skb              1380 net/ipv6/ip6_tunnel.c 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb              1388 net/ipv6/ip6_tunnel.c ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb              1394 net/ipv6/ip6_tunnel.c 	if (!pskb_inet_may_pull(skb))
skb              1397 net/ipv6/ip6_tunnel.c 	switch (skb->protocol) {
skb              1399 net/ipv6/ip6_tunnel.c 		ret = ip4ip6_tnl_xmit(skb, dev);
skb              1402 net/ipv6/ip6_tunnel.c 		ret = ip6ip6_tnl_xmit(skb, dev);
skb              1416 net/ipv6/ip6_tunnel.c 	kfree_skb(skb);
skb              2106 net/ipv6/ip6_tunnel.c static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              2111 net/ipv6/ip6_tunnel.c 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
skb              2112 net/ipv6/ip6_tunnel.c 	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
skb              2113 net/ipv6/ip6_tunnel.c 	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
skb              2114 net/ipv6/ip6_tunnel.c 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
skb              2115 net/ipv6/ip6_tunnel.c 	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
skb              2116 net/ipv6/ip6_tunnel.c 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
skb              2117 net/ipv6/ip6_tunnel.c 	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
skb              2118 net/ipv6/ip6_tunnel.c 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
skb              2119 net/ipv6/ip6_tunnel.c 	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
skb              2122 net/ipv6/ip6_tunnel.c 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
skb              2123 net/ipv6/ip6_tunnel.c 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
skb              2124 net/ipv6/ip6_tunnel.c 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
skb              2125 net/ipv6/ip6_tunnel.c 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
skb              2129 net/ipv6/ip6_tunnel.c 		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
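
ip6_tnl_parse_tlv_enc_lim() above scans destination-option TLVs looking for the tunnel encapsulation limit option. A stand-alone sketch of the same scan over a bare option area, keeping the fragment's convention of returning the byte offset (0 when absent); the sample buffer and the stripped-down bounds handling are invented for illustration:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define IPV6_TLV_PAD1			0
#define IPV6_TLV_TNL_ENCAP_LIMIT	4

static size_t find_encap_limit(const uint8_t *opts, size_t len)
{
	size_t i = 0;

	while (i + 1 < len) {
		if (opts[i] == IPV6_TLV_PAD1) {	/* 1-byte padding */
			i++;
			continue;
		}
		if (opts[i] == IPV6_TLV_TNL_ENCAP_LIMIT &&
		    opts[i + 1] == 1)
			return i;
		i += 2 + opts[i + 1];	/* type + len + body */
	}
	return 0;
}

int main(void)
{
	/* pad1, padN(len 1), encap-limit(type 4, len 1, value 3) */
	uint8_t opts[] = { 0, 1, 1, 0, 4, 1, 3 };
	size_t off = find_encap_limit(opts, sizeof(opts));

	if (off)
		printf("encap limit %u at offset %zu\n",
		       opts[off + 2], off);
	return 0;
}
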
skb                82 net/ipv6/ip6_udp_tunnel.c 			 struct sk_buff *skb,
skb                91 net/ipv6/ip6_udp_tunnel.c 	__skb_push(skb, sizeof(*uh));
skb                92 net/ipv6/ip6_udp_tunnel.c 	skb_reset_transport_header(skb);
skb                93 net/ipv6/ip6_udp_tunnel.c 	uh = udp_hdr(skb);
skb                98 net/ipv6/ip6_udp_tunnel.c 	uh->len = htons(skb->len);
skb               100 net/ipv6/ip6_udp_tunnel.c 	skb_dst_set(skb, dst);
skb               102 net/ipv6/ip6_udp_tunnel.c 	udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);
skb               104 net/ipv6/ip6_udp_tunnel.c 	__skb_push(skb, sizeof(*ip6h));
skb               105 net/ipv6/ip6_udp_tunnel.c 	skb_reset_network_header(skb);
skb               106 net/ipv6/ip6_udp_tunnel.c 	ip6h		  = ipv6_hdr(skb);
skb               108 net/ipv6/ip6_udp_tunnel.c 	ip6h->payload_len = htons(skb->len);
skb               114 net/ipv6/ip6_udp_tunnel.c 	ip6tunnel_xmit(sk, skb, dev);
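
udp_tunnel6_xmit_skb() above builds the encapsulation back to front: the payload is already in place, an 8-byte UDP header is pushed in front of it, then a 40-byte IPv6 header in front of that, each length field covering the bytes behind it. A simplified flat-buffer model of that on-wire layout (the kernel's own provisional payload_len bookkeeping, which the local-out path finalizes, is glossed over here):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htons/ntohs */

int main(void)
{
	uint8_t buf[128];
	uint8_t *data = buf + 48;	/* reserved headroom */
	size_t len = 12;
	uint16_t udp_len, payload_len;

	memcpy(data, "inner packet", len);

	data -= 8;			/* push the 8-byte UDP header */
	len += 8;
	udp_len = htons((uint16_t)len);	/* uh->len: header + payload */
	memcpy(data + 4, &udp_len, 2);

	/* IPv6 payload_len counts everything after the IPv6 header */
	payload_len = udp_len;
	data -= 40;			/* push the 40-byte IPv6 header */
	len += 40;
	memcpy(data + 4, &payload_len, 2);

	printf("wire bytes %zu, udp len %u, ipv6 payload %u\n",
	       len, ntohs(udp_len), ntohs(payload_len));
	return 0;
}
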
skb               299 net/ipv6/ip6_vti.c static int vti6_rcv(struct sk_buff *skb)
skb               302 net/ipv6/ip6_vti.c 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               305 net/ipv6/ip6_vti.c 	t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
skb               312 net/ipv6/ip6_vti.c 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
skb               317 net/ipv6/ip6_vti.c 		ipv6h = ipv6_hdr(skb);
skb               326 net/ipv6/ip6_vti.c 		return xfrm6_rcv_tnl(skb, t);
skb               331 net/ipv6/ip6_vti.c 	kfree_skb(skb);
skb               335 net/ipv6/ip6_vti.c static int vti6_rcv_cb(struct sk_buff *skb, int err)
skb               342 net/ipv6/ip6_vti.c 	struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
skb               343 net/ipv6/ip6_vti.c 	u32 orig_mark = skb->mark;
skb               358 net/ipv6/ip6_vti.c 	x = xfrm_input_state(skb);
skb               363 net/ipv6/ip6_vti.c 		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
skb               365 net/ipv6/ip6_vti.c 			XFRM_INC_STATS(dev_net(skb->dev),
skb               373 net/ipv6/ip6_vti.c 	skb->mark = be32_to_cpu(t->parms.i_key);
skb               374 net/ipv6/ip6_vti.c 	ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
skb               375 net/ipv6/ip6_vti.c 	skb->mark = orig_mark;
skb               380 net/ipv6/ip6_vti.c 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
skb               381 net/ipv6/ip6_vti.c 	skb->dev = dev;
skb               386 net/ipv6/ip6_vti.c 	tstats->rx_bytes += skb->len;
skb               441 net/ipv6/ip6_vti.c vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
skb               445 net/ipv6/ip6_vti.c 	struct dst_entry *dst = skb_dst(skb);
skb               448 net/ipv6/ip6_vti.c 	int pkt_len = skb->len;
skb               453 net/ipv6/ip6_vti.c 		switch (skb->protocol) {
skb               463 net/ipv6/ip6_vti.c 			skb_dst_set(skb, dst);
skb               475 net/ipv6/ip6_vti.c 			skb_dst_set(skb, dst);
skb               508 net/ipv6/ip6_vti.c 	if (skb->len > mtu) {
skb               509 net/ipv6/ip6_vti.c 		skb_dst_update_pmtu_no_confirm(skb, mtu);
skb               511 net/ipv6/ip6_vti.c 		if (skb->protocol == htons(ETH_P_IPV6)) {
skb               515 net/ipv6/ip6_vti.c 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb               517 net/ipv6/ip6_vti.c 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
skb               525 net/ipv6/ip6_vti.c 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
skb               526 net/ipv6/ip6_vti.c 	skb_dst_set(skb, dst);
skb               527 net/ipv6/ip6_vti.c 	skb->dev = skb_dst(skb)->dev;
skb               529 net/ipv6/ip6_vti.c 	err = dst_output(t->net, skb->sk, skb);
skb               537 net/ipv6/ip6_vti.c 	dst_link_failure(skb);
skb               544 net/ipv6/ip6_vti.c vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
skb               551 net/ipv6/ip6_vti.c 	if (!pskb_inet_may_pull(skb))
skb               556 net/ipv6/ip6_vti.c 	switch (skb->protocol) {
skb               559 net/ipv6/ip6_vti.c 		    vti6_addr_conflict(t, ipv6_hdr(skb)))
skb               562 net/ipv6/ip6_vti.c 		xfrm_decode_session(skb, &fl, AF_INET6);
skb               563 net/ipv6/ip6_vti.c 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
skb               566 net/ipv6/ip6_vti.c 		xfrm_decode_session(skb, &fl, AF_INET);
skb               567 net/ipv6/ip6_vti.c 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
skb               576 net/ipv6/ip6_vti.c 	ret = vti6_xmit(skb, dev, &fl);
skb               585 net/ipv6/ip6_vti.c 	kfree_skb(skb);
skb               589 net/ipv6/ip6_vti.c static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               599 net/ipv6/ip6_vti.c 	struct net *net = dev_net(skb->dev);
skb               600 net/ipv6/ip6_vti.c 	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
skb               603 net/ipv6/ip6_vti.c 	t = vti6_tnl_lookup(dev_net(skb->dev), &iph->daddr, &iph->saddr);
skb               611 net/ipv6/ip6_vti.c 		esph = (struct ip_esp_hdr *)(skb->data + offset);
skb               615 net/ipv6/ip6_vti.c 		ah = (struct ip_auth_hdr *)(skb->data + offset);
skb               619 net/ipv6/ip6_vti.c 		ipch = (struct ip_comp_hdr *)(skb->data + offset);
skb               636 net/ipv6/ip6_vti.c 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
skb               639 net/ipv6/ip6_vti.c 		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
skb              1064 net/ipv6/ip6_vti.c static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              1069 net/ipv6/ip6_vti.c 	if (nla_put_u32(skb, IFLA_VTI_LINK, parm->link) ||
skb              1070 net/ipv6/ip6_vti.c 	    nla_put_in6_addr(skb, IFLA_VTI_LOCAL, &parm->laddr) ||
skb              1071 net/ipv6/ip6_vti.c 	    nla_put_in6_addr(skb, IFLA_VTI_REMOTE, &parm->raddr) ||
skb              1072 net/ipv6/ip6_vti.c 	    nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
skb              1073 net/ipv6/ip6_vti.c 	    nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key) ||
skb              1074 net/ipv6/ip6_vti.c 	    nla_put_u32(skb, IFLA_VTI_FWMARK, parm->fwmark))
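
vti6_rcv_cb() above swaps the skb mark for the tunnel's i_key just long enough to run the XFRM policy check, then restores the caller-visible mark. A toy version of that save/override/restore pattern, with stand-in types for the skb and tunnel structures and a trivial predicate in place of xfrm_policy_check():

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct toy_pkt { uint32_t mark; };
struct toy_tnl { uint32_t i_key; };

/* stand-in for the policy lookup: accept only tunnel-keyed packets */
static bool policy_check(const struct toy_pkt *p, uint32_t want)
{
	return p->mark == want;
}

static bool rcv_cb(struct toy_pkt *p, const struct toy_tnl *t)
{
	uint32_t orig_mark = p->mark;
	bool ok;

	p->mark = t->i_key;	/* key the lookup on the tunnel */
	ok = policy_check(p, t->i_key);
	p->mark = orig_mark;	/* restore caller-visible state */
	return ok;
}

int main(void)
{
	struct toy_pkt p = { .mark = 7 };
	struct toy_tnl t = { .i_key = 42 };

	printf("accepted=%d mark=%u\n", rcv_cb(&p, &t), p.mark);
	return 0;
}
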
skb                86 net/ipv6/ip6mr.c 			   struct net_device *dev, struct sk_buff *skb,
skb                93 net/ipv6/ip6mr.c static int ip6mr_rtm_dumproute(struct sk_buff *skb,
skb               187 net/ipv6/ip6mr.c static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
skb               200 net/ipv6/ip6mr.c static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
skb               517 net/ipv6/ip6mr.c static int pim6_rcv(struct sk_buff *skb)
skb               522 net/ipv6/ip6mr.c 	struct net *net = dev_net(skb->dev);
skb               525 net/ipv6/ip6mr.c 		.flowi6_iif	= skb->dev->ifindex,
skb               526 net/ipv6/ip6mr.c 		.flowi6_mark	= skb->mark,
skb               530 net/ipv6/ip6mr.c 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
skb               533 net/ipv6/ip6mr.c 	pim = (struct pimreghdr *)skb_transport_header(skb);
skb               536 net/ipv6/ip6mr.c 	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
skb               539 net/ipv6/ip6mr.c 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
skb               543 net/ipv6/ip6mr.c 	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
skb               548 net/ipv6/ip6mr.c 	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
skb               565 net/ipv6/ip6mr.c 	skb->mac_header = skb->network_header;
skb               566 net/ipv6/ip6mr.c 	skb_pull(skb, (u8 *)encap - skb->data);
skb               567 net/ipv6/ip6mr.c 	skb_reset_network_header(skb);
skb               568 net/ipv6/ip6mr.c 	skb->protocol = htons(ETH_P_IPV6);
skb               569 net/ipv6/ip6mr.c 	skb->ip_summed = CHECKSUM_NONE;
skb               571 net/ipv6/ip6mr.c 	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
skb               573 net/ipv6/ip6mr.c 	netif_rx(skb);
skb               578 net/ipv6/ip6mr.c 	kfree_skb(skb);
skb               588 net/ipv6/ip6mr.c static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
skb               595 net/ipv6/ip6mr.c 		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
skb               596 net/ipv6/ip6mr.c 		.flowi6_mark	= skb->mark,
skb               599 net/ipv6/ip6mr.c 	if (!pskb_inet_may_pull(skb))
skb               606 net/ipv6/ip6mr.c 	dev->stats.tx_bytes += skb->len;
skb               608 net/ipv6/ip6mr.c 	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
skb               610 net/ipv6/ip6mr.c 	kfree_skb(skb);
skb               615 net/ipv6/ip6mr.c 	kfree_skb(skb);
skb               769 net/ipv6/ip6mr.c 	struct sk_buff *skb;
skb               773 net/ipv6/ip6mr.c 	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
skb               774 net/ipv6/ip6mr.c 		if (ipv6_hdr(skb)->version == 0) {
skb               775 net/ipv6/ip6mr.c 			struct nlmsghdr *nlh = skb_pull(skb,
skb               779 net/ipv6/ip6mr.c 			skb_trim(skb, nlh->nlmsg_len);
skb               781 net/ipv6/ip6mr.c 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
skb               783 net/ipv6/ip6mr.c 			kfree_skb(skb);
skb              1003 net/ipv6/ip6mr.c 	struct sk_buff *skb;
skb              1009 net/ipv6/ip6mr.c 	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
skb              1010 net/ipv6/ip6mr.c 		if (ipv6_hdr(skb)->version == 0) {
skb              1011 net/ipv6/ip6mr.c 			struct nlmsghdr *nlh = skb_pull(skb,
skb              1014 net/ipv6/ip6mr.c 			if (mr_fill_mroute(mrt, skb, &c->_c,
skb              1016 net/ipv6/ip6mr.c 				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
skb              1020 net/ipv6/ip6mr.c 				skb_trim(skb, nlh->nlmsg_len);
skb              1023 net/ipv6/ip6mr.c 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
skb              1025 net/ipv6/ip6mr.c 			ip6_mr_forward(net, mrt, skb->dev, skb, c);
skb              1039 net/ipv6/ip6mr.c 	struct sk_buff *skb;
skb              1045 net/ipv6/ip6mr.c 		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
skb              1049 net/ipv6/ip6mr.c 		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
skb              1051 net/ipv6/ip6mr.c 	if (!skb)
skb              1057 net/ipv6/ip6mr.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1066 net/ipv6/ip6mr.c 		skb_push(skb, -skb_network_offset(pkt));
skb              1068 net/ipv6/ip6mr.c 		skb_push(skb, sizeof(*msg));
skb              1069 net/ipv6/ip6mr.c 		skb_reset_transport_header(skb);
skb              1070 net/ipv6/ip6mr.c 		msg = (struct mrt6msg *)skb_transport_header(skb);
skb              1078 net/ipv6/ip6mr.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1086 net/ipv6/ip6mr.c 	skb_put(skb, sizeof(struct ipv6hdr));
skb              1087 net/ipv6/ip6mr.c 	skb_reset_network_header(skb);
skb              1088 net/ipv6/ip6mr.c 	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
skb              1093 net/ipv6/ip6mr.c 	skb_put(skb, sizeof(*msg));
skb              1094 net/ipv6/ip6mr.c 	skb_reset_transport_header(skb);
skb              1095 net/ipv6/ip6mr.c 	msg = (struct mrt6msg *)skb_transport_header(skb);
skb              1104 net/ipv6/ip6mr.c 	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
skb              1105 net/ipv6/ip6mr.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              1112 net/ipv6/ip6mr.c 		kfree_skb(skb);
skb              1116 net/ipv6/ip6mr.c 	mrt6msg_netlink_event(mrt, skb);
skb              1119 net/ipv6/ip6mr.c 	ret = sock_queue_rcv_skb(mroute6_sk, skb);
skb              1123 net/ipv6/ip6mr.c 		kfree_skb(skb);
skb              1131 net/ipv6/ip6mr.c 				  struct sk_buff *skb, struct net_device *dev)
skb              1139 net/ipv6/ip6mr.c 		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
skb              1140 net/ipv6/ip6mr.c 		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
skb              1155 net/ipv6/ip6mr.c 			kfree_skb(skb);
skb              1161 net/ipv6/ip6mr.c 		c->mf6c_origin = ipv6_hdr(skb)->saddr;
skb              1162 net/ipv6/ip6mr.c 		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
skb              1167 net/ipv6/ip6mr.c 		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
skb              1175 net/ipv6/ip6mr.c 			kfree_skb(skb);
skb              1188 net/ipv6/ip6mr.c 		kfree_skb(skb);
skb              1192 net/ipv6/ip6mr.c 			skb->dev = dev;
skb              1193 net/ipv6/ip6mr.c 			skb->skb_iif = dev->ifindex;
skb              1195 net/ipv6/ip6mr.c 		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
skb              1604 net/ipv6/ip6mr.c bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
skb              1608 net/ipv6/ip6mr.c 		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
skb              1609 net/ipv6/ip6mr.c 		.flowi6_oif	= skb->dev->ifindex,
skb              1610 net/ipv6/ip6mr.c 		.flowi6_mark	= skb->mark,
skb              1983 net/ipv6/ip6mr.c static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb              1985 net/ipv6/ip6mr.c 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb              1987 net/ipv6/ip6mr.c 	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb              1988 net/ipv6/ip6mr.c 		      IPSTATS_MIB_OUTOCTETS, skb->len);
skb              1989 net/ipv6/ip6mr.c 	return dst_output(net, sk, skb);
skb              1997 net/ipv6/ip6mr.c 			  struct sk_buff *skb, int vifi)
skb              2011 net/ipv6/ip6mr.c 		vif->bytes_out += skb->len;
skb              2012 net/ipv6/ip6mr.c 		vif->dev->stats.tx_bytes += skb->len;
skb              2014 net/ipv6/ip6mr.c 		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
skb              2019 net/ipv6/ip6mr.c 	ipv6h = ipv6_hdr(skb);
skb              2032 net/ipv6/ip6mr.c 	skb_dst_drop(skb);
skb              2033 net/ipv6/ip6mr.c 	skb_dst_set(skb, dst);
skb              2047 net/ipv6/ip6mr.c 	skb->dev = dev;
skb              2049 net/ipv6/ip6mr.c 	vif->bytes_out += skb->len;
skb              2053 net/ipv6/ip6mr.c 	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
skb              2056 net/ipv6/ip6mr.c 	ipv6h = ipv6_hdr(skb);
skb              2059 net/ipv6/ip6mr.c 	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
skb              2062 net/ipv6/ip6mr.c 		       net, NULL, skb, skb->dev, dev,
skb              2066 net/ipv6/ip6mr.c 	kfree_skb(skb);
skb              2082 net/ipv6/ip6mr.c 			   struct net_device *dev, struct sk_buff *skb,
skb              2091 net/ipv6/ip6mr.c 	c->_c.mfc_un.res.bytes += skb->len;
skb              2128 net/ipv6/ip6mr.c 			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
skb              2135 net/ipv6/ip6mr.c 	mrt->vif_table[vif].bytes_in += skb->len;
skb              2144 net/ipv6/ip6mr.c 		    ipv6_hdr(skb)->hop_limit >
skb              2159 net/ipv6/ip6mr.c 		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
skb              2161 net/ipv6/ip6mr.c 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
skb              2170 net/ipv6/ip6mr.c 		ip6mr_forward2(net, mrt, skb, psend);
skb              2175 net/ipv6/ip6mr.c 	kfree_skb(skb);
skb              2183 net/ipv6/ip6mr.c int ip6_mr_input(struct sk_buff *skb)
skb              2186 net/ipv6/ip6mr.c 	struct net *net = dev_net(skb->dev);
skb              2189 net/ipv6/ip6mr.c 		.flowi6_iif	= skb->dev->ifindex,
skb              2190 net/ipv6/ip6mr.c 		.flowi6_mark	= skb->mark,
skb              2198 net/ipv6/ip6mr.c 	dev = skb->dev;
skb              2199 net/ipv6/ip6mr.c 	if (netif_is_l3_master(skb->dev)) {
skb              2200 net/ipv6/ip6mr.c 		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
skb              2202 net/ipv6/ip6mr.c 			kfree_skb(skb);
skb              2209 net/ipv6/ip6mr.c 		kfree_skb(skb);
skb              2215 net/ipv6/ip6mr.c 				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
skb              2221 net/ipv6/ip6mr.c 						     &ipv6_hdr(skb)->daddr,
skb              2233 net/ipv6/ip6mr.c 			int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
skb              2239 net/ipv6/ip6mr.c 		kfree_skb(skb);
skb              2243 net/ipv6/ip6mr.c 	ip6_mr_forward(net, mrt, dev, skb, cache);
skb              2250 net/ipv6/ip6mr.c int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
skb              2256 net/ipv6/ip6mr.c 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
skb              2264 net/ipv6/ip6mr.c 	if (!cache && skb->dev) {
skb              2265 net/ipv6/ip6mr.c 		int vif = ip6mr_find_vif(mrt, skb->dev);
skb              2278 net/ipv6/ip6mr.c 		dev = skb->dev;
skb              2315 net/ipv6/ip6mr.c 	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
skb              2320 net/ipv6/ip6mr.c static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
skb              2328 net/ipv6/ip6mr.c 	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
skb              2338 net/ipv6/ip6mr.c 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
skb              2348 net/ipv6/ip6mr.c 	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
skb              2349 net/ipv6/ip6mr.c 	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
skb              2351 net/ipv6/ip6mr.c 	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
skb              2356 net/ipv6/ip6mr.c 	nlmsg_end(skb, nlh);
skb              2360 net/ipv6/ip6mr.c 	nlmsg_cancel(skb, nlh);
skb              2364 net/ipv6/ip6mr.c static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
skb              2368 net/ipv6/ip6mr.c 	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
skb              2397 net/ipv6/ip6mr.c 	struct sk_buff *skb;
skb              2400 net/ipv6/ip6mr.c 	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
skb              2402 net/ipv6/ip6mr.c 	if (!skb)
skb              2405 net/ipv6/ip6mr.c 	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
skb              2409 net/ipv6/ip6mr.c 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
skb              2413 net/ipv6/ip6mr.c 	kfree_skb(skb);
skb              2441 net/ipv6/ip6mr.c 	struct sk_buff *skb;
skb              2448 net/ipv6/ip6mr.c 	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
skb              2449 net/ipv6/ip6mr.c 	if (!skb)
skb              2452 net/ipv6/ip6mr.c 	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
skb              2458 net/ipv6/ip6mr.c 	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
skb              2459 net/ipv6/ip6mr.c 	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
skb              2460 net/ipv6/ip6mr.c 	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
skb              2462 net/ipv6/ip6mr.c 	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
skb              2466 net/ipv6/ip6mr.c 	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
skb              2471 net/ipv6/ip6mr.c 	nlmsg_end(skb, nlh);
skb              2473 net/ipv6/ip6mr.c 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
skb              2477 net/ipv6/ip6mr.c 	nlmsg_cancel(skb, nlh);
skb              2479 net/ipv6/ip6mr.c 	kfree_skb(skb);
skb              2483 net/ipv6/ip6mr.c static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
skb              2490 net/ipv6/ip6mr.c 		err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
skb              2499 net/ipv6/ip6mr.c 		mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
skb              2502 net/ipv6/ip6mr.c 				return skb->len;
skb              2507 net/ipv6/ip6mr.c 		err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
skb              2509 net/ipv6/ip6mr.c 		return skb->len ? : err;
skb              2512 net/ipv6/ip6mr.c 	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
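
The ip6mr.c hits above trace the multicast-routing upcall path: when no forwarding cache entry exists, ip6mr_cache_report() builds a small skb holding a copy of the offending IPv6 header followed by a struct mrt6msg and queues it to the routing daemon with sock_queue_rcv_skb(), while the triggering packet waits on mfc_un.unres.unresolved. A minimal sketch of that layout, mirroring ip6mr.c:1086-1105 above; build_nocache_upcall() is a hypothetical name, the real logic lives in ip6mr_cache_report():

#include <linux/gfp.h>
#include <linux/ipv6.h>
#include <linux/mroute6.h>
#include <linux/skbuff.h>

static struct sk_buff *build_nocache_upcall(struct sk_buff *pkt, int mifi)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;

	skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Copy of the IPv6 header that missed the forwarding cache. */
	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/* The upcall message itself, right behind it. */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);
	msg->im6_mbz = 0;
	msg->im6_msgtype = MRT6MSG_NOCACHE;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* locally generated */
	return skb;
}

Marking the skb CHECKSUM_UNNECESSARY matches lines 1057, 1078 and 1105 above: the upcall body is generated locally, so no checksum validation is wanted.
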
skb                44 net/ipv6/ipcomp6.c static int ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb                47 net/ipv6/ipcomp6.c 	struct net *net = dev_net(skb->dev);
skb                49 net/ipv6/ipcomp6.c 	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
skb                51 net/ipv6/ipcomp6.c 		(struct ip_comp_hdr *)(skb->data + offset);
skb                59 net/ipv6/ipcomp6.c 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
skb                65 net/ipv6/ipcomp6.c 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
skb                68 net/ipv6/ipcomp6.c 		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
skb               168 net/ipv6/ipcomp6.c static int ipcomp6_rcv_cb(struct sk_buff *skb, int err)
skb              1071 net/ipv6/ipv6_sockglue.c 		struct sk_buff *skb;
skb              1081 net/ipv6/ipv6_sockglue.c 		skb = np->pktoptions;
skb              1082 net/ipv6/ipv6_sockglue.c 		if (skb)
skb              1083 net/ipv6/ipv6_sockglue.c 			ip6_datagram_recv_ctl(sk, &msg, skb);
skb              1085 net/ipv6/ipv6_sockglue.c 		if (!skb) {
skb              1336 net/ipv6/mcast.c int igmp6_event_query(struct sk_buff *skb)
skb              1348 net/ipv6/mcast.c 	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
skb              1352 net/ipv6/mcast.c 	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
skb              1353 net/ipv6/mcast.c 	len -= skb_network_header_len(skb);
skb              1362 net/ipv6/mcast.c 	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
skb              1363 net/ipv6/mcast.c 	    ipv6_hdr(skb)->hop_limit != 1 ||
skb              1364 net/ipv6/mcast.c 	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
skb              1365 net/ipv6/mcast.c 	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
skb              1368 net/ipv6/mcast.c 	idev = __in6_dev_get(skb->dev);
skb              1372 net/ipv6/mcast.c 	mld = (struct mld_msg *)icmp6_hdr(skb);
skb              1391 net/ipv6/mcast.c 		if (!pskb_may_pull(skb, srcs_offset))
skb              1394 net/ipv6/mcast.c 		mlh2 = (struct mld2_query *)skb_transport_header(skb);
skb              1409 net/ipv6/mcast.c 			if (!pskb_may_pull(skb, srcs_offset +
skb              1413 net/ipv6/mcast.c 			mlh2 = (struct mld2_query *)skb_transport_header(skb);
skb              1456 net/ipv6/mcast.c int igmp6_event_report(struct sk_buff *skb)
skb              1464 net/ipv6/mcast.c 	if (skb->pkt_type == PACKET_LOOPBACK)
skb              1468 net/ipv6/mcast.c 	if (skb->pkt_type != PACKET_MULTICAST &&
skb              1469 net/ipv6/mcast.c 	    skb->pkt_type != PACKET_BROADCAST)
skb              1472 net/ipv6/mcast.c 	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
skb              1475 net/ipv6/mcast.c 	mld = (struct mld_msg *)icmp6_hdr(skb);
skb              1478 net/ipv6/mcast.c 	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
skb              1483 net/ipv6/mcast.c 	idev = __in6_dev_get(skb->dev);
skb              1564 net/ipv6/mcast.c static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
skb              1572 net/ipv6/mcast.c 	skb->protocol = htons(ETH_P_IPV6);
skb              1573 net/ipv6/mcast.c 	skb->dev = dev;
skb              1575 net/ipv6/mcast.c 	skb_reset_network_header(skb);
skb              1576 net/ipv6/mcast.c 	skb_put(skb, sizeof(struct ipv6hdr));
skb              1577 net/ipv6/mcast.c 	hdr = ipv6_hdr(skb);
skb              1594 net/ipv6/mcast.c 	struct sk_buff *skb;
skb              1609 net/ipv6/mcast.c 	skb = sock_alloc_send_skb(sk, size, 1, &err);
skb              1611 net/ipv6/mcast.c 	if (!skb)
skb              1614 net/ipv6/mcast.c 	skb->priority = TC_PRIO_CONTROL;
skb              1615 net/ipv6/mcast.c 	skb_reserve(skb, hlen);
skb              1616 net/ipv6/mcast.c 	skb_tailroom_reserve(skb, mtu, tlen);
skb              1627 net/ipv6/mcast.c 	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
skb              1629 net/ipv6/mcast.c 	skb_put_data(skb, ra, sizeof(ra));
skb              1631 net/ipv6/mcast.c 	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
skb              1632 net/ipv6/mcast.c 	skb_put(skb, sizeof(*pmr));
skb              1633 net/ipv6/mcast.c 	pmr = (struct mld2_report *)skb_transport_header(skb);
skb              1639 net/ipv6/mcast.c 	return skb;
skb              1642 net/ipv6/mcast.c static void mld_sendpack(struct sk_buff *skb)
skb              1644 net/ipv6/mcast.c 	struct ipv6hdr *pip6 = ipv6_hdr(skb);
skb              1646 net/ipv6/mcast.c 			      (struct mld2_report *)skb_transport_header(skb);
skb              1649 net/ipv6/mcast.c 	struct net *net = dev_net(skb->dev);
skb              1655 net/ipv6/mcast.c 	idev = __in6_dev_get(skb->dev);
skb              1656 net/ipv6/mcast.c 	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
skb              1658 net/ipv6/mcast.c 	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
skb              1660 net/ipv6/mcast.c 	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
skb              1665 net/ipv6/mcast.c 					   csum_partial(skb_transport_header(skb),
skb              1669 net/ipv6/mcast.c 			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
skb              1670 net/ipv6/mcast.c 			 skb->dev->ifindex);
skb              1671 net/ipv6/mcast.c 	dst = icmp6_dst_alloc(skb->dev, &fl6);
skb              1678 net/ipv6/mcast.c 	skb_dst_set(skb, dst);
skb              1683 net/ipv6/mcast.c 		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
skb              1697 net/ipv6/mcast.c 	kfree_skb(skb);
skb              1706 net/ipv6/mcast.c static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
skb              1712 net/ipv6/mcast.c 	if (!skb) {
skb              1713 net/ipv6/mcast.c 		skb = mld_newpack(pmc->idev, mtu);
skb              1714 net/ipv6/mcast.c 		if (!skb)
skb              1717 net/ipv6/mcast.c 	pgr = skb_put(skb, sizeof(struct mld2_grec));
skb              1722 net/ipv6/mcast.c 	pmr = (struct mld2_report *)skb_transport_header(skb);
skb              1725 net/ipv6/mcast.c 	return skb;
skb              1728 net/ipv6/mcast.c #define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
skb              1730 net/ipv6/mcast.c static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
skb              1742 net/ipv6/mcast.c 		return skb;
skb              1746 net/ipv6/mcast.c 		return skb;
skb              1760 net/ipv6/mcast.c 	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
skb              1765 net/ipv6/mcast.c 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
skb              1766 net/ipv6/mcast.c 			if (skb)
skb              1767 net/ipv6/mcast.c 				mld_sendpack(skb);
skb              1768 net/ipv6/mcast.c 			skb = mld_newpack(idev, mtu);
skb              1796 net/ipv6/mcast.c 		if (AVAILABLE(skb) < sizeof(*psrc) +
skb              1802 net/ipv6/mcast.c 			if (skb)
skb              1803 net/ipv6/mcast.c 				mld_sendpack(skb);
skb              1804 net/ipv6/mcast.c 			skb = mld_newpack(idev, mtu);
skb              1809 net/ipv6/mcast.c 			skb = add_grhead(skb, pmc, type, &pgr, mtu);
skb              1812 net/ipv6/mcast.c 		if (!skb)
skb              1814 net/ipv6/mcast.c 		psrc = skb_put(skb, sizeof(*psrc));
skb              1837 net/ipv6/mcast.c 			return skb;
skb              1840 net/ipv6/mcast.c 			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
skb              1841 net/ipv6/mcast.c 				mld_sendpack(skb);
skb              1842 net/ipv6/mcast.c 				skb = NULL; /* add_grhead will get a new one */
skb              1844 net/ipv6/mcast.c 			skb = add_grhead(skb, pmc, type, &pgr, mtu);
skb              1852 net/ipv6/mcast.c 	return skb;
skb              1857 net/ipv6/mcast.c 	struct sk_buff *skb = NULL;
skb              1870 net/ipv6/mcast.c 			skb = add_grec(skb, pmc, type, 0, 0, 0);
skb              1879 net/ipv6/mcast.c 		skb = add_grec(skb, pmc, type, 0, 0, 0);
skb              1883 net/ipv6/mcast.c 	if (skb)
skb              1884 net/ipv6/mcast.c 		mld_sendpack(skb);
skb              1911 net/ipv6/mcast.c 	struct sk_buff *skb = NULL;
skb              1924 net/ipv6/mcast.c 			skb = add_grec(skb, pmc, type, 1, 0, 0);
skb              1925 net/ipv6/mcast.c 			skb = add_grec(skb, pmc, dtype, 1, 1, 0);
skb              1930 net/ipv6/mcast.c 				skb = add_grec(skb, pmc, type, 1, 0, 0);
skb              1961 net/ipv6/mcast.c 		skb = add_grec(skb, pmc, type, 0, 0, 0);
skb              1962 net/ipv6/mcast.c 		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */
skb              1970 net/ipv6/mcast.c 			skb = add_grec(skb, pmc, type, 0, 0, 0);
skb              1976 net/ipv6/mcast.c 	if (!skb)
skb              1978 net/ipv6/mcast.c 	(void) mld_sendpack(skb);
skb              1986 net/ipv6/mcast.c 	struct sk_buff *skb;
skb              2013 net/ipv6/mcast.c 	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
skb              2015 net/ipv6/mcast.c 	if (!skb) {
skb              2022 net/ipv6/mcast.c 	skb->priority = TC_PRIO_CONTROL;
skb              2023 net/ipv6/mcast.c 	skb_reserve(skb, hlen);
skb              2034 net/ipv6/mcast.c 	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
skb              2036 net/ipv6/mcast.c 	skb_put_data(skb, ra, sizeof(ra));
skb              2038 net/ipv6/mcast.c 	hdr = skb_put_zero(skb, sizeof(struct mld_msg));
skb              2047 net/ipv6/mcast.c 	idev = __in6_dev_get(skb->dev);
skb              2050 net/ipv6/mcast.c 			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
skb              2051 net/ipv6/mcast.c 			 skb->dev->ifindex);
skb              2052 net/ipv6/mcast.c 	dst = icmp6_dst_alloc(skb->dev, &fl6);
skb              2058 net/ipv6/mcast.c 	skb_dst_set(skb, dst);
skb              2060 net/ipv6/mcast.c 		      net, sk, skb, NULL, skb->dev,
skb              2073 net/ipv6/mcast.c 	kfree_skb(skb);
skb              2079 net/ipv6/mcast.c 	struct sk_buff *skb;
skb              2086 net/ipv6/mcast.c 	skb = NULL;
skb              2094 net/ipv6/mcast.c 		skb = add_grec(skb, pmc, type, 0, 0, 1);
skb              2098 net/ipv6/mcast.c 	if (skb)
skb              2099 net/ipv6/mcast.c 		mld_sendpack(skb);
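
The mcast.c block shows the MLDv2 report pipeline: mld_newpack() reserves link-layer headroom plus a hop-by-hop router-alert option, add_grhead()/add_grec() append mld2_grec records while the AVAILABLE() macro tracks tailroom, and mld_sendpack() checksums and transmits. A sketch of just the checksum step, as at mcast.c:1665 above; mld_csum_example() is illustrative and assumes the MLD payload is linear:

#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/ipv6.h>
#include <net/mld.h>

static void mld_csum_example(struct sk_buff *skb, int mldlen)
{
	struct mld2_report *pmr =
		(struct mld2_report *)skb_transport_header(skb);

	/* ICMPv6 checksum = pseudo-header (addresses, length, proto)
	 * folded into the partial sum over the MLD payload. */
	pmr->mld2r_cksum = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					   &ipv6_hdr(skb)->daddr,
					   mldlen, IPPROTO_ICMPV6,
					   csum_partial(skb_transport_header(skb),
							mldlen, 0));
}
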
skb                14 net/ipv6/mcast_snoop.c static int ipv6_mc_check_ip6hdr(struct sk_buff *skb)
skb                18 net/ipv6/mcast_snoop.c 	unsigned int offset = skb_network_offset(skb) + sizeof(*ip6h);
skb                20 net/ipv6/mcast_snoop.c 	if (!pskb_may_pull(skb, offset))
skb                23 net/ipv6/mcast_snoop.c 	ip6h = ipv6_hdr(skb);
skb                29 net/ipv6/mcast_snoop.c 	if (skb->len < len || len <= offset)
skb                32 net/ipv6/mcast_snoop.c 	skb_set_transport_header(skb, offset);
skb                37 net/ipv6/mcast_snoop.c static int ipv6_mc_check_exthdrs(struct sk_buff *skb)
skb                44 net/ipv6/mcast_snoop.c 	ip6h = ipv6_hdr(skb);
skb                50 net/ipv6/mcast_snoop.c 	offset = skb_network_offset(skb) + sizeof(*ip6h);
skb                51 net/ipv6/mcast_snoop.c 	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
skb                59 net/ipv6/mcast_snoop.c 	skb_set_transport_header(skb, offset);
skb                64 net/ipv6/mcast_snoop.c static int ipv6_mc_check_mld_reportv2(struct sk_buff *skb)
skb                66 net/ipv6/mcast_snoop.c 	unsigned int len = skb_transport_offset(skb);
skb                70 net/ipv6/mcast_snoop.c 	return ipv6_mc_may_pull(skb, len) ? 0 : -EINVAL;
skb                73 net/ipv6/mcast_snoop.c static int ipv6_mc_check_mld_query(struct sk_buff *skb)
skb                75 net/ipv6/mcast_snoop.c 	unsigned int transport_len = ipv6_transport_len(skb);
skb                80 net/ipv6/mcast_snoop.c 	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
skb                89 net/ipv6/mcast_snoop.c 		len = skb_transport_offset(skb) + sizeof(struct mld2_query);
skb                90 net/ipv6/mcast_snoop.c 		if (!ipv6_mc_may_pull(skb, len))
skb                94 net/ipv6/mcast_snoop.c 	mld = (struct mld_msg *)skb_transport_header(skb);
skb               100 net/ipv6/mcast_snoop.c 	    !ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
skb               106 net/ipv6/mcast_snoop.c static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
skb               108 net/ipv6/mcast_snoop.c 	unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
skb               111 net/ipv6/mcast_snoop.c 	if (!ipv6_mc_may_pull(skb, len))
skb               114 net/ipv6/mcast_snoop.c 	mld = (struct mld_msg *)skb_transport_header(skb);
skb               121 net/ipv6/mcast_snoop.c 		return ipv6_mc_check_mld_reportv2(skb);
skb               123 net/ipv6/mcast_snoop.c 		return ipv6_mc_check_mld_query(skb);
skb               129 net/ipv6/mcast_snoop.c static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
skb               131 net/ipv6/mcast_snoop.c 	return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
skb               134 net/ipv6/mcast_snoop.c int ipv6_mc_check_icmpv6(struct sk_buff *skb)
skb               136 net/ipv6/mcast_snoop.c 	unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
skb               137 net/ipv6/mcast_snoop.c 	unsigned int transport_len = ipv6_transport_len(skb);
skb               140 net/ipv6/mcast_snoop.c 	if (!ipv6_mc_may_pull(skb, len))
skb               143 net/ipv6/mcast_snoop.c 	skb_chk = skb_checksum_trimmed(skb, transport_len,
skb               148 net/ipv6/mcast_snoop.c 	if (skb_chk != skb)
skb               170 net/ipv6/mcast_snoop.c int ipv6_mc_check_mld(struct sk_buff *skb)
skb               174 net/ipv6/mcast_snoop.c 	ret = ipv6_mc_check_ip6hdr(skb);
skb               178 net/ipv6/mcast_snoop.c 	ret = ipv6_mc_check_exthdrs(skb);
skb               182 net/ipv6/mcast_snoop.c 	ret = ipv6_mc_check_icmpv6(skb);
skb               186 net/ipv6/mcast_snoop.c 	return ipv6_mc_check_mld_msg(skb);
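
Every check in the mcast_snoop.c chain above (ip6hdr, exthdrs, icmpv6, mld msg) follows the same defensive idiom: pskb_may_pull(), or the ipv6_mc_may_pull() wrapper, guarantees the bytes are linear before a header pointer is cast. A minimal sketch of the idiom; example_check_icmp6() is a hypothetical helper:

#include <linux/errno.h>
#include <linux/icmpv6.h>
#include <linux/skbuff.h>

static int example_check_icmp6(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb) +
			   sizeof(struct icmp6hdr);

	if (!pskb_may_pull(skb, len))	/* header may sit in paged data */
		return -EINVAL;

	/* Only now is icmp6_hdr(skb) safe to dereference. */
	return icmp6_hdr(skb)->icmp6_type;
}

Skipping the pull and dereferencing icmp6_hdr() directly would read past the linear area on fragmented or paged skbs.
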
skb                46 net/ipv6/mip6.c static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
skb                48 net/ipv6/mip6.c 	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
skb                74 net/ipv6/mip6.c static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
skb                79 net/ipv6/mip6.c 	mh = skb_header_pointer(skb, skb_transport_offset(skb),
skb                84 net/ipv6/mip6.c 	if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
skb                91 net/ipv6/mip6.c 		mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
skb                92 net/ipv6/mip6.c 				skb_network_header_len(skb));
skb                99 net/ipv6/mip6.c 		mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
skb               100 net/ipv6/mip6.c 				skb_network_header_len(skb));
skb               119 net/ipv6/mip6.c static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb)
skb               121 net/ipv6/mip6.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               122 net/ipv6/mip6.c 	struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data;
skb               138 net/ipv6/mip6.c static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
skb               146 net/ipv6/mip6.c 	skb_push(skb, -skb_network_offset(skb));
skb               147 net/ipv6/mip6.c 	iph = ipv6_hdr(skb);
skb               149 net/ipv6/mip6.c 	nexthdr = *skb_mac_header(skb);
skb               150 net/ipv6/mip6.c 	*skb_mac_header(skb) = IPPROTO_DSTOPTS;
skb               152 net/ipv6/mip6.c 	dstopt = (struct ipv6_destopt_hdr *)skb_transport_header(skb);
skb               196 net/ipv6/mip6.c static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb,
skb               200 net/ipv6/mip6.c 	struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
skb               213 net/ipv6/mip6.c 		offset = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
skb               216 net/ipv6/mip6.c 					(skb_network_header(skb) + offset);
skb               219 net/ipv6/mip6.c 	stamp = skb_get_ktime(skb);
skb               221 net/ipv6/mip6.c 	if (!mip6_report_rl_allow(stamp, &ipv6_hdr(skb)->daddr,
skb               222 net/ipv6/mip6.c 				  hao ? &hao->addr : &ipv6_hdr(skb)->saddr,
skb               227 net/ipv6/mip6.c 	memcpy(&sel.daddr, (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
skb               230 net/ipv6/mip6.c 	memcpy(&sel.saddr, (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
skb               250 net/ipv6/mip6.c static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
skb               255 net/ipv6/mip6.c 				   (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
skb               256 net/ipv6/mip6.c 	const unsigned char *nh = skb_network_header(skb);
skb               257 net/ipv6/mip6.c 	unsigned int packet_len = skb_tail_pointer(skb) -
skb               258 net/ipv6/mip6.c 		skb_network_header(skb);
skb               261 net/ipv6/mip6.c 	*nexthdr = &ipv6_hdr(skb)->nexthdr;
skb               277 net/ipv6/mip6.c 			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
skb               339 net/ipv6/mip6.c static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
skb               341 net/ipv6/mip6.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               342 net/ipv6/mip6.c 	struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
skb               357 net/ipv6/mip6.c static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb)
skb               363 net/ipv6/mip6.c 	skb_push(skb, -skb_network_offset(skb));
skb               364 net/ipv6/mip6.c 	iph = ipv6_hdr(skb);
skb               366 net/ipv6/mip6.c 	nexthdr = *skb_mac_header(skb);
skb               367 net/ipv6/mip6.c 	*skb_mac_header(skb) = IPPROTO_ROUTING;
skb               369 net/ipv6/mip6.c 	rt2 = (struct rt2_hdr *)skb_transport_header(skb);
skb               386 net/ipv6/mip6.c static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
skb               391 net/ipv6/mip6.c 				   (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
skb               392 net/ipv6/mip6.c 	const unsigned char *nh = skb_network_header(skb);
skb               393 net/ipv6/mip6.c 	unsigned int packet_len = skb_tail_pointer(skb) -
skb               394 net/ipv6/mip6.c 		skb_network_header(skb);
skb               397 net/ipv6/mip6.c 	*nexthdr = &ipv6_hdr(skb)->nexthdr;
skb               414 net/ipv6/mip6.c 			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
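
A recurring line in the mip6.c output handlers above is skb_push(skb, -skb_network_offset(skb)). At xfrm output time skb->data sits past the network header, so the offset is negative and pushing its negation rewinds skb->data onto the IPv6 header before the destination-option or routing header is spliced in. A hedged one-liner illustrating the arithmetic only:

#include <linux/skbuff.h>

static void rewind_to_ipv6_header(struct sk_buff *skb)
{
	/* skb_network_offset() < 0 here, so this push is positive
	 * and moves skb->data back to skb_network_header(skb). */
	skb_push(skb, -skb_network_offset(skb));
}
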
skb                79 net/ipv6/ndisc.c static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
skb                80 net/ipv6/ndisc.c static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
skb                83 net/ipv6/ndisc.c static void pndisc_redo(struct sk_buff *skb);
skb               143 net/ipv6/ndisc.c void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
skb               147 net/ipv6/ndisc.c 	u8 *opt = skb_put(skb, space);
skb               165 net/ipv6/ndisc.c static inline void ndisc_fill_addr_option(struct sk_buff *skb, int type,
skb               168 net/ipv6/ndisc.c 	__ndisc_fill_addr_option(skb, type, data, skb->dev->addr_len,
skb               169 net/ipv6/ndisc.c 				 ndisc_addr_option_pad(skb->dev->type));
skb               170 net/ipv6/ndisc.c 	ndisc_ops_fill_addr_option(skb->dev, skb, icmp6_type);
skb               173 net/ipv6/ndisc.c static inline void ndisc_fill_redirect_addr_option(struct sk_buff *skb,
skb               177 net/ipv6/ndisc.c 	ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha, NDISC_REDIRECT);
skb               178 net/ipv6/ndisc.c 	ndisc_ops_fill_redirect_addr_option(skb->dev, skb, ops_data);
skb               415 net/ipv6/ndisc.c 	struct sk_buff *skb;
skb               417 net/ipv6/ndisc.c 	skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
skb               418 net/ipv6/ndisc.c 	if (!skb) {
skb               424 net/ipv6/ndisc.c 	skb->protocol = htons(ETH_P_IPV6);
skb               425 net/ipv6/ndisc.c 	skb->dev = dev;
skb               427 net/ipv6/ndisc.c 	skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
skb               428 net/ipv6/ndisc.c 	skb_reset_transport_header(skb);
skb               433 net/ipv6/ndisc.c 	skb_set_owner_w(skb, sk);
skb               435 net/ipv6/ndisc.c 	return skb;
skb               438 net/ipv6/ndisc.c static void ip6_nd_hdr(struct sk_buff *skb,
skb               448 net/ipv6/ndisc.c 	idev = __in6_dev_get(skb->dev);
skb               452 net/ipv6/ndisc.c 	skb_push(skb, sizeof(*hdr));
skb               453 net/ipv6/ndisc.c 	skb_reset_network_header(skb);
skb               454 net/ipv6/ndisc.c 	hdr = ipv6_hdr(skb);
skb               466 net/ipv6/ndisc.c static void ndisc_send_skb(struct sk_buff *skb,
skb               470 net/ipv6/ndisc.c 	struct dst_entry *dst = skb_dst(skb);
skb               471 net/ipv6/ndisc.c 	struct net *net = dev_net(skb->dev);
skb               475 net/ipv6/ndisc.c 	struct icmp6hdr *icmp6h = icmp6_hdr(skb);
skb               482 net/ipv6/ndisc.c 		int oif = skb->dev->ifindex;
skb               485 net/ipv6/ndisc.c 		dst = icmp6_dst_alloc(skb->dev, &fl6);
skb               487 net/ipv6/ndisc.c 			kfree_skb(skb);
skb               491 net/ipv6/ndisc.c 		skb_dst_set(skb, dst);
skb               494 net/ipv6/ndisc.c 	icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, skb->len,
skb               497 net/ipv6/ndisc.c 							   skb->len, 0));
skb               499 net/ipv6/ndisc.c 	ip6_nd_hdr(skb, saddr, daddr, inet6_sk(sk)->hop_limit, skb->len);
skb               503 net/ipv6/ndisc.c 	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
skb               506 net/ipv6/ndisc.c 		      net, sk, skb, NULL, dst->dev,
skb               520 net/ipv6/ndisc.c 	struct sk_buff *skb;
skb               549 net/ipv6/ndisc.c 	skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen);
skb               550 net/ipv6/ndisc.c 	if (!skb)
skb               553 net/ipv6/ndisc.c 	msg = skb_put(skb, sizeof(*msg));
skb               565 net/ipv6/ndisc.c 		ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR,
skb               569 net/ipv6/ndisc.c 	ndisc_send_skb(skb, daddr, src_addr);
skb               602 net/ipv6/ndisc.c 	struct sk_buff *skb;
skb               623 net/ipv6/ndisc.c 	skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen);
skb               624 net/ipv6/ndisc.c 	if (!skb)
skb               627 net/ipv6/ndisc.c 	msg = skb_put(skb, sizeof(*msg));
skb               636 net/ipv6/ndisc.c 		ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
skb               640 net/ipv6/ndisc.c 		u8 *opt = skb_put(skb, 8);
skb               647 net/ipv6/ndisc.c 	ndisc_send_skb(skb, daddr, saddr);
skb               653 net/ipv6/ndisc.c 	struct sk_buff *skb;
skb               683 net/ipv6/ndisc.c 	skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen);
skb               684 net/ipv6/ndisc.c 	if (!skb)
skb               687 net/ipv6/ndisc.c 	msg = skb_put(skb, sizeof(*msg));
skb               695 net/ipv6/ndisc.c 		ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
skb               699 net/ipv6/ndisc.c 	ndisc_send_skb(skb, daddr, saddr);
skb               703 net/ipv6/ndisc.c static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb)
skb               709 net/ipv6/ndisc.c 	dst_link_failure(skb);
skb               710 net/ipv6/ndisc.c 	kfree_skb(skb);
skb               715 net/ipv6/ndisc.c static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
skb               723 net/ipv6/ndisc.c 	if (skb && ipv6_chk_addr_and_flags(dev_net(dev), &ipv6_hdr(skb)->saddr,
skb               726 net/ipv6/ndisc.c 		saddr = &ipv6_hdr(skb)->saddr;
skb               767 net/ipv6/ndisc.c static void ndisc_recv_ns(struct sk_buff *skb)
skb               769 net/ipv6/ndisc.c 	struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
skb               770 net/ipv6/ndisc.c 	const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
skb               771 net/ipv6/ndisc.c 	const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
skb               773 net/ipv6/ndisc.c 	u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
skb               776 net/ipv6/ndisc.c 	struct net_device *dev = skb->dev;
skb               785 net/ipv6/ndisc.c 	if (skb->len < sizeof(struct nd_msg)) {
skb               852 net/ipv6/ndisc.c 				addrconf_dad_failure(skb, ifp);
skb               892 net/ipv6/ndisc.c 			if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
skb               893 net/ipv6/ndisc.c 			    skb->pkt_type != PACKET_HOST &&
skb               903 net/ipv6/ndisc.c 				struct sk_buff *n = skb_clone(skb, GFP_ATOMIC);
skb               951 net/ipv6/ndisc.c static void ndisc_recv_na(struct sk_buff *skb)
skb               953 net/ipv6/ndisc.c 	struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
skb               954 net/ipv6/ndisc.c 	struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
skb               955 net/ipv6/ndisc.c 	const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
skb               957 net/ipv6/ndisc.c 	u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
skb               960 net/ipv6/ndisc.c 	struct net_device *dev = skb->dev;
skb               965 net/ipv6/ndisc.c 	if (skb->len < sizeof(struct nd_msg)) {
skb              1003 net/ipv6/ndisc.c 		if (skb->pkt_type != PACKET_LOOPBACK
skb              1005 net/ipv6/ndisc.c 				addrconf_dad_failure(skb, ifp);
skb              1017 net/ipv6/ndisc.c 		if (skb->pkt_type != PACKET_LOOPBACK)
skb              1020 net/ipv6/ndisc.c 				  eth_hdr(skb)->h_source, &ifp->addr, ifp->idev->dev->name);
skb              1065 net/ipv6/ndisc.c static void ndisc_recv_rs(struct sk_buff *skb)
skb              1067 net/ipv6/ndisc.c 	struct rs_msg *rs_msg = (struct rs_msg *)skb_transport_header(skb);
skb              1068 net/ipv6/ndisc.c 	unsigned long ndoptlen = skb->len - sizeof(*rs_msg);
skb              1071 net/ipv6/ndisc.c 	const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
skb              1075 net/ipv6/ndisc.c 	if (skb->len < sizeof(*rs_msg))
skb              1078 net/ipv6/ndisc.c 	idev = __in6_dev_get(skb->dev);
skb              1096 net/ipv6/ndisc.c 	if (!ndisc_parse_options(skb->dev, rs_msg->opt, ndoptlen, &ndopts)) {
skb              1103 net/ipv6/ndisc.c 					     skb->dev);
skb              1108 net/ipv6/ndisc.c 	neigh = __neigh_lookup(&nd_tbl, saddr, skb->dev, 1);
skb              1110 net/ipv6/ndisc.c 		ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
skb              1124 net/ipv6/ndisc.c 	struct sk_buff *skb;
skb              1133 net/ipv6/ndisc.c 	skb = nlmsg_new(msg_size, GFP_ATOMIC);
skb              1134 net/ipv6/ndisc.c 	if (!skb) {
skb              1139 net/ipv6/ndisc.c 	nlh = nlmsg_put(skb, 0, 0, RTM_NEWNDUSEROPT, base_size, 0);
skb              1153 net/ipv6/ndisc.c 	if (nla_put_in6_addr(skb, NDUSEROPT_SRCADDR, &ipv6_hdr(ra)->saddr))
skb              1155 net/ipv6/ndisc.c 	nlmsg_end(skb, nlh);
skb              1157 net/ipv6/ndisc.c 	rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC);
skb              1161 net/ipv6/ndisc.c 	nlmsg_free(skb);
skb              1167 net/ipv6/ndisc.c static void ndisc_router_discovery(struct sk_buff *skb)
skb              1169 net/ipv6/ndisc.c 	struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
skb              1183 net/ipv6/ndisc.c 	optlen = (skb_tail_pointer(skb) - skb_transport_header(skb)) -
skb              1188 net/ipv6/ndisc.c 		  __func__, skb->dev->name);
skb              1189 net/ipv6/ndisc.c 	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
skb              1199 net/ipv6/ndisc.c 	if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) {
skb              1209 net/ipv6/ndisc.c 	in6_dev = __in6_dev_get(skb->dev);
skb              1212 net/ipv6/ndisc.c 			  skb->dev->name);
skb              1216 net/ipv6/ndisc.c 	if (!ndisc_parse_options(skb->dev, opt, optlen, &ndopts)) {
skb              1224 net/ipv6/ndisc.c 			  __func__, skb->dev->name);
skb              1230 net/ipv6/ndisc.c 	if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) {
skb              1233 net/ipv6/ndisc.c 			  __func__, skb->dev->name);
skb              1264 net/ipv6/ndisc.c 			  __func__, skb->dev->name);
skb              1273 net/ipv6/ndisc.c 	    ipv6_chk_addr(net, &ipv6_hdr(skb)->saddr, in6_dev->dev, 0)) {
skb              1276 net/ipv6/ndisc.c 			  skb->dev->name);
skb              1290 net/ipv6/ndisc.c 	rt = rt6_get_dflt_router(net, &ipv6_hdr(skb)->saddr, skb->dev);
skb              1294 net/ipv6/ndisc.c 					  &ipv6_hdr(skb)->saddr);
skb              1309 net/ipv6/ndisc.c 		  rt, lifetime, skb->dev->name);
skb              1313 net/ipv6/ndisc.c 		rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
skb              1314 net/ipv6/ndisc.c 					 skb->dev, pref);
skb              1324 net/ipv6/ndisc.c 					  &ipv6_hdr(skb)->saddr);
skb              1400 net/ipv6/ndisc.c 		neigh = __neigh_lookup(&nd_tbl, &ipv6_hdr(skb)->saddr,
skb              1401 net/ipv6/ndisc.c 				       skb->dev, 1);
skb              1406 net/ipv6/ndisc.c 						     skb->dev);
skb              1413 net/ipv6/ndisc.c 		ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
skb              1424 net/ipv6/ndisc.c 			  __func__, skb->dev->name);
skb              1430 net/ipv6/ndisc.c 	    ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
skb              1434 net/ipv6/ndisc.c 			  skb->dev->name);
skb              1445 net/ipv6/ndisc.c 			if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT &&
skb              1456 net/ipv6/ndisc.c 			rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
skb              1457 net/ipv6/ndisc.c 				      &ipv6_hdr(skb)->saddr);
skb              1466 net/ipv6/ndisc.c 	if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) {
skb              1469 net/ipv6/ndisc.c 			  __func__, skb->dev->name);
skb              1479 net/ipv6/ndisc.c 			addrconf_prefix_rcv(skb->dev, (u8 *)p,
skb              1492 net/ipv6/ndisc.c 		if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
skb              1497 net/ipv6/ndisc.c 			rt6_mtu_change(skb->dev, mtu);
skb              1505 net/ipv6/ndisc.c 		     p = ndisc_next_useropt(skb->dev, p,
skb              1507 net/ipv6/ndisc.c 			ndisc_ra_useropt(skb, p);
skb              1520 net/ipv6/ndisc.c static void ndisc_redirect_rcv(struct sk_buff *skb)
skb              1524 net/ipv6/ndisc.c 	struct rd_msg *msg = (struct rd_msg *)skb_transport_header(skb);
skb              1525 net/ipv6/ndisc.c 	u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
skb              1529 net/ipv6/ndisc.c 	switch (skb->ndisc_nodetype) {
skb              1538 net/ipv6/ndisc.c 	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
skb              1544 net/ipv6/ndisc.c 	if (!ndisc_parse_options(skb->dev, msg->opt, ndoptlen, &ndopts))
skb              1548 net/ipv6/ndisc.c 		ip6_redirect_no_header(skb, dev_net(skb->dev),
skb              1549 net/ipv6/ndisc.c 					skb->dev->ifindex);
skb              1555 net/ipv6/ndisc.c 	if (!pskb_pull(skb, hdr - skb_transport_header(skb)))
skb              1558 net/ipv6/ndisc.c 	icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
skb              1561 net/ipv6/ndisc.c static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
skb              1565 net/ipv6/ndisc.c 	u8 *opt = skb_put(skb, rd_len);
skb              1576 net/ipv6/ndisc.c void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
skb              1578 net/ipv6/ndisc.c 	struct net_device *dev = skb->dev;
skb              1594 net/ipv6/ndisc.c 	if (netif_is_l3_master(skb->dev)) {
skb              1595 net/ipv6/ndisc.c 		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
skb              1606 net/ipv6/ndisc.c 	if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
skb              1614 net/ipv6/ndisc.c 			 &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
skb              1632 net/ipv6/ndisc.c 	peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr, 1);
skb              1640 net/ipv6/ndisc.c 		struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target);
skb              1663 net/ipv6/ndisc.c 		       skb->len + 8);
skb              1677 net/ipv6/ndisc.c 		.dest = ipv6_hdr(skb)->daddr,
skb              1692 net/ipv6/ndisc.c 		ndisc_fill_redirect_hdr_option(buff, skb, rd_len);
skb              1695 net/ipv6/ndisc.c 	ndisc_send_skb(buff, &ipv6_hdr(skb)->saddr, &saddr_buf);
skb              1702 net/ipv6/ndisc.c static void pndisc_redo(struct sk_buff *skb)
skb              1704 net/ipv6/ndisc.c 	ndisc_recv_ns(skb);
skb              1705 net/ipv6/ndisc.c 	kfree_skb(skb);
skb              1708 net/ipv6/ndisc.c static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
skb              1710 net/ipv6/ndisc.c 	struct inet6_dev *idev = __in6_dev_get(skb->dev);
skb              1714 net/ipv6/ndisc.c 	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED &&
skb              1722 net/ipv6/ndisc.c int ndisc_rcv(struct sk_buff *skb)
skb              1726 net/ipv6/ndisc.c 	if (ndisc_suppress_frag_ndisc(skb))
skb              1729 net/ipv6/ndisc.c 	if (skb_linearize(skb))
skb              1732 net/ipv6/ndisc.c 	msg = (struct nd_msg *)skb_transport_header(skb);
skb              1734 net/ipv6/ndisc.c 	__skb_push(skb, skb->data - skb_transport_header(skb));
skb              1736 net/ipv6/ndisc.c 	if (ipv6_hdr(skb)->hop_limit != 255) {
skb              1738 net/ipv6/ndisc.c 			  ipv6_hdr(skb)->hop_limit);
skb              1750 net/ipv6/ndisc.c 		memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
skb              1751 net/ipv6/ndisc.c 		ndisc_recv_ns(skb);
skb              1755 net/ipv6/ndisc.c 		ndisc_recv_na(skb);
skb              1759 net/ipv6/ndisc.c 		ndisc_recv_rs(skb);
skb              1763 net/ipv6/ndisc.c 		ndisc_router_discovery(skb);
skb              1767 net/ipv6/ndisc.c 		ndisc_redirect_rcv(skb);
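
The ndisc.c hits show one allocation pattern feeding every ND message type: reserve device headroom plus an IPv6 header, reset the transport header, then let ndisc_send_ns/na/rs skb_put() their message and link-layer address options before ndisc_send_skb() checksums the payload and ip6_nd_hdr() pushes the IPv6 header. A sketch mirroring the allocator at ndisc.c:415-435 above; nd_alloc() is a hypothetical name:

#include <linux/if_ether.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *nd_alloc(struct net_device *dev, int len)
{
	int hlen = LL_RESERVED_SPACE(dev);	/* link-layer headroom */
	int tlen = dev->needed_tailroom;
	struct sk_buff *skb;

	skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen,
			GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	/* Leave room so ip6_nd_hdr() can later skb_push() the IPv6
	 * header in front of the ICMPv6 payload. */
	skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
	skb_reset_transport_header(skb);
	return skb;
}
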
skb                23 net/ipv6/netfilter.c int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
skb                25 net/ipv6/netfilter.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb                26 net/ipv6/netfilter.c 	struct sock *sk = sk_to_full_sk(skb->sk);
skb                33 net/ipv6/netfilter.c 			strict ? skb_dst(skb)->dev->ifindex : 0,
skb                34 net/ipv6/netfilter.c 		.flowi6_mark = skb->mark,
skb                51 net/ipv6/netfilter.c 	skb_dst_drop(skb);
skb                53 net/ipv6/netfilter.c 	skb_dst_set(skb, dst);
skb                56 net/ipv6/netfilter.c 	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
skb                57 net/ipv6/netfilter.c 	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
skb                58 net/ipv6/netfilter.c 		skb_dst_set(skb, NULL);
skb                62 net/ipv6/netfilter.c 		skb_dst_set(skb, dst);
skb                67 net/ipv6/netfilter.c 	hh_len = skb_dst(skb)->dev->hard_header_len;
skb                68 net/ipv6/netfilter.c 	if (skb_headroom(skb) < hh_len &&
skb                69 net/ipv6/netfilter.c 	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
skb                77 net/ipv6/netfilter.c static int nf_ip6_reroute(struct sk_buff *skb,
skb                83 net/ipv6/netfilter.c 		const struct ipv6hdr *iph = ipv6_hdr(skb);
skb                86 net/ipv6/netfilter.c 		    skb->mark != rt_info->mark)
skb                87 net/ipv6/netfilter.c 			return ip6_route_me_harder(entry->state.net, skb);
skb               115 net/ipv6/netfilter.c int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb               121 net/ipv6/netfilter.c 	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
skb               122 net/ipv6/netfilter.c 	ktime_t tstamp = skb->tstamp;
skb               129 net/ipv6/netfilter.c 	err = ip6_find_1stfragopt(skb, &prevhdr);
skb               135 net/ipv6/netfilter.c 	mtu = skb->dev->mtu;
skb               145 net/ipv6/netfilter.c 	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
skb               146 net/ipv6/netfilter.c 				    &ipv6_hdr(skb)->saddr);
skb               148 net/ipv6/netfilter.c 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb               149 net/ipv6/netfilter.c 	    (err = skb_checksum_help(skb)))
skb               152 net/ipv6/netfilter.c 	hroom = LL_RESERVED_SPACE(skb->dev);
skb               153 net/ipv6/netfilter.c 	if (skb_has_frag_list(skb)) {
skb               154 net/ipv6/netfilter.c 		unsigned int first_len = skb_pagelen(skb);
skb               159 net/ipv6/netfilter.c 		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
skb               162 net/ipv6/netfilter.c 		if (skb_cloned(skb))
skb               165 net/ipv6/netfilter.c 		skb_walk_frags(skb, frag2) {
skb               175 net/ipv6/netfilter.c 		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
skb               185 net/ipv6/netfilter.c 				ip6_fraglist_prepare(skb, &iter);
skb               187 net/ipv6/netfilter.c 			skb->tstamp = tstamp;
skb               188 net/ipv6/netfilter.c 			err = output(net, sk, data, skb);
skb               192 net/ipv6/netfilter.c 			skb = ip6_fraglist_next(&iter);
skb               207 net/ipv6/netfilter.c 	ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,
skb               208 net/ipv6/netfilter.c 		      LL_RESERVED_SPACE(skb->dev), prevhdr, nexthdr, frag_id,
skb               214 net/ipv6/netfilter.c 		skb2 = ip6_frag_next(skb, &state);
skb               225 net/ipv6/netfilter.c 	consume_skb(skb);
skb               229 net/ipv6/netfilter.c 	kfree_skb(skb);
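
ip6_route_me_harder() above re-resolves the route after a netfilter target rewrote addresses or mark, then fixes headroom for the possibly different egress device. A sketch of that last step, matching the guard at netfilter.c:67-69 above; fix_headroom() is illustrative and assumes skb_dst(skb) is valid:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst.h>

static int fix_headroom(struct sk_buff *skb)
{
	unsigned int hh_len = skb_dst(skb)->dev->hard_header_len;

	/* The rerouted egress device may need more link-layer
	 * headroom than the skb was allocated with. */
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
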
skb                49 net/ipv6/netfilter/ip6_tables.c ip6_packet_match(const struct sk_buff *skb,
skb                57 net/ipv6/netfilter/ip6_tables.c 	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
skb                84 net/ipv6/netfilter/ip6_tables.c 		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
skb               120 net/ipv6/netfilter/ip6_tables.c ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
skb               213 net/ipv6/netfilter/ip6_tables.c 			 const struct sk_buff *skb,
skb               236 net/ipv6/netfilter/ip6_tables.c 	nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
skb               250 net/ipv6/netfilter/ip6_tables.c ip6t_do_table(struct sk_buff *skb,
skb               307 net/ipv6/netfilter/ip6_tables.c 		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
skb               317 net/ipv6/netfilter/ip6_tables.c 			if (!acpar.match->match(skb, &acpar))
skb               322 net/ipv6/netfilter/ip6_tables.c 		ADD_COUNTER(*counter, skb->len, 1);
skb               329 net/ipv6/netfilter/ip6_tables.c 		if (unlikely(skb->nf_trace))
skb               330 net/ipv6/netfilter/ip6_tables.c 			trace_packet(state->net, skb, hook, state->in,
skb               367 net/ipv6/netfilter/ip6_tables.c 		verdict = t->u.kernel.target->target(skb, &acpar);
skb              1829 net/ipv6/netfilter/ip6_tables.c icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
skb              1839 net/ipv6/netfilter/ip6_tables.c 	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
skb                81 net/ipv6/netfilter/ip6t_NPT.c ip6t_snpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                85 net/ipv6/netfilter/ip6t_NPT.c 	if (!ip6t_npt_map_pfx(npt, &ipv6_hdr(skb)->saddr)) {
skb                86 net/ipv6/netfilter/ip6t_NPT.c 		icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD,
skb                94 net/ipv6/netfilter/ip6t_NPT.c ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                98 net/ipv6/netfilter/ip6t_NPT.c 	if (!ip6t_npt_map_pfx(npt, &ipv6_hdr(skb)->daddr)) {
skb                99 net/ipv6/netfilter/ip6t_NPT.c 		icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD,
skb                35 net/ipv6/netfilter/ip6t_REJECT.c reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb                42 net/ipv6/netfilter/ip6t_REJECT.c 		nf_send_unreach6(net, skb, ICMPV6_NOROUTE, xt_hooknum(par));
skb                45 net/ipv6/netfilter/ip6t_REJECT.c 		nf_send_unreach6(net, skb, ICMPV6_ADM_PROHIBITED,
skb                49 net/ipv6/netfilter/ip6t_REJECT.c 		nf_send_unreach6(net, skb, ICMPV6_NOT_NEIGHBOUR,
skb                53 net/ipv6/netfilter/ip6t_REJECT.c 		nf_send_unreach6(net, skb, ICMPV6_ADDR_UNREACH,
skb                57 net/ipv6/netfilter/ip6t_REJECT.c 		nf_send_unreach6(net, skb, ICMPV6_PORT_UNREACH,
skb                64 net/ipv6/netfilter/ip6t_REJECT.c 		nf_send_reset6(net, skb, xt_hooknum(par));
skb                67 net/ipv6/netfilter/ip6t_REJECT.c 		nf_send_unreach6(net, skb, ICMPV6_POLICY_FAIL, xt_hooknum(par));
skb                70 net/ipv6/netfilter/ip6t_REJECT.c 		nf_send_unreach6(net, skb, ICMPV6_REJECT_ROUTE,
skb                13 net/ipv6/netfilter/ip6t_SYNPROXY.c synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb                21 net/ipv6/netfilter/ip6t_SYNPROXY.c 	if (nf_ip6_checksum(skb, xt_hooknum(par), par->thoff, IPPROTO_TCP))
skb                24 net/ipv6/netfilter/ip6t_SYNPROXY.c 	th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th);
skb                28 net/ipv6/netfilter/ip6t_SYNPROXY.c 	if (!synproxy_parse_options(skb, par->thoff, th, &opts))
skb                48 net/ipv6/netfilter/ip6t_SYNPROXY.c 		synproxy_send_client_synack_ipv6(net, skb, th, &opts);
skb                49 net/ipv6/netfilter/ip6t_SYNPROXY.c 		consume_skb(skb);
skb                54 net/ipv6/netfilter/ip6t_SYNPROXY.c 		if (synproxy_recv_client_ack_ipv6(net, skb, th, &opts,
skb                56 net/ipv6/netfilter/ip6t_SYNPROXY.c 			consume_skb(skb);
skb                36 net/ipv6/netfilter/ip6t_ah.c static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                45 net/ipv6/netfilter/ip6t_ah.c 	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL, NULL);
skb                52 net/ipv6/netfilter/ip6t_ah.c 	ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah);
skb                20 net/ipv6/netfilter/ip6t_eui64.c eui64_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                24 net/ipv6/netfilter/ip6t_eui64.c 	if (!(skb_mac_header(skb) >= skb->head &&
skb                25 net/ipv6/netfilter/ip6t_eui64.c 	      skb_mac_header(skb) + ETH_HLEN <= skb->data) &&
skb                33 net/ipv6/netfilter/ip6t_eui64.c 	if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
skb                34 net/ipv6/netfilter/ip6t_eui64.c 		if (ipv6_hdr(skb)->version == 0x6) {
skb                35 net/ipv6/netfilter/ip6t_eui64.c 			memcpy(eui64, eth_hdr(skb)->h_source, 3);
skb                36 net/ipv6/netfilter/ip6t_eui64.c 			memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
skb                41 net/ipv6/netfilter/ip6t_eui64.c 			if (!memcmp(ipv6_hdr(skb)->saddr.s6_addr + 8, eui64,
skb                35 net/ipv6/netfilter/ip6t_frag.c frag_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                43 net/ipv6/netfilter/ip6t_frag.c 	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL);
skb                50 net/ipv6/netfilter/ip6t_frag.c 	fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag);
skb                44 net/ipv6/netfilter/ip6t_hbh.c hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                60 net/ipv6/netfilter/ip6t_hbh.c 	err = ipv6_find_hdr(skb, &ptr,
skb                69 net/ipv6/netfilter/ip6t_hbh.c 	oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
skb                76 net/ipv6/netfilter/ip6t_hbh.c 	if (skb->len - ptr < hdrlen) {
skb               105 net/ipv6/netfilter/ip6t_hbh.c 			tp = skb_header_pointer(skb, ptr, sizeof(_opttype),
skb               125 net/ipv6/netfilter/ip6t_hbh.c 				lp = skb_header_pointer(skb, ptr + 1,
skb               147 net/ipv6/netfilter/ip6t_hbh.c 			if ((ptr > skb->len - optlen || hdrlen < optlen) &&
skb                27 net/ipv6/netfilter/ip6t_ipv6header.c ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                38 net/ipv6/netfilter/ip6t_ipv6header.c 	nexthdr = ipv6_hdr(skb)->nexthdr;
skb                42 net/ipv6/netfilter/ip6t_ipv6header.c 	len = skb->len - ptr;
skb                64 net/ipv6/netfilter/ip6t_ipv6header.c 		hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
skb               102 net/ipv6/netfilter/ip6t_ipv6header.c 		if (ptr > skb->len)
skb                31 net/ipv6/netfilter/ip6t_mh.c static bool mh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                41 net/ipv6/netfilter/ip6t_mh.c 	mh = skb_header_pointer(skb, par->thoff, sizeof(_mh), &_mh);
skb                32 net/ipv6/netfilter/ip6t_rpfilter.c static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
skb                36 net/ipv6/netfilter/ip6t_rpfilter.c 	struct ipv6hdr *iph = ipv6_hdr(skb);
skb                53 net/ipv6/netfilter/ip6t_rpfilter.c 	fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
skb                63 net/ipv6/netfilter/ip6t_rpfilter.c 	rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
skb                85 net/ipv6/netfilter/ip6t_rpfilter.c rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
skb                87 net/ipv6/netfilter/ip6t_rpfilter.c 	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
skb                90 net/ipv6/netfilter/ip6t_rpfilter.c static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                97 net/ipv6/netfilter/ip6t_rpfilter.c 	if (rpfilter_is_loopback(skb, xt_in(par)))
skb               100 net/ipv6/netfilter/ip6t_rpfilter.c 	iph = ipv6_hdr(skb);
skb               105 net/ipv6/netfilter/ip6t_rpfilter.c 	return rpfilter_lookup_reverse6(xt_net(par), skb, xt_in(par),
skb                36 net/ipv6/netfilter/ip6t_rt.c static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                49 net/ipv6/netfilter/ip6t_rt.c 	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL, NULL);
skb                56 net/ipv6/netfilter/ip6t_rt.c 	rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
skb                63 net/ipv6/netfilter/ip6t_rt.c 	if (skb->len - ptr < hdrlen) {
skb               106 net/ipv6/netfilter/ip6t_rt.c 		rp = skb_header_pointer(skb,
skb               130 net/ipv6/netfilter/ip6t_rt.c 				ap = skb_header_pointer(skb,
skb               163 net/ipv6/netfilter/ip6t_rt.c 				ap = skb_header_pointer(skb,
skb                24 net/ipv6/netfilter/ip6t_srh.c static bool srh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                31 net/ipv6/netfilter/ip6t_srh.c 	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
skb                33 net/ipv6/netfilter/ip6t_srh.c 	srh = skb_header_pointer(skb, srhoff, sizeof(_srh), &_srh);
skb                38 net/ipv6/netfilter/ip6t_srh.c 	if (skb->len - srhoff < hdrlen)
skb               116 net/ipv6/netfilter/ip6t_srh.c static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb               125 net/ipv6/netfilter/ip6t_srh.c 	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
skb               127 net/ipv6/netfilter/ip6t_srh.c 	srh = skb_header_pointer(skb, srhoff, sizeof(_srh), &_srh);
skb               132 net/ipv6/netfilter/ip6t_srh.c 	if (skb->len - srhoff < hdrlen)
skb               208 net/ipv6/netfilter/ip6t_srh.c 		psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid);
skb               223 net/ipv6/netfilter/ip6t_srh.c 		nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid);
skb               235 net/ipv6/netfilter/ip6t_srh.c 		lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid);
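
The ip6t_* match modules above (ah, frag, hbh, rt, srh, mh) share one access idiom: ipv6_find_hdr() walks the extension-header chain to an offset, then skb_header_pointer() copies the header into a stack buffer when it is not linear. A minimal sketch of the idiom; example_mt6() is hypothetical:

#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>

static bool example_mt6(const struct sk_buff *skb)
{
	struct ipv6_rt_hdr _route;
	const struct ipv6_rt_hdr *rh;
	unsigned int ptr = 0;

	/* Find the routing header, skipping preceding ext headers. */
	if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL, NULL) < 0)
		return false;

	/* Copy it out if non-linear; never dereference skb->data at
	 * an unverified offset. */
	rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
	return rh && rh->type == IPV6_SRCRT_TYPE_2;
}

The stack buffer means the match still works when the extension header straddles paged data, which is why ip6t_rt.c:56 above passes &_route rather than casting skb->data.
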
skb                35 net/ipv6/netfilter/ip6table_filter.c ip6table_filter_hook(void *priv, struct sk_buff *skb,
skb                38 net/ipv6/netfilter/ip6table_filter.c 	return ip6t_do_table(skb, state, state->net->ipv6.ip6table_filter);
skb                35 net/ipv6/netfilter/ip6table_mangle.c ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
skb                44 net/ipv6/netfilter/ip6table_mangle.c 	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
skb                45 net/ipv6/netfilter/ip6table_mangle.c 	memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
skb                46 net/ipv6/netfilter/ip6table_mangle.c 	mark = skb->mark;
skb                47 net/ipv6/netfilter/ip6table_mangle.c 	hop_limit = ipv6_hdr(skb)->hop_limit;
skb                50 net/ipv6/netfilter/ip6table_mangle.c 	flowlabel = *((u_int32_t *)ipv6_hdr(skb));
skb                52 net/ipv6/netfilter/ip6table_mangle.c 	ret = ip6t_do_table(skb, state, state->net->ipv6.ip6table_mangle);
skb                55 net/ipv6/netfilter/ip6table_mangle.c 	    (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
skb                56 net/ipv6/netfilter/ip6table_mangle.c 	     !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &daddr) ||
skb                57 net/ipv6/netfilter/ip6table_mangle.c 	     skb->mark != mark ||
skb                58 net/ipv6/netfilter/ip6table_mangle.c 	     ipv6_hdr(skb)->hop_limit != hop_limit ||
skb                59 net/ipv6/netfilter/ip6table_mangle.c 	     flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
skb                60 net/ipv6/netfilter/ip6table_mangle.c 		err = ip6_route_me_harder(state->net, skb);
skb                70 net/ipv6/netfilter/ip6table_mangle.c ip6table_mangle_hook(void *priv, struct sk_buff *skb,
skb                74 net/ipv6/netfilter/ip6table_mangle.c 		return ip6t_mangle_out(skb, state);
skb                75 net/ipv6/netfilter/ip6table_mangle.c 	return ip6t_do_table(skb, state, state->net->ipv6.ip6table_mangle);
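
ip6t_mangle_out() above demonstrates the snapshot-and-reroute contract for LOCAL_OUT mangling: record the fields that feed routing (addresses, mark, hop limit, flow label), run the table, and call ip6_route_me_harder() only if a target changed one of them. A simplified sketch tracking just the source address and mark; mangle_out_example() is hypothetical:

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/ipv6.h>

static unsigned int mangle_out_example(struct sk_buff *skb,
				       const struct nf_hook_state *state)
{
	struct in6_addr saddr = ipv6_hdr(skb)->saddr;
	u32 mark = skb->mark;
	unsigned int ret;

	ret = ip6t_do_table(skb, state, state->net->ipv6.ip6table_mangle);

	/* A target may have rewritten a field that feeds routing. */
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
	     skb->mark != mark))
		ip6_route_me_harder(state->net, skb);

	return ret;
}
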
skb                32 net/ipv6/netfilter/ip6table_nat.c 					  struct sk_buff *skb,
skb                35 net/ipv6/netfilter/ip6table_nat.c 	return ip6t_do_table(skb, state, state->net->ipv6.ip6table_nat);
skb                40 net/ipv6/netfilter/ip6table_raw.c ip6table_raw_hook(void *priv, struct sk_buff *skb,
skb                43 net/ipv6/netfilter/ip6table_raw.c 	return ip6t_do_table(skb, state, state->net->ipv6.ip6table_raw);
skb                39 net/ipv6/netfilter/ip6table_security.c ip6table_security_hook(void *priv, struct sk_buff *skb,
skb                42 net/ipv6/netfilter/ip6table_security.c 	return ip6t_do_table(skb, state, state->net->ipv6.ip6table_security);
skb               130 net/ipv6/netfilter/nf_conntrack_reasm.c static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
skb               169 net/ipv6/netfilter/nf_conntrack_reasm.c static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
skb               183 net/ipv6/netfilter/nf_conntrack_reasm.c 	payload_len = ntohs(ipv6_hdr(skb)->payload_len);
skb               187 net/ipv6/netfilter/nf_conntrack_reasm.c 			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
skb               194 net/ipv6/netfilter/nf_conntrack_reasm.c 	ecn = ip6_frag_ecn(ipv6_hdr(skb));
skb               196 net/ipv6/netfilter/nf_conntrack_reasm.c 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb               197 net/ipv6/netfilter/nf_conntrack_reasm.c 		const unsigned char *nh = skb_network_header(skb);
skb               198 net/ipv6/netfilter/nf_conntrack_reasm.c 		skb->csum = csum_sub(skb->csum,
skb               241 net/ipv6/netfilter/nf_conntrack_reasm.c 	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
skb               245 net/ipv6/netfilter/nf_conntrack_reasm.c 	if (pskb_trim_rcsum(skb, end - offset)) {
skb               251 net/ipv6/netfilter/nf_conntrack_reasm.c 	dev = skb->dev;
skb               256 net/ipv6/netfilter/nf_conntrack_reasm.c 	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
skb               260 net/ipv6/netfilter/nf_conntrack_reasm.c 			kfree_skb(skb);
skb               269 net/ipv6/netfilter/nf_conntrack_reasm.c 	fq->q.stamp = skb->tstamp;
skb               270 net/ipv6/netfilter/nf_conntrack_reasm.c 	fq->q.meat += skb->len;
skb               274 net/ipv6/netfilter/nf_conntrack_reasm.c 	add_frag_mem_limit(fq->q.fqdir, skb->truesize);
skb               286 net/ipv6/netfilter/nf_conntrack_reasm.c 		unsigned long orefdst = skb->_skb_refdst;
skb               288 net/ipv6/netfilter/nf_conntrack_reasm.c 		skb->_skb_refdst = 0UL;
skb               289 net/ipv6/netfilter/nf_conntrack_reasm.c 		err = nf_ct_frag6_reasm(fq, skb, prev, dev);
skb               290 net/ipv6/netfilter/nf_conntrack_reasm.c 		skb->_skb_refdst = orefdst;
skb               298 net/ipv6/netfilter/nf_conntrack_reasm.c 	skb_dst_drop(skb);
skb               304 net/ipv6/netfilter/nf_conntrack_reasm.c 	skb_dst_drop(skb);
skb               315 net/ipv6/netfilter/nf_conntrack_reasm.c static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
skb               328 net/ipv6/netfilter/nf_conntrack_reasm.c 	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
skb               332 net/ipv6/netfilter/nf_conntrack_reasm.c 	payload_len = ((skb->data - skb_network_header(skb)) -
skb               343 net/ipv6/netfilter/nf_conntrack_reasm.c 	skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
skb               344 net/ipv6/netfilter/nf_conntrack_reasm.c 	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
skb               345 net/ipv6/netfilter/nf_conntrack_reasm.c 		(skb->data - skb->head) - sizeof(struct frag_hdr));
skb               346 net/ipv6/netfilter/nf_conntrack_reasm.c 	skb->mac_header += sizeof(struct frag_hdr);
skb               347 net/ipv6/netfilter/nf_conntrack_reasm.c 	skb->network_header += sizeof(struct frag_hdr);
skb               349 net/ipv6/netfilter/nf_conntrack_reasm.c 	skb_reset_transport_header(skb);
skb               351 net/ipv6/netfilter/nf_conntrack_reasm.c 	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
skb               353 net/ipv6/netfilter/nf_conntrack_reasm.c 	skb->ignore_df = 1;
skb               354 net/ipv6/netfilter/nf_conntrack_reasm.c 	skb->dev = dev;
skb               355 net/ipv6/netfilter/nf_conntrack_reasm.c 	ipv6_hdr(skb)->payload_len = htons(payload_len);
skb               356 net/ipv6/netfilter/nf_conntrack_reasm.c 	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
skb               357 net/ipv6/netfilter/nf_conntrack_reasm.c 	IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
skb               360 net/ipv6/netfilter/nf_conntrack_reasm.c 	if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               361 net/ipv6/netfilter/nf_conntrack_reasm.c 		skb->csum = csum_partial(skb_network_header(skb),
skb               362 net/ipv6/netfilter/nf_conntrack_reasm.c 					 skb_network_header_len(skb),
skb               363 net/ipv6/netfilter/nf_conntrack_reasm.c 					 skb->csum);
skb               390 net/ipv6/netfilter/nf_conntrack_reasm.c find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
skb               392 net/ipv6/netfilter/nf_conntrack_reasm.c 	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
skb               393 net/ipv6/netfilter/nf_conntrack_reasm.c 	const int netoff = skb_network_offset(skb);
skb               396 net/ipv6/netfilter/nf_conntrack_reasm.c 	int len = skb->len - start;
skb               414 net/ipv6/netfilter/nf_conntrack_reasm.c 		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
skb               439 net/ipv6/netfilter/nf_conntrack_reasm.c int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
skb               441 net/ipv6/netfilter/nf_conntrack_reasm.c 	u16 savethdr = skb->transport_header;
skb               449 net/ipv6/netfilter/nf_conntrack_reasm.c 	if (ipv6_hdr(skb)->payload_len == 0) {
skb               454 net/ipv6/netfilter/nf_conntrack_reasm.c 	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
skb               457 net/ipv6/netfilter/nf_conntrack_reasm.c 	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
skb               460 net/ipv6/netfilter/nf_conntrack_reasm.c 	skb_set_transport_header(skb, fhoff);
skb               461 net/ipv6/netfilter/nf_conntrack_reasm.c 	hdr = ipv6_hdr(skb);
skb               462 net/ipv6/netfilter/nf_conntrack_reasm.c 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
skb               464 net/ipv6/netfilter/nf_conntrack_reasm.c 	skb_orphan(skb);
skb               466 net/ipv6/netfilter/nf_conntrack_reasm.c 		     skb->dev ? skb->dev->ifindex : 0);
skb               474 net/ipv6/netfilter/nf_conntrack_reasm.c 	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
skb               476 net/ipv6/netfilter/nf_conntrack_reasm.c 		skb->transport_header = savethdr;
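
Both IPv6 reassemblers (nf_conntrack_reasm.c here and net/ipv6/reassembly.c below) keep hardware checksums coherent across header removal: before the IPv6 header and fragment header are pulled off a CHECKSUM_COMPLETE skb, their contribution is subtracted from skb->csum. A sketch of the pattern, assuming fhdr points at the fragment header in linear data; the helper name is illustrative:

    /* Pull everything up to the fragment payload while keeping
     * skb->csum valid for the bytes that remain.
     */
    static int pull_to_frag_payload(struct sk_buff *skb, struct frag_hdr *fhdr)
    {
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
            const unsigned char *nh = skb_network_header(skb);

            /* Subtract the checksum of the bytes about to be removed. */
            skb->csum = csum_sub(skb->csum,
                                 csum_partial(nh, (u8 *)(fhdr + 1) - nh, 0));
        }
        if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data))
            return -ENOMEM;
        return 0;
    }
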
skb                31 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c 						struct sk_buff *skb)
skb                35 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c 	if (skb_nfct(skb)) {
skb                37 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c 		const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb                42 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c 	if (nf_bridge_in_prerouting(skb))
skb                52 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c 				struct sk_buff *skb,
skb                59 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c 	if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
skb                62 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c 	if (skb->_nfct == IP_CT_UNTRACKED)
skb                66 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c 	err = nf_ct_frag6_gather(state->net, skb,
skb                67 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c 				 nf_ct6_defrag_user(state->hook, skb));
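
The defrag hook's contract with nf_ct_frag6_gather() is carried in the return value: -EINPROGRESS means the fragment was absorbed into a queue, so the hook must report NF_STOLEN rather than let the stack keep processing a queued skb. A sketch of how a caller dispatches on it; defrag_sketch() is illustrative:

    static unsigned int defrag_sketch(struct net *net, struct sk_buff *skb,
                                      u32 user)
    {
        int err = nf_ct_frag6_gather(net, skb, user);

        if (err == -EINPROGRESS)
            return NF_STOLEN;   /* queued, waiting for more fragments */
        if (err < 0)
            return NF_DROP;     /* malformed or over memory limits */
        return NF_ACCEPT;       /* skb now holds the reassembled datagram */
    }
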
skb                19 net/ipv6/netfilter/nf_dup_ipv6.c static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
skb                22 net/ipv6/netfilter/nf_dup_ipv6.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb                39 net/ipv6/netfilter/nf_dup_ipv6.c 	skb_dst_drop(skb);
skb                40 net/ipv6/netfilter/nf_dup_ipv6.c 	skb_dst_set(skb, dst);
skb                41 net/ipv6/netfilter/nf_dup_ipv6.c 	skb->dev      = dst->dev;
skb                42 net/ipv6/netfilter/nf_dup_ipv6.c 	skb->protocol = htons(ETH_P_IPV6);
skb                47 net/ipv6/netfilter/nf_dup_ipv6.c void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
skb                52 net/ipv6/netfilter/nf_dup_ipv6.c 	skb = pskb_copy(skb, GFP_ATOMIC);
skb                53 net/ipv6/netfilter/nf_dup_ipv6.c 	if (skb == NULL)
skb                57 net/ipv6/netfilter/nf_dup_ipv6.c 	nf_reset_ct(skb);
skb                58 net/ipv6/netfilter/nf_dup_ipv6.c 	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
skb                62 net/ipv6/netfilter/nf_dup_ipv6.c 		struct ipv6hdr *iph = ipv6_hdr(skb);
skb                65 net/ipv6/netfilter/nf_dup_ipv6.c 	if (nf_dup_ipv6_route(net, skb, gw, oif)) {
skb                67 net/ipv6/netfilter/nf_dup_ipv6.c 		ip6_local_out(net, skb->sk, skb);
skb                70 net/ipv6/netfilter/nf_dup_ipv6.c 		kfree_skb(skb);
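
nf_dup_ipv6() duplicates the packet rather than stealing it: pskb_copy() yields a private copy, conntrack state is detached and marked untracked so the clone is not tracked a second time, the copy is re-routed toward the configured gateway, and ip6_local_out() transmits it while the original continues unmodified. A sketch of the core sequence with the hop-limit and loop-avoidance details trimmed; dup_sketch() is illustrative:

    static void dup_sketch(struct net *net, struct sk_buff *skb,
                           const struct in6_addr *gw, int oif)
    {
        struct sk_buff *copy = pskb_copy(skb, GFP_ATOMIC);

        if (!copy)
            return;
        nf_reset_ct(copy);                        /* drop inherited ct ref */
        nf_ct_set(copy, NULL, IP_CT_UNTRACKED);   /* don't track the clone */
        if (nf_dup_ipv6_route(net, copy, gw, oif))
            ip6_local_out(net, copy->sk, copy);
        else
            kfree_skb(copy);
    }
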
skb                38 net/ipv6/netfilter/nf_log_ipv6.c 			     const struct sk_buff *skb, unsigned int ip6hoff,
skb                54 net/ipv6/netfilter/nf_log_ipv6.c 	ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
skb                77 net/ipv6/netfilter/nf_log_ipv6.c 		hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
skb                93 net/ipv6/netfilter/nf_log_ipv6.c 			fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
skb               141 net/ipv6/netfilter/nf_log_ipv6.c 				ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
skb               149 net/ipv6/netfilter/nf_log_ipv6.c 						       skb->len - ptr);
skb               176 net/ipv6/netfilter/nf_log_ipv6.c 				eh = skb_header_pointer(skb, ptr, sizeof(_esph),
skb               180 net/ipv6/netfilter/nf_log_ipv6.c 						       skb->len - ptr);
skb               203 net/ipv6/netfilter/nf_log_ipv6.c 		if (nf_log_dump_tcp_header(m, skb, currenthdr, fragment,
skb               209 net/ipv6/netfilter/nf_log_ipv6.c 		if (nf_log_dump_udp_header(m, skb, currenthdr, fragment, ptr))
skb               223 net/ipv6/netfilter/nf_log_ipv6.c 		ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
skb               226 net/ipv6/netfilter/nf_log_ipv6.c 				       skb->len - ptr);
skb               258 net/ipv6/netfilter/nf_log_ipv6.c 				dump_ipv6_packet(net, m, info, skb,
skb               278 net/ipv6/netfilter/nf_log_ipv6.c 		nf_log_dump_sk_uid_gid(net, m, skb->sk);
skb               281 net/ipv6/netfilter/nf_log_ipv6.c 	if (recurse && skb->mark)
skb               282 net/ipv6/netfilter/nf_log_ipv6.c 		nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
skb               287 net/ipv6/netfilter/nf_log_ipv6.c 				 const struct sk_buff *skb)
skb               289 net/ipv6/netfilter/nf_log_ipv6.c 	struct net_device *dev = skb->dev;
skb               301 net/ipv6/netfilter/nf_log_ipv6.c 		       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
skb               302 net/ipv6/netfilter/nf_log_ipv6.c 		       ntohs(eth_hdr(skb)->h_proto));
skb               311 net/ipv6/netfilter/nf_log_ipv6.c 	    skb->mac_header != skb->network_header) {
skb               312 net/ipv6/netfilter/nf_log_ipv6.c 		const unsigned char *p = skb_mac_header(skb);
skb               319 net/ipv6/netfilter/nf_log_ipv6.c 			if (p < skb->head)
skb               332 net/ipv6/netfilter/nf_log_ipv6.c 				(struct iphdr *)skb_mac_header(skb);
skb               342 net/ipv6/netfilter/nf_log_ipv6.c 			      unsigned int hooknum, const struct sk_buff *skb,
skb               359 net/ipv6/netfilter/nf_log_ipv6.c 	nf_log_dump_packet_common(m, pf, hooknum, skb, in, out,
skb               363 net/ipv6/netfilter/nf_log_ipv6.c 		dump_ipv6_mac_header(m, loginfo, skb);
skb               365 net/ipv6/netfilter/nf_log_ipv6.c 	dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1);
skb               217 net/ipv6/netfilter/nf_reject_ipv6.c static bool reject6_csum_ok(struct sk_buff *skb, int hook)
skb               219 net/ipv6/netfilter/nf_reject_ipv6.c 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               224 net/ipv6/netfilter/nf_reject_ipv6.c 	if (skb_csum_unnecessary(skb))
skb               228 net/ipv6/netfilter/nf_reject_ipv6.c 	thoff = ipv6_skip_exthdr(skb, ((u8 *)(ip6h + 1) - skb->data), &proto, &fo);
skb               230 net/ipv6/netfilter/nf_reject_ipv6.c 	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
skb               236 net/ipv6/netfilter/nf_reject_ipv6.c 	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
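
reject6_csum_ok() exists so the reject path never synthesizes a response for a packet whose own transport checksum is bad, which keeps forged garbage from triggering RSTs. The ordering matters: skip extension headers to the transport header, refuse non-first fragments, and only then checksum. A sketch of the guard as the reset path plausibly applies it (placement here is an assumption; the builder is elided):

    /* Only answer verifiably-intact input: a reset generated for a
     * corrupted or non-first-fragment packet could be abused.
     */
    if (!reject6_csum_ok(oldskb, hook))
        return;         /* silently ignore; send nothing */
    /* ... build and transmit the RST for oldskb ... */
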
skb                21 net/ipv6/netfilter/nf_socket_ipv6.c extract_icmp6_fields(const struct sk_buff *skb,
skb                37 net/ipv6/netfilter/nf_socket_ipv6.c 	icmph = skb_header_pointer(skb, outside_hdrlen,
skb                45 net/ipv6/netfilter/nf_socket_ipv6.c 	inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph),
skb                51 net/ipv6/netfilter/nf_socket_ipv6.c 	inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) +
skb                61 net/ipv6/netfilter/nf_socket_ipv6.c 	ports = skb_header_pointer(skb, inside_hdrlen,
skb                78 net/ipv6/netfilter/nf_socket_ipv6.c nf_socket_get_sock_v6(struct net *net, struct sk_buff *skb, int doff,
skb                86 net/ipv6/netfilter/nf_socket_ipv6.c 		return inet6_lookup(net, &tcp_hashinfo, skb, doff,
skb                97 net/ipv6/netfilter/nf_socket_ipv6.c struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
skb               102 net/ipv6/netfilter/nf_socket_ipv6.c 	struct ipv6hdr *iph = ipv6_hdr(skb);
skb               107 net/ipv6/netfilter/nf_socket_ipv6.c 	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
skb               117 net/ipv6/netfilter/nf_socket_ipv6.c 		hp = skb_header_pointer(skb, thoff, tproto == IPPROTO_UDP ?
skb               126 net/ipv6/netfilter/nf_socket_ipv6.c 		data_skb = (struct sk_buff *)skb;
skb               134 net/ipv6/netfilter/nf_socket_ipv6.c 		if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
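
nf_sk_lookup_slow_v6() above needs the 4-tuple before it can find a socket: ipv6_find_hdr() locates the transport header past any extension headers, and then only the leading port pair is read, since that prefix is common to TCP and UDP. A sketch of the bounded port read; the anonymous struct and variable names are illustrative (the kernel reads into a udphdr):

    /* The first four bytes of TCP and UDP headers are both
     * src/dst port, so one bounded read serves either protocol.
     */
    struct { __be16 source, dest; } _ports, *ports;

    ports = skb_header_pointer(skb, thoff, sizeof(_ports), &_ports);
    if (!ports)
        return NULL;    /* truncated packet: nothing to match */
    sport = ports->source;
    dport = ports->dest;
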
skb                10 net/ipv6/netfilter/nf_tproxy_ipv6.c nf_tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
skb                21 net/ipv6/netfilter/nf_tproxy_ipv6.c 	indev = __in6_dev_get(skb->dev);
skb                39 net/ipv6/netfilter/nf_tproxy_ipv6.c nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
skb                45 net/ipv6/netfilter/nf_tproxy_ipv6.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb                48 net/ipv6/netfilter/nf_tproxy_ipv6.c 	hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
skb                59 net/ipv6/netfilter/nf_tproxy_ipv6.c 		sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, tproto,
skb                61 net/ipv6/netfilter/nf_tproxy_ipv6.c 					    nf_tproxy_laddr6(skb, laddr, &iph->daddr),
skb                64 net/ipv6/netfilter/nf_tproxy_ipv6.c 					    skb->dev, NF_TPROXY_LOOKUP_LISTENER);
skb                76 net/ipv6/netfilter/nf_tproxy_ipv6.c nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
skb                89 net/ipv6/netfilter/nf_tproxy_ipv6.c 		hp = skb_header_pointer(skb, thoff,
skb                96 net/ipv6/netfilter/nf_tproxy_ipv6.c 			sk = inet6_lookup_listener(net, &tcp_hashinfo, skb,
skb                28 net/ipv6/netfilter/nft_dup_ipv6.c 	nf_dup_ipv6(nft_net(pkt), pkt->skb, nft_hook(pkt), gw, oif);
skb                53 net/ipv6/netfilter/nft_dup_ipv6.c static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                57 net/ipv6/netfilter/nft_dup_ipv6.c 	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
skb                60 net/ipv6/netfilter/nft_dup_ipv6.c 	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
skb                39 net/ipv6/netfilter/nft_fib_ipv6.c 		fl6->flowi6_oif = get_ifindex(dev ? dev : pkt->skb->dev);
skb                46 net/ipv6/netfilter/nft_fib_ipv6.c 		fl6->flowi6_mark = pkt->skb->mark;
skb               124 net/ipv6/netfilter/nft_fib_ipv6.c 	int noff = skb_network_offset(pkt->skb);
skb               128 net/ipv6/netfilter/nft_fib_ipv6.c 	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
skb               142 net/ipv6/netfilter/nft_fib_ipv6.c 	int noff = skb_network_offset(pkt->skb);
skb               158 net/ipv6/netfilter/nft_fib_ipv6.c 	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
skb               167 net/ipv6/netfilter/nft_fib_ipv6.c 	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
skb               173 net/ipv6/netfilter/nft_fib_ipv6.c 	rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, pkt->skb,
skb                27 net/ipv6/netfilter/nft_reject_ipv6.c 		nf_send_unreach6(nft_net(pkt), pkt->skb, priv->icmp_code,
skb                31 net/ipv6/netfilter/nft_reject_ipv6.c 		nf_send_reset6(nft_net(pkt), pkt->skb, nft_hook(pkt));
skb                53 net/ipv6/output_core.c __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
skb                59 net/ipv6/output_core.c 	addrs = skb_header_pointer(skb,
skb                60 net/ipv6/output_core.c 				   skb_network_offset(skb) +
skb                82 net/ipv6/output_core.c int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
skb                85 net/ipv6/output_core.c 	unsigned int packet_len = skb_tail_pointer(skb) -
skb                86 net/ipv6/output_core.c 		skb_network_header(skb);
skb                88 net/ipv6/output_core.c 	*nexthdr = &ipv6_hdr(skb)->nexthdr;
skb               102 net/ipv6/output_core.c 			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
skb               115 net/ipv6/output_core.c 		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
skb               148 net/ipv6/output_core.c int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               152 net/ipv6/output_core.c 	len = skb->len - sizeof(struct ipv6hdr);
skb               155 net/ipv6/output_core.c 	ipv6_hdr(skb)->payload_len = htons(len);
skb               156 net/ipv6/output_core.c 	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
skb               161 net/ipv6/output_core.c 	skb = l3mdev_ip6_out(sk, skb);
skb               162 net/ipv6/output_core.c 	if (unlikely(!skb))
skb               165 net/ipv6/output_core.c 	skb->protocol = htons(ETH_P_IPV6);
skb               168 net/ipv6/output_core.c 		       net, sk, skb, NULL, skb_dst(skb)->dev,
skb               173 net/ipv6/output_core.c int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               177 net/ipv6/output_core.c 	err = __ip6_local_out(net, sk, skb);
skb               179 net/ipv6/output_core.c 		err = dst_output(net, sk, skb);
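
__ip6_local_out() fixes up payload_len from the final skb length just before the LOCAL_OUT netfilter hook, so every locally generated packet leaves with a coherent header even if upper layers grew or shrank it. A sketch of that fixup; the jumbogram clamp is an assumed detail not shown in the lines above:

    /* payload_len counts everything after the fixed 40-byte IPv6
     * header and is stored big-endian.
     */
    int len = skb->len - sizeof(struct ipv6hdr);

    /* Packets larger than 64 KiB (jumbograms) carry their length in
     * a hop-by-hop option instead, so the header field is zeroed.
     */
    if (len > IPV6_MAXPLEN)
        len = 0;
    ipv6_hdr(skb)->payload_len = htons(len);
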
skb                32 net/ipv6/ping.c 				       struct sk_buff *skb)
skb                39 net/ipv6/ping.c static void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
skb               109 net/ipv6/raw.c static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
skb               117 net/ipv6/raw.c 	hdr = skb_header_pointer(skb, skb_transport_offset(skb),
skb               129 net/ipv6/raw.c typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
skb               157 net/ipv6/raw.c static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
skb               166 net/ipv6/raw.c 	saddr = &ipv6_hdr(skb)->saddr;
skb               177 net/ipv6/raw.c 	net = dev_net(skb->dev);
skb               179 net/ipv6/raw.c 			     inet6_iif(skb), inet6_sdif(skb));
skb               187 net/ipv6/raw.c 			filtered = icmpv6_filter(sk, skb);
skb               202 net/ipv6/raw.c 			filtered = filter ? (*filter)(sk, skb) : 0;
skb               214 net/ipv6/raw.c 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
skb               223 net/ipv6/raw.c 				     inet6_iif(skb), inet6_sdif(skb));
skb               230 net/ipv6/raw.c bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
skb               235 net/ipv6/raw.c 	if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
skb               322 net/ipv6/raw.c static void rawv6_err(struct sock *sk, struct sk_buff *skb,
skb               341 net/ipv6/raw.c 		ip6_sk_update_pmtu(skb, sk, info);
skb               345 net/ipv6/raw.c 		ip6_sk_redirect(skb, sk);
skb               349 net/ipv6/raw.c 		u8 *payload = skb->data;
skb               352 net/ipv6/raw.c 		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
skb               361 net/ipv6/raw.c void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
skb               375 net/ipv6/raw.c 		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
skb               378 net/ipv6/raw.c 		net = dev_net(skb->dev);
skb               381 net/ipv6/raw.c 					     inet6_iif(skb), inet6_iif(skb)))) {
skb               382 net/ipv6/raw.c 			rawv6_err(sk, skb, NULL, type, code,
skb               390 net/ipv6/raw.c static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               393 net/ipv6/raw.c 	    skb_checksum_complete(skb)) {
skb               395 net/ipv6/raw.c 		kfree_skb(skb);
skb               400 net/ipv6/raw.c 	skb_dst_drop(skb);
skb               401 net/ipv6/raw.c 	if (sock_queue_rcv_skb(sk, skb) < 0) {
skb               402 net/ipv6/raw.c 		kfree_skb(skb);
skb               416 net/ipv6/raw.c int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
skb               421 net/ipv6/raw.c 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
skb               423 net/ipv6/raw.c 		kfree_skb(skb);
skb               428 net/ipv6/raw.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               430 net/ipv6/raw.c 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb               431 net/ipv6/raw.c 		skb_postpull_rcsum(skb, skb_network_header(skb),
skb               432 net/ipv6/raw.c 				   skb_network_header_len(skb));
skb               433 net/ipv6/raw.c 		if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb               434 net/ipv6/raw.c 				     &ipv6_hdr(skb)->daddr,
skb               435 net/ipv6/raw.c 				     skb->len, inet->inet_num, skb->csum))
skb               436 net/ipv6/raw.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               438 net/ipv6/raw.c 	if (!skb_csum_unnecessary(skb))
skb               439 net/ipv6/raw.c 		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb               440 net/ipv6/raw.c 							 &ipv6_hdr(skb)->daddr,
skb               441 net/ipv6/raw.c 							 skb->len,
skb               445 net/ipv6/raw.c 		if (skb_checksum_complete(skb)) {
skb               447 net/ipv6/raw.c 			kfree_skb(skb);
skb               452 net/ipv6/raw.c 	rawv6_rcv_skb(sk, skb);
skb               467 net/ipv6/raw.c 	struct sk_buff *skb;
skb               480 net/ipv6/raw.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb               481 net/ipv6/raw.c 	if (!skb)
skb               484 net/ipv6/raw.c 	copied = skb->len;
skb               490 net/ipv6/raw.c 	if (skb_csum_unnecessary(skb)) {
skb               491 net/ipv6/raw.c 		err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               493 net/ipv6/raw.c 		if (__skb_checksum_complete(skb))
skb               495 net/ipv6/raw.c 		err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               497 net/ipv6/raw.c 		err = skb_copy_and_csum_datagram_msg(skb, 0, msg);
skb               508 net/ipv6/raw.c 		sin6->sin6_addr = ipv6_hdr(skb)->saddr;
skb               511 net/ipv6/raw.c 							  inet6_iif(skb));
skb               515 net/ipv6/raw.c 	sock_recv_ts_and_drops(msg, sk, skb);
skb               518 net/ipv6/raw.c 		ip6_datagram_recv_ctl(sk, msg, skb);
skb               522 net/ipv6/raw.c 		err = skb->len;
skb               525 net/ipv6/raw.c 	skb_free_datagram(sk, skb);
skb               530 net/ipv6/raw.c 	skb_kill_datagram(sk, skb, flags);
skb               542 net/ipv6/raw.c 	struct sk_buff *skb;
skb               553 net/ipv6/raw.c 	skb = skb_peek(&sk->sk_write_queue);
skb               554 net/ipv6/raw.c 	if (!skb)
skb               570 net/ipv6/raw.c 		tmp_csum = skb->csum;
skb               575 net/ipv6/raw.c 		skb_queue_walk(&sk->sk_write_queue, skb) {
skb               576 net/ipv6/raw.c 			tmp_csum = csum_add(tmp_csum, skb->csum);
skb               581 net/ipv6/raw.c 			len = skb->len - skb_transport_offset(skb);
skb               587 net/ipv6/raw.c 			csum_skb = skb;
skb               590 net/ipv6/raw.c 		skb = csum_skb;
skb               593 net/ipv6/raw.c 	offset += skb_transport_offset(skb);
skb               594 net/ipv6/raw.c 	err = skb_copy_bits(skb, offset, &csum, 2);
skb               610 net/ipv6/raw.c 	BUG_ON(skb_store_bits(skb, offset, &csum, 2));
skb               625 net/ipv6/raw.c 	struct sk_buff *skb;
skb               640 net/ipv6/raw.c 	skb = sock_alloc_send_skb(sk,
skb               643 net/ipv6/raw.c 	if (!skb)
skb               645 net/ipv6/raw.c 	skb_reserve(skb, hlen);
skb               647 net/ipv6/raw.c 	skb->protocol = htons(ETH_P_IPV6);
skb               648 net/ipv6/raw.c 	skb->priority = sk->sk_priority;
skb               649 net/ipv6/raw.c 	skb->mark = sockc->mark;
skb               650 net/ipv6/raw.c 	skb->tstamp = sockc->transmit_time;
skb               652 net/ipv6/raw.c 	skb_put(skb, length);
skb               653 net/ipv6/raw.c 	skb_reset_network_header(skb);
skb               654 net/ipv6/raw.c 	iph = ipv6_hdr(skb);
skb               656 net/ipv6/raw.c 	skb->ip_summed = CHECKSUM_NONE;
skb               658 net/ipv6/raw.c 	skb_setup_tx_timestamp(skb, sockc->tsflags);
skb               661 net/ipv6/raw.c 		skb_set_dst_pending_confirm(skb, 1);
skb               663 net/ipv6/raw.c 	skb->transport_header = skb->network_header;
skb               667 net/ipv6/raw.c 		kfree_skb(skb);
skb               671 net/ipv6/raw.c 	skb_dst_set(skb, &rt->dst);
skb               677 net/ipv6/raw.c 	skb = l3mdev_ip6_out(sk, skb);
skb               678 net/ipv6/raw.c 	if (unlikely(!skb))
skb               686 net/ipv6/raw.c 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
skb               687 net/ipv6/raw.c 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
skb               736 net/ipv6/raw.c 		       struct sk_buff *skb)
skb               743 net/ipv6/raw.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb               746 net/ipv6/raw.c 			skb->csum = csum_block_add(
skb               747 net/ipv6/raw.c 				skb->csum,
skb               763 net/ipv6/raw.c 	return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
skb              1205 net/ipv6/raw.c 		struct sk_buff *skb;
skb              1209 net/ipv6/raw.c 		skb = skb_peek(&sk->sk_receive_queue);
skb              1210 net/ipv6/raw.c 		if (skb)
skb              1211 net/ipv6/raw.c 			amount = skb->len;
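
rawv6_rcv() above copes with three checksum states: already verified (CHECKSUM_UNNECESSARY), hardware-summed over the whole packet (CHECKSUM_COMPLETE, which only needs the pseudo-header folded in), and unverified, where validation is deferred until the data is copied to userspace. A sketch of the CHECKSUM_COMPLETE case, assuming inet points at the socket's inet_sock:

    if (skb->ip_summed == CHECKSUM_COMPLETE) {
        /* Remove the network header's bytes from the running sum;
         * the device checksummed the whole frame, but the transport
         * checksum covers only the payload plus pseudo-header.
         */
        skb_postpull_rcsum(skb, skb_network_header(skb),
                           skb_network_header_len(skb));
        /* Folding in the pseudo-header must yield zero if valid. */
        if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                             &ipv6_hdr(skb)->daddr,
                             skb->len, inet->inet_num, skb->csum))
            skb->ip_summed = CHECKSUM_UNNECESSARY;
    }
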
skb                68 net/ipv6/reassembly.c static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
skb               104 net/ipv6/reassembly.c static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
skb               108 net/ipv6/reassembly.c 	struct net *net = dev_net(skb_dst(skb)->dev);
skb               120 net/ipv6/reassembly.c 	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
skb               121 net/ipv6/reassembly.c 			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
skb               124 net/ipv6/reassembly.c 		*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
skb               131 net/ipv6/reassembly.c 	ecn = ip6_frag_ecn(ipv6_hdr(skb));
skb               133 net/ipv6/reassembly.c 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb               134 net/ipv6/reassembly.c 		const unsigned char *nh = skb_network_header(skb);
skb               135 net/ipv6/reassembly.c 		skb->csum = csum_sub(skb->csum,
skb               174 net/ipv6/reassembly.c 	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
skb               177 net/ipv6/reassembly.c 	err = pskb_trim_rcsum(skb, end - offset);
skb               182 net/ipv6/reassembly.c 	dev = skb->dev;
skb               187 net/ipv6/reassembly.c 	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
skb               194 net/ipv6/reassembly.c 	fq->q.stamp = skb->tstamp;
skb               195 net/ipv6/reassembly.c 	fq->q.meat += skb->len;
skb               197 net/ipv6/reassembly.c 	add_frag_mem_limit(fq->q.fqdir, skb->truesize);
skb               199 net/ipv6/reassembly.c 	fragsize = -skb_network_offset(skb) + skb->len;
skb               213 net/ipv6/reassembly.c 		unsigned long orefdst = skb->_skb_refdst;
skb               215 net/ipv6/reassembly.c 		skb->_skb_refdst = 0UL;
skb               216 net/ipv6/reassembly.c 		err = ip6_frag_reasm(fq, skb, prev_tail, dev);
skb               217 net/ipv6/reassembly.c 		skb->_skb_refdst = orefdst;
skb               221 net/ipv6/reassembly.c 	skb_dst_drop(skb);
skb               226 net/ipv6/reassembly.c 		kfree_skb(skb);
skb               230 net/ipv6/reassembly.c 	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb               234 net/ipv6/reassembly.c 	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
skb               237 net/ipv6/reassembly.c 	kfree_skb(skb);
skb               248 net/ipv6/reassembly.c static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
skb               263 net/ipv6/reassembly.c 	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
skb               267 net/ipv6/reassembly.c 	payload_len = ((skb->data - skb_network_header(skb)) -
skb               276 net/ipv6/reassembly.c 	skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
skb               277 net/ipv6/reassembly.c 	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
skb               278 net/ipv6/reassembly.c 		(skb->data - skb->head) - sizeof(struct frag_hdr));
skb               279 net/ipv6/reassembly.c 	if (skb_mac_header_was_set(skb))
skb               280 net/ipv6/reassembly.c 		skb->mac_header += sizeof(struct frag_hdr);
skb               281 net/ipv6/reassembly.c 	skb->network_header += sizeof(struct frag_hdr);
skb               283 net/ipv6/reassembly.c 	skb_reset_transport_header(skb);
skb               285 net/ipv6/reassembly.c 	inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);
skb               287 net/ipv6/reassembly.c 	skb->dev = dev;
skb               288 net/ipv6/reassembly.c 	ipv6_hdr(skb)->payload_len = htons(payload_len);
skb               289 net/ipv6/reassembly.c 	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
skb               290 net/ipv6/reassembly.c 	IP6CB(skb)->nhoff = nhoff;
skb               291 net/ipv6/reassembly.c 	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
skb               292 net/ipv6/reassembly.c 	IP6CB(skb)->frag_max_size = fq->q.max_size;
skb               295 net/ipv6/reassembly.c 	skb_postpush_rcsum(skb, skb_network_header(skb),
skb               296 net/ipv6/reassembly.c 			   skb_network_header_len(skb));
skb               299 net/ipv6/reassembly.c 	__IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMOKS);
skb               313 net/ipv6/reassembly.c 	__IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMFAILS);
skb               319 net/ipv6/reassembly.c static int ipv6_frag_rcv(struct sk_buff *skb)
skb               323 net/ipv6/reassembly.c 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
skb               324 net/ipv6/reassembly.c 	struct net *net = dev_net(skb_dst(skb)->dev);
skb               327 net/ipv6/reassembly.c 	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
skb               330 net/ipv6/reassembly.c 	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
skb               336 net/ipv6/reassembly.c 	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
skb               340 net/ipv6/reassembly.c 	hdr = ipv6_hdr(skb);
skb               341 net/ipv6/reassembly.c 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
skb               345 net/ipv6/reassembly.c 		skb->transport_header += sizeof(struct frag_hdr);
skb               347 net/ipv6/reassembly.c 				ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
skb               349 net/ipv6/reassembly.c 		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
skb               350 net/ipv6/reassembly.c 		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
skb               354 net/ipv6/reassembly.c 	iif = skb->dev ? skb->dev->ifindex : 0;
skb               363 net/ipv6/reassembly.c 		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
skb               369 net/ipv6/reassembly.c 			__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
skb               372 net/ipv6/reassembly.c 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
skb               377 net/ipv6/reassembly.c 	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
skb               378 net/ipv6/reassembly.c 	kfree_skb(skb);
skb               382 net/ipv6/reassembly.c 	__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
skb               384 net/ipv6/reassembly.c 	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
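
After the fragments are spliced, ip6_frag_reasm() erases the 8-byte fragment header in place: the next-header byte that pointed at the fragment header is overwritten with the real payload protocol, everything in front of the fragment header slides forward by 8 bytes, and the cached header offsets move with it. A sketch of that splice, assuming nhoff is the offset of the next-header byte within the network header:

    /* Re-link the header chain: the byte that said "a fragment
     * header follows" now names the reassembled payload protocol.
     */
    skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];

    /* Slide the leading headers forward over the 8-byte fragment
     * header, then bump the cached offsets to the new layout.
     */
    memmove(skb->head + sizeof(struct frag_hdr), skb->head,
            (skb->data - skb->head) - sizeof(struct frag_hdr));
    if (skb_mac_header_was_set(skb))
        skb->mac_header += sizeof(struct frag_hdr);
    skb->network_header += sizeof(struct frag_hdr);
    skb_reset_transport_header(skb);
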
skb                92 net/ipv6/route.c static int		ip6_pkt_discard(struct sk_buff *skb);
skb                93 net/ipv6/route.c static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                94 net/ipv6/route.c static int		ip6_pkt_prohibit(struct sk_buff *skb);
skb                95 net/ipv6/route.c static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                96 net/ipv6/route.c static void		ip6_link_failure(struct sk_buff *skb);
skb                98 net/ipv6/route.c 					   struct sk_buff *skb, u32 mtu,
skb               101 net/ipv6/route.c 					struct sk_buff *skb);
skb               105 net/ipv6/route.c static int rt6_fill_node(struct net *net, struct sk_buff *skb,
skb               190 net/ipv6/route.c 					     struct sk_buff *skb,
skb               195 net/ipv6/route.c 	else if (skb)
skb               196 net/ipv6/route.c 		return &ipv6_hdr(skb)->daddr;
skb               202 net/ipv6/route.c 				   struct sk_buff *skb,
skb               207 net/ipv6/route.c 	daddr = choose_neigh_daddr(gw, skb, daddr);
skb               217 net/ipv6/route.c 					      struct sk_buff *skb,
skb               223 net/ipv6/route.c 				dst->dev, skb, daddr);
skb               268 net/ipv6/route.c 					 struct sk_buff *skb, u32 mtu,
skb               274 net/ipv6/route.c 				      struct sk_buff *skb)
skb               429 net/ipv6/route.c 		      const struct sk_buff *skb, int strict)
skb               442 net/ipv6/route.c 		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
skb              1215 net/ipv6/route.c 					     const struct sk_buff *skb,
skb              1248 net/ipv6/route.c 			 fl6->flowi6_oif != 0, skb, flags);
skb              1269 net/ipv6/route.c 				   const struct sk_buff *skb, int flags)
skb              1271 net/ipv6/route.c 	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
skb              1277 net/ipv6/route.c 			    const struct sk_buff *skb, int strict)
skb              1291 net/ipv6/route.c 	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
skb              2212 net/ipv6/route.c 			       const struct sk_buff *skb, int flags)
skb              2232 net/ipv6/route.c 	fib6_select_path(net, &res, fl6, oif, false, skb, strict);
skb              2283 net/ipv6/route.c 					    const struct sk_buff *skb,
skb              2286 net/ipv6/route.c 	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
skb              2292 net/ipv6/route.c 					 const struct sk_buff *skb,
skb              2298 net/ipv6/route.c 	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
skb              2302 net/ipv6/route.c static void ip6_multipath_l3_keys(const struct sk_buff *skb,
skb              2306 net/ipv6/route.c 	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
skb              2317 net/ipv6/route.c 	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
skb              2328 net/ipv6/route.c 	inner_iph = skb_header_pointer(skb,
skb              2329 net/ipv6/route.c 				       skb_transport_offset(skb) + sizeof(*icmph),
skb              2352 net/ipv6/route.c 		       const struct sk_buff *skb, struct flow_keys *flkeys)
skb              2361 net/ipv6/route.c 		if (skb) {
skb              2362 net/ipv6/route.c 			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
skb              2371 net/ipv6/route.c 		if (skb) {
skb              2376 net/ipv6/route.c 			if (skb->l4_hash)
skb              2377 net/ipv6/route.c 				return skb_get_hash_raw(skb) >> 1;
skb              2382 net/ipv6/route.c 				skb_flow_dissect_flow_keys(skb, &keys, flag);
skb              2404 net/ipv6/route.c 		if (skb) {
skb              2408 net/ipv6/route.c 				skb_flow_dissect_flow_keys(skb, &keys, 0);
skb              2426 net/ipv6/route.c 				ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
skb              2444 net/ipv6/route.c void ip6_route_input(struct sk_buff *skb)
skb              2446 net/ipv6/route.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb              2447 net/ipv6/route.c 	struct net *net = dev_net(skb->dev);
skb              2451 net/ipv6/route.c 		.flowi6_iif = skb->dev->ifindex,
skb              2455 net/ipv6/route.c 		.flowi6_mark = skb->mark,
skb              2460 net/ipv6/route.c 	tun_info = skb_tunnel_info(skb);
skb              2464 net/ipv6/route.c 	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
skb              2468 net/ipv6/route.c 		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
skb              2469 net/ipv6/route.c 	skb_dst_drop(skb);
skb              2470 net/ipv6/route.c 	skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
skb              2471 net/ipv6/route.c 						      &fl6, skb, flags));
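
ip6_route_input() reduces the packet to a flowi6 key before any table lookup; everything routing may hash or match on (addresses, flow label, mark, arrival interface, protocol) is captured once. A sketch of that key construction, mirroring the lines above:

    const struct ipv6hdr *iph = ipv6_hdr(skb);
    struct flowi6 fl6 = {
        .flowi6_iif = skb->dev->ifindex,  /* arrival device */
        .daddr = iph->daddr,
        .saddr = iph->saddr,
        .flowlabel = ip6_flowinfo(iph),   /* traffic class + flow label */
        .flowi6_mark = skb->mark,
        .flowi6_proto = iph->nexthdr,
    };
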
skb              2477 net/ipv6/route.c 					     const struct sk_buff *skb,
skb              2480 net/ipv6/route.c 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
skb              2667 net/ipv6/route.c static void ip6_link_failure(struct sk_buff *skb)
skb              2671 net/ipv6/route.c 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
skb              2673 net/ipv6/route.c 	rt = (struct rt6_info *) skb_dst(skb);
skb              2803 net/ipv6/route.c 			       struct sk_buff *skb, u32 mtu,
skb              2806 net/ipv6/route.c 	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
skb              2810 net/ipv6/route.c void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
skb              2813 net/ipv6/route.c 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
skb              2817 net/ipv6/route.c 		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
skb              2831 net/ipv6/route.c void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
skb              2836 net/ipv6/route.c 	if (!oif && skb->dev)
skb              2837 net/ipv6/route.c 		oif = l3mdev_master_ifindex(skb->dev);
skb              2839 net/ipv6/route.c 	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
skb              2924 net/ipv6/route.c 					     const struct sk_buff *skb,
skb              2943 net/ipv6/route.c 		fl6->flowi6_oif = skb->dev->ifindex;
skb              3012 net/ipv6/route.c 					    const struct sk_buff *skb,
skb              3021 net/ipv6/route.c 	return fib6_rule_lookup(net, &rdfl.fl6, skb,
skb              3025 net/ipv6/route.c void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
skb              3028 net/ipv6/route.c 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
skb              3040 net/ipv6/route.c 	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
skb              3041 net/ipv6/route.c 	rt6_do_redirect(dst, NULL, skb);
skb              3046 net/ipv6/route.c void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
skb              3048 net/ipv6/route.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb              3049 net/ipv6/route.c 	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
skb              3059 net/ipv6/route.c 	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
skb              3060 net/ipv6/route.c 	rt6_do_redirect(dst, NULL, skb);
skb              3064 net/ipv6/route.c void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
skb              3066 net/ipv6/route.c 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
skb              3779 net/ipv6/route.c 	struct sk_buff *skb = NULL;
skb              3792 net/ipv6/route.c 		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
skb              3793 net/ipv6/route.c 		if (skb) {
skb              3796 net/ipv6/route.c 			if (rt6_fill_node(net, skb, rt, NULL,
skb              3799 net/ipv6/route.c 				kfree_skb(skb);
skb              3800 net/ipv6/route.c 				skb = NULL;
skb              3826 net/ipv6/route.c 	if (skb) {
skb              3827 net/ipv6/route.c 		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
skb              3976 net/ipv6/route.c static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
skb              3988 net/ipv6/route.c 	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
skb              3996 net/ipv6/route.c 	msg = (struct rd_msg *)icmp6_hdr(skb);
skb              4012 net/ipv6/route.c 	in6_dev = __in6_dev_get(skb->dev);
skb              4023 net/ipv6/route.c 	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
skb              4031 net/ipv6/route.c 					     skb->dev);
skb              4048 net/ipv6/route.c 	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
skb              4050 net/ipv6/route.c 	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
skb              4058 net/ipv6/route.c 	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
skb              4362 net/ipv6/route.c static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
skb              4364 net/ipv6/route.c 	struct dst_entry *dst = skb_dst(skb);
skb              4369 net/ipv6/route.c 	if (netif_is_l3_master(skb->dev) &&
skb              4371 net/ipv6/route.c 		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
skb              4377 net/ipv6/route.c 		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
skb              4389 net/ipv6/route.c 	if (netif_is_l3_master(skb->dev))
skb              4390 net/ipv6/route.c 		skb_dst_drop(skb);
skb              4392 net/ipv6/route.c 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
skb              4393 net/ipv6/route.c 	kfree_skb(skb);
skb              4397 net/ipv6/route.c static int ip6_pkt_discard(struct sk_buff *skb)
skb              4399 net/ipv6/route.c 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
skb              4402 net/ipv6/route.c static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb              4404 net/ipv6/route.c 	skb->dev = skb_dst(skb)->dev;
skb              4405 net/ipv6/route.c 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
skb              4408 net/ipv6/route.c static int ip6_pkt_prohibit(struct sk_buff *skb)
skb              4410 net/ipv6/route.c 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
skb              4413 net/ipv6/route.c static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb              4415 net/ipv6/route.c 	skb->dev = skb_dst(skb)->dev;
skb              4416 net/ipv6/route.c 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
skb              4866 net/ipv6/route.c static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              4891 net/ipv6/route.c 		.fc_nlinfo.portid = NETLINK_CB(skb).portid,
skb              4893 net/ipv6/route.c 		.fc_nlinfo.nl_net = sock_net(skb->sk),
skb              5266 net/ipv6/route.c static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              5272 net/ipv6/route.c 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
skb              5277 net/ipv6/route.c 	    !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
skb              5290 net/ipv6/route.c static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              5296 net/ipv6/route.c 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
skb              5367 net/ipv6/route.c static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
skb              5373 net/ipv6/route.c 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
skb              5377 net/ipv6/route.c 		if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
skb              5380 net/ipv6/route.c 		nla_nest_end(skb, mp);
skb              5385 net/ipv6/route.c 		if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
skb              5396 net/ipv6/route.c static int rt6_fill_node(struct net *net, struct sk_buff *skb,
skb              5410 net/ipv6/route.c 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
skb              5434 net/ipv6/route.c 	if (nla_put_u32(skb, RTA_TABLE, table))
skb              5446 net/ipv6/route.c 		if (nla_put_in6_addr(skb, RTA_DST, dest))
skb              5450 net/ipv6/route.c 		if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
skb              5454 net/ipv6/route.c 		if (nla_put_in6_addr(skb, RTA_SRC, src))
skb              5458 net/ipv6/route.c 		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
skb              5464 net/ipv6/route.c 			int err = ip6mr_get_route(net, skb, rtm, portid);
skb              5472 net/ipv6/route.c 			if (nla_put_u32(skb, RTA_IIF, iif))
skb              5477 net/ipv6/route.c 		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
skb              5484 net/ipv6/route.c 		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
skb              5489 net/ipv6/route.c 	if (rtnetlink_put_metrics(skb, pmetrics) < 0)
skb              5492 net/ipv6/route.c 	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
skb              5500 net/ipv6/route.c 		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
skb              5503 net/ipv6/route.c 		if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
skb              5509 net/ipv6/route.c 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
skb              5513 net/ipv6/route.c 		if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
skb              5519 net/ipv6/route.c 			if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
skb              5525 net/ipv6/route.c 		nla_nest_end(skb, mp);
skb              5527 net/ipv6/route.c 		if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
skb              5533 net/ipv6/route.c 		if (rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
skb              5538 net/ipv6/route.c 		if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
skb              5550 net/ipv6/route.c 	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
skb              5553 net/ipv6/route.c 	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
skb              5557 net/ipv6/route.c 	nlmsg_end(skb, nlh);
skb              5561 net/ipv6/route.c 	nlmsg_cancel(skb, nlh);
skb              5645 net/ipv6/route.c 			err = rt6_fill_node(dump->net, dump->skb, w->rt,
skb              5648 net/ipv6/route.c 					    NETLINK_CB(dump->cb->skb).portid,
skb              5694 net/ipv6/route.c 			if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
skb              5696 net/ipv6/route.c 					  NETLINK_CB(arg->cb->skb).portid,
skb              5729 net/ipv6/route.c static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
skb              5743 net/ipv6/route.c 	if (!netlink_strict_get_check(skb))
skb              5805 net/ipv6/route.c 	struct sk_buff *skb;
skb              5903 net/ipv6/route.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              5904 net/ipv6/route.c 	if (!skb) {
skb              5910 net/ipv6/route.c 	skb_dst_set(skb, &rt->dst);
skb              5916 net/ipv6/route.c 			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
skb              5921 net/ipv6/route.c 			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
skb              5931 net/ipv6/route.c 		kfree_skb(skb);
skb              5935 net/ipv6/route.c 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
skb              5943 net/ipv6/route.c 	struct sk_buff *skb;
skb              5951 net/ipv6/route.c 	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
skb              5952 net/ipv6/route.c 	if (!skb)
skb              5955 net/ipv6/route.c 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
skb              5960 net/ipv6/route.c 		kfree_skb(skb);
skb              5963 net/ipv6/route.c 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
skb              5975 net/ipv6/route.c 	struct sk_buff *skb;
skb              5983 net/ipv6/route.c 	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
skb              5984 net/ipv6/route.c 	if (!skb)
skb              5987 net/ipv6/route.c 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
skb              5992 net/ipv6/route.c 		kfree_skb(skb);
skb              5995 net/ipv6/route.c 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
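
The notification sites in route.c above share one netlink idiom: size the message up front with rt6_nlmsg_size(), fill it, and treat -EMSGSIZE from the fill as a bug in the size estimate rather than a runtime condition. A condensed sketch; notify_sketch() is illustrative and the rt6_fill_node() trailing arguments are assumed from context:

    static void notify_sketch(struct net *net, struct fib6_info *rt,
                              struct nl_info *info, int event)
    {
        u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
        if (!skb)
            goto errout;

        err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
                            event, info->portid, seq, 0);
        if (err < 0) {
            WARN_ON(err == -EMSGSIZE);  /* size estimate undercounted */
            kfree_skb(skb);
            goto errout;
        }
        rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
                    info->nlh, gfp_any());
        return;
    errout:
        rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
    }
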
skb                83 net/ipv6/seg6.c static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info)
skb               158 net/ipv6/seg6.c static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info)
skb               165 net/ipv6/seg6.c static int seg6_genl_set_tunsrc(struct sk_buff *skb, struct genl_info *info)
skb               194 net/ipv6/seg6.c static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
skb               244 net/ipv6/seg6.c 					struct sk_buff *skb, u8 cmd)
skb               248 net/ipv6/seg6.c 	hdr = genlmsg_put(skb, portid, seq, &seg6_genl_family, flags, cmd);
skb               252 net/ipv6/seg6.c 	if (__seg6_hmac_fill_info(hinfo, skb) < 0)
skb               255 net/ipv6/seg6.c 	genlmsg_end(skb, hdr);
skb               259 net/ipv6/seg6.c 	genlmsg_cancel(skb, hdr);
skb               265 net/ipv6/seg6.c 	struct net *net = sock_net(cb->skb->sk);
skb               296 net/ipv6/seg6.c static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb)
skb               317 net/ipv6/seg6.c 						   NETLINK_CB(cb->skb).portid,
skb               320 net/ipv6/seg6.c 						   skb, SEG6_CMD_DUMPHMAC);
skb               325 net/ipv6/seg6.c 	ret = skb->len;
skb               344 net/ipv6/seg6.c static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb)
skb               238 net/ipv6/seg6_hmac.c bool seg6_hmac_validate_skb(struct sk_buff *skb)
skb               241 net/ipv6/seg6_hmac.c 	struct net *net = dev_net(skb->dev);
skb               247 net/ipv6/seg6_hmac.c 	idev = __in6_dev_get(skb->dev);
skb               249 net/ipv6/seg6_hmac.c 	srh = (struct ipv6_sr_hdr *)skb_transport_header(skb);
skb               271 net/ipv6/seg6_hmac.c 	if (seg6_hmac_compute(hinfo, srh, &ipv6_hdr(skb)->saddr, hmac_output))
skb                50 net/ipv6/seg6_iptunnel.c static int nla_put_srh(struct sk_buff *skb, int attrtype,
skb                59 net/ipv6/seg6_iptunnel.c 	nla = nla_reserve(skb, attrtype, len);
skb                90 net/ipv6/seg6_iptunnel.c static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
skb                98 net/ipv6/seg6_iptunnel.c 		hash = skb_get_hash(skb);
skb               101 net/ipv6/seg6_iptunnel.c 	} else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) {
skb               108 net/ipv6/seg6_iptunnel.c int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
skb               110 net/ipv6/seg6_iptunnel.c 	struct dst_entry *dst = skb_dst(skb);
skb               120 net/ipv6/seg6_iptunnel.c 	err = skb_cow_head(skb, tot_len + skb->mac_len);
skb               124 net/ipv6/seg6_iptunnel.c 	inner_hdr = ipv6_hdr(skb);
skb               125 net/ipv6/seg6_iptunnel.c 	flowlabel = seg6_make_flowlabel(net, skb, inner_hdr);
skb               127 net/ipv6/seg6_iptunnel.c 	skb_push(skb, tot_len);
skb               128 net/ipv6/seg6_iptunnel.c 	skb_reset_network_header(skb);
skb               129 net/ipv6/seg6_iptunnel.c 	skb_mac_header_rebuild(skb);
skb               130 net/ipv6/seg6_iptunnel.c 	hdr = ipv6_hdr(skb);
skb               137 net/ipv6/seg6_iptunnel.c 	if (skb->protocol == htons(ETH_P_IPV6)) {
skb               143 net/ipv6/seg6_iptunnel.c 		hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
skb               145 net/ipv6/seg6_iptunnel.c 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
skb               166 net/ipv6/seg6_iptunnel.c 	skb_postpush_rcsum(skb, hdr, tot_len);
skb               173 net/ipv6/seg6_iptunnel.c int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
skb               181 net/ipv6/seg6_iptunnel.c 	err = skb_cow_head(skb, hdrlen + skb->mac_len);
skb               185 net/ipv6/seg6_iptunnel.c 	oldhdr = ipv6_hdr(skb);
skb               187 net/ipv6/seg6_iptunnel.c 	skb_pull(skb, sizeof(struct ipv6hdr));
skb               188 net/ipv6/seg6_iptunnel.c 	skb_postpull_rcsum(skb, skb_network_header(skb),
skb               191 net/ipv6/seg6_iptunnel.c 	skb_push(skb, sizeof(struct ipv6hdr) + hdrlen);
skb               192 net/ipv6/seg6_iptunnel.c 	skb_reset_network_header(skb);
skb               193 net/ipv6/seg6_iptunnel.c 	skb_mac_header_rebuild(skb);
skb               195 net/ipv6/seg6_iptunnel.c 	hdr = ipv6_hdr(skb);
skb               210 net/ipv6/seg6_iptunnel.c 		struct net *net = dev_net(skb_dst(skb)->dev);
skb               218 net/ipv6/seg6_iptunnel.c 	skb_postpush_rcsum(skb, hdr, sizeof(struct ipv6hdr) + hdrlen);
skb               224 net/ipv6/seg6_iptunnel.c static int seg6_do_srh(struct sk_buff *skb)
skb               226 net/ipv6/seg6_iptunnel.c 	struct dst_entry *dst = skb_dst(skb);
skb               234 net/ipv6/seg6_iptunnel.c 		if (skb->protocol != htons(ETH_P_IPV6))
skb               237 net/ipv6/seg6_iptunnel.c 		err = seg6_do_srh_inline(skb, tinfo->srh);
skb               242 net/ipv6/seg6_iptunnel.c 		err = iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6);
skb               246 net/ipv6/seg6_iptunnel.c 		if (skb->protocol == htons(ETH_P_IPV6))
skb               248 net/ipv6/seg6_iptunnel.c 		else if (skb->protocol == htons(ETH_P_IP))
skb               253 net/ipv6/seg6_iptunnel.c 		err = seg6_do_srh_encap(skb, tinfo->srh, proto);
skb               257 net/ipv6/seg6_iptunnel.c 		skb_set_inner_transport_header(skb, skb_transport_offset(skb));
skb               258 net/ipv6/seg6_iptunnel.c 		skb_set_inner_protocol(skb, skb->protocol);
skb               259 net/ipv6/seg6_iptunnel.c 		skb->protocol = htons(ETH_P_IPV6);
skb               262 net/ipv6/seg6_iptunnel.c 		if (!skb_mac_header_was_set(skb))
skb               265 net/ipv6/seg6_iptunnel.c 		if (pskb_expand_head(skb, skb->mac_len, 0, GFP_ATOMIC) < 0)
skb               268 net/ipv6/seg6_iptunnel.c 		skb_mac_header_rebuild(skb);
skb               269 net/ipv6/seg6_iptunnel.c 		skb_push(skb, skb->mac_len);
skb               271 net/ipv6/seg6_iptunnel.c 		err = seg6_do_srh_encap(skb, tinfo->srh, NEXTHDR_NONE);
skb               275 net/ipv6/seg6_iptunnel.c 		skb->protocol = htons(ETH_P_IPV6);
skb               279 net/ipv6/seg6_iptunnel.c 	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb               280 net/ipv6/seg6_iptunnel.c 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
skb               285 net/ipv6/seg6_iptunnel.c static int seg6_input(struct sk_buff *skb)
skb               287 net/ipv6/seg6_iptunnel.c 	struct dst_entry *orig_dst = skb_dst(skb);
skb               292 net/ipv6/seg6_iptunnel.c 	err = seg6_do_srh(skb);
skb               294 net/ipv6/seg6_iptunnel.c 		kfree_skb(skb);
skb               304 net/ipv6/seg6_iptunnel.c 	skb_dst_drop(skb);
skb               307 net/ipv6/seg6_iptunnel.c 		ip6_route_input(skb);
skb               308 net/ipv6/seg6_iptunnel.c 		dst = skb_dst(skb);
skb               312 net/ipv6/seg6_iptunnel.c 					  &ipv6_hdr(skb)->saddr);
skb               316 net/ipv6/seg6_iptunnel.c 		skb_dst_set(skb, dst);
skb               319 net/ipv6/seg6_iptunnel.c 	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
skb               323 net/ipv6/seg6_iptunnel.c 	return dst_input(skb);
skb               326 net/ipv6/seg6_iptunnel.c static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               328 net/ipv6/seg6_iptunnel.c 	struct dst_entry *orig_dst = skb_dst(skb);
skb               333 net/ipv6/seg6_iptunnel.c 	err = seg6_do_srh(skb);
skb               344 net/ipv6/seg6_iptunnel.c 		struct ipv6hdr *hdr = ipv6_hdr(skb);
skb               351 net/ipv6/seg6_iptunnel.c 		fl6.flowi6_mark = skb->mark;
skb               366 net/ipv6/seg6_iptunnel.c 	skb_dst_drop(skb);
skb               367 net/ipv6/seg6_iptunnel.c 	skb_dst_set(skb, dst);
skb               369 net/ipv6/seg6_iptunnel.c 	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
skb               373 net/ipv6/seg6_iptunnel.c 	return dst_output(net, sk, skb);
skb               375 net/ipv6/seg6_iptunnel.c 	kfree_skb(skb);
skb               464 net/ipv6/seg6_iptunnel.c static int seg6_fill_encap_info(struct sk_buff *skb,
skb               469 net/ipv6/seg6_iptunnel.c 	if (nla_put_srh(skb, SEG6_IPTUNNEL_SRH, tuninfo))
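
seg6_do_srh_encap() above shows the canonical way to grow a packet at the front: reserve headroom for the new outer IPv6 header plus SRH (and the MAC header that may need rebuilding) with skb_cow_head(), push the space, and re-anchor the header offsets before the new headers are written. A sketch of that sequence, with hdrlen the SRH size in bytes:

    int tot_len = sizeof(struct ipv6hdr) + hdrlen;
    int err;

    /* Guarantee the headroom exists; this may reallocate skb->head,
     * so no cached pointers into the packet survive it.
     */
    err = skb_cow_head(skb, tot_len + skb->mac_len);
    if (err)
        return err;

    skb_push(skb, tot_len);           /* claim space for outer hdr + SRH */
    skb_reset_network_header(skb);    /* new outer header starts here */
    skb_mac_header_rebuild(skb);      /* re-copy the L2 header in front */
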
skb                39 net/ipv6/seg6_local.c 	int (*input)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
skb                67 net/ipv6/seg6_local.c static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
skb                72 net/ipv6/seg6_local.c 	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
skb                75 net/ipv6/seg6_local.c 	if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
skb                78 net/ipv6/seg6_local.c 	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
skb                82 net/ipv6/seg6_local.c 	if (!pskb_may_pull(skb, srhoff + len))
skb                88 net/ipv6/seg6_local.c 	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
skb                96 net/ipv6/seg6_local.c static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb)
skb               100 net/ipv6/seg6_local.c 	srh = get_srh(skb);
skb               108 net/ipv6/seg6_local.c 	if (!seg6_hmac_validate_skb(skb))
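
get_srh() above pulls the header in two steps: first enough bytes for the fixed SRH, then, once hdrlen is known, the full ((hdrlen + 1) << 3) bytes, recomputing the srh pointer after each pull because pskb_may_pull() may relocate skb->data. A simplified flat-buffer model of the same bounds discipline; the struct layout follows RFC 8754, but parse_srh() and its types are illustrative stand-ins, not kernel API:

    #include <stddef.h>
    #include <stdint.h>

    struct srh_fixed {           /* fixed part of the Segment Routing Header */
        uint8_t  nexthdr;
        uint8_t  hdrlen;         /* 8-byte units, not counting the first 8 */
        uint8_t  type;           /* 4 for SRH */
        uint8_t  segments_left;
        uint8_t  last_entry;
        uint8_t  flags;
        uint16_t tag;
    };

    /* Return a pointer to the SRH at 'off' within buf[0..len), or NULL if
     * either the fixed part or the full header would run past the end --
     * the same two-step check get_srh() performs. */
    static const struct srh_fixed *parse_srh(const uint8_t *buf, size_t len,
                                             size_t off)
    {
        const struct srh_fixed *srh;
        size_t total;

        if (off + sizeof(*srh) > len)           /* fixed part present? */
            return NULL;
        srh = (const struct srh_fixed *)(buf + off);

        total = ((size_t)srh->hdrlen + 1) << 3; /* full SRH length */
        if (off + total > len)                  /* whole header present? */
            return NULL;
        return srh;
    }
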
skb               115 net/ipv6/seg6_local.c static bool decap_and_validate(struct sk_buff *skb, int proto)
skb               120 net/ipv6/seg6_local.c 	srh = get_srh(skb);
skb               125 net/ipv6/seg6_local.c 	if (srh && !seg6_hmac_validate_skb(skb))
skb               129 net/ipv6/seg6_local.c 	if (ipv6_find_hdr(skb, &off, proto, NULL, NULL) < 0)
skb               132 net/ipv6/seg6_local.c 	if (!pskb_pull(skb, off))
skb               135 net/ipv6/seg6_local.c 	skb_postpull_rcsum(skb, skb_network_header(skb), off);
skb               137 net/ipv6/seg6_local.c 	skb_reset_network_header(skb);
skb               138 net/ipv6/seg6_local.c 	skb_reset_transport_header(skb);
skb               139 net/ipv6/seg6_local.c 	if (iptunnel_pull_offloads(skb))
skb               154 net/ipv6/seg6_local.c int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
skb               157 net/ipv6/seg6_local.c 	struct net *net = dev_net(skb->dev);
skb               158 net/ipv6/seg6_local.c 	struct ipv6hdr *hdr = ipv6_hdr(skb);
skb               164 net/ipv6/seg6_local.c 	fl6.flowi6_iif = skb->dev->ifindex;
skb               168 net/ipv6/seg6_local.c 	fl6.flowi6_mark = skb->mark;
skb               175 net/ipv6/seg6_local.c 		dst = ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags);
skb               183 net/ipv6/seg6_local.c 		rt = ip6_pol_route(net, table, 0, &fl6, skb, flags);
skb               199 net/ipv6/seg6_local.c 	skb_dst_drop(skb);
skb               200 net/ipv6/seg6_local.c 	skb_dst_set(skb, dst);
skb               205 net/ipv6/seg6_local.c static int input_action_end(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               209 net/ipv6/seg6_local.c 	srh = get_and_validate_srh(skb);
skb               213 net/ipv6/seg6_local.c 	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
skb               215 net/ipv6/seg6_local.c 	seg6_lookup_nexthop(skb, NULL, 0);
skb               217 net/ipv6/seg6_local.c 	return dst_input(skb);
skb               220 net/ipv6/seg6_local.c 	kfree_skb(skb);
skb               225 net/ipv6/seg6_local.c static int input_action_end_x(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               229 net/ipv6/seg6_local.c 	srh = get_and_validate_srh(skb);
skb               233 net/ipv6/seg6_local.c 	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
skb               235 net/ipv6/seg6_local.c 	seg6_lookup_nexthop(skb, &slwt->nh6, 0);
skb               237 net/ipv6/seg6_local.c 	return dst_input(skb);
skb               240 net/ipv6/seg6_local.c 	kfree_skb(skb);
skb               244 net/ipv6/seg6_local.c static int input_action_end_t(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               248 net/ipv6/seg6_local.c 	srh = get_and_validate_srh(skb);
skb               252 net/ipv6/seg6_local.c 	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
skb               254 net/ipv6/seg6_local.c 	seg6_lookup_nexthop(skb, NULL, slwt->table);
skb               256 net/ipv6/seg6_local.c 	return dst_input(skb);
skb               259 net/ipv6/seg6_local.c 	kfree_skb(skb);
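
End, End.X and End.T above differ only in how the next-hop lookup is steered; all three first call advance_nextseg(), which consumes one entry from the segment list and installs it as the packet's destination address. A standalone model of that step (get_and_validate_srh() has already guaranteed segments_left > 0 by this point); the types are toy stand-ins for the kernel's:

    #include <string.h>
    #include <stdint.h>

    struct in6_addr_m { uint8_t s6_addr[16]; }; /* stand-in for in6_addr */

    struct srh_m {
        uint8_t segments_left;
        struct in6_addr_m segments[8];          /* toy fixed-size SID list */
    };

    /* Model of advance_nextseg(): step to the next SID and make it the
     * packet's destination address. */
    static void advance_nextseg(struct srh_m *srh, struct in6_addr_m *daddr)
    {
        struct in6_addr_m *addr;

        srh->segments_left--;
        addr = &srh->segments[srh->segments_left];
        memcpy(daddr, addr, sizeof(*daddr));
    }
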
skb               264 net/ipv6/seg6_local.c static int input_action_end_dx2(struct sk_buff *skb,
skb               267 net/ipv6/seg6_local.c 	struct net *net = dev_net(skb->dev);
skb               271 net/ipv6/seg6_local.c 	if (!decap_and_validate(skb, NEXTHDR_NONE))
skb               274 net/ipv6/seg6_local.c 	if (!pskb_may_pull(skb, ETH_HLEN))
skb               277 net/ipv6/seg6_local.c 	skb_reset_mac_header(skb);
skb               278 net/ipv6/seg6_local.c 	eth = (struct ethhdr *)skb->data;
skb               300 net/ipv6/seg6_local.c 	skb_orphan(skb);
skb               302 net/ipv6/seg6_local.c 	if (skb_warn_if_lro(skb))
skb               305 net/ipv6/seg6_local.c 	skb_forward_csum(skb);
skb               307 net/ipv6/seg6_local.c 	if (skb->len - ETH_HLEN > odev->mtu)
skb               310 net/ipv6/seg6_local.c 	skb->dev = odev;
skb               311 net/ipv6/seg6_local.c 	skb->protocol = eth->h_proto;
skb               313 net/ipv6/seg6_local.c 	return dev_queue_xmit(skb);
skb               316 net/ipv6/seg6_local.c 	kfree_skb(skb);
skb               321 net/ipv6/seg6_local.c static int input_action_end_dx6(struct sk_buff *skb,
skb               330 net/ipv6/seg6_local.c 	if (!decap_and_validate(skb, IPPROTO_IPV6))
skb               333 net/ipv6/seg6_local.c 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
skb               346 net/ipv6/seg6_local.c 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
skb               348 net/ipv6/seg6_local.c 	seg6_lookup_nexthop(skb, nhaddr, 0);
skb               350 net/ipv6/seg6_local.c 	return dst_input(skb);
skb               352 net/ipv6/seg6_local.c 	kfree_skb(skb);
skb               356 net/ipv6/seg6_local.c static int input_action_end_dx4(struct sk_buff *skb,
skb               363 net/ipv6/seg6_local.c 	if (!decap_and_validate(skb, IPPROTO_IPIP))
skb               366 net/ipv6/seg6_local.c 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
skb               369 net/ipv6/seg6_local.c 	skb->protocol = htons(ETH_P_IP);
skb               371 net/ipv6/seg6_local.c 	iph = ip_hdr(skb);
skb               375 net/ipv6/seg6_local.c 	skb_dst_drop(skb);
skb               377 net/ipv6/seg6_local.c 	skb_set_transport_header(skb, sizeof(struct iphdr));
skb               379 net/ipv6/seg6_local.c 	err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
skb               383 net/ipv6/seg6_local.c 	return dst_input(skb);
skb               386 net/ipv6/seg6_local.c 	kfree_skb(skb);
skb               390 net/ipv6/seg6_local.c static int input_action_end_dt6(struct sk_buff *skb,
skb               393 net/ipv6/seg6_local.c 	if (!decap_and_validate(skb, IPPROTO_IPV6))
skb               396 net/ipv6/seg6_local.c 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
skb               399 net/ipv6/seg6_local.c 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
skb               401 net/ipv6/seg6_local.c 	seg6_lookup_nexthop(skb, NULL, slwt->table);
skb               403 net/ipv6/seg6_local.c 	return dst_input(skb);
skb               406 net/ipv6/seg6_local.c 	kfree_skb(skb);
skb               411 net/ipv6/seg6_local.c static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               416 net/ipv6/seg6_local.c 	srh = get_and_validate_srh(skb);
skb               420 net/ipv6/seg6_local.c 	err = seg6_do_srh_inline(skb, slwt->srh);
skb               424 net/ipv6/seg6_local.c 	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb               425 net/ipv6/seg6_local.c 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
skb               427 net/ipv6/seg6_local.c 	seg6_lookup_nexthop(skb, NULL, 0);
skb               429 net/ipv6/seg6_local.c 	return dst_input(skb);
skb               432 net/ipv6/seg6_local.c 	kfree_skb(skb);
skb               437 net/ipv6/seg6_local.c static int input_action_end_b6_encap(struct sk_buff *skb,
skb               443 net/ipv6/seg6_local.c 	srh = get_and_validate_srh(skb);
skb               447 net/ipv6/seg6_local.c 	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
skb               449 net/ipv6/seg6_local.c 	skb_reset_inner_headers(skb);
skb               450 net/ipv6/seg6_local.c 	skb->encapsulation = 1;
skb               452 net/ipv6/seg6_local.c 	err = seg6_do_srh_encap(skb, slwt->srh, IPPROTO_IPV6);
skb               456 net/ipv6/seg6_local.c 	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb               457 net/ipv6/seg6_local.c 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
skb               459 net/ipv6/seg6_local.c 	seg6_lookup_nexthop(skb, NULL, 0);
skb               461 net/ipv6/seg6_local.c 	return dst_input(skb);
skb               464 net/ipv6/seg6_local.c 	kfree_skb(skb);
skb               470 net/ipv6/seg6_local.c bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
skb               493 net/ipv6/seg6_local.c static int input_action_end_bpf(struct sk_buff *skb,
skb               501 net/ipv6/seg6_local.c 	srh = get_and_validate_srh(skb);
skb               503 net/ipv6/seg6_local.c 		kfree_skb(skb);
skb               506 net/ipv6/seg6_local.c 	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
skb               517 net/ipv6/seg6_local.c 	bpf_compute_data_pointers(skb);
skb               518 net/ipv6/seg6_local.c 	ret = bpf_prog_run_save_cb(slwt->bpf.prog, skb);
skb               532 net/ipv6/seg6_local.c 	if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
skb               537 net/ipv6/seg6_local.c 		seg6_lookup_nexthop(skb, NULL, 0);
skb               539 net/ipv6/seg6_local.c 	return dst_input(skb);
skb               543 net/ipv6/seg6_local.c 	kfree_skb(skb);
skb               617 net/ipv6/seg6_local.c static int seg6_local_input(struct sk_buff *skb)
skb               619 net/ipv6/seg6_local.c 	struct dst_entry *orig_dst = skb_dst(skb);
skb               623 net/ipv6/seg6_local.c 	if (skb->protocol != htons(ETH_P_IPV6)) {
skb               624 net/ipv6/seg6_local.c 		kfree_skb(skb);
skb               631 net/ipv6/seg6_local.c 	return desc->input(skb, slwt);
skb               671 net/ipv6/seg6_local.c static int put_nla_srh(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               680 net/ipv6/seg6_local.c 	nla = nla_reserve(skb, SEG6_LOCAL_SRH, len);
skb               706 net/ipv6/seg6_local.c static int put_nla_table(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               708 net/ipv6/seg6_local.c 	if (nla_put_u32(skb, SEG6_LOCAL_TABLE, slwt->table))
skb               730 net/ipv6/seg6_local.c static int put_nla_nh4(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               734 net/ipv6/seg6_local.c 	nla = nla_reserve(skb, SEG6_LOCAL_NH4, sizeof(struct in_addr));
skb               756 net/ipv6/seg6_local.c static int put_nla_nh6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               760 net/ipv6/seg6_local.c 	nla = nla_reserve(skb, SEG6_LOCAL_NH6, sizeof(struct in6_addr));
skb               781 net/ipv6/seg6_local.c static int put_nla_iif(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               783 net/ipv6/seg6_local.c 	if (nla_put_u32(skb, SEG6_LOCAL_IIF, slwt->iif))
skb               804 net/ipv6/seg6_local.c static int put_nla_oif(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               806 net/ipv6/seg6_local.c 	if (nla_put_u32(skb, SEG6_LOCAL_OIF, slwt->oif))
skb               858 net/ipv6/seg6_local.c static int put_nla_bpf(struct sk_buff *skb, struct seg6_local_lwt *slwt)
skb               865 net/ipv6/seg6_local.c 	nest = nla_nest_start_noflag(skb, SEG6_LOCAL_BPF);
skb               869 net/ipv6/seg6_local.c 	if (nla_put_u32(skb, SEG6_LOCAL_BPF_PROG, slwt->bpf.prog->aux->id))
skb               873 net/ipv6/seg6_local.c 	    nla_put_string(skb, SEG6_LOCAL_BPF_PROG_NAME, slwt->bpf.name))
skb               876 net/ipv6/seg6_local.c 	return nla_nest_end(skb, nest);
skb               892 net/ipv6/seg6_local.c 	int (*put)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
skb              1019 net/ipv6/seg6_local.c static int seg6_local_fill_encap(struct sk_buff *skb,
skb              1026 net/ipv6/seg6_local.c 	if (nla_put_u32(skb, SEG6_LOCAL_ACTION, slwt->action))
skb              1032 net/ipv6/seg6_local.c 			err = param->put(skb, slwt);
skb               445 net/ipv6/sit.c isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
skb               454 net/ipv6/sit.c 			skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT;
skb               456 net/ipv6/sit.c 			skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
skb               458 net/ipv6/sit.c 		const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
skb               463 net/ipv6/sit.c 			skb->ndisc_nodetype = NDISC_NODETYPE_HOST;
skb               486 net/ipv6/sit.c static int ipip6_err(struct sk_buff *skb, u32 info)
skb               488 net/ipv6/sit.c 	const struct iphdr *iph = (const struct iphdr *)skb->data;
skb               489 net/ipv6/sit.c 	const int type = icmp_hdr(skb)->type;
skb               490 net/ipv6/sit.c 	const int code = icmp_hdr(skb)->code;
skb               517 net/ipv6/sit.c 		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
skb               525 net/ipv6/sit.c 	sifindex = netif_is_l3_master(skb->dev) ? IPCB(skb)->iif : 0;
skb               526 net/ipv6/sit.c 	t = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
skb               532 net/ipv6/sit.c 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
skb               538 net/ipv6/sit.c 		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link,
skb               545 net/ipv6/sit.c 	if (__in6_dev_get(skb->dev) &&
skb               546 net/ipv6/sit.c 	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
skb               600 net/ipv6/sit.c static bool packet_is_spoofed(struct sk_buff *skb,
skb               607 net/ipv6/sit.c 		if (!isatap_chksrc(skb, iph, tunnel))
skb               616 net/ipv6/sit.c 	ipv6h = ipv6_hdr(skb);
skb               637 net/ipv6/sit.c static int ipip6_rcv(struct sk_buff *skb)
skb               639 net/ipv6/sit.c 	const struct iphdr *iph = ip_hdr(skb);
skb               644 net/ipv6/sit.c 	sifindex = netif_is_l3_master(skb->dev) ? IPCB(skb)->iif : 0;
skb               645 net/ipv6/sit.c 	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
skb               654 net/ipv6/sit.c 		skb->mac_header = skb->network_header;
skb               655 net/ipv6/sit.c 		skb_reset_network_header(skb);
skb               656 net/ipv6/sit.c 		IPCB(skb)->flags = 0;
skb               657 net/ipv6/sit.c 		skb->dev = tunnel->dev;
skb               659 net/ipv6/sit.c 		if (packet_is_spoofed(skb, iph, tunnel)) {
skb               664 net/ipv6/sit.c 		if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6),
skb               671 net/ipv6/sit.c 		iph = (const struct iphdr *)skb_mac_header(skb);
skb               672 net/ipv6/sit.c 		err = IP_ECN_decapsulate(iph, skb);
skb               687 net/ipv6/sit.c 		tstats->rx_bytes += skb->len;
skb               690 net/ipv6/sit.c 		netif_rx(skb);
skb               698 net/ipv6/sit.c 	kfree_skb(skb);
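
In ipip6_rcv() the outer IPv4 header is remembered as the mac header and then pulled, so the inner IPv6 packet starts at skb->data while IP_ECN_decapsulate() can still read the outer TOS bits through skb_mac_header(). A hypothetical flat-buffer sketch of the same strip-but-keep move; sit_pull_outer() is an illustrative name:

    #include <stdint.h>
    #include <stddef.h>

    /* Strip an outer IPv4 header of 'ihl' 32-bit words from the front of
     * a packet; returns the new payload start (the inner IPv6 header) or
     * NULL if the buffer is too short. '*outer' receives the stripped
     * header so the caller can still inspect its ECN bits, as
     * IP_ECN_decapsulate() does in the kernel path above. */
    static const uint8_t *sit_pull_outer(const uint8_t *pkt, size_t len,
                                         unsigned int ihl,
                                         const uint8_t **outer)
    {
        size_t hdr = (size_t)ihl * 4;

        if (ihl < 5 || len < hdr + 40)  /* need outer hdr + inner IPv6 */
            return NULL;
        *outer = pkt;                   /* kernel: skb_mac_header(skb) */
        return pkt + hdr;               /* kernel: skb->data after pull */
    }
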
skb               714 net/ipv6/sit.c static int sit_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
skb               720 net/ipv6/sit.c 	sifindex = netif_is_l3_master(skb->dev) ? IPCB(skb)->iif : 0;
skb               722 net/ipv6/sit.c 	iph = ip_hdr(skb);
skb               723 net/ipv6/sit.c 	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
skb               732 net/ipv6/sit.c 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
skb               740 net/ipv6/sit.c 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
skb               742 net/ipv6/sit.c 		return ip_tunnel_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
skb               748 net/ipv6/sit.c 	kfree_skb(skb);
skb               752 net/ipv6/sit.c static int ipip_rcv(struct sk_buff *skb)
skb               754 net/ipv6/sit.c 	return sit_tunnel_rcv(skb, IPPROTO_IPIP);
skb               758 net/ipv6/sit.c static int mplsip_rcv(struct sk_buff *skb)
skb               760 net/ipv6/sit.c 	return sit_tunnel_rcv(skb, IPPROTO_MPLS);
skb               816 net/ipv6/sit.c static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
skb               821 net/ipv6/sit.c 	const struct ipv6hdr *iph6 = ipv6_hdr(skb);
skb               844 net/ipv6/sit.c 		if (skb_dst(skb))
skb               845 net/ipv6/sit.c 			neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
skb               873 net/ipv6/sit.c 		if (skb_dst(skb))
skb               874 net/ipv6/sit.c 			neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
skb               885 net/ipv6/sit.c 			addr6 = &ipv6_hdr(skb)->daddr;
skb               927 net/ipv6/sit.c 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4)) {
skb               947 net/ipv6/sit.c 			skb_dst_update_pmtu_no_confirm(skb, mtu);
skb               949 net/ipv6/sit.c 		if (skb->len > mtu && !skb_is_gso(skb)) {
skb               950 net/ipv6/sit.c 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb               960 net/ipv6/sit.c 			dst_link_failure(skb);
skb               970 net/ipv6/sit.c 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
skb               971 net/ipv6/sit.c 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
skb               972 net/ipv6/sit.c 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
skb               976 net/ipv6/sit.c 			kfree_skb(skb);
skb               979 net/ipv6/sit.c 		if (skb->sk)
skb               980 net/ipv6/sit.c 			skb_set_owner_w(new_skb, skb->sk);
skb               981 net/ipv6/sit.c 		dev_kfree_skb(skb);
skb               982 net/ipv6/sit.c 		skb = new_skb;
skb               983 net/ipv6/sit.c 		iph6 = ipv6_hdr(skb);
skb               990 net/ipv6/sit.c 	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) {
skb               995 net/ipv6/sit.c 	skb_set_inner_ipproto(skb, IPPROTO_IPV6);
skb               997 net/ipv6/sit.c 	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
skb              1002 net/ipv6/sit.c 	dst_link_failure(skb);
skb              1004 net/ipv6/sit.c 	kfree_skb(skb);
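
The transmit path above reallocates only when the skb is shared, unwritable-cloned, or short on headroom for the new outer headers; otherwise it pushes into the existing buffer. A simplified userspace analogue of that ensure-headroom decision (struct buf and ensure_headroom() are illustrative, not skb API):

    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    struct buf {
        uint8_t *head;       /* start of allocation */
        uint8_t *data;       /* current packet start */
        size_t   len;        /* packet length */
    };

    /* Make sure at least 'need' bytes exist before b->data, reallocating
     * and copying only when the current buffer cannot satisfy it -- the
     * decision ipip6_tunnel_xmit() makes with skb_headroom() and
     * skb_realloc_headroom(). Returns 0 on success, -1 on failure. */
    static int ensure_headroom(struct buf *b, size_t need)
    {
        uint8_t *nhead;

        if ((size_t)(b->data - b->head) >= need)
            return 0;                           /* fast path: enough room */

        nhead = malloc(need + b->len);
        if (!nhead)
            return -1;
        memcpy(nhead + need, b->data, b->len);  /* repack with headroom */
        free(b->head);
        b->head = nhead;
        b->data = nhead + need;
        return 0;
    }
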
skb              1009 net/ipv6/sit.c static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb,
skb              1015 net/ipv6/sit.c 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
skb              1018 net/ipv6/sit.c 	skb_set_inner_ipproto(skb, ipproto);
skb              1020 net/ipv6/sit.c 	ip_tunnel_xmit(skb, dev, tiph, ipproto);
skb              1023 net/ipv6/sit.c 	kfree_skb(skb);
skb              1028 net/ipv6/sit.c static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
skb              1031 net/ipv6/sit.c 	if (!pskb_inet_may_pull(skb))
skb              1034 net/ipv6/sit.c 	switch (skb->protocol) {
skb              1036 net/ipv6/sit.c 		sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
skb              1039 net/ipv6/sit.c 		ipip6_tunnel_xmit(skb, dev);
skb              1043 net/ipv6/sit.c 		sit_tunnel_xmit__(skb, dev, IPPROTO_MPLS);
skb              1054 net/ipv6/sit.c 	kfree_skb(skb);
skb              1699 net/ipv6/sit.c static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb              1704 net/ipv6/sit.c 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
skb              1705 net/ipv6/sit.c 	    nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
skb              1706 net/ipv6/sit.c 	    nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
skb              1707 net/ipv6/sit.c 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
skb              1708 net/ipv6/sit.c 	    nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
skb              1709 net/ipv6/sit.c 	    nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
skb              1711 net/ipv6/sit.c 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
skb              1712 net/ipv6/sit.c 	    nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags) ||
skb              1713 net/ipv6/sit.c 	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark))
skb              1717 net/ipv6/sit.c 	if (nla_put_in6_addr(skb, IFLA_IPTUN_6RD_PREFIX,
skb              1719 net/ipv6/sit.c 	    nla_put_in_addr(skb, IFLA_IPTUN_6RD_RELAY_PREFIX,
skb              1721 net/ipv6/sit.c 	    nla_put_u16(skb, IFLA_IPTUN_6RD_PREFIXLEN,
skb              1723 net/ipv6/sit.c 	    nla_put_u16(skb, IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
skb              1728 net/ipv6/sit.c 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
skb              1730 net/ipv6/sit.c 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
skb              1732 net/ipv6/sit.c 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
skb              1734 net/ipv6/sit.c 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
skb               109 net/ipv6/syncookies.c __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mssp)
skb               111 net/ipv6/syncookies.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               112 net/ipv6/syncookies.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               128 net/ipv6/syncookies.c struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
skb               135 net/ipv6/syncookies.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               150 net/ipv6/syncookies.c 	mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
skb               160 net/ipv6/syncookies.c 	tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);
skb               164 net/ipv6/syncookies.c 					    ipv6_hdr(skb)->daddr.s6_addr32,
skb               165 net/ipv6/syncookies.c 					    ipv6_hdr(skb)->saddr.s6_addr32);
skb               181 net/ipv6/syncookies.c 	if (security_inet_conn_request(sk, skb, req))
skb               187 net/ipv6/syncookies.c 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
skb               188 net/ipv6/syncookies.c 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
skb               189 net/ipv6/syncookies.c 	if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
skb               192 net/ipv6/syncookies.c 		refcount_inc(&skb->users);
skb               193 net/ipv6/syncookies.c 		ireq->pktopts = skb;
skb               196 net/ipv6/syncookies.c 	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
skb               200 net/ipv6/syncookies.c 		ireq->ir_iif = tcp_v6_iif(skb);
skb               202 net/ipv6/syncookies.c 	ireq->ir_mark = inet_request_mark(sk, skb);
skb               252 net/ipv6/syncookies.c 	ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff);
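
cookie_v6_init_sequence() derives the ISN from a keyed hash over the addresses, ports and a coarse time counter, folding an MSS-table index into the low bits so that __cookie_v6_check() can later recover the MSS with no stored per-connection state. A toy model of that encode/verify round trip; the mixing function below is a deliberately weak stand-in for the kernel's siphash, and the table values are examples only:

    #include <stdint.h>
    #include <stdio.h>

    static const uint16_t mss_tab[] = { 536, 1220, 1440, 1460 };

    /* Toy keyed mixer standing in for the kernel's keyed hash over the
     * IPv6 addresses, ports and time counter. Not cryptographic. */
    static uint32_t mix(uint32_t a, uint32_t b, uint32_t key)
    {
        uint32_t h = a * 2654435761u ^ b ^ key;
        h ^= h >> 16;
        return h * 2246822519u;
    }

    static uint32_t cookie_make(uint32_t tuple_hash, uint32_t count,
                                uint32_t key, unsigned int mssind)
    {
        /* low 2 bits carry the MSS table index, the rest is keyed hash */
        return (mix(tuple_hash, count, key) & ~3u) | (mssind & 3u);
    }

    /* Returns the encoded MSS if the cookie verifies, else 0 -- the
     * contract __cookie_v6_check() has in the listing above. */
    static uint16_t cookie_check(uint32_t cookie, uint32_t tuple_hash,
                                 uint32_t count, uint32_t key)
    {
        if ((cookie & ~3u) != (mix(tuple_hash, count, key) & ~3u))
            return 0;
        return mss_tab[cookie & 3u];
    }

    int main(void)
    {
        uint32_t c = cookie_make(0xdeadbeef, 42, 0x5eed, 3);
        printf("mss=%u\n", (unsigned int)cookie_check(c, 0xdeadbeef, 42, 0x5eed));
        return 0;
    }
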
skb                71 net/ipv6/tcp_ipv6.c static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
skb                72 net/ipv6/tcp_ipv6.c static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
skb                75 net/ipv6/tcp_ipv6.c static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
skb               102 net/ipv6/tcp_ipv6.c static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
skb               104 net/ipv6/tcp_ipv6.c 	struct dst_entry *dst = skb_dst(skb);
skb               110 net/ipv6/tcp_ipv6.c 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
skb               115 net/ipv6/tcp_ipv6.c static u32 tcp_v6_init_seq(const struct sk_buff *skb)
skb               117 net/ipv6/tcp_ipv6.c 	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
skb               118 net/ipv6/tcp_ipv6.c 				ipv6_hdr(skb)->saddr.s6_addr32,
skb               119 net/ipv6/tcp_ipv6.c 				tcp_hdr(skb)->dest,
skb               120 net/ipv6/tcp_ipv6.c 				tcp_hdr(skb)->source);
skb               123 net/ipv6/tcp_ipv6.c static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
skb               125 net/ipv6/tcp_ipv6.c 	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
skb               126 net/ipv6/tcp_ipv6.c 				   ipv6_hdr(skb)->saddr.s6_addr32);
skb               360 net/ipv6/tcp_ipv6.c static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               363 net/ipv6/tcp_ipv6.c 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
skb               364 net/ipv6/tcp_ipv6.c 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
skb               365 net/ipv6/tcp_ipv6.c 	struct net *net = dev_net(skb->dev);
skb               377 net/ipv6/tcp_ipv6.c 					skb->dev->ifindex, inet6_sdif(skb));
skb               380 net/ipv6/tcp_ipv6.c 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
skb               403 net/ipv6/tcp_ipv6.c 	if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
skb               425 net/ipv6/tcp_ipv6.c 				dst->ops->redirect(dst, sk, skb);
skb               494 net/ipv6/tcp_ipv6.c 	struct sk_buff *skb;
skb               502 net/ipv6/tcp_ipv6.c 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
skb               504 net/ipv6/tcp_ipv6.c 	if (skb) {
skb               505 net/ipv6/tcp_ipv6.c 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
skb               516 net/ipv6/tcp_ipv6.c 		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass,
skb               654 net/ipv6/tcp_ipv6.c 			       const struct sk_buff *skb)
skb               659 net/ipv6/tcp_ipv6.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               665 net/ipv6/tcp_ipv6.c 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               678 net/ipv6/tcp_ipv6.c 	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
skb               680 net/ipv6/tcp_ipv6.c 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
skb               701 net/ipv6/tcp_ipv6.c 				    const struct sk_buff *skb)
skb               706 net/ipv6/tcp_ipv6.c 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               707 net/ipv6/tcp_ipv6.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               731 net/ipv6/tcp_ipv6.c 				      NULL, skb);
skb               747 net/ipv6/tcp_ipv6.c 			    struct sk_buff *skb)
skb               749 net/ipv6/tcp_ipv6.c 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
skb               753 net/ipv6/tcp_ipv6.c 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
skb               754 net/ipv6/tcp_ipv6.c 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
skb               759 net/ipv6/tcp_ipv6.c 		ireq->ir_iif = tcp_v6_iif(skb);
skb               761 net/ipv6/tcp_ipv6.c 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
skb               762 net/ipv6/tcp_ipv6.c 	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
skb               766 net/ipv6/tcp_ipv6.c 		refcount_inc(&skb->users);
skb               767 net/ipv6/tcp_ipv6.c 		ireq->pktopts = skb;
skb               805 net/ipv6/tcp_ipv6.c static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
skb               810 net/ipv6/tcp_ipv6.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               814 net/ipv6/tcp_ipv6.c 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
skb               863 net/ipv6/tcp_ipv6.c 				    &ipv6_hdr(skb)->saddr,
skb               864 net/ipv6/tcp_ipv6.c 				    &ipv6_hdr(skb)->daddr, t1);
skb               869 net/ipv6/tcp_ipv6.c 	fl6.daddr = ipv6_hdr(skb)->saddr;
skb               870 net/ipv6/tcp_ipv6.c 	fl6.saddr = ipv6_hdr(skb)->daddr;
skb               880 net/ipv6/tcp_ipv6.c 		fl6.flowi6_oif = tcp_v6_iif(skb);
skb               882 net/ipv6/tcp_ipv6.c 		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
skb               883 net/ipv6/tcp_ipv6.c 			oif = skb->skb_iif;
skb               899 net/ipv6/tcp_ipv6.c 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
skb               903 net/ipv6/tcp_ipv6.c 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
skb               923 net/ipv6/tcp_ipv6.c static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
skb               925 net/ipv6/tcp_ipv6.c 	const struct tcphdr *th = tcp_hdr(skb);
skb               926 net/ipv6/tcp_ipv6.c 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               946 net/ipv6/tcp_ipv6.c 	if (!sk && !ipv6_unicast_destination(skb))
skb               949 net/ipv6/tcp_ipv6.c 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
skb               968 net/ipv6/tcp_ipv6.c 					   tcp_v6_iif_l3_slave(skb),
skb               969 net/ipv6/tcp_ipv6.c 					   tcp_v6_sdif(skb));
skb               977 net/ipv6/tcp_ipv6.c 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
skb               986 net/ipv6/tcp_ipv6.c 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
skb               994 net/ipv6/tcp_ipv6.c 			trace_tcp_send_reset(sk, skb);
skb              1008 net/ipv6/tcp_ipv6.c 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0,
skb              1017 net/ipv6/tcp_ipv6.c static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
skb              1022 net/ipv6/tcp_ipv6.c 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
skb              1026 net/ipv6/tcp_ipv6.c static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
skb              1031 net/ipv6/tcp_ipv6.c 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
skb              1040 net/ipv6/tcp_ipv6.c static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
skb              1051 net/ipv6/tcp_ipv6.c 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
skb              1057 net/ipv6/tcp_ipv6.c 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
skb              1062 net/ipv6/tcp_ipv6.c static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
skb              1065 net/ipv6/tcp_ipv6.c 	const struct tcphdr *th = tcp_hdr(skb);
skb              1068 net/ipv6/tcp_ipv6.c 		sk = cookie_v6_check(sk, skb);
skb              1088 net/ipv6/tcp_ipv6.c static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
skb              1090 net/ipv6/tcp_ipv6.c 	if (skb->protocol == htons(ETH_P_IP))
skb              1091 net/ipv6/tcp_ipv6.c 		return tcp_v4_conn_request(sk, skb);
skb              1093 net/ipv6/tcp_ipv6.c 	if (!ipv6_unicast_destination(skb))
skb              1097 net/ipv6/tcp_ipv6.c 				&tcp_request_sock_ipv6_ops, sk, skb);
skb              1104 net/ipv6/tcp_ipv6.c static void tcp_v6_restore_cb(struct sk_buff *skb)
skb              1110 net/ipv6/tcp_ipv6.c 	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
skb              1114 net/ipv6/tcp_ipv6.c static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
skb              1132 net/ipv6/tcp_ipv6.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb              1137 net/ipv6/tcp_ipv6.c 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
skb              1164 net/ipv6/tcp_ipv6.c 		newnp->mcast_oif   = inet_iif(skb);
skb              1165 net/ipv6/tcp_ipv6.c 		newnp->mcast_hops  = ip_hdr(skb)->ttl;
skb              1196 net/ipv6/tcp_ipv6.c 	newsk = tcp_create_openreq_child(sk, req, skb);
skb              1208 net/ipv6/tcp_ipv6.c 	inet6_sk_rx_dst_set(newsk, skb);
skb              1237 net/ipv6/tcp_ipv6.c 	newnp->mcast_oif  = tcp_v6_iif(skb);
skb              1238 net/ipv6/tcp_ipv6.c 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
skb              1239 net/ipv6/tcp_ipv6.c 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
skb              1241 net/ipv6/tcp_ipv6.c 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
skb              1327 net/ipv6/tcp_ipv6.c static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
skb              1341 net/ipv6/tcp_ipv6.c 	if (skb->protocol == htons(ETH_P_IP))
skb              1342 net/ipv6/tcp_ipv6.c 		return tcp_v4_do_rcv(sk, skb);
skb              1363 net/ipv6/tcp_ipv6.c 		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
skb              1368 net/ipv6/tcp_ipv6.c 		sock_rps_save_rxhash(sk, skb);
skb              1369 net/ipv6/tcp_ipv6.c 		sk_mark_napi_id(sk, skb);
skb              1371 net/ipv6/tcp_ipv6.c 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
skb              1378 net/ipv6/tcp_ipv6.c 		tcp_rcv_established(sk, skb);
skb              1384 net/ipv6/tcp_ipv6.c 	if (tcp_checksum_complete(skb))
skb              1388 net/ipv6/tcp_ipv6.c 		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
skb              1394 net/ipv6/tcp_ipv6.c 			if (tcp_child_process(sk, nsk, skb))
skb              1401 net/ipv6/tcp_ipv6.c 		sock_rps_save_rxhash(sk, skb);
skb              1403 net/ipv6/tcp_ipv6.c 	if (tcp_rcv_state_process(sk, skb))
skb              1410 net/ipv6/tcp_ipv6.c 	tcp_v6_send_reset(sk, skb);
skb              1414 net/ipv6/tcp_ipv6.c 	kfree_skb(skb);
skb              1455 net/ipv6/tcp_ipv6.c static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
skb              1463 net/ipv6/tcp_ipv6.c 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
skb              1467 net/ipv6/tcp_ipv6.c 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
skb              1468 net/ipv6/tcp_ipv6.c 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb              1469 net/ipv6/tcp_ipv6.c 				    skb->len - th->doff*4);
skb              1470 net/ipv6/tcp_ipv6.c 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
skb              1471 net/ipv6/tcp_ipv6.c 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
skb              1472 net/ipv6/tcp_ipv6.c 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
skb              1473 net/ipv6/tcp_ipv6.c 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
skb              1474 net/ipv6/tcp_ipv6.c 	TCP_SKB_CB(skb)->sacked = 0;
skb              1475 net/ipv6/tcp_ipv6.c 	TCP_SKB_CB(skb)->has_rxtstamp =
skb              1476 net/ipv6/tcp_ipv6.c 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
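
tcp_v6_fill_cb() caches per-segment TCP state in the skb control block; note how end_seq charges one sequence number each for SYN and FIN on top of the payload bytes (skb->len minus the header length, th->doff * 4). The arithmetic, recomputed standalone:

    #include <stdint.h>
    #include <stdio.h>

    /* Sequence space consumed by a segment: SYN and FIN each occupy one
     * sequence number, payload occupies its length in bytes -- the
     * end_seq expression in tcp_v6_fill_cb() above. */
    static uint32_t tcp_end_seq(uint32_t seq, int syn, int fin,
                                uint32_t skb_len, uint8_t doff)
    {
        return seq + (uint32_t)syn + (uint32_t)fin
               + (skb_len - (uint32_t)doff * 4);
    }

    int main(void)
    {
        /* bare SYN: 20-byte header, no payload -> consumes 1 seq number */
        printf("%u\n", (unsigned int)tcp_end_seq(1000, 1, 0, 20, 5));  /* 1001 */
        /* 100-byte data segment carrying FIN */
        printf("%u\n", (unsigned int)tcp_end_seq(1000, 0, 1, 120, 5)); /* 1101 */
        return 0;
    }
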
skb              1479 net/ipv6/tcp_ipv6.c INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
skb              1482 net/ipv6/tcp_ipv6.c 	int sdif = inet6_sdif(skb);
skb              1488 net/ipv6/tcp_ipv6.c 	struct net *net = dev_net(skb->dev);
skb              1490 net/ipv6/tcp_ipv6.c 	if (skb->pkt_type != PACKET_HOST)
skb              1498 net/ipv6/tcp_ipv6.c 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
skb              1501 net/ipv6/tcp_ipv6.c 	th = (const struct tcphdr *)skb->data;
skb              1505 net/ipv6/tcp_ipv6.c 	if (!pskb_may_pull(skb, th->doff*4))
skb              1508 net/ipv6/tcp_ipv6.c 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
skb              1511 net/ipv6/tcp_ipv6.c 	th = (const struct tcphdr *)skb->data;
skb              1512 net/ipv6/tcp_ipv6.c 	hdr = ipv6_hdr(skb);
skb              1515 net/ipv6/tcp_ipv6.c 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
skb              1516 net/ipv6/tcp_ipv6.c 				th->source, th->dest, inet6_iif(skb), sdif,
skb              1531 net/ipv6/tcp_ipv6.c 		if (tcp_v6_inbound_md5_hash(sk, skb)) {
skb              1532 net/ipv6/tcp_ipv6.c 			sk_drops_add(sk, skb);
skb              1536 net/ipv6/tcp_ipv6.c 		if (tcp_checksum_complete(skb)) {
skb              1547 net/ipv6/tcp_ipv6.c 		if (!tcp_filter(sk, skb)) {
skb              1548 net/ipv6/tcp_ipv6.c 			th = (const struct tcphdr *)skb->data;
skb              1549 net/ipv6/tcp_ipv6.c 			hdr = ipv6_hdr(skb);
skb              1550 net/ipv6/tcp_ipv6.c 			tcp_v6_fill_cb(skb, hdr, th);
skb              1551 net/ipv6/tcp_ipv6.c 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
skb              1561 net/ipv6/tcp_ipv6.c 				tcp_v6_restore_cb(skb);
skb              1569 net/ipv6/tcp_ipv6.c 			tcp_v6_restore_cb(skb);
skb              1570 net/ipv6/tcp_ipv6.c 		} else if (tcp_child_process(sk, nsk, skb)) {
skb              1571 net/ipv6/tcp_ipv6.c 			tcp_v6_send_reset(nsk, skb);
skb              1583 net/ipv6/tcp_ipv6.c 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
skb              1586 net/ipv6/tcp_ipv6.c 	if (tcp_v6_inbound_md5_hash(sk, skb))
skb              1589 net/ipv6/tcp_ipv6.c 	if (tcp_filter(sk, skb))
skb              1591 net/ipv6/tcp_ipv6.c 	th = (const struct tcphdr *)skb->data;
skb              1592 net/ipv6/tcp_ipv6.c 	hdr = ipv6_hdr(skb);
skb              1593 net/ipv6/tcp_ipv6.c 	tcp_v6_fill_cb(skb, hdr, th);
skb              1595 net/ipv6/tcp_ipv6.c 	skb->dev = NULL;
skb              1598 net/ipv6/tcp_ipv6.c 		ret = tcp_v6_do_rcv(sk, skb);
skb              1605 net/ipv6/tcp_ipv6.c 	tcp_segs_in(tcp_sk(sk), skb);
skb              1610 net/ipv6/tcp_ipv6.c 		ret = tcp_v6_do_rcv(sk, skb);
skb              1612 net/ipv6/tcp_ipv6.c 		if (tcp_add_backlog(sk, skb))
skb              1625 net/ipv6/tcp_ipv6.c 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
skb              1628 net/ipv6/tcp_ipv6.c 	tcp_v6_fill_cb(skb, hdr, th);
skb              1630 net/ipv6/tcp_ipv6.c 	if (tcp_checksum_complete(skb)) {
skb              1636 net/ipv6/tcp_ipv6.c 		tcp_v6_send_reset(NULL, skb);
skb              1640 net/ipv6/tcp_ipv6.c 	kfree_skb(skb);
skb              1644 net/ipv6/tcp_ipv6.c 	sk_drops_add(sk, skb);
skb              1650 net/ipv6/tcp_ipv6.c 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
skb              1655 net/ipv6/tcp_ipv6.c 	tcp_v6_fill_cb(skb, hdr, th);
skb              1657 net/ipv6/tcp_ipv6.c 	if (tcp_checksum_complete(skb)) {
skb              1662 net/ipv6/tcp_ipv6.c 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
skb              1667 net/ipv6/tcp_ipv6.c 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
skb              1668 net/ipv6/tcp_ipv6.c 					    skb, __tcp_hdrlen(th),
skb              1669 net/ipv6/tcp_ipv6.c 					    &ipv6_hdr(skb)->saddr, th->source,
skb              1670 net/ipv6/tcp_ipv6.c 					    &ipv6_hdr(skb)->daddr,
skb              1672 net/ipv6/tcp_ipv6.c 					    tcp_v6_iif_l3_slave(skb),
skb              1678 net/ipv6/tcp_ipv6.c 			tcp_v6_restore_cb(skb);
skb              1686 net/ipv6/tcp_ipv6.c 		tcp_v6_timewait_ack(sk, skb);
skb              1689 net/ipv6/tcp_ipv6.c 		tcp_v6_send_reset(sk, skb);
skb              1698 net/ipv6/tcp_ipv6.c INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
skb              1704 net/ipv6/tcp_ipv6.c 	if (skb->pkt_type != PACKET_HOST)
skb              1707 net/ipv6/tcp_ipv6.c 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
skb              1710 net/ipv6/tcp_ipv6.c 	hdr = ipv6_hdr(skb);
skb              1711 net/ipv6/tcp_ipv6.c 	th = tcp_hdr(skb);
skb              1717 net/ipv6/tcp_ipv6.c 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
skb              1720 net/ipv6/tcp_ipv6.c 					inet6_iif(skb), inet6_sdif(skb));
skb              1722 net/ipv6/tcp_ipv6.c 		skb->sk = sk;
skb              1723 net/ipv6/tcp_ipv6.c 		skb->destructor = sock_edemux;
skb              1730 net/ipv6/tcp_ipv6.c 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
skb              1731 net/ipv6/tcp_ipv6.c 				skb_dst_set_noref(skb, dst);
skb                16 net/ipv6/tcpv6_offload.c struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
skb                19 net/ipv6/tcpv6_offload.c 	if (!NAPI_GRO_CB(skb)->flush &&
skb                20 net/ipv6/tcpv6_offload.c 	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
skb                22 net/ipv6/tcpv6_offload.c 		NAPI_GRO_CB(skb)->flush = 1;
skb                26 net/ipv6/tcpv6_offload.c 	return tcp_gro_receive(head, skb);
skb                29 net/ipv6/tcpv6_offload.c INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
skb                31 net/ipv6/tcpv6_offload.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb                32 net/ipv6/tcpv6_offload.c 	struct tcphdr *th = tcp_hdr(skb);
skb                34 net/ipv6/tcpv6_offload.c 	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
skb                36 net/ipv6/tcpv6_offload.c 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
skb                38 net/ipv6/tcpv6_offload.c 	return tcp_gro_complete(skb);
skb                41 net/ipv6/tcpv6_offload.c static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
skb                46 net/ipv6/tcpv6_offload.c 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
skb                49 net/ipv6/tcpv6_offload.c 	if (!pskb_may_pull(skb, sizeof(*th)))
skb                52 net/ipv6/tcpv6_offload.c 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
skb                53 net/ipv6/tcpv6_offload.c 		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb                54 net/ipv6/tcpv6_offload.c 		struct tcphdr *th = tcp_hdr(skb);
skb                61 net/ipv6/tcpv6_offload.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb                62 net/ipv6/tcpv6_offload.c 		__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
skb                65 net/ipv6/tcpv6_offload.c 	return tcp_gso_segment(skb, features);
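
tcp6_gro_complete() reseeds th->check from the IPv6 pseudo-header via tcp_v6_check() before handing the merged skb on. A self-contained version of that pseudo-header sum (RFC 8200 style: source, destination, upper-layer length, next header) in one's-complement arithmetic; the function names are illustrative:

    #include <stdint.h>
    #include <stddef.h>

    /* Fold a 32-bit accumulator to a 16-bit one's-complement checksum. */
    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    /* IPv6 pseudo-header sum: src, dst, upper-layer packet length and
     * next-header value (6 for TCP), as tcp_v6_check() seeds it. */
    static uint16_t ipv6_pseudo_csum(const uint8_t saddr[16],
                                     const uint8_t daddr[16],
                                     uint32_t len, uint8_t proto)
    {
        uint32_t sum = 0;
        int i;

        for (i = 0; i < 16; i += 2) {
            sum += (uint32_t)saddr[i] << 8 | saddr[i + 1];
            sum += (uint32_t)daddr[i] << 8 | daddr[i + 1];
        }
        sum += len >> 16;          /* upper-layer length, 32 bits */
        sum += len & 0xffff;
        sum += proto;              /* next header */
        return csum_fold(sum);
    }
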
skb                89 net/ipv6/tunnel6.c static int tunnel6_rcv(struct sk_buff *skb)
skb                93 net/ipv6/tunnel6.c 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
skb                97 net/ipv6/tunnel6.c 		if (!handler->handler(skb))
skb               100 net/ipv6/tunnel6.c 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
skb               103 net/ipv6/tunnel6.c 	kfree_skb(skb);
skb               107 net/ipv6/tunnel6.c static int tunnel46_rcv(struct sk_buff *skb)
skb               111 net/ipv6/tunnel6.c 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
skb               115 net/ipv6/tunnel6.c 		if (!handler->handler(skb))
skb               118 net/ipv6/tunnel6.c 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
skb               121 net/ipv6/tunnel6.c 	kfree_skb(skb);
skb               125 net/ipv6/tunnel6.c static int tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               131 net/ipv6/tunnel6.c 		if (!handler->err_handler(skb, opt, type, code, offset, info))
skb               137 net/ipv6/tunnel6.c static int tunnel46_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               143 net/ipv6/tunnel6.c 		if (!handler->err_handler(skb, opt, type, code, offset, info))
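
Both receive entry points above walk a priority-ordered handler chain and stop at the first handler that returns 0; only if nobody claims the packet does an ICMPv6 port-unreachable go back and the skb get freed. A minimal model of that dispatch contract (the types here are stand-ins, not the kernel's xfrm6_tunnel registration):

    #include <stddef.h>

    struct tun_handler {
        int (*handler)(const void *pkt, size_t len);  /* 0 = consumed */
        struct tun_handler *next;
    };

    /* Walk the chain the way tunnel6_rcv() does: the first handler
     * returning 0 owns the packet; nonzero means "not mine, try next".
     * Returns 0 if consumed, -1 if unclaimed (the kernel then sends
     * ICMPV6_PORT_UNREACH and frees the skb). */
    static int tunnel_dispatch(struct tun_handler *chain,
                               const void *pkt, size_t len)
    {
        struct tun_handler *h;

        for (h = chain; h; h = h->next)
            if (!h->handler(pkt, len))
                return 0;
        return -1;
    }
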
skb               149 net/ipv6/udp.c 		struct sk_buff *skb)
skb               166 net/ipv6/udp.c 				result = reuseport_select_sock(sk, hash, skb,
skb               183 net/ipv6/udp.c 			       struct sk_buff *skb)
skb               196 net/ipv6/udp.c 				  hslot2, skb);
skb               205 net/ipv6/udp.c 					  hslot2, skb);
skb               213 net/ipv6/udp.c static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
skb               217 net/ipv6/udp.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               219 net/ipv6/udp.c 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
skb               220 net/ipv6/udp.c 				 &iph->daddr, dport, inet6_iif(skb),
skb               221 net/ipv6/udp.c 				 inet6_sdif(skb), udptable, skb);
skb               224 net/ipv6/udp.c struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
skb               227 net/ipv6/udp.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               229 net/ipv6/udp.c 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
skb               230 net/ipv6/udp.c 				 &iph->daddr, dport, inet6_iif(skb),
skb               231 net/ipv6/udp.c 				 inet6_sdif(skb), &udp_table, NULL);
skb               257 net/ipv6/udp.c static int udp6_skb_len(struct sk_buff *skb)
skb               259 net/ipv6/udp.c 	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
skb               272 net/ipv6/udp.c 	struct sk_buff *skb;
skb               288 net/ipv6/udp.c 	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
skb               289 net/ipv6/udp.c 	if (!skb)
skb               292 net/ipv6/udp.c 	ulen = udp6_skb_len(skb);
skb               299 net/ipv6/udp.c 	is_udp4 = (skb->protocol == htons(ETH_P_IP));
skb               309 net/ipv6/udp.c 	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
skb               310 net/ipv6/udp.c 		checksum_valid = udp_skb_csum_unnecessary(skb) ||
skb               311 net/ipv6/udp.c 				!__udp_lib_checksum_complete(skb);
skb               316 net/ipv6/udp.c 	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
skb               317 net/ipv6/udp.c 		if (udp_skb_is_linear(skb))
skb               318 net/ipv6/udp.c 			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
skb               320 net/ipv6/udp.c 			err = skb_copy_datagram_msg(skb, off, msg, copied);
skb               322 net/ipv6/udp.c 		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
skb               331 net/ipv6/udp.c 		kfree_skb(skb);
skb               337 net/ipv6/udp.c 	sock_recv_ts_and_drops(msg, sk, skb);
skb               343 net/ipv6/udp.c 		sin6->sin6_port = udp_hdr(skb)->source;
skb               347 net/ipv6/udp.c 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
skb               351 net/ipv6/udp.c 			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
skb               354 net/ipv6/udp.c 						    inet6_iif(skb));
skb               364 net/ipv6/udp.c 		udp_cmsg_recv(msg, sk, skb);
skb               367 net/ipv6/udp.c 		ip6_datagram_recv_common_ctl(sk, msg, skb);
skb               371 net/ipv6/udp.c 			ip_cmsg_recv_offset(msg, sk, skb,
skb               375 net/ipv6/udp.c 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
skb               382 net/ipv6/udp.c 	skb_consume_udp(sk, skb, peeking ? -err : err);
skb               386 net/ipv6/udp.c 	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
skb               391 net/ipv6/udp.c 	kfree_skb(skb);
skb               409 net/ipv6/udp.c static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
skb               416 net/ipv6/udp.c 		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               424 net/ipv6/udp.c 		if (handler && !handler(skb, opt, type, code, offset, info))
skb               452 net/ipv6/udp.c 					 struct sk_buff *skb,
skb               459 net/ipv6/udp.c 	network_offset = skb_network_offset(skb);
skb               460 net/ipv6/udp.c 	transport_offset = skb_transport_offset(skb);
skb               463 net/ipv6/udp.c 	skb_reset_network_header(skb);
skb               466 net/ipv6/udp.c 	skb_set_transport_header(skb, offset);
skb               470 net/ipv6/udp.c 			       inet6_iif(skb), 0, udptable, skb);
skb               472 net/ipv6/udp.c 		int (*lookup)(struct sock *sk, struct sk_buff *skb);
skb               476 net/ipv6/udp.c 		if (!lookup || lookup(sk, skb))
skb               481 net/ipv6/udp.c 		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
skb               485 net/ipv6/udp.c 	skb_set_transport_header(skb, transport_offset);
skb               486 net/ipv6/udp.c 	skb_set_network_header(skb, network_offset);
skb               491 net/ipv6/udp.c int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               496 net/ipv6/udp.c 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
skb               499 net/ipv6/udp.c 	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
skb               504 net/ipv6/udp.c 	struct net *net = dev_net(skb->dev);
skb               507 net/ipv6/udp.c 			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
skb               513 net/ipv6/udp.c 						  udptable, skb,
skb               520 net/ipv6/udp.c 			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
skb               534 net/ipv6/udp.c 		ip6_sk_update_pmtu(skb, sk, info);
skb               540 net/ipv6/udp.c 			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
skb               543 net/ipv6/udp.c 			ip6_sk_redirect(skb, sk);
skb               556 net/ipv6/udp.c 		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
skb               565 net/ipv6/udp.c static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               570 net/ipv6/udp.c 		sock_rps_save_rxhash(sk, skb);
skb               571 net/ipv6/udp.c 		sk_mark_napi_id(sk, skb);
skb               574 net/ipv6/udp.c 		sk_mark_napi_id_once(sk, skb);
skb               577 net/ipv6/udp.c 	rc = __udp_enqueue_schedule_skb(sk, skb);
skb               586 net/ipv6/udp.c 		kfree_skb(skb);
skb               593 net/ipv6/udp.c static __inline__ int udpv6_err(struct sk_buff *skb,
skb               597 net/ipv6/udp.c 	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
skb               600 net/ipv6/udp.c static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
skb               605 net/ipv6/udp.c 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
skb               609 net/ipv6/udp.c 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
skb               628 net/ipv6/udp.c 			if (udp_lib_checksum_complete(skb))
skb               631 net/ipv6/udp.c 			ret = encap_rcv(sk, skb);
skb               646 net/ipv6/udp.c 	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
skb               650 net/ipv6/udp.c 					    UDP_SKB_CB(skb)->cscov, skb->len);
skb               653 net/ipv6/udp.c 		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
skb               655 net/ipv6/udp.c 					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
skb               662 net/ipv6/udp.c 	    udp_lib_checksum_complete(skb))
skb               665 net/ipv6/udp.c 	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
skb               668 net/ipv6/udp.c 	udp_csum_pull_header(skb);
skb               670 net/ipv6/udp.c 	skb_dst_drop(skb);
skb               672 net/ipv6/udp.c 	return __udpv6_queue_rcv_skb(sk, skb);
skb               679 net/ipv6/udp.c 	kfree_skb(skb);
skb               683 net/ipv6/udp.c static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               688 net/ipv6/udp.c 	if (likely(!udp_unexpected_gso(sk, skb)))
skb               689 net/ipv6/udp.c 		return udpv6_queue_rcv_one_skb(sk, skb);
skb               691 net/ipv6/udp.c 	__skb_push(skb, -skb_mac_offset(skb));
skb               692 net/ipv6/udp.c 	segs = udp_rcv_segment(sk, skb, false);
skb               693 net/ipv6/udp.c 	for (skb = segs; skb; skb = next) {
skb               694 net/ipv6/udp.c 		next = skb->next;
skb               695 net/ipv6/udp.c 		__skb_pull(skb, skb_transport_offset(skb));
skb               697 net/ipv6/udp.c 		ret = udpv6_queue_rcv_one_skb(sk, skb);
skb               699 net/ipv6/udp.c 			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
skb               729 net/ipv6/udp.c static void udp6_csum_zero_error(struct sk_buff *skb)
skb               735 net/ipv6/udp.c 			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
skb               736 net/ipv6/udp.c 			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
skb               743 net/ipv6/udp.c static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
skb               748 net/ipv6/udp.c 	const struct udphdr *uh = udp_hdr(skb);
skb               753 net/ipv6/udp.c 	int dif = inet6_iif(skb);
skb               754 net/ipv6/udp.c 	int sdif = inet6_sdif(skb);
skb               781 net/ipv6/udp.c 		nskb = skb_clone(skb, GFP_ATOMIC);
skb               802 net/ipv6/udp.c 		if (udpv6_queue_rcv_skb(first, skb) > 0)
skb               803 net/ipv6/udp.c 			consume_skb(skb);
skb               805 net/ipv6/udp.c 		kfree_skb(skb);
skb               824 net/ipv6/udp.c static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
skb               830 net/ipv6/udp.c 		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
skb               832 net/ipv6/udp.c 	ret = udpv6_queue_rcv_skb(sk, skb);
skb               840 net/ipv6/udp.c int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
skb               844 net/ipv6/udp.c 	struct net *net = dev_net(skb->dev);
skb               849 net/ipv6/udp.c 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
skb               852 net/ipv6/udp.c 	saddr = &ipv6_hdr(skb)->saddr;
skb               853 net/ipv6/udp.c 	daddr = &ipv6_hdr(skb)->daddr;
skb               854 net/ipv6/udp.c 	uh = udp_hdr(skb);
skb               857 net/ipv6/udp.c 	if (ulen > skb->len)
skb               865 net/ipv6/udp.c 			ulen = skb->len;
skb               870 net/ipv6/udp.c 		if (ulen < skb->len) {
skb               871 net/ipv6/udp.c 			if (pskb_trim_rcsum(skb, ulen))
skb               873 net/ipv6/udp.c 			saddr = &ipv6_hdr(skb)->saddr;
skb               874 net/ipv6/udp.c 			daddr = &ipv6_hdr(skb)->daddr;
skb               875 net/ipv6/udp.c 			uh = udp_hdr(skb);
skb               879 net/ipv6/udp.c 	if (udp6_csum_init(skb, uh, proto))
skb               883 net/ipv6/udp.c 	sk = skb_steal_sock(skb);
skb               885 net/ipv6/udp.c 		struct dst_entry *dst = skb_dst(skb);
skb               896 net/ipv6/udp.c 		ret = udp6_unicast_rcv_skb(sk, skb, uh);
skb               905 net/ipv6/udp.c 		return __udp6_lib_mcast_deliver(net, skb,
skb               909 net/ipv6/udp.c 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
skb               913 net/ipv6/udp.c 		return udp6_unicast_rcv_skb(sk, skb, uh);
skb               919 net/ipv6/udp.c 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
skb               922 net/ipv6/udp.c 	if (udp_lib_checksum_complete(skb))
skb               926 net/ipv6/udp.c 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
skb               928 net/ipv6/udp.c 	kfree_skb(skb);
skb               935 net/ipv6/udp.c 			    ulen, skb->len,
skb               940 net/ipv6/udp.c 	udp6_csum_zero_error(skb);
skb               945 net/ipv6/udp.c 	kfree_skb(skb);
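
__udp6_lib_rcv() validates the length field before anything else: a claim longer than the buffer is a short packet, zero means a jumbogram (length taken from the skb), and a shorter-than-buffer claim is legal for plain UDP and triggers a trim, after which the header pointers are re-read because pskb_trim_rcsum() may move skb->data. A condensed decision table of those checks; this is a sketch of my reading of the flow, not the exact kernel control structure:

    #include <stdint.h>
    #include <stddef.h>

    enum udp_len_check { UDP_LEN_OK, UDP_LEN_TRIM, UDP_LEN_SHORT };

    /* Match the UDP header's length field against the bytes actually
     * held, mirroring the checks at the top of __udp6_lib_rcv(). */
    static enum udp_len_check udp6_check_len(uint16_t ulen, size_t have,
                                             int is_udplite)
    {
        if (ulen > have)
            return UDP_LEN_SHORT;   /* claims more than we received */
        if (is_udplite)
            return UDP_LEN_OK;      /* UDP-Lite: field is csum coverage */
        if (ulen == 0)
            return UDP_LEN_OK;      /* jumbogram: take length from skb */
        if (ulen < 8)
            return UDP_LEN_SHORT;   /* shorter than the UDP header */
        if (ulen < have)
            return UDP_LEN_TRIM;    /* kernel: pskb_trim_rcsum(skb, ulen) */
        return UDP_LEN_OK;
    }
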
skb               972 net/ipv6/udp.c INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
skb               974 net/ipv6/udp.c 	struct net *net = dev_net(skb->dev);
skb               978 net/ipv6/udp.c 	int dif = skb->dev->ifindex;
skb               979 net/ipv6/udp.c 	int sdif = inet6_sdif(skb);
skb               981 net/ipv6/udp.c 	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
skb               985 net/ipv6/udp.c 	uh = udp_hdr(skb);
skb               987 net/ipv6/udp.c 	if (skb->pkt_type == PACKET_HOST)
skb               989 net/ipv6/udp.c 					     &ipv6_hdr(skb)->daddr,
skb               990 net/ipv6/udp.c 					     uh->source, &ipv6_hdr(skb)->saddr,
skb               998 net/ipv6/udp.c 	skb->sk = sk;
skb               999 net/ipv6/udp.c 	skb->destructor = sock_efree;
skb              1009 net/ipv6/udp.c 		skb_dst_set_noref(skb, dst);
skb              1013 net/ipv6/udp.c INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
skb              1015 net/ipv6/udp.c 	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
skb              1061 net/ipv6/udp.c static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
skb              1066 net/ipv6/udp.c 	struct udphdr *uh = udp_hdr(skb);
skb              1067 net/ipv6/udp.c 	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
skb              1072 net/ipv6/udp.c 		skb->csum_start = skb_transport_header(skb) - skb->head;
skb              1073 net/ipv6/udp.c 		skb->csum_offset = offsetof(struct udphdr, check);
skb              1081 net/ipv6/udp.c 		offset = skb_transport_offset(skb);
skb              1082 net/ipv6/udp.c 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb              1083 net/ipv6/udp.c 		csum = skb->csum;
skb              1085 net/ipv6/udp.c 		skb->ip_summed = CHECKSUM_NONE;
skb              1102 net/ipv6/udp.c static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
skb              1105 net/ipv6/udp.c 	struct sock *sk = skb->sk;
skb              1110 net/ipv6/udp.c 	int offset = skb_transport_offset(skb);
skb              1111 net/ipv6/udp.c 	int len = skb->len - offset;
skb              1117 net/ipv6/udp.c 	uh = udp_hdr(skb);
skb              1124 net/ipv6/udp.c 		const int hlen = skb_network_header_len(skb) +
skb              1128 net/ipv6/udp.c 			kfree_skb(skb);
skb              1131 net/ipv6/udp.c 		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
skb              1132 net/ipv6/udp.c 			kfree_skb(skb);
skb              1136 net/ipv6/udp.c 			kfree_skb(skb);
skb              1139 net/ipv6/udp.c 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
skb              1140 net/ipv6/udp.c 		    dst_xfrm(skb_dst(skb))) {
skb              1141 net/ipv6/udp.c 			kfree_skb(skb);
skb              1146 net/ipv6/udp.c 			skb_shinfo(skb)->gso_size = cork->gso_size;
skb              1147 net/ipv6/udp.c 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
skb              1148 net/ipv6/udp.c 			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
skb              1155 net/ipv6/udp.c 		csum = udplite_csum(skb);
skb              1157 net/ipv6/udp.c 		skb->ip_summed = CHECKSUM_NONE;
skb              1159 net/ipv6/udp.c 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
skb              1161 net/ipv6/udp.c 		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
skb              1164 net/ipv6/udp.c 		csum = udp_csum(skb);
skb              1173 net/ipv6/udp.c 	err = ip6_send_skb(skb);
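
For UDP GSO, udp_v6_send_skb() caps a send at UDP_MAX_SEGMENTS worth of gso_size and records gso_segs with a round-up division over the payload, as the DIV_ROUND_UP line above shows. The accounting, standalone; the 64-segment constant is my understanding of the kernel's UDP_MAX_SEGMENTS and is flagged as an assumption in the comment:

    #include <stdint.h>
    #include <stdio.h>

    #define UDP_MAX_SEGS 64   /* assumed value of UDP_MAX_SEGMENTS (1 << 6) */

    /* Segment accounting from udp_v6_send_skb(): datalen is the payload
     * past the UDP header. Returns the segment count, or 0 when the send
     * must be rejected for exceeding the per-send segment cap. */
    static unsigned int udp_gso_segs(unsigned int datalen,
                                     unsigned int gso_size)
    {
        unsigned int segs = (datalen + gso_size - 1) / gso_size;

        if (segs > UDP_MAX_SEGS)
            return 0;
        return segs;
    }

    int main(void)
    {
        printf("%u\n", udp_gso_segs(65000, 1452));  /* 45 segments */
        return 0;
    }
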
skb              1189 net/ipv6/udp.c 	struct sk_buff *skb;
skb              1202 net/ipv6/udp.c 	skb = ip6_finish_skb(sk);
skb              1203 net/ipv6/udp.c 	if (!skb)
skb              1206 net/ipv6/udp.c 	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);
skb              1463 net/ipv6/udp.c 		struct sk_buff *skb;
skb              1465 net/ipv6/udp.c 		skb = ip6_make_skb(sk, getfrag, msg, ulen,
skb              1469 net/ipv6/udp.c 		err = PTR_ERR(skb);
skb              1470 net/ipv6/udp.c 		if (!IS_ERR_OR_NULL(skb))
skb              1471 net/ipv6/udp.c 			err = udp_v6_send_skb(skb, &fl6, &cork.base);
skb                17 net/ipv6/udp_offload.c static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
skb                31 net/ipv6/udp_offload.c 	mss = skb_shinfo(skb)->gso_size;
skb                32 net/ipv6/udp_offload.c 	if (unlikely(skb->len <= mss))
skb                35 net/ipv6/udp_offload.c 	if (skb->encapsulation && skb_shinfo(skb)->gso_type &
skb                37 net/ipv6/udp_offload.c 		segs = skb_udp_tunnel_segment(skb, features, true);
skb                42 net/ipv6/udp_offload.c 		if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
skb                45 net/ipv6/udp_offload.c 		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
skb                48 net/ipv6/udp_offload.c 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
skb                49 net/ipv6/udp_offload.c 			return __udp_gso_segment(skb, features);
skb                55 net/ipv6/udp_offload.c 		uh = udp_hdr(skb);
skb                56 net/ipv6/udp_offload.c 		ipv6h = ipv6_hdr(skb);
skb                59 net/ipv6/udp_offload.c 		csum = skb_checksum(skb, 0, skb->len, 0);
skb                60 net/ipv6/udp_offload.c 		uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
skb                65 net/ipv6/udp_offload.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb                71 net/ipv6/udp_offload.c 		if (!skb->encap_hdr_csum)
skb                75 net/ipv6/udp_offload.c 		tnl_hlen = skb_tnl_header_len(skb);
skb                76 net/ipv6/udp_offload.c 		if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
skb                77 net/ipv6/udp_offload.c 			if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
skb                84 net/ipv6/udp_offload.c 		err = ip6_find_1stfragopt(skb, &prevhdr);
skb                90 net/ipv6/udp_offload.c 		unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
skb                92 net/ipv6/udp_offload.c 		packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
skb                95 net/ipv6/udp_offload.c 		SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
skb                96 net/ipv6/udp_offload.c 		skb->mac_header -= frag_hdr_sz;
skb                97 net/ipv6/udp_offload.c 		skb->network_header -= frag_hdr_sz;
skb                99 net/ipv6/udp_offload.c 		fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
skb               102 net/ipv6/udp_offload.c 		fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb);
skb               107 net/ipv6/udp_offload.c 		segs = skb_segment(skb, features);
skb               115 net/ipv6/udp_offload.c struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
skb               117 net/ipv6/udp_offload.c 	struct udphdr *uh = udp_gro_udphdr(skb);
skb               123 net/ipv6/udp_offload.c 	if (NAPI_GRO_CB(skb)->flush)
skb               126 net/ipv6/udp_offload.c 	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
skb               130 net/ipv6/udp_offload.c 		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
skb               134 net/ipv6/udp_offload.c 	NAPI_GRO_CB(skb)->is_ipv6 = 1;
skb               135 net/ipv6/udp_offload.c 	return udp_gro_receive(head, skb, uh, udp6_lib_lookup_skb);
skb               138 net/ipv6/udp_offload.c 	NAPI_GRO_CB(skb)->flush = 1;
skb               142 net/ipv6/udp_offload.c INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
skb               144 net/ipv6/udp_offload.c 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               145 net/ipv6/udp_offload.c 	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
skb               148 net/ipv6/udp_offload.c 		uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
skb               151 net/ipv6/udp_offload.c 	return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
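
udp6_gro_complete above restamps the UDP checksum of the coalesced super-packet with ~udp_v6_check() so only the pseudo-header sum is present. A sketch of that fixup idea, leaving the packet CHECKSUM_PARTIAL for whoever segments or transmits it; udp6_gro_fixup_csum is a hypothetical name and the tunnel-only condition from the listing is omitted:

    #include <linux/ipv6.h>
    #include <linux/udp.h>
    #include <net/ip6_checksum.h>

    static void udp6_gro_fixup_csum(struct sk_buff *skb, int nhoff)
    {
            const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
            struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

            /* ~ because CHECKSUM_PARTIAL expects the unfolded
             * complement of the pseudo-header sum. */
            uh->check = ~udp_v6_check(skb->len - nhoff,
                                      &ipv6h->saddr, &ipv6h->daddr, 0);
            skb->ip_summed = CHECKSUM_PARTIAL;
            skb->csum_start = (unsigned char *)uh - skb->head;
            skb->csum_offset = offsetof(struct udphdr, check);
    }
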
skb                15 net/ipv6/udplite.c static int udplitev6_rcv(struct sk_buff *skb)
skb                17 net/ipv6/udplite.c 	return __udp6_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
skb                20 net/ipv6/udplite.c static int udplitev6_err(struct sk_buff *skb,
skb                24 net/ipv6/udplite.c 	return __udp6_lib_err(skb, opt, type, code, offset, info,
skb                20 net/ipv6/xfrm6_input.c int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
skb                22 net/ipv6/xfrm6_input.c 	return xfrm6_extract_header(skb);
skb                25 net/ipv6/xfrm6_input.c int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
skb                28 net/ipv6/xfrm6_input.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
skb                29 net/ipv6/xfrm6_input.c 	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
skb                30 net/ipv6/xfrm6_input.c 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
skb                31 net/ipv6/xfrm6_input.c 	return xfrm_input(skb, nexthdr, spi, 0);
skb                36 net/ipv6/xfrm6_input.c 				   struct sk_buff *skb)
skb                38 net/ipv6/xfrm6_input.c 	if (xfrm_trans_queue(skb, ip6_rcv_finish))
skb                39 net/ipv6/xfrm6_input.c 		__kfree_skb(skb);
skb                43 net/ipv6/xfrm6_input.c int xfrm6_transport_finish(struct sk_buff *skb, int async)
skb                45 net/ipv6/xfrm6_input.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb                46 net/ipv6/xfrm6_input.c 	int nhlen = skb->data - skb_network_header(skb);
skb                48 net/ipv6/xfrm6_input.c 	skb_network_header(skb)[IP6CB(skb)->nhoff] =
skb                49 net/ipv6/xfrm6_input.c 		XFRM_MODE_SKB_CB(skb)->protocol;
skb                56 net/ipv6/xfrm6_input.c 	__skb_push(skb, nhlen);
skb                57 net/ipv6/xfrm6_input.c 	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb                58 net/ipv6/xfrm6_input.c 	skb_postpush_rcsum(skb, skb_network_header(skb), nhlen);
skb                61 net/ipv6/xfrm6_input.c 		skb_mac_header_rebuild(skb);
skb                62 net/ipv6/xfrm6_input.c 		skb_reset_transport_header(skb);
skb                67 net/ipv6/xfrm6_input.c 		dev_net(skb->dev), NULL, skb, skb->dev, NULL,
skb                72 net/ipv6/xfrm6_input.c int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t)
skb                74 net/ipv6/xfrm6_input.c 	return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
skb                79 net/ipv6/xfrm6_input.c int xfrm6_rcv(struct sk_buff *skb)
skb                81 net/ipv6/xfrm6_input.c 	return xfrm6_rcv_tnl(skb, NULL);
skb                84 net/ipv6/xfrm6_input.c int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
skb                87 net/ipv6/xfrm6_input.c 	struct net *net = dev_net(skb->dev);
skb                92 net/ipv6/xfrm6_input.c 	sp = secpath_set(skb);
skb               123 net/ipv6/xfrm6_input.c 		x = xfrm_state_lookup_byaddr(net, skb->mark, dst, src, proto, AF_INET6);
skb               133 net/ipv6/xfrm6_input.c 			if (x->type->input(x, skb) > 0) {
skb               146 net/ipv6/xfrm6_input.c 		xfrm_audit_state_notfound_simple(skb, AF_INET6);
skb               154 net/ipv6/xfrm6_input.c 	x->curlft.bytes += skb->len;
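
The xfrm6_transport_finish entries restore the IPv6 header after ESP/AH stripped a transform: patch the next-header byte the transform had claimed, push the header back into view, and fix payload_len plus the receive checksum. A sketch of just that sequence; xfrm6_restore_hdr is a hypothetical wrapper around steps shown verbatim above:

    #include <linux/ipv6.h>
    #include <linux/skbuff.h>

    static void xfrm6_restore_hdr(struct sk_buff *skb, u8 inner_proto)
    {
            /* bytes the transform layer pulled past the IPv6 header */
            int nhlen = skb->data - skb_network_header(skb);

            /* patch the next-header byte ESP/AH had overwritten */
            skb_network_header(skb)[IP6CB(skb)->nhoff] = inner_proto;

            /* re-expose the IPv6 header and fix its payload length */
            __skb_push(skb, nhlen);
            ipv6_hdr(skb)->payload_len =
                    htons(skb->len - sizeof(struct ipv6hdr));

            /* keep CHECKSUM_COMPLETE consistent with the pushed bytes */
            skb_postpush_rcsum(skb, skb_network_header(skb), nhlen);
    }
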
skb                19 net/ipv6/xfrm6_output.c int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
skb                22 net/ipv6/xfrm6_output.c 	return ip6_find_1stfragopt(skb, prevhdr);
skb                26 net/ipv6/xfrm6_output.c static int xfrm6_local_dontfrag(struct sk_buff *skb)
skb                29 net/ipv6/xfrm6_output.c 	struct sock *sk = skb->sk;
skb                43 net/ipv6/xfrm6_output.c static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
skb                46 net/ipv6/xfrm6_output.c 	struct sock *sk = skb->sk;
skb                49 net/ipv6/xfrm6_output.c 	fl6.daddr = ipv6_hdr(skb)->daddr;
skb                54 net/ipv6/xfrm6_output.c void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
skb                58 net/ipv6/xfrm6_output.c 	struct sock *sk = skb->sk;
skb                60 net/ipv6/xfrm6_output.c 	hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
skb                67 net/ipv6/xfrm6_output.c static int xfrm6_tunnel_check_size(struct sk_buff *skb)
skb                70 net/ipv6/xfrm6_output.c 	struct dst_entry *dst = skb_dst(skb);
skb                72 net/ipv6/xfrm6_output.c 	if (skb->ignore_df)
skb                79 net/ipv6/xfrm6_output.c 	if ((!skb_is_gso(skb) && skb->len > mtu) ||
skb                80 net/ipv6/xfrm6_output.c 	    (skb_is_gso(skb) &&
skb                81 net/ipv6/xfrm6_output.c 	     !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
skb                82 net/ipv6/xfrm6_output.c 		skb->dev = dst->dev;
skb                83 net/ipv6/xfrm6_output.c 		skb->protocol = htons(ETH_P_IPV6);
skb                85 net/ipv6/xfrm6_output.c 		if (xfrm6_local_dontfrag(skb))
skb                86 net/ipv6/xfrm6_output.c 			xfrm6_local_rxpmtu(skb, mtu);
skb                87 net/ipv6/xfrm6_output.c 		else if (skb->sk)
skb                88 net/ipv6/xfrm6_output.c 			xfrm_local_error(skb, mtu);
skb                90 net/ipv6/xfrm6_output.c 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb                97 net/ipv6/xfrm6_output.c int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
skb               101 net/ipv6/xfrm6_output.c 	err = xfrm6_tunnel_check_size(skb);
skb               105 net/ipv6/xfrm6_output.c 	XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;
skb               107 net/ipv6/xfrm6_output.c 	return xfrm6_extract_header(skb);
skb               110 net/ipv6/xfrm6_output.c int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb)
skb               112 net/ipv6/xfrm6_output.c 	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
skb               114 net/ipv6/xfrm6_output.c 	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
skb               116 net/ipv6/xfrm6_output.c 	return xfrm_output(sk, skb);
skb               120 net/ipv6/xfrm6_output.c 				       struct sk_buff *skb)
skb               128 net/ipv6/xfrm6_output.c 		ret = afinfo->output_finish(sk, skb);
skb               130 net/ipv6/xfrm6_output.c 		kfree_skb(skb);
skb               136 net/ipv6/xfrm6_output.c static int __xfrm6_output_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               138 net/ipv6/xfrm6_output.c 	struct xfrm_state *x = skb_dst(skb)->xfrm;
skb               140 net/ipv6/xfrm6_output.c 	return __xfrm6_output_state_finish(x, sk, skb);
skb               143 net/ipv6/xfrm6_output.c static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               145 net/ipv6/xfrm6_output.c 	struct dst_entry *dst = skb_dst(skb);
skb               152 net/ipv6/xfrm6_output.c 		IP6CB(skb)->flags |= IP6SKB_REROUTED;
skb               153 net/ipv6/xfrm6_output.c 		return dst_output(net, sk, skb);
skb               160 net/ipv6/xfrm6_output.c 	if (skb->protocol == htons(ETH_P_IPV6))
skb               161 net/ipv6/xfrm6_output.c 		mtu = ip6_skb_dst_mtu(skb);
skb               163 net/ipv6/xfrm6_output.c 		mtu = dst_mtu(skb_dst(skb));
skb               165 net/ipv6/xfrm6_output.c 	toobig = skb->len > mtu && !skb_is_gso(skb);
skb               167 net/ipv6/xfrm6_output.c 	if (toobig && xfrm6_local_dontfrag(skb)) {
skb               168 net/ipv6/xfrm6_output.c 		xfrm6_local_rxpmtu(skb, mtu);
skb               169 net/ipv6/xfrm6_output.c 		kfree_skb(skb);
skb               171 net/ipv6/xfrm6_output.c 	} else if (!skb->ignore_df && toobig && skb->sk) {
skb               172 net/ipv6/xfrm6_output.c 		xfrm_local_error(skb, mtu);
skb               173 net/ipv6/xfrm6_output.c 		kfree_skb(skb);
skb               177 net/ipv6/xfrm6_output.c 	if (toobig || dst_allfrag(skb_dst(skb)))
skb               178 net/ipv6/xfrm6_output.c 		return ip6_fragment(net, sk, skb,
skb               182 net/ipv6/xfrm6_output.c 	return __xfrm6_output_state_finish(x, sk, skb);
skb               185 net/ipv6/xfrm6_output.c int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               188 net/ipv6/xfrm6_output.c 			    net, sk, skb,  NULL, skb_dst(skb)->dev,
skb               190 net/ipv6/xfrm6_output.c 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
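
__xfrm6_output above gates post-transform packets on the path MTU before picking one of three error paths (RXPMTU for IPV6_DONTFRAG sockets, xfrm_local_error(), or ip6_fragment()). A hypothetical predicate condensing just the size decision; the caller would dispatch the error handling:

    #include <linux/if_ether.h>
    #include <net/dst.h>
    #include <net/ip6_route.h>

    static bool xfrm6_mtu_ok(struct sk_buff *skb)
    {
            unsigned int mtu;

            /* Transforms may have re-encapsulated into IPv4, so the
             * protocol decides which MTU accessor applies. */
            if (skb->protocol == htons(ETH_P_IPV6))
                    mtu = ip6_skb_dst_mtu(skb);
            else
                    mtu = dst_mtu(skb_dst(skb));

            /* GSO packets are cut to size later, so only a non-GSO
             * packet longer than the path MTU is "too big" here. */
            return skb_is_gso(skb) || skb->len <= mtu;
    }
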
skb               101 net/ipv6/xfrm6_policy.c 			      struct sk_buff *skb, u32 mtu,
skb               107 net/ipv6/xfrm6_policy.c 	path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
skb               111 net/ipv6/xfrm6_policy.c 			   struct sk_buff *skb)
skb               116 net/ipv6/xfrm6_policy.c 	path->ops->redirect(path, sk, skb);
skb                45 net/ipv6/xfrm6_protocol.c static int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
skb                55 net/ipv6/xfrm6_protocol.c 		if ((ret = handler->cb_handler(skb, err)) <= 0)
skb                61 net/ipv6/xfrm6_protocol.c static int xfrm6_esp_rcv(struct sk_buff *skb)
skb                66 net/ipv6/xfrm6_protocol.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
skb                69 net/ipv6/xfrm6_protocol.c 		if ((ret = handler->handler(skb)) != -EINVAL)
skb                72 net/ipv6/xfrm6_protocol.c 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
skb                74 net/ipv6/xfrm6_protocol.c 	kfree_skb(skb);
skb                78 net/ipv6/xfrm6_protocol.c static int xfrm6_esp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb                84 net/ipv6/xfrm6_protocol.c 		if (!handler->err_handler(skb, opt, type, code, offset, info))
skb                90 net/ipv6/xfrm6_protocol.c static int xfrm6_ah_rcv(struct sk_buff *skb)
skb                95 net/ipv6/xfrm6_protocol.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
skb                98 net/ipv6/xfrm6_protocol.c 		if ((ret = handler->handler(skb)) != -EINVAL)
skb               101 net/ipv6/xfrm6_protocol.c 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
skb               103 net/ipv6/xfrm6_protocol.c 	kfree_skb(skb);
skb               107 net/ipv6/xfrm6_protocol.c static int xfrm6_ah_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               113 net/ipv6/xfrm6_protocol.c 		if (!handler->err_handler(skb, opt, type, code, offset, info))
skb               119 net/ipv6/xfrm6_protocol.c static int xfrm6_ipcomp_rcv(struct sk_buff *skb)
skb               124 net/ipv6/xfrm6_protocol.c 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
skb               127 net/ipv6/xfrm6_protocol.c 		if ((ret = handler->handler(skb)) != -EINVAL)
skb               130 net/ipv6/xfrm6_protocol.c 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
skb               132 net/ipv6/xfrm6_protocol.c 	kfree_skb(skb);
skb               136 net/ipv6/xfrm6_protocol.c static int xfrm6_ipcomp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               142 net/ipv6/xfrm6_protocol.c 		if (!handler->err_handler(skb, opt, type, code, offset, info))
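
xfrm6_esp_rcv, xfrm6_ah_rcv and xfrm6_ipcomp_rcv above are the same loop three times: offer the skb to each registered handler until one returns something other than -EINVAL, otherwise answer with port-unreachable and drop. A generic sketch of that dispatch, assuming the usual struct xfrm6_protocol layout from net/xfrm.h; the list-head argument is a hypothetical stand-in for the per-protocol handler chains:

    #include <linux/icmpv6.h>
    #include <linux/rcupdate.h>
    #include <net/xfrm.h>

    /* Caller holds rcu_read_lock(), as the receive path does. */
    static int xfrm6_chain_rcv(struct sk_buff *skb,
                               struct xfrm6_protocol __rcu **head)
    {
            struct xfrm6_protocol *handler;
            int ret;

            XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;

            for (handler = rcu_dereference(*head); handler;
                 handler = rcu_dereference(handler->next))
                    if ((ret = handler->handler(skb)) != -EINVAL)
                            return ret;     /* a handler claimed it */

            /* nobody wanted the SPI: mirror the reject path above */
            icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
            kfree_skb(skb);
            return 0;
    }
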
skb                24 net/ipv6/xfrm6_state.c int xfrm6_extract_header(struct sk_buff *skb)
skb                26 net/ipv6/xfrm6_state.c 	struct ipv6hdr *iph = ipv6_hdr(skb);
skb                28 net/ipv6/xfrm6_state.c 	XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph);
skb                29 net/ipv6/xfrm6_state.c 	XFRM_MODE_SKB_CB(skb)->id = 0;
skb                30 net/ipv6/xfrm6_state.c 	XFRM_MODE_SKB_CB(skb)->frag_off = htons(IP_DF);
skb                31 net/ipv6/xfrm6_state.c 	XFRM_MODE_SKB_CB(skb)->tos = ipv6_get_dsfield(iph);
skb                32 net/ipv6/xfrm6_state.c 	XFRM_MODE_SKB_CB(skb)->ttl = iph->hop_limit;
skb                33 net/ipv6/xfrm6_state.c 	XFRM_MODE_SKB_CB(skb)->optlen = 0;
skb                34 net/ipv6/xfrm6_state.c 	memcpy(XFRM_MODE_SKB_CB(skb)->flow_lbl, iph->flow_lbl,
skb                35 net/ipv6/xfrm6_state.c 	       sizeof(XFRM_MODE_SKB_CB(skb)->flow_lbl));
skb               211 net/ipv6/xfrm6_tunnel.c static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
skb               213 net/ipv6/xfrm6_tunnel.c 	skb_push(skb, -skb_network_offset(skb));
skb               217 net/ipv6/xfrm6_tunnel.c static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
skb               219 net/ipv6/xfrm6_tunnel.c 	return skb_network_header(skb)[IP6CB(skb)->nhoff];
skb               222 net/ipv6/xfrm6_tunnel.c static int xfrm6_tunnel_rcv(struct sk_buff *skb)
skb               224 net/ipv6/xfrm6_tunnel.c 	struct net *net = dev_net(skb->dev);
skb               225 net/ipv6/xfrm6_tunnel.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               229 net/ipv6/xfrm6_tunnel.c 	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
skb               232 net/ipv6/xfrm6_tunnel.c static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb                92 net/iucv/af_iucv.c static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
skb                95 net/iucv/af_iucv.c 		   struct sk_buff *skb, u8 flags);
skb               318 net/iucv/af_iucv.c 		   struct sk_buff *skb, u8 flags)
skb               325 net/iucv/af_iucv.c 	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
skb               327 net/iucv/af_iucv.c 	skb_reset_network_header(skb);
skb               351 net/iucv/af_iucv.c 	skb->dev = iucv->hs_dev;
skb               352 net/iucv/af_iucv.c 	if (!skb->dev) {
skb               357 net/iucv/af_iucv.c 	dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);
skb               359 net/iucv/af_iucv.c 	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
skb               363 net/iucv/af_iucv.c 	if (skb->len > skb->dev->mtu) {
skb               368 net/iucv/af_iucv.c 		skb_trim(skb, skb->dev->mtu);
skb               370 net/iucv/af_iucv.c 	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
skb               372 net/iucv/af_iucv.c 	__skb_header_release(skb);
skb               373 net/iucv/af_iucv.c 	nskb = skb_clone(skb, GFP_ATOMIC);
skb               380 net/iucv/af_iucv.c 	err = dev_queue_xmit(skb);
skb               391 net/iucv/af_iucv.c 	kfree_skb(skb);
skb               475 net/iucv/af_iucv.c 	struct sk_buff *skb;
skb               485 net/iucv/af_iucv.c 	skb = sock_alloc_send_skb(sk, blen, 1, &err);
skb               486 net/iucv/af_iucv.c 	if (skb) {
skb               487 net/iucv/af_iucv.c 		skb_reserve(skb, blen);
skb               488 net/iucv/af_iucv.c 		err = afiucv_hs_send(NULL, sk, skb, flags);
skb              1050 net/iucv/af_iucv.c 			  struct sk_buff *skb)
skb              1054 net/iucv/af_iucv.c 	memcpy(prmdata, (void *) skb->data, skb->len);
skb              1055 net/iucv/af_iucv.c 	prmdata[7] = 0xff - (u8) skb->len;
skb              1067 net/iucv/af_iucv.c 	struct sk_buff *skb;
skb              1160 net/iucv/af_iucv.c 	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
skb              1162 net/iucv/af_iucv.c 	if (!skb)
skb              1165 net/iucv/af_iucv.c 		skb_reserve(skb, headroom);
skb              1166 net/iucv/af_iucv.c 	skb_put(skb, linear);
skb              1167 net/iucv/af_iucv.c 	skb->len = len;
skb              1168 net/iucv/af_iucv.c 	skb->data_len = len - linear;
skb              1169 net/iucv/af_iucv.c 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
skb              1187 net/iucv/af_iucv.c 	IUCV_SKB_CB(skb)->tag = txmsg.tag;
skb              1191 net/iucv/af_iucv.c 		err = afiucv_hs_send(&txmsg, sk, skb, 0);
skb              1197 net/iucv/af_iucv.c 		skb_queue_tail(&iucv->send_skb_q, skb);
skb              1200 net/iucv/af_iucv.c 		    skb->len <= 7) {
skb              1201 net/iucv/af_iucv.c 			err = iucv_send_iprm(iucv->path, &txmsg, skb);
skb              1206 net/iucv/af_iucv.c 				skb_unlink(skb, &iucv->send_skb_q);
skb              1207 net/iucv/af_iucv.c 				kfree_skb(skb);
skb              1214 net/iucv/af_iucv.c 				skb_unlink(skb, &iucv->send_skb_q);
skb              1218 net/iucv/af_iucv.c 		} else if (skb_is_nonlinear(skb)) {
skb              1219 net/iucv/af_iucv.c 			struct iucv_array *iba = (struct iucv_array *)skb->head;
skb              1223 net/iucv/af_iucv.c 			iba[0].address = (u32)(addr_t)skb->data;
skb              1224 net/iucv/af_iucv.c 			iba[0].length = (u32)skb_headlen(skb);
skb              1225 net/iucv/af_iucv.c 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1226 net/iucv/af_iucv.c 				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1234 net/iucv/af_iucv.c 						    (void *)iba, skb->len);
skb              1237 net/iucv/af_iucv.c 					0, 0, (void *)skb->data, skb->len);
skb              1252 net/iucv/af_iucv.c 			skb_unlink(skb, &iucv->send_skb_q);
skb              1261 net/iucv/af_iucv.c 	kfree_skb(skb);
skb              1270 net/iucv/af_iucv.c 	struct sk_buff *skb;
skb              1280 net/iucv/af_iucv.c 	skb = alloc_skb_with_frags(headroom + linear, len - linear,
skb              1282 net/iucv/af_iucv.c 	WARN_ONCE(!skb,
skb              1285 net/iucv/af_iucv.c 	if (skb) {
skb              1287 net/iucv/af_iucv.c 			skb_reserve(skb, headroom);
skb              1288 net/iucv/af_iucv.c 		skb_put(skb, linear);
skb              1289 net/iucv/af_iucv.c 		skb->len = len;
skb              1290 net/iucv/af_iucv.c 		skb->data_len = len - linear;
skb              1292 net/iucv/af_iucv.c 	return skb;
skb              1299 net/iucv/af_iucv.c static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
skb              1310 net/iucv/af_iucv.c 	IUCV_SKB_CB(skb)->class = msg->class;
skb              1315 net/iucv/af_iucv.c 			skb->data = NULL;
skb              1316 net/iucv/af_iucv.c 			skb->len = 0;
skb              1319 net/iucv/af_iucv.c 		if (skb_is_nonlinear(skb)) {
skb              1320 net/iucv/af_iucv.c 			struct iucv_array *iba = (struct iucv_array *)skb->head;
skb              1323 net/iucv/af_iucv.c 			iba[0].address = (u32)(addr_t)skb->data;
skb              1324 net/iucv/af_iucv.c 			iba[0].length = (u32)skb_headlen(skb);
skb              1325 net/iucv/af_iucv.c 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb              1326 net/iucv/af_iucv.c 				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb              1338 net/iucv/af_iucv.c 					      skb->data, len, NULL);
skb              1341 net/iucv/af_iucv.c 			kfree_skb(skb);
skb              1344 net/iucv/af_iucv.c 		WARN_ON_ONCE(skb->len != len);
skb              1347 net/iucv/af_iucv.c 	IUCV_SKB_CB(skb)->offset = 0;
skb              1348 net/iucv/af_iucv.c 	if (sk_filter(sk, skb)) {
skb              1350 net/iucv/af_iucv.c 		kfree_skb(skb);
skb              1353 net/iucv/af_iucv.c 	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
skb              1354 net/iucv/af_iucv.c 		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
skb              1364 net/iucv/af_iucv.c 	struct sk_buff *skb;
skb              1368 net/iucv/af_iucv.c 		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
skb              1369 net/iucv/af_iucv.c 		if (!skb)
skb              1371 net/iucv/af_iucv.c 		iucv_process_message(sk, skb, p->path, &p->msg);
skb              1386 net/iucv/af_iucv.c 	struct sk_buff *skb, *rskb, *cskb;
skb              1401 net/iucv/af_iucv.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb              1402 net/iucv/af_iucv.c 	if (!skb) {
skb              1408 net/iucv/af_iucv.c 	offset = IUCV_SKB_CB(skb)->offset;
skb              1409 net/iucv/af_iucv.c 	rlen   = skb->len - offset;		/* real length of skb */
skb              1414 net/iucv/af_iucv.c 	cskb = skb;
skb              1417 net/iucv/af_iucv.c 			skb_queue_head(&sk->sk_receive_queue, skb);
skb              1433 net/iucv/af_iucv.c 		       sizeof(IUCV_SKB_CB(skb)->class),
skb              1434 net/iucv/af_iucv.c 		       (void *)&IUCV_SKB_CB(skb)->class);
skb              1437 net/iucv/af_iucv.c 			skb_queue_head(&sk->sk_receive_queue, skb);
skb              1447 net/iucv/af_iucv.c 				IUCV_SKB_CB(skb)->offset = offset + copied;
skb              1448 net/iucv/af_iucv.c 				skb_queue_head(&sk->sk_receive_queue, skb);
skb              1453 net/iucv/af_iucv.c 		kfree_skb(skb);
skb              1845 net/iucv/af_iucv.c 	struct sk_buff *skb;
skb              1865 net/iucv/af_iucv.c 	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
skb              1866 net/iucv/af_iucv.c 	if (!skb)
skb              1869 net/iucv/af_iucv.c 	iucv_process_message(sk, skb, path, msg);
skb              1954 net/iucv/af_iucv.c static void afiucv_swap_src_dest(struct sk_buff *skb)
skb              1956 net/iucv/af_iucv.c 	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
skb              1970 net/iucv/af_iucv.c 	skb_push(skb, ETH_HLEN);
skb              1971 net/iucv/af_iucv.c 	memset(skb->data, 0, ETH_HLEN);
skb              1977 net/iucv/af_iucv.c static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
skb              1979 net/iucv/af_iucv.c 	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
skb              1987 net/iucv/af_iucv.c 		afiucv_swap_src_dest(skb);
skb              1989 net/iucv/af_iucv.c 		err = dev_queue_xmit(skb);
skb              1999 net/iucv/af_iucv.c 		afiucv_swap_src_dest(skb);
skb              2001 net/iucv/af_iucv.c 		err = dev_queue_xmit(skb);
skb              2022 net/iucv/af_iucv.c 	afiucv_swap_src_dest(skb);
skb              2026 net/iucv/af_iucv.c 	err = dev_queue_xmit(skb);
skb              2042 net/iucv/af_iucv.c static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
skb              2051 net/iucv/af_iucv.c 	iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
skb              2056 net/iucv/af_iucv.c 	kfree_skb(skb);
skb              2063 net/iucv/af_iucv.c static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
skb              2076 net/iucv/af_iucv.c 	kfree_skb(skb);
skb              2083 net/iucv/af_iucv.c static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
skb              2097 net/iucv/af_iucv.c 	kfree_skb(skb);
skb              2104 net/iucv/af_iucv.c static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
skb              2114 net/iucv/af_iucv.c 	atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
skb              2122 net/iucv/af_iucv.c static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
skb              2127 net/iucv/af_iucv.c 		kfree_skb(skb);
skb              2132 net/iucv/af_iucv.c 		kfree_skb(skb);
skb              2137 net/iucv/af_iucv.c 		kfree_skb(skb);
skb              2142 net/iucv/af_iucv.c 	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
skb              2143 net/iucv/af_iucv.c 	skb_reset_transport_header(skb);
skb              2144 net/iucv/af_iucv.c 	skb_reset_network_header(skb);
skb              2145 net/iucv/af_iucv.c 	IUCV_SKB_CB(skb)->offset = 0;
skb              2146 net/iucv/af_iucv.c 	if (sk_filter(sk, skb)) {
skb              2148 net/iucv/af_iucv.c 		kfree_skb(skb);
skb              2154 net/iucv/af_iucv.c 		if (__sock_queue_rcv_skb(sk, skb))
skb              2156 net/iucv/af_iucv.c 			skb_queue_tail(&iucv->backlog_skb_q, skb);
skb              2158 net/iucv/af_iucv.c 		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
skb              2168 net/iucv/af_iucv.c static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
skb              2177 net/iucv/af_iucv.c 	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
skb              2178 net/iucv/af_iucv.c 		WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
skb              2179 net/iucv/af_iucv.c 		kfree_skb(skb);
skb              2183 net/iucv/af_iucv.c 	trans_hdr = iucv_trans_hdr(skb);
skb              2234 net/iucv/af_iucv.c 		err = afiucv_hs_callback_syn(sk, skb);
skb              2238 net/iucv/af_iucv.c 		err = afiucv_hs_callback_synack(sk, skb);
skb              2242 net/iucv/af_iucv.c 		err = afiucv_hs_callback_synfin(sk, skb);
skb              2246 net/iucv/af_iucv.c 		err = afiucv_hs_callback_fin(sk, skb);
skb              2249 net/iucv/af_iucv.c 		err = afiucv_hs_callback_win(sk, skb);
skb              2250 net/iucv/af_iucv.c 		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
skb              2251 net/iucv/af_iucv.c 			kfree_skb(skb);
skb              2260 net/iucv/af_iucv.c 		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
skb              2261 net/iucv/af_iucv.c 		err = afiucv_hs_callback_rx(sk, skb);
skb              2264 net/iucv/af_iucv.c 		kfree_skb(skb);
skb              2274 net/iucv/af_iucv.c static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
skb              2277 net/iucv/af_iucv.c 	struct sock *isk = skb->sk;
skb              2299 net/iucv/af_iucv.c 		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
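
The af_iucv send and receive paths above flatten a nonlinear skb into an iucv_array of (address, length) descriptors: one entry for the linear head, then one per page fragment. A standalone sketch of that walk; struct xfer_vec is a hypothetical stand-in for the real iucv_array, and the u32 casts mirror the s390 addr_t addressing in the listing:

    #include <linux/skbuff.h>

    struct xfer_vec {
            u32 address;
            u32 length;
    };

    /* Returns the number of descriptor entries filled; the caller
     * must provide room for nr_frags + 1 entries. */
    static int skb_to_xfer_vec(struct sk_buff *skb, struct xfer_vec *v)
    {
            int i;

            v[0].address = (u32)(unsigned long)skb->data;
            v[0].length  = skb_headlen(skb);

            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                    v[i + 1].address = (u32)(unsigned long)skb_frag_address(frag);
                    v[i + 1].length  = skb_frag_size(frag);
            }
            return skb_shinfo(skb)->nr_frags + 1;
    }
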
skb                42 net/kcm/kcmsock.c static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
skb                44 net/kcm/kcmsock.c 	return (struct kcm_tx_msg *)skb->cb;
skb               115 net/kcm/kcmsock.c static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
skb               125 net/kcm/kcmsock.c 	struct sk_buff *skb;
skb               130 net/kcm/kcmsock.c 	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
skb               131 net/kcm/kcmsock.c 		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
skb               133 net/kcm/kcmsock.c 			skb_queue_head(&mux->rx_hold_queue, skb);
skb               167 net/kcm/kcmsock.c static void kcm_rfree(struct sk_buff *skb)
skb               169 net/kcm/kcmsock.c 	struct sock *sk = skb->sk;
skb               172 net/kcm/kcmsock.c 	unsigned int len = skb->truesize;
skb               188 net/kcm/kcmsock.c static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               195 net/kcm/kcmsock.c 	if (!sk_rmem_schedule(sk, skb, skb->truesize))
skb               198 net/kcm/kcmsock.c 	skb->dev = NULL;
skb               200 net/kcm/kcmsock.c 	skb_orphan(skb);
skb               201 net/kcm/kcmsock.c 	skb->sk = sk;
skb               202 net/kcm/kcmsock.c 	skb->destructor = kcm_rfree;
skb               203 net/kcm/kcmsock.c 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
skb               204 net/kcm/kcmsock.c 	sk_mem_charge(sk, skb->truesize);
skb               206 net/kcm/kcmsock.c 	skb_queue_tail(list, skb);
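
kcm_queue_rcv_skb above transfers an skb from the lower transport socket to a KCM socket: orphan it, re-own it, and install a destructor that undoes the receive-memory accounting on free. A minimal sketch of the pattern; rx_rfree and rx_queue_skb are hypothetical, and the real kcm_rfree additionally wakes rx waiters, which is omitted here:

    #include <net/sock.h>

    static void rx_rfree(struct sk_buff *skb)
    {
            struct sock *sk = skb->sk;

            sk_mem_uncharge(sk, skb->truesize);
            atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
    }

    static int rx_queue_skb(struct sock *sk, struct sk_buff *skb)
    {
            if (!sk_rmem_schedule(sk, skb, skb->truesize))
                    return -ENOMEM;

            skb_orphan(skb);                /* run the old owner's destructor */
            skb->sk = sk;                   /* take ownership */
            skb->destructor = rx_rfree;
            atomic_add(skb->truesize, &sk->sk_rmem_alloc);
            sk_mem_charge(sk, skb->truesize);

            skb_queue_tail(&sk->sk_receive_queue, skb);
            sk->sk_data_ready(sk);
            return 0;
    }
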
skb               220 net/kcm/kcmsock.c 	struct sk_buff *skb;
skb               223 net/kcm/kcmsock.c 	while ((skb = __skb_dequeue(head))) {
skb               225 net/kcm/kcmsock.c 		skb->destructor = sock_rfree;
skb               226 net/kcm/kcmsock.c 		skb_orphan(skb);
skb               229 net/kcm/kcmsock.c 			skb_queue_tail(&mux->rx_hold_queue, skb);
skb               236 net/kcm/kcmsock.c 		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
skb               356 net/kcm/kcmsock.c static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
skb               362 net/kcm/kcmsock.c 	kcm = reserve_rx_kcm(psock, skb);
skb               370 net/kcm/kcmsock.c 	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
skb               377 net/kcm/kcmsock.c static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
skb               384 net/kcm/kcmsock.c 	res = BPF_PROG_RUN(prog, skb);
skb               579 net/kcm/kcmsock.c 	struct sk_buff *skb, *head;
skb               614 net/kcm/kcmsock.c 		skb = txm->frag_skb;
skb               625 net/kcm/kcmsock.c 		skb = head;
skb               630 net/kcm/kcmsock.c 		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
skb               635 net/kcm/kcmsock.c 		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
skb               641 net/kcm/kcmsock.c 			frag = &skb_shinfo(skb)->frags[fragidx];
skb               660 net/kcm/kcmsock.c 					txm->frag_skb = skb;
skb               691 net/kcm/kcmsock.c 		if (skb == head) {
skb               692 net/kcm/kcmsock.c 			if (skb_has_frag_list(skb)) {
skb               693 net/kcm/kcmsock.c 				skb = skb_shinfo(skb)->frag_list;
skb               696 net/kcm/kcmsock.c 		} else if (skb->next) {
skb               697 net/kcm/kcmsock.c 			skb = skb->next;
skb               763 net/kcm/kcmsock.c 	struct sk_buff *skb = NULL, *head = NULL;
skb               786 net/kcm/kcmsock.c 		skb = kcm_tx_msg(head)->last_skb;
skb               787 net/kcm/kcmsock.c 		i = skb_shinfo(skb)->nr_frags;
skb               789 net/kcm/kcmsock.c 		if (skb_can_coalesce(skb, i, page, offset)) {
skb               790 net/kcm/kcmsock.c 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
skb               791 net/kcm/kcmsock.c 			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
skb               806 net/kcm/kcmsock.c 			if (head == skb)
skb               809 net/kcm/kcmsock.c 				skb->next = tskb;
skb               811 net/kcm/kcmsock.c 			skb = tskb;
skb               812 net/kcm/kcmsock.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               833 net/kcm/kcmsock.c 		skb = head;
skb               838 net/kcm/kcmsock.c 	skb_fill_page_desc(skb, i, page, offset, size);
skb               839 net/kcm/kcmsock.c 	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
skb               842 net/kcm/kcmsock.c 	skb->len += size;
skb               843 net/kcm/kcmsock.c 	skb->data_len += size;
skb               844 net/kcm/kcmsock.c 	skb->truesize += size;
skb               848 net/kcm/kcmsock.c 	if (head != skb) {
skb               879 net/kcm/kcmsock.c 		kcm_tx_msg(head)->last_skb = skb;
skb               904 net/kcm/kcmsock.c 	struct sk_buff *skb = NULL, *head = NULL;
skb               922 net/kcm/kcmsock.c 		skb = kcm_tx_msg(head)->last_skb;
skb               947 net/kcm/kcmsock.c 		skb = head;
skb               952 net/kcm/kcmsock.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               958 net/kcm/kcmsock.c 		int i = skb_shinfo(skb)->nr_frags;
skb               964 net/kcm/kcmsock.c 		if (!skb_can_coalesce(skb, i, pfrag->page,
skb               973 net/kcm/kcmsock.c 				if (head == skb)
skb               976 net/kcm/kcmsock.c 					skb->next = tskb;
skb               978 net/kcm/kcmsock.c 				skb = tskb;
skb               979 net/kcm/kcmsock.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               991 net/kcm/kcmsock.c 		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
skb              1000 net/kcm/kcmsock.c 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb              1002 net/kcm/kcmsock.c 			skb_fill_page_desc(skb, i, pfrag->page,
skb              1009 net/kcm/kcmsock.c 		if (head != skb) {
skb              1052 net/kcm/kcmsock.c 			kcm_tx_msg(head)->last_skb = skb;
skb              1087 net/kcm/kcmsock.c 	struct sk_buff *skb;
skb              1089 net/kcm/kcmsock.c 	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
skb              1112 net/kcm/kcmsock.c 	return skb;
skb              1124 net/kcm/kcmsock.c 	struct sk_buff *skb;
skb              1130 net/kcm/kcmsock.c 	skb = kcm_wait_data(sk, flags, timeo, &err);
skb              1131 net/kcm/kcmsock.c 	if (!skb)
skb              1136 net/kcm/kcmsock.c 	stm = strp_msg(skb);
skb              1141 net/kcm/kcmsock.c 	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
skb              1161 net/kcm/kcmsock.c 			skb_unlink(skb, &sk->sk_receive_queue);
skb              1162 net/kcm/kcmsock.c 			kfree_skb(skb);
skb              1182 net/kcm/kcmsock.c 	struct sk_buff *skb;
skb              1190 net/kcm/kcmsock.c 	skb = kcm_wait_data(sk, flags, timeo, &err);
skb              1191 net/kcm/kcmsock.c 	if (!skb)
skb              1196 net/kcm/kcmsock.c 	stm = strp_msg(skb);
skb              1201 net/kcm/kcmsock.c 	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
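
kcm_recvmsg above reads strparser-framed messages: strp_msg(skb) exposes the message offset and remaining length stored in skb->cb, and the skb is only unlinked once the frame is fully consumed. A simplified, lock-free sketch of that consume step for the non-datagram case; demo_recv_one is hypothetical and skips the blocking wait in kcm_wait_data:

    #include <net/sock.h>
    #include <net/strparser.h>

    static int demo_recv_one(struct sock *sk, struct msghdr *msg,
                             size_t len, int flags)
    {
            struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
            struct strp_msg *stm;
            int copied, err;

            if (!skb)
                    return -EAGAIN;

            stm = strp_msg(skb);            /* offset/len live in skb->cb */
            copied = len;
            if (copied > stm->full_len)
                    copied = stm->full_len;

            err = skb_copy_datagram_msg(skb, stm->offset, msg, copied);
            if (err < 0)
                    return err;

            if (!(flags & MSG_PEEK)) {
                    if (copied < stm->full_len) {
                            /* partial read: remember the progress */
                            stm->offset += copied;
                            stm->full_len -= copied;
                    } else {
                            msg->msg_flags |= MSG_EOR;
                            skb_unlink(skb, &sk->sk_receive_queue);
                            kfree_skb(skb);
                    }
            }
            return copied;
    }
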
skb                60 net/key/af_key.c 		struct sk_buff	*skb;
skb                84 net/key/af_key.c 		if (pfk->dump.skb) {
skb                85 net/key/af_key.c 			kfree_skb(pfk->dump.skb);
skb                86 net/key/af_key.c 			pfk->dump.skb = NULL;
skb               195 net/key/af_key.c static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
skb               203 net/key/af_key.c 	skb = skb_clone(skb, allocation);
skb               205 net/key/af_key.c 	if (skb) {
skb               206 net/key/af_key.c 		skb_set_owner_r(skb, sk);
skb               207 net/key/af_key.c 		skb_queue_tail(&sk->sk_receive_queue, skb);
skb               219 net/key/af_key.c static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
skb               230 net/key/af_key.c 	if (!skb)
skb               243 net/key/af_key.c 			pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
skb               258 net/key/af_key.c 		err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
skb               268 net/key/af_key.c 		err = pfkey_broadcast_one(skb, allocation, one_sk);
skb               270 net/key/af_key.c 	kfree_skb(skb);
skb               291 net/key/af_key.c 	if (pfk->dump.skb) {
skb               297 net/key/af_key.c 		hdr = (struct sadb_msg *) pfk->dump.skb->data;
skb               300 net/key/af_key.c 		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
skb               302 net/key/af_key.c 		pfk->dump.skb = NULL;
skb               320 net/key/af_key.c 	struct sk_buff *skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_KERNEL);
skb               323 net/key/af_key.c 	if (!skb)
skb               338 net/key/af_key.c 	hdr = skb_put(skb, sizeof(struct sadb_msg));
skb               344 net/key/af_key.c 	pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
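
pfkey_broadcast_one above delivers a message to one listener by cloning it, so each socket charges its own copy against its receive buffer while the caller keeps the original for the next recipient. A sketch of the per-recipient step; the rcvbuf gate here is simplified relative to the file's own threshold:

    #include <net/sock.h>

    static int bcast_one(struct sk_buff *skb, struct sock *sk)
    {
            struct sk_buff *nskb;

            if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                    return -ENOBUFS;        /* listener is backed up */

            nskb = skb_clone(skb, GFP_ATOMIC);      /* private copy */
            if (!nskb)
                    return -ENOMEM;

            skb_set_owner_r(nskb, sk);      /* charge this socket's rmem */
            skb_queue_tail(&sk->sk_receive_queue, nskb);
            sk->sk_data_ready(sk);
            return 0;
    }
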
skb               511 net/key/af_key.c static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void **ext_hdrs)
skb               514 net/key/af_key.c 	int len = skb->len;
skb               767 net/key/af_key.c 	struct sk_buff *skb;
skb               829 net/key/af_key.c 	skb =  alloc_skb(size + 16, GFP_ATOMIC);
skb               830 net/key/af_key.c 	if (skb == NULL)
skb               834 net/key/af_key.c 	hdr = skb_put(skb, sizeof(struct sadb_msg));
skb               839 net/key/af_key.c 	sa = skb_put(skb, sizeof(struct sadb_sa));
skb               886 net/key/af_key.c 		lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
skb               897 net/key/af_key.c 		lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
skb               907 net/key/af_key.c 	lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
skb               916 net/key/af_key.c 	addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
skb               934 net/key/af_key.c 	addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
skb               950 net/key/af_key.c 		addr = skb_put(skb,
skb               968 net/key/af_key.c 		key = skb_put(skb, sizeof(struct sadb_key) + auth_key_size);
skb               978 net/key/af_key.c 		key = skb_put(skb, sizeof(struct sadb_key) + encrypt_key_size);
skb               989 net/key/af_key.c 	sa2 = skb_put(skb, sizeof(struct sadb_x_sa2));
skb               993 net/key/af_key.c 		kfree_skb(skb);
skb              1007 net/key/af_key.c 		n_type = skb_put(skb, sizeof(*n_type));
skb              1016 net/key/af_key.c 		n_port = skb_put(skb, sizeof(*n_port));
skb              1023 net/key/af_key.c 		n_port = skb_put(skb, sizeof(*n_port));
skb              1032 net/key/af_key.c 		sec_ctx = skb_put(skb,
skb              1044 net/key/af_key.c 	return skb;
skb              1050 net/key/af_key.c 	struct sk_buff *skb;
skb              1052 net/key/af_key.c 	skb = __pfkey_xfrm_state2msg(x, 1, 3);
skb              1054 net/key/af_key.c 	return skb;
skb              1304 net/key/af_key.c static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              1309 net/key/af_key.c static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              1414 net/key/af_key.c static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              1479 net/key/af_key.c 	struct sk_buff *skb;
skb              1482 net/key/af_key.c 	skb = pfkey_xfrm_state2msg(x);
skb              1484 net/key/af_key.c 	if (IS_ERR(skb))
skb              1485 net/key/af_key.c 		return PTR_ERR(skb);
skb              1487 net/key/af_key.c 	hdr = (struct sadb_msg *) skb->data;
skb              1496 net/key/af_key.c 	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
skb              1501 net/key/af_key.c static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              1538 net/key/af_key.c static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              1578 net/key/af_key.c static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              1617 net/key/af_key.c 	struct sk_buff *skb;
skb              1635 net/key/af_key.c 	skb = alloc_skb(len + 16, allocation);
skb              1636 net/key/af_key.c 	if (!skb)
skb              1639 net/key/af_key.c 	hdr = skb_put(skb, sizeof(*hdr));
skb              1648 net/key/af_key.c 		sp = skb_put(skb, auth_len);
skb              1669 net/key/af_key.c 		sp = skb_put(skb, enc_len);
skb              1687 net/key/af_key.c 	return skb;
skb              1690 net/key/af_key.c static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              1721 net/key/af_key.c 	struct sk_buff *skb;
skb              1724 net/key/af_key.c 	skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
skb              1725 net/key/af_key.c 	if (!skb)
skb              1728 net/key/af_key.c 	hdr = skb_put_data(skb, ihdr, sizeof(struct sadb_msg));
skb              1732 net/key/af_key.c 	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
skb              1738 net/key/af_key.c 	struct sk_buff *skb;
skb              1741 net/key/af_key.c 	skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
skb              1742 net/key/af_key.c 	if (!skb)
skb              1744 net/key/af_key.c 	hdr = skb_put(skb, sizeof(struct sadb_msg));
skb              1754 net/key/af_key.c 	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
skb              1759 net/key/af_key.c static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              1810 net/key/af_key.c 	if (pfk->dump.skb)
skb              1811 net/key/af_key.c 		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
skb              1813 net/key/af_key.c 	pfk->dump.skb = out_skb;
skb              1831 net/key/af_key.c static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              1877 net/key/af_key.c static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              1889 net/key/af_key.c 	if (reset_errno && skb_cloned(skb))
skb              1890 net/key/af_key.c 		skb = skb_copy(skb, GFP_KERNEL);
skb              1892 net/key/af_key.c 		skb = skb_clone(skb, GFP_KERNEL);
skb              1894 net/key/af_key.c 	if (reset_errno && skb) {
skb              1895 net/key/af_key.c 		struct sadb_msg *new_hdr = (struct sadb_msg *) skb->data;
skb              1899 net/key/af_key.c 	pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
skb              2043 net/key/af_key.c 	struct sk_buff *skb;
skb              2048 net/key/af_key.c 	skb =  alloc_skb(size + 16, GFP_ATOMIC);
skb              2049 net/key/af_key.c 	if (skb == NULL)
skb              2052 net/key/af_key.c 	return skb;
skb              2055 net/key/af_key.c static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *xp, int dir)
skb              2071 net/key/af_key.c 	hdr = skb_put(skb, sizeof(struct sadb_msg));
skb              2075 net/key/af_key.c 	addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
skb              2090 net/key/af_key.c 	addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
skb              2104 net/key/af_key.c 	lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
skb              2113 net/key/af_key.c 	lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
skb              2122 net/key/af_key.c 	lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
skb              2131 net/key/af_key.c 	pol = skb_put(skb, sizeof(struct sadb_x_policy));
skb              2159 net/key/af_key.c 		rq = skb_put(skb, req_size);
skb              2189 net/key/af_key.c 		sec_ctx = skb_put(skb, ctx_size);
skb              2236 net/key/af_key.c static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              2351 net/key/af_key.c static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              2537 net/key/af_key.c static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
skb              2629 net/key/af_key.c static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
skb              2637 net/key/af_key.c static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              2706 net/key/af_key.c 	if (pfk->dump.skb)
skb              2707 net/key/af_key.c 		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
skb              2709 net/key/af_key.c 	pfk->dump.skb = out_skb;
skb              2727 net/key/af_key.c static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              2769 net/key/af_key.c static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
skb              2793 net/key/af_key.c typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
skb              2821 net/key/af_key.c static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr)
skb              2826 net/key/af_key.c 	pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
skb              2830 net/key/af_key.c 	err = parse_exthdrs(skb, hdr, ext_hdrs);
skb              2834 net/key/af_key.c 			err = pfkey_funcs[hdr->sadb_msg_type](sk, skb, hdr, ext_hdrs);
skb              2839 net/key/af_key.c static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp)
skb              2843 net/key/af_key.c 	if (skb->len < sizeof(*hdr)) {
skb              2846 net/key/af_key.c 		hdr = (struct sadb_msg *) skb->data;
skb              2853 net/key/af_key.c 		} else if (hdr->sadb_msg_len != (skb->len /
skb              2934 net/key/af_key.c static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
skb              2939 net/key/af_key.c 	p = skb_put(skb, sizeof(struct sadb_prop));
skb              2955 net/key/af_key.c 			c = skb_put_zero(skb, sizeof(struct sadb_comb));
skb              2968 net/key/af_key.c static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
skb              2973 net/key/af_key.c 	p = skb_put(skb, sizeof(struct sadb_prop));
skb              2999 net/key/af_key.c 			c = skb_put(skb, sizeof(struct sadb_comb));
skb              3134 net/key/af_key.c 	struct sk_buff *skb;
skb              3163 net/key/af_key.c 	skb =  alloc_skb(size + 16, GFP_ATOMIC);
skb              3164 net/key/af_key.c 	if (skb == NULL)
skb              3167 net/key/af_key.c 	hdr = skb_put(skb, sizeof(struct sadb_msg));
skb              3178 net/key/af_key.c 	addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
skb              3193 net/key/af_key.c 	addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
skb              3207 net/key/af_key.c 	pol = skb_put(skb, sizeof(struct sadb_x_policy));
skb              3218 net/key/af_key.c 		dump_ah_combs(skb, t);
skb              3220 net/key/af_key.c 		dump_esp_combs(skb, t);
skb              3224 net/key/af_key.c 		sec_ctx = skb_put(skb,
skb              3236 net/key/af_key.c 	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
skb              3330 net/key/af_key.c 	struct sk_buff *skb;
skb              3364 net/key/af_key.c 	skb =  alloc_skb(size + 16, GFP_ATOMIC);
skb              3365 net/key/af_key.c 	if (skb == NULL)
skb              3368 net/key/af_key.c 	hdr = skb_put(skb, sizeof(struct sadb_msg));
skb              3379 net/key/af_key.c 	sa = skb_put(skb, sizeof(struct sadb_sa));
skb              3390 net/key/af_key.c 	addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
skb              3405 net/key/af_key.c 	n_port = skb_put(skb, sizeof(*n_port));
skb              3412 net/key/af_key.c 	addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
skb              3427 net/key/af_key.c 	n_port = skb_put(skb, sizeof(*n_port));
skb              3433 net/key/af_key.c 	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
skb              3438 net/key/af_key.c static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
skb              3442 net/key/af_key.c 	addr = skb_put(skb, sizeof(struct sadb_address) + sasize);
skb              3469 net/key/af_key.c static int set_sadb_kmaddress(struct sk_buff *skb, const struct xfrm_kmaddress *k)
skb              3480 net/key/af_key.c 	kma = skb_put_zero(skb, size_req);
skb              3493 net/key/af_key.c static int set_ipsecrequest(struct sk_buff *skb,
skb              3506 net/key/af_key.c 	rq = skb_put_zero(skb, size_req);
skb              3532 net/key/af_key.c 	struct sk_buff *skb;
skb              3571 net/key/af_key.c 	skb = alloc_skb(size, GFP_ATOMIC);
skb              3572 net/key/af_key.c 	if (skb == NULL)
skb              3575 net/key/af_key.c 	hdr = skb_put(skb, sizeof(struct sadb_msg));
skb              3586 net/key/af_key.c 	if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
skb              3590 net/key/af_key.c 	set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
skb              3593 net/key/af_key.c 	set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_DST, sel);
skb              3596 net/key/af_key.c 	pol = skb_put(skb, sizeof(struct sadb_x_policy));
skb              3610 net/key/af_key.c 		if (set_ipsecrequest(skb, mp->proto, mode,
skb              3617 net/key/af_key.c 		if (set_ipsecrequest(skb, mp->proto, mode,
skb              3625 net/key/af_key.c 	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
skb              3630 net/key/af_key.c 	kfree_skb(skb);
skb              3646 net/key/af_key.c 	struct sk_buff *skb = NULL;
skb              3660 net/key/af_key.c 	skb = alloc_skb(len, GFP_KERNEL);
skb              3661 net/key/af_key.c 	if (skb == NULL)
skb              3665 net/key/af_key.c 	if (memcpy_from_msg(skb_put(skb,len), msg, len))
skb              3668 net/key/af_key.c 	hdr = pfkey_get_base_msg(skb, &err);
skb              3673 net/key/af_key.c 	err = pfkey_process(sk, skb, hdr);
skb              3679 net/key/af_key.c 	kfree_skb(skb);
skb              3689 net/key/af_key.c 	struct sk_buff *skb;
skb              3696 net/key/af_key.c 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
skb              3697 net/key/af_key.c 	if (skb == NULL)
skb              3700 net/key/af_key.c 	copied = skb->len;
skb              3706 net/key/af_key.c 	skb_reset_transport_header(skb);
skb              3707 net/key/af_key.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb              3711 net/key/af_key.c 	sock_recv_ts_and_drops(msg, sk, skb);
skb              3713 net/key/af_key.c 	err = (flags & MSG_TRUNC) ? skb->len : copied;
skb              3720 net/key/af_key.c 	skb_free_datagram(sk, skb);
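
Nearly every af_key.c entry above follows one build pattern: alloc_skb() sized for the whole PF_KEY message, then the base sadb_msg and each extension appended with skb_put(), with lengths counted in 64-bit words. A minimal sketch building the smallest possible message, a bare header as the flush notification above sends; pfkey_build_hdr is hypothetical:

    #include <linux/pfkeyv2.h>
    #include <linux/skbuff.h>

    static struct sk_buff *pfkey_build_hdr(void)
    {
            struct sk_buff *skb;
            struct sadb_msg *hdr;

            skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
            if (!skb)
                    return NULL;

            hdr = skb_put_zero(skb, sizeof(struct sadb_msg));
            hdr->sadb_msg_version = PF_KEY_V2;
            hdr->sadb_msg_type = SADB_FLUSH;
            hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
            /* PF_KEY counts lengths in 64-bit words */
            hdr->sadb_msg_len = sizeof(struct sadb_msg) / sizeof(u64);
            return skb;
    }

Extensions follow the same shape: each skb_put() returns the new tail, and each extension records its own length in 64-bit words so parse_exthdrs() can walk the chain.
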
skb                97 net/l2tp/l2tp_core.c #define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
skb               365 net/l2tp/l2tp_core.c static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
skb               369 net/l2tp/l2tp_core.c 	u32 ns = L2TP_SKB_CB(skb)->ns;
skb               374 net/l2tp/l2tp_core.c 			__skb_queue_before(&session->reorder_q, skbp, skb);
skb               384 net/l2tp/l2tp_core.c 	__skb_queue_tail(&session->reorder_q, skb);
skb               392 net/l2tp/l2tp_core.c static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
skb               395 net/l2tp/l2tp_core.c 	int length = L2TP_SKB_CB(skb)->length;
skb               400 net/l2tp/l2tp_core.c 	skb_orphan(skb);
skb               407 net/l2tp/l2tp_core.c 	if (L2TP_SKB_CB(skb)->has_seq) {
skb               418 net/l2tp/l2tp_core.c 		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
skb               420 net/l2tp/l2tp_core.c 		kfree_skb(skb);
skb               428 net/l2tp/l2tp_core.c 	struct sk_buff *skb;
skb               437 net/l2tp/l2tp_core.c 	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
skb               438 net/l2tp/l2tp_core.c 		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
skb               443 net/l2tp/l2tp_core.c 				 session->name, L2TP_SKB_CB(skb)->ns,
skb               444 net/l2tp/l2tp_core.c 				 L2TP_SKB_CB(skb)->length, session->nr,
skb               447 net/l2tp/l2tp_core.c 			__skb_unlink(skb, &session->reorder_q);
skb               448 net/l2tp/l2tp_core.c 			kfree_skb(skb);
skb               452 net/l2tp/l2tp_core.c 		if (L2TP_SKB_CB(skb)->has_seq) {
skb               457 net/l2tp/l2tp_core.c 					 L2TP_SKB_CB(skb)->ns);
skb               459 net/l2tp/l2tp_core.c 				session->nr = L2TP_SKB_CB(skb)->ns;
skb               461 net/l2tp/l2tp_core.c 			if (L2TP_SKB_CB(skb)->ns != session->nr) {
skb               464 net/l2tp/l2tp_core.c 					 session->name, L2TP_SKB_CB(skb)->ns,
skb               465 net/l2tp/l2tp_core.c 					 L2TP_SKB_CB(skb)->length, session->nr,
skb               470 net/l2tp/l2tp_core.c 		__skb_unlink(skb, &session->reorder_q);
skb               476 net/l2tp/l2tp_core.c 		l2tp_recv_dequeue_skb(session, skb);
skb               499 net/l2tp/l2tp_core.c static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
skb               501 net/l2tp/l2tp_core.c 	if (!l2tp_seq_check_rx_window(session, L2TP_SKB_CB(skb)->ns)) {
skb               507 net/l2tp/l2tp_core.c 			 session->name, L2TP_SKB_CB(skb)->ns,
skb               508 net/l2tp/l2tp_core.c 			 L2TP_SKB_CB(skb)->length, session->nr);
skb               516 net/l2tp/l2tp_core.c 		l2tp_recv_queue_skb(session, skb);
skb               525 net/l2tp/l2tp_core.c 	if (L2TP_SKB_CB(skb)->ns == session->nr) {
skb               526 net/l2tp/l2tp_core.c 		skb_queue_tail(&session->reorder_q, skb);
skb               528 net/l2tp/l2tp_core.c 		u32 nr_oos = L2TP_SKB_CB(skb)->ns;
skb               547 net/l2tp/l2tp_core.c 				 session->name, L2TP_SKB_CB(skb)->ns,
skb               548 net/l2tp/l2tp_core.c 				 L2TP_SKB_CB(skb)->length, session->nr,
skb               552 net/l2tp/l2tp_core.c 		skb_queue_tail(&session->reorder_q, skb);
skb               621 net/l2tp/l2tp_core.c void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
skb               650 net/l2tp/l2tp_core.c 	L2TP_SKB_CB(skb)->has_seq = 0;
skb               659 net/l2tp/l2tp_core.c 			L2TP_SKB_CB(skb)->ns = ns;
skb               660 net/l2tp/l2tp_core.c 			L2TP_SKB_CB(skb)->has_seq = 1;
skb               673 net/l2tp/l2tp_core.c 			L2TP_SKB_CB(skb)->ns = ns;
skb               674 net/l2tp/l2tp_core.c 			L2TP_SKB_CB(skb)->has_seq = 1;
skb               683 net/l2tp/l2tp_core.c 	if (L2TP_SKB_CB(skb)->has_seq) {
skb               739 net/l2tp/l2tp_core.c 	if (!pskb_may_pull(skb, offset))
skb               742 net/l2tp/l2tp_core.c 	__skb_pull(skb, offset);
skb               748 net/l2tp/l2tp_core.c 	L2TP_SKB_CB(skb)->length = length;
skb               749 net/l2tp/l2tp_core.c 	L2TP_SKB_CB(skb)->expires = jiffies +
skb               755 net/l2tp/l2tp_core.c 	if (L2TP_SKB_CB(skb)->has_seq) {
skb               756 net/l2tp/l2tp_core.c 		if (l2tp_recv_data_seq(session, skb))
skb               763 net/l2tp/l2tp_core.c 		skb_queue_tail(&session->reorder_q, skb);
skb               773 net/l2tp/l2tp_core.c 	kfree_skb(skb);
skb               781 net/l2tp/l2tp_core.c 	struct sk_buff *skb = NULL;
skb               784 net/l2tp/l2tp_core.c 	while ((skb = skb_dequeue(&session->reorder_q))) {
skb               786 net/l2tp/l2tp_core.c 		kfree_skb(skb);
skb               797 net/l2tp/l2tp_core.c static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
skb               809 net/l2tp/l2tp_core.c 	__skb_pull(skb, sizeof(struct udphdr));
skb               812 net/l2tp/l2tp_core.c 	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
skb               815 net/l2tp/l2tp_core.c 			  tunnel->name, skb->len);
skb               821 net/l2tp/l2tp_core.c 		length = min(32u, skb->len);
skb               822 net/l2tp/l2tp_core.c 		if (!pskb_may_pull(skb, length))
skb               826 net/l2tp/l2tp_core.c 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
skb               830 net/l2tp/l2tp_core.c 	optr = ptr = skb->data;
skb               845 net/l2tp/l2tp_core.c 	length = skb->len;
skb               889 net/l2tp/l2tp_core.c 	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
skb               892 net/l2tp/l2tp_core.c 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
skb               899 net/l2tp/l2tp_core.c 	__skb_push(skb, sizeof(struct udphdr));
skb               910 net/l2tp/l2tp_core.c int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb               919 net/l2tp/l2tp_core.c 		 tunnel->name, skb->len);
skb               921 net/l2tp/l2tp_core.c 	if (l2tp_udp_recv_core(tunnel, skb))
skb              1007 net/l2tp/l2tp_core.c static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
skb              1011 net/l2tp/l2tp_core.c 	unsigned int len = skb->len;
skb              1024 net/l2tp/l2tp_core.c 		unsigned char *datap = skb->data + uhlen;
skb              1032 net/l2tp/l2tp_core.c 	skb->ignore_df = 1;
skb              1035 net/l2tp/l2tp_core.c 		error = inet6_csk_xmit(tunnel->sock, skb, NULL);
skb              1038 net/l2tp/l2tp_core.c 		error = ip_queue_xmit(tunnel->sock, skb, fl);
skb              1055 net/l2tp/l2tp_core.c int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
skb              1057 net/l2tp/l2tp_core.c 	int data_len = skb->len;
skb              1074 net/l2tp/l2tp_core.c 	if (skb_cow_head(skb, headroom)) {
skb              1075 net/l2tp/l2tp_core.c 		kfree_skb(skb);
skb              1080 net/l2tp/l2tp_core.c 	session->build_header(session, __skb_push(skb, hdr_len));
skb              1083 net/l2tp/l2tp_core.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb              1084 net/l2tp/l2tp_core.c 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
skb              1086 net/l2tp/l2tp_core.c 	nf_reset_ct(skb);
skb              1090 net/l2tp/l2tp_core.c 		kfree_skb(skb);
skb              1099 net/l2tp/l2tp_core.c 		kfree_skb(skb);
skb              1105 net/l2tp/l2tp_core.c 	skb_dst_drop(skb);
skb              1106 net/l2tp/l2tp_core.c 	skb_dst_set(skb, sk_dst_check(sk, 0));
skb              1113 net/l2tp/l2tp_core.c 		__skb_push(skb, sizeof(*uh));
skb              1114 net/l2tp/l2tp_core.c 		skb_reset_transport_header(skb);
skb              1115 net/l2tp/l2tp_core.c 		uh = udp_hdr(skb);
skb              1125 net/l2tp/l2tp_core.c 				      skb, &inet6_sk(sk)->saddr,
skb              1129 net/l2tp/l2tp_core.c 		udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
skb              1137 net/l2tp/l2tp_core.c 	l2tp_xmit_core(session, skb, fl, data_len);
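
l2tp_xmit_skb above grows headroom with skb_cow_head and then builds the outer headers innermost-first: session->build_header() fills the L2TP header just pushed onto the front, and for UDP encapsulation a later __skb_push prepends the UDP header ahead of it. A toy userspace sketch of that push ordering, with strings standing in for real headers:

#include <stdio.h>
#include <string.h>

#define HEADROOM 32

int main(void)
{
	unsigned char frame[HEADROOM + 16];
	unsigned char *head = frame + HEADROOM;
	size_t len = 5;

	memcpy(head, "hello", 5);		/* the payload */

	head -= 6; len += 6;			/* push the L2TP header first */
	memcpy(head, "[l2tp]", 6);

	head -= 5; len += 5;			/* then the outer UDP header */
	memcpy(head, "[udp]", 5);

	printf("%.*s\n", (int)len, head);	/* [udp][l2tp]hello */
	return 0;
}
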
skb               116 net/l2tp/l2tp_core.h 	void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
skb               221 net/l2tp/l2tp_core.h void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
skb               224 net/l2tp/l2tp_core.h int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
skb               227 net/l2tp/l2tp_core.h int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
skb               301 net/l2tp/l2tp_core.h static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
skb               309 net/l2tp/l2tp_core.h 		if (!pskb_may_pull(skb, off + opt_len))
skb               312 net/l2tp/l2tp_core.h 		if (skb->data != *optr) {
skb               313 net/l2tp/l2tp_core.h 			*optr = skb->data;
skb               314 net/l2tp/l2tp_core.h 			*ptr = skb->data + off;
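
l2tp_v3_ensure_opt_in_linear above encodes a subtle rule about pskb_may_pull(): pulling more bytes into the linear area may reallocate the skb head, so cached pointers into skb->data (*ptr and *optr here) go stale and must be rebuilt from the new skb->data plus a saved offset. A userspace model of the revalidation, with realloc standing in for the head reallocation:

#include <stdlib.h>
#include <assert.h>

struct model_skb {
	unsigned char *data;
	size_t linear;		/* bytes valid in the linear area */
};

/* like pskb_may_pull(): may move data when it has to grow */
static int model_may_pull(struct model_skb *skb, size_t need)
{
	unsigned char *n;

	if (need <= skb->linear)
		return 1;
	n = realloc(skb->data, need);
	if (!n)
		return 0;
	skb->data = n;
	skb->linear = need;
	return 1;
}

int main(void)
{
	struct model_skb skb = { calloc(1, 16), 16 };
	unsigned char *ptr;
	size_t off;

	assert(skb.data);
	ptr = skb.data + 4;		/* cached parse cursor */
	off = ptr - skb.data;		/* remember it as an offset */

	if (!model_may_pull(&skb, 4096))
		return 1;

	ptr = skb.data + off;		/* revalidate: data may have moved */
	assert(ptr == skb.data + 4);
	free(skb.data);
	return 0;
}
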
skb                75 net/l2tp/l2tp_eth.c static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb                79 net/l2tp/l2tp_eth.c 	unsigned int len = skb->len;
skb                80 net/l2tp/l2tp_eth.c 	int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
skb               127 net/l2tp/l2tp_eth.c static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
skb               136 net/l2tp/l2tp_eth.c 		length = min(32u, skb->len);
skb               137 net/l2tp/l2tp_eth.c 		if (!pskb_may_pull(skb, length))
skb               141 net/l2tp/l2tp_eth.c 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
skb               144 net/l2tp/l2tp_eth.c 	if (!pskb_may_pull(skb, ETH_HLEN))
skb               147 net/l2tp/l2tp_eth.c 	secpath_reset(skb);
skb               150 net/l2tp/l2tp_eth.c 	skb->ip_summed = CHECKSUM_NONE;
skb               152 net/l2tp/l2tp_eth.c 	skb_dst_drop(skb);
skb               153 net/l2tp/l2tp_eth.c 	nf_reset_ct(skb);
skb               161 net/l2tp/l2tp_eth.c 	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
skb               174 net/l2tp/l2tp_eth.c 	kfree_skb(skb);
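
l2tp_eth_dev_recv above scrubs tunnel state off the skb (secpath, dst, conntrack), marks the checksum as unverified, and injects the frame into the virtual interface with dev_forward_skb(); only a successful forward is counted as received traffic. A toy model of that accounting branch, with hypothetical names:

#include <stdio.h>

struct model_stats {
	unsigned long rx_packets, rx_bytes, rx_errors;
};

enum { RX_SUCCESS, RX_DROP };

static void account_rx(struct model_stats *st, int verdict, unsigned int len)
{
	if (verdict == RX_SUCCESS) {
		st->rx_packets++;
		st->rx_bytes += len;
	} else {
		st->rx_errors++;	/* forward failed: count an error */
	}
}

int main(void)
{
	struct model_stats st = { 0, 0, 0 };

	account_rx(&st, RX_SUCCESS, 1500);
	account_rx(&st, RX_DROP, 60);
	printf("%lu ok / %lu err\n", st.rx_packets, st.rx_errors);
	return 0;
}
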
skb               112 net/l2tp/l2tp_ip.c static int l2tp_ip_recv(struct sk_buff *skb)
skb               114 net/l2tp/l2tp_ip.c 	struct net *net = dev_net(skb->dev);
skb               124 net/l2tp/l2tp_ip.c 	if (!pskb_may_pull(skb, 4))
skb               128 net/l2tp/l2tp_ip.c 	optr = ptr = skb->data;
skb               137 net/l2tp/l2tp_ip.c 		__skb_pull(skb, 4);
skb               152 net/l2tp/l2tp_ip.c 		length = min(32u, skb->len);
skb               153 net/l2tp/l2tp_ip.c 		if (!pskb_may_pull(skb, length))
skb               157 net/l2tp/l2tp_ip.c 		optr = ptr = skb->data;
skb               163 net/l2tp/l2tp_ip.c 	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
skb               166 net/l2tp/l2tp_ip.c 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
skb               173 net/l2tp/l2tp_ip.c 	if (!pskb_may_pull(skb, 12))
skb               176 net/l2tp/l2tp_ip.c 	if ((skb->data[0] & 0xc0) != 0xc0)
skb               179 net/l2tp/l2tp_ip.c 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
skb               180 net/l2tp/l2tp_ip.c 	iph = (struct iphdr *)skb_network_header(skb);
skb               183 net/l2tp/l2tp_ip.c 	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
skb               192 net/l2tp/l2tp_ip.c 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
skb               195 net/l2tp/l2tp_ip.c 	nf_reset_ct(skb);
skb               197 net/l2tp/l2tp_ip.c 	return sk_receive_skb(sk, skb, 1);
skb               207 net/l2tp/l2tp_ip.c 	kfree_skb(skb);
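
l2tp_ip_recv above splits traffic on the first 32-bit word of the L2TPv3-over-IP header: a nonzero value is a session ID selecting a data session, while zero marks a control packet. After the leading zero word has been pulled off (the __skb_pull(skb, 4) above), the control header's first byte must carry the T and L bits (the 0xc0 mask) and the tunnel ID sits four bytes further in. A userspace classifier sketch of the same test, with a hand-rolled big-endian read and hypothetical names:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* 1 = control (id is the tunnel ID), 0 = data (id is the session ID),
 * -1 = malformed */
static int classify(const uint8_t *p, size_t len, uint32_t *id)
{
	if (len < 4)
		return -1;
	*id = be32(p);
	if (*id != 0)
		return 0;		/* data packet for this session */
	p += 4;				/* strip the zero session-ID word */
	len -= 4;
	if (len < 12 || (p[0] & 0xc0) != 0xc0)
		return -1;		/* T and L bits must both be set */
	*id = be32(p + 4);		/* control connection (tunnel) ID */
	return 1;
}

int main(void)
{
	uint8_t ctrl[16] = { 0, 0, 0, 0, 0xc8, 0x03, 0, 16, 0, 0, 0, 42 };
	uint32_t id;
	int r = classify(ctrl, sizeof(ctrl), &id);

	printf("%d id=%u\n", r, (unsigned int)id);	/* 1 id=42 */
	return 0;
}
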
skb               250 net/l2tp/l2tp_ip.c 	struct sk_buff *skb;
skb               253 net/l2tp/l2tp_ip.c 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
skb               254 net/l2tp/l2tp_ip.c 		kfree_skb(skb);
skb               386 net/l2tp/l2tp_ip.c static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
skb               391 net/l2tp/l2tp_ip.c 	rc = sock_queue_rcv_skb(sk, skb);
skb               399 net/l2tp/l2tp_ip.c 	kfree_skb(skb);
skb               408 net/l2tp/l2tp_ip.c 	struct sk_buff *skb;
skb               447 net/l2tp/l2tp_ip.c 	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
skb               449 net/l2tp/l2tp_ip.c 	if (!skb)
skb               453 net/l2tp/l2tp_ip.c 	skb_reserve(skb, 2 + NET_SKB_PAD);
skb               454 net/l2tp/l2tp_ip.c 	skb_reset_network_header(skb);
skb               455 net/l2tp/l2tp_ip.c 	skb_reserve(skb, sizeof(struct iphdr));
skb               456 net/l2tp/l2tp_ip.c 	skb_reset_transport_header(skb);
skb               459 net/l2tp/l2tp_ip.c 	*((__be32 *) skb_put(skb, 4)) = 0;
skb               462 net/l2tp/l2tp_ip.c 	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
skb               464 net/l2tp/l2tp_ip.c 		kfree_skb(skb);
skb               496 net/l2tp/l2tp_ip.c 			skb_dst_set(skb, &rt->dst);
skb               504 net/l2tp/l2tp_ip.c 	skb_dst_set_noref(skb, &rt->dst);
skb               508 net/l2tp/l2tp_ip.c 	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
skb               522 net/l2tp/l2tp_ip.c 	kfree_skb(skb);
skb               534 net/l2tp/l2tp_ip.c 	struct sk_buff *skb;
skb               539 net/l2tp/l2tp_ip.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb               540 net/l2tp/l2tp_ip.c 	if (!skb)
skb               543 net/l2tp/l2tp_ip.c 	copied = skb->len;
skb               549 net/l2tp/l2tp_ip.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               553 net/l2tp/l2tp_ip.c 	sock_recv_timestamp(msg, sk, skb);
skb               558 net/l2tp/l2tp_ip.c 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
skb               564 net/l2tp/l2tp_ip.c 		ip_cmsg_recv(msg, skb);
skb               566 net/l2tp/l2tp_ip.c 		copied = skb->len;
skb               568 net/l2tp/l2tp_ip.c 	skb_free_datagram(sk, skb);
skb               575 net/l2tp/l2tp_ip.c 	struct sk_buff *skb;
skb               584 net/l2tp/l2tp_ip.c 		skb = skb_peek(&sk->sk_receive_queue);
skb               585 net/l2tp/l2tp_ip.c 		amount = skb ? skb->len : 0;
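
The l2tp_ip_recvmsg path above follows standard datagram semantics: at most the caller's buffer length is copied, a longer datagram loses its tail and sets MSG_TRUNC in the returned flags, and passing MSG_TRUNC in the request flags makes the call report the datagram's true length. A short userspace model of that truncation rule:

#include <stdio.h>
#include <string.h>

#define MODEL_MSG_TRUNC 0x20	/* same value MSG_TRUNC has on Linux */

/* returns what recvmsg() would: copied bytes, or the full datagram
 * length when MSG_TRUNC was requested */
static size_t model_recv(const char *dgram, size_t dlen,
			 char *buf, size_t blen, int flags, int *out_flags)
{
	size_t copied = dlen;

	*out_flags = 0;
	if (copied > blen) {
		copied = blen;
		*out_flags |= MODEL_MSG_TRUNC;	/* the tail was dropped */
	}
	memcpy(buf, dgram, copied);
	return (flags & MODEL_MSG_TRUNC) ? dlen : copied;
}

int main(void)
{
	char buf[4];
	int of;
	size_t r = model_recv("abcdef", 6, buf, sizeof(buf), 0, &of);

	printf("ret=%zu truncated=%d\n", r, !!(of & MODEL_MSG_TRUNC));
	return 0;
}
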
skb               124 net/l2tp/l2tp_ip6.c static int l2tp_ip6_recv(struct sk_buff *skb)
skb               126 net/l2tp/l2tp_ip6.c 	struct net *net = dev_net(skb->dev);
skb               136 net/l2tp/l2tp_ip6.c 	if (!pskb_may_pull(skb, 4))
skb               140 net/l2tp/l2tp_ip6.c 	optr = ptr = skb->data;
skb               149 net/l2tp/l2tp_ip6.c 		__skb_pull(skb, 4);
skb               164 net/l2tp/l2tp_ip6.c 		length = min(32u, skb->len);
skb               165 net/l2tp/l2tp_ip6.c 		if (!pskb_may_pull(skb, length))
skb               169 net/l2tp/l2tp_ip6.c 		optr = ptr = skb->data;
skb               175 net/l2tp/l2tp_ip6.c 	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
skb               178 net/l2tp/l2tp_ip6.c 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
skb               185 net/l2tp/l2tp_ip6.c 	if (!pskb_may_pull(skb, 12))
skb               188 net/l2tp/l2tp_ip6.c 	if ((skb->data[0] & 0xc0) != 0xc0)
skb               191 net/l2tp/l2tp_ip6.c 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
skb               192 net/l2tp/l2tp_ip6.c 	iph = ipv6_hdr(skb);
skb               196 net/l2tp/l2tp_ip6.c 				    inet6_iif(skb), tunnel_id);
skb               204 net/l2tp/l2tp_ip6.c 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
skb               207 net/l2tp/l2tp_ip6.c 	nf_reset_ct(skb);
skb               209 net/l2tp/l2tp_ip6.c 	return sk_receive_skb(sk, skb, 1);
skb               219 net/l2tp/l2tp_ip6.c 	kfree_skb(skb);
skb               465 net/l2tp/l2tp_ip6.c static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
skb               470 net/l2tp/l2tp_ip6.c 	rc = sock_queue_rcv_skb(sk, skb);
skb               478 net/l2tp/l2tp_ip6.c 	kfree_skb(skb);
skb               484 net/l2tp/l2tp_ip6.c 	struct sk_buff *skb;
skb               488 net/l2tp/l2tp_ip6.c 	skb = skb_peek(&sk->sk_write_queue);
skb               489 net/l2tp/l2tp_ip6.c 	if (skb == NULL)
skb               492 net/l2tp/l2tp_ip6.c 	transhdr = (__be32 *)skb_transport_header(skb);
skb               682 net/l2tp/l2tp_ip6.c 	struct sk_buff *skb;
skb               690 net/l2tp/l2tp_ip6.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb               691 net/l2tp/l2tp_ip6.c 	if (!skb)
skb               694 net/l2tp/l2tp_ip6.c 	copied = skb->len;
skb               700 net/l2tp/l2tp_ip6.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               704 net/l2tp/l2tp_ip6.c 	sock_recv_timestamp(msg, sk, skb);
skb               710 net/l2tp/l2tp_ip6.c 		lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
skb               715 net/l2tp/l2tp_ip6.c 			lsa->l2tp_scope_id = inet6_iif(skb);
skb               720 net/l2tp/l2tp_ip6.c 		ip6_datagram_recv_ctl(sk, msg, skb);
skb               723 net/l2tp/l2tp_ip6.c 		copied = skb->len;
skb               725 net/l2tp/l2tp_ip6.c 	skb_free_datagram(sk, skb);
skb                39 net/l2tp/l2tp_netlink.c static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq,
skb                41 net/l2tp/l2tp_netlink.c static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
skb                74 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
skb               160 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
skb               262 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info)
skb               292 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info)
skb               323 net/l2tp/l2tp_netlink.c static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
skb               334 net/l2tp/l2tp_netlink.c 	hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
skb               338 net/l2tp/l2tp_netlink.c 	if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
skb               339 net/l2tp/l2tp_netlink.c 	    nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
skb               340 net/l2tp/l2tp_netlink.c 	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
skb               341 net/l2tp/l2tp_netlink.c 	    nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
skb               342 net/l2tp/l2tp_netlink.c 	    nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
skb               345 net/l2tp/l2tp_netlink.c 	nest = nla_nest_start_noflag(skb, L2TP_ATTR_STATS);
skb               349 net/l2tp/l2tp_netlink.c 	if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
skb               352 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
skb               355 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
skb               358 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
skb               361 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
skb               364 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
skb               367 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
skb               370 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
skb               374 net/l2tp/l2tp_netlink.c 	nla_nest_end(skb, nest);
skb               391 net/l2tp/l2tp_netlink.c 			if (nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx))
skb               397 net/l2tp/l2tp_netlink.c 			    nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_TX))
skb               400 net/l2tp/l2tp_netlink.c 			    nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_RX))
skb               405 net/l2tp/l2tp_netlink.c 		if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
skb               406 net/l2tp/l2tp_netlink.c 		    nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)))
skb               412 net/l2tp/l2tp_netlink.c 			if (nla_put_in6_addr(skb, L2TP_ATTR_IP6_SADDR,
skb               414 net/l2tp/l2tp_netlink.c 			    nla_put_in6_addr(skb, L2TP_ATTR_IP6_DADDR,
skb               419 net/l2tp/l2tp_netlink.c 		if (nla_put_in_addr(skb, L2TP_ATTR_IP_SADDR,
skb               421 net/l2tp/l2tp_netlink.c 		    nla_put_in_addr(skb, L2TP_ATTR_IP_DADDR,
skb               428 net/l2tp/l2tp_netlink.c 	genlmsg_end(skb, hdr);
skb               432 net/l2tp/l2tp_netlink.c 	genlmsg_cancel(skb, hdr);
skb               436 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
skb               480 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               484 net/l2tp/l2tp_netlink.c 	struct net *net = sock_net(skb->sk);
skb               491 net/l2tp/l2tp_netlink.c 		if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
skb               505 net/l2tp/l2tp_netlink.c 	return skb->len;
skb               508 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info)
skb               641 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info)
skb               667 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info)
skb               704 net/l2tp/l2tp_netlink.c static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
skb               711 net/l2tp/l2tp_netlink.c 	hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
skb               715 net/l2tp/l2tp_netlink.c 	if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
skb               716 net/l2tp/l2tp_netlink.c 	    nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
skb               717 net/l2tp/l2tp_netlink.c 	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
skb               718 net/l2tp/l2tp_netlink.c 	    nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
skb               720 net/l2tp/l2tp_netlink.c 	    nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
skb               721 net/l2tp/l2tp_netlink.c 	    nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype))
skb               725 net/l2tp/l2tp_netlink.c 	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
skb               727 net/l2tp/l2tp_netlink.c 	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
skb               730 net/l2tp/l2tp_netlink.c 	     nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
skb               732 net/l2tp/l2tp_netlink.c 	    nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
skb               733 net/l2tp/l2tp_netlink.c 	    nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
skb               734 net/l2tp/l2tp_netlink.c 	    nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
skb               736 net/l2tp/l2tp_netlink.c 	     nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
skb               738 net/l2tp/l2tp_netlink.c 	     nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT,
skb               742 net/l2tp/l2tp_netlink.c 	nest = nla_nest_start_noflag(skb, L2TP_ATTR_STATS);
skb               746 net/l2tp/l2tp_netlink.c 	if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
skb               749 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
skb               752 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
skb               755 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
skb               758 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
skb               761 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
skb               764 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
skb               767 net/l2tp/l2tp_netlink.c 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
skb               771 net/l2tp/l2tp_netlink.c 	nla_nest_end(skb, nest);
skb               773 net/l2tp/l2tp_netlink.c 	genlmsg_end(skb, hdr);
skb               777 net/l2tp/l2tp_netlink.c 	genlmsg_cancel(skb, hdr);
skb               781 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
skb               818 net/l2tp/l2tp_netlink.c static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               820 net/l2tp/l2tp_netlink.c 	struct net *net = sock_net(skb->sk);
skb               842 net/l2tp/l2tp_netlink.c 		if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
skb               858 net/l2tp/l2tp_netlink.c 	return skb->len;
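
l2tp_nl_tunnel_send and l2tp_nl_session_send above are the usual genetlink fill pattern: genlmsg_put() opens the message, a chain of nla_put_*() calls appends typed attributes, nla_nest_start_noflag()/nla_nest_end() bracket the stats block, and genlmsg_cancel() unwinds on failure. A freestanding sketch of the underlying TLV-with-nesting idea; this is not the real netlink wire format or API, only the shape of it:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct tlv_buf {
	uint8_t b[256];
	size_t len;
};

/* append one (length, type, value) record; no bounds checks, demo only */
static size_t tlv_put(struct tlv_buf *m, uint16_t type,
		      const void *val, uint16_t vlen)
{
	size_t off = m->len;
	uint16_t rlen = 4 + vlen;

	memcpy(m->b + m->len, &rlen, 2);
	memcpy(m->b + m->len + 2, &type, 2);
	if (vlen)
		memcpy(m->b + m->len + 4, val, vlen);
	m->len += 4 + (size_t)((vlen + 3u) & ~3u);	/* 4-byte aligned */
	return off;
}

static size_t nest_start(struct tlv_buf *m, uint16_t type)
{
	return tlv_put(m, type, NULL, 0);	/* length patched later */
}

static void nest_end(struct tlv_buf *m, size_t start)
{
	uint16_t rlen = (uint16_t)(m->len - start);

	memcpy(m->b + start, &rlen, 2);		/* now covers the children */
}

int main(void)
{
	struct tlv_buf m = { { 0 }, 0 };
	uint32_t conn_id = 1;
	uint64_t tx_packets = 1234;
	size_t nest;

	tlv_put(&m, 1 /* CONN_ID */, &conn_id, sizeof(conn_id));
	nest = nest_start(&m, 2 /* STATS */);
	tlv_put(&m, 3 /* TX_PACKETS */, &tx_packets, sizeof(tx_packets));
	nest_end(&m, nest);
	printf("message is %zu bytes\n", m.len);
	return 0;
}
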
skb               126 net/l2tp/l2tp_ppp.c static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
skb               184 net/l2tp/l2tp_ppp.c 	struct sk_buff *skb;
skb               192 net/l2tp/l2tp_ppp.c 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
skb               194 net/l2tp/l2tp_ppp.c 	if (!skb)
skb               197 net/l2tp/l2tp_ppp.c 	if (len > skb->len)
skb               198 net/l2tp/l2tp_ppp.c 		len = skb->len;
skb               199 net/l2tp/l2tp_ppp.c 	else if (len < skb->len)
skb               202 net/l2tp/l2tp_ppp.c 	err = skb_copy_datagram_msg(skb, 0, msg, len);
skb               206 net/l2tp/l2tp_ppp.c 	kfree_skb(skb);
skb               211 net/l2tp/l2tp_ppp.c static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
skb               231 net/l2tp/l2tp_ppp.c 	if (pskb_may_pull(skb, 2) && skb->data[0] == PPP_ALLSTATIONS &&
skb               232 net/l2tp/l2tp_ppp.c 	    skb->data[1] == PPP_UI)
skb               233 net/l2tp/l2tp_ppp.c 		skb_pull(skb, 2);
skb               243 net/l2tp/l2tp_ppp.c 		ppp_input(&po->chan, skb);
skb               249 net/l2tp/l2tp_ppp.c 		if (sock_queue_rcv_skb(sk, skb) < 0) {
skb               251 net/l2tp/l2tp_ppp.c 			kfree_skb(skb);
skb               261 net/l2tp/l2tp_ppp.c 	kfree_skb(skb);
skb               276 net/l2tp/l2tp_ppp.c 	struct sk_buff *skb;
skb               298 net/l2tp/l2tp_ppp.c 	skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
skb               302 net/l2tp/l2tp_ppp.c 	if (!skb)
skb               306 net/l2tp/l2tp_ppp.c 	skb_reserve(skb, NET_SKB_PAD);
skb               307 net/l2tp/l2tp_ppp.c 	skb_reset_network_header(skb);
skb               308 net/l2tp/l2tp_ppp.c 	skb_reserve(skb, sizeof(struct iphdr));
skb               309 net/l2tp/l2tp_ppp.c 	skb_reset_transport_header(skb);
skb               310 net/l2tp/l2tp_ppp.c 	skb_reserve(skb, uhlen);
skb               313 net/l2tp/l2tp_ppp.c 	skb->data[0] = PPP_ALLSTATIONS;
skb               314 net/l2tp/l2tp_ppp.c 	skb->data[1] = PPP_UI;
skb               315 net/l2tp/l2tp_ppp.c 	skb_put(skb, 2);
skb               318 net/l2tp/l2tp_ppp.c 	error = memcpy_from_msg(skb_put(skb, total_len), m, total_len);
skb               320 net/l2tp/l2tp_ppp.c 		kfree_skb(skb);
skb               325 net/l2tp/l2tp_ppp.c 	l2tp_xmit_skb(session, skb, session->hdr_len);
skb               352 net/l2tp/l2tp_ppp.c static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
skb               375 net/l2tp/l2tp_ppp.c 	if (skb_cow_head(skb, headroom))
skb               379 net/l2tp/l2tp_ppp.c 	__skb_push(skb, 2);
skb               380 net/l2tp/l2tp_ppp.c 	skb->data[0] = PPP_ALLSTATIONS;
skb               381 net/l2tp/l2tp_ppp.c 	skb->data[1] = PPP_UI;
skb               384 net/l2tp/l2tp_ppp.c 	l2tp_xmit_skb(session, skb, session->hdr_len);
skb               395 net/l2tp/l2tp_ppp.c 	kfree_skb(skb);
skb               492 net/l2tp/l2tp_ppp.c static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
skb               496 net/l2tp/l2tp_ppp.c 	rc = l2tp_udp_encap_recv(sk, skb);
skb               498 net/l2tp/l2tp_ppp.c 		kfree_skb(skb);
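
pppol2tp_recv and pppol2tp_xmit above handle the optional two-byte HDLC address/control prefix: the pair 0xff 0x03 (PPP_ALLSTATIONS, PPP_UI) is stripped on receive when present and unconditionally pushed back on transmit. A userspace sketch of the receive-side strip:

#include <assert.h>
#include <stddef.h>

#define PPP_ALLSTATIONS 0xff
#define PPP_UI          0x03

static size_t strip_ac(const unsigned char **p, size_t len)
{
	if (len >= 2 && (*p)[0] == PPP_ALLSTATIONS && (*p)[1] == PPP_UI) {
		*p += 2;
		return len - 2;
	}
	return len;		/* prefix absent: leave the frame alone */
}

int main(void)
{
	unsigned char pkt[] = { PPP_ALLSTATIONS, PPP_UI, 0x00, 0x21 };
	const unsigned char *p = pkt;
	size_t len = strip_ac(&p, sizeof(pkt));

	/* 0x0021 is the PPP protocol number for IPv4 */
	assert(len == 2 && p[0] == 0x00 && p[1] == 0x21);
	return 0;
}
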
skb               339 net/lapb/lapb_iface.c int lapb_data_request(struct net_device *dev, struct sk_buff *skb)
skb               351 net/lapb/lapb_iface.c 	skb_queue_tail(&lapb->write_queue, skb);
skb               361 net/lapb/lapb_iface.c int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
skb               367 net/lapb/lapb_iface.c 		lapb_data_input(lapb, skb);
skb               400 net/lapb/lapb_iface.c int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
skb               403 net/lapb/lapb_iface.c 		return lapb->callbacks->data_indication(lapb->dev, skb);
skb               405 net/lapb/lapb_iface.c 	kfree_skb(skb);
skb               409 net/lapb/lapb_iface.c int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
skb               414 net/lapb/lapb_iface.c 		lapb->callbacks->data_transmit(lapb->dev, skb);
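
The lapb_iface.c lines above show that the LAPB core never touches a device directly: frames are delivered and transmitted through a callbacks table the driver registered, and when no transmit hook is installed the caller frees the frame itself (the !lapb_data_transmit branch in lapb_out.c further down). A minimal model of that indirection, with hypothetical names:

#include <stdio.h>

struct model_callbacks {
	void (*data_transmit)(const char *dev, const char *frame);
};

struct model_lapb {
	const char *dev;	/* device name stands in for net_device */
	const struct model_callbacks *callbacks;
};

/* returns 1 when a driver hook consumed the frame, 0 otherwise;
 * on 0 the caller frees the frame, as lapb_transmit_buffer() does */
static int model_data_transmit(struct model_lapb *lapb, const char *frame)
{
	if (!lapb->callbacks->data_transmit)
		return 0;
	lapb->callbacks->data_transmit(lapb->dev, frame);
	return 1;
}

static void wire_tx(const char *dev, const char *frame)
{
	printf("tx on %s: %s\n", dev, frame);
}

int main(void)
{
	const struct model_callbacks cb = { .data_transmit = wire_tx };
	struct model_lapb lapb = { "hdlc0", &cb };

	model_data_transmit(&lapb, "SABM");
	return 0;
}
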
skb                39 net/lapb/lapb_in.c static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
skb               103 net/lapb/lapb_in.c 	kfree_skb(skb);
skb               110 net/lapb/lapb_in.c static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb,
skb               179 net/lapb/lapb_in.c 	kfree_skb(skb);
skb               186 net/lapb/lapb_in.c static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
skb               239 net/lapb/lapb_in.c 	kfree_skb(skb);
skb               246 net/lapb/lapb_in.c static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
skb               402 net/lapb/lapb_in.c 			cn = lapb_data_indication(lapb, skb);
skb               444 net/lapb/lapb_in.c 			 skb->data);
skb               465 net/lapb/lapb_in.c 		kfree_skb(skb);
skb               472 net/lapb/lapb_in.c static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
skb               527 net/lapb/lapb_in.c 	kfree_skb(skb);
skb               533 net/lapb/lapb_in.c void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *skb)
skb               537 net/lapb/lapb_in.c 	if (lapb_decode(lapb, skb, &frame) < 0) {
skb               538 net/lapb/lapb_in.c 		kfree_skb(skb);
skb               544 net/lapb/lapb_in.c 		lapb_state0_machine(lapb, skb, &frame); break;
skb               546 net/lapb/lapb_in.c 		lapb_state1_machine(lapb, skb, &frame); break;
skb               548 net/lapb/lapb_in.c 		lapb_state2_machine(lapb, skb, &frame); break;
skb               550 net/lapb/lapb_in.c 		lapb_state3_machine(lapb, skb, &frame); break;
skb               552 net/lapb/lapb_in.c 		lapb_state4_machine(lapb, skb, &frame); break;
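
lapb_data_input above is a classic two-step receive: lapb_decode() turns the raw bytes into a lapb_frame, then a switch on lapb->state hands the frame to the handler for the current protocol state. The same dispatch written as a handler table, a common equivalent of the switch:

#include <stdio.h>

struct model_frame {
	int type;
};

typedef void (*state_fn)(struct model_frame *);

static void state0(struct model_frame *f) { printf("disconnected: %d\n", f->type); }
static void state3(struct model_frame *f) { printf("connected: %d\n", f->type); }

int main(void)
{
	/* one handler per protocol state, as in lapb_data_input() */
	state_fn machine[5] = { state0, NULL, NULL, state3, NULL };
	struct model_frame f = { .type = 1 };
	int state = 3;		/* current connection state */

	if (machine[state])
		machine[state](&f);
	return 0;
}
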
skb                37 net/lapb/lapb_out.c static void lapb_send_iframe(struct lapb_cb *lapb, struct sk_buff *skb, int poll_bit)
skb                41 net/lapb/lapb_out.c 	if (!skb)
skb                45 net/lapb/lapb_out.c 		frame = skb_push(skb, 2);
skb                52 net/lapb/lapb_out.c 		frame = skb_push(skb, 1);
skb                63 net/lapb/lapb_out.c 	lapb_transmit_buffer(lapb, skb, LAPB_COMMAND);
skb                68 net/lapb/lapb_out.c 	struct sk_buff *skb, *skbn;
skb                82 net/lapb/lapb_out.c 		skb = skb_dequeue(&lapb->write_queue);
skb                85 net/lapb/lapb_out.c 			if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb                86 net/lapb/lapb_out.c 				skb_queue_head(&lapb->write_queue, skb);
skb                90 net/lapb/lapb_out.c 			if (skb->sk)
skb                91 net/lapb/lapb_out.c 				skb_set_owner_w(skbn, skb->sk);
skb               103 net/lapb/lapb_out.c 			skb_queue_tail(&lapb->ack_queue, skb);
skb               105 net/lapb/lapb_out.c 		} while (lapb->vs != end && (skb = skb_dequeue(&lapb->write_queue)) != NULL);
skb               114 net/lapb/lapb_out.c void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
skb               118 net/lapb/lapb_out.c 	ptr = skb_push(skb, 1);
skb               146 net/lapb/lapb_out.c 	lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data);
skb               148 net/lapb/lapb_out.c 	if (!lapb_data_transmit(lapb, skb))
skb               149 net/lapb/lapb_out.c 		kfree_skb(skb);
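
The lapb_kick loop above dequeues each pending I-frame, clones it with skb_clone(), sends the clone, and parks the original on ack_queue so it can be retransmitted if the peer never acknowledges it (lapb_requeue in lapb_subr.c later moves unacked originals back onto the write queue). A toy model of the clone-and-park step:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame {
	char *data;
	struct frame *next;
};

static struct frame *clone_frame(const struct frame *f)
{
	struct frame *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	c->data = strdup(f->data);
	c->next = NULL;
	return c;
}

int main(void)
{
	struct frame orig = { strdup("I ns=0 payload"), NULL };
	struct frame *ack_queue = NULL;
	struct frame *wire = clone_frame(&orig);

	if (!wire)
		return 1;		/* would requeue and retry later */
	printf("tx clone: %s\n", wire->data);
	free(wire->data);
	free(wire);

	orig.next = ack_queue;		/* original parked awaiting ack */
	ack_queue = &orig;

	free(ack_queue->data);		/* peer acked: drop the original */
	return 0;
}
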
skb                48 net/lapb/lapb_subr.c 	struct sk_buff *skb;
skb                58 net/lapb/lapb_subr.c 			skb = skb_dequeue(&lapb->ack_queue);
skb                59 net/lapb/lapb_subr.c 			kfree_skb(skb);
skb                66 net/lapb/lapb_subr.c 	struct sk_buff *skb, *skb_prev = NULL;
skb                73 net/lapb/lapb_subr.c 	while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL) {
skb                75 net/lapb/lapb_subr.c 			skb_queue_head(&lapb->write_queue, skb);
skb                77 net/lapb/lapb_subr.c 			skb_append(skb_prev, skb, &lapb->write_queue);
skb                78 net/lapb/lapb_subr.c 		skb_prev = skb;
skb               106 net/lapb/lapb_subr.c int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
skb               111 net/lapb/lapb_subr.c 	lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data);
skb               116 net/lapb/lapb_subr.c 	if (!pskb_may_pull(skb, 2))
skb               121 net/lapb/lapb_subr.c 			if (skb->data[0] == LAPB_ADDR_D)
skb               123 net/lapb/lapb_subr.c 			if (skb->data[0] == LAPB_ADDR_C)
skb               126 net/lapb/lapb_subr.c 			if (skb->data[0] == LAPB_ADDR_C)
skb               128 net/lapb/lapb_subr.c 			if (skb->data[0] == LAPB_ADDR_D)
skb               133 net/lapb/lapb_subr.c 			if (skb->data[0] == LAPB_ADDR_B)
skb               135 net/lapb/lapb_subr.c 			if (skb->data[0] == LAPB_ADDR_A)
skb               138 net/lapb/lapb_subr.c 			if (skb->data[0] == LAPB_ADDR_A)
skb               140 net/lapb/lapb_subr.c 			if (skb->data[0] == LAPB_ADDR_B)
skb               145 net/lapb/lapb_subr.c 	skb_pull(skb, 1);
skb               148 net/lapb/lapb_subr.c 		if (!(skb->data[0] & LAPB_S)) {
skb               149 net/lapb/lapb_subr.c 			if (!pskb_may_pull(skb, 2))
skb               155 net/lapb/lapb_subr.c 			frame->ns         = (skb->data[0] >> 1) & 0x7F;
skb               156 net/lapb/lapb_subr.c 			frame->nr         = (skb->data[1] >> 1) & 0x7F;
skb               157 net/lapb/lapb_subr.c 			frame->pf         = skb->data[1] & LAPB_EPF;
skb               158 net/lapb/lapb_subr.c 			frame->control[0] = skb->data[0];
skb               159 net/lapb/lapb_subr.c 			frame->control[1] = skb->data[1];
skb               160 net/lapb/lapb_subr.c 			skb_pull(skb, 2);
skb               161 net/lapb/lapb_subr.c 		} else if ((skb->data[0] & LAPB_U) == 1) {
skb               162 net/lapb/lapb_subr.c 			if (!pskb_may_pull(skb, 2))
skb               167 net/lapb/lapb_subr.c 			frame->type       = skb->data[0] & 0x0F;
skb               168 net/lapb/lapb_subr.c 			frame->nr         = (skb->data[1] >> 1) & 0x7F;
skb               169 net/lapb/lapb_subr.c 			frame->pf         = skb->data[1] & LAPB_EPF;
skb               170 net/lapb/lapb_subr.c 			frame->control[0] = skb->data[0];
skb               171 net/lapb/lapb_subr.c 			frame->control[1] = skb->data[1];
skb               172 net/lapb/lapb_subr.c 			skb_pull(skb, 2);
skb               173 net/lapb/lapb_subr.c 		} else if ((skb->data[0] & LAPB_U) == 3) {
skb               177 net/lapb/lapb_subr.c 			frame->type       = skb->data[0] & ~LAPB_SPF;
skb               178 net/lapb/lapb_subr.c 			frame->pf         = skb->data[0] & LAPB_SPF;
skb               179 net/lapb/lapb_subr.c 			frame->control[0] = skb->data[0];
skb               181 net/lapb/lapb_subr.c 			skb_pull(skb, 1);
skb               184 net/lapb/lapb_subr.c 		if (!(skb->data[0] & LAPB_S)) {
skb               189 net/lapb/lapb_subr.c 			frame->ns   = (skb->data[0] >> 1) & 0x07;
skb               190 net/lapb/lapb_subr.c 			frame->nr   = (skb->data[0] >> 5) & 0x07;
skb               191 net/lapb/lapb_subr.c 			frame->pf   = skb->data[0] & LAPB_SPF;
skb               192 net/lapb/lapb_subr.c 		} else if ((skb->data[0] & LAPB_U) == 1) {
skb               196 net/lapb/lapb_subr.c 			frame->type = skb->data[0] & 0x0F;
skb               197 net/lapb/lapb_subr.c 			frame->nr   = (skb->data[0] >> 5) & 0x07;
skb               198 net/lapb/lapb_subr.c 			frame->pf   = skb->data[0] & LAPB_SPF;
skb               199 net/lapb/lapb_subr.c 		} else if ((skb->data[0] & LAPB_U) == 3) {
skb               203 net/lapb/lapb_subr.c 			frame->type = skb->data[0] & ~LAPB_SPF;
skb               204 net/lapb/lapb_subr.c 			frame->pf   = skb->data[0] & LAPB_SPF;
skb               207 net/lapb/lapb_subr.c 		frame->control[0] = skb->data[0];
skb               209 net/lapb/lapb_subr.c 		skb_pull(skb, 1);
skb               224 net/lapb/lapb_subr.c 	struct sk_buff *skb;
skb               227 net/lapb/lapb_subr.c 	if ((skb = alloc_skb(LAPB_HEADER_LEN + 3, GFP_ATOMIC)) == NULL)
skb               230 net/lapb/lapb_subr.c 	skb_reserve(skb, LAPB_HEADER_LEN + 1);
skb               234 net/lapb/lapb_subr.c 			dptr   = skb_put(skb, 1);
skb               238 net/lapb/lapb_subr.c 			dptr     = skb_put(skb, 2);
skb               244 net/lapb/lapb_subr.c 		dptr   = skb_put(skb, 1);
skb               251 net/lapb/lapb_subr.c 	lapb_transmit_buffer(lapb, skb, type);
skb               260 net/lapb/lapb_subr.c 	struct sk_buff *skb;
skb               263 net/lapb/lapb_subr.c 	if ((skb = alloc_skb(LAPB_HEADER_LEN + 7, GFP_ATOMIC)) == NULL)
skb               266 net/lapb/lapb_subr.c 	skb_reserve(skb, LAPB_HEADER_LEN + 1);
skb               269 net/lapb/lapb_subr.c 		dptr    = skb_put(skb, 6);
skb               282 net/lapb/lapb_subr.c 			 &skb->data[1]);
skb               284 net/lapb/lapb_subr.c 		dptr    = skb_put(skb, 4);
skb               295 net/lapb/lapb_subr.c 			 lapb->dev, lapb->state, &skb->data[1]);
skb               298 net/lapb/lapb_subr.c 	lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
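
lapb_decode above parses the control field that follows the address byte: a clear bit 0 means an I-frame, and in standard modulo-8 mode N(S) sits in bits 1-3, N(R) in bits 5-7 and the P/F flag in bit 4 (LAPB_SPF, 0x10 in include/net/lapb.h), while extended modulo-128 mode spreads 7-bit counters over two control bytes. A sketch of the modulo-8 I-frame decode using the same masks:

#include <stdio.h>

#define LAPB_S    0x01
#define LAPB_SPF  0x10

struct model_frame {
	int is_i;
	unsigned int ns, nr, pf;
};

static void decode_mod8(unsigned char c, struct model_frame *f)
{
	f->is_i = !(c & LAPB_S);	/* bit 0 clear: I-frame */
	if (f->is_i) {
		f->ns = (c >> 1) & 0x07;
		f->nr = (c >> 5) & 0x07;
		f->pf = !!(c & LAPB_SPF);
	}
}

int main(void)
{
	struct model_frame f = { 0 };

	decode_mod8(0xD4, &f);	/* 1101 0100: I-frame, ns=2, nr=6, pf=1 */
	printf("I=%d ns=%u nr=%u pf=%u\n", f.is_i, f.ns, f.nr, f.pf);
	return 0;
}
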
skb               119 net/llc/af_llc.c static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
skb               131 net/llc/af_llc.c 			kfree_skb(skb);
skb               135 net/llc/af_llc.c 	return llc_build_and_send_pkt(sk, skb);
skb               640 net/llc/af_llc.c static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
skb               642 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(skb->sk);
skb               648 net/llc/af_llc.c 		info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
skb               649 net/llc/af_llc.c 		llc_pdu_decode_dsap(skb, &info.lpi_sap);
skb               650 net/llc/af_llc.c 		llc_pdu_decode_da(skb, info.lpi_mac);
skb               670 net/llc/af_llc.c 	struct sk_buff *skb;
skb               690 net/llc/af_llc.c 	skb = skb_dequeue(&sk->sk_receive_queue);
skb               692 net/llc/af_llc.c 	if (!skb->sk)
skb               695 net/llc/af_llc.c 	newsk = skb->sk;
skb               712 net/llc/af_llc.c 	kfree_skb(skb);
skb               733 net/llc/af_llc.c 	struct sk_buff *skb = NULL;
skb               776 net/llc/af_llc.c 		skb = skb_peek(&sk->sk_receive_queue);
skb               777 net/llc/af_llc.c 		if (skb) {
skb               835 net/llc/af_llc.c 		skb_len = skb->len;
skb               837 net/llc/af_llc.c 		used = skb->len - offset;
skb               842 net/llc/af_llc.c 			int rc = skb_copy_datagram_msg(skb, offset, msg, used);
skb               860 net/llc/af_llc.c 			skb_unlink(skb, &sk->sk_receive_queue);
skb               861 net/llc/af_llc.c 			kfree_skb(skb);
skb               874 net/llc/af_llc.c 	if (uaddr != NULL && skb != NULL) {
skb               875 net/llc/af_llc.c 		memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
skb               879 net/llc/af_llc.c 		llc_cmsg_rcv(msg, skb);
skb               882 net/llc/af_llc.c 		skb_unlink(skb, &sk->sk_receive_queue);
skb               883 net/llc/af_llc.c 		kfree_skb(skb);
skb               906 net/llc/af_llc.c 	struct sk_buff *skb = NULL;
skb               937 net/llc/af_llc.c 	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
skb               939 net/llc/af_llc.c 	if (!skb)
skb               941 net/llc/af_llc.c 	skb->dev      = llc->dev;
skb               942 net/llc/af_llc.c 	skb->protocol = llc_proto_type(addr->sllc_arphrd);
skb               943 net/llc/af_llc.c 	skb_reserve(skb, hdrlen);
skb               944 net/llc/af_llc.c 	rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
skb               948 net/llc/af_llc.c 		llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_mac,
skb               950 net/llc/af_llc.c 		skb = NULL;
skb               954 net/llc/af_llc.c 		llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_mac,
skb               956 net/llc/af_llc.c 		skb = NULL;
skb               960 net/llc/af_llc.c 		llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_mac,
skb               962 net/llc/af_llc.c 		skb = NULL;
skb               968 net/llc/af_llc.c 	rc = llc_ui_send_data(sk, skb, noblock);
skb               969 net/llc/af_llc.c 	skb = NULL;
skb               971 net/llc/af_llc.c 	kfree_skb(skb);
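
The llc_ui_recvmsg loop above gives stream semantics over queued skbs: data is copied out of the buffer at the head of sk_receive_queue starting at an offset, and the skb is only unlinked and freed once fully drained, so a short read leaves the remainder for the next call. A userspace model of that consume-when-drained rule:

#include <stdio.h>
#include <string.h>

struct rx {
	const char *data;
	size_t len, off;
};

/* returns bytes copied; consumes the buffer once fully drained */
static size_t rx_read(struct rx *q, char *dst, size_t want)
{
	size_t used = q->len - q->off;

	if (used > want)
		used = want;
	memcpy(dst, q->data + q->off, used);
	q->off += used;
	if (q->off == q->len)
		q->data = NULL;	/* stands in for skb_unlink + kfree_skb */
	return used;
}

int main(void)
{
	struct rx q = { "abcdef", 6, 0 };
	char buf[4];

	printf("%zu\n", rx_read(&q, buf, 4));	/* 4, buffer stays queued */
	printf("%zu\n", rx_read(&q, buf, 4));	/* 2, buffer now consumed */
	return 0;
}
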
skb                32 net/llc/llc_c_ac.c static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb);
skb                33 net/llc/llc_c_ac.c static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb);
skb                36 net/llc/llc_c_ac.c static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb);
skb                39 net/llc/llc_c_ac.c 					       struct sk_buff *skb);
skb                41 net/llc/llc_c_ac.c static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb);
skb                45 net/llc/llc_c_ac.c int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb)
skb                51 net/llc/llc_c_ac.c 		struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb                61 net/llc/llc_c_ac.c int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb)
skb                63 net/llc/llc_c_ac.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb                69 net/llc/llc_c_ac.c int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb)
skb                71 net/llc/llc_c_ac.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb                77 net/llc/llc_c_ac.c static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *skb)
skb                79 net/llc/llc_c_ac.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb                85 net/llc/llc_c_ac.c int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb)
skb                87 net/llc/llc_c_ac.c 	llc_conn_rtn_pdu(sk, skb);
skb                91 net/llc/llc_c_ac.c int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb)
skb                93 net/llc/llc_c_ac.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb                98 net/llc/llc_c_ac.c 		struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               119 net/llc/llc_c_ac.c int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb)
skb               121 net/llc/llc_c_ac.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               128 net/llc/llc_c_ac.c int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb)
skb               132 net/llc/llc_c_ac.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               133 net/llc/llc_c_ac.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               167 net/llc/llc_c_ac.c int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb)
skb               169 net/llc/llc_c_ac.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               177 net/llc/llc_c_ac.c 					    struct sk_buff *skb)
skb               179 net/llc/llc_c_ac.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               184 net/llc/llc_c_ac.c 		llc_conn_ac_clear_remote_busy(sk, skb);
skb               189 net/llc/llc_c_ac.c 					       struct sk_buff *skb)
skb               198 net/llc/llc_c_ac.c int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb)
skb               214 net/llc/llc_c_ac.c 		llc_conn_ac_set_p_flag_1(sk, skb);
skb               223 net/llc/llc_c_ac.c int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb)
skb               233 net/llc/llc_c_ac.c 		llc_pdu_decode_pf_bit(skb, &f_bit);
skb               249 net/llc/llc_c_ac.c int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
skb               273 net/llc/llc_c_ac.c int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb)
skb               278 net/llc/llc_c_ac.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               283 net/llc/llc_c_ac.c 		llc_pdu_decode_pf_bit(skb, &f_bit);
skb               307 net/llc/llc_c_ac.c int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb)
skb               334 net/llc/llc_c_ac.c int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb)
skb               341 net/llc/llc_c_ac.c 	llc_pdu_decode_pf_bit(skb, &f_bit);
skb               346 net/llc/llc_c_ac.c 		struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               364 net/llc/llc_c_ac.c int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
skb               370 net/llc/llc_c_ac.c 	llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap,
skb               372 net/llc/llc_c_ac.c 	llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR);
skb               373 net/llc/llc_c_ac.c 	rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
skb               375 net/llc/llc_c_ac.c 		skb_get(skb);
skb               376 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, skb);
skb               377 net/llc/llc_c_ac.c 		llc_conn_ac_inc_vs_by_1(sk, skb);
skb               382 net/llc/llc_c_ac.c static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
skb               388 net/llc/llc_c_ac.c 	llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap,
skb               390 net/llc/llc_c_ac.c 	llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
skb               391 net/llc/llc_c_ac.c 	rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
skb               393 net/llc/llc_c_ac.c 		skb_get(skb);
skb               394 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, skb);
skb               395 net/llc/llc_c_ac.c 		llc_conn_ac_inc_vs_by_1(sk, skb);
skb               400 net/llc/llc_c_ac.c int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
skb               406 net/llc/llc_c_ac.c 	llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap,
skb               408 net/llc/llc_c_ac.c 	llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
skb               409 net/llc/llc_c_ac.c 	rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
skb               411 net/llc/llc_c_ac.c 		skb_get(skb);
skb               412 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, skb);
skb               413 net/llc/llc_c_ac.c 		llc_conn_ac_inc_vs_by_1(sk, skb);
skb               418 net/llc/llc_c_ac.c int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
skb               420 net/llc/llc_c_ac.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               428 net/llc/llc_c_ac.c 						struct sk_buff *skb)
skb               431 net/llc/llc_c_ac.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               446 net/llc/llc_c_ac.c 			kfree_skb(skb);
skb               456 net/llc/llc_c_ac.c int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
skb               458 net/llc/llc_c_ac.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               465 net/llc/llc_c_ac.c int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
skb               489 net/llc/llc_c_ac.c int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
skb               513 net/llc/llc_c_ac.c int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
skb               537 net/llc/llc_c_ac.c int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
skb               561 net/llc/llc_c_ac.c int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
skb               585 net/llc/llc_c_ac.c int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
skb               609 net/llc/llc_c_ac.c int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb)
skb               621 net/llc/llc_c_ac.c int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
skb               645 net/llc/llc_c_ac.c int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
skb               669 net/llc/llc_c_ac.c int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
skb               694 net/llc/llc_c_ac.c int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
skb               718 net/llc/llc_c_ac.c int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
skb               742 net/llc/llc_c_ac.c int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
skb               776 net/llc/llc_c_ac.c int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb)
skb               804 net/llc/llc_c_ac.c int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb)
skb               811 net/llc/llc_c_ac.c 	llc_pdu_decode_pf_bit(skb, &f_bit);
skb               831 net/llc/llc_c_ac.c int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb)
skb               837 net/llc/llc_c_ac.c int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb)
skb               843 net/llc/llc_c_ac.c int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb)
skb               863 net/llc/llc_c_ac.c int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb)
skb               868 net/llc/llc_c_ac.c 	llc_pdu_decode_pf_bit(skb, &pf_bit);
skb               877 net/llc/llc_c_ac.c 		llc_conn_ac_send_rr_rsp_f_set_ackpf(sk, skb);
skb               880 net/llc/llc_c_ac.c 		llc_conn_ac_inc_npta_value(sk, skb);
skb               894 net/llc/llc_c_ac.c int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb)
skb               911 net/llc/llc_c_ac.c 					      struct sk_buff *skb)
skb               917 net/llc/llc_c_ac.c 	llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap,
skb               919 net/llc/llc_c_ac.c 	llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
skb               920 net/llc/llc_c_ac.c 	rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
skb               922 net/llc/llc_c_ac.c 		skb_get(skb);
skb               923 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, skb);
skb               924 net/llc/llc_c_ac.c 		llc_conn_ac_inc_vs_by_1(sk, skb);
skb               939 net/llc/llc_c_ac.c int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb)
skb               945 net/llc/llc_c_ac.c 		ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
skb               949 net/llc/llc_c_ac.c 		ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
skb               966 net/llc/llc_c_ac.c 					       struct sk_buff *skb)
skb              1000 net/llc/llc_c_ac.c static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb)
skb              1023 net/llc/llc_c_ac.c int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb)
skb              1049 net/llc/llc_c_ac.c int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb)
skb              1074 net/llc/llc_c_ac.c int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb)
skb              1094 net/llc/llc_c_ac.c int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
skb              1104 net/llc/llc_c_ac.c int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
skb              1110 net/llc/llc_c_ac.c int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb)
skb              1122 net/llc/llc_c_ac.c int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb)
skb              1130 net/llc/llc_c_ac.c int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb)
skb              1140 net/llc/llc_c_ac.c 					     struct sk_buff *skb)
skb              1150 net/llc/llc_c_ac.c int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb)
skb              1156 net/llc/llc_c_ac.c int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb)
skb              1165 net/llc/llc_c_ac.c int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb)
skb              1171 net/llc/llc_c_ac.c int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb)
skb              1175 net/llc/llc_c_ac.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb              1190 net/llc/llc_c_ac.c 			llc_conn_ac_data_confirm(sk, skb);
skb              1198 net/llc/llc_c_ac.c 		llc_pdu_decode_pf_bit(skb, &f_bit);
skb              1201 net/llc/llc_c_ac.c 			llc_conn_ac_data_confirm(sk, skb);
skb              1207 net/llc/llc_c_ac.c int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb)
skb              1209 net/llc/llc_c_ac.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb              1214 net/llc/llc_c_ac.c 		llc_pdu_decode_pf_bit(skb, &f_bit);
skb              1217 net/llc/llc_c_ac.c 			llc_conn_ac_stop_p_timer(sk, skb);
skb              1223 net/llc/llc_c_ac.c int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb)
skb              1229 net/llc/llc_c_ac.c int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb)
skb              1235 net/llc/llc_c_ac.c int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb)
skb              1242 net/llc/llc_c_ac.c 						  struct sk_buff *skb)
skb              1249 net/llc/llc_c_ac.c int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb)
skb              1255 net/llc/llc_c_ac.c static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb)
skb              1261 net/llc/llc_c_ac.c int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb)
skb              1267 net/llc/llc_c_ac.c int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb)
skb              1273 net/llc/llc_c_ac.c int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb)
skb              1279 net/llc/llc_c_ac.c int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb)
skb              1285 net/llc/llc_c_ac.c int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb)
skb              1291 net/llc/llc_c_ac.c int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb)
skb              1297 net/llc/llc_c_ac.c int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb)
skb              1303 net/llc/llc_c_ac.c int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb)
skb              1309 net/llc/llc_c_ac.c int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb)
skb              1315 net/llc/llc_c_ac.c static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb)
skb              1323 net/llc/llc_c_ac.c 	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
skb              1326 net/llc/llc_c_ac.c 	if (skb) {
skb              1327 net/llc/llc_c_ac.c 		struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb              1329 net/llc/llc_c_ac.c 		skb_set_owner_r(skb, sk);
skb              1331 net/llc/llc_c_ac.c 		llc_process_tmr_ev(sk, skb);
skb              1364 net/llc/llc_c_ac.c int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb)
skb              1367 net/llc/llc_c_ac.c 	llc_conn_ac_set_vs_nr(sk, skb);
skb              1371 net/llc/llc_c_ac.c int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb)
skb              1373 net/llc/llc_c_ac.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb              1377 net/llc/llc_c_ac.c 		llc_conn_ac_set_vs_nr(sk, skb);
skb              1390 net/llc/llc_c_ac.c int llc_conn_disc(struct sock *sk, struct sk_buff *skb)
skb              1403 net/llc/llc_c_ac.c int llc_conn_reset(struct sock *sk, struct sk_buff *skb)
skb              1437 net/llc/llc_c_ac.c static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
skb              1442 net/llc/llc_c_ac.c 		kfree_skb(skb);
skb              1445 net/llc/llc_c_ac.c 			llc_conn_state_process(sk, skb);
skb              1447 net/llc/llc_c_ac.c 			llc_set_backlog_type(skb, LLC_EVENT);
skb              1448 net/llc/llc_c_ac.c 			__sk_add_backlog(sk, skb);
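
Every function in llc_c_ac.c above follows one signature, int action(struct sock *sk, struct sk_buff *skb) returning 0 on success, so a state transition can be expressed simply as an ordered list of actions. A freestanding sketch of that convention, with hypothetical action names:

#include <stdio.h>

struct conn {
	unsigned int vS, vR;
};

struct ev {
	int type;
};

typedef int (*action_t)(struct conn *, struct ev *);

static int ac_inc_vs(struct conn *c, struct ev *e)
{
	(void)e;
	c->vS = (c->vS + 1) % 128;	/* sequence space is modulo 128 */
	return 0;
}

static int ac_report(struct conn *c, struct ev *e)
{
	printf("ev %d handled, vS=%u\n", e->type, c->vS);
	return 0;
}

/* run a NULL-terminated action list, stopping at the first failure */
static int run_actions(const action_t *acts, struct conn *c, struct ev *e)
{
	for (; *acts; acts++)
		if ((*acts)(c, e))
			return -1;
	return 0;
}

int main(void)
{
	const action_t acts[] = { ac_inc_vs, ac_report, NULL };
	struct conn c = { .vS = 127 };
	struct ev e = { .type = 1 };

	return run_actions(acts, &c, &e);
}
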
skb                78 net/llc/llc_c_ev.c 	struct sk_buff *skb;
skb                88 net/llc/llc_c_ev.c 	skb = skb_peek(&llc->pdu_unack_q);
skb                89 net/llc/llc_c_ev.c 	pdu = llc_pdu_sn_hdr(skb);
skb                91 net/llc/llc_c_ev.c 	skb = skb_peek_tail(&llc->pdu_unack_q);
skb                92 net/llc/llc_c_ev.c 	pdu = llc_pdu_sn_hdr(skb);
skb                99 net/llc/llc_c_ev.c int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb)
skb               101 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               107 net/llc/llc_c_ev.c int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb)
skb               109 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               115 net/llc/llc_c_ev.c int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb)
skb               117 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               123 net/llc/llc_c_ev.c int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb)
skb               125 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               131 net/llc/llc_c_ev.c int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb)
skb               133 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               139 net/llc/llc_c_ev.c int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb)
skb               141 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               147 net/llc/llc_c_ev.c int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb)
skb               152 net/llc/llc_c_ev.c int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
skb               154 net/llc/llc_c_ev.c 	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               160 net/llc/llc_c_ev.c int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
skb               162 net/llc/llc_c_ev.c 	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               168 net/llc/llc_c_ev.c int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
skb               170 net/llc/llc_c_ev.c 	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               176 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
skb               178 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               180 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
skb               186 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
skb               188 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               190 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
skb               197 net/llc/llc_c_ev.c 					      struct sk_buff *skb)
skb               199 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               209 net/llc/llc_c_ev.c 					      struct sk_buff *skb)
skb               211 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               221 net/llc/llc_c_ev.c 					     struct sk_buff *skb)
skb               223 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               235 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
skb               237 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               239 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
skb               245 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
skb               247 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               254 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
skb               256 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               258 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
skb               264 net/llc/llc_c_ev.c 					      struct sk_buff *skb)
skb               266 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               276 net/llc/llc_c_ev.c 					      struct sk_buff *skb)
skb               278 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               288 net/llc/llc_c_ev.c 					      struct sk_buff *skb)
skb               290 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               299 net/llc/llc_c_ev.c 					     struct sk_buff *skb)
skb               301 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               313 net/llc/llc_c_ev.c int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
skb               315 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               322 net/llc/llc_c_ev.c int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
skb               324 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               331 net/llc/llc_c_ev.c int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
skb               333 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               340 net/llc/llc_c_ev.c int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
skb               342 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               349 net/llc/llc_c_ev.c int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
skb               351 net/llc/llc_c_ev.c 	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               357 net/llc/llc_c_ev.c int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
skb               359 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               366 net/llc/llc_c_ev.c int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
skb               368 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               375 net/llc/llc_c_ev.c int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
skb               377 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               384 net/llc/llc_c_ev.c int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
skb               386 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               393 net/llc/llc_c_ev.c int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
skb               395 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               402 net/llc/llc_c_ev.c int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
skb               404 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               411 net/llc/llc_c_ev.c int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
skb               413 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               415 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
skb               421 net/llc/llc_c_ev.c int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
skb               423 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               425 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
skb               431 net/llc/llc_c_ev.c int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
skb               433 net/llc/llc_c_ev.c 	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               439 net/llc/llc_c_ev.c int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
skb               441 net/llc/llc_c_ev.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               447 net/llc/llc_c_ev.c int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
skb               450 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               462 net/llc/llc_c_ev.c int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
skb               465 net/llc/llc_c_ev.c 	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               481 net/llc/llc_c_ev.c int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
skb               484 net/llc/llc_c_ev.c 	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               503 net/llc/llc_c_ev.c 					       struct sk_buff *skb)
skb               506 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               521 net/llc/llc_c_ev.c 					       struct sk_buff *skb)
skb               524 net/llc/llc_c_ev.c 	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               538 net/llc/llc_c_ev.c int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb)
skb               543 net/llc/llc_c_ev.c int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb)
skb               545 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               550 net/llc/llc_c_ev.c int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb)
skb               552 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               557 net/llc/llc_c_ev.c int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb)
skb               559 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               564 net/llc/llc_c_ev.c int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb)
skb               566 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               571 net/llc/llc_c_ev.c int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb)
skb               576 net/llc/llc_c_ev.c int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb)
skb               578 net/llc/llc_c_ev.c 	const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               590 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb)
skb               595 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb)
skb               600 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb)
skb               605 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb)
skb               621 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb)
skb               635 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb)
skb               640 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb)
skb               645 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb)
skb               649 net/llc/llc_c_ev.c 	llc_pdu_decode_pf_bit(skb, &f_bit);
skb               653 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb)
skb               658 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb)
skb               663 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb)
skb               668 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb)
skb               673 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb)
skb               678 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb)
skb               683 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb)
skb               688 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb)
skb               693 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb)
skb               695 net/llc/llc_c_ev.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               701 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb)
skb               703 net/llc/llc_c_ev.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               709 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb)
skb               711 net/llc/llc_c_ev.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               718 net/llc/llc_c_ev.c 					    struct sk_buff *skb)
skb               720 net/llc/llc_c_ev.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               726 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb)
skb               728 net/llc/llc_c_ev.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               734 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb)
skb               736 net/llc/llc_c_ev.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               742 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb)
skb               744 net/llc/llc_c_ev.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
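The llc_c_ev.c entries above all share one shape: decode the PDU header from the skb, test the frame type and P/F bit, and return 0 when the event is recognized. A minimal sketch of that shape, assuming the decode macros from include/net/llc_pdu.h (the exact predicate chosen here is illustrative):

static int example_ev_rx_rr_cmd_pbit_set_1(struct sock *sk,
					   struct sk_buff *skb)
{
	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);

	/* 0 means "event matched" by convention in this file */
	return LLC_PDU_IS_CMD(pdu) &&		/* command, not response */
	       LLC_PDU_TYPE_IS_S(pdu) &&	/* supervisory format */
	       LLC_S_PF_IS_1(pdu) &&		/* P bit set */
	       LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_RR ? 0 : 1;
}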
skb                34 net/llc/llc_conn.c static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
skb                39 net/llc/llc_conn.c 							struct sk_buff *skb);
skb                61 net/llc/llc_conn.c int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
skb                64 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(skb->sk);
skb                65 net/llc/llc_conn.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb                71 net/llc/llc_conn.c 	rc = llc_conn_service(skb->sk, skb);
skb                79 net/llc/llc_conn.c 		skb_get(skb);
skb                80 net/llc/llc_conn.c 		llc_save_primitive(sk, skb, LLC_DATA_PRIM);
skb                81 net/llc/llc_conn.c 		if (unlikely(sock_queue_rcv_skb(sk, skb))) {
skb                87 net/llc/llc_conn.c 			kfree_skb(skb);
skb                96 net/llc/llc_conn.c 		skb_get(skb);
skb                97 net/llc/llc_conn.c 		skb_queue_tail(&sk->sk_receive_queue, skb);
skb               173 net/llc/llc_conn.c 	kfree_skb(skb);
skb               177 net/llc/llc_conn.c void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
skb               180 net/llc/llc_conn.c 	skb_queue_tail(&sk->sk_write_queue, skb);
skb               193 net/llc/llc_conn.c void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb)
skb               195 net/llc/llc_conn.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               212 net/llc/llc_conn.c 	struct sk_buff *skb;
skb               227 net/llc/llc_conn.c 	while ((skb = skb_dequeue(&llc->pdu_unack_q)) != NULL) {
skb               228 net/llc/llc_conn.c 		pdu = llc_pdu_sn_hdr(skb);
skb               229 net/llc/llc_conn.c 		llc_pdu_set_cmd_rsp(skb, LLC_PDU_CMD);
skb               230 net/llc/llc_conn.c 		llc_pdu_set_pf_bit(skb, first_p_bit);
skb               231 net/llc/llc_conn.c 		skb_queue_tail(&sk->sk_write_queue, skb);
skb               255 net/llc/llc_conn.c 	struct sk_buff *skb;
skb               267 net/llc/llc_conn.c 	while ((skb = skb_dequeue(&llc->pdu_unack_q)) != NULL) {
skb               268 net/llc/llc_conn.c 		struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               270 net/llc/llc_conn.c 		llc_pdu_set_cmd_rsp(skb, LLC_PDU_RSP);
skb               271 net/llc/llc_conn.c 		llc_pdu_set_pf_bit(skb, first_f_bit);
skb               272 net/llc/llc_conn.c 		skb_queue_tail(&sk->sk_write_queue, skb);
skb               296 net/llc/llc_conn.c 	struct sk_buff *skb;
skb               304 net/llc/llc_conn.c 	skb = skb_peek(&llc->pdu_unack_q);
skb               305 net/llc/llc_conn.c 	pdu = llc_pdu_sn_hdr(skb);
skb               312 net/llc/llc_conn.c 		skb = skb_dequeue(&llc->pdu_unack_q);
skb               313 net/llc/llc_conn.c 		kfree_skb(skb);
skb               329 net/llc/llc_conn.c 	struct sk_buff *skb;
skb               331 net/llc/llc_conn.c 	while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
skb               332 net/llc/llc_conn.c 		struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               335 net/llc/llc_conn.c 		    !(skb->dev->flags & IFF_LOOPBACK)) {
skb               336 net/llc/llc_conn.c 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
skb               338 net/llc/llc_conn.c 			skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
skb               341 net/llc/llc_conn.c 			skb = skb2;
skb               343 net/llc/llc_conn.c 		dev_queue_xmit(skb);
skb               356 net/llc/llc_conn.c static int llc_conn_service(struct sock *sk, struct sk_buff *skb)
skb               365 net/llc/llc_conn.c 	trans = llc_qualify_conn_ev(sk, skb);
skb               367 net/llc/llc_conn.c 		rc = llc_exec_conn_trans_actions(sk, trans, skb);
skb               387 net/llc/llc_conn.c 							struct sk_buff *skb)
skb               391 net/llc/llc_conn.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               402 net/llc/llc_conn.c 		if (!((*next_trans)->ev)(sk, skb)) {
skb               411 net/llc/llc_conn.c 			     !(*next_qualifier)(sk, skb); next_qualifier++)
skb               435 net/llc/llc_conn.c 				       struct sk_buff *skb)
skb               442 net/llc/llc_conn.c 		int rc2 = (*next_action)(sk, skb);
skb               738 net/llc/llc_conn.c static int llc_conn_rcv(struct sock *sk, struct sk_buff *skb)
skb               740 net/llc/llc_conn.c 	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               744 net/llc/llc_conn.c 	return llc_conn_state_process(sk, skb);
skb               769 net/llc/llc_conn.c void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
skb               774 net/llc/llc_conn.c 	llc_pdu_decode_sa(skb, saddr.mac);
skb               775 net/llc/llc_conn.c 	llc_pdu_decode_ssap(skb, &saddr.lsap);
skb               776 net/llc/llc_conn.c 	llc_pdu_decode_da(skb, daddr.mac);
skb               777 net/llc/llc_conn.c 	llc_pdu_decode_dsap(skb, &daddr.lsap);
skb               793 net/llc/llc_conn.c 		struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
skb               797 net/llc/llc_conn.c 		skb_set_owner_r(skb, newsk);
skb               806 net/llc/llc_conn.c 		skb_orphan(skb);
skb               808 net/llc/llc_conn.c 		skb->sk = sk;
skb               809 net/llc/llc_conn.c 		skb->destructor = sock_efree;
skb               812 net/llc/llc_conn.c 		llc_conn_rcv(sk, skb);
skb               815 net/llc/llc_conn.c 		llc_set_backlog_type(skb, LLC_PACKET);
skb               816 net/llc/llc_conn.c 		if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
skb               824 net/llc/llc_conn.c 	kfree_skb(skb);
skb               827 net/llc/llc_conn.c 	kfree_skb(skb);
skb               846 net/llc/llc_conn.c static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
skb               851 net/llc/llc_conn.c 	if (likely(llc_backlog_type(skb) == LLC_PACKET)) {
skb               853 net/llc/llc_conn.c 			rc = llc_conn_rcv(sk, skb);
skb               856 net/llc/llc_conn.c 	} else if (llc_backlog_type(skb) == LLC_EVENT) {
skb               859 net/llc/llc_conn.c 			rc = llc_conn_state_process(sk, skb);
skb               869 net/llc/llc_conn.c 	kfree_skb(skb);
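The llc_conn.c fragments above include the transmit-flush idiom: each PDU drained from sk_write_queue is kept on the un-ACKed queue for possible retransmission while a clone goes to the driver. A condensed sketch of that idiom (the kernel version also skips the clone for loopback devices and handles clone failure):

static void example_flush_tx(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			break;
		/* original stays queued until the peer ACKs it */
		skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
		dev_queue_xmit(skb2);		/* clone goes to the device */
	}
}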
skb                92 net/llc/llc_core.c 			     int (*func)(struct sk_buff *skb,
skb                44 net/llc/llc_if.c int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
skb                58 net/llc/llc_if.c 	ev = llc_conn_ev(skb);
skb                62 net/llc/llc_if.c 	skb->dev      = llc->dev;
skb                63 net/llc/llc_if.c 	return llc_conn_state_process(sk, skb);
skb                66 net/llc/llc_if.c 	kfree_skb(skb);
skb                87 net/llc/llc_if.c 	struct sk_buff *skb;
skb               105 net/llc/llc_if.c 	skb = alloc_skb(0, GFP_ATOMIC);
skb               106 net/llc/llc_if.c 	if (skb) {
skb               107 net/llc/llc_if.c 		struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb               112 net/llc/llc_if.c 		skb_set_owner_w(skb, sk);
skb               113 net/llc/llc_if.c 		rc = llc_conn_state_process(sk, skb);
skb               133 net/llc/llc_if.c 	struct sk_buff *skb;
skb               144 net/llc/llc_if.c 	skb = alloc_skb(0, GFP_ATOMIC);
skb               145 net/llc/llc_if.c 	if (!skb)
skb               147 net/llc/llc_if.c 	skb_set_owner_w(skb, sk);
skb               149 net/llc/llc_if.c 	ev	      = llc_conn_ev(skb);
skb               153 net/llc/llc_if.c 	rc = llc_conn_state_process(sk, skb);
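llc_if.c shows that user-plane requests become zero-length event skbs: the sk_buff carries no data at all, only an event record in its control block, and is fed straight into the connection state machine. A sketch of that pattern; the event constants are assumptions taken from the llc headers, not confirmed by the fragments above:

static int example_post_disc_event(struct sock *sk)
{
	struct llc_conn_state_ev *ev;
	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);
	ev = llc_conn_ev(skb);			/* event lives in skb->cb */
	ev->type      = LLC_CONN_EV_TYPE_PRIM;	/* assumed constant */
	ev->prim      = LLC_DISC_PRIM;		/* assumed constant */
	ev->prim_type = LLC_PRIM_TYPE_REQ;	/* assumed constant */
	return llc_conn_state_process(sk, skb);
}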
skb                34 net/llc/llc_input.c static void (*llc_station_handler)(struct sk_buff *skb);
skb                40 net/llc/llc_input.c 				    struct sk_buff *skb);
skb                43 net/llc/llc_input.c 					    struct sk_buff *skb))
skb                57 net/llc/llc_input.c void llc_set_station_handler(void (*handler)(struct sk_buff *skb))
skb                75 net/llc/llc_input.c static __inline__ int llc_pdu_type(struct sk_buff *skb)
skb                78 net/llc/llc_input.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               111 net/llc/llc_input.c static inline int llc_fixup_skb(struct sk_buff *skb)
skb               116 net/llc/llc_input.c 	if (unlikely(!pskb_may_pull(skb, sizeof(*pdu))))
skb               119 net/llc/llc_input.c 	pdu = (struct llc_pdu_un *)skb->data;
skb               124 net/llc/llc_input.c 	if (unlikely(!pskb_may_pull(skb, llc_len)))
skb               127 net/llc/llc_input.c 	skb->transport_header += llc_len;
skb               128 net/llc/llc_input.c 	skb_pull(skb, llc_len);
skb               129 net/llc/llc_input.c 	if (skb->protocol == htons(ETH_P_802_2)) {
skb               130 net/llc/llc_input.c 		__be16 pdulen = eth_hdr(skb)->h_proto;
skb               134 net/llc/llc_input.c 		    !pskb_may_pull(skb, data_size))
skb               136 net/llc/llc_input.c 		if (unlikely(pskb_trim_rcsum(skb, data_size)))
skb               154 net/llc/llc_input.c int llc_rcv(struct sk_buff *skb, struct net_device *dev,
skb               162 net/llc/llc_input.c 	void (*sta_handler)(struct sk_buff *skb);
skb               163 net/llc/llc_input.c 	void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb);
skb               172 net/llc/llc_input.c 	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
skb               176 net/llc/llc_input.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               177 net/llc/llc_input.c 	if (unlikely(!skb))
skb               179 net/llc/llc_input.c 	if (unlikely(!llc_fixup_skb(skb)))
skb               181 net/llc/llc_input.c 	pdu = llc_pdu_sn_hdr(skb);
skb               195 net/llc/llc_input.c 	dest = llc_pdu_type(skb);
skb               199 net/llc/llc_input.c 			rcv(skb, dev, pt, orig_dev);
skb               201 net/llc/llc_input.c 			kfree_skb(skb);
skb               204 net/llc/llc_input.c 			struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
skb               208 net/llc/llc_input.c 		sap_handler(sap, skb);
skb               214 net/llc/llc_input.c 	kfree_skb(skb);
skb               220 net/llc/llc_input.c 	sta_handler(skb);
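llc_fixup_skb() trims the 802.2 header before dispatch: two address bytes plus a one-byte control field for U-format PDUs, or a two-byte control field otherwise; for ETH_P_802_2 frames the 802.3 length field additionally bounds the payload. A reduced sketch of the header-length step, assuming the ctrl_1 layout and LLC_PDU_TYPE_MASK from include/net/llc_pdu.h:

static int example_llc_hdr_len(struct sk_buff *skb)
{
	struct llc_pdu_un *pdu;
	u8 llc_len = 2;				/* DSAP + SSAP */

	if (!pskb_may_pull(skb, sizeof(*pdu)))
		return -EINVAL;
	pdu = (struct llc_pdu_un *)skb->data;
	/* U format carries a one-byte control field, I/S carry two */
	if ((pdu->ctrl_1 & LLC_PDU_TYPE_MASK) == LLC_PDU_TYPE_U)
		llc_len += 1;
	else
		llc_len += 2;
	if (!pskb_may_pull(skb, llc_len))
		return -EINVAL;
	skb_pull(skb, llc_len);
	return llc_len;
}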
skb                25 net/llc/llc_output.c int llc_mac_hdr_init(struct sk_buff *skb,
skb                30 net/llc/llc_output.c 	switch (skb->dev->type) {
skb                33 net/llc/llc_output.c 		rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
skb                34 net/llc/llc_output.c 				     skb->len);
skb                58 net/llc/llc_output.c int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
skb                62 net/llc/llc_output.c 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap,
skb                64 net/llc/llc_output.c 	llc_pdu_init_as_ui_cmd(skb);
skb                65 net/llc/llc_output.c 	rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac);
skb                67 net/llc/llc_output.c 		rc = dev_queue_xmit(skb);
skb                69 net/llc/llc_output.c 		kfree_skb(skb);
skb                18 net/llc/llc_pdu.c static void llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type);
skb                21 net/llc/llc_pdu.c void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 pdu_type)
skb                23 net/llc/llc_pdu.c 	llc_pdu_un_hdr(skb)->ssap |= pdu_type;
skb                35 net/llc/llc_pdu.c void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value)
skb                40 net/llc/llc_pdu.c 	llc_pdu_decode_pdu_type(skb, &pdu_type);
skb                41 net/llc/llc_pdu.c 	pdu = llc_pdu_sn_hdr(skb);
skb                63 net/llc/llc_pdu.c void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit)
skb                68 net/llc/llc_pdu.c 	llc_pdu_decode_pdu_type(skb, &pdu_type);
skb                69 net/llc/llc_pdu.c 	pdu = llc_pdu_sn_hdr(skb);
skb                89 net/llc/llc_pdu.c void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit)
skb                91 net/llc/llc_pdu.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               107 net/llc/llc_pdu.c void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr)
skb               109 net/llc/llc_pdu.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               126 net/llc/llc_pdu.c void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr)
skb               128 net/llc/llc_pdu.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               146 net/llc/llc_pdu.c void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr)
skb               148 net/llc/llc_pdu.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               166 net/llc/llc_pdu.c void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr)
skb               168 net/llc/llc_pdu.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               184 net/llc/llc_pdu.c void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit)
skb               186 net/llc/llc_pdu.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               200 net/llc/llc_pdu.c void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit)
skb               202 net/llc/llc_pdu.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               220 net/llc/llc_pdu.c void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu,
skb               226 net/llc/llc_pdu.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               244 net/llc/llc_pdu.c 	skb_put(skb, sizeof(struct llc_frmr_info));
skb               255 net/llc/llc_pdu.c void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr)
skb               257 net/llc/llc_pdu.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               275 net/llc/llc_pdu.c void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr)
skb               277 net/llc/llc_pdu.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               295 net/llc/llc_pdu.c void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr)
skb               297 net/llc/llc_pdu.c 	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb               314 net/llc/llc_pdu.c void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit)
skb               316 net/llc/llc_pdu.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               330 net/llc/llc_pdu.c static void llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type)
skb               332 net/llc/llc_pdu.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
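The llc_pdu.c initializers all write the same fields of the header already reserved in the skb: the format/type bits in ctrl_1, and the P/F bit plus N(R) in ctrl_2 for supervisory PDUs. A sketch of the RR-command case, with the bit-mask constants assumed from include/net/llc_pdu.h:

void example_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr)
{
	struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);

	pdu->ctrl_1  = LLC_PDU_TYPE_S;		/* supervisory format */
	pdu->ctrl_1 |= LLC_2_PDU_CMD_RR;	/* Receive Ready */
	pdu->ctrl_2  = p_bit & LLC_S_PF_BIT_MASK;
	pdu->ctrl_2 |= (nr << 1) & 0xFE;	/* N(R) in bits 1..7 */
}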
skb                37 net/llc/llc_s_ac.c int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb)
skb                39 net/llc/llc_s_ac.c 	llc_sap_rtn_pdu(sap, skb);
skb                52 net/llc/llc_s_ac.c int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
skb                54 net/llc/llc_s_ac.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                57 net/llc/llc_s_ac.c 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
skb                59 net/llc/llc_s_ac.c 	llc_pdu_init_as_ui_cmd(skb);
skb                60 net/llc/llc_s_ac.c 	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
skb                62 net/llc/llc_s_ac.c 		skb_get(skb);
skb                63 net/llc/llc_s_ac.c 		rc = dev_queue_xmit(skb);
skb                77 net/llc/llc_s_ac.c int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
skb                79 net/llc/llc_s_ac.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                82 net/llc/llc_s_ac.c 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
skb                84 net/llc/llc_s_ac.c 	llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
skb                85 net/llc/llc_s_ac.c 	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
skb                87 net/llc/llc_s_ac.c 		skb_get(skb);
skb                88 net/llc/llc_s_ac.c 		rc = dev_queue_xmit(skb);
skb               101 net/llc/llc_s_ac.c int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb)
skb               107 net/llc/llc_s_ac.c 	llc_pdu_decode_sa(skb, mac_da);
skb               108 net/llc/llc_s_ac.c 	llc_pdu_decode_da(skb, mac_sa);
skb               109 net/llc/llc_s_ac.c 	llc_pdu_decode_ssap(skb, &dsap);
skb               110 net/llc/llc_s_ac.c 	nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U,
skb               133 net/llc/llc_s_ac.c int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
skb               135 net/llc/llc_s_ac.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb               138 net/llc/llc_s_ac.c 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
skb               140 net/llc/llc_s_ac.c 	llc_pdu_init_as_test_cmd(skb);
skb               141 net/llc/llc_s_ac.c 	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
skb               143 net/llc/llc_s_ac.c 		skb_get(skb);
skb               144 net/llc/llc_s_ac.c 		rc = dev_queue_xmit(skb);
skb               149 net/llc/llc_s_ac.c int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
skb               156 net/llc/llc_s_ac.c 	llc_pdu_decode_sa(skb, mac_da);
skb               157 net/llc/llc_s_ac.c 	llc_pdu_decode_da(skb, mac_sa);
skb               158 net/llc/llc_s_ac.c 	llc_pdu_decode_ssap(skb, &dsap);
skb               161 net/llc/llc_s_ac.c 	data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
skb               162 net/llc/llc_s_ac.c 	nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
skb               167 net/llc/llc_s_ac.c 	llc_pdu_init_as_test_rsp(nskb, skb);
skb               183 net/llc/llc_s_ac.c int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb)
skb               196 net/llc/llc_s_ac.c int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb)
skb               198 net/llc/llc_s_ac.c 	llc_sap_rtn_pdu(sap, skb);
skb               210 net/llc/llc_s_ac.c int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb)
skb               212 net/llc/llc_s_ac.c 	llc_sap_rtn_pdu(sap, skb);
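Every transmit action in llc_s_ac.c follows the same four steps visible in the fragments: initialize the LLC header on the event skb, initialize the specific PDU, prepend the MAC header, then take an extra reference and hand the frame to dev_queue_xmit(). Condensed into one sketch for the UI-command case:

static int example_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
{
	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
	int rc;

	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
			    ev->daddr.lsap, LLC_PDU_CMD);
	llc_pdu_init_as_ui_cmd(skb);
	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
	if (!rc) {
		skb_get(skb);		/* xmit consumes one reference */
		rc = dev_queue_xmit(skb);
	}
	return rc;
}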
skb                23 net/llc/llc_s_ev.c int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb)
skb                25 net/llc/llc_s_ev.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                31 net/llc/llc_s_ev.c int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb)
skb                33 net/llc/llc_s_ev.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                34 net/llc/llc_s_ev.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb                41 net/llc/llc_s_ev.c int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb)
skb                43 net/llc/llc_s_ev.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                51 net/llc/llc_s_ev.c int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb)
skb                53 net/llc/llc_s_ev.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                60 net/llc/llc_s_ev.c int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb)
skb                62 net/llc/llc_s_ev.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                63 net/llc/llc_s_ev.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb                70 net/llc/llc_s_ev.c int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb)
skb                72 net/llc/llc_s_ev.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                73 net/llc/llc_s_ev.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb                80 net/llc/llc_s_ev.c int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb)
skb                82 net/llc/llc_s_ev.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                89 net/llc/llc_s_ev.c int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb)
skb                91 net/llc/llc_s_ev.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                92 net/llc/llc_s_ev.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb                99 net/llc/llc_s_ev.c int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb)
skb               101 net/llc/llc_s_ev.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb               102 net/llc/llc_s_ev.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               109 net/llc/llc_s_ev.c int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb)
skb               111 net/llc/llc_s_ev.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                51 net/llc/llc_sap.c 	struct sk_buff *skb;
skb                54 net/llc/llc_sap.c 	skb = alloc_skb(hlen + data_size, GFP_ATOMIC);
skb                56 net/llc/llc_sap.c 	if (skb) {
skb                57 net/llc/llc_sap.c 		skb_reset_mac_header(skb);
skb                58 net/llc/llc_sap.c 		skb_reserve(skb, hlen);
skb                59 net/llc/llc_sap.c 		skb_reset_network_header(skb);
skb                60 net/llc/llc_sap.c 		skb_reset_transport_header(skb);
skb                61 net/llc/llc_sap.c 		skb->protocol = htons(ETH_P_802_2);
skb                62 net/llc/llc_sap.c 		skb->dev      = dev;
skb                64 net/llc/llc_sap.c 			skb_set_owner_w(skb, sk);
skb                66 net/llc/llc_sap.c 	return skb;
skb                69 net/llc/llc_sap.c void llc_save_primitive(struct sock *sk, struct sk_buff *skb, u8 prim)
skb                74 net/llc/llc_sap.c 	addr		  = llc_ui_skb_cb(skb);
skb                78 net/llc/llc_sap.c 	addr->sllc_arphrd = skb->dev->type;
skb                82 net/llc/llc_sap.c 	llc_pdu_decode_sa(skb, addr->sllc_mac);
skb                83 net/llc/llc_sap.c 	llc_pdu_decode_ssap(skb, &addr->sllc_sap);
skb                91 net/llc/llc_sap.c void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb)
skb                93 net/llc/llc_sap.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb                94 net/llc/llc_sap.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb               117 net/llc/llc_sap.c 						      struct sk_buff *skb)
skb               128 net/llc/llc_sap.c 		if (!next_trans[i]->ev(sap, skb)) {
skb               146 net/llc/llc_sap.c 				      struct sk_buff *skb)
skb               152 net/llc/llc_sap.c 		if ((*next_action)(sap, skb))
skb               166 net/llc/llc_sap.c static int llc_sap_next_state(struct llc_sap *sap, struct sk_buff *skb)
skb               173 net/llc/llc_sap.c 	trans = llc_find_sap_trans(sap, skb);
skb               181 net/llc/llc_sap.c 	rc = llc_exec_sap_trans_actions(sap, trans, skb);
skb               203 net/llc/llc_sap.c static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
skb               205 net/llc/llc_sap.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb               208 net/llc/llc_sap.c 	llc_sap_next_state(sap, skb);
skb               210 net/llc/llc_sap.c 	if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) {
skb               211 net/llc/llc_sap.c 		llc_save_primitive(skb->sk, skb, ev->prim);
skb               214 net/llc/llc_sap.c 		if (sock_queue_rcv_skb(skb->sk, skb) == 0)
skb               217 net/llc/llc_sap.c 	kfree_skb(skb);
skb               231 net/llc/llc_sap.c 				 struct sk_buff *skb, u8 *dmac, u8 dsap)
skb               233 net/llc/llc_sap.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb               237 net/llc/llc_sap.c 	memcpy(ev->saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
skb               243 net/llc/llc_sap.c 	llc_sap_state_process(sap, skb);
skb               256 net/llc/llc_sap.c void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
skb               259 net/llc/llc_sap.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb               263 net/llc/llc_sap.c 	memcpy(ev->saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
skb               269 net/llc/llc_sap.c 	llc_sap_state_process(sap, skb);
skb               279 net/llc/llc_sap.c static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
skb               282 net/llc/llc_sap.c 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb               286 net/llc/llc_sap.c 	skb_orphan(skb);
skb               288 net/llc/llc_sap.c 	skb->sk = sk;
skb               289 net/llc/llc_sap.c 	skb->destructor = sock_efree;
skb               290 net/llc/llc_sap.c 	llc_sap_state_process(sap, skb);
skb               350 net/llc/llc_sap.c 				   const struct sk_buff *skb,
skb               357 net/llc/llc_sap.c 	  llc->dev == skb->dev;
skb               360 net/llc/llc_sap.c static void llc_do_mcast(struct llc_sap *sap, struct sk_buff *skb,
skb               367 net/llc/llc_sap.c 		skb1 = skb_clone(skb, GFP_ATOMIC);
skb               388 net/llc/llc_sap.c 			  struct sk_buff *skb)
skb               394 net/llc/llc_sap.c 	struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
skb               401 net/llc/llc_sap.c 		if (!llc_mcast_match(sap, laddr, skb, sk))
skb               408 net/llc/llc_sap.c 			llc_do_mcast(sap, skb, stack, i);
skb               414 net/llc/llc_sap.c 	llc_do_mcast(sap, skb, stack, i);
skb               418 net/llc/llc_sap.c void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb)
skb               422 net/llc/llc_sap.c 	llc_pdu_decode_da(skb, laddr.mac);
skb               423 net/llc/llc_sap.c 	llc_pdu_decode_dsap(skb, &laddr.lsap);
skb               426 net/llc/llc_sap.c 		llc_sap_mcast(sap, &laddr, skb);
skb               427 net/llc/llc_sap.c 		kfree_skb(skb);
skb               431 net/llc/llc_sap.c 			llc_sap_rcv(sap, skb, sk);
skb               434 net/llc/llc_sap.c 			kfree_skb(skb);
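llc_alloc_frame() above is the allocation template for every outgoing LLC frame: reserve headroom for the MAC header, then reset the header offsets so that later llc_pdu_header_init()/llc_mac_hdr_init() calls land in the right place. A condensed sketch (hlen stands for the device hard-header allowance, as in the original):

static struct sk_buff *example_alloc_frame(struct sock *sk,
					   struct net_device *dev,
					   int hlen, int data_size)
{
	struct sk_buff *skb = alloc_skb(hlen + data_size, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);
	skb_reserve(skb, hlen);			/* room for MAC header */
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);	/* LLC header starts here */
	skb->protocol = htons(ETH_P_802_2);
	skb->dev      = dev;
	if (sk)
		skb_set_owner_w(skb, sk);
	return skb;
}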
skb                28 net/llc/llc_station.c static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
skb                30 net/llc/llc_station.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb                38 net/llc/llc_station.c static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
skb                40 net/llc/llc_station.c 	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
skb                48 net/llc/llc_station.c static int llc_station_ac_send_xid_r(struct sk_buff *skb)
skb                52 net/llc/llc_station.c 	struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U,
skb                58 net/llc/llc_station.c 	llc_pdu_decode_sa(skb, mac_da);
skb                59 net/llc/llc_station.c 	llc_pdu_decode_ssap(skb, &dsap);
skb                62 net/llc/llc_station.c 	rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
skb                73 net/llc/llc_station.c static int llc_station_ac_send_test_r(struct sk_buff *skb)
skb                81 net/llc/llc_station.c 	data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
skb                82 net/llc/llc_station.c 	nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
skb                87 net/llc/llc_station.c 	llc_pdu_decode_sa(skb, mac_da);
skb                88 net/llc/llc_station.c 	llc_pdu_decode_ssap(skb, &dsap);
skb                90 net/llc/llc_station.c 	llc_pdu_init_as_test_rsp(nskb, skb);
skb                91 net/llc/llc_station.c 	rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
skb               108 net/llc/llc_station.c static void llc_station_rcv(struct sk_buff *skb)
skb               110 net/llc/llc_station.c 	if (llc_stat_ev_rx_null_dsap_xid_c(skb))
skb               111 net/llc/llc_station.c 		llc_station_ac_send_xid_r(skb);
skb               112 net/llc/llc_station.c 	else if (llc_stat_ev_rx_null_dsap_test_c(skb))
skb               113 net/llc/llc_station.c 		llc_station_ac_send_test_r(skb);
skb               114 net/llc/llc_station.c 	kfree_skb(skb);
skb               182 net/mac80211/agg-rx.c 				   struct sk_buff *skb,
skb               198 net/mac80211/agg-rx.c 	pos = skb_put_zero(skb, 2 + sizeof(struct ieee80211_addba_ext_ie));
skb               221 net/mac80211/agg-rx.c 	struct sk_buff *skb;
skb               226 net/mac80211/agg-rx.c 	skb = dev_alloc_skb(sizeof(*mgmt) +
skb               229 net/mac80211/agg-rx.c 	if (!skb)
skb               232 net/mac80211/agg-rx.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb               233 net/mac80211/agg-rx.c 	mgmt = skb_put_zero(skb, 24);
skb               248 net/mac80211/agg-rx.c 	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
skb               263 net/mac80211/agg-rx.c 		ieee80211_add_addbaext(sdata, skb, addbaext);
skb               265 net/mac80211/agg-rx.c 	ieee80211_tx_skb(sdata, skb);
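The ADDBA-response builder above is the standard mac80211 management-frame pattern: allocate with hw.extra_tx_headroom slack, reserve it, zero the 24-byte 802.11 header with skb_put_zero(), then extend the skb with the action body. A skeleton of that pattern for a generic action frame (body contents left to the caller):

static struct sk_buff *example_build_action(struct ieee80211_local *local,
					    size_t body_len)
{
	struct ieee80211_mgmt *mgmt;
	struct sk_buff *skb;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
			    sizeof(*mgmt) + body_len);
	if (!skb)
		return NULL;
	/* headroom stays untouched; the driver prepends into it */
	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = skb_put_zero(skb, 24);		/* 3-address header */
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);
	skb_put_zero(skb, body_len);		/* action body, filled later */
	return skb;
}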
skb                67 net/mac80211/agg-tx.c 	struct sk_buff *skb;
skb                71 net/mac80211/agg-tx.c 	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
skb                73 net/mac80211/agg-tx.c 	if (!skb)
skb                76 net/mac80211/agg-tx.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb                77 net/mac80211/agg-tx.c 	mgmt = skb_put_zero(skb, 24);
skb                92 net/mac80211/agg-tx.c 	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
skb               109 net/mac80211/agg-tx.c 	ieee80211_tx_skb(sdata, skb);
skb               116 net/mac80211/agg-tx.c 	struct sk_buff *skb;
skb               120 net/mac80211/agg-tx.c 	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
skb               121 net/mac80211/agg-tx.c 	if (!skb)
skb               124 net/mac80211/agg-tx.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb               125 net/mac80211/agg-tx.c 	bar = skb_put_zero(skb, sizeof(*bar));
skb               136 net/mac80211/agg-tx.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
skb               138 net/mac80211/agg-tx.c 	ieee80211_tx_skb_tid(sdata, skb, tid);
skb              2626 net/mac80211/cfg.c 				   struct sk_buff *skb,
skb              2635 net/mac80211/cfg.c 	return local->ops->testmode_dump(&local->hw, skb, cb, data, len);
skb              3440 net/mac80211/cfg.c int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
skb              3447 net/mac80211/cfg.c 	ack_skb = skb_copy(skb, gfp);
skb              3461 net/mac80211/cfg.c 	IEEE80211_SKB_CB(skb)->ack_frame_id = id;
skb              3544 net/mac80211/cfg.c 	struct sk_buff *skb;
skb              3583 net/mac80211/cfg.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
skb              3584 net/mac80211/cfg.c 	if (!skb) {
skb              3589 net/mac80211/cfg.c 	skb->dev = dev;
skb              3591 net/mac80211/cfg.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb              3593 net/mac80211/cfg.c 	nullfunc = skb_put(skb, size);
skb              3601 net/mac80211/cfg.c 	info = IEEE80211_SKB_CB(skb);
skb              3607 net/mac80211/cfg.c 	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
skb              3608 net/mac80211/cfg.c 	skb->priority = 7;
skb              3612 net/mac80211/cfg.c 	ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_ATOMIC);
skb              3614 net/mac80211/cfg.c 		kfree_skb(skb);
skb              3619 net/mac80211/cfg.c 	ieee80211_xmit(sdata, sta, skb, 0);
skb               315 net/mac80211/debugfs_netdev.c 	struct sk_buff *skb;
skb               325 net/mac80211/debugfs_netdev.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
skb               326 net/mac80211/debugfs_netdev.c 	if (!skb)
skb               328 net/mac80211/debugfs_netdev.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb               330 net/mac80211/debugfs_netdev.c 	hdr = skb_put_zero(skb, 24);
skb               347 net/mac80211/debugfs_netdev.c 			dev_kfree_skb(skb);
skb               356 net/mac80211/debugfs_netdev.c 		dev_kfree_skb(skb);
skb               366 net/mac80211/debugfs_netdev.c 	skb_put_zero(skb, 50);
skb               368 net/mac80211/debugfs_netdev.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;
skb               370 net/mac80211/debugfs_netdev.c 	ieee80211_tx_skb(sdata, skb);
skb                34 net/mac80211/driver-ops.h 			  struct sk_buff *skb)
skb                36 net/mac80211/driver-ops.h 	local->ops->tx(&local->hw, control, skb);
skb              1224 net/mac80211/driver-ops.h 					     struct sk_buff *skb)
skb              1229 net/mac80211/driver-ops.h 	return local->ops->can_aggregate_in_amsdu(&local->hw, head, skb);
skb               217 net/mac80211/fils_aead.c int fils_encrypt_assoc_req(struct sk_buff *skb,
skb               220 net/mac80211/fils_aead.c 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
skb               235 net/mac80211/fils_aead.c 				       ies, skb->data + skb->len - ies);
skb               261 net/mac80211/fils_aead.c 	crypt_len = skb->data + skb->len - encr;
skb               262 net/mac80211/fils_aead.c 	skb_put(skb, AES_BLOCK_SIZE);
skb                10 net/mac80211/fils_aead.h int fils_encrypt_assoc_req(struct sk_buff *skb,
skb               413 net/mac80211/ht.c 	struct sk_buff *skb;
skb               417 net/mac80211/ht.c 	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
skb               418 net/mac80211/ht.c 	if (!skb)
skb               421 net/mac80211/ht.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb               422 net/mac80211/ht.c 	mgmt = skb_put_zero(skb, 24);
skb               437 net/mac80211/ht.c 	skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba));
skb               447 net/mac80211/ht.c 	ieee80211_tx_skb(sdata, skb);
skb               493 net/mac80211/ht.c 	struct sk_buff *skb;
skb               497 net/mac80211/ht.c 	skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom);
skb               498 net/mac80211/ht.c 	if (!skb)
skb               501 net/mac80211/ht.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb               502 net/mac80211/ht.c 	action_frame = skb_put(skb, 27);
skb               530 net/mac80211/ht.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
skb               531 net/mac80211/ht.c 	ieee80211_tx_skb(sdata, skb);
skb              1532 net/mac80211/ibss.c 	struct sk_buff *skb;
skb              1574 net/mac80211/ibss.c 	skb = dev_alloc_skb(local->tx_headroom + presp->head_len);
skb              1575 net/mac80211/ibss.c 	if (!skb)
skb              1578 net/mac80211/ibss.c 	skb_reserve(skb, local->tx_headroom);
skb              1579 net/mac80211/ibss.c 	skb_put_data(skb, presp->head, presp->head_len);
skb              1581 net/mac80211/ibss.c 	memcpy(((struct ieee80211_mgmt *) skb->data)->da, mgmt->sa, ETH_ALEN);
skb              1583 net/mac80211/ibss.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
skb              1587 net/mac80211/ibss.c 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_NO_ACK;
skb              1589 net/mac80211/ibss.c 	ieee80211_tx_skb(sdata, skb);
skb              1618 net/mac80211/ibss.c 				   struct sk_buff *skb)
skb              1626 net/mac80211/ibss.c 	rx_status = IEEE80211_SKB_RXCB(skb);
skb              1627 net/mac80211/ibss.c 	mgmt = (struct ieee80211_mgmt *) skb->data;
skb              1637 net/mac80211/ibss.c 		ieee80211_rx_mgmt_probe_req(sdata, skb);
skb              1641 net/mac80211/ibss.c 		ieee80211_rx_mgmt_probe_beacon(sdata, mgmt, skb->len,
skb              1645 net/mac80211/ibss.c 		ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len);
skb              1648 net/mac80211/ibss.c 		ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len);
skb              1653 net/mac80211/ibss.c 			ies_len = skb->len -
skb              1667 net/mac80211/ibss.c 			ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, skb->len,
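ieee80211_ibss_rx_queued_mgmt() above demonstrates the deferred-dispatch idiom: management frames are queued to the interface with their RX status stashed in skb->cb, then routed by a switch on the frame-control subtype. A stripped-down sketch of the same dispatcher (handlers are placeholders):

static void example_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
				   struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	u16 fc = le16_to_cpu(mgmt->frame_control);

	/* RX metadata travels in the cb: IEEE80211_SKB_RXCB(skb) */
	switch (fc & IEEE80211_FCTL_STYPE) {
	case IEEE80211_STYPE_PROBE_REQ:
		/* placeholder: probe request handler */
		break;
	case IEEE80211_STYPE_BEACON:
		/* placeholder: beacon handler, gets mgmt and skb->len */
		break;
	}
}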
skb               170 net/mac80211/ieee80211_i.h 	struct sk_buff *skb;
skb               220 net/mac80211/ieee80211_i.h 	struct sk_buff *skb;
skb              1602 net/mac80211/ieee80211_i.h int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
skb              1627 net/mac80211/ieee80211_i.h 				  struct sk_buff *skb);
skb              1647 net/mac80211/ieee80211_i.h 				   struct sk_buff *skb);
skb              1665 net/mac80211/ieee80211_i.h 				   struct sk_buff *skb);
skb              1681 net/mac80211/ieee80211_i.h void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb);
skb              1757 net/mac80211/ieee80211_i.h netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
skb              1759 net/mac80211/ieee80211_i.h netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
skb              1761 net/mac80211/ieee80211_i.h void __ieee80211_subif_start_xmit(struct sk_buff *skb,
skb              1769 net/mac80211/ieee80211_i.h 			      struct sk_buff *skb, u32 info_flags);
skb              1770 net/mac80211/ieee80211_i.h void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
skb              1940 net/mac80211/ieee80211_i.h 		    struct sta_info *sta, struct sk_buff *skb,
skb              1944 net/mac80211/ieee80211_i.h 				 struct sk_buff *skb, int tid,
skb              1949 net/mac80211/ieee80211_i.h 			  struct sk_buff *skb, int tid,
skb              1953 net/mac80211/ieee80211_i.h 	__ieee80211_tx_skb_tid_band(sdata, skb, tid, band, txdata_flags);
skb              1958 net/mac80211/ieee80211_i.h 					struct sk_buff *skb, int tid)
skb              1966 net/mac80211/ieee80211_i.h 		kfree_skb(skb);
skb              1970 net/mac80211/ieee80211_i.h 	__ieee80211_tx_skb_tid_band(sdata, skb, tid,
skb              1976 net/mac80211/ieee80211_i.h 				    struct sk_buff *skb)
skb              1979 net/mac80211/ieee80211_i.h 	ieee80211_tx_skb_tid(sdata, skb, 7);
skb              2037 net/mac80211/ieee80211_i.h 			       struct sk_buff *skb);
skb              2157 net/mac80211/ieee80211_i.h 			    struct sk_buff *skb, bool need_basic,
skb              2160 net/mac80211/ieee80211_i.h 				struct sk_buff *skb, bool need_basic,
skb               798 net/mac80211/iface.c 	struct sk_buff *skb, *tmp;
skb               925 net/mac80211/iface.c 		skb_queue_walk_safe(&ps->bc_buf, skb, tmp) {
skb               926 net/mac80211/iface.c 			if (skb->dev == sdata->dev) {
skb               927 net/mac80211/iface.c 				__skb_unlink(skb, &ps->bc_buf);
skb               929 net/mac80211/iface.c 				ieee80211_free_txskb(&local->hw, skb);
skb               998 net/mac80211/iface.c 		skb_queue_walk_safe(&local->pending[i], skb, tmp) {
skb               999 net/mac80211/iface.c 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1001 net/mac80211/iface.c 				__skb_unlink(skb, &local->pending[i]);
skb              1002 net/mac80211/iface.c 				ieee80211_free_txskb(&local->hw, skb);
skb              1132 net/mac80211/iface.c 					 struct sk_buff *skb,
skb              1135 net/mac80211/iface.c 	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
skb              1177 net/mac80211/iface.c 					  struct sk_buff *skb,
skb              1183 net/mac80211/iface.c 	struct ieee80211_radiotap_header *rtap = (void *)skb->data;
skb              1188 net/mac80211/iface.c 	if (skb->len < 4 ||
skb              1189 net/mac80211/iface.c 	    skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */)
skb              1192 net/mac80211/iface.c 	hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
skb              1194 net/mac80211/iface.c 	return ieee80211_select_queue_80211(sdata, skb, hdr);
skb              1233 net/mac80211/iface.c 	struct sk_buff *skb;
skb              1246 net/mac80211/iface.c 	while ((skb = skb_dequeue(&sdata->skb_queue))) {
skb              1247 net/mac80211/iface.c 		struct ieee80211_mgmt *mgmt = (void *)skb->data;
skb              1251 net/mac80211/iface.c 			int len = skb->len;
skb              1283 net/mac80211/iface.c 				status = IEEE80211_SKB_RXCB(skb);
skb              1337 net/mac80211/iface.c 			ieee80211_sta_rx_queued_mgmt(sdata, skb);
skb              1340 net/mac80211/iface.c 			ieee80211_ibss_rx_queued_mgmt(sdata, skb);
skb              1345 net/mac80211/iface.c 			ieee80211_mesh_rx_queued_mgmt(sdata, skb);
skb              1352 net/mac80211/iface.c 		kfree_skb(skb);
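The skb_queue_walk_safe()/__skb_unlink() pairing in iface.c above is the safe way to drop selected entries from an sk_buff queue while iterating over it. A generic sketch; as in the original (which wraps the walk of ps->bc_buf in the queue's lock), the caller is assumed to hold the queue lock:

static void example_purge_dev(struct sk_buff_head *q, struct net_device *dev)
{
	struct sk_buff *skb, *tmp;

	/* the _safe variant: 'tmp' keeps the walk valid across unlink */
	skb_queue_walk_safe(q, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, q);	/* no locking; lock held by caller */
			kfree_skb(skb);
		}
	}
}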
skb               219 net/mac80211/main.c 	struct sk_buff *skb;
skb               221 net/mac80211/main.c 	while ((skb = skb_dequeue(&local->skb_queue)) ||
skb               222 net/mac80211/main.c 	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
skb               223 net/mac80211/main.c 		switch (skb->pkt_type) {
skb               227 net/mac80211/main.c 			skb->pkt_type = 0;
skb               228 net/mac80211/main.c 			ieee80211_rx(&local->hw, skb);
skb               231 net/mac80211/main.c 			skb->pkt_type = 0;
skb               232 net/mac80211/main.c 			ieee80211_tx_status(&local->hw, skb);
skb               236 net/mac80211/main.c 			     skb->pkt_type);
skb               237 net/mac80211/main.c 			dev_kfree_skb(skb);
skb              1398 net/mac80211/main.c 	struct sk_buff *skb;
skb              1401 net/mac80211/main.c 	BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
skb              1403 net/mac80211/main.c 		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));
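The BUILD_BUG_ON lines above are the compile-time guard that makes IEEE80211_SKB_CB()/IEEE80211_SKB_RXCB() legal: per-frame state rides in the fixed 48-byte skb->cb array, so the structs must fit. The same guard applies to any private cb overlay; a sketch with a hypothetical struct:

struct example_cb {			/* hypothetical private per-skb state */
	u32 flags;
	unsigned long deadline;
};

static inline struct example_cb *example_skb_cb(struct sk_buff *skb)
{
	/* fails the build if the overlay outgrows skb->cb */
	BUILD_BUG_ON(sizeof(struct example_cb) > sizeof(skb->cb));
	return (struct example_cb *)skb->cb;
}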
skb               249 net/mac80211/mesh.c 			 struct sk_buff *skb)
skb               258 net/mac80211/mesh.c 	if (skb_tailroom(skb) < 2 + meshconf_len)
skb               261 net/mac80211/mesh.c 	pos = skb_put(skb, 2 + meshconf_len);
skb               266 net/mac80211/mesh.c 	ifmsh->meshconf_offset = pos - skb->data;
skb               294 net/mac80211/mesh.c int mesh_add_meshid_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
skb               299 net/mac80211/mesh.c 	if (skb_tailroom(skb) < 2 + ifmsh->mesh_id_len)
skb               302 net/mac80211/mesh.c 	pos = skb_put(skb, 2 + ifmsh->mesh_id_len);
skb               312 net/mac80211/mesh.c 				    struct sk_buff *skb)
skb               323 net/mac80211/mesh.c 	if (skb_tailroom(skb) < 4)
skb               326 net/mac80211/mesh.c 	pos = skb_put(skb, 2 + 2);
skb               335 net/mac80211/mesh.c 			struct sk_buff *skb)
skb               350 net/mac80211/mesh.c 		if (skb_tailroom(skb) < len)
skb               352 net/mac80211/mesh.c 		skb_put_data(skb, data, len);
skb               358 net/mac80211/mesh.c int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
skb               374 net/mac80211/mesh.c 	if (skb_tailroom(skb) < len)
skb               376 net/mac80211/mesh.c 	skb_put_data(skb, data, len);
skb               382 net/mac80211/mesh.c 				 struct sk_buff *skb)
skb               388 net/mac80211/mesh.c 	if (skb_tailroom(skb) < 3)
skb               400 net/mac80211/mesh.c 	pos = skb_put(skb, 2 + 1);
skb               409 net/mac80211/mesh.c 		       struct sk_buff *skb)
skb               424 net/mac80211/mesh.c 	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
skb               427 net/mac80211/mesh.c 	pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
skb               434 net/mac80211/mesh.c 			struct sk_buff *skb)
skb               461 net/mac80211/mesh.c 	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
skb               464 net/mac80211/mesh.c 	pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
skb               473 net/mac80211/mesh.c 			struct sk_buff *skb)
skb               488 net/mac80211/mesh.c 	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
skb               491 net/mac80211/mesh.c 	pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap));
skb               498 net/mac80211/mesh.c 			 struct sk_buff *skb)
skb               525 net/mac80211/mesh.c 	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation))
skb               528 net/mac80211/mesh.c 	pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
skb               536 net/mac80211/mesh.c 		       struct sk_buff *skb, u8 ie_len)
skb               554 net/mac80211/mesh.c 	if (skb_tailroom(skb) < ie_len)
skb               557 net/mac80211/mesh.c 	pos = skb_put(skb, ie_len);
skb               564 net/mac80211/mesh.c 			struct sk_buff *skb)
skb               581 net/mac80211/mesh.c 	if (skb_tailroom(skb) < 2 + 1 + sizeof(struct ieee80211_he_operation))
skb               584 net/mac80211/mesh.c 	pos = skb_put(skb, 2 + 1 + sizeof(struct ieee80211_he_operation));
skb               730 net/mac80211/mesh.c 	struct sk_buff *skb;
skb               773 net/mac80211/mesh.c 	skb = dev_alloc_skb(max(head_len, tail_len));
skb               775 net/mac80211/mesh.c 	if (!bcn || !skb)
skb               785 net/mac80211/mesh.c 	mgmt = skb_put_zero(skb, hdr_len);
skb               797 net/mac80211/mesh.c 	pos = skb_put(skb, 2);
skb               809 net/mac80211/mesh.c 		pos = skb_put_zero(skb, ie_len);
skb               836 net/mac80211/mesh.c 			pos = skb_put_zero(skb, ie_len);
skb               852 net/mac80211/mesh.c 			pos = skb_put_zero(skb, ie_len);
skb               866 net/mac80211/mesh.c 	if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
skb               867 net/mac80211/mesh.c 	    mesh_add_ds_params_ie(sdata, skb))
skb               870 net/mac80211/mesh.c 	bcn->head_len = skb->len;
skb               871 net/mac80211/mesh.c 	memcpy(bcn->head, skb->data, bcn->head_len);
skb               874 net/mac80211/mesh.c 	skb_trim(skb, 0);
skb               877 net/mac80211/mesh.c 	if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
skb               878 net/mac80211/mesh.c 	    mesh_add_rsn_ie(sdata, skb) ||
skb               879 net/mac80211/mesh.c 	    mesh_add_ht_cap_ie(sdata, skb) ||
skb               880 net/mac80211/mesh.c 	    mesh_add_ht_oper_ie(sdata, skb) ||
skb               881 net/mac80211/mesh.c 	    mesh_add_meshid_ie(sdata, skb) ||
skb               882 net/mac80211/mesh.c 	    mesh_add_meshconf_ie(sdata, skb) ||
skb               883 net/mac80211/mesh.c 	    mesh_add_awake_window_ie(sdata, skb) ||
skb               884 net/mac80211/mesh.c 	    mesh_add_vht_cap_ie(sdata, skb) ||
skb               885 net/mac80211/mesh.c 	    mesh_add_vht_oper_ie(sdata, skb) ||
skb               886 net/mac80211/mesh.c 	    mesh_add_he_cap_ie(sdata, skb, ie_len_he_cap) ||
skb               887 net/mac80211/mesh.c 	    mesh_add_he_oper_ie(sdata, skb) ||
skb               888 net/mac80211/mesh.c 	    mesh_add_vendor_ies(sdata, skb))
skb               891 net/mac80211/mesh.c 	bcn->tail_len = skb->len;
skb               892 net/mac80211/mesh.c 	memcpy(bcn->tail, skb->data, bcn->tail_len);
skb               896 net/mac80211/mesh.c 	dev_kfree_skb(skb);
skb               901 net/mac80211/mesh.c 	dev_kfree_skb(skb);
skb              1334 net/mac80211/mesh.c 	struct sk_buff *skb;
skb              1337 net/mac80211/mesh.c 	skb = dev_alloc_skb(local->tx_headroom + len);
skb              1338 net/mac80211/mesh.c 	if (!skb)
skb              1340 net/mac80211/mesh.c 	skb_reserve(skb, local->tx_headroom);
skb              1341 net/mac80211/mesh.c 	mgmt_fwd = skb_put(skb, len);
skb              1352 net/mac80211/mesh.c 	ieee80211_tx_skb(sdata, skb);
skb              1428 net/mac80211/mesh.c 				   struct sk_buff *skb)
skb              1440 net/mac80211/mesh.c 	rx_status = IEEE80211_SKB_RXCB(skb);
skb              1441 net/mac80211/mesh.c 	mgmt = (struct ieee80211_mgmt *) skb->data;
skb              1447 net/mac80211/mesh.c 		ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len,
skb              1451 net/mac80211/mesh.c 		ieee80211_mesh_rx_probe_req(sdata, mgmt, skb->len);
skb              1454 net/mac80211/mesh.c 		ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
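Each mesh_add_*_ie() helper above repeats one TLV idiom: check skb_tailroom() for the element, skb_put() exactly 2 + len bytes, then write element ID, length, and payload. The idiom, generalized:

static int example_add_ie(struct sk_buff *skb, u8 eid,
			  const u8 *data, u8 len)
{
	u8 *pos;

	if (skb_tailroom(skb) < 2 + len)
		return -ENOMEM;		/* caller undersized the skb */
	pos = skb_put(skb, 2 + len);
	*pos++ = eid;			/* element ID */
	*pos++ = len;			/* element length */
	memcpy(pos, data, len);
	return 0;
}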
skb               206 net/mac80211/mesh.h 			 struct sk_buff *skb);
skb               208 net/mac80211/mesh.h 		       struct sk_buff *skb);
skb               210 net/mac80211/mesh.h 		    struct sk_buff *skb);
skb               212 net/mac80211/mesh.h 			struct sk_buff *skb);
skb               214 net/mac80211/mesh.h 		       struct sk_buff *skb);
skb               216 net/mac80211/mesh.h 			struct sk_buff *skb);
skb               218 net/mac80211/mesh.h 			struct sk_buff *skb);
skb               220 net/mac80211/mesh.h 			 struct sk_buff *skb);
skb               222 net/mac80211/mesh.h 		       struct sk_buff *skb, u8 ie_len);
skb               224 net/mac80211/mesh.h 			struct sk_buff *skb);
skb               258 net/mac80211/mesh.h 			struct sk_buff *skb);
skb               260 net/mac80211/mesh.h 			 struct sk_buff *skb);
skb               315 net/mac80211/mesh.h 			     struct sk_buff *skb);
skb               109 net/mac80211/mesh_hwmp.c 	struct sk_buff *skb;
skb               115 net/mac80211/mesh_hwmp.c 	skb = dev_alloc_skb(local->tx_headroom +
skb               118 net/mac80211/mesh_hwmp.c 	if (!skb)
skb               120 net/mac80211/mesh_hwmp.c 	skb_reserve(skb, local->tx_headroom);
skb               121 net/mac80211/mesh_hwmp.c 	mgmt = skb_put_zero(skb, hdr_len);
skb               137 net/mac80211/mesh_hwmp.c 		pos = skb_put(skb, 2 + ie_len);
skb               143 net/mac80211/mesh_hwmp.c 		pos = skb_put(skb, 2 + ie_len);
skb               149 net/mac80211/mesh_hwmp.c 		pos = skb_put(skb, 2 + ie_len);
skb               153 net/mac80211/mesh_hwmp.c 		kfree_skb(skb);
skb               193 net/mac80211/mesh_hwmp.c 	ieee80211_tx_skb(sdata, skb);
skb               201 net/mac80211/mesh_hwmp.c 		struct sk_buff *skb)
skb               203 net/mac80211/mesh_hwmp.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               204 net/mac80211/mesh_hwmp.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               206 net/mac80211/mesh_hwmp.c 	skb_reset_mac_header(skb);
skb               207 net/mac80211/mesh_hwmp.c 	skb_reset_network_header(skb);
skb               208 net/mac80211/mesh_hwmp.c 	skb_reset_transport_header(skb);
skb               211 net/mac80211/mesh_hwmp.c 	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
skb               212 net/mac80211/mesh_hwmp.c 	skb->priority = 7;
skb               216 net/mac80211/mesh_hwmp.c 	ieee80211_set_qos_hdr(sdata, skb);
skb               239 net/mac80211/mesh_hwmp.c 	struct sk_buff *skb;
skb               249 net/mac80211/mesh_hwmp.c 	skb = dev_alloc_skb(local->tx_headroom +
skb               254 net/mac80211/mesh_hwmp.c 	if (!skb)
skb               256 net/mac80211/mesh_hwmp.c 	skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom);
skb               257 net/mac80211/mesh_hwmp.c 	mgmt = skb_put_zero(skb, hdr_len);
skb               269 net/mac80211/mesh_hwmp.c 	pos = skb_put(skb, 2 + ie_len);
skb               288 net/mac80211/mesh_hwmp.c 	prepare_frame_for_deferred_tx(sdata, skb);
skb               291 net/mac80211/mesh_hwmp.c 	ieee80211_add_pending_skb(local, skb);
skb              1133 net/mac80211/mesh_hwmp.c 			 struct sk_buff *skb)
skb              1135 net/mac80211/mesh_hwmp.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1136 net/mac80211/mesh_hwmp.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1149 net/mac80211/mesh_hwmp.c 	if (!mesh_nexthop_lookup(sdata, skb))
skb              1157 net/mac80211/mesh_hwmp.c 			mesh_path_discard_frame(sdata, skb);
skb              1170 net/mac80211/mesh_hwmp.c 	ieee80211_set_qos_hdr(sdata, skb);
skb              1171 net/mac80211/mesh_hwmp.c 	skb_queue_tail(&mpath->frame_queue, skb);
skb              1189 net/mac80211/mesh_hwmp.c 			struct sk_buff *skb)
skb              1193 net/mac80211/mesh_hwmp.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb                85 net/mac80211/mesh_pathtbl.c 	struct sk_buff *skb;
skb                92 net/mac80211/mesh_pathtbl.c 	skb_queue_walk(&mpath->frame_queue, skb) {
skb                93 net/mac80211/mesh_pathtbl.c 		hdr = (struct ieee80211_hdr *) skb->data;
skb               102 net/mac80211/mesh_pathtbl.c static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
skb               110 net/mac80211/mesh_pathtbl.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               112 net/mac80211/mesh_pathtbl.c 	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
skb               119 net/mac80211/mesh_pathtbl.c 		skb_push(skb, 2 * ETH_ALEN);
skb               120 net/mac80211/mesh_pathtbl.c 		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
skb               122 net/mac80211/mesh_pathtbl.c 		hdr = (struct ieee80211_hdr *) skb->data;
skb               126 net/mac80211/mesh_pathtbl.c 		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
skb               133 net/mac80211/mesh_pathtbl.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb               164 net/mac80211/mesh_pathtbl.c 	struct sk_buff *skb, *fskb, *tmp;
skb               186 net/mac80211/mesh_pathtbl.c 		skb = skb_copy(fskb, GFP_ATOMIC);
skb               187 net/mac80211/mesh_pathtbl.c 		if (WARN_ON(!skb))
skb               190 net/mac80211/mesh_pathtbl.c 		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
skb               191 net/mac80211/mesh_pathtbl.c 		skb_queue_tail(&gate_mpath->frame_queue, skb);
skb               716 net/mac80211/mesh_pathtbl.c 			     struct sk_buff *skb)
skb               718 net/mac80211/mesh_pathtbl.c 	kfree_skb(skb);
skb               731 net/mac80211/mesh_pathtbl.c 	struct sk_buff *skb;
skb               733 net/mac80211/mesh_pathtbl.c 	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
skb               734 net/mac80211/mesh_pathtbl.c 		mesh_path_discard_frame(mpath->sdata, skb);
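
The mesh_pathtbl.c lines at 731-734 drain a per-path frame queue, discarding each frame. The generic drain pattern, sketched:

	#include <linux/skbuff.h>

	/* Drain a queue, dropping every frame (cf. the flush loop above).
	 * skb_dequeue() takes the queue lock internally. */
	static void example_flush_queue(struct sk_buff_head *q)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(q)) != NULL)
			kfree_skb(skb);
	}
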
skb               215 net/mac80211/mesh_plink.c 	struct sk_buff *skb;
skb               227 net/mac80211/mesh_plink.c 	skb = dev_alloc_skb(local->tx_headroom +
skb               243 net/mac80211/mesh_plink.c 	if (!skb)
skb               245 net/mac80211/mesh_plink.c 	info = IEEE80211_SKB_CB(skb);
skb               246 net/mac80211/mesh_plink.c 	skb_reserve(skb, local->tx_headroom);
skb               247 net/mac80211/mesh_plink.c 	mgmt = skb_put_zero(skb, hdr_len);
skb               268 net/mac80211/mesh_plink.c 		pos = skb_put_zero(skb, 2);
skb               271 net/mac80211/mesh_plink.c 			pos = skb_put(skb, 2);
skb               274 net/mac80211/mesh_plink.c 		if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
skb               275 net/mac80211/mesh_plink.c 		    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
skb               276 net/mac80211/mesh_plink.c 		    mesh_add_rsn_ie(sdata, skb) ||
skb               277 net/mac80211/mesh_plink.c 		    mesh_add_meshid_ie(sdata, skb) ||
skb               278 net/mac80211/mesh_plink.c 		    mesh_add_meshconf_ie(sdata, skb))
skb               282 net/mac80211/mesh_plink.c 		if (mesh_add_meshid_ie(sdata, skb))
skb               306 net/mac80211/mesh_plink.c 	if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
skb               309 net/mac80211/mesh_plink.c 	pos = skb_put(skb, 2 + ie_len);
skb               326 net/mac80211/mesh_plink.c 		if (mesh_add_ht_cap_ie(sdata, skb) ||
skb               327 net/mac80211/mesh_plink.c 		    mesh_add_ht_oper_ie(sdata, skb) ||
skb               328 net/mac80211/mesh_plink.c 		    mesh_add_vht_cap_ie(sdata, skb) ||
skb               329 net/mac80211/mesh_plink.c 		    mesh_add_vht_oper_ie(sdata, skb) ||
skb               330 net/mac80211/mesh_plink.c 		    mesh_add_he_cap_ie(sdata, skb, ie_len_he_cap) ||
skb               331 net/mac80211/mesh_plink.c 		    mesh_add_he_oper_ie(sdata, skb))
skb               335 net/mac80211/mesh_plink.c 	if (mesh_add_vendor_ies(sdata, skb))
skb               338 net/mac80211/mesh_plink.c 	ieee80211_tx_skb(sdata, skb);
skb               341 net/mac80211/mesh_plink.c 	kfree_skb(skb);
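
The mesh_plink.c frame builder checks each IE helper in an || chain and frees the frame on the first failure. A compressed sketch of that unwinding style, reusing the hypothetical example_add_ie() from the mesh.h note above:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	/* "Append IEs, free on first failure", as in the plink builder;
	 * example_add_ie() is the hypothetical helper sketched earlier,
	 * and the 0xdd element ids are illustrative. */
	static int example_finish_frame(struct sk_buff *skb,
					const u8 *ie_a, u8 len_a,
					const u8 *ie_b, u8 len_b)
	{
		if (example_add_ie(skb, 0xdd, ie_a, len_a) ||
		    example_add_ie(skb, 0xdd, ie_b, len_b))
			goto free;

		/* hand the finished frame to the TX path here */
		return 0;

	free:
		kfree_skb(skb);
		return -ENOMEM;
	}
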
skb                21 net/mac80211/mesh_ps.c 	struct sk_buff *skb;
skb                25 net/mac80211/mesh_ps.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size + 2);
skb                26 net/mac80211/mesh_ps.c 	if (!skb)
skb                28 net/mac80211/mesh_ps.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb                30 net/mac80211/mesh_ps.c 	nullfunc = skb_put(skb, size);
skb                39 net/mac80211/mesh_ps.c 	skb_put_zero(skb, 2); /* append QoS control field */
skb                42 net/mac80211/mesh_ps.c 	return skb;
skb                50 net/mac80211/mesh_ps.c 	struct sk_buff *skb;
skb                52 net/mac80211/mesh_ps.c 	skb = mps_qos_null_get(sta);
skb                53 net/mac80211/mesh_ps.c 	if (!skb)
skb                61 net/mac80211/mesh_ps.c 		u8 *qc = ieee80211_get_qos_ctl((void *) skb->data);
skb                66 net/mac80211/mesh_ps.c 	ieee80211_tx_skb(sta->sdata, skb);
skb               363 net/mac80211/mesh_ps.c 	struct sk_buff *skb;
skb               368 net/mac80211/mesh_ps.c 	skb = mps_qos_null_get(sta);
skb               369 net/mac80211/mesh_ps.c 	if (!skb)
skb               372 net/mac80211/mesh_ps.c 	nullfunc = (struct ieee80211_hdr *) skb->data;
skb               390 net/mac80211/mesh_ps.c 	info = IEEE80211_SKB_CB(skb);
skb               398 net/mac80211/mesh_ps.c 	ieee80211_tx_skb(sdata, skb);
skb               412 net/mac80211/mesh_ps.c 	struct sk_buff *new_skb, *skb = skb_peek_tail(frames);
skb               413 net/mac80211/mesh_ps.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               451 net/mac80211/mesh_ps.c 	struct sk_buff *skb;
skb               459 net/mac80211/mesh_ps.c 			skb = skb_dequeue(&sta->tx_filtered[ac]);
skb               460 net/mac80211/mesh_ps.c 			if (!skb) {
skb               461 net/mac80211/mesh_ps.c 				skb = skb_dequeue(
skb               463 net/mac80211/mesh_ps.c 				if (skb)
skb               466 net/mac80211/mesh_ps.c 			if (!skb)
skb               469 net/mac80211/mesh_ps.c 			__skb_queue_tail(&frames, skb);
skb               491 net/mac80211/mesh_ps.c 	skb_queue_walk(&frames, skb) {
skb               492 net/mac80211/mesh_ps.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               493 net/mac80211/mesh_ps.c 		struct ieee80211_hdr *hdr = (void *) skb->data;
skb               502 net/mac80211/mesh_ps.c 		if (more_data || !skb_queue_is_last(&frames, skb))
skb               509 net/mac80211/mesh_ps.c 		if (skb_queue_is_last(&frames, skb) &&
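
The mesh_ps.c release loop at 491-509 walks the collected frames and sets the more-data indication on every frame except the last one queued. That queue-walk pattern, sketched generically:

	#include <linux/ieee80211.h>
	#include <linux/skbuff.h>

	/* Flag "more data" on all but the last queued frame, mirroring
	 * the release loop above. */
	static void example_mark_more_data(struct sk_buff_head *frames)
	{
		struct sk_buff *skb;

		skb_queue_walk(frames, skb) {
			struct ieee80211_hdr *hdr = (void *)skb->data;

			if (!skb_queue_is_last(frames, skb))
				hdr->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		}
	}
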
skb               454 net/mac80211/mlme.c 				struct sk_buff *skb, u8 ap_ht_param,
skb               519 net/mac80211/mlme.c 	pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
skb               528 net/mac80211/mlme.c 				 struct sk_buff *skb,
skb               606 net/mac80211/mlme.c 	pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
skb               614 net/mac80211/mlme.c 				struct sk_buff *skb,
skb               634 net/mac80211/mlme.c 	pos = skb_put(skb, he_cap_size);
skb               643 net/mac80211/mlme.c 	struct sk_buff *skb;
skb               691 net/mac80211/mlme.c 	skb = alloc_skb(local->hw.extra_tx_headroom +
skb               706 net/mac80211/mlme.c 	if (!skb)
skb               709 net/mac80211/mlme.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb               728 net/mac80211/mlme.c 	mgmt = skb_put_zero(skb, 24);
skb               734 net/mac80211/mlme.c 		skb_put(skb, 10);
skb               743 net/mac80211/mlme.c 		skb_put(skb, 4);
skb               752 net/mac80211/mlme.c 	pos = skb_put(skb, 2 + assoc_data->ssid_len);
skb               763 net/mac80211/mlme.c 	pos = skb_put(skb, supp_rates_len + 2);
skb               779 net/mac80211/mlme.c 		pos = skb_put(skb, rates_len - count + 2);
skb               795 net/mac80211/mlme.c 		pos = skb_put(skb, 4);
skb               805 net/mac80211/mlme.c 		pos = skb_put(skb, 2 * sband->n_channels + 2);
skb               867 net/mac80211/mlme.c 		skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
skb               876 net/mac80211/mlme.c 		ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
skb               898 net/mac80211/mlme.c 		skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
skb               924 net/mac80211/mlme.c 		pos = skb_put(skb, noffset - offset);
skb               930 net/mac80211/mlme.c 		ieee80211_add_vht_ie(sdata, skb, sband,
skb               943 net/mac80211/mlme.c 		ieee80211_add_he_ie(sdata, skb, sband);
skb               950 net/mac80211/mlme.c 		skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
skb               963 net/mac80211/mlme.c 		pos = ieee80211_add_wmm_info_ie(skb_put(skb, 9), qos_info);
skb               969 net/mac80211/mlme.c 		skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
skb               973 net/mac80211/mlme.c 	    fils_encrypt_assoc_req(skb, assoc_data) < 0) {
skb               974 net/mac80211/mlme.c 		dev_kfree_skb(skb);
skb               978 net/mac80211/mlme.c 	pos = skb_tail_pointer(skb);
skb               985 net/mac80211/mlme.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
skb               987 net/mac80211/mlme.c 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
skb               989 net/mac80211/mlme.c 	ieee80211_tx_skb(sdata, skb);
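
Before transmission, the association-request builder above stamps per-frame control flags into the skb's control buffer via IEEE80211_SKB_CB(). A minimal sketch of that tagging step; actual transmission would then go through ieee80211_tx_skb() as shown in the listing:

	#include <net/mac80211.h>

	/* Mark a frame "do not encrypt" through the CB, as above. */
	static void example_tag_dont_encrypt(struct sk_buff *skb)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
	}
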
skb               996 net/mac80211/mlme.c 	struct sk_buff *skb;
skb               998 net/mac80211/mlme.c 	skb = ieee80211_pspoll_get(&local->hw, &sdata->vif);
skb               999 net/mac80211/mlme.c 	if (!skb)
skb              1002 net/mac80211/mlme.c 	pspoll = (struct ieee80211_pspoll *) skb->data;
skb              1005 net/mac80211/mlme.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
skb              1006 net/mac80211/mlme.c 	ieee80211_tx_skb(sdata, skb);
skb              1013 net/mac80211/mlme.c 	struct sk_buff *skb;
skb              1022 net/mac80211/mlme.c 	skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
skb              1024 net/mac80211/mlme.c 	if (!skb)
skb              1027 net/mac80211/mlme.c 	nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
skb              1031 net/mac80211/mlme.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
skb              1035 net/mac80211/mlme.c 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
skb              1038 net/mac80211/mlme.c 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
skb              1040 net/mac80211/mlme.c 	ieee80211_tx_skb(sdata, skb);
skb              1046 net/mac80211/mlme.c 	struct sk_buff *skb;
skb              1057 net/mac80211/mlme.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
skb              1058 net/mac80211/mlme.c 	if (!skb)
skb              1061 net/mac80211/mlme.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb              1063 net/mac80211/mlme.c 	nullfunc = skb_put_zero(skb, 30);
skb              1072 net/mac80211/mlme.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
skb              1073 net/mac80211/mlme.c 	ieee80211_tx_skb(sdata, skb);
skb              2482 net/mac80211/mlme.c 	struct sk_buff *skb;
skb              2484 net/mac80211/mlme.c 	skb = ieee80211_build_probe_req(sdata, src, dst, (u32)-1, channel,
skb              2487 net/mac80211/mlme.c 	if (skb)
skb              2488 net/mac80211/mlme.c 		ieee80211_tx_skb(sdata, skb);
skb              2616 net/mac80211/mlme.c 	struct sk_buff *skb;
skb              2642 net/mac80211/mlme.c 	skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid,
skb              2648 net/mac80211/mlme.c 	return skb;
skb              3648 net/mac80211/mlme.c 					 struct sk_buff *skb)
skb              3650 net/mac80211/mlme.c 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
skb              3652 net/mac80211/mlme.c 	struct ieee80211_rx_status *rx_status = (void *) skb->cb;
skb              3653 net/mac80211/mlme.c 	size_t baselen, len = skb->len;
skb              4061 net/mac80211/mlme.c 				  struct sk_buff *skb)
skb              4069 net/mac80211/mlme.c 	rx_status = (struct ieee80211_rx_status *) skb->cb;
skb              4070 net/mac80211/mlme.c 	mgmt = (struct ieee80211_mgmt *) skb->data;
skb              4077 net/mac80211/mlme.c 		ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
skb              4080 net/mac80211/mlme.c 		ieee80211_rx_mgmt_probe_resp(sdata, skb);
skb              4083 net/mac80211/mlme.c 		ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len);
skb              4086 net/mac80211/mlme.c 		ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
skb              4089 net/mac80211/mlme.c 		ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
skb              4093 net/mac80211/mlme.c 		ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len);
skb              4097 net/mac80211/mlme.c 			ies_len = skb->len -
skb              4117 net/mac80211/mlme.c 			ies_len = skb->len -
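
The mlme.c lines at 4061-4117 dispatch a received management frame by subtype, passing mgmt and skb->len to per-subtype handlers. A hedged sketch of such a dispatch using the linux/ieee80211.h predicates; the handler bodies here are placeholders:

	#include <linux/ieee80211.h>
	#include <linux/printk.h>
	#include <linux/skbuff.h>

	/* Illustrative subtype dispatch; real handlers are elsewhere. */
	static void example_rx_mgmt(struct sk_buff *skb)
	{
		struct ieee80211_mgmt *mgmt = (void *)skb->data;

		if (skb->len < 24)	/* shortest management header */
			return;

		if (ieee80211_is_beacon(mgmt->frame_control))
			pr_debug("beacon, %u bytes\n", skb->len);
		else if (ieee80211_is_auth(mgmt->frame_control))
			pr_debug("auth, %u bytes\n", skb->len);
		else if (ieee80211_is_deauth(mgmt->frame_control))
			pr_debug("deauth, %u bytes\n", skb->len);
	}
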
skb               785 net/mac80211/offchannel.c 	struct sk_buff *skb;
skb               885 net/mac80211/offchannel.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + params->len);
skb               886 net/mac80211/offchannel.c 	if (!skb) {
skb               890 net/mac80211/offchannel.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb               892 net/mac80211/offchannel.c 	data = skb_put_data(skb, params->buf, params->len);
skb               920 net/mac80211/offchannel.c 	IEEE80211_SKB_CB(skb)->flags = flags;
skb               922 net/mac80211/offchannel.c 	skb->dev = sdata->dev;
skb               928 net/mac80211/offchannel.c 		ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_KERNEL);
skb               930 net/mac80211/offchannel.c 			kfree_skb(skb);
skb               943 net/mac80211/offchannel.c 		ieee80211_tx_skb(sdata, skb);
skb               948 net/mac80211/offchannel.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
skb               951 net/mac80211/offchannel.c 		IEEE80211_SKB_CB(skb)->hw_queue =
skb               956 net/mac80211/offchannel.c 				       params->wait, cookie, skb,
skb               959 net/mac80211/offchannel.c 		ieee80211_free_txskb(&local->hw, skb);
skb                76 net/mac80211/rate.c 	else if (st->skb)
skb                77 net/mac80211/rate.c 		ref->ops->tx_status(ref->priv, sband, st->sta, priv_sta, st->skb);
skb               287 net/mac80211/rate.c 	struct sk_buff *skb = txrc->skb;
skb               288 net/mac80211/rate.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               289 net/mac80211/rate.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               367 net/mac80211/rate.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
skb               842 net/mac80211/rate.c 			    struct sk_buff *skb,
skb               847 net/mac80211/rate.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               848 net/mac80211/rate.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               878 net/mac80211/rate.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
skb               909 net/mac80211/rate.c 	ieee80211_get_tx_rates(&sdata->vif, ista, txrc->skb,
skb               331 net/mac80211/rc80211_minstrel.c 	struct sk_buff *skb = txrc->skb;
skb               332 net/mac80211/rc80211_minstrel.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               860 net/mac80211/rc80211_minstrel_ht.c minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
skb               862 net/mac80211/rc80211_minstrel_ht.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               866 net/mac80211/rc80211_minstrel_ht.c 	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
skb               872 net/mac80211/rc80211_minstrel_ht.c 	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
skb              1292 net/mac80211/rc80211_minstrel_ht.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
skb              1304 net/mac80211/rc80211_minstrel_ht.c 		minstrel_aggr_check(sta, txrc->skb);
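
minstrel_aggr_check() above refuses to start aggregation for voice-queue traffic and for EAPOL frames. The two early-outs, sketched:

	#include <linux/if_ether.h>
	#include <net/mac80211.h>

	/* The two tests visible above: frames on the VO queue and
	 * EAPOL (ETH_P_PAE) frames are never aggregated. */
	static bool example_may_aggregate(struct sk_buff *skb)
	{
		if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
			return false;
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			return false;
		return true;
	}
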
skb                96 net/mac80211/rx.c static void remove_monitor_info(struct sk_buff *skb,
skb               101 net/mac80211/rx.c 		__pskb_trim(skb, skb->len - present_fcs_len);
skb               102 net/mac80211/rx.c 	__pskb_pull(skb, rtap_space);
skb               105 net/mac80211/rx.c static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
skb               108 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb               111 net/mac80211/rx.c 	hdr = (void *)(skb->data + rtap_space);
skb               119 net/mac80211/rx.c 	if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
skb               133 net/mac80211/rx.c 			     struct sk_buff *skb)
skb               225 net/mac80211/rx.c 		rtap = (void *)&skb->data[vendor_data_offset];
skb               241 net/mac80211/rx.c 					 struct sk_buff *skb,
skb               255 net/mac80211/rx.c 	if (skb->len < rtap_space + sizeof(action) +
skb               262 net/mac80211/rx.c 	skb_copy_bits(skb, rtap_space, &action, sizeof(action));
skb               276 net/mac80211/rx.c 	skb = skb_copy(skb, GFP_ATOMIC);
skb               277 net/mac80211/rx.c 	if (!skb)
skb               280 net/mac80211/rx.c 	skb_queue_tail(&sdata->skb_queue, skb);
skb               291 net/mac80211/rx.c 				 struct sk_buff *skb,
skb               295 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb               310 net/mac80211/rx.c 		he = *(struct ieee80211_radiotap_he *)skb->data;
skb               311 net/mac80211/rx.c 		skb_pull(skb, sizeof(he));
skb               316 net/mac80211/rx.c 		he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
skb               317 net/mac80211/rx.c 		skb_pull(skb, sizeof(he_mu));
skb               321 net/mac80211/rx.c 		lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
skb               322 net/mac80211/rx.c 		skb_pull(skb, sizeof(lsig));
skb               326 net/mac80211/rx.c 		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
skb               328 net/mac80211/rx.c 		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
skb               331 net/mac80211/rx.c 	mpdulen = skb->len;
skb               335 net/mac80211/rx.c 	rthdr = skb_push(skb, rtap_len);
skb               709 net/mac80211/rx.c 	struct sk_buff *skb;
skb               717 net/mac80211/rx.c 		skb = *origskb;
skb               727 net/mac80211/rx.c 		if (skb_headroom(skb) < needed_headroom &&
skb               728 net/mac80211/rx.c 		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
skb               729 net/mac80211/rx.c 			dev_kfree_skb(skb);
skb               737 net/mac80211/rx.c 		skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
skb               739 net/mac80211/rx.c 		if (!skb)
skb               744 net/mac80211/rx.c 	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
skb               746 net/mac80211/rx.c 	skb_reset_mac_header(skb);
skb               747 net/mac80211/rx.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               748 net/mac80211/rx.c 	skb->pkt_type = PACKET_OTHERHOST;
skb               749 net/mac80211/rx.c 	skb->protocol = htons(ETH_P_802_2);
skb               751 net/mac80211/rx.c 	return skb;
skb               846 net/mac80211/rx.c 			struct sk_buff *skb;
skb               849 net/mac80211/rx.c 				skb = monskb;
skb               852 net/mac80211/rx.c 				skb = skb_clone(monskb, GFP_ATOMIC);
skb               855 net/mac80211/rx.c 			if (skb) {
skb               856 net/mac80211/rx.c 				skb->dev = sdata->dev;
skb               857 net/mac80211/rx.c 				ieee80211_rx_stats(skb->dev, skb->len);
skb               858 net/mac80211/rx.c 				netif_receive_skb(skb);
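
The rx.c monitor path above first guarantees enough headroom for the radiotap header, either expanding the original skb in place or taking an expanded copy, and only then pushes the header and clones per monitor interface. The headroom-ensure step, sketched:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	/* Ensure "needed" bytes of headroom before a skb_push(), as the
	 * monitor delivery path above does. */
	static int example_ensure_headroom(struct sk_buff *skb,
					   unsigned int needed)
	{
		if (skb_headroom(skb) < needed &&
		    pskb_expand_head(skb, needed, 0, GFP_ATOMIC))
			return -ENOMEM;
		return 0;
	}
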
skb               879 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
skb               880 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
skb               916 net/mac80211/rx.c 	rx->skb->priority = (tid > 7) ? 0 : tid;
skb               947 net/mac80211/rx.c 	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
skb               954 net/mac80211/rx.c static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
skb               956 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               961 net/mac80211/rx.c 	return ieee80211_is_robust_mgmt_frame(skb);
skb               965 net/mac80211/rx.c static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
skb               967 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               972 net/mac80211/rx.c 	return ieee80211_is_robust_mgmt_frame(skb);
skb               977 net/mac80211/rx.c static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
skb               979 net/mac80211/rx.c 	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
skb               983 net/mac80211/rx.c 	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
skb               986 net/mac80211/rx.c 	if (!ieee80211_is_robust_mgmt_frame(skb))
skb               990 net/mac80211/rx.c 		(skb->data + skb->len - sizeof(*mmie));
skb               996 net/mac80211/rx.c 		(skb->data + skb->len - sizeof(*mmie16));
skb               997 net/mac80211/rx.c 	if (skb->len >= 24 + sizeof(*mmie16) &&
skb              1005 net/mac80211/rx.c static int ieee80211_get_keyid(struct sk_buff *skb,
skb              1008 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1030 net/mac80211/rx.c 	if (unlikely(skb->len < minlen))
skb              1033 net/mac80211/rx.c 	skb_copy_bits(skb, key_idx_off, &keyid, 1);
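
ieee80211_get_keyid() above validates the frame length and then reads a single byte out of the (possibly non-linear) skb with skb_copy_bits() rather than dereferencing skb->data directly. That safe-read pattern, sketched:

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	/* Safely read one byte at "off" from a possibly fragmented skb,
	 * mirroring the keyid read above. */
	static int example_read_byte(struct sk_buff *skb, unsigned int off,
				     u8 *out)
	{
		if (skb->len < off + 1)
			return -EINVAL;
		return skb_copy_bits(skb, off, out, 1);
	}
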
skb              1048 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
skb              1080 net/mac80211/rx.c 			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
skb              1129 net/mac80211/rx.c 	struct sk_buff *skb;
skb              1144 net/mac80211/rx.c 	while ((skb = __skb_dequeue(skb_list))) {
skb              1145 net/mac80211/rx.c 		status = IEEE80211_SKB_RXCB(skb);
skb              1147 net/mac80211/rx.c 		__skb_queue_tail(frames, skb);
skb              1261 net/mac80211/rx.c 					     struct sk_buff *skb,
skb              1264 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1265 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              1301 net/mac80211/rx.c 		dev_kfree_skb(skb);
skb              1323 net/mac80211/rx.c 		dev_kfree_skb(skb);
skb              1343 net/mac80211/rx.c 	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
skb              1362 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb;
skb              1364 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1415 net/mac80211/rx.c 		skb_queue_tail(&rx->sdata->skb_queue, skb);
skb              1427 net/mac80211/rx.c 	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
skb              1432 net/mac80211/rx.c 	__skb_queue_tail(frames, skb);
skb              1438 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
skb              1439 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
skb              1449 net/mac80211/rx.c 	if (rx->skb->len < 24)
skb              1475 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
skb              1507 net/mac80211/rx.c 			if (rx->skb->len < hdrlen + 8)
skb              1510 net/mac80211/rx.c 			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
skb              1533 net/mac80211/rx.c 	struct sk_buff *skb;
skb              1536 net/mac80211/rx.c 	skb = rx->skb;
skb              1537 net/mac80211/rx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              1692 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
skb              1693 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
skb              1723 net/mac80211/rx.c 		dev_kfree_skb(rx->skb);
skb              1743 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb;
skb              1744 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              1745 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1760 net/mac80211/rx.c 		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
skb              1788 net/mac80211/rx.c 	sta->rx_stats.bytes += rx->skb->len;
skb              1864 net/mac80211/rx.c 		dev_kfree_skb(rx->skb);
skb              1874 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb;
skb              1875 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              1876 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1922 net/mac80211/rx.c 			keyid = ieee80211_get_keyid(rx->skb, cs);
skb              1932 net/mac80211/rx.c 		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
skb              1952 net/mac80211/rx.c 			if (ieee80211_is_group_privacy_action(skb) &&
skb              2008 net/mac80211/rx.c 		keyidx = ieee80211_get_keyid(rx->skb, cs);
skb              2088 net/mac80211/rx.c 			 struct sk_buff **skb)
skb              2099 net/mac80211/rx.c 	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
skb              2100 net/mac80211/rx.c 	*skb = NULL;
skb              2164 net/mac80211/rx.c 	struct sk_buff *skb;
skb              2166 net/mac80211/rx.c 	hdr = (struct ieee80211_hdr *)rx->skb->data;
skb              2185 net/mac80211/rx.c 	if (skb_linearize(rx->skb))
skb              2193 net/mac80211/rx.c 	hdr = (struct ieee80211_hdr *)rx->skb->data;
skb              2199 net/mac80211/rx.c 						 rx->seqno_idx, &(rx->skb));
skb              2266 net/mac80211/rx.c 	skb_pull(rx->skb, ieee80211_hdrlen(fc));
skb              2267 net/mac80211/rx.c 	__skb_queue_tail(&entry->skb_list, rx->skb);
skb              2269 net/mac80211/rx.c 	entry->extra_len += rx->skb->len;
skb              2271 net/mac80211/rx.c 		rx->skb = NULL;
skb              2275 net/mac80211/rx.c 	rx->skb = __skb_dequeue(&entry->skb_list);
skb              2276 net/mac80211/rx.c 	if (skb_tailroom(rx->skb) < entry->extra_len) {
skb              2278 net/mac80211/rx.c 		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
skb              2285 net/mac80211/rx.c 	while ((skb = __skb_dequeue(&entry->skb_list))) {
skb              2286 net/mac80211/rx.c 		skb_put_data(rx->skb, skb->data, skb->len);
skb              2287 net/mac80211/rx.c 		dev_kfree_skb(skb);
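
The defragmentation lines at 2275-2287 dequeue the first fragment, grow its tailroom to the accumulated extra length, then append each remaining fragment with skb_put_data(). A condensed sketch, assuming the caller has tallied extra_len and owns the fragment queue:

	#include <linux/skbuff.h>

	/* Merge queued fragments into the first skb, as the defrag code
	 * above does; extra_len is the sum of the follow-on lengths. */
	static struct sk_buff *example_reassemble(struct sk_buff_head *frags,
						  unsigned int extra_len)
	{
		struct sk_buff *head, *skb;

		head = __skb_dequeue(frags);
		if (!head)
			return NULL;

		if (skb_tailroom(head) < extra_len &&
		    pskb_expand_head(head, 0, extra_len, GFP_ATOMIC)) {
			kfree_skb(head);	/* caller frees the rest */
			return NULL;
		}

		while ((skb = __skb_dequeue(frags)) != NULL) {
			skb_put_data(head, skb->data, skb->len);
			dev_kfree_skb(skb);
		}
		return head;
	}
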
skb              2308 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb;
skb              2309 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              2329 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
skb              2330 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
skb              2342 net/mac80211/rx.c 			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
skb              2347 net/mac80211/rx.c 							     rx->skb->data,
skb              2348 net/mac80211/rx.c 							     rx->skb->len);
skb              2352 net/mac80211/rx.c 		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
skb              2353 net/mac80211/rx.c 			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
skb              2357 net/mac80211/rx.c 							     rx->skb->data,
skb              2358 net/mac80211/rx.c 							     rx->skb->len);
skb              2366 net/mac80211/rx.c 			     ieee80211_is_robust_mgmt_frame(rx->skb)))
skb              2377 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
skb              2400 net/mac80211/rx.c 	ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
skb              2404 net/mac80211/rx.c 	ehdr = (struct ethhdr *) rx->skb->data;
skb              2420 net/mac80211/rx.c 	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
skb              2438 net/mac80211/rx.c static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
skb              2444 net/mac80211/rx.c 	if (unlikely((skb->protocol == sdata->control_port_protocol ||
skb              2445 net/mac80211/rx.c 		      skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
skb              2447 net/mac80211/rx.c 		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              2450 net/mac80211/rx.c 		cfg80211_rx_control_port(dev, skb, noencrypt);
skb              2451 net/mac80211/rx.c 		dev_kfree_skb(skb);
skb              2453 net/mac80211/rx.c 		memset(skb->cb, 0, sizeof(skb->cb));
skb              2457 net/mac80211/rx.c 			napi_gro_receive(rx->napi, skb);
skb              2459 net/mac80211/rx.c 			netif_receive_skb(skb);
skb              2471 net/mac80211/rx.c 	struct sk_buff *skb, *xmit_skb;
skb              2472 net/mac80211/rx.c 	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
skb              2475 net/mac80211/rx.c 	skb = rx->skb;
skb              2478 net/mac80211/rx.c 	ieee80211_rx_stats(dev, skb->len);
skb              2501 net/mac80211/rx.c 			xmit_skb = skb_copy(skb, GFP_ATOMIC);
skb              2515 net/mac80211/rx.c 				xmit_skb = skb;
skb              2516 net/mac80211/rx.c 				skb = NULL;
skb              2522 net/mac80211/rx.c 	if (skb) {
skb              2531 net/mac80211/rx.c 		align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
skb              2533 net/mac80211/rx.c 			if (WARN_ON(skb_headroom(skb) < 3)) {
skb              2534 net/mac80211/rx.c 				dev_kfree_skb(skb);
skb              2535 net/mac80211/rx.c 				skb = NULL;
skb              2537 net/mac80211/rx.c 				u8 *data = skb->data;
skb              2538 net/mac80211/rx.c 				size_t len = skb_headlen(skb);
skb              2539 net/mac80211/rx.c 				skb->data -= align;
skb              2540 net/mac80211/rx.c 				memmove(skb->data, data, len);
skb              2541 net/mac80211/rx.c 				skb_set_tail_pointer(skb, len);
skb              2547 net/mac80211/rx.c 	if (skb) {
skb              2548 net/mac80211/rx.c 		skb->protocol = eth_type_trans(skb, dev);
skb              2549 net/mac80211/rx.c 		ieee80211_deliver_skb_to_local_stack(skb, rx);
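
Before handing a converted frame to the stack, the delivery code above 4-byte-aligns the payload behind the ethernet header by sliding the data down within existing headroom, then classifies it with eth_type_trans(). Both steps, sketched together:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>

	/* Align the post-ethhdr payload to 4 bytes (when headroom allows),
	 * then classify and deliver, as the path above does. */
	static void example_deliver(struct sk_buff *skb, struct net_device *dev)
	{
		unsigned int align;

		align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
		if (align && skb_headroom(skb) >= align) {
			u8 *data = skb->data;
			size_t len = skb_headlen(skb);

			skb->data -= align;
			memmove(skb->data, data, len);
			skb_set_tail_pointer(skb, len);
		}

		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}
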
skb              2570 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb;
skb              2571 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2597 net/mac80211/rx.c 	skb->dev = dev;
skb              2600 net/mac80211/rx.c 	if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
skb              2606 net/mac80211/rx.c 	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
skb              2612 net/mac80211/rx.c 		rx->skb = __skb_dequeue(&frame_list);
skb              2615 net/mac80211/rx.c 			dev_kfree_skb(rx->skb);
skb              2628 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb;
skb              2629 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              2630 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              2670 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb, *fwd_skb;
skb              2677 net/mac80211/rx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              2681 net/mac80211/rx.c 	if (!pskb_may_pull(rx->skb, hdrlen + 6))
skb              2684 net/mac80211/rx.c 	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
skb              2687 net/mac80211/rx.c 	if (!pskb_may_pull(rx->skb,
skb              2692 net/mac80211/rx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              2693 net/mac80211/rx.c 	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
skb              2746 net/mac80211/rx.c 	ac = ieee80211_select_queue_80211(sdata, skb, hdr);
skb              2752 net/mac80211/rx.c 	skb_set_queue_mapping(skb, q);
skb              2767 net/mac80211/rx.c 	fwd_skb = skb_copy_expand(skb, local->tx_headroom +
skb              2814 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
skb              2846 net/mac80211/rx.c 	if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
skb              2848 net/mac80211/rx.c 		struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
skb              2850 net/mac80211/rx.c 		if (pskb_may_pull(rx->skb,
skb              2856 net/mac80211/rx.c 			skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
skb              2873 net/mac80211/rx.c 	rx->skb->dev = dev;
skb              2878 net/mac80211/rx.c 		    ((struct ethhdr *)rx->skb->data)->h_dest) &&
skb              2892 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb;
skb              2893 net/mac80211/rx.c 	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
skb              2912 net/mac80211/rx.c 		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
skb              2946 net/mac80211/rx.c 		kfree_skb(skb);
skb              2963 net/mac80211/rx.c 	struct sk_buff *skb;
skb              2982 net/mac80211/rx.c 	skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
skb              2983 net/mac80211/rx.c 	if (skb == NULL)
skb              2986 net/mac80211/rx.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb              2987 net/mac80211/rx.c 	resp = skb_put_zero(skb, 24);
skb              2993 net/mac80211/rx.c 	skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
skb              3000 net/mac80211/rx.c 	ieee80211_tx_skb(sdata, skb);
skb              3006 net/mac80211/rx.c 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
skb              3007 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
skb              3014 net/mac80211/rx.c 	if (rx->skb->len < 24)
skb              3030 net/mac80211/rx.c 					    rx->skb->data, rx->skb->len,
skb              3046 net/mac80211/rx.c 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
skb              3047 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
skb              3048 net/mac80211/rx.c 	int len = rx->skb->len;
skb              3341 net/mac80211/rx.c 	dev_kfree_skb(rx->skb);
skb              3345 net/mac80211/rx.c 	skb_queue_tail(&sdata->skb_queue, rx->skb);
skb              3355 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
skb              3374 net/mac80211/rx.c 			     rx->skb->data, rx->skb->len, 0)) {
skb              3377 net/mac80211/rx.c 		dev_kfree_skb(rx->skb);
skb              3388 net/mac80211/rx.c 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
skb              3391 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
skb              3418 net/mac80211/rx.c 	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
skb              3443 net/mac80211/rx.c 	dev_kfree_skb(rx->skb);
skb              3451 net/mac80211/rx.c 	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
skb              3500 net/mac80211/rx.c 	skb_queue_tail(&sdata->skb_queue, rx->skb);
skb              3513 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb, *skb2;
skb              3515 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              3533 net/mac80211/rx.c 	needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
skb              3535 net/mac80211/rx.c 	if (skb_headroom(skb) < needed_headroom &&
skb              3536 net/mac80211/rx.c 	    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
skb              3540 net/mac80211/rx.c 	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
skb              3543 net/mac80211/rx.c 	skb_reset_mac_header(skb);
skb              3544 net/mac80211/rx.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb              3545 net/mac80211/rx.c 	skb->pkt_type = PACKET_OTHERHOST;
skb              3546 net/mac80211/rx.c 	skb->protocol = htons(ETH_P_802_2);
skb              3557 net/mac80211/rx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
skb              3565 net/mac80211/rx.c 		ieee80211_rx_stats(sdata->dev, skb->len);
skb              3569 net/mac80211/rx.c 		skb->dev = prev_dev;
skb              3570 net/mac80211/rx.c 		netif_receive_skb(skb);
skb              3575 net/mac80211/rx.c 	dev_kfree_skb(skb);
skb              3592 net/mac80211/rx.c 		status = IEEE80211_SKB_RXCB((rx->skb));
skb              3605 net/mac80211/rx.c 		dev_kfree_skb(rx->skb);
skb              3617 net/mac80211/rx.c 	struct sk_buff *skb;
skb              3634 net/mac80211/rx.c 	while ((skb = __skb_dequeue(frames))) {
skb              3640 net/mac80211/rx.c 		rx->skb = skb;
skb              3828 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb;
skb              3829 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb              3830 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              3831 net/mac80211/rx.c 	u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
skb              3838 net/mac80211/rx.c 		if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
skb              3907 net/mac80211/rx.c 			if (ieee80211_is_public_action(hdr, skb->len))
skb              3949 net/mac80211/rx.c 		return ieee80211_is_public_action(hdr, skb->len) ||
skb              4135 net/mac80211/rx.c 	struct sk_buff *skb = rx->skb;
skb              4136 net/mac80211/rx.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb              4137 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              4139 net/mac80211/rx.c 	int orig_len = skb->len;
skb              4204 net/mac80211/rx.c 		if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
skb              4207 net/mac80211/rx.c 		payload = (void *)(skb->data + snap_offs);
skb              4225 net/mac80211/rx.c 	    pskb_trim(skb, skb->len - fast_rx->icv_len))
skb              4277 net/mac80211/rx.c 	ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
skb              4278 net/mac80211/rx.c 	ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
skb              4280 net/mac80211/rx.c 	skb_pull(skb, snap_offs + sizeof(rfc1042_header));
skb              4282 net/mac80211/rx.c 	memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
skb              4284 net/mac80211/rx.c 	skb->dev = fast_rx->dev;
skb              4286 net/mac80211/rx.c 	ieee80211_rx_stats(fast_rx->dev, skb->len);
skb              4301 net/mac80211/rx.c 			xmit_skb = skb_copy(skb, GFP_ATOMIC);
skb              4304 net/mac80211/rx.c 			xmit_skb = skb;
skb              4305 net/mac80211/rx.c 			skb = NULL;
skb              4321 net/mac80211/rx.c 		if (!skb)
skb              4326 net/mac80211/rx.c 	skb->protocol = eth_type_trans(skb, fast_rx->dev);
skb              4327 net/mac80211/rx.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb              4329 net/mac80211/rx.c 		napi_gro_receive(rx->napi, skb);
skb              4331 net/mac80211/rx.c 		netif_receive_skb(skb);
skb              4335 net/mac80211/rx.c 	dev_kfree_skb(skb);
skb              4347 net/mac80211/rx.c 					    struct sk_buff *skb, bool consume)
skb              4352 net/mac80211/rx.c 	rx->skb = skb;
skb              4373 net/mac80211/rx.c 		skb = skb_copy(skb, GFP_ATOMIC);
skb              4374 net/mac80211/rx.c 		if (!skb) {
skb              4382 net/mac80211/rx.c 		rx->skb = skb;
skb              4395 net/mac80211/rx.c 					 struct sk_buff *skb,
skb              4407 net/mac80211/rx.c 	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
skb              4409 net/mac80211/rx.c 	rx.skb = skb;
skb              4418 net/mac80211/rx.c 		if (skb->len < ieee80211_hdrlen(fc))
skb              4421 net/mac80211/rx.c 			err = skb_linearize(skb);
skb              4423 net/mac80211/rx.c 		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
skb              4427 net/mac80211/rx.c 		dev_kfree_skb(skb);
skb              4431 net/mac80211/rx.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              4437 net/mac80211/rx.c 		ieee80211_scan_rx(local, skb);
skb              4445 net/mac80211/rx.c 			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
skb              4460 net/mac80211/rx.c 			ieee80211_prepare_and_rx_handle(&rx, skb, false);
skb              4469 net/mac80211/rx.c 			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
skb              4498 net/mac80211/rx.c 		ieee80211_prepare_and_rx_handle(&rx, skb, false);
skb              4507 net/mac80211/rx.c 		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
skb              4512 net/mac80211/rx.c 	dev_kfree_skb(skb);
skb              4520 net/mac80211/rx.c 		       struct sk_buff *skb, struct napi_struct *napi)
skb              4525 net/mac80211/rx.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              4624 net/mac80211/rx.c 	skb = ieee80211_rx_monitor(local, skb, rate);
skb              4625 net/mac80211/rx.c 	if (!skb) {
skb              4631 net/mac80211/rx.c 			((struct ieee80211_hdr *)skb->data)->frame_control,
skb              4632 net/mac80211/rx.c 			skb->len);
skb              4634 net/mac80211/rx.c 	__ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
skb              4640 net/mac80211/rx.c 	kfree_skb(skb);
skb              4646 net/mac80211/rx.c void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              4650 net/mac80211/rx.c 	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
skb              4652 net/mac80211/rx.c 	skb->pkt_type = IEEE80211_RX_MSG;
skb              4653 net/mac80211/rx.c 	skb_queue_tail(&local->skb_queue, skb);
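
ieee80211_rx_irqsafe() above defers processing out of interrupt context by tagging the skb's pkt_type and queueing it for later tasklet handling. The deferral pattern, sketched (the queue and tag value are illustrative):

	#include <linux/skbuff.h>

	#define EXAMPLE_RX_MSG	7	/* illustrative pkt_type tag */

	/* Defer RX work: tag the skb and queue it, as the irqsafe entry
	 * point does with local->skb_queue. skb_queue_tail() uses an
	 * irqsave spinlock, so this is safe from IRQ context. */
	static void example_rx_irqsafe(struct sk_buff_head *q,
				       struct sk_buff *skb)
	{
		skb->pkt_type = EXAMPLE_RX_MSG;
		skb_queue_tail(q, skb);
	}
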
skb               236 net/mac80211/scan.c void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
skb               238 net/mac80211/scan.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb               240 net/mac80211/scan.c 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
skb               244 net/mac80211/scan.c 	if (skb->len < 24 ||
skb               285 net/mac80211/scan.c 					mgmt, skb->len,
skb               586 net/mac80211/scan.c 	struct sk_buff *skb;
skb               589 net/mac80211/scan.c 	skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel,
skb               593 net/mac80211/scan.c 	if (skb) {
skb               595 net/mac80211/scan.c 			struct ieee80211_hdr *hdr = (void *)skb->data;
skb               602 net/mac80211/scan.c 		IEEE80211_SKB_CB(skb)->flags |= tx_flags;
skb               603 net/mac80211/scan.c 		ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band,
skb               192 net/mac80211/spectmgmt.c 	struct sk_buff *skb;
skb               195 net/mac80211/spectmgmt.c 	skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
skb               197 net/mac80211/spectmgmt.c 	if (!skb)
skb               200 net/mac80211/spectmgmt.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb               201 net/mac80211/spectmgmt.c 	msr_report = skb_put_zero(skb, 24);
skb               208 net/mac80211/spectmgmt.c 	skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
skb               225 net/mac80211/spectmgmt.c 	ieee80211_tx_skb(sdata, skb);
skb               848 net/mac80211/sta_info.c static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
skb               853 net/mac80211/sta_info.c 	if (!skb)
skb               856 net/mac80211/sta_info.c 	info = IEEE80211_SKB_CB(skb);
skb               872 net/mac80211/sta_info.c 	struct sk_buff *skb;
skb               883 net/mac80211/sta_info.c 		skb = skb_peek(&sta->tx_filtered[ac]);
skb               884 net/mac80211/sta_info.c 		if (sta_info_buffer_expired(sta, skb))
skb               885 net/mac80211/sta_info.c 			skb = __skb_dequeue(&sta->tx_filtered[ac]);
skb               887 net/mac80211/sta_info.c 			skb = NULL;
skb               896 net/mac80211/sta_info.c 		if (!skb)
skb               898 net/mac80211/sta_info.c 		ieee80211_free_txskb(&local->hw, skb);
skb               909 net/mac80211/sta_info.c 		skb = skb_peek(&sta->ps_tx_buf[ac]);
skb               910 net/mac80211/sta_info.c 		if (sta_info_buffer_expired(sta, skb))
skb               911 net/mac80211/sta_info.c 			skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
skb               913 net/mac80211/sta_info.c 			skb = NULL;
skb               921 net/mac80211/sta_info.c 		if (!skb)
skb               927 net/mac80211/sta_info.c 		ieee80211_free_txskb(&local->hw, skb);
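
The sta_info.c cleanup above peeks at the head of each power-save buffer and dequeues only while the head frame has expired, so it stops at the first still-fresh frame. A sketch of that peek-then-dequeue loop, with a stand-in expiry callback for sta_info_buffer_expired() and assuming a single consumer of the queue:

	#include <linux/skbuff.h>

	/* Drop expired frames from the head of a PS buffer; stops at the
	 * first unexpired frame, like the loop above. example_expired()
	 * is a hypothetical stand-in for sta_info_buffer_expired(). */
	static void example_reap_expired(struct sk_buff_head *buf,
					 bool (*example_expired)(struct sk_buff *))
	{
		struct sk_buff *skb;

		for (;;) {
			skb = skb_peek(buf);
			if (!skb || !example_expired(skb))
				break;
			skb = skb_dequeue(buf);
			kfree_skb(skb);
		}
	}
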
skb              1375 net/mac80211/sta_info.c 	struct sk_buff *skb;
skb              1398 net/mac80211/sta_info.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
skb              1399 net/mac80211/sta_info.c 	if (!skb)
skb              1402 net/mac80211/sta_info.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb              1404 net/mac80211/sta_info.c 	nullfunc = skb_put(skb, size);
skb              1412 net/mac80211/sta_info.c 	skb->priority = tid;
skb              1413 net/mac80211/sta_info.c 	skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
skb              1426 net/mac80211/sta_info.c 	info = IEEE80211_SKB_CB(skb);
skb              1444 net/mac80211/sta_info.c 	skb->dev = sdata->dev;
skb              1450 net/mac80211/sta_info.c 		kfree_skb(skb);
skb              1455 net/mac80211/sta_info.c 	ieee80211_xmit(sdata, sta, skb, 0);
skb              1532 net/mac80211/sta_info.c 			struct sk_buff *skb;
skb              1535 net/mac80211/sta_info.c 				skb = skb_dequeue(&sta->tx_filtered[ac]);
skb              1536 net/mac80211/sta_info.c 				if (!skb) {
skb              1537 net/mac80211/sta_info.c 					skb = skb_dequeue(
skb              1539 net/mac80211/sta_info.c 					if (skb)
skb              1542 net/mac80211/sta_info.c 				if (!skb)
skb              1545 net/mac80211/sta_info.c 				__skb_queue_tail(frames, skb);
skb              1611 net/mac80211/sta_info.c 		struct sk_buff *skb;
skb              1618 net/mac80211/sta_info.c 		while ((skb = __skb_dequeue(&frames))) {
skb              1619 net/mac80211/sta_info.c 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1620 net/mac80211/sta_info.c 			struct ieee80211_hdr *hdr = (void *) skb->data;
skb              1648 net/mac80211/sta_info.c 			tids |= BIT(skb->priority);
skb              1650 net/mac80211/sta_info.c 			__skb_queue_tail(&pending, skb);
skb                22 net/mac80211/status.c 				 struct sk_buff *skb)
skb                25 net/mac80211/status.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb                28 net/mac80211/status.c 	skb->pkt_type = IEEE80211_TX_STATUS_MSG;
skb                30 net/mac80211/status.c 		       &local->skb_queue : &local->skb_queue_unreliable, skb);
skb                34 net/mac80211/status.c 	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
skb                35 net/mac80211/status.c 		ieee80211_free_txskb(hw, skb);
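
The status.c irqsafe path above sends status skbs to either a reliable or an unreliable queue, and frees the oldest unreliable entries once that queue grows too long. The overflow-trimming shape, sketched with an illustrative cap:

	#include <linux/skbuff.h>

	#define EXAMPLE_QUEUE_LIMIT	128	/* illustrative cap */

	/* Queue a status frame, then trim the oldest entries past the
	 * cap, as the irqsafe status path above does. */
	static void example_queue_status(struct sk_buff_head *q,
					 struct sk_buff *skb)
	{
		skb_queue_tail(q, skb);
		while (skb_queue_len(q) > EXAMPLE_QUEUE_LIMIT &&
		       (skb = skb_dequeue(q)))
			kfree_skb(skb);
	}
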
skb                45 net/mac80211/status.c 					    struct sk_buff *skb)
skb                47 net/mac80211/status.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb                48 net/mac80211/status.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb                53 net/mac80211/status.c 		ieee80211_free_txskb(&local->hw, skb);
skb               145 net/mac80211/status.c 		skb_queue_tail(&sta->tx_filtered[ac], skb);
skb               159 net/mac80211/status.c 		ieee80211_add_pending_skb(local, skb);
skb               167 net/mac80211/status.c 	ieee80211_free_txskb(&local->hw, skb);
skb               182 net/mac80211/status.c static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
skb               184 net/mac80211/status.c 	struct ieee80211_mgmt *mgmt = (void *) skb->data;
skb               187 net/mac80211/status.c 	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
skb               201 net/mac80211/status.c 		struct ieee80211_hdr *hdr = (void *) skb->data;
skb               303 net/mac80211/status.c 				 struct sk_buff *skb, int retry_count,
skb               307 net/mac80211/status.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               308 net/mac80211/status.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               314 net/mac80211/status.c 	rthdr = skb_push(skb, rtap_len);
skb               571 net/mac80211/status.c 					struct sk_buff *skb, u32 flags)
skb               581 net/mac80211/status.c 	if ((skb == orig_teardown_skb) && teardown_skb) {
skb               599 net/mac80211/status.c 			ieee80211_subif_start_xmit(teardown_skb, skb->dev);
skb               605 net/mac80211/status.c ieee80211_sdata_from_skb(struct ieee80211_local *local, struct sk_buff *skb)
skb               609 net/mac80211/status.c 	if (skb->dev) {
skb               614 net/mac80211/status.c 			if (skb->dev == sdata->dev)
skb               628 net/mac80211/status.c 	struct sk_buff *skb;
skb               632 net/mac80211/status.c 	skb = idr_remove(&local->ack_status_frames, info->ack_frame_id);
skb               635 net/mac80211/status.c 	if (!skb)
skb               639 net/mac80211/status.c 		u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
skb               641 net/mac80211/status.c 		struct ieee80211_hdr *hdr = (void *)skb->data;
skb               644 net/mac80211/status.c 		sdata = ieee80211_sdata_from_skb(local, skb);
skb               654 net/mac80211/status.c 							skb->data, skb->len,
skb               659 net/mac80211/status.c 		dev_kfree_skb_any(skb);
skb               661 net/mac80211/status.c 		dev_kfree_skb_any(skb);
skb               664 net/mac80211/status.c 		skb_complete_wifi_ack(skb, acked);
skb               669 net/mac80211/status.c 				      struct sk_buff *skb, bool dropped)
skb               671 net/mac80211/status.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               672 net/mac80211/status.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb               683 net/mac80211/status.c 		sdata = ieee80211_sdata_from_skb(local, skb);
skb               686 net/mac80211/status.c 			skb->dev = NULL;
skb               693 net/mac80211/status.c 			    (ieee80211_get_tdls_action(skb, hdr_size) ==
skb               695 net/mac80211/status.c 				ieee80211_tdls_td_tx_handle(local, sdata, skb,
skb               708 net/mac80211/status.c 	if (!dropped && skb->destructor) {
skb               709 net/mac80211/status.c 		skb->wifi_acked_valid = 1;
skb               710 net/mac80211/status.c 		skb->wifi_acked = acked;
skb               715 net/mac80211/status.c 	if (skb_has_frag_list(skb)) {
skb               716 net/mac80211/status.c 		kfree_skb_list(skb_shinfo(skb)->frag_list);
skb               717 net/mac80211/status.c 		skb_shinfo(skb)->frag_list = NULL;
skb               804 net/mac80211/status.c void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
skb               810 net/mac80211/status.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               817 net/mac80211/status.c 	if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
skb               819 net/mac80211/status.c 		dev_kfree_skb(skb);
skb               822 net/mac80211/status.c 	ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
skb               826 net/mac80211/status.c 	skb_reset_mac_header(skb);
skb               827 net/mac80211/status.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               828 net/mac80211/status.c 	skb->pkt_type = PACKET_OTHERHOST;
skb               829 net/mac80211/status.c 	skb->protocol = htons(ETH_P_802_2);
skb               830 net/mac80211/status.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb               843 net/mac80211/status.c 				skb2 = skb_clone(skb, GFP_ATOMIC);
skb               854 net/mac80211/status.c 		skb->dev = prev_dev;
skb               855 net/mac80211/status.c 		netif_rx(skb);
skb               856 net/mac80211/status.c 		skb = NULL;
skb               859 net/mac80211/status.c 	dev_kfree_skb(skb);
skb               865 net/mac80211/status.c 	struct sk_buff *skb = status->skb;
skb               866 net/mac80211/status.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               905 net/mac80211/status.c 			ieee80211_handle_filtered_frame(local, sta, skb);
skb               940 net/mac80211/status.c 			bar = (struct ieee80211_bar *) skb->data;
skb               954 net/mac80211/status.c 			ieee80211_handle_filtered_frame(local, sta, skb);
skb               975 net/mac80211/status.c 			ieee80211_frame_acked(sta, skb);
skb               979 net/mac80211/status.c 			ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data,
skb              1045 net/mac80211/status.c 	ieee80211_report_used_skb(local, skb, false);
skb              1048 net/mac80211/status.c 	skb_orphan(skb);
skb              1059 net/mac80211/status.c 		dev_kfree_skb(skb);
skb              1064 net/mac80211/status.c 	ieee80211_tx_monitor(local, skb, sband, retry_count, shift,
skb              1068 net/mac80211/status.c void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              1070 net/mac80211/status.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1073 net/mac80211/status.c 		.skb = skb,
skb              1074 net/mac80211/status.c 		.info = IEEE80211_SKB_CB(skb),
skb              1105 net/mac80211/status.c 	if (status->skb)
skb              1188 net/mac80211/status.c void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
skb              1192 net/mac80211/status.c 	ieee80211_report_used_skb(local, skb, true);
skb              1193 net/mac80211/status.c 	dev_kfree_skb_any(skb);
skb              1200 net/mac80211/status.c 	struct sk_buff *skb;
skb              1202 net/mac80211/status.c 	while ((skb = __skb_dequeue(skbs)))
skb              1203 net/mac80211/status.c 		ieee80211_free_txskb(hw, skb);
skb                43 net/mac80211/tdls.c 					 struct sk_buff *skb)
skb                55 net/mac80211/tdls.c 	u8 *pos = skb_put(skb, 10);
skb                72 net/mac80211/tdls.c 			   struct sk_buff *skb, u16 start, u16 end,
skb               106 net/mac80211/tdls.c 			u8 *pos = skb_put(skb, 2);
skb               117 net/mac80211/tdls.c 		u8 *pos = skb_put(skb, 2);
skb               129 net/mac80211/tdls.c 				 struct sk_buff *skb)
skb               136 net/mac80211/tdls.c 	u8 *pos = skb_put(skb, 2);
skb               146 net/mac80211/tdls.c 	subband_cnt = ieee80211_tdls_add_subband(sdata, skb, 2412, 2472, 5);
skb               149 net/mac80211/tdls.c 	subband_cnt += ieee80211_tdls_add_subband(sdata, skb, 5000, 5825, 20);
skb               156 net/mac80211/tdls.c 					    struct sk_buff *skb)
skb               165 net/mac80211/tdls.c 	pos = skb_put(skb, 4);
skb               173 net/mac80211/tdls.c static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb)
skb               175 net/mac80211/tdls.c 	u8 *pos = skb_put(skb, 3);
skb               202 net/mac80211/tdls.c 				       struct sk_buff *skb, const u8 *peer,
skb               216 net/mac80211/tdls.c 	lnkid = skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
skb               227 net/mac80211/tdls.c ieee80211_tdls_add_aid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
skb               230 net/mac80211/tdls.c 	u8 *pos = skb_put(skb, 4);
skb               273 net/mac80211/tdls.c 					    struct sk_buff *skb)
skb               279 net/mac80211/tdls.c 	wmm = skb_put_zero(skb, sizeof(*wmm));
skb               363 net/mac80211/tdls.c 				   struct sk_buff *skb, const u8 *peer,
skb               379 net/mac80211/tdls.c 	ieee80211_add_srates_ie(sdata, skb, false, sband->band);
skb               380 net/mac80211/tdls.c 	ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band);
skb               381 net/mac80211/tdls.c 	ieee80211_tdls_add_supp_channels(sdata, skb);
skb               396 net/mac80211/tdls.c 		skb_put_data(skb, extra_ies + offset, noffset - offset);
skb               400 net/mac80211/tdls.c 	ieee80211_tdls_add_ext_capab(sdata, skb);
skb               405 net/mac80211/tdls.c 		ieee80211_add_wmm_info_ie(skb_put(skb, 9), 0); /* no U-APSD */
skb               425 net/mac80211/tdls.c 		skb_put_data(skb, extra_ies + offset, noffset - offset);
skb               442 net/mac80211/tdls.c 	ieee80211_tdls_add_oper_classes(sdata, skb);
skb               460 net/mac80211/tdls.c 		pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
skb               467 net/mac80211/tdls.c 		pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
skb               473 net/mac80211/tdls.c 		ieee80211_tdls_add_bss_coex_ie(skb);
skb               475 net/mac80211/tdls.c 	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
skb               496 net/mac80211/tdls.c 		skb_put_data(skb, extra_ies + offset, noffset - offset);
skb               509 net/mac80211/tdls.c 			ieee80211_tdls_add_aid(sdata, skb);
skb               511 net/mac80211/tdls.c 		pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
skb               519 net/mac80211/tdls.c 		ieee80211_tdls_add_aid(sdata, skb);
skb               521 net/mac80211/tdls.c 		pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
skb               537 net/mac80211/tdls.c 		skb_put_data(skb, extra_ies + offset, noffset - offset);
skb               544 net/mac80211/tdls.c 				 struct sk_buff *skb, const u8 *peer,
skb               579 net/mac80211/tdls.c 		skb_put_data(skb, extra_ies + offset, noffset - offset);
skb               585 net/mac80211/tdls.c 		ieee80211_tdls_add_wmm_param_ie(sdata, skb);
skb               599 net/mac80211/tdls.c 		skb_put_data(skb, extra_ies + offset, noffset - offset);
skb               612 net/mac80211/tdls.c 		pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
skb               618 net/mac80211/tdls.c 	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
skb               630 net/mac80211/tdls.c 		pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
skb               640 net/mac80211/tdls.c 		skb_put_data(skb, extra_ies + offset, noffset - offset);
skb               646 net/mac80211/tdls.c 				       struct sk_buff *skb, const u8 *peer,
skb               657 net/mac80211/tdls.c 	tf = (void *)skb->data;
skb               670 net/mac80211/tdls.c 		skb_put_data(skb, extra_ies + offset, noffset - offset);
skb               674 net/mac80211/tdls.c 	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
skb               679 net/mac80211/tdls.c 		skb_put_data(skb, extra_ies + offset, noffset - offset);
skb               685 net/mac80211/tdls.c 					struct sk_buff *skb, const u8 *peer,
skb               691 net/mac80211/tdls.c 		ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
skb               694 net/mac80211/tdls.c 		skb_put_data(skb, extra_ies, extra_ies_len);
skb               698 net/mac80211/tdls.c 				   struct sk_buff *skb, const u8 *peer,
skb               709 net/mac80211/tdls.c 			ieee80211_tdls_add_setup_start_ies(sdata, skb, peer,
skb               717 net/mac80211/tdls.c 			ieee80211_tdls_add_setup_cfm_ies(sdata, skb, peer,
skb               724 net/mac80211/tdls.c 			skb_put_data(skb, extra_ies, extra_ies_len);
skb               726 net/mac80211/tdls.c 			ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
skb               729 net/mac80211/tdls.c 		ieee80211_tdls_add_chan_switch_req_ies(sdata, skb, peer,
skb               735 net/mac80211/tdls.c 		ieee80211_tdls_add_chan_switch_resp_ies(sdata, skb, peer,
skb               747 net/mac80211/tdls.c 			       u16 status_code, struct sk_buff *skb)
skb               752 net/mac80211/tdls.c 	tf = skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
skb               760 net/mac80211/tdls.c 	skb_set_network_header(skb, ETH_HLEN);
skb               767 net/mac80211/tdls.c 		skb_put(skb, sizeof(tf->u.setup_req));
skb               777 net/mac80211/tdls.c 		skb_put(skb, sizeof(tf->u.setup_resp));
skb               788 net/mac80211/tdls.c 		skb_put(skb, sizeof(tf->u.setup_cfm));
skb               796 net/mac80211/tdls.c 		skb_put(skb, sizeof(tf->u.teardown));
skb               803 net/mac80211/tdls.c 		skb_put(skb, sizeof(tf->u.discover_req));
skb               810 net/mac80211/tdls.c 		skb_put(skb, sizeof(tf->u.chan_switch_req));
skb               816 net/mac80211/tdls.c 		skb_put(skb, sizeof(tf->u.chan_switch_resp));
skb               829 net/mac80211/tdls.c 			   u16 status_code, struct sk_buff *skb)
skb               834 net/mac80211/tdls.c 	mgmt = skb_put_zero(skb, 24);
skb               844 net/mac80211/tdls.c 		skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
skb               870 net/mac80211/tdls.c 	struct sk_buff *skb;
skb               873 net/mac80211/tdls.c 	skb = netdev_alloc_skb(sdata->dev,
skb               890 net/mac80211/tdls.c 	if (!skb)
skb               893 net/mac80211/tdls.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb               906 net/mac80211/tdls.c 						     status_code, skb);
skb               912 net/mac80211/tdls.c 						 skb);
skb               922 net/mac80211/tdls.c 	ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code,
skb               925 net/mac80211/tdls.c 	return skb;
skb               928 net/mac80211/tdls.c 	dev_kfree_skb(skb);
skb               941 net/mac80211/tdls.c 	struct sk_buff *skb = NULL;
skb               992 net/mac80211/tdls.c 	skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer, action_code,
skb               997 net/mac80211/tdls.c 	if (!skb) {
skb              1003 net/mac80211/tdls.c 		ieee80211_tx_skb(sdata, skb);
skb              1014 net/mac80211/tdls.c 		skb->priority = 256 + 2;
skb              1017 net/mac80211/tdls.c 		skb->priority = 256 + 5;
skb              1020 net/mac80211/tdls.c 	skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
skb              1050 net/mac80211/tdls.c 			sdata->u.mgd.teardown_skb = skb_copy(skb, GFP_ATOMIC);
skb              1051 net/mac80211/tdls.c 			sdata->u.mgd.orig_teardown_skb = skb;
skb              1058 net/mac80211/tdls.c 	__ieee80211_subif_start_xmit(skb, dev, flags, 0);
skb              1064 net/mac80211/tdls.c 	dev_kfree_skb(skb);
skb              1472 net/mac80211/tdls.c static const u8 *ieee80211_tdls_find_sw_timing_ie(struct sk_buff *skb)
skb              1482 net/mac80211/tdls.c 	tf = container_of(skb->data + skb_network_offset(skb),
skb              1486 net/mac80211/tdls.c 				skb->len - (ie_start - skb->data));
skb              1499 net/mac80211/tdls.c 	struct sk_buff *skb;
skb              1526 net/mac80211/tdls.c 	skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr,
skb              1531 net/mac80211/tdls.c 	if (!skb)
skb              1534 net/mac80211/tdls.c 	skb = ieee80211_build_data_template(sdata, skb, 0);
skb              1535 net/mac80211/tdls.c 	if (IS_ERR(skb)) {
skb              1541 net/mac80211/tdls.c 		const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb);
skb              1545 net/mac80211/tdls.c 			dev_kfree_skb_any(skb);
skb              1549 net/mac80211/tdls.c 		*ch_sw_tm_ie_offset = tm_ie - skb->data;
skb              1555 net/mac80211/tdls.c 	return skb;
skb              1566 net/mac80211/tdls.c 	struct sk_buff *skb = NULL;
skb              1587 net/mac80211/tdls.c 	skb = ieee80211_tdls_ch_sw_tmpl_get(sta, oper_class, chandef,
skb              1589 net/mac80211/tdls.c 	if (!skb) {
skb              1595 net/mac80211/tdls.c 				      chandef, skb, ch_sw_tm_ie);
skb              1601 net/mac80211/tdls.c 	dev_kfree_skb_any(skb);
skb              1641 net/mac80211/tdls.c 	struct sk_buff *skb;
skb              1647 net/mac80211/tdls.c 	skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr,
skb              1651 net/mac80211/tdls.c 	if (!skb)
skb              1654 net/mac80211/tdls.c 	skb = ieee80211_build_data_template(sdata, skb, 0);
skb              1655 net/mac80211/tdls.c 	if (IS_ERR(skb)) {
skb              1662 net/mac80211/tdls.c 		const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb);
skb              1667 net/mac80211/tdls.c 			dev_kfree_skb_any(skb);
skb              1671 net/mac80211/tdls.c 		*ch_sw_tm_ie_offset = tm_ie - skb->data;
skb              1676 net/mac80211/tdls.c 	return skb;
skb              1681 net/mac80211/tdls.c 					   struct sk_buff *skb)
skb              1686 net/mac80211/tdls.c 	struct ieee80211_tdls_data *tf = (void *)skb->data;
skb              1688 net/mac80211/tdls.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb              1696 net/mac80211/tdls.c 	if (skb->len < baselen) {
skb              1698 net/mac80211/tdls.c 			 skb->len);
skb              1719 net/mac80211/tdls.c 			       skb->len - baselen, false, &elems,
skb              1768 net/mac80211/tdls.c 					  struct sk_buff *skb)
skb              1780 net/mac80211/tdls.c 	struct ieee80211_tdls_data *tf = (void *)skb->data;
skb              1781 net/mac80211/tdls.c 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
skb              1789 net/mac80211/tdls.c 	if (skb->len < baselen) {
skb              1791 net/mac80211/tdls.c 			 skb->len);
skb              1832 net/mac80211/tdls.c 			       skb->len - baselen, false, &elems, NULL, NULL);
skb              1922 net/mac80211/tdls.c 				      struct sk_buff *skb)
skb              1924 net/mac80211/tdls.c 	struct ieee80211_tdls_data *tf = (void *)skb->data;
skb              1934 net/mac80211/tdls.c 	if (skb_linearize(skb))
skb              1942 net/mac80211/tdls.c 		ieee80211_process_tdls_channel_switch_req(sdata, skb);
skb              1945 net/mac80211/tdls.c 		ieee80211_process_tdls_channel_switch_resp(sdata, skb);
skb              1976 net/mac80211/tdls.c 	struct sk_buff *skb;
skb              1980 net/mac80211/tdls.c 	while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) {
skb              1981 net/mac80211/tdls.c 		tf = (struct ieee80211_tdls_data *)skb->data;
skb              1988 net/mac80211/tdls.c 			ieee80211_process_tdls_channel_switch(sdata, skb);
skb              1992 net/mac80211/tdls.c 		kfree_skb(skb);
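
The tdls.c entries above repeatedly reserve element space with skb_put() and fill EID, length, and payload by hand. A hedged sketch of that idiom as a hypothetical generic helper (the real functions write specific elements such as the link identifier and extended capabilities):

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical helper: append one information element (EID, length,
 * payload). skb_put() panics on tailroom overrun, so the caller must
 * have sized the skb for all elements up front, as tdls.c does. */
static void example_add_ie(struct sk_buff *skb, u8 eid,
			   const u8 *data, u8 len)
{
	u8 *pos = skb_put(skb, 2 + len);

	*pos++ = eid;
	*pos++ = len;
	memcpy(pos, data, len);
}
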
skb               197 net/mac80211/tkip.c 			    struct sk_buff *skb, u8 *p2k)
skb               203 net/mac80211/tkip.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               224 net/mac80211/tkip.c 				struct sk_buff *skb,
skb               229 net/mac80211/tkip.c 	ieee80211_get_tkip_p2k(&key->conf, skb, rc4key);
skb                15 net/mac80211/tkip.h 				struct sk_buff *skb,
skb                52 net/mac80211/tx.c 				 struct sk_buff *skb, int group_addr,
skb                60 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               102 net/mac80211/tx.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               214 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
skb               263 net/mac80211/tx.c 	    skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
skb               290 net/mac80211/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
skb               291 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
skb               356 net/mac80211/tx.c 	struct sk_buff *skb;
skb               370 net/mac80211/tx.c 		skb = skb_dequeue(&ps->bc_buf);
skb               371 net/mac80211/tx.c 		if (skb) {
skb               373 net/mac80211/tx.c 			ieee80211_free_txskb(&local->hw, skb);
skb               386 net/mac80211/tx.c 			skb = skb_dequeue(&sta->ps_tx_buf[ac]);
skb               388 net/mac80211/tx.c 			if (skb) {
skb               390 net/mac80211/tx.c 				ieee80211_free_txskb(&local->hw, skb);
skb               403 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
skb               404 net/mac80211/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
skb               460 net/mac80211/tx.c 	skb_queue_tail(&ps->bc_buf, tx->skb);
skb               466 net/mac80211/tx.c 			     struct sk_buff *skb)
skb               474 net/mac80211/tx.c 	if (!ieee80211_is_robust_mgmt_frame(skb))
skb               484 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
skb               485 net/mac80211/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
skb               495 net/mac80211/tx.c 		int ac = skb_get_queue_mapping(tx->skb);
skb               535 net/mac80211/tx.c 		skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
skb               574 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
skb               576 net/mac80211/tx.c 	if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
skb               590 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
skb               591 net/mac80211/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
skb               601 net/mac80211/tx.c 	else if (ieee80211_is_group_privacy_action(tx->skb) &&
skb               606 net/mac80211/tx.c 		 ieee80211_is_robust_mgmt_frame(tx->skb) &&
skb               636 net/mac80211/tx.c 					       tx->skb) &&
skb               637 net/mac80211/tx.c 			    !ieee80211_is_group_privacy_action(tx->skb))
skb               671 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
skb               672 net/mac80211/tx.c 	struct ieee80211_hdr *hdr = (void *)tx->skb->data;
skb               683 net/mac80211/tx.c 	len = min_t(u32, tx->skb->len + FCS_LEN,
skb               690 net/mac80211/tx.c 	txrc.skb = tx->skb;
skb               806 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
skb               807 net/mac80211/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
skb               865 net/mac80211/tx.c 			      struct sk_buff *skb, int hdrlen,
skb               873 net/mac80211/tx.c 	int rem = skb->len - hdrlen - per_fragm;
skb               899 net/mac80211/tx.c 		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
skb               908 net/mac80211/tx.c 		skb_copy_queue_mapping(tmp, skb);
skb               909 net/mac80211/tx.c 		tmp->priority = skb->priority;
skb               910 net/mac80211/tx.c 		tmp->dev = skb->dev;
skb               913 net/mac80211/tx.c 		skb_put_data(tmp, skb->data, hdrlen);
skb               914 net/mac80211/tx.c 		skb_put_data(tmp, skb->data + pos, fraglen);
skb               920 net/mac80211/tx.c 	skb_trim(skb, hdrlen + per_fragm);
skb               927 net/mac80211/tx.c 	struct sk_buff *skb = tx->skb;
skb               928 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               929 net/mac80211/tx.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb               935 net/mac80211/tx.c 	__skb_queue_tail(&tx->skbs, skb);
skb               936 net/mac80211/tx.c 	tx->skb = NULL;
skb               955 net/mac80211/tx.c 	if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
skb               966 net/mac80211/tx.c 	if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
skb               972 net/mac80211/tx.c 	skb_queue_walk(&tx->skbs, skb) {
skb               975 net/mac80211/tx.c 		hdr = (void *)skb->data;
skb               976 net/mac80211/tx.c 		info = IEEE80211_SKB_CB(skb);
skb               978 net/mac80211/tx.c 		if (!skb_queue_is_last(&tx->skbs, skb)) {
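
The ieee80211_fragment() entries (lines 865-920 of tx.c above) copy the 802.11 header plus one payload slice into each new fragment and finally skb_trim() the original down to the first fragment. A simplified sketch under the assumption that cb/queue-mapping propagation and TX headroom are handled elsewhere; example_fragment is a hypothetical name:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Copy header + payload slice into each fragment, queue it, then
 * shrink the original skb so it becomes the first fragment. */
static int example_fragment(struct sk_buff *skb, struct sk_buff_head *frags,
			    int hdrlen, int per_fragm)
{
	int pos = hdrlen + per_fragm;
	int rem = skb->len - pos;

	while (rem > 0) {
		int fraglen = min_t(int, rem, per_fragm);
		struct sk_buff *tmp = dev_alloc_skb(hdrlen + fraglen);

		if (!tmp)
			return -ENOMEM;

		skb_put_data(tmp, skb->data, hdrlen);        /* header  */
		skb_put_data(tmp, skb->data + pos, fraglen); /* payload */
		__skb_queue_tail(frags, tmp);

		pos += fraglen;
		rem -= fraglen;
	}

	skb_trim(skb, hdrlen + per_fragm); /* original = first fragment */
	return 0;
}
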
skb              1002 net/mac80211/tx.c 	struct sk_buff *skb;
skb              1008 net/mac80211/tx.c 	skb_queue_walk(&tx->skbs, skb) {
skb              1009 net/mac80211/tx.c 		ac = skb_get_queue_mapping(skb);
skb              1010 net/mac80211/tx.c 		tx->sta->tx_stats.bytes[ac] += skb->len;
skb              1056 net/mac80211/tx.c 	struct sk_buff *skb;
skb              1061 net/mac80211/tx.c 	skb_queue_walk(&tx->skbs, skb) {
skb              1062 net/mac80211/tx.c 		hdr = (void *) skb->data;
skb              1065 net/mac80211/tx.c 		if (!skb_queue_is_last(&tx->skbs, skb)) {
skb              1066 net/mac80211/tx.c 			struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
skb              1073 net/mac80211/tx.c 			ieee80211_duration(tx, skb, group_addr, next_len);
skb              1082 net/mac80211/tx.c 				  struct sk_buff *skb,
skb              1137 net/mac80211/tx.c 			__skb_queue_tail(&tid_tx->pending, skb);
skb              1162 net/mac80211/tx.c 		     struct sta_info *sta, struct sk_buff *skb)
skb              1166 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1170 net/mac80211/tx.c 	tx->skb = skb;
skb              1182 net/mac80211/tx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              1194 net/mac80211/tx.c 			   tx->sdata->control_port_protocol == tx->skb->protocol) {
skb              1213 net/mac80211/tx.c 			queued = ieee80211_tx_prep_agg(tx, skb, info,
skb              1229 net/mac80211/tx.c 		    skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
skb              1249 net/mac80211/tx.c 					  struct sk_buff *skb)
skb              1251 net/mac80211/tx.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1252 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1271 net/mac80211/tx.c 		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
skb              1287 net/mac80211/tx.c static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
skb              1289 net/mac80211/tx.c 	IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
skb              1292 net/mac80211/tx.c static u32 codel_skb_len_func(const struct sk_buff *skb)
skb              1294 net/mac80211/tx.c 	return skb->len;
skb              1297 net/mac80211/tx.c static codel_time_t codel_skb_time_func(const struct sk_buff *skb)
skb              1301 net/mac80211/tx.c 	info = (const struct ieee80211_tx_info *)skb->cb;
skb              1325 net/mac80211/tx.c static void codel_drop_func(struct sk_buff *skb,
skb              1336 net/mac80211/tx.c 	ieee80211_free_txskb(hw, skb);
skb              1380 net/mac80211/tx.c 			     struct sk_buff *skb)
skb              1385 net/mac80211/tx.c 	ieee80211_free_txskb(&local->hw, skb);
skb              1391 net/mac80211/tx.c 						struct sk_buff *skb)
skb              1401 net/mac80211/tx.c 				  struct sk_buff *skb)
skb              1405 net/mac80211/tx.c 	u32 flow_idx = fq_flow_idx(fq, skb);
skb              1407 net/mac80211/tx.c 	ieee80211_set_skb_enqueue_time(skb);
skb              1410 net/mac80211/tx.c 	fq_tin_enqueue(fq, tin, flow_idx, skb,
skb              1417 net/mac80211/tx.c 				struct fq_flow *flow, struct sk_buff *skb,
skb              1420 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
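
The codel/fq entries work off per-packet state stashed in skb->cb through the IEEE80211_SKB_CB() cast (e.g. control.enqueue_time set at enqueue). The same overlay idiom in generic form, with a hypothetical private struct; the BUILD_BUG_ON enforces the 48-byte cb limit:

#include <linux/build_bug.h>
#include <linux/skbuff.h>

/* Hypothetical per-packet TX state kept in skb->cb, mirroring how
 * IEEE80211_SKB_CB() overlays struct ieee80211_tx_info. */
struct example_tx_cb {
	u64 enqueue_time;
	u32 flags;
};

static inline struct example_tx_cb *example_tx_cb(struct sk_buff *skb)
{
	/* cb is 48 bytes; overlaying a larger struct would corrupt skb */
	BUILD_BUG_ON(sizeof(struct example_tx_cb) > sizeof(skb->cb));
	return (struct example_tx_cb *)skb->cb;
}
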
skb              1598 net/mac80211/tx.c 				struct sk_buff *skb)
skb              1612 net/mac80211/tx.c 	txqi = ieee80211_get_txq(local, vif, sta, skb);
skb              1617 net/mac80211/tx.c 	ieee80211_txq_enqueue(local, txqi, skb);
skb              1631 net/mac80211/tx.c 	struct sk_buff *skb, *tmp;
skb              1634 net/mac80211/tx.c 	skb_queue_walk_safe(skbs, skb, tmp) {
skb              1635 net/mac80211/tx.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1640 net/mac80211/tx.c 			__skb_unlink(skb, skbs);
skb              1641 net/mac80211/tx.c 			ieee80211_free_txskb(&local->hw, skb);
skb              1690 net/mac80211/tx.c 		__skb_unlink(skb, skbs);
skb              1691 net/mac80211/tx.c 		drv_tx(local, &control, skb);
skb              1708 net/mac80211/tx.c 	struct sk_buff *skb;
skb              1715 net/mac80211/tx.c 	skb = skb_peek(skbs);
skb              1716 net/mac80211/tx.c 	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
skb              1717 net/mac80211/tx.c 	info = IEEE80211_SKB_CB(skb);
skb              1737 net/mac80211/tx.c 				vif->hw_queue[skb_get_queue_mapping(skb)];
skb              1793 net/mac80211/tx.c 		if (tx->skb)
skb              1794 net/mac80211/tx.c 			ieee80211_free_txskb(&tx->local->hw, tx->skb);
skb              1812 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
skb              1816 net/mac80211/tx.c 		__skb_queue_tail(&tx->skbs, tx->skb);
skb              1817 net/mac80211/tx.c 		tx->skb = NULL;
skb              1834 net/mac80211/tx.c 		if (tx->skb)
skb              1835 net/mac80211/tx.c 			ieee80211_free_txskb(&tx->local->hw, tx->skb);
skb              1857 net/mac80211/tx.c 			      struct ieee80211_vif *vif, struct sk_buff *skb,
skb              1861 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1865 net/mac80211/tx.c 	if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP)
skb              1870 net/mac80211/tx.c 	info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)];
skb              1884 net/mac80211/tx.c 	if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) {
skb              1898 net/mac80211/tx.c 			 struct sta_info *sta, struct sk_buff *skb,
skb              1904 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              1908 net/mac80211/tx.c 	if (unlikely(skb->len < 10)) {
skb              1909 net/mac80211/tx.c 		dev_kfree_skb(skb);
skb              1914 net/mac80211/tx.c 	led_len = skb->len;
skb              1915 net/mac80211/tx.c 	res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);
skb              1920 net/mac80211/tx.c 		ieee80211_free_txskb(&local->hw, skb);
skb              1930 net/mac80211/tx.c 			sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
skb              1935 net/mac80211/tx.c 	if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
skb              1948 net/mac80211/tx.c 				struct sk_buff *skb,
skb              1956 net/mac80211/tx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              1963 net/mac80211/tx.c 		tail_need -= skb_tailroom(skb);
skb              1967 net/mac80211/tx.c 	if (skb_cloned(skb) &&
skb              1969 net/mac80211/tx.c 	     !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
skb              1976 net/mac80211/tx.c 	if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
skb              1986 net/mac80211/tx.c 		    struct sta_info *sta, struct sk_buff *skb,
skb              1990 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              2000 net/mac80211/tx.c 	headroom -= skb_headroom(skb);
skb              2003 net/mac80211/tx.c 	if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
skb              2004 net/mac80211/tx.c 		ieee80211_free_txskb(&local->hw, skb);
skb              2008 net/mac80211/tx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              2014 net/mac80211/tx.c 			if (mesh_nexthop_resolve(sdata, skb))
skb              2021 net/mac80211/tx.c 	ieee80211_set_qos_hdr(sdata, skb);
skb              2022 net/mac80211/tx.c 	ieee80211_tx(sdata, sta, skb, false, txdata_flags);
skb              2026 net/mac80211/tx.c 					struct sk_buff *skb)
skb              2030 net/mac80211/tx.c 		(struct ieee80211_radiotap_header *) skb->data;
skb              2031 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              2034 net/mac80211/tx.c 	int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
skb              2078 net/mac80211/tx.c 				if (skb->len < (iterator._max_length + FCS_LEN))
skb              2081 net/mac80211/tx.c 				skb_trim(skb, skb->len - FCS_LEN);
skb              2202 net/mac80211/tx.c 	skb_pull(skb, iterator._max_length);
skb              2207 net/mac80211/tx.c netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
skb              2213 net/mac80211/tx.c 		(struct ieee80211_radiotap_header *)skb->data;
skb              2214 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              2222 net/mac80211/tx.c 	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
skb              2230 net/mac80211/tx.c 	len_rthdr = ieee80211_get_radiotap_len(skb->data);
skb              2233 net/mac80211/tx.c 	if (unlikely(skb->len < len_rthdr))
skb              2242 net/mac80211/tx.c 	skb_set_mac_header(skb, len_rthdr);
skb              2247 net/mac80211/tx.c 	skb_set_network_header(skb, len_rthdr);
skb              2248 net/mac80211/tx.c 	skb_set_transport_header(skb, len_rthdr);
skb              2250 net/mac80211/tx.c 	if (skb->len < len_rthdr + 2)
skb              2253 net/mac80211/tx.c 	hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
skb              2256 net/mac80211/tx.c 	if (skb->len < len_rthdr + hdrlen)
skb              2264 net/mac80211/tx.c 	    skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
skb              2268 net/mac80211/tx.c 			skb->protocol = cpu_to_be16((payload[6] << 8) |
skb              2278 net/mac80211/tx.c 		skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
skb              2349 net/mac80211/tx.c 	if (!ieee80211_parse_tx_radiotap(local, skb))
skb              2352 net/mac80211/tx.c 	ieee80211_xmit(sdata, NULL, skb, 0);
skb              2360 net/mac80211/tx.c 	dev_kfree_skb(skb);
skb              2364 net/mac80211/tx.c static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb)
skb              2366 net/mac80211/tx.c 	u16 ethertype = (skb->data[12] << 8) | skb->data[13];
skb              2369 net/mac80211/tx.c 	       skb->len > 14 &&
skb              2370 net/mac80211/tx.c 	       skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
skb              2374 net/mac80211/tx.c 				   struct sk_buff *skb,
skb              2392 net/mac80211/tx.c 		if (is_multicast_ether_addr(skb->data)) {
skb              2396 net/mac80211/tx.c 		sta = sta_info_get_bss(sdata, skb->data);
skb              2409 net/mac80211/tx.c 			sta = sta_info_get(sdata, skb->data);
skb              2424 net/mac80211/tx.c 				if (!ieee80211_is_tdls_setup(skb))
skb              2462 net/mac80211/tx.c 					   struct sk_buff *skb, u32 info_flags,
skb              2494 net/mac80211/tx.c 	ethertype = (skb->data[12] << 8) | skb->data[13];
skb              2504 net/mac80211/tx.c 			memcpy(hdr.addr3, skb->data, ETH_ALEN);
skb              2505 net/mac80211/tx.c 			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
skb              2530 net/mac80211/tx.c 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
skb              2532 net/mac80211/tx.c 		memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
skb              2541 net/mac80211/tx.c 		memcpy(hdr.addr3, skb->data, ETH_ALEN);
skb              2542 net/mac80211/tx.c 		memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
skb              2552 net/mac80211/tx.c 		if (!is_multicast_ether_addr(skb->data)) {
skb              2556 net/mac80211/tx.c 			mpath = mesh_path_lookup(sdata, skb->data);
skb              2567 net/mac80211/tx.c 				mppath = mpp_path_lookup(sdata, skb->data);
skb              2582 net/mac80211/tx.c 		if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
skb              2583 net/mac80211/tx.c 		    !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
skb              2585 net/mac80211/tx.c 					skb->data, skb->data + ETH_ALEN);
skb              2595 net/mac80211/tx.c 			const u8 *mesh_da = skb->data;
skb              2608 net/mac80211/tx.c 						skb->data + ETH_ALEN, NULL);
skb              2612 net/mac80211/tx.c 						sdata, &mesh_hdr, skb->data,
skb              2613 net/mac80211/tx.c 						skb->data + ETH_ALEN);
skb              2628 net/mac80211/tx.c 			memcpy(hdr.addr1, skb->data, ETH_ALEN);
skb              2637 net/mac80211/tx.c 			memcpy(hdr.addr1, skb->data, ETH_ALEN);
skb              2638 net/mac80211/tx.c 			memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
skb              2648 net/mac80211/tx.c 			memcpy(hdr.addr3, skb->data, ETH_ALEN);
skb              2649 net/mac80211/tx.c 			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
skb              2655 net/mac80211/tx.c 			memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
skb              2656 net/mac80211/tx.c 			memcpy(hdr.addr3, skb->data, ETH_ALEN);
skb              2668 net/mac80211/tx.c 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
skb              2669 net/mac80211/tx.c 		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
skb              2681 net/mac80211/tx.c 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
skb              2682 net/mac80211/tx.c 		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
skb              2722 net/mac80211/tx.c 		      !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
skb              2734 net/mac80211/tx.c 	if (unlikely(!multicast && skb->sk &&
skb              2735 net/mac80211/tx.c 		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
skb              2736 net/mac80211/tx.c 		struct sk_buff *ack_skb = skb_clone_sk(skb);
skb              2759 net/mac80211/tx.c 	if (skb_shared(skb)) {
skb              2760 net/mac80211/tx.c 		struct sk_buff *tmp_skb = skb;
skb              2765 net/mac80211/tx.c 		skb = skb_clone(skb, GFP_ATOMIC);
skb              2768 net/mac80211/tx.c 		if (!skb) {
skb              2792 net/mac80211/tx.c 	skb_pull(skb, skip_header_bytes);
skb              2793 net/mac80211/tx.c 	head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
skb              2807 net/mac80211/tx.c 	if (head_need > 0 || skb_cloned(skb)) {
skb              2811 net/mac80211/tx.c 		if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
skb              2812 net/mac80211/tx.c 			ieee80211_free_txskb(&local->hw, skb);
skb              2813 net/mac80211/tx.c 			skb = NULL;
skb              2819 net/mac80211/tx.c 		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
skb              2823 net/mac80211/tx.c 		memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
skb              2829 net/mac80211/tx.c 		qos_control = skb_push(skb, 2);
skb              2830 net/mac80211/tx.c 		memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
skb              2837 net/mac80211/tx.c 		memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
skb              2839 net/mac80211/tx.c 	skb_reset_mac_header(skb);
skb              2841 net/mac80211/tx.c 	info = IEEE80211_SKB_CB(skb);
skb              2849 net/mac80211/tx.c 	return skb;
skb              2851 net/mac80211/tx.c 	kfree_skb(skb);
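
ieee80211_build_hdr() above (and the fast-xmit path later) starts by swapping a shared skb for a private clone, dropping the extra reference to the original; writable header data is then ensured separately via the skb_resize/pskb_expand_head entries. The swap step as a sketch with a hypothetical name:

#include <linux/skbuff.h>

/* If someone else holds a reference, clone the skb and release ours
 * so we own an sk_buff we may relink and retag. The data buffer is
 * still shared and must be made writable separately (the indexed
 * resize helpers call pskb_expand_head() for that). */
static struct sk_buff *example_unshare(struct sk_buff *skb)
{
	if (skb_shared(skb)) {
		struct sk_buff *tmp = skb;

		skb = skb_clone(skb, GFP_ATOMIC);
		kfree_skb(tmp);
	}
	return skb;	/* NULL if the clone failed */
}
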
skb              3146 net/mac80211/tx.c 					struct sk_buff *skb, int headroom)
skb              3148 net/mac80211/tx.c 	if (skb_headroom(skb) < headroom) {
skb              3151 net/mac80211/tx.c 		if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
skb              3163 net/mac80211/tx.c 					 struct sk_buff *skb)
skb              3166 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              3170 net/mac80211/tx.c 	int subframe_len = skb->len - hdr_len;
skb              3181 net/mac80211/tx.c 	if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
skb              3184 net/mac80211/tx.c 	data = skb_push(skb, sizeof(*amsdu_hdr));
skb              3229 net/mac80211/tx.c 				      struct sk_buff *skb)
skb              3235 net/mac80211/tx.c 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
skb              3239 net/mac80211/tx.c 	int subframe_len = skb->len - ETH_ALEN;
skb              3255 net/mac80211/tx.c 	if (skb_is_gso(skb))
skb              3273 net/mac80211/tx.c 	flow_idx = fq_flow_idx(fq, skb);
skb              3282 net/mac80211/tx.c 	flow = fq_flow_classify(fq, tin, flow_idx, skb,
skb              3291 net/mac80211/tx.c 	if (skb->len + head->len > max_amsdu_len)
skb              3294 net/mac80211/tx.c 	nfrags = 1 + skb_shinfo(skb)->nr_frags;
skb              3309 net/mac80211/tx.c 	if (!drv_can_aggregate_in_amsdu(local, head, skb))
skb              3326 net/mac80211/tx.c 	if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
skb              3331 net/mac80211/tx.c 	data = skb_push(skb, ETH_ALEN + 2);
skb              3339 net/mac80211/tx.c 	memset(skb_push(skb, pad), 0, pad);
skb              3341 net/mac80211/tx.c 	head->len += skb->len;
skb              3342 net/mac80211/tx.c 	head->data_len += skb->len;
skb              3343 net/mac80211/tx.c 	*frag_tail = skb;
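
ieee80211_amsdu_aggregate() above pads subframes to a 4-byte boundary by zero-filling pushed bytes (the memset(skb_push(...)) entry). Just the padding step as a sketch, assuming the realloc helper indexed above already guaranteed the headroom:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Zero-fill front padding so the subframe length becomes a multiple
 * of four, as 802.11 A-MSDU framing requires. skb_push() panics if
 * the headroom was not reserved beforehand. */
static void example_amsdu_pad(struct sk_buff *skb)
{
	int pad = -skb->len & 3;	/* bytes to the next 4-byte boundary */

	if (pad)
		memset(skb_push(skb, pad), 0, pad);
}
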
skb              3366 net/mac80211/tx.c 				       struct sk_buff *skb)
skb              3368 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              3369 net/mac80211/tx.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb              3375 net/mac80211/tx.c 	ieee80211_tx_stats(skb->dev, skb->len);
skb              3378 net/mac80211/tx.c 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
skb              3386 net/mac80211/tx.c 	if (skb_shinfo(skb)->gso_size)
skb              3388 net/mac80211/tx.c 			DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
skb              3392 net/mac80211/tx.c 	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
skb              3397 net/mac80211/tx.c 	sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
skb              3398 net/mac80211/tx.c 	sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
skb              3402 net/mac80211/tx.c 		u8 *crypto_hdr = skb->data + pn_offs;
skb              3425 net/mac80211/tx.c 				struct sk_buff *skb)
skb              3428 net/mac80211/tx.c 	u16 ethertype = (skb->data[12] << 8) | skb->data[13];
skb              3448 net/mac80211/tx.c 	if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
skb              3452 net/mac80211/tx.c 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
skb              3464 net/mac80211/tx.c 	if (skb_shared(skb)) {
skb              3465 net/mac80211/tx.c 		struct sk_buff *tmp_skb = skb;
skb              3467 net/mac80211/tx.c 		skb = skb_clone(skb, GFP_ATOMIC);
skb              3470 net/mac80211/tx.c 		if (!skb)
skb              3475 net/mac80211/tx.c 	    ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
skb              3482 net/mac80211/tx.c 	if (unlikely(ieee80211_skb_resize(sdata, skb,
skb              3484 net/mac80211/tx.c 						     skb_headroom(skb), 0),
skb              3486 net/mac80211/tx.c 		kfree_skb(skb);
skb              3490 net/mac80211/tx.c 	memcpy(&eth, skb->data, ETH_HLEN - 2);
skb              3491 net/mac80211/tx.c 	hdr = skb_push(skb, extra_head);
skb              3492 net/mac80211/tx.c 	memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len);
skb              3493 net/mac80211/tx.c 	memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
skb              3494 net/mac80211/tx.c 	memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
skb              3496 net/mac80211/tx.c 	info = IEEE80211_SKB_CB(skb);
skb              3511 net/mac80211/tx.c 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
skb              3524 net/mac80211/tx.c 		tx.skb = skb;
skb              3526 net/mac80211/tx.c 		skb = tx.skb;
skb              3527 net/mac80211/tx.c 		tx.skb = NULL;
skb              3531 net/mac80211/tx.c 				kfree_skb(skb);
skb              3536 net/mac80211/tx.c 	if (ieee80211_queue_skb(local, sdata, sta, skb))
skb              3540 net/mac80211/tx.c 				   fast_tx->key, skb);
skb              3546 net/mac80211/tx.c 	__skb_queue_tail(&tx.skbs, skb);
skb              3557 net/mac80211/tx.c 	struct sk_buff *skb = NULL;
skb              3580 net/mac80211/tx.c 	skb = __skb_dequeue(&txqi->frags);
skb              3581 net/mac80211/tx.c 	if (skb)
skb              3584 net/mac80211/tx.c 	skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
skb              3585 net/mac80211/tx.c 	if (!skb)
skb              3590 net/mac80211/tx.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              3591 net/mac80211/tx.c 	info = IEEE80211_SKB_CB(skb);
skb              3596 net/mac80211/tx.c 	tx.skb = skb;
skb              3615 net/mac80211/tx.c 			ieee80211_free_txskb(&local->hw, skb);
skb              3626 net/mac80211/tx.c 		ieee80211_free_txskb(&local->hw, skb);
skb              3645 net/mac80211/tx.c 					   tx.key, skb);
skb              3650 net/mac80211/tx.c 		skb = __skb_dequeue(&tx.skbs);
skb              3659 net/mac80211/tx.c 	if (skb_has_frag_list(skb) &&
skb              3661 net/mac80211/tx.c 		if (skb_linearize(skb)) {
skb              3662 net/mac80211/tx.c 			ieee80211_free_txskb(&local->hw, skb);
skb              3677 net/mac80211/tx.c 				vif->hw_queue[skb_get_queue_mapping(skb)];
skb              3679 net/mac80211/tx.c 			ieee80211_free_txskb(&local->hw, skb);
skb              3694 net/mac80211/tx.c 	IEEE80211_SKB_CB(skb)->control.vif = vif;
skb              3695 net/mac80211/tx.c 	return skb;
skb              3700 net/mac80211/tx.c 	return skb;
skb              3839 net/mac80211/tx.c void __ieee80211_subif_start_xmit(struct sk_buff *skb,
skb              3849 net/mac80211/tx.c 	if (unlikely(skb->len < ETH_HLEN)) {
skb              3850 net/mac80211/tx.c 		kfree_skb(skb);
skb              3856 net/mac80211/tx.c 	if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
skb              3863 net/mac80211/tx.c 		u16 queue = __ieee80211_select_queue(sdata, sta, skb);
skb              3864 net/mac80211/tx.c 		skb_set_queue_mapping(skb, queue);
skb              3870 net/mac80211/tx.c 		sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
skb              3875 net/mac80211/tx.c 		    ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
skb              3879 net/mac80211/tx.c 	if (skb_is_gso(skb)) {
skb              3882 net/mac80211/tx.c 		segs = skb_gso_segment(skb, 0);
skb              3886 net/mac80211/tx.c 			consume_skb(skb);
skb              3887 net/mac80211/tx.c 			skb = segs;
skb              3891 net/mac80211/tx.c 		if (skb_linearize(skb)) {
skb              3892 net/mac80211/tx.c 			kfree_skb(skb);
skb              3900 net/mac80211/tx.c 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb              3901 net/mac80211/tx.c 			skb_set_transport_header(skb,
skb              3902 net/mac80211/tx.c 						 skb_checksum_start_offset(skb));
skb              3903 net/mac80211/tx.c 			if (skb_checksum_help(skb))
skb              3908 net/mac80211/tx.c 	next = skb;
skb              3910 net/mac80211/tx.c 		skb = next;
skb              3911 net/mac80211/tx.c 		next = skb->next;
skb              3913 net/mac80211/tx.c 		skb->prev = NULL;
skb              3914 net/mac80211/tx.c 		skb->next = NULL;
skb              3916 net/mac80211/tx.c 		skb = ieee80211_build_hdr(sdata, skb, info_flags,
skb              3918 net/mac80211/tx.c 		if (IS_ERR(skb))
skb              3921 net/mac80211/tx.c 		ieee80211_tx_stats(dev, skb->len);
skb              3923 net/mac80211/tx.c 		ieee80211_xmit(sdata, sta, skb, 0);
skb              3927 net/mac80211/tx.c 	kfree_skb(skb);
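
__ieee80211_subif_start_xmit() above segments a GSO skb, consumes the original, and walks the returned singly linked list, detaching each segment before building its 802.11 header. The control flow in isolation, with the per-segment transmit step elided:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical example_tx_segments(): segment, then walk the list. */
static void example_tx_segments(struct sk_buff *skb)
{
	struct sk_buff *segs, *next;

	segs = skb_gso_segment(skb, 0);
	if (IS_ERR(segs)) {
		kfree_skb(skb);
		return;
	}
	if (segs) {
		consume_skb(skb);	/* original replaced by the segments */
		skb = segs;
	}

	next = skb;
	while (next) {
		skb = next;
		next = skb->next;

		skb->prev = NULL;
		skb->next = NULL;	/* detach before handing the segment on */

		/* ... build the 802.11 header and transmit skb here ... */
	}
}
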
skb              3932 net/mac80211/tx.c static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta)
skb              3937 net/mac80211/tx.c 	err = skb_ensure_writable(skb, ETH_HLEN);
skb              3941 net/mac80211/tx.c 	eth = (void *)skb->data;
skb              3947 net/mac80211/tx.c static bool ieee80211_multicast_to_unicast(struct sk_buff *skb,
skb              3951 net/mac80211/tx.c 	const struct ethhdr *eth = (void *)skb->data;
skb              3952 net/mac80211/tx.c 	const struct vlan_ethhdr *ethvlan = (void *)skb->data;
skb              3976 net/mac80211/tx.c 	if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
skb              3991 net/mac80211/tx.c ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev,
skb              3996 net/mac80211/tx.c 	const struct ethhdr *eth = (struct ethhdr *)skb->data;
skb              4013 net/mac80211/tx.c 		cloned_skb = skb_clone(skb, GFP_ATOMIC);
skb              4024 net/mac80211/tx.c 		if (unlikely(ieee80211_change_da(skb, first)))
skb              4026 net/mac80211/tx.c 		__skb_queue_tail(queue, skb);
skb              4029 net/mac80211/tx.c 		kfree_skb(skb);
skb              4030 net/mac80211/tx.c 		skb = NULL;
skb              4036 net/mac80211/tx.c 	__skb_queue_tail(queue, skb);
skb              4048 net/mac80211/tx.c netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
skb              4051 net/mac80211/tx.c 	if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) {
skb              4055 net/mac80211/tx.c 		ieee80211_convert_to_unicast(skb, dev, &queue);
skb              4056 net/mac80211/tx.c 		while ((skb = __skb_dequeue(&queue)))
skb              4057 net/mac80211/tx.c 			__ieee80211_subif_start_xmit(skb, dev, 0, 0);
skb              4059 net/mac80211/tx.c 		__ieee80211_subif_start_xmit(skb, dev, 0, 0);
skb              4067 net/mac80211/tx.c 			      struct sk_buff *skb, u32 info_flags)
skb              4078 net/mac80211/tx.c 	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
skb              4079 net/mac80211/tx.c 		kfree_skb(skb);
skb              4080 net/mac80211/tx.c 		skb = ERR_PTR(-EINVAL);
skb              4084 net/mac80211/tx.c 	skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, 0);
skb              4085 net/mac80211/tx.c 	if (IS_ERR(skb))
skb              4088 net/mac80211/tx.c 	hdr = (void *)skb->data;
skb              4090 net/mac80211/tx.c 	tx.skb = skb;
skb              4094 net/mac80211/tx.c 		kfree_skb(skb);
skb              4100 net/mac80211/tx.c 	return skb;
skb              4109 net/mac80211/tx.c 	struct sk_buff *skb;
skb              4113 net/mac80211/tx.c 		while ((skb = skb_dequeue(&local->pending[i])) != NULL)
skb              4114 net/mac80211/tx.c 			ieee80211_free_txskb(&local->hw, skb);
skb              4124 net/mac80211/tx.c 				     struct sk_buff *skb)
skb              4126 net/mac80211/tx.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              4138 net/mac80211/tx.c 			dev_kfree_skb(skb);
skb              4142 net/mac80211/tx.c 		result = ieee80211_tx(sdata, NULL, skb, true, 0);
skb              4147 net/mac80211/tx.c 		__skb_queue_tail(&skbs, skb);
skb              4149 net/mac80211/tx.c 		hdr = (struct ieee80211_hdr *)skb->data;
skb              4152 net/mac80211/tx.c 		result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
skb              4181 net/mac80211/tx.c 			struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
skb              4182 net/mac80211/tx.c 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb              4185 net/mac80211/tx.c 				ieee80211_free_txskb(&local->hw, skb);
skb              4192 net/mac80211/tx.c 			txok = ieee80211_tx_pending_skb(local, skb);
skb              4210 net/mac80211/tx.c 				       struct ps_data *ps, struct sk_buff *skb,
skb              4231 net/mac80211/tx.c 	tim = pos = skb_put(skb, 6);
skb              4264 net/mac80211/tx.c 		skb_put(skb, n2 - n1);
skb              4275 net/mac80211/tx.c 				    struct ps_data *ps, struct sk_buff *skb,
skb              4288 net/mac80211/tx.c 		__ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
skb              4291 net/mac80211/tx.c 		__ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
skb              4471 net/mac80211/tx.c 	struct sk_buff *skb = NULL;
skb              4506 net/mac80211/tx.c 			skb = dev_alloc_skb(local->tx_headroom +
skb              4510 net/mac80211/tx.c 			if (!skb)
skb              4513 net/mac80211/tx.c 			skb_reserve(skb, local->tx_headroom);
skb              4514 net/mac80211/tx.c 			skb_put_data(skb, beacon->head, beacon->head_len);
skb              4516 net/mac80211/tx.c 			ieee80211_beacon_add_tim(sdata, &ap->ps, skb,
skb              4521 net/mac80211/tx.c 				offs->tim_length = skb->len - beacon->head_len;
skb              4524 net/mac80211/tx.c 				csa_off_base = skb->len;
skb              4528 net/mac80211/tx.c 				skb_put_data(skb, beacon->tail,
skb              4547 net/mac80211/tx.c 		skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
skb              4549 net/mac80211/tx.c 		if (!skb)
skb              4551 net/mac80211/tx.c 		skb_reserve(skb, local->tx_headroom);
skb              4552 net/mac80211/tx.c 		skb_put_data(skb, beacon->head, beacon->head_len);
skb              4554 net/mac80211/tx.c 		hdr = (struct ieee80211_hdr *) skb->data;
skb              4579 net/mac80211/tx.c 		skb = dev_alloc_skb(local->tx_headroom +
skb              4584 net/mac80211/tx.c 		if (!skb)
skb              4586 net/mac80211/tx.c 		skb_reserve(skb, local->tx_headroom);
skb              4587 net/mac80211/tx.c 		skb_put_data(skb, beacon->head, beacon->head_len);
skb              4588 net/mac80211/tx.c 		ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
skb              4592 net/mac80211/tx.c 			offs->tim_length = skb->len - beacon->head_len;
skb              4595 net/mac80211/tx.c 		skb_put_data(skb, beacon->tail, beacon->tail_len);
skb              4617 net/mac80211/tx.c 	info = IEEE80211_SKB_CB(skb);
skb              4627 net/mac80211/tx.c 	txrc.skb = skb;
skb              4640 net/mac80211/tx.c 	return skb;
skb              4697 net/mac80211/tx.c 	struct sk_buff *skb = NULL;
skb              4712 net/mac80211/tx.c 	skb = dev_alloc_skb(presp->len);
skb              4713 net/mac80211/tx.c 	if (!skb)
skb              4716 net/mac80211/tx.c 	skb_put_data(skb, presp->data, presp->len);
skb              4718 net/mac80211/tx.c 	hdr = (struct ieee80211_hdr *) skb->data;
skb              4723 net/mac80211/tx.c 	return skb;
skb              4734 net/mac80211/tx.c 	struct sk_buff *skb;
skb              4743 net/mac80211/tx.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
skb              4744 net/mac80211/tx.c 	if (!skb)
skb              4747 net/mac80211/tx.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb              4749 net/mac80211/tx.c 	pspoll = skb_put_zero(skb, sizeof(*pspoll));
skb              4760 net/mac80211/tx.c 	return skb;
skb              4772 net/mac80211/tx.c 	struct sk_buff *skb;
skb              4791 net/mac80211/tx.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
skb              4793 net/mac80211/tx.c 	if (!skb)
skb              4796 net/mac80211/tx.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb              4798 net/mac80211/tx.c 	nullfunc = skb_put_zero(skb, sizeof(*nullfunc));
skb              4810 net/mac80211/tx.c 		skb->priority = 7;
skb              4811 net/mac80211/tx.c 		skb_set_queue_mapping(skb, IEEE80211_AC_VO);
skb              4812 net/mac80211/tx.c 		skb_put_data(skb, &qoshdr, sizeof(qoshdr));
skb              4819 net/mac80211/tx.c 	return skb;
skb              4830 net/mac80211/tx.c 	struct sk_buff *skb;
skb              4836 net/mac80211/tx.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
skb              4838 net/mac80211/tx.c 	if (!skb)
skb              4841 net/mac80211/tx.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb              4843 net/mac80211/tx.c 	hdr = skb_put_zero(skb, sizeof(*hdr));
skb              4850 net/mac80211/tx.c 	pos = skb_put(skb, ie_ssid_len);
skb              4857 net/mac80211/tx.c 	return skb;
skb              4897 net/mac80211/tx.c 	struct sk_buff *skb = NULL;
skb              4930 net/mac80211/tx.c 		skb = skb_dequeue(&ps->bc_buf);
skb              4931 net/mac80211/tx.c 		if (!skb)
skb              4935 net/mac80211/tx.c 		if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) {
skb              4937 net/mac80211/tx.c 				(struct ieee80211_hdr *) skb->data;
skb              4946 net/mac80211/tx.c 			sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
skb              4947 net/mac80211/tx.c 		if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
skb              4949 net/mac80211/tx.c 		ieee80211_free_txskb(hw, skb);
skb              4952 net/mac80211/tx.c 	info = IEEE80211_SKB_CB(skb);
skb              4958 net/mac80211/tx.c 		skb = NULL;
skb              4962 net/mac80211/tx.c 	return skb;
skb              5058 net/mac80211/tx.c 				 struct sk_buff *skb, int tid,
skb              5063 net/mac80211/tx.c 	skb_reset_mac_header(skb);
skb              5064 net/mac80211/tx.c 	skb_set_queue_mapping(skb, ac);
skb              5065 net/mac80211/tx.c 	skb->priority = tid;
skb              5067 net/mac80211/tx.c 	skb->dev = sdata->dev;
skb              5075 net/mac80211/tx.c 	IEEE80211_SKB_CB(skb)->band = band;
skb              5076 net/mac80211/tx.c 	ieee80211_xmit(sdata, NULL, skb, txdata_flags);
skb              5086 net/mac80211/tx.c 	struct sk_buff *skb;
skb              5106 net/mac80211/tx.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
skb              5108 net/mac80211/tx.c 	if (!skb)
skb              5111 net/mac80211/tx.c 	skb_reserve(skb, local->hw.extra_tx_headroom + sizeof(struct ethhdr));
skb              5113 net/mac80211/tx.c 	skb_put_data(skb, buf, len);
skb              5115 net/mac80211/tx.c 	ehdr = skb_push(skb, sizeof(struct ethhdr));
skb              5120 net/mac80211/tx.c 	skb->dev = dev;
skb              5121 net/mac80211/tx.c 	skb->protocol = htons(ETH_P_802_3);
skb              5122 net/mac80211/tx.c 	skb_reset_network_header(skb);
skb              5123 net/mac80211/tx.c 	skb_reset_mac_header(skb);
skb              5126 net/mac80211/tx.c 	__ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags);
skb              5137 net/mac80211/tx.c 	struct sk_buff *skb;
skb              5139 net/mac80211/tx.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + len +
skb              5142 net/mac80211/tx.c 	if (!skb)
skb              5145 net/mac80211/tx.c 	skb_reserve(skb, local->hw.extra_tx_headroom);
skb              5146 net/mac80211/tx.c 	skb_put_data(skb, buf, len);
skb              5148 net/mac80211/tx.c 	skb->dev = dev;
skb              5149 net/mac80211/tx.c 	skb->protocol = htons(ETH_P_802_3);
skb              5150 net/mac80211/tx.c 	skb_reset_network_header(skb);
skb              5151 net/mac80211/tx.c 	skb_reset_mac_header(skb);
skb              5154 net/mac80211/tx.c 	__ieee80211_subif_start_xmit(skb, skb->dev, 0,
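
tx.c ends with the 802.3 TX helpers: dev_alloc_skb() sized for headroom plus an Ethernet header, skb_reserve(), skb_put_data() for the payload, then skb_push() for the header. A hedged sketch of that allocation pattern; example_build_8023 and its parameters are hypothetical, with 'headroom' standing in for local->hw.extra_tx_headroom:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Allocate with room reserved, append payload, prepend the header. */
static struct sk_buff *example_build_8023(const void *buf, size_t len,
					  unsigned int headroom,
					  const u8 *da, const u8 *sa,
					  __be16 proto)
{
	struct ethhdr *ehdr;
	struct sk_buff *skb;

	skb = dev_alloc_skb(headroom + sizeof(*ehdr) + len);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom + sizeof(*ehdr));
	skb_put_data(skb, buf, len);		/* payload */

	ehdr = skb_push(skb, sizeof(*ehdr));	/* uses the reserved room */
	memcpy(ehdr->h_dest, da, ETH_ALEN);
	memcpy(ehdr->h_source, sa, ETH_ALEN);
	ehdr->h_proto = proto;

	skb_reset_mac_header(skb);
	return skb;
}
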
skb                51 net/mac80211/util.c 	struct sk_buff *skb;
skb                54 net/mac80211/util.c 	skb_queue_walk(&tx->skbs, skb) {
skb                55 net/mac80211/util.c 		hdr = (struct ieee80211_hdr *) skb->data;
skb               521 net/mac80211/util.c 			       struct sk_buff *skb)
skb               525 net/mac80211/util.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               529 net/mac80211/util.c 		ieee80211_free_txskb(&local->hw, skb);
skb               536 net/mac80211/util.c 	__skb_queue_tail(&local->pending[queue], skb);
skb               546 net/mac80211/util.c 	struct sk_buff *skb;
skb               551 net/mac80211/util.c 	while ((skb = skb_dequeue(skbs))) {
skb               552 net/mac80211/util.c 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               555 net/mac80211/util.c 			ieee80211_free_txskb(&local->hw, skb);
skb               565 net/mac80211/util.c 		__skb_queue_tail(&local->pending[queue], skb);
skb              1559 net/mac80211/util.c 	struct sk_buff *skb;
skb              1564 net/mac80211/util.c 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
skb              1566 net/mac80211/util.c 	if (!skb)
skb              1569 net/mac80211/util.c 	skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
skb              1571 net/mac80211/util.c 	mgmt = skb_put_zero(skb, 24 + 6);
skb              1581 net/mac80211/util.c 		skb_put_data(skb, extra, extra_len);
skb              1585 net/mac80211/util.c 		err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx);
skb              1589 net/mac80211/util.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
skb              1591 net/mac80211/util.c 	ieee80211_tx_skb(sdata, skb);
skb              1600 net/mac80211/util.c 	struct sk_buff *skb;
skb              1614 net/mac80211/util.c 		skb = dev_alloc_skb(local->hw.extra_tx_headroom +
skb              1616 net/mac80211/util.c 		if (!skb)
skb              1619 net/mac80211/util.c 		skb_reserve(skb, local->hw.extra_tx_headroom);
skb              1622 net/mac80211/util.c 		skb_put_data(skb, mgmt, IEEE80211_DEAUTH_FRAME_LEN);
skb              1626 net/mac80211/util.c 			IEEE80211_SKB_CB(skb)->flags |=
skb              1629 net/mac80211/util.c 		ieee80211_tx_skb(sdata, skb);
skb              1886 net/mac80211/util.c 	struct sk_buff *skb;
skb              1903 net/mac80211/util.c 	skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len,
skb              1905 net/mac80211/util.c 	if (!skb)
skb              1909 net/mac80211/util.c 	ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb),
skb              1910 net/mac80211/util.c 					   skb_tailroom(skb), &dummy_ie_desc,
skb              1913 net/mac80211/util.c 	skb_put(skb, ies_len);
skb              1916 net/mac80211/util.c 		mgmt = (struct ieee80211_mgmt *) skb->data;
skb              1921 net/mac80211/util.c 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
skb              1923 net/mac80211/util.c 	return skb;
skb              3101 net/mac80211/util.c 			    struct sk_buff *skb, bool need_basic,
skb              3123 net/mac80211/util.c 	if (skb_tailroom(skb) < rates + 2)
skb              3126 net/mac80211/util.c 	pos = skb_put(skb, rates + 2);
skb              3145 net/mac80211/util.c 				struct sk_buff *skb, bool need_basic,
skb              3171 net/mac80211/util.c 	if (skb_tailroom(skb) < exrates + 2)
skb              3175 net/mac80211/util.c 		pos = skb_put(skb, exrates + 2);
skb              3471 net/mac80211/util.c 	struct sk_buff *skb;
skb              3483 net/mac80211/util.c 	skb = dev_alloc_skb(local->tx_headroom + hdr_len +
skb              3488 net/mac80211/util.c 	if (!skb)
skb              3491 net/mac80211/util.c 	skb_reserve(skb, local->tx_headroom);
skb              3492 net/mac80211/util.c 	mgmt = skb_put_zero(skb, hdr_len);
skb              3506 net/mac80211/util.c 	pos = skb_put(skb, 5);
skb              3517 net/mac80211/util.c 		skb_put(skb, 3);
skb              3530 net/mac80211/util.c 		skb_put(skb, 8);
skb              3547 net/mac80211/util.c 		skb_put(skb, 5);
skb              3551 net/mac80211/util.c 	ieee80211_tx_skb(sdata, skb);
skb              3989 net/mac80211/util.c 	struct sk_buff *skb;
skb              3991 net/mac80211/util.c 	skb_queue_walk(&txqi->frags, skb) {
skb              3993 net/mac80211/util.c 		frag_bytes += skb->len;
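
util.c closes with a non-destructive skb_queue_walk() accounting the bytes buffered on txqi->frags. The same accounting idiom in isolation; the caller must hold whatever lock protects the queue:

#include <linux/skbuff.h>

/* Hypothetical helper: sum the lengths of all queued frames. */
static unsigned int example_queued_bytes(struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned int bytes = 0;

	skb_queue_walk(q, skb)
		bytes += skb->len;

	return bytes;
}
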
skb                67 net/mac80211/wep.c 				struct sk_buff *skb,
skb                70 net/mac80211/wep.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb                71 net/mac80211/wep.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb                77 net/mac80211/wep.c 	if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
skb                81 net/mac80211/wep.c 	newhdr = skb_push(skb, IEEE80211_WEP_IV_LEN);
skb                95 net/mac80211/wep.c 				    struct sk_buff *skb,
skb                98 net/mac80211/wep.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               102 net/mac80211/wep.c 	memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen);
skb               103 net/mac80211/wep.c 	skb_pull(skb, IEEE80211_WEP_IV_LEN);
skb               134 net/mac80211/wep.c 			  struct sk_buff *skb,
skb               141 net/mac80211/wep.c 	if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
skb               144 net/mac80211/wep.c 	iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
skb               148 net/mac80211/wep.c 	len = skb->len - (iv + IEEE80211_WEP_IV_LEN - skb->data);
skb               157 net/mac80211/wep.c 	skb_put(skb, IEEE80211_WEP_ICV_LEN);
skb               194 net/mac80211/wep.c 				 struct sk_buff *skb,
skb               200 net/mac80211/wep.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               209 net/mac80211/wep.c 	if (skb->len < hdrlen + IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN)
skb               212 net/mac80211/wep.c 	len = skb->len - hdrlen - IEEE80211_WEP_IV_LEN - IEEE80211_WEP_ICV_LEN;
skb               214 net/mac80211/wep.c 	keyidx = skb->data[hdrlen + 3] >> 6;
skb               222 net/mac80211/wep.c 	memcpy(rc4key, skb->data + hdrlen, 3);
skb               228 net/mac80211/wep.c 				       skb->data + hdrlen +
skb               233 net/mac80211/wep.c 	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
skb               236 net/mac80211/wep.c 	memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen);
skb               237 net/mac80211/wep.c 	skb_pull(skb, IEEE80211_WEP_IV_LEN);
skb               245 net/mac80211/wep.c 	struct sk_buff *skb = rx->skb;
skb               246 net/mac80211/wep.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb               247 net/mac80211/wep.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               254 net/mac80211/wep.c 		if (skb_linearize(rx->skb))
skb               256 net/mac80211/wep.c 		if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
skb               259 net/mac80211/wep.c 		if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) +
skb               262 net/mac80211/wep.c 		ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
skb               265 net/mac80211/wep.c 		    pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
skb               272 net/mac80211/wep.c static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
skb               274 net/mac80211/wep.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               278 net/mac80211/wep.c 		if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
skb               284 net/mac80211/wep.c 		if (!ieee80211_wep_add_iv(tx->local, skb,
skb               296 net/mac80211/wep.c 	struct sk_buff *skb;
skb               300 net/mac80211/wep.c 	skb_queue_walk(&tx->skbs, skb) {
skb               301 net/mac80211/wep.c 		if (wep_encrypt_skb(tx, skb) < 0) {
skb                20 net/mac80211/wep.h 			  struct sk_buff *skb,
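
The wep.c hits above bracket every skb_push()/skb_put() with skb_headroom()/skb_tailroom() checks before prepending the 4-byte IV and appending the 4-byte ICV. A minimal userspace model of the sk_buff pointer arithmetic (head <= data <= tail <= end) makes those checks concrete; the struct and helpers below are illustrative stand-ins, not the kernel definitions.

    /* Toy model of sk_buff data-pointer arithmetic.  Field names mirror
     * the kernel but this is not the real struct sk_buff.
     */
    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_skb {
        unsigned char buf[64];
        unsigned char *head, *data, *tail, *end;
    };

    static size_t headroom(struct toy_skb *s) { return s->data - s->head; }
    static size_t tailroom(struct toy_skb *s) { return s->end - s->tail; }

    /* skb_push(): grow at the front, e.g. prepend a 4-byte WEP IV */
    static unsigned char *push(struct toy_skb *s, size_t len)
    {
        assert(headroom(s) >= len);     /* the WARN_ON() check in wep.c */
        s->data -= len;
        return s->data;
    }

    /* skb_put(): grow at the end, e.g. append the 4-byte ICV */
    static unsigned char *put(struct toy_skb *s, size_t len)
    {
        unsigned char *old = s->tail;
        assert(tailroom(s) >= len);
        s->tail += len;
        return old;
    }

    int main(void)
    {
        struct toy_skb s;
        s.head = s.buf;
        s.end = s.buf + sizeof(s.buf);
        s.data = s.tail = s.buf + 16;       /* skb_reserve(skb, 16) */

        memcpy(put(&s, 5), "FRAME", 5);     /* payload */
        memcpy(push(&s, 4), "IV..", 4);     /* prepend IV */
        memcpy(put(&s, 4), "ICV.", 4);      /* append ICV */
        printf("len=%zu headroom=%zu tailroom=%zu\n",
               (size_t)(s.tail - s.data), headroom(&s), tailroom(&s));
        return 0;
    }
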
skb                33 net/mac80211/wme.c static int wme_downgrade_ac(struct sk_buff *skb)
skb                35 net/mac80211/wme.c 	switch (skb->priority) {
skb                38 net/mac80211/wme.c 		skb->priority = 5; /* VO -> VI */
skb                42 net/mac80211/wme.c 		skb->priority = 3; /* VI -> BE */
skb                46 net/mac80211/wme.c 		skb->priority = 2; /* BE -> BK */
skb                84 net/mac80211/wme.c 				     struct sta_info *sta, struct sk_buff *skb)
skb                89 net/mac80211/wme.c 	while (sdata->wmm_acm & BIT(skb->priority)) {
skb                90 net/mac80211/wme.c 		int ac = ieee802_1d_to_ac[skb->priority];
skb                93 net/mac80211/wme.c 		    skb->priority == ifmgd->tx_tspec[ac].up)
skb                96 net/mac80211/wme.c 		if (wme_downgrade_ac(skb)) {
skb               108 net/mac80211/wme.c 	if (sta && sta->reserved_tid == skb->priority)
skb               109 net/mac80211/wme.c 		skb->priority = ieee80211_fix_reserved_tid(skb->priority);
skb               112 net/mac80211/wme.c 	return ieee802_1d_to_ac[skb->priority];
skb               117 net/mac80211/wme.c 				 struct sk_buff *skb,
skb               127 net/mac80211/wme.c 		skb->priority = 7;
skb               128 net/mac80211/wme.c 		return ieee802_1d_to_ac[skb->priority];
skb               131 net/mac80211/wme.c 		skb->priority = 0;
skb               132 net/mac80211/wme.c 		return ieee802_1d_to_ac[skb->priority];
skb               136 net/mac80211/wme.c 	skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
skb               138 net/mac80211/wme.c 	return ieee80211_downgrade_queue(sdata, NULL, skb);
skb               142 net/mac80211/wme.c 			     struct sta_info *sta, struct sk_buff *skb)
skb               157 net/mac80211/wme.c 		skb->priority = 0; /* required for correct WPA/11i MIC */
skb               161 net/mac80211/wme.c 	if (skb->protocol == sdata->control_port_protocol) {
skb               162 net/mac80211/wme.c 		skb->priority = 7;
skb               169 net/mac80211/wme.c 	skb->priority = cfg80211_classify8021d(skb, qos_map ?
skb               173 net/mac80211/wme.c 	return ieee80211_downgrade_queue(sdata, sta, skb);
skb               179 net/mac80211/wme.c 			   struct sk_buff *skb)
skb               190 net/mac80211/wme.c 	if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) {
skb               191 net/mac80211/wme.c 		skb->priority = 0; /* required for correct WPA/11i MIC */
skb               203 net/mac80211/wme.c 		ra = skb->data;
skb               210 net/mac80211/wme.c 		sta = sta_info_get(sdata, skb->data);
skb               217 net/mac80211/wme.c 		ra = skb->data;
skb               226 net/mac80211/wme.c 	ret = __ieee80211_select_queue(sdata, sta, skb);
skb               239 net/mac80211/wme.c 			   struct sk_buff *skb)
skb               241 net/mac80211/wme.c 	struct ieee80211_hdr *hdr = (void *)skb->data;
skb               242 net/mac80211/wme.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               243 net/mac80211/wme.c 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
skb                14 net/mac80211/wme.h 				 struct sk_buff *skb,
skb                17 net/mac80211/wme.h 			     struct sta_info *sta, struct sk_buff *skb);
skb                19 net/mac80211/wme.h 			   struct sk_buff *skb);
skb                21 net/mac80211/wme.h 			   struct sk_buff *skb);
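
The wme.c lines above implement the WMM admission-control cascade: while admission control is mandatory on the AC for the frame's current user priority, wme_downgrade_ac() steps the priority down (VO -> VI -> BE -> BK) until it lands on a permitted AC or bottoms out. A standalone sketch of that loop; the UP-to-AC table follows the usual 802.1D/802.11 mapping rather than being copied verbatim from the kernel.

    #include <stdio.h>

    enum { AC_VO, AC_VI, AC_BE, AC_BK };

    static const int up_to_ac[8] = {
        AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
    };

    /* Mirrors wme_downgrade_ac(): returns -1 once BK is reached. */
    static int downgrade(int *prio)
    {
        switch (*prio) {
        case 6: case 7: *prio = 5; return 0;    /* VO -> VI */
        case 4: case 5: *prio = 3; return 0;    /* VI -> BE */
        case 0: case 3: *prio = 2; return 0;    /* BE -> BK */
        default:        return -1;              /* already BK */
        }
    }

    int main(void)
    {
        unsigned int acm = (1u << 5) | (1u << 6) | (1u << 7); /* ACM on VO+VI */
        int prio = 7;                                         /* start at VO */

        while (acm & (1u << prio))      /* sdata->wmm_acm & BIT(priority) */
            if (downgrade(&prio))
                break;
        printf("final priority %d -> AC %d\n", prio, up_to_ac[prio]);
        return 0;
    }
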
skb                35 net/mac80211/wpa.c 	struct sk_buff *skb = tx->skb;
skb                36 net/mac80211/wpa.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb                39 net/mac80211/wpa.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb                41 net/mac80211/wpa.c 	    skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control))
skb                45 net/mac80211/wpa.c 	if (skb->len < hdrlen)
skb                48 net/mac80211/wpa.c 	data = skb->data + hdrlen;
skb                49 net/mac80211/wpa.c 	data_len = skb->len - hdrlen;
skb                69 net/mac80211/wpa.c 	if (WARN(skb_tailroom(skb) < tail ||
skb                70 net/mac80211/wpa.c 		 skb_headroom(skb) < IEEE80211_TKIP_IV_LEN,
skb                72 net/mac80211/wpa.c 		 skb_headroom(skb), IEEE80211_TKIP_IV_LEN,
skb                73 net/mac80211/wpa.c 		 skb_tailroom(skb), tail))
skb                76 net/mac80211/wpa.c 	mic = skb_put(skb, MICHAEL_MIC_LEN);
skb               100 net/mac80211/wpa.c 	struct sk_buff *skb = rx->skb;
skb               101 net/mac80211/wpa.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb               102 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               151 net/mac80211/wpa.c 	if (skb->len < hdrlen + MICHAEL_MIC_LEN)
skb               154 net/mac80211/wpa.c 	if (skb_linearize(rx->skb))
skb               156 net/mac80211/wpa.c 	hdr = (void *)skb->data;
skb               158 net/mac80211/wpa.c 	data = skb->data + hdrlen;
skb               159 net/mac80211/wpa.c 	data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
skb               166 net/mac80211/wpa.c 	skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
skb               193 net/mac80211/wpa.c static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
skb               195 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               197 net/mac80211/wpa.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               211 net/mac80211/wpa.c 	len = skb->len - hdrlen;
skb               218 net/mac80211/wpa.c 	if (WARN_ON(skb_tailroom(skb) < tail ||
skb               219 net/mac80211/wpa.c 		    skb_headroom(skb) < IEEE80211_TKIP_IV_LEN))
skb               222 net/mac80211/wpa.c 	pos = skb_push(skb, IEEE80211_TKIP_IV_LEN);
skb               240 net/mac80211/wpa.c 	skb_put(skb, IEEE80211_TKIP_ICV_LEN);
skb               243 net/mac80211/wpa.c 					   key, skb, pos, len);
skb               250 net/mac80211/wpa.c 	struct sk_buff *skb;
skb               254 net/mac80211/wpa.c 	skb_queue_walk(&tx->skbs, skb) {
skb               255 net/mac80211/wpa.c 		if (tkip_encrypt_skb(tx, skb) < 0)
skb               266 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
skb               269 net/mac80211/wpa.c 	struct sk_buff *skb = rx->skb;
skb               270 net/mac80211/wpa.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb               277 net/mac80211/wpa.c 	if (!rx->sta || skb->len - hdrlen < 12)
skb               281 net/mac80211/wpa.c 	if (skb_linearize(rx->skb))
skb               283 net/mac80211/wpa.c 	hdr = (void *)skb->data;
skb               294 net/mac80211/wpa.c 					  key, skb->data + hdrlen,
skb               295 net/mac80211/wpa.c 					  skb->len - hdrlen, rx->sta->sta.addr,
skb               304 net/mac80211/wpa.c 		skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
skb               307 net/mac80211/wpa.c 	memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen);
skb               308 net/mac80211/wpa.c 	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
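
Both the WEP and TKIP receive paths above strip the per-frame IV the same way: slide the 802.11 header forward over the IV with memmove(), then skb_pull() the IV length off the front, so the payload never has to move. A flat char buffer stands in for the skb in this sketch.

    #include <stdio.h>
    #include <string.h>

    #define IV_LEN 4

    int main(void)
    {
        /* layout: [ header (3 bytes as a stand-in) | IV | payload ] */
        char buf[] = "HDR" "\x01\x02\x03\x04" "PAYLOAD";
        size_t hdrlen = 3;
        char *data = buf;                       /* skb->data */

        memmove(data + IV_LEN, data, hdrlen);   /* slide header over the IV */
        data += IV_LEN;                         /* skb_pull(skb, IV_LEN) */

        printf("%s\n", data);                   /* "HDRPAYLOAD": IV removed */
        return 0;
    }
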
skb               314 net/mac80211/wpa.c static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
skb               321 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               405 net/mac80211/wpa.c static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
skb               408 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               410 net/mac80211/wpa.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               432 net/mac80211/wpa.c 	len = skb->len - hdrlen;
skb               439 net/mac80211/wpa.c 	if (WARN_ON(skb_tailroom(skb) < tail ||
skb               440 net/mac80211/wpa.c 		    skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN))
skb               443 net/mac80211/wpa.c 	pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN);
skb               470 net/mac80211/wpa.c 	ccmp_special_blocks(skb, pn, b_0, aad);
skb               472 net/mac80211/wpa.c 					 skb_put(skb, mic_len));
skb               480 net/mac80211/wpa.c 	struct sk_buff *skb;
skb               484 net/mac80211/wpa.c 	skb_queue_walk(&tx->skbs, skb) {
skb               485 net/mac80211/wpa.c 		if (ccmp_encrypt_skb(tx, skb, mic_len) < 0)
skb               497 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
skb               500 net/mac80211/wpa.c 	struct sk_buff *skb = rx->skb;
skb               501 net/mac80211/wpa.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb               509 net/mac80211/wpa.c 	    !ieee80211_is_robust_mgmt_frame(skb))
skb               513 net/mac80211/wpa.c 		if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN))
skb               518 net/mac80211/wpa.c 		if (skb_linearize(rx->skb))
skb               522 net/mac80211/wpa.c 	data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
skb               529 net/mac80211/wpa.c 		ccmp_hdr2pn(pn, skb->data + hdrlen);
skb               545 net/mac80211/wpa.c 			ccmp_special_blocks(skb, pn, b_0, aad);
skb               549 net/mac80211/wpa.c 				    skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
skb               551 net/mac80211/wpa.c 				    skb->data + skb->len - mic_len))
skb               559 net/mac80211/wpa.c 	if (pskb_trim(skb, skb->len - mic_len))
skb               561 net/mac80211/wpa.c 	memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen);
skb               562 net/mac80211/wpa.c 	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
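
The CCMP receive path above reads the packet number with ccmp_hdr2pn() before the replay check. Per IEEE 802.11, the 8-byte CCMP header carries the 48-bit PN split around the key-ID octet (PN0, PN1, reserved, keyid/ExtIV, PN2..PN5), and the helper reassembles it most-significant-byte first. A self-contained restatement of that unpacking:

    #include <stdint.h>
    #include <stdio.h>

    static void ccmp_hdr2pn(uint8_t *pn, const uint8_t *hdr)
    {
        pn[0] = hdr[7];     /* PN5 (most significant) */
        pn[1] = hdr[6];     /* PN4 */
        pn[2] = hdr[5];     /* PN3 */
        pn[3] = hdr[4];     /* PN2 */
        pn[4] = hdr[1];     /* PN1 */
        pn[5] = hdr[0];     /* PN0 (least significant) */
    }

    int main(void)
    {
        /* PN = 0x010203040506, key index 0, ExtIV bit (0x20) set */
        uint8_t hdr[8] = { 0x06, 0x05, 0x00, 0x20, 0x04, 0x03, 0x02, 0x01 };
        uint8_t pn[6];
        int i;

        ccmp_hdr2pn(pn, hdr);
        for (i = 0; i < 6; i++)
            printf("%02x", pn[i]);
        printf("\n");       /* prints 010203040506 */
        return 0;
    }
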
skb               567 net/mac80211/wpa.c static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
skb               571 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               637 net/mac80211/wpa.c static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
skb               639 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               641 net/mac80211/wpa.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               662 net/mac80211/wpa.c 	len = skb->len - hdrlen;
skb               669 net/mac80211/wpa.c 	if (WARN_ON(skb_tailroom(skb) < tail ||
skb               670 net/mac80211/wpa.c 		    skb_headroom(skb) < IEEE80211_GCMP_HDR_LEN))
skb               673 net/mac80211/wpa.c 	pos = skb_push(skb, IEEE80211_GCMP_HDR_LEN);
skb               675 net/mac80211/wpa.c 	skb_set_network_header(skb, skb_network_offset(skb) +
skb               702 net/mac80211/wpa.c 	gcmp_special_blocks(skb, pn, j_0, aad);
skb               704 net/mac80211/wpa.c 					 skb_put(skb, IEEE80211_GCMP_MIC_LEN));
skb               710 net/mac80211/wpa.c 	struct sk_buff *skb;
skb               714 net/mac80211/wpa.c 	skb_queue_walk(&tx->skbs, skb) {
skb               715 net/mac80211/wpa.c 		if (gcmp_encrypt_skb(tx, skb) < 0)
skb               725 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
skb               728 net/mac80211/wpa.c 	struct sk_buff *skb = rx->skb;
skb               729 net/mac80211/wpa.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb               736 net/mac80211/wpa.c 	    !ieee80211_is_robust_mgmt_frame(skb))
skb               740 net/mac80211/wpa.c 		if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
skb               745 net/mac80211/wpa.c 		if (skb_linearize(rx->skb))
skb               749 net/mac80211/wpa.c 	data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
skb               756 net/mac80211/wpa.c 		gcmp_hdr2pn(pn, skb->data + hdrlen);
skb               772 net/mac80211/wpa.c 			gcmp_special_blocks(skb, pn, j_0, aad);
skb               776 net/mac80211/wpa.c 				    skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
skb               778 net/mac80211/wpa.c 				    skb->data + skb->len -
skb               787 net/mac80211/wpa.c 	if (pskb_trim(skb, skb->len - mic_len))
skb               789 net/mac80211/wpa.c 	memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
skb               790 net/mac80211/wpa.c 	skb_pull(skb, IEEE80211_GCMP_HDR_LEN);
skb               797 net/mac80211/wpa.c 			    struct sk_buff *skb)
skb               799 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               801 net/mac80211/wpa.c 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb               811 net/mac80211/wpa.c 	if (unlikely(skb_headroom(skb) < iv_len &&
skb               812 net/mac80211/wpa.c 		     pskb_expand_head(skb, iv_len, 0, GFP_ATOMIC)))
skb               817 net/mac80211/wpa.c 	pos = skb_push(skb, iv_len);
skb               842 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
skb               845 net/mac80211/wpa.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
skb               860 net/mac80211/wpa.c 	data_len = rx->skb->len - hdrlen - cs->hdr_len;
skb               870 net/mac80211/wpa.c 	if (skb_linearize(rx->skb))
skb               873 net/mac80211/wpa.c 	hdr = (struct ieee80211_hdr *)rx->skb->data;
skb               876 net/mac80211/wpa.c 	skb_pn = rx->skb->data + hdrlen + cs->pn_off;
skb               884 net/mac80211/wpa.c 	if (pskb_trim(rx->skb, rx->skb->len - cs->mic_len))
skb               887 net/mac80211/wpa.c 	memmove(rx->skb->data + cs->hdr_len, rx->skb->data, hdrlen);
skb               888 net/mac80211/wpa.c 	skb_pull(rx->skb, cs->hdr_len);
skb               893 net/mac80211/wpa.c static void bip_aad(struct sk_buff *skb, u8 *aad)
skb               896 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               935 net/mac80211/wpa.c 	struct sk_buff *skb;
skb               945 net/mac80211/wpa.c 	skb = skb_peek(&tx->skbs);
skb               947 net/mac80211/wpa.c 	info = IEEE80211_SKB_CB(skb);
skb               953 net/mac80211/wpa.c 	if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
skb               956 net/mac80211/wpa.c 	mmie = skb_put(skb, sizeof(*mmie));
skb               969 net/mac80211/wpa.c 	bip_aad(skb, aad);
skb               975 net/mac80211/wpa.c 			   skb->data + 24, skb->len - 24, mmie->mic);
skb               983 net/mac80211/wpa.c 	struct sk_buff *skb;
skb               993 net/mac80211/wpa.c 	skb = skb_peek(&tx->skbs);
skb               995 net/mac80211/wpa.c 	info = IEEE80211_SKB_CB(skb);
skb              1000 net/mac80211/wpa.c 	if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
skb              1003 net/mac80211/wpa.c 	mmie = skb_put(skb, sizeof(*mmie));
skb              1013 net/mac80211/wpa.c 	bip_aad(skb, aad);
skb              1018 net/mac80211/wpa.c 			       skb->data + 24, skb->len - 24, mmie->mic);
skb              1026 net/mac80211/wpa.c 	struct sk_buff *skb = rx->skb;
skb              1027 net/mac80211/wpa.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              1031 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb              1038 net/mac80211/wpa.c 	if (skb->len < 24 + sizeof(*mmie))
skb              1042 net/mac80211/wpa.c 		(skb->data + skb->len - sizeof(*mmie));
skb              1056 net/mac80211/wpa.c 		bip_aad(skb, aad);
skb              1058 net/mac80211/wpa.c 				   skb->data + 24, skb->len - 24, mic);
skb              1068 net/mac80211/wpa.c 	skb_trim(skb, skb->len - sizeof(*mmie));
skb              1076 net/mac80211/wpa.c 	struct sk_buff *skb = rx->skb;
skb              1077 net/mac80211/wpa.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              1081 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1088 net/mac80211/wpa.c 	if (skb->len < 24 + sizeof(*mmie))
skb              1092 net/mac80211/wpa.c 		(skb->data + skb->len - sizeof(*mmie));
skb              1106 net/mac80211/wpa.c 		bip_aad(skb, aad);
skb              1108 net/mac80211/wpa.c 				       skb->data + 24, skb->len - 24, mic);
skb              1118 net/mac80211/wpa.c 	skb_trim(skb, skb->len - sizeof(*mmie));
skb              1126 net/mac80211/wpa.c 	struct sk_buff *skb;
skb              1138 net/mac80211/wpa.c 	skb = skb_peek(&tx->skbs);
skb              1140 net/mac80211/wpa.c 	info = IEEE80211_SKB_CB(skb);
skb              1145 net/mac80211/wpa.c 	if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
skb              1148 net/mac80211/wpa.c 	mmie = skb_put(skb, sizeof(*mmie));
skb              1158 net/mac80211/wpa.c 	bip_aad(skb, aad);
skb              1160 net/mac80211/wpa.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb              1166 net/mac80211/wpa.c 			       skb->data + 24, skb->len - 24, mmie->mic) < 0)
skb              1175 net/mac80211/wpa.c 	struct sk_buff *skb = rx->skb;
skb              1176 net/mac80211/wpa.c 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
skb              1180 net/mac80211/wpa.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb              1187 net/mac80211/wpa.c 	if (skb->len < 24 + sizeof(*mmie))
skb              1191 net/mac80211/wpa.c 		(skb->data + skb->len - sizeof(*mmie));
skb              1205 net/mac80211/wpa.c 		bip_aad(skb, aad);
skb              1214 net/mac80211/wpa.c 				       skb->data + 24, skb->len - 24,
skb              1227 net/mac80211/wpa.c 	skb_trim(skb, skb->len - sizeof(*mmie));
skb              1235 net/mac80211/wpa.c 	struct sk_buff *skb;
skb              1239 net/mac80211/wpa.c 	skb_queue_walk(&tx->skbs, skb) {
skb              1240 net/mac80211/wpa.c 		info  = IEEE80211_SKB_CB(skb);
skb              1247 net/mac80211/wpa.c 			res = ieee80211_crypto_cs_encrypt(tx, skb);
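
The BIP paths in wpa.c locate the management MIC IE at the very end of the frame (skb->data + skb->len - sizeof(*mmie)), verify the MIC over the AAD plus the frame body from offset 24 onward, then trim the MMIE off the tail. A sketch of the tail-element arithmetic; the struct below is a simplified stand-in for struct ieee80211_mmie, with field widths per the 802.11 MMIE definition.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct mmie {
        uint8_t  element_id;    /* WLAN_EID_MMIE */
        uint8_t  length;
        uint16_t key_id;
        uint8_t  ipn[6];        /* BIP replay counter */
        uint8_t  mic[8];
    } __attribute__((packed));

    int main(void)
    {
        uint8_t frame[64];
        size_t len = sizeof(frame);
        struct mmie *m;

        memset(frame, 0, sizeof(frame));
        if (len < 24 + sizeof(struct mmie))     /* same bound as the rx code */
            return 1;

        m = (struct mmie *)(frame + len - sizeof(struct mmie));
        /* ... verify m->mic over the AAD plus frame[24..len-1] ... */

        len -= sizeof(struct mmie);     /* skb_trim(skb, len - sizeof(*mmie)) */
        printf("payload after strip: %zu bytes\n", len);
        return 0;
    }
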
skb                14 net/mac802154/driver-ops.h drv_xmit_async(struct ieee802154_local *local, struct sk_buff *skb)
skb                16 net/mac802154/driver-ops.h 	return local->ops->xmit_async(&local->hw, skb);
skb                20 net/mac802154/driver-ops.h drv_xmit_sync(struct ieee802154_local *local, struct sk_buff *skb)
skb                24 net/mac802154/driver-ops.h 	return local->ops->xmit_sync(&local->hw, skb);
skb               123 net/mac802154/ieee802154_i.h void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
skb               126 net/mac802154/ieee802154_i.h ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb               128 net/mac802154/ieee802154_i.h ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
skb               362 net/mac802154/iface.c static int ieee802154_header_create(struct sk_buff *skb,
skb               371 net/mac802154/iface.c 	struct ieee802154_mac_cb *cb = mac_cb(skb);
skb               404 net/mac802154/iface.c 	hlen = ieee802154_hdr_push(skb, &hdr);
skb               408 net/mac802154/iface.c 	skb_reset_mac_header(skb);
skb               409 net/mac802154/iface.c 	skb->mac_len = hlen;
skb               427 net/mac802154/iface.c static int mac802154_header_create(struct sk_buff *skb,
skb               466 net/mac802154/iface.c 	hlen = ieee802154_hdr_push(skb, &hdr);
skb               470 net/mac802154/iface.c 	skb_reset_mac_header(skb);
skb               471 net/mac802154/iface.c 	skb->mac_len = hlen;
skb               480 net/mac802154/iface.c mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
skb               484 net/mac802154/iface.c 	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) {
skb               611 net/mac802154/llsec.c llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
skb               623 net/mac802154/llsec.c 	data = skb_mac_header(skb) + skb->mac_len;
skb               624 net/mac802154/llsec.c 	datalen = skb_tail_pointer(skb) - data;
skb               648 net/mac802154/llsec.c llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
skb               665 net/mac802154/llsec.c 	assoclen = skb->mac_len;
skb               667 net/mac802154/llsec.c 	data = skb_mac_header(skb) + skb->mac_len;
skb               668 net/mac802154/llsec.c 	datalen = skb_tail_pointer(skb) - data;
skb               670 net/mac802154/llsec.c 	skb_put(skb, authlen);
skb               672 net/mac802154/llsec.c 	sg_init_one(&sg, skb_mac_header(skb), assoclen + datalen + authlen);
skb               690 net/mac802154/llsec.c static int llsec_do_encrypt(struct sk_buff *skb,
skb               696 net/mac802154/llsec.c 		return llsec_do_encrypt_unauth(skb, sec, hdr, key);
skb               698 net/mac802154/llsec.c 		return llsec_do_encrypt_auth(skb, sec, hdr, key);
skb               701 net/mac802154/llsec.c int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
skb               708 net/mac802154/llsec.c 	hlen = ieee802154_hdr_pull(skb, &hdr);
skb               715 net/mac802154/llsec.c 		skb_push(skb, hlen);
skb               721 net/mac802154/llsec.c 	if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU)
skb               758 net/mac802154/llsec.c 	skb->mac_len = ieee802154_hdr_push(skb, &hdr);
skb               759 net/mac802154/llsec.c 	skb_reset_mac_header(skb);
skb               761 net/mac802154/llsec.c 	rc = llsec_do_encrypt(skb, sec, &hdr, key);
skb               827 net/mac802154/llsec.c llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
skb               839 net/mac802154/llsec.c 	data = skb_mac_header(skb) + skb->mac_len;
skb               840 net/mac802154/llsec.c 	datalen = skb_tail_pointer(skb) - data;
skb               854 net/mac802154/llsec.c llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
skb               871 net/mac802154/llsec.c 	assoclen = skb->mac_len;
skb               873 net/mac802154/llsec.c 	data = skb_mac_header(skb) + skb->mac_len;
skb               874 net/mac802154/llsec.c 	datalen = skb_tail_pointer(skb) - data;
skb               876 net/mac802154/llsec.c 	sg_init_one(&sg, skb_mac_header(skb), assoclen + datalen);
skb               890 net/mac802154/llsec.c 	skb_trim(skb, skb->len - authlen);
skb               896 net/mac802154/llsec.c llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec,
skb               901 net/mac802154/llsec.c 		return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr);
skb               903 net/mac802154/llsec.c 		return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr);
skb               975 net/mac802154/llsec.c int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
skb               986 net/mac802154/llsec.c 	if (ieee802154_hdr_peek(skb, &hdr) < 0)
skb              1041 net/mac802154/llsec.c 	err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr);
skb                96 net/mac802154/llsec.h int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb);
skb                97 net/mac802154/llsec.h int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb);
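
mac802154_llsec_encrypt() above rejects frames where header, payload, auth tag, and FCS together would exceed the 802.15.4 PHY limit. The constants (127-byte aMaxPHYPacketSize, 2-byte MFR/FCS) come from the standard; the budget check is a one-liner worth spelling out.

    #include <stdbool.h>
    #include <stdio.h>

    #define IEEE802154_MTU       127    /* aMaxPHYPacketSize */
    #define IEEE802154_MFR_SIZE    2    /* frame check sequence */

    static bool fits(unsigned payload, unsigned hlen, unsigned authlen)
    {
        return payload + hlen + authlen + IEEE802154_MFR_SIZE <= IEEE802154_MTU;
    }

    int main(void)
    {
        /* a 16-byte auth tag (highest security level) leaves little room */
        printf("%d\n", fits(100, 11, 16));  /* 129 > 127 -> 0 */
        printf("%d\n", fits( 98, 11, 16));  /* 127       -> 1 */
        return 0;
    }
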
skb                26 net/mac802154/main.c 	struct sk_buff *skb;
skb                28 net/mac802154/main.c 	while ((skb = skb_dequeue(&local->skb_queue))) {
skb                29 net/mac802154/main.c 		switch (skb->pkt_type) {
skb                34 net/mac802154/main.c 			skb->pkt_type = 0;
skb                35 net/mac802154/main.c 			ieee802154_rx(local, skb);
skb                39 net/mac802154/main.c 			     skb->pkt_type);
skb                40 net/mac802154/main.c 			kfree_skb(skb);
skb                24 net/mac802154/rx.c static int ieee802154_deliver_skb(struct sk_buff *skb)
skb                26 net/mac802154/rx.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb                27 net/mac802154/rx.c 	skb->protocol = htons(ETH_P_IEEE802154);
skb                29 net/mac802154/rx.c 	return netif_receive_skb(skb);
skb                34 net/mac802154/rx.c 		       struct sk_buff *skb, const struct ieee802154_hdr *hdr)
skb                45 net/mac802154/rx.c 	switch (mac_cb(skb)->dest.mode) {
skb                47 net/mac802154/rx.c 		if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE)
skb                49 net/mac802154/rx.c 			skb->pkt_type = PACKET_OTHERHOST;
skb                52 net/mac802154/rx.c 			skb->pkt_type = PACKET_HOST;
skb                55 net/mac802154/rx.c 		if (mac_cb(skb)->dest.pan_id != span &&
skb                56 net/mac802154/rx.c 		    mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))
skb                57 net/mac802154/rx.c 			skb->pkt_type = PACKET_OTHERHOST;
skb                58 net/mac802154/rx.c 		else if (mac_cb(skb)->dest.extended_addr == wpan_dev->extended_addr)
skb                59 net/mac802154/rx.c 			skb->pkt_type = PACKET_HOST;
skb                61 net/mac802154/rx.c 			skb->pkt_type = PACKET_OTHERHOST;
skb                64 net/mac802154/rx.c 		if (mac_cb(skb)->dest.pan_id != span &&
skb                65 net/mac802154/rx.c 		    mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))
skb                66 net/mac802154/rx.c 			skb->pkt_type = PACKET_OTHERHOST;
skb                67 net/mac802154/rx.c 		else if (mac_cb(skb)->dest.short_addr == sshort)
skb                68 net/mac802154/rx.c 			skb->pkt_type = PACKET_HOST;
skb                69 net/mac802154/rx.c 		else if (mac_cb(skb)->dest.short_addr ==
skb                71 net/mac802154/rx.c 			skb->pkt_type = PACKET_BROADCAST;
skb                73 net/mac802154/rx.c 			skb->pkt_type = PACKET_OTHERHOST;
skb                80 net/mac802154/rx.c 	skb->dev = sdata->dev;
skb                86 net/mac802154/rx.c 	rc = mac802154_llsec_decrypt(&sdata->sec, skb);
skb                93 net/mac802154/rx.c 	sdata->dev->stats.rx_bytes += skb->len;
skb                95 net/mac802154/rx.c 	switch (mac_cb(skb)->type) {
skb               102 net/mac802154/rx.c 		return ieee802154_deliver_skb(skb);
skb               105 net/mac802154/rx.c 				    "(type = %d)\n", mac_cb(skb)->type);
skb               110 net/mac802154/rx.c 	kfree_skb(skb);
skb               132 net/mac802154/rx.c ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr)
skb               135 net/mac802154/rx.c 	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
skb               137 net/mac802154/rx.c 	skb_reset_mac_header(skb);
skb               139 net/mac802154/rx.c 	hlen = ieee802154_hdr_pull(skb, hdr);
skb               143 net/mac802154/rx.c 	skb->mac_len = hlen;
skb               192 net/mac802154/rx.c 			      struct sk_buff *skb)
skb               198 net/mac802154/rx.c 	ret = ieee802154_parse_frame_start(skb, &hdr);
skb               201 net/mac802154/rx.c 		kfree_skb(skb);
skb               212 net/mac802154/rx.c 		ieee802154_subif_frame(sdata, skb, &hdr);
skb               213 net/mac802154/rx.c 		skb = NULL;
skb               217 net/mac802154/rx.c 	kfree_skb(skb);
skb               221 net/mac802154/rx.c ieee802154_monitors_rx(struct ieee802154_local *local, struct sk_buff *skb)
skb               226 net/mac802154/rx.c 	skb_reset_mac_header(skb);
skb               227 net/mac802154/rx.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               228 net/mac802154/rx.c 	skb->pkt_type = PACKET_OTHERHOST;
skb               229 net/mac802154/rx.c 	skb->protocol = htons(ETH_P_IEEE802154);
skb               238 net/mac802154/rx.c 		skb2 = skb_clone(skb, GFP_ATOMIC);
skb               244 net/mac802154/rx.c 			sdata->dev->stats.rx_bytes += skb->len;
skb               249 net/mac802154/rx.c void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb)
skb               263 net/mac802154/rx.c 		crc = crc_ccitt(0, skb->data, skb->len);
skb               264 net/mac802154/rx.c 		put_unaligned_le16(crc, skb_put(skb, 2));
skb               269 net/mac802154/rx.c 	ieee802154_monitors_rx(local, skb);
skb               275 net/mac802154/rx.c 		crc = crc_ccitt(0, skb->data, skb->len);
skb               282 net/mac802154/rx.c 	skb_trim(skb, skb->len - 2);
skb               284 net/mac802154/rx.c 	__ieee802154_rx_handle_packet(local, skb);
skb               290 net/mac802154/rx.c 	kfree_skb(skb);
skb               294 net/mac802154/rx.c ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
skb               298 net/mac802154/rx.c 	mac_cb(skb)->lqi = lqi;
skb               299 net/mac802154/rx.c 	skb->pkt_type = IEEE802154_RX_MSG;
skb               300 net/mac802154/rx.c 	skb_queue_tail(&local->skb_queue, skb);
skb                29 net/mac802154/tx.c 	struct sk_buff *skb = local->tx_skb;
skb                30 net/mac802154/tx.c 	struct net_device *dev = skb->dev;
skb                33 net/mac802154/tx.c 	res = drv_xmit_sync(local, skb);
skb                37 net/mac802154/tx.c 	ieee802154_xmit_complete(&local->hw, skb, false);
skb                40 net/mac802154/tx.c 	dev->stats.tx_bytes += skb->len;
skb                47 net/mac802154/tx.c 	kfree_skb(skb);
skb                52 net/mac802154/tx.c ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
skb                54 net/mac802154/tx.c 	struct net_device *dev = skb->dev;
skb                61 net/mac802154/tx.c 		if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
skb                62 net/mac802154/tx.c 			nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
skb                65 net/mac802154/tx.c 				consume_skb(skb);
skb                66 net/mac802154/tx.c 				skb = nskb;
skb                72 net/mac802154/tx.c 		crc = crc_ccitt(0, skb->data, skb->len);
skb                73 net/mac802154/tx.c 		put_unaligned_le16(crc, skb_put(skb, 2));
skb                81 net/mac802154/tx.c 		ret = drv_xmit_async(local, skb);
skb                88 net/mac802154/tx.c 		dev->stats.tx_bytes += skb->len;
skb                90 net/mac802154/tx.c 		local->tx_skb = skb;
skb                97 net/mac802154/tx.c 	kfree_skb(skb);
skb               102 net/mac802154/tx.c ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               106 net/mac802154/tx.c 	skb->skb_iif = dev->ifindex;
skb               108 net/mac802154/tx.c 	return ieee802154_tx(sdata->local, skb);
skb               112 net/mac802154/tx.c ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb               121 net/mac802154/tx.c 	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
skb               124 net/mac802154/tx.c 		kfree_skb(skb);
skb               128 net/mac802154/tx.c 	skb->skb_iif = dev->ifindex;
skb               130 net/mac802154/tx.c 	return ieee802154_tx(sdata->local, skb);
skb                58 net/mac802154/util.c void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
skb                75 net/mac802154/util.c 		if (skb->len > max_sifs_size)
skb                87 net/mac802154/util.c 	dev_consume_skb_any(skb);
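
Both mac802154 paths above use crc_ccitt() with initial value 0: tx computes the FCS over the frame and appends it little-endian, while rx recomputes over data plus FCS and expects zero. A bit-by-bit re-implementation of the reflected 0x8408 polynomial demonstrates the round trip; the kernel's lib/crc-ccitt.c is table-driven but computes the same function.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint16_t crc_ccitt(uint16_t crc, const uint8_t *p, size_t len)
    {
        int i;

        while (len--) {
            crc ^= *p++;
            for (i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
        }
        return crc;
    }

    int main(void)
    {
        uint8_t frame[16] = "802.15.4 data";
        size_t len = strlen((char *)frame);
        uint16_t crc = crc_ccitt(0, frame, len);

        frame[len]     = crc & 0xff;    /* put_unaligned_le16(crc, skb_put(..)) */
        frame[len + 1] = crc >> 8;

        /* rx check: crc over payload + FCS must come out zero */
        printf("residue = %u\n", crc_ccitt(0, frame, len + 2));
        return 0;
    }
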
skb               119 net/mpls/af_mpls.c bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
skb               121 net/mpls/af_mpls.c 	if (skb->len <= mtu)
skb               124 net/mpls/af_mpls.c 	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
skb               132 net/mpls/af_mpls.c 				 const struct sk_buff *skb)
skb               136 net/mpls/af_mpls.c 	if (skb->protocol == htons(ETH_P_MPLS_UC)) {
skb               139 net/mpls/af_mpls.c 			MPLS_INC_STATS_LEN(mdev, skb->len,
skb               142 net/mpls/af_mpls.c 	} else if (skb->protocol == htons(ETH_P_IP)) {
skb               143 net/mpls/af_mpls.c 		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
skb               145 net/mpls/af_mpls.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb               150 net/mpls/af_mpls.c 					 IPSTATS_MIB_OUT, skb->len);
skb               156 net/mpls/af_mpls.c static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
skb               168 net/mpls/af_mpls.c 		if (!pskb_may_pull(skb, mpls_hdr_len))
skb               172 net/mpls/af_mpls.c 		hdr = mpls_hdr(skb) + label_index;
skb               197 net/mpls/af_mpls.c 		if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
skb               206 net/mpls/af_mpls.c 				   pskb_may_pull(skb, mpls_hdr_len +
skb               234 net/mpls/af_mpls.c 					     struct sk_buff *skb)
skb               251 net/mpls/af_mpls.c 	hash = mpls_multipath_hash(rt, skb);
skb               270 net/mpls/af_mpls.c 			struct sk_buff *skb, struct mpls_entry_decoded dec)
skb               285 net/mpls/af_mpls.c 	if (!pskb_may_pull(skb, 12))
skb               290 net/mpls/af_mpls.c 		payload_type = ip_hdr(skb)->version;
skb               294 net/mpls/af_mpls.c 		struct iphdr *hdr4 = ip_hdr(skb);
skb               296 net/mpls/af_mpls.c 		skb->protocol = htons(ETH_P_IP);
skb               317 net/mpls/af_mpls.c 		struct ipv6hdr *hdr6 = ipv6_hdr(skb);
skb               318 net/mpls/af_mpls.c 		skb->protocol = htons(ETH_P_IPV6);
skb               341 net/mpls/af_mpls.c static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
skb               363 net/mpls/af_mpls.c 	MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
skb               371 net/mpls/af_mpls.c 	if (skb->pkt_type != PACKET_HOST)
skb               374 net/mpls/af_mpls.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
skb               377 net/mpls/af_mpls.c 	if (!pskb_may_pull(skb, sizeof(*hdr)))
skb               381 net/mpls/af_mpls.c 	hdr = mpls_hdr(skb);
skb               390 net/mpls/af_mpls.c 	nh = mpls_select_multipath(rt, skb);
skb               395 net/mpls/af_mpls.c 	skb_pull(skb, sizeof(*hdr));
skb               396 net/mpls/af_mpls.c 	skb_reset_network_header(skb);
skb               398 net/mpls/af_mpls.c 	skb_orphan(skb);
skb               400 net/mpls/af_mpls.c 	if (skb_warn_if_lro(skb))
skb               403 net/mpls/af_mpls.c 	skb_forward_csum(skb);
skb               418 net/mpls/af_mpls.c 	if (mpls_pkt_too_big(skb, mtu - new_header_size))
skb               426 net/mpls/af_mpls.c 	if (skb_cow(skb, hh_len + new_header_size))
skb               429 net/mpls/af_mpls.c 	skb->dev = out_dev;
skb               430 net/mpls/af_mpls.c 	skb->protocol = htons(ETH_P_MPLS_UC);
skb               434 net/mpls/af_mpls.c 		if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
skb               439 net/mpls/af_mpls.c 		skb_push(skb, new_header_size);
skb               440 net/mpls/af_mpls.c 		skb_reset_network_header(skb);
skb               442 net/mpls/af_mpls.c 		hdr = mpls_hdr(skb);
skb               451 net/mpls/af_mpls.c 	mpls_stats_inc_outucastpkts(out_dev, skb);
skb               456 net/mpls/af_mpls.c 				 out_dev->dev_addr, skb);
skb               459 net/mpls/af_mpls.c 				 mpls_nh_via(rt, nh), skb);
skb               473 net/mpls/af_mpls.c 	kfree_skb(skb);
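
mpls_multipath_hash() above walks the label stack one 4-byte entry at a time until it sees the bottom-of-stack bit, then hashes into the inner IP header. Each label stack entry packs label(20) | TC(3) | S(1) | TTL(8); the shifts in this decode sketch mirror the MPLS_LS_* constants in include/uapi/linux/mpls.h.

    #include <stdint.h>
    #include <stdio.h>

    #define LS_LABEL_SHIFT 12
    #define LS_TC_SHIFT     9
    #define LS_S_SHIFT      8

    int main(void)
    {
        /* two-entry stack: outer label 100, inner label 200 with S set */
        uint32_t stack[] = {
            (100u << LS_LABEL_SHIFT) | 64,
            (200u << LS_LABEL_SHIFT) | (1u << LS_S_SHIFT) | 64,
        };
        unsigned i;

        for (i = 0; i < sizeof(stack) / sizeof(stack[0]); i++) {
            uint32_t e = stack[i];      /* big-endian on the wire */

            printf("label=%u tc=%u ttl=%u%s\n",
                   e >> LS_LABEL_SHIFT,
                   (e >> LS_TC_SHIFT) & 0x7,
                   e & 0xff,
                   (e >> LS_S_SHIFT) & 1 ? " [bottom of stack]" : "");
            if ((e >> LS_S_SHIFT) & 1)
                break;                  /* stop at bottom of stack */
        }
        return 0;
    }
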
skb              1097 net/mpls/af_mpls.c static int mpls_fill_stats_af(struct sk_buff *skb,
skb              1108 net/mpls/af_mpls.c 	nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
skb              1131 net/mpls/af_mpls.c static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
skb              1139 net/mpls/af_mpls.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
skb              1150 net/mpls/af_mpls.c 	if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0)
skb              1154 net/mpls/af_mpls.c 	    nla_put_s32(skb, NETCONFA_INPUT,
skb              1158 net/mpls/af_mpls.c 	nlmsg_end(skb, nlh);
skb              1162 net/mpls/af_mpls.c 	nlmsg_cancel(skb, nlh);
skb              1184 net/mpls/af_mpls.c 	struct sk_buff *skb;
skb              1187 net/mpls/af_mpls.c 	skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL);
skb              1188 net/mpls/af_mpls.c 	if (!skb)
skb              1191 net/mpls/af_mpls.c 	err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, event, 0, type);
skb              1195 net/mpls/af_mpls.c 		kfree_skb(skb);
skb              1199 net/mpls/af_mpls.c 	rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL);
skb              1210 net/mpls/af_mpls.c static int mpls_netconf_valid_get_req(struct sk_buff *skb,
skb              1223 net/mpls/af_mpls.c 	if (!netlink_strict_get_check(skb))
skb              1258 net/mpls/af_mpls.c 	struct sk_buff *skb;
skb              1280 net/mpls/af_mpls.c 	skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
skb              1281 net/mpls/af_mpls.c 	if (!skb)
skb              1284 net/mpls/af_mpls.c 	err = mpls_netconf_fill_devconf(skb, mdev,
skb              1291 net/mpls/af_mpls.c 		kfree_skb(skb);
skb              1294 net/mpls/af_mpls.c 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
skb              1299 net/mpls/af_mpls.c static int mpls_netconf_dump_devconf(struct sk_buff *skb,
skb              1303 net/mpls/af_mpls.c 	struct net *net = sock_net(skb->sk);
skb              1339 net/mpls/af_mpls.c 			if (mpls_netconf_fill_devconf(skb, mdev,
skb              1340 net/mpls/af_mpls.c 						      NETLINK_CB(cb->skb).portid,
skb              1348 net/mpls/af_mpls.c 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              1358 net/mpls/af_mpls.c 	return skb->len;
skb              1655 net/mpls/af_mpls.c static int nla_put_via(struct sk_buff *skb,
skb              1665 net/mpls/af_mpls.c 	nla = nla_reserve(skb, RTA_VIA, alen + 2);
skb              1678 net/mpls/af_mpls.c int nla_put_labels(struct sk_buff *skb, int attrtype,
skb              1685 net/mpls/af_mpls.c 	nla = nla_reserve(skb, attrtype, labels*4);
skb              1781 net/mpls/af_mpls.c static int rtm_to_route_config(struct sk_buff *skb,
skb              1846 net/mpls/af_mpls.c 	cfg->rc_nlinfo.portid	= NETLINK_CB(skb).portid;
skb              1848 net/mpls/af_mpls.c 	cfg->rc_nlinfo.nl_net	= sock_net(skb->sk);
skb              1920 net/mpls/af_mpls.c static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1930 net/mpls/af_mpls.c 	err = rtm_to_route_config(skb, nlh, cfg, extack);
skb              1942 net/mpls/af_mpls.c static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1952 net/mpls/af_mpls.c 	err = rtm_to_route_config(skb, nlh, cfg, extack);
skb              1963 net/mpls/af_mpls.c static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
skb              1970 net/mpls/af_mpls.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
skb              1985 net/mpls/af_mpls.c 	if (nla_put_labels(skb, RTA_DST, 1, &label))
skb              1992 net/mpls/af_mpls.c 		if (nla_put_u8(skb, RTA_TTL_PROPAGATE,
skb              2000 net/mpls/af_mpls.c 		    nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
skb              2004 net/mpls/af_mpls.c 		    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
skb              2008 net/mpls/af_mpls.c 		if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
skb              2020 net/mpls/af_mpls.c 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
skb              2029 net/mpls/af_mpls.c 			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
skb              2043 net/mpls/af_mpls.c 			if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
skb              2048 net/mpls/af_mpls.c 			    nla_put_via(skb, nh->nh_via_table,
skb              2054 net/mpls/af_mpls.c 			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
skb              2062 net/mpls/af_mpls.c 		nla_nest_end(skb, mp);
skb              2065 net/mpls/af_mpls.c 	nlmsg_end(skb, nlh);
skb              2069 net/mpls/af_mpls.c 	nlmsg_cancel(skb, nlh);
skb              2155 net/mpls/af_mpls.c static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
skb              2158 net/mpls/af_mpls.c 	struct net *net = sock_net(skb->sk);
skb              2180 net/mpls/af_mpls.c 			return skb->len;
skb              2204 net/mpls/af_mpls.c 		if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
skb              2211 net/mpls/af_mpls.c 	return skb->len;
skb              2255 net/mpls/af_mpls.c 	struct sk_buff *skb;
skb              2259 net/mpls/af_mpls.c 	skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
skb              2260 net/mpls/af_mpls.c 	if (skb == NULL)
skb              2263 net/mpls/af_mpls.c 	err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
skb              2267 net/mpls/af_mpls.c 		kfree_skb(skb);
skb              2270 net/mpls/af_mpls.c 	rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);
skb              2278 net/mpls/af_mpls.c static int mpls_valid_getroute_req(struct sk_buff *skb,
skb              2292 net/mpls/af_mpls.c 	if (!netlink_strict_get_check(skb))
skb              2350 net/mpls/af_mpls.c 	struct sk_buff *skb;
skb              2383 net/mpls/af_mpls.c 		skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
skb              2384 net/mpls/af_mpls.c 		if (!skb) {
skb              2389 net/mpls/af_mpls.c 		err = mpls_dump_route(skb, portid, in_nlh->nlmsg_seq,
skb              2397 net/mpls/af_mpls.c 		return rtnl_unicast(skb, net, portid);
skb              2410 net/mpls/af_mpls.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              2411 net/mpls/af_mpls.c 	if (!skb) {
skb              2416 net/mpls/af_mpls.c 	skb->protocol = htons(ETH_P_MPLS_UC);
skb              2422 net/mpls/af_mpls.c 		if (skb_cow(skb, hdr_size)) {
skb              2427 net/mpls/af_mpls.c 		skb_reserve(skb, hdr_size);
skb              2428 net/mpls/af_mpls.c 		skb_push(skb, hdr_size);
skb              2429 net/mpls/af_mpls.c 		skb_reset_network_header(skb);
skb              2432 net/mpls/af_mpls.c 		hdr = mpls_hdr(skb);
skb              2441 net/mpls/af_mpls.c 	nh = mpls_select_multipath(rt, skb);
skb              2448 net/mpls/af_mpls.c 		skb_pull(skb, hdr_size);
skb              2449 net/mpls/af_mpls.c 		skb_reset_network_header(skb);
skb              2452 net/mpls/af_mpls.c 	nlh = nlmsg_put(skb, portid, in_nlh->nlmsg_seq,
skb              2469 net/mpls/af_mpls.c 	if (nla_put_labels(skb, RTA_DST, 1, &in_label))
skb              2473 net/mpls/af_mpls.c 	    nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
skb              2478 net/mpls/af_mpls.c 	    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
skb              2482 net/mpls/af_mpls.c 	if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
skb              2485 net/mpls/af_mpls.c 	nlmsg_end(skb, nlh);
skb              2487 net/mpls/af_mpls.c 	err = rtnl_unicast(skb, net, portid);
skb              2492 net/mpls/af_mpls.c 	nlmsg_cancel(skb, nlh);
skb              2495 net/mpls/af_mpls.c 	kfree_skb(skb);
skb               204 net/mpls/internal.h int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels,
skb               210 net/mpls/internal.h bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
skb               212 net/mpls/internal.h 				 const struct sk_buff *skb);
skb                18 net/mpls/mpls_gso.c static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
skb                22 net/mpls/mpls_gso.c 	u16 mac_offset = skb->mac_header;
skb                24 net/mpls/mpls_gso.c 	u16 mac_len = skb->mac_len;
skb                28 net/mpls/mpls_gso.c 	skb_reset_network_header(skb);
skb                29 net/mpls/mpls_gso.c 	mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
skb                30 net/mpls/mpls_gso.c 	if (unlikely(!pskb_may_pull(skb, mpls_hlen)))
skb                34 net/mpls/mpls_gso.c 	mpls_protocol = skb->protocol;
skb                35 net/mpls/mpls_gso.c 	skb->protocol = skb->inner_protocol;
skb                37 net/mpls/mpls_gso.c 	__skb_pull(skb, mpls_hlen);
skb                39 net/mpls/mpls_gso.c 	skb->mac_len = 0;
skb                40 net/mpls/mpls_gso.c 	skb_reset_mac_header(skb);
skb                43 net/mpls/mpls_gso.c 	mpls_features = skb->dev->mpls_features & features;
skb                44 net/mpls/mpls_gso.c 	segs = skb_mac_gso_segment(skb, mpls_features);
skb                46 net/mpls/mpls_gso.c 		skb_gso_error_unwind(skb, mpls_protocol, mpls_hlen, mac_offset,
skb                50 net/mpls/mpls_gso.c 	skb = segs;
skb                54 net/mpls/mpls_gso.c 		skb->mac_len = mac_len;
skb                55 net/mpls/mpls_gso.c 		skb->protocol = mpls_protocol;
skb                57 net/mpls/mpls_gso.c 		skb_reset_inner_network_header(skb);
skb                59 net/mpls/mpls_gso.c 		__skb_push(skb, mpls_hlen);
skb                61 net/mpls/mpls_gso.c 		skb_reset_mac_header(skb);
skb                62 net/mpls/mpls_gso.c 		skb_set_network_header(skb, mac_len);
skb                63 net/mpls/mpls_gso.c 	} while ((skb = skb->next));
skb                36 net/mpls/mpls_iptunnel.c static int mpls_xmit(struct sk_buff *skb)
skb                44 net/mpls/mpls_iptunnel.c 	struct dst_entry *dst = skb_dst(skb);
skb                58 net/mpls/mpls_iptunnel.c 	skb_orphan(skb);
skb                61 net/mpls/mpls_iptunnel.c 	    !dst->lwtstate || skb_warn_if_lro(skb))
skb                64 net/mpls/mpls_iptunnel.c 	skb_forward_csum(skb);
skb                85 net/mpls/mpls_iptunnel.c 			ttl = ip_hdr(skb)->ttl;
skb                94 net/mpls/mpls_iptunnel.c 			ttl = ipv6_hdr(skb)->hop_limit;
skb               103 net/mpls/mpls_iptunnel.c 	if (mpls_pkt_too_big(skb, mtu - new_header_size))
skb               111 net/mpls/mpls_iptunnel.c 	if (skb_cow(skb, hh_len + new_header_size))
skb               114 net/mpls/mpls_iptunnel.c 	skb_set_inner_protocol(skb, skb->protocol);
skb               115 net/mpls/mpls_iptunnel.c 	skb_reset_inner_network_header(skb);
skb               117 net/mpls/mpls_iptunnel.c 	skb_push(skb, new_header_size);
skb               119 net/mpls/mpls_iptunnel.c 	skb_reset_network_header(skb);
skb               121 net/mpls/mpls_iptunnel.c 	skb->dev = out_dev;
skb               122 net/mpls/mpls_iptunnel.c 	skb->protocol = htons(ETH_P_MPLS_UC);
skb               125 net/mpls/mpls_iptunnel.c 	hdr = mpls_hdr(skb);
skb               133 net/mpls/mpls_iptunnel.c 	mpls_stats_inc_outucastpkts(out_dev, skb);
skb               138 net/mpls/mpls_iptunnel.c 					 skb);
skb               141 net/mpls/mpls_iptunnel.c 					 skb);
skb               146 net/mpls/mpls_iptunnel.c 					 skb);
skb               149 net/mpls/mpls_iptunnel.c 					 skb);
skb               161 net/mpls/mpls_iptunnel.c 	kfree_skb(skb);
skb               228 net/mpls/mpls_iptunnel.c static int mpls_fill_encap_info(struct sk_buff *skb,
skb               235 net/mpls/mpls_iptunnel.c 	if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels,
skb               240 net/mpls/mpls_iptunnel.c 	    nla_put_u8(skb, MPLS_IPTUNNEL_TTL, tun_encap_info->default_ttl))
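
mpls_xmit() above makes room for labels*4 bytes of new header with skb_cow()/skb_push(), then fills the entries innermost-first so that only the innermost entry carries the bottom-of-stack bit, optionally propagating the inner IP TTL. An encoding sketch; entry_encode() is a hypothetical stand-in for the kernel's mpls_entry_encode() helper.

    #include <arpa/inet.h>      /* htonl() */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t entry_encode(uint32_t label, uint8_t ttl, uint8_t tc, bool bos)
    {
        return htonl((label << 12) | ((uint32_t)tc << 9) |
                     (bos ? 1u << 8 : 0) | ttl);
    }

    int main(void)
    {
        uint32_t labels[] = { 16, 17, 18 };     /* outer .. inner */
        uint32_t hdr[3];
        bool bos = true;
        int n = 3, i;

        for (i = n - 1; i >= 0; i--) {          /* same loop shape as mpls_xmit */
            hdr[i] = entry_encode(labels[i], 64, 0, bos);
            bos = false;                        /* only innermost sets S */
        }

        for (i = 0; i < n; i++)
            printf("entry %d: 0x%08x\n", i, ntohl(hdr[i]));
        return 0;
    }
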
skb               379 net/ncsi/internal.h int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
skb               381 net/ncsi/internal.h int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb);
skb               208 net/ncsi/ncsi-aen.c int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb)
skb               215 net/ncsi/ncsi-aen.c 	h = (struct ncsi_aen_pkt_hdr *)skb_network_header(skb);
skb               243 net/ncsi/ncsi-aen.c 	consume_skb(skb);
skb                61 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_default(struct sk_buff *skb,
skb                66 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb                72 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_sp(struct sk_buff *skb,
skb                77 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb                84 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_dc(struct sk_buff *skb,
skb                89 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb                96 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_rc(struct sk_buff *skb,
skb               101 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb               107 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_ae(struct sk_buff *skb,
skb               112 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb               120 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_sl(struct sk_buff *skb,
skb               125 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb               133 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_svf(struct sk_buff *skb,
skb               138 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb               147 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_ev(struct sk_buff *skb,
skb               152 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb               159 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_sma(struct sk_buff *skb,
skb               165 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb               175 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_ebf(struct sk_buff *skb,
skb               180 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb               187 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_egmf(struct sk_buff *skb,
skb               192 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb               199 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_snfc(struct sk_buff *skb,
skb               204 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, sizeof(*cmd));
skb               211 net/ncsi/ncsi-cmd.c static int ncsi_cmd_handler_oem(struct sk_buff *skb,
skb               223 net/ncsi/ncsi-cmd.c 	cmd = skb_put_zero(skb, len);
skb               233 net/ncsi/ncsi-cmd.c 	int           (*handler)(struct sk_buff *skb,
skb               276 net/ncsi/ncsi-cmd.c 	struct sk_buff *skb;
skb               294 net/ncsi/ncsi-cmd.c 	skb = alloc_skb(len, GFP_ATOMIC);
skb               295 net/ncsi/ncsi-cmd.c 	if (!skb) {
skb               300 net/ncsi/ncsi-cmd.c 	nr->cmd = skb;
skb               301 net/ncsi/ncsi-cmd.c 	skb_reserve(skb, hlen);
skb               302 net/ncsi/ncsi-cmd.c 	skb_reset_network_header(skb);
skb               304 net/ncsi/ncsi-cmd.c 	skb->dev = dev;
skb               305 net/ncsi/ncsi-cmd.c 	skb->protocol = htons(ETH_P_NCSI);
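
Every NCSI command handler above follows the same shape: reserve headroom for the lower layers, skb_put_zero() a command struct at the tail, and fill in only the meaningful fields, leaving reserved bytes zero by construction. A toy-buffer model of the skb_put_zero() semantics; the cmd struct is illustrative, not an actual NCSI packet.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_cmd {            /* stand-in for e.g. a set-package command */
        uint8_t mc_id;
        uint8_t hw_arb;
        uint8_t reserved[2];
    };

    struct buf {
        uint8_t data[64];
        size_t len;
    };

    static void *put_zero(struct buf *b, size_t n)
    {
        void *p = b->data + b->len;     /* old tail */

        b->len += n;
        memset(p, 0, n);                /* skb_put_zero() zeroes the new room */
        return p;
    }

    int main(void)
    {
        struct buf b = { .len = 0 };
        struct toy_cmd *cmd = put_zero(&b, sizeof(*cmd));

        cmd->hw_arb = 1;                /* reserved[] stays zero */
        printf("len=%zu hw_arb=%u\n", b.len, cmd->hw_arb);
        return 0;
    }
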
skb                57 net/ncsi/ncsi-netlink.c static int ncsi_write_channel_info(struct sk_buff *skb,
skb                66 net/ncsi/ncsi-netlink.c 	nla_put_u32(skb, NCSI_CHANNEL_ATTR_ID, nc->id);
skb                68 net/ncsi/ncsi-netlink.c 	nla_put_u32(skb, NCSI_CHANNEL_ATTR_LINK_STATE, m->data[2]);
skb                70 net/ncsi/ncsi-netlink.c 		nla_put_flag(skb, NCSI_CHANNEL_ATTR_ACTIVE);
skb                72 net/ncsi/ncsi-netlink.c 		nla_put_flag(skb, NCSI_CHANNEL_ATTR_FORCED);
skb                74 net/ncsi/ncsi-netlink.c 	nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version);
skb                75 net/ncsi/ncsi-netlink.c 	nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.alpha2);
skb                76 net/ncsi/ncsi-netlink.c 	nla_put_string(skb, NCSI_CHANNEL_ATTR_VERSION_STR, nc->version.fw_name);
skb                78 net/ncsi/ncsi-netlink.c 	vid_nest = nla_nest_start_noflag(skb, NCSI_CHANNEL_ATTR_VLAN_LIST);
skb                86 net/ncsi/ncsi-netlink.c 			nla_put_u16(skb, NCSI_CHANNEL_ATTR_VLAN_ID,
skb                89 net/ncsi/ncsi-netlink.c 	nla_nest_end(skb, vid_nest);
skb                94 net/ncsi/ncsi-netlink.c static int ncsi_write_package_info(struct sk_buff *skb,
skb               112 net/ncsi/ncsi-netlink.c 		pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR);
skb               115 net/ncsi/ncsi-netlink.c 		nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
skb               117 net/ncsi/ncsi-netlink.c 			nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
skb               118 net/ncsi/ncsi-netlink.c 		cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
skb               120 net/ncsi/ncsi-netlink.c 			nla_nest_cancel(skb, pnest);
skb               124 net/ncsi/ncsi-netlink.c 			nest = nla_nest_start_noflag(skb, NCSI_CHANNEL_ATTR);
skb               126 net/ncsi/ncsi-netlink.c 				nla_nest_cancel(skb, cnest);
skb               127 net/ncsi/ncsi-netlink.c 				nla_nest_cancel(skb, pnest);
skb               130 net/ncsi/ncsi-netlink.c 			rc = ncsi_write_channel_info(skb, ndp, nc);
skb               132 net/ncsi/ncsi-netlink.c 				nla_nest_cancel(skb, nest);
skb               133 net/ncsi/ncsi-netlink.c 				nla_nest_cancel(skb, cnest);
skb               134 net/ncsi/ncsi-netlink.c 				nla_nest_cancel(skb, pnest);
skb               137 net/ncsi/ncsi-netlink.c 			nla_nest_end(skb, nest);
skb               139 net/ncsi/ncsi-netlink.c 		nla_nest_end(skb, cnest);
skb               140 net/ncsi/ncsi-netlink.c 		nla_nest_end(skb, pnest);
skb               154 net/ncsi/ncsi-netlink.c 	struct sk_buff *skb;
skb               173 net/ncsi/ncsi-netlink.c 	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb               174 net/ncsi/ncsi-netlink.c 	if (!skb)
skb               177 net/ncsi/ncsi-netlink.c 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
skb               180 net/ncsi/ncsi-netlink.c 		kfree_skb(skb);
skb               186 net/ncsi/ncsi-netlink.c 	attr = nla_nest_start_noflag(skb, NCSI_ATTR_PACKAGE_LIST);
skb               188 net/ncsi/ncsi-netlink.c 		kfree_skb(skb);
skb               191 net/ncsi/ncsi-netlink.c 	rc = ncsi_write_package_info(skb, ndp, package_id);
skb               194 net/ncsi/ncsi-netlink.c 		nla_nest_cancel(skb, attr);
skb               198 net/ncsi/ncsi-netlink.c 	nla_nest_end(skb, attr);
skb               200 net/ncsi/ncsi-netlink.c 	genlmsg_end(skb, hdr);
skb               201 net/ncsi/ncsi-netlink.c 	return genlmsg_reply(skb, info);
skb               204 net/ncsi/ncsi-netlink.c 	kfree_skb(skb);
skb               208 net/ncsi/ncsi-netlink.c static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
skb               227 net/ncsi/ncsi-netlink.c 	ndp = ndp_from_ifindex(get_net(sock_net(skb->sk)),
skb               242 net/ncsi/ncsi-netlink.c 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb               249 net/ncsi/ncsi-netlink.c 	attr = nla_nest_start_noflag(skb, NCSI_ATTR_PACKAGE_LIST);
skb               254 net/ncsi/ncsi-netlink.c 	rc = ncsi_write_package_info(skb, ndp, package->id);
skb               256 net/ncsi/ncsi-netlink.c 		nla_nest_cancel(skb, attr);
skb               260 net/ncsi/ncsi-netlink.c 	nla_nest_end(skb, attr);
skb               261 net/ncsi/ncsi-netlink.c 	genlmsg_end(skb, hdr);
skb               265 net/ncsi/ncsi-netlink.c 	return skb->len;
skb               267 net/ncsi/ncsi-netlink.c 	genlmsg_cancel(skb, hdr);
skb               482 net/ncsi/ncsi-netlink.c 	struct sk_buff *skb;
skb               489 net/ncsi/ncsi-netlink.c 	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb               490 net/ncsi/ncsi-netlink.c 	if (!skb)
skb               493 net/ncsi/ncsi-netlink.c 	hdr = genlmsg_put(skb, nr->snd_portid, nr->snd_seq,
skb               496 net/ncsi/ncsi-netlink.c 		kfree_skb(skb);
skb               500 net/ncsi/ncsi-netlink.c 	nla_put_u32(skb, NCSI_ATTR_IFINDEX, nr->rsp->dev->ifindex);
skb               502 net/ncsi/ncsi-netlink.c 		nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID, np->id);
skb               504 net/ncsi/ncsi-netlink.c 		nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, nc->id);
skb               506 net/ncsi/ncsi-netlink.c 		nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, NCSI_RESERVED_CHANNEL);
skb               508 net/ncsi/ncsi-netlink.c 	rc = nla_put(skb, NCSI_ATTR_DATA, nr->rsp->len, (void *)nr->rsp->data);
skb               512 net/ncsi/ncsi-netlink.c 	genlmsg_end(skb, hdr);
skb               513 net/ncsi/ncsi-netlink.c 	return genlmsg_unicast(net, skb, nr->snd_portid);
skb               516 net/ncsi/ncsi-netlink.c 	kfree_skb(skb);
skb               524 net/ncsi/ncsi-netlink.c 	struct sk_buff *skb;
skb               528 net/ncsi/ncsi-netlink.c 	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb               529 net/ncsi/ncsi-netlink.c 	if (!skb)
skb               532 net/ncsi/ncsi-netlink.c 	hdr = genlmsg_put(skb, nr->snd_portid, nr->snd_seq,
skb               535 net/ncsi/ncsi-netlink.c 		kfree_skb(skb);
skb               541 net/ncsi/ncsi-netlink.c 	nla_put_u32(skb, NCSI_ATTR_IFINDEX, nr->cmd->dev->ifindex);
skb               544 net/ncsi/ncsi-netlink.c 		nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID, np->id);
skb               546 net/ncsi/ncsi-netlink.c 		nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID,
skb               551 net/ncsi/ncsi-netlink.c 		nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, nc->id);
skb               553 net/ncsi/ncsi-netlink.c 		nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, NCSI_RESERVED_CHANNEL);
skb               555 net/ncsi/ncsi-netlink.c 	genlmsg_end(skb, hdr);
skb               556 net/ncsi/ncsi-netlink.c 	return genlmsg_unicast(net, skb, nr->snd_portid);
skb               567 net/ncsi/ncsi-netlink.c 	struct sk_buff *skb;
skb               570 net/ncsi/ncsi-netlink.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb               571 net/ncsi/ncsi-netlink.c 	if (!skb)
skb               576 net/ncsi/ncsi-netlink.c 	nlh = nlmsg_put(skb, snd_portid, snd_seq,
skb               582 net/ncsi/ncsi-netlink.c 	nlmsg_end(skb, nlh);
skb               584 net/ncsi/ncsi-netlink.c 	return nlmsg_unicast(net->genl_sock, skb, snd_portid);
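
The ncsi-netlink.c excerpts above all follow one reply shape: allocate a message, write the genetlink header, fill attributes, close the message, then unicast it, freeing the skb on any failure. A minimal sketch of that shape, assuming the usual kernel headers; the names prefixed example_/EXAMPLE_ are hypothetical stand-ins for the NCSI family, command and attribute constants:

	static int example_genl_reply(struct net *net, u32 portid, u32 seq,
				      u32 ifindex)
	{
		struct sk_buff *skb;
		void *hdr;

		skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		hdr = genlmsg_put(skb, portid, seq, &example_genl_family,
				  0, EXAMPLE_CMD_REPLY);
		if (!hdr) {
			kfree_skb(skb);
			return -EMSGSIZE;
		}

		if (nla_put_u32(skb, EXAMPLE_ATTR_IFINDEX, ifindex)) {
			genlmsg_cancel(skb, hdr);	/* undo the header */
			kfree_skb(skb);
			return -EMSGSIZE;
		}

		genlmsg_end(skb, hdr);
		return genlmsg_unicast(net, skb, portid);	/* consumes skb */
	}
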
skb              1105 net/ncsi/ncsi-rsp.c int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
skb              1123 net/ncsi/ncsi-rsp.c 	hdr = (struct ncsi_pkt_hdr *)skb_network_header(skb);
skb              1125 net/ncsi/ncsi-rsp.c 		return ncsi_aen_handler(ndp, skb);
skb              1153 net/ncsi/ncsi-rsp.c 	nr->rsp = skb;
skb                92 net/netfilter/core.c 			       struct sk_buff *skb,
skb               505 net/netfilter/core.c int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
skb               512 net/netfilter/core.c 		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
skb               517 net/netfilter/core.c 			kfree_skb(skb);
skb               523 net/netfilter/core.c 			ret = nf_queue(skb, state, s, verdict);
skb               559 net/netfilter/core.c void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
skb               563 net/netfilter/core.c 	if (skb->_nfct) {
skb               567 net/netfilter/core.c 			attach(new, skb);
skb               586 net/netfilter/core.c 			 const struct sk_buff *skb)
skb               594 net/netfilter/core.c 		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
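
The nf_hook_slow() lines above are the netfilter slow path: every registered hook sees the skb in turn, and its verdict decides what happens next. A condensed sketch of that loop, assuming kernel context (NF_STOLEN and errno-carrying verdicts are abbreviated relative to the real function):

	static int example_hook_slow(struct sk_buff *skb,
				     struct nf_hook_state *state,
				     const struct nf_hook_entries *e)
	{
		unsigned int s;
		int verdict;

		for (s = 0; s < e->num_hook_entries; s++) {
			verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
			switch (verdict & NF_VERDICT_MASK) {
			case NF_ACCEPT:
				break;			/* next hook */
			case NF_DROP:
				kfree_skb(skb);		/* packet rejected */
				return -EPERM;
			case NF_QUEUE:
				return nf_queue(skb, state, s, verdict);
			default:
				return 0;		/* hook owns the skb now */
			}
		}
		return 1;				/* accepted by all hooks */
	}
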
skb                92 net/netfilter/ipset/ip_set_bitmap_gen.h mtype_head(struct ip_set *set, struct sk_buff *skb)
skb                98 net/netfilter/ipset/ip_set_bitmap_gen.h 	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
skb               101 net/netfilter/ipset/ip_set_bitmap_gen.h 	if (mtype_do_head(skb, map) ||
skb               102 net/netfilter/ipset/ip_set_bitmap_gen.h 	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
skb               103 net/netfilter/ipset/ip_set_bitmap_gen.h 	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
skb               104 net/netfilter/ipset/ip_set_bitmap_gen.h 	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
skb               106 net/netfilter/ipset/ip_set_bitmap_gen.h 	if (unlikely(ip_set_put_flags(skb, set)))
skb               108 net/netfilter/ipset/ip_set_bitmap_gen.h 	nla_nest_end(skb, nested);
skb               204 net/netfilter/ipset/ip_set_bitmap_gen.h 	   struct sk_buff *skb, struct netlink_callback *cb)
skb               212 net/netfilter/ipset/ip_set_bitmap_gen.h 	adt = nla_nest_start(skb, IPSET_ATTR_ADT);
skb               229 net/netfilter/ipset/ip_set_bitmap_gen.h 		nested = nla_nest_start(skb, IPSET_ATTR_DATA);
skb               232 net/netfilter/ipset/ip_set_bitmap_gen.h 				nla_nest_cancel(skb, adt);
skb               239 net/netfilter/ipset/ip_set_bitmap_gen.h 		if (mtype_do_list(skb, map, id, set->dsize))
skb               241 net/netfilter/ipset/ip_set_bitmap_gen.h 		if (ip_set_put_extensions(skb, set, x, mtype_is_filled(x)))
skb               243 net/netfilter/ipset/ip_set_bitmap_gen.h 		nla_nest_end(skb, nested);
skb               245 net/netfilter/ipset/ip_set_bitmap_gen.h 	nla_nest_end(skb, adt);
skb               253 net/netfilter/ipset/ip_set_bitmap_gen.h 	nla_nest_cancel(skb, nested);
skb               258 net/netfilter/ipset/ip_set_bitmap_gen.h 	nla_nest_end(skb, adt);
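
mtype_head() and mtype_list() above use the standard netlink nesting discipline: nla_nest_start() opens a container attribute, nla_nest_end() commits it, and nla_nest_cancel() rolls back everything written since the nest opened if an attribute does not fit. A minimal sketch with two of the attributes shown above:

	static int example_put_head(struct sk_buff *skb,
				    const struct ip_set *set, size_t memsize)
	{
		struct nlattr *nested;

		nested = nla_nest_start(skb, IPSET_ATTR_DATA);
		if (!nested)
			return -EMSGSIZE;
		if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
		    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) {
			nla_nest_cancel(skb, nested);	/* drop the partial nest */
			return -EMSGSIZE;
		}
		nla_nest_end(skb, nested);
		return 0;
	}
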
skb                93 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id,
skb                96 net/netfilter/ipset/ip_set_bitmap_ip.c 	return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
skb               101 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_do_head(struct sk_buff *skb, const struct bitmap_ip *map)
skb               103 net/netfilter/ipset/ip_set_bitmap_ip.c 	return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
skb               104 net/netfilter/ipset/ip_set_bitmap_ip.c 	       nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
skb               106 net/netfilter/ipset/ip_set_bitmap_ip.c 		nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask));
skb               110 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               117 net/netfilter/ipset/ip_set_bitmap_ip.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               120 net/netfilter/ipset/ip_set_bitmap_ip.c 	ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
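
bitmap_ip_kadt() above is the first instance of the kadt (kernel add/del/test) pattern that recurs throughout this listing: extract the key fields from the skb, build the extension block from packet metadata via IP_SET_INIT_KEXT(), then dispatch to the set variant's adt function. A generic sketch; struct example_elem is a hypothetical element type:

	static int
	example_kadt(struct ip_set *set, const struct sk_buff *skb,
		     const struct xt_action_param *par,
		     enum ipset_adt adt, struct ip_set_adt_opt *opt)
	{
		ipset_adtfn adtfn = set->variant->adt[adt];
		struct example_elem e = { };	/* hypothetical element */
		struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);

		/* source or destination address, per the match dimension */
		ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);

		return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
	}
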
skb               188 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
skb               194 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
skb               197 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, elem->ether));
skb               201 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_do_head(struct sk_buff *skb, const struct bitmap_ipmac *map)
skb               203 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
skb               204 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	       nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
skb               208 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               215 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               218 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
skb               223 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (skb_mac_header(skb) < skb->head ||
skb               224 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
skb               230 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
skb               232 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
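
The guard in bitmap_ipmac_kadt() above (repeated later for hash:ip,mac and hash:mac) verifies that a complete Ethernet header actually lies between skb->head and skb->data before eth_hdr() is dereferenced, because not every hook point guarantees a valid MAC header. The same check as a small helper:

	static bool example_get_ether(const struct sk_buff *skb, bool src,
				      u8 *ether)
	{
		if (skb_mac_header(skb) < skb->head ||
		    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
			return false;		/* no usable MAC header */

		if (src)
			ether_addr_copy(ether, eth_hdr(skb)->h_source);
		else
			ether_addr_copy(ether, eth_hdr(skb)->h_dest);
		return true;
	}
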
skb                85 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id,
skb                88 net/netfilter/ipset/ip_set_bitmap_port.c 	return nla_put_net16(skb, IPSET_ATTR_PORT,
skb                93 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_do_head(struct sk_buff *skb, const struct bitmap_port *map)
skb                95 net/netfilter/ipset/ip_set_bitmap_port.c 	return nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
skb                96 net/netfilter/ipset/ip_set_bitmap_port.c 	       nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
skb               100 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               107 net/netfilter/ipset/ip_set_bitmap_port.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               111 net/netfilter/ipset/ip_set_bitmap_port.c 	if (!ip_set_get_ip_port(skb, opt->family,
skb               452 net/netfilter/ipset/ip_set_core.c ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
skb               458 net/netfilter/ipset/ip_set_core.c 		if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
skb               464 net/netfilter/ipset/ip_set_core.c 	    ip_set_put_counter(skb, ext_counter(e, set)))
skb               467 net/netfilter/ipset/ip_set_core.c 	    ip_set_put_comment(skb, ext_comment(e, set)))
skb               470 net/netfilter/ipset/ip_set_core.c 	    ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
skb               575 net/netfilter/ipset/ip_set_core.c ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
skb               589 net/netfilter/ipset/ip_set_core.c 	ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);
skb               596 net/netfilter/ipset/ip_set_core.c 		set->variant->kadt(set, skb, par, IPSET_ADD, opt);
skb               613 net/netfilter/ipset/ip_set_core.c ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
skb               627 net/netfilter/ipset/ip_set_core.c 	ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
skb               635 net/netfilter/ipset/ip_set_core.c ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
skb               649 net/netfilter/ipset/ip_set_core.c 	ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
skb               809 net/netfilter/ipset/ip_set_core.c start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
skb               815 net/netfilter/ipset/ip_set_core.c 	nlh = nlmsg_put(skb, portid, seq, nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd),
skb               891 net/netfilter/ipset/ip_set_core.c static int ip_set_none(struct net *net, struct sock *ctnl, struct sk_buff *skb,
skb               900 net/netfilter/ipset/ip_set_core.c 			 struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1042 net/netfilter/ipset/ip_set_core.c 			  struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1120 net/netfilter/ipset/ip_set_core.c static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
skb              1161 net/netfilter/ipset/ip_set_core.c 			 struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1210 net/netfilter/ipset/ip_set_core.c static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
skb              1312 net/netfilter/ipset/ip_set_core.c 	struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
skb              1316 net/netfilter/ipset/ip_set_core.c 	struct sk_buff *skb = cb->skb;
skb              1317 net/netfilter/ipset/ip_set_core.c 	struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
skb              1357 net/netfilter/ipset/ip_set_core.c 		netlink_ack(cb->skb, nlh, ret, NULL);
skb              1363 net/netfilter/ipset/ip_set_core.c ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
skb              1368 net/netfilter/ipset/ip_set_core.c 	unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
skb              1369 net/netfilter/ipset/ip_set_core.c 	struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
skb              1421 net/netfilter/ipset/ip_set_core.c 		nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
skb              1428 net/netfilter/ipset/ip_set_core.c 		if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL,
skb              1430 net/netfilter/ipset/ip_set_core.c 		    nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
skb              1437 net/netfilter/ipset/ip_set_core.c 			if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
skb              1439 net/netfilter/ipset/ip_set_core.c 			    nla_put_u8(skb, IPSET_ATTR_FAMILY,
skb              1441 net/netfilter/ipset/ip_set_core.c 			    nla_put_u8(skb, IPSET_ATTR_REVISION,
skb              1445 net/netfilter/ipset/ip_set_core.c 			    nla_put_net16(skb, IPSET_ATTR_INDEX, htons(index)))
skb              1447 net/netfilter/ipset/ip_set_core.c 			ret = set->variant->head(set, skb);
skb              1456 net/netfilter/ipset/ip_set_core.c 			ret = set->variant->list(set, skb, cb);
skb              1493 net/netfilter/ipset/ip_set_core.c 		nlmsg_end(skb, nlh);
skb              1498 net/netfilter/ipset/ip_set_core.c 	return ret < 0 ? ret : skb->len;
skb              1501 net/netfilter/ipset/ip_set_core.c static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
skb              1515 net/netfilter/ipset/ip_set_core.c 		return netlink_dump_start(ctnl, skb, nlh, &c);
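
ip_set_dump() above does not build the reply itself: it registers ip_set_dump_do() with the netlink dump machinery, which calls it once per reply skb until it returns 0 (returning skb->len signals "more data follows"). A condensed sketch of the handoff:

	static int example_dump(struct sock *ctnl, struct sk_buff *skb,
				const struct nlmsghdr *nlh)
	{
		struct netlink_dump_control c = {
			.dump = ip_set_dump_do,	/* fills one skb per call */
		};

		return netlink_dump_start(ctnl, skb, nlh, &c);
	}
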
skb              1531 net/netfilter/ipset/ip_set_core.c call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
skb              1552 net/netfilter/ipset/ip_set_core.c 		struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
skb              1565 net/netfilter/ipset/ip_set_core.c 		rep = __nlmsg_put(skb2, NETLINK_CB(skb).portid,
skb              1584 net/netfilter/ipset/ip_set_core.c 		netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
skb              1594 net/netfilter/ipset/ip_set_core.c 		     struct sk_buff *skb,
skb              1629 net/netfilter/ipset/ip_set_core.c 		ret = call_ad(ctnl, skb, set, tb, adt, flags,
skb              1640 net/netfilter/ipset/ip_set_core.c 			ret = call_ad(ctnl, skb, set, tb, adt,
skb              1650 net/netfilter/ipset/ip_set_core.c 		       struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1654 net/netfilter/ipset/ip_set_core.c 	return ip_set_ad(net, ctnl, skb,
skb              1659 net/netfilter/ipset/ip_set_core.c 		       struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1663 net/netfilter/ipset/ip_set_core.c 	return ip_set_ad(net, ctnl, skb,
skb              1667 net/netfilter/ipset/ip_set_core.c static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
skb              1705 net/netfilter/ipset/ip_set_core.c 			 struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1727 net/netfilter/ipset/ip_set_core.c 	nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
skb              1739 net/netfilter/ipset/ip_set_core.c 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
skb              1761 net/netfilter/ipset/ip_set_core.c static int ip_set_type(struct net *net, struct sock *ctnl, struct sk_buff *skb,
skb              1787 net/netfilter/ipset/ip_set_core.c 	nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
skb              1800 net/netfilter/ipset/ip_set_core.c 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
skb              1821 net/netfilter/ipset/ip_set_core.c 			   struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1836 net/netfilter/ipset/ip_set_core.c 	nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
skb              1846 net/netfilter/ipset/ip_set_core.c 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
skb              1862 net/netfilter/ipset/ip_set_core.c 			 struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1885 net/netfilter/ipset/ip_set_core.c 	nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
skb              1895 net/netfilter/ipset/ip_set_core.c 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
skb              1914 net/netfilter/ipset/ip_set_core.c 			  struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1940 net/netfilter/ipset/ip_set_core.c 	nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
skb              1949 net/netfilter/ipset/ip_set_core.c 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
skb                25 net/netfilter/ipset/ip_set_getport.c get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
skb                33 net/netfilter/ipset/ip_set_getport.c 		th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
skb                45 net/netfilter/ipset/ip_set_getport.c 		sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
skb                58 net/netfilter/ipset/ip_set_getport.c 		uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
skb                70 net/netfilter/ipset/ip_set_getport.c 		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
skb                81 net/netfilter/ipset/ip_set_getport.c 		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
skb                98 net/netfilter/ipset/ip_set_getport.c ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
skb               101 net/netfilter/ipset/ip_set_getport.c 	const struct iphdr *iph = ip_hdr(skb);
skb               102 net/netfilter/ipset/ip_set_getport.c 	unsigned int protooff = skb_network_offset(skb) + ip_hdrlen(skb);
skb               126 net/netfilter/ipset/ip_set_getport.c 	return get_port(skb, protocol, protooff, src, port, proto);
skb               132 net/netfilter/ipset/ip_set_getport.c ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
skb               139 net/netfilter/ipset/ip_set_getport.c 	nexthdr = ipv6_hdr(skb)->nexthdr;
skb               140 net/netfilter/ipset/ip_set_getport.c 	protoff = ipv6_skip_exthdr(skb,
skb               141 net/netfilter/ipset/ip_set_getport.c 				   skb_network_offset(skb) +
skb               147 net/netfilter/ipset/ip_set_getport.c 	return get_port(skb, nexthdr, protoff, src, port, proto);
skb               153 net/netfilter/ipset/ip_set_getport.c ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
skb               160 net/netfilter/ipset/ip_set_getport.c 		ret = ip_set_get_ip4_port(skb, src, port, &proto);
skb               163 net/netfilter/ipset/ip_set_getport.c 		ret = ip_set_get_ip6_port(skb, src, port, &proto);
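
Every branch of get_port() above goes through skb_header_pointer(), the safe accessor for possibly non-linear skbs: it returns a pointer into the linear data when the requested range is resident, copies into the caller's stack buffer otherwise, and returns NULL only when the packet is too short. The TCP case as a self-contained helper:

	static bool example_tcp_port(const struct sk_buff *skb,
				     unsigned int protooff, bool src,
				     __be16 *port)
	{
		struct tcphdr _tcph;
		const struct tcphdr *th;

		th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
		if (!th)
			return false;	/* truncated header */

		*port = src ? th->source : th->dest;
		return true;
	}
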
skb              1275 net/netfilter/ipset/ip_set_hash_gen.h mtype_head(struct ip_set *set, struct sk_buff *skb)
skb              1292 net/netfilter/ipset/ip_set_hash_gen.h 	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
skb              1295 net/netfilter/ipset/ip_set_hash_gen.h 	if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
skb              1297 net/netfilter/ipset/ip_set_hash_gen.h 	    nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
skb              1301 net/netfilter/ipset/ip_set_hash_gen.h 	    nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
skb              1305 net/netfilter/ipset/ip_set_hash_gen.h 	if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
skb              1308 net/netfilter/ipset/ip_set_hash_gen.h 	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
skb              1309 net/netfilter/ipset/ip_set_hash_gen.h 	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
skb              1310 net/netfilter/ipset/ip_set_hash_gen.h 	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
skb              1312 net/netfilter/ipset/ip_set_hash_gen.h 	if (unlikely(ip_set_put_flags(skb, set)))
skb              1314 net/netfilter/ipset/ip_set_hash_gen.h 	nla_nest_end(skb, nested);
skb              1348 net/netfilter/ipset/ip_set_hash_gen.h 	   struct sk_buff *skb, struct netlink_callback *cb)
skb              1359 net/netfilter/ipset/ip_set_hash_gen.h 	atd = nla_nest_start(skb, IPSET_ATTR_ADT);
skb              1370 net/netfilter/ipset/ip_set_hash_gen.h 		incomplete = skb_tail_pointer(skb);
skb              1384 net/netfilter/ipset/ip_set_hash_gen.h 			nested = nla_nest_start(skb, IPSET_ATTR_DATA);
skb              1387 net/netfilter/ipset/ip_set_hash_gen.h 					nla_nest_cancel(skb, atd);
skb              1393 net/netfilter/ipset/ip_set_hash_gen.h 			if (mtype_data_list(skb, e))
skb              1395 net/netfilter/ipset/ip_set_hash_gen.h 			if (ip_set_put_extensions(skb, set, e, true))
skb              1397 net/netfilter/ipset/ip_set_hash_gen.h 			nla_nest_end(skb, nested);
skb              1400 net/netfilter/ipset/ip_set_hash_gen.h 	nla_nest_end(skb, atd);
skb              1407 net/netfilter/ipset/ip_set_hash_gen.h 	nlmsg_trim(skb, incomplete);
skb              1414 net/netfilter/ipset/ip_set_hash_gen.h 		nla_nest_end(skb, atd);
skb              1422 net/netfilter/ipset/ip_set_hash_gen.h IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
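
mtype_list() above also shows how a dump copes with a reply skb filling up mid-element: it snapshots skb_tail_pointer() before each element and, on overflow, trims the partial element back off with nlmsg_trim() so the dump can resume from that element on the next callback. A sketch of the rollback, assuming a hypothetical per-element writer and resume slot in cb->args:

	static int example_dump_one(struct sk_buff *skb,
				    struct netlink_callback *cb,
				    const void *elem, unsigned long idx)
	{
		unsigned char *incomplete = skb_tail_pointer(skb);
		struct nlattr *nested;

		nested = nla_nest_start(skb, IPSET_ATTR_DATA);
		if (!nested)
			goto overflow;
		if (example_put_elem(skb, elem))	/* hypothetical writer */
			goto overflow;
		nla_nest_end(skb, nested);
		return 0;

	overflow:
		nlmsg_trim(skb, incomplete);	/* remove the partial element */
		cb->args[2] = idx;		/* hypothetical resume point */
		return -EMSGSIZE;
	}
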
skb                56 net/netfilter/ipset/ip_set_hash_ip.c hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *e)
skb                58 net/netfilter/ipset/ip_set_hash_ip.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip))
skb                77 net/netfilter/ipset/ip_set_hash_ip.c hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb                84 net/netfilter/ipset/ip_set_hash_ip.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb                87 net/netfilter/ipset/ip_set_hash_ip.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip);
skb               189 net/netfilter/ipset/ip_set_hash_ip.c hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *e)
skb               191 net/netfilter/ipset/ip_set_hash_ip.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6))
skb               214 net/netfilter/ipset/ip_set_hash_ip.c hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               221 net/netfilter/ipset/ip_set_hash_ip.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               223 net/netfilter/ipset/ip_set_hash_ip.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
skb                59 net/netfilter/ipset/ip_set_hash_ipmac.c hash_ipmac4_data_list(struct sk_buff *skb, const struct hash_ipmac4_elem *e)
skb                61 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip) ||
skb                62 net/netfilter/ipset/ip_set_hash_ipmac.c 	    nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
skb                84 net/netfilter/ipset/ip_set_hash_ipmac.c hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb                90 net/netfilter/ipset/ip_set_hash_ipmac.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb                92 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (skb_mac_header(skb) < skb->head ||
skb                93 net/netfilter/ipset/ip_set_hash_ipmac.c 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
skb                97 net/netfilter/ipset/ip_set_hash_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
skb                99 net/netfilter/ipset/ip_set_hash_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
skb               104 net/netfilter/ipset/ip_set_hash_ipmac.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
skb               167 net/netfilter/ipset/ip_set_hash_ipmac.c hash_ipmac6_data_list(struct sk_buff *skb, const struct hash_ipmac6_elem *e)
skb               169 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
skb               170 net/netfilter/ipset/ip_set_hash_ipmac.c 	    nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
skb               197 net/netfilter/ipset/ip_set_hash_ipmac.c hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               206 net/netfilter/ipset/ip_set_hash_ipmac.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               208 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (skb_mac_header(skb) < skb->head ||
skb               209 net/netfilter/ipset/ip_set_hash_ipmac.c 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
skb               213 net/netfilter/ipset/ip_set_hash_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
skb               215 net/netfilter/ipset/ip_set_hash_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
skb               220 net/netfilter/ipset/ip_set_hash_ipmac.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
skb                55 net/netfilter/ipset/ip_set_hash_ipmark.c hash_ipmark4_data_list(struct sk_buff *skb,
skb                58 net/netfilter/ipset/ip_set_hash_ipmark.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
skb                59 net/netfilter/ipset/ip_set_hash_ipmark.c 	    nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
skb                79 net/netfilter/ipset/ip_set_hash_ipmark.c hash_ipmark4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb                86 net/netfilter/ipset/ip_set_hash_ipmark.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb                88 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark = skb->mark;
skb                91 net/netfilter/ipset/ip_set_hash_ipmark.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
skb               178 net/netfilter/ipset/ip_set_hash_ipmark.c hash_ipmark6_data_list(struct sk_buff *skb,
skb               181 net/netfilter/ipset/ip_set_hash_ipmark.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
skb               182 net/netfilter/ipset/ip_set_hash_ipmark.c 	    nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
skb               205 net/netfilter/ipset/ip_set_hash_ipmark.c hash_ipmark6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               212 net/netfilter/ipset/ip_set_hash_ipmark.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               214 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark = skb->mark;
skb               217 net/netfilter/ipset/ip_set_hash_ipmark.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
skb                61 net/netfilter/ipset/ip_set_hash_ipport.c hash_ipport4_data_list(struct sk_buff *skb,
skb                64 net/netfilter/ipset/ip_set_hash_ipport.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
skb                65 net/netfilter/ipset/ip_set_hash_ipport.c 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
skb                66 net/netfilter/ipset/ip_set_hash_ipport.c 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
skb                87 net/netfilter/ipset/ip_set_hash_ipport.c hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb                93 net/netfilter/ipset/ip_set_hash_ipport.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb                95 net/netfilter/ipset/ip_set_hash_ipport.c 	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
skb                99 net/netfilter/ipset/ip_set_hash_ipport.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
skb               216 net/netfilter/ipset/ip_set_hash_ipport.c hash_ipport6_data_list(struct sk_buff *skb,
skb               219 net/netfilter/ipset/ip_set_hash_ipport.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
skb               220 net/netfilter/ipset/ip_set_hash_ipport.c 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
skb               221 net/netfilter/ipset/ip_set_hash_ipport.c 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
skb               245 net/netfilter/ipset/ip_set_hash_ipport.c hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               251 net/netfilter/ipset/ip_set_hash_ipport.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               253 net/netfilter/ipset/ip_set_hash_ipport.c 	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
skb               257 net/netfilter/ipset/ip_set_hash_ipport.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
skb                61 net/netfilter/ipset/ip_set_hash_ipportip.c hash_ipportip4_data_list(struct sk_buff *skb,
skb                64 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
skb                65 net/netfilter/ipset/ip_set_hash_ipportip.c 	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
skb                66 net/netfilter/ipset/ip_set_hash_ipportip.c 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
skb                67 net/netfilter/ipset/ip_set_hash_ipportip.c 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
skb                89 net/netfilter/ipset/ip_set_hash_ipportip.c hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb                95 net/netfilter/ipset/ip_set_hash_ipportip.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb                97 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
skb               101 net/netfilter/ipset/ip_set_hash_ipportip.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
skb               102 net/netfilter/ipset/ip_set_hash_ipportip.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2);
skb               225 net/netfilter/ipset/ip_set_hash_ipportip.c hash_ipportip6_data_list(struct sk_buff *skb,
skb               228 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
skb               229 net/netfilter/ipset/ip_set_hash_ipportip.c 	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
skb               230 net/netfilter/ipset/ip_set_hash_ipportip.c 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
skb               231 net/netfilter/ipset/ip_set_hash_ipportip.c 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
skb               255 net/netfilter/ipset/ip_set_hash_ipportip.c hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               261 net/netfilter/ipset/ip_set_hash_ipportip.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               263 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
skb               267 net/netfilter/ipset/ip_set_hash_ipportip.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
skb               268 net/netfilter/ipset/ip_set_hash_ipportip.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6);
skb               100 net/netfilter/ipset/ip_set_hash_ipportnet.c hash_ipportnet4_data_list(struct sk_buff *skb,
skb               105 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
skb               106 net/netfilter/ipset/ip_set_hash_ipportnet.c 	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
skb               107 net/netfilter/ipset/ip_set_hash_ipportnet.c 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
skb               108 net/netfilter/ipset/ip_set_hash_ipportnet.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
skb               109 net/netfilter/ipset/ip_set_hash_ipportnet.c 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
skb               111 net/netfilter/ipset/ip_set_hash_ipportnet.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               133 net/netfilter/ipset/ip_set_hash_ipportnet.c hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               142 net/netfilter/ipset/ip_set_hash_ipportnet.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               147 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
skb               151 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
skb               152 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2);
skb               349 net/netfilter/ipset/ip_set_hash_ipportnet.c hash_ipportnet6_data_list(struct sk_buff *skb,
skb               354 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
skb               355 net/netfilter/ipset/ip_set_hash_ipportnet.c 	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
skb               356 net/netfilter/ipset/ip_set_hash_ipportnet.c 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
skb               357 net/netfilter/ipset/ip_set_hash_ipportnet.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
skb               358 net/netfilter/ipset/ip_set_hash_ipportnet.c 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
skb               360 net/netfilter/ipset/ip_set_hash_ipportnet.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               384 net/netfilter/ipset/ip_set_hash_ipportnet.c hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               393 net/netfilter/ipset/ip_set_hash_ipportnet.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               398 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
skb               402 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
skb               403 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6);
skb                49 net/netfilter/ipset/ip_set_hash_mac.c hash_mac4_data_list(struct sk_buff *skb, const struct hash_mac4_elem *e)
skb                51 net/netfilter/ipset/ip_set_hash_mac.c 	if (nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
skb                72 net/netfilter/ipset/ip_set_hash_mac.c hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb                78 net/netfilter/ipset/ip_set_hash_mac.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb                80 net/netfilter/ipset/ip_set_hash_mac.c 	if (skb_mac_header(skb) < skb->head ||
skb                81 net/netfilter/ipset/ip_set_hash_mac.c 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
skb                85 net/netfilter/ipset/ip_set_hash_mac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
skb                87 net/netfilter/ipset/ip_set_hash_mac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
skb                85 net/netfilter/ipset/ip_set_hash_net.c hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
skb                89 net/netfilter/ipset/ip_set_hash_net.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
skb                90 net/netfilter/ipset/ip_set_hash_net.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
skb                92 net/netfilter/ipset/ip_set_hash_net.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               112 net/netfilter/ipset/ip_set_hash_net.c hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               121 net/netfilter/ipset/ip_set_hash_net.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               128 net/netfilter/ipset/ip_set_hash_net.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
skb               250 net/netfilter/ipset/ip_set_hash_net.c hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
skb               254 net/netfilter/ipset/ip_set_hash_net.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
skb               255 net/netfilter/ipset/ip_set_hash_net.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
skb               257 net/netfilter/ipset/ip_set_hash_net.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               280 net/netfilter/ipset/ip_set_hash_net.c hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               289 net/netfilter/ipset/ip_set_hash_net.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               296 net/netfilter/ipset/ip_set_hash_net.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
skb               103 net/netfilter/ipset/ip_set_hash_netiface.c hash_netiface4_data_list(struct sk_buff *skb,
skb               110 net/netfilter/ipset/ip_set_hash_netiface.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
skb               111 net/netfilter/ipset/ip_set_hash_netiface.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
skb               112 net/netfilter/ipset/ip_set_hash_netiface.c 	    nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
skb               114 net/netfilter/ipset/ip_set_hash_netiface.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               135 net/netfilter/ipset/ip_set_hash_netiface.c static const char *get_physindev_name(const struct sk_buff *skb)
skb               137 net/netfilter/ipset/ip_set_hash_netiface.c 	struct net_device *dev = nf_bridge_get_physindev(skb);
skb               142 net/netfilter/ipset/ip_set_hash_netiface.c static const char *get_physoutdev_name(const struct sk_buff *skb)
skb               144 net/netfilter/ipset/ip_set_hash_netiface.c 	struct net_device *dev = nf_bridge_get_physoutdev(skb);
skb               151 net/netfilter/ipset/ip_set_hash_netiface.c hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               161 net/netfilter/ipset/ip_set_hash_netiface.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               166 net/netfilter/ipset/ip_set_hash_netiface.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
skb               174 net/netfilter/ipset/ip_set_hash_netiface.c 		const char *eiface = SRCDIR ? get_physindev_name(skb) :
skb               175 net/netfilter/ipset/ip_set_hash_netiface.c 					      get_physoutdev_name(skb);
skb               326 net/netfilter/ipset/ip_set_hash_netiface.c hash_netiface6_data_list(struct sk_buff *skb,
skb               333 net/netfilter/ipset/ip_set_hash_netiface.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
skb               334 net/netfilter/ipset/ip_set_hash_netiface.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
skb               335 net/netfilter/ipset/ip_set_hash_netiface.c 	    nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
skb               337 net/netfilter/ipset/ip_set_hash_netiface.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               361 net/netfilter/ipset/ip_set_hash_netiface.c hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               371 net/netfilter/ipset/ip_set_hash_netiface.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               376 net/netfilter/ipset/ip_set_hash_netiface.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
skb               381 net/netfilter/ipset/ip_set_hash_netiface.c 		const char *eiface = SRCDIR ? get_physindev_name(skb) :
skb               382 net/netfilter/ipset/ip_set_hash_netiface.c 					      get_physoutdev_name(skb);
skb               102 net/netfilter/ipset/ip_set_hash_netnet.c hash_netnet4_data_list(struct sk_buff *skb,
skb               107 net/netfilter/ipset/ip_set_hash_netnet.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
skb               108 net/netfilter/ipset/ip_set_hash_netnet.c 	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
skb               109 net/netfilter/ipset/ip_set_hash_netnet.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
skb               110 net/netfilter/ipset/ip_set_hash_netnet.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
skb               112 net/netfilter/ipset/ip_set_hash_netnet.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               139 net/netfilter/ipset/ip_set_hash_netnet.c hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               146 net/netfilter/ipset/ip_set_hash_netnet.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               153 net/netfilter/ipset/ip_set_hash_netnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
skb               154 net/netfilter/ipset/ip_set_hash_netnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1]);
skb               333 net/netfilter/ipset/ip_set_hash_netnet.c hash_netnet6_data_list(struct sk_buff *skb,
skb               338 net/netfilter/ipset/ip_set_hash_netnet.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
skb               339 net/netfilter/ipset/ip_set_hash_netnet.c 	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
skb               340 net/netfilter/ipset/ip_set_hash_netnet.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
skb               341 net/netfilter/ipset/ip_set_hash_netnet.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
skb               343 net/netfilter/ipset/ip_set_hash_netnet.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               373 net/netfilter/ipset/ip_set_hash_netnet.c hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               380 net/netfilter/ipset/ip_set_hash_netnet.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               387 net/netfilter/ipset/ip_set_hash_netnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
skb               388 net/netfilter/ipset/ip_set_hash_netnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1].in6);
skb                97 net/netfilter/ipset/ip_set_hash_netport.c hash_netport4_data_list(struct sk_buff *skb,
skb               102 net/netfilter/ipset/ip_set_hash_netport.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
skb               103 net/netfilter/ipset/ip_set_hash_netport.c 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
skb               104 net/netfilter/ipset/ip_set_hash_netport.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
skb               105 net/netfilter/ipset/ip_set_hash_netport.c 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
skb               107 net/netfilter/ipset/ip_set_hash_netport.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               128 net/netfilter/ipset/ip_set_hash_netport.c hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               137 net/netfilter/ipset/ip_set_hash_netport.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               142 net/netfilter/ipset/ip_set_hash_netport.c 	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
skb               146 net/netfilter/ipset/ip_set_hash_netport.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
skb               310 net/netfilter/ipset/ip_set_hash_netport.c hash_netport6_data_list(struct sk_buff *skb,
skb               315 net/netfilter/ipset/ip_set_hash_netport.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
skb               316 net/netfilter/ipset/ip_set_hash_netport.c 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
skb               317 net/netfilter/ipset/ip_set_hash_netport.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
skb               318 net/netfilter/ipset/ip_set_hash_netport.c 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
skb               320 net/netfilter/ipset/ip_set_hash_netport.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               344 net/netfilter/ipset/ip_set_hash_netport.c hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               353 net/netfilter/ipset/ip_set_hash_netport.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               358 net/netfilter/ipset/ip_set_hash_netport.c 	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
skb               362 net/netfilter/ipset/ip_set_hash_netport.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
skb               109 net/netfilter/ipset/ip_set_hash_netportnet.c hash_netportnet4_data_list(struct sk_buff *skb,
skb               114 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
skb               115 net/netfilter/ipset/ip_set_hash_netportnet.c 	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
skb               116 net/netfilter/ipset/ip_set_hash_netportnet.c 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
skb               117 net/netfilter/ipset/ip_set_hash_netportnet.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
skb               118 net/netfilter/ipset/ip_set_hash_netportnet.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
skb               119 net/netfilter/ipset/ip_set_hash_netportnet.c 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
skb               121 net/netfilter/ipset/ip_set_hash_netportnet.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               149 net/netfilter/ipset/ip_set_hash_netportnet.c hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               156 net/netfilter/ipset/ip_set_hash_netportnet.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               163 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
skb               167 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
skb               168 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1]);
skb               385 net/netfilter/ipset/ip_set_hash_netportnet.c hash_netportnet6_data_list(struct sk_buff *skb,
skb               390 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
skb               391 net/netfilter/ipset/ip_set_hash_netportnet.c 	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
skb               392 net/netfilter/ipset/ip_set_hash_netportnet.c 	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
skb               393 net/netfilter/ipset/ip_set_hash_netportnet.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
skb               394 net/netfilter/ipset/ip_set_hash_netportnet.c 	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
skb               395 net/netfilter/ipset/ip_set_hash_netportnet.c 	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
skb               397 net/netfilter/ipset/ip_set_hash_netportnet.c 	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
skb               428 net/netfilter/ipset/ip_set_hash_netportnet.c hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               435 net/netfilter/ipset/ip_set_hash_netportnet.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               442 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
skb               446 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
skb               447 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1].in6);
skb                49 net/netfilter/ipset/ip_set_list_set.c list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
skb                64 net/netfilter/ipset/ip_set_list_set.c 		ret = ip_set_test(e->id, skb, par, opt);
skb                74 net/netfilter/ipset/ip_set_list_set.c list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
skb                86 net/netfilter/ipset/ip_set_list_set.c 		ret = ip_set_add(e->id, skb, par, opt);
skb                94 net/netfilter/ipset/ip_set_list_set.c list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
skb               106 net/netfilter/ipset/ip_set_list_set.c 		ret = ip_set_del(e->id, skb, par, opt);
skb               114 net/netfilter/ipset/ip_set_list_set.c list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
skb               118 net/netfilter/ipset/ip_set_list_set.c 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
skb               124 net/netfilter/ipset/ip_set_list_set.c 		ret = list_set_ktest(set, skb, par, opt, &ext);
skb               127 net/netfilter/ipset/ip_set_list_set.c 		ret = list_set_kadd(set, skb, par, opt, &ext);
skb               130 net/netfilter/ipset/ip_set_list_set.c 		ret = list_set_kdel(set, skb, par, opt, &ext);
skb               459 net/netfilter/ipset/ip_set_list_set.c list_set_head(struct ip_set *set, struct sk_buff *skb)
skb               465 net/netfilter/ipset/ip_set_list_set.c 	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
skb               468 net/netfilter/ipset/ip_set_list_set.c 	if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
skb               469 net/netfilter/ipset/ip_set_list_set.c 	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
skb               470 net/netfilter/ipset/ip_set_list_set.c 	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
skb               471 net/netfilter/ipset/ip_set_list_set.c 	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
skb               473 net/netfilter/ipset/ip_set_list_set.c 	if (unlikely(ip_set_put_flags(skb, set)))
skb               475 net/netfilter/ipset/ip_set_list_set.c 	nla_nest_end(skb, nested);
skb               484 net/netfilter/ipset/ip_set_list_set.c 	      struct sk_buff *skb, struct netlink_callback *cb)
skb               493 net/netfilter/ipset/ip_set_list_set.c 	atd = nla_nest_start(skb, IPSET_ATTR_ADT);
skb               505 net/netfilter/ipset/ip_set_list_set.c 		nested = nla_nest_start(skb, IPSET_ATTR_DATA);
skb               509 net/netfilter/ipset/ip_set_list_set.c 		if (nla_put_string(skb, IPSET_ATTR_NAME, name))
skb               511 net/netfilter/ipset/ip_set_list_set.c 		if (ip_set_put_extensions(skb, set, e, true))
skb               513 net/netfilter/ipset/ip_set_list_set.c 		nla_nest_end(skb, nested);
skb               517 net/netfilter/ipset/ip_set_list_set.c 	nla_nest_end(skb, atd);
skb               523 net/netfilter/ipset/ip_set_list_set.c 	nla_nest_cancel(skb, nested);
skb               525 net/netfilter/ipset/ip_set_list_set.c 		nla_nest_cancel(skb, atd);
skb               530 net/netfilter/ipset/ip_set_list_set.c 		nla_nest_end(skb, atd);
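
list_set_ktest/kadd/kdel above fan a single packet out to the member sets through ip_set_test()/ip_set_add()/ip_set_del(); for a test, the first member that matches decides the result. A condensed sketch (extension and timeout handling elided):

	static int example_list_test(struct list_set *map,
				     const struct sk_buff *skb,
				     const struct xt_action_param *par,
				     struct ip_set_adt_opt *opt)
	{
		struct set_elem *e;
		int ret;

		list_for_each_entry_rcu(e, &map->members, list) {
			ret = ip_set_test(e->id, skb, par, opt);
			if (ret > 0)
				return ret;	/* first match wins */
		}
		return 0;
	}
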
skb               360 net/netfilter/ipvs/ip_vs_app.c static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
skb               365 net/netfilter/ipvs/ip_vs_app.c 	const unsigned int tcp_offset = ip_hdrlen(skb);
skb               369 net/netfilter/ipvs/ip_vs_app.c 	if (skb_ensure_writable(skb, tcp_offset + sizeof(*th)))
skb               372 net/netfilter/ipvs/ip_vs_app.c 	th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
skb               393 net/netfilter/ipvs/ip_vs_app.c 	if (!app->pkt_out(app, cp, skb, &diff, ipvsh))
skb               411 net/netfilter/ipvs/ip_vs_app.c int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
skb               425 net/netfilter/ipvs/ip_vs_app.c 		return app_tcp_pkt_out(cp, skb, app, ipvsh);
skb               433 net/netfilter/ipvs/ip_vs_app.c 	return app->pkt_out(app, cp, skb, NULL, ipvsh);
skb               437 net/netfilter/ipvs/ip_vs_app.c static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
skb               442 net/netfilter/ipvs/ip_vs_app.c 	const unsigned int tcp_offset = ip_hdrlen(skb);
skb               446 net/netfilter/ipvs/ip_vs_app.c 	if (skb_ensure_writable(skb, tcp_offset + sizeof(*th)))
skb               449 net/netfilter/ipvs/ip_vs_app.c 	th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
skb               470 net/netfilter/ipvs/ip_vs_app.c 	if (!app->pkt_in(app, cp, skb, &diff, ipvsh))
skb               488 net/netfilter/ipvs/ip_vs_app.c int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
skb               502 net/netfilter/ipvs/ip_vs_app.c 		return app_tcp_pkt_in(cp, skb, app, ipvsh);
skb               510 net/netfilter/ipvs/ip_vs_app.c 	return app->pkt_in(app, cp, skb, NULL, ipvsh);
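
app_tcp_pkt_out() and app_tcp_pkt_in() above rewrite TCP sequence numbers in place, so they first call skb_ensure_writable(), which unshares or reallocates the skb head up to the given length; only then is a raw pointer into the header taken. The preamble as a sketch (IPVS convention: return 0 to drop, 1 to continue):

	static int example_mangle_tcp(struct sk_buff *skb)
	{
		const unsigned int tcp_offset = ip_hdrlen(skb);
		struct tcphdr *th;

		if (skb_ensure_writable(skb, tcp_offset + sizeof(*th)))
			return 0;	/* header not writable: drop */

		th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
		/* ... adjust th->seq / th->ack_seq, then fix the checksum ... */
		return 1;
	}
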
skb               315 net/netfilter/ipvs/ip_vs_conn.c 			    int af, const struct sk_buff *skb,
skb               321 net/netfilter/ipvs/ip_vs_conn.c 	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
skb               336 net/netfilter/ipvs/ip_vs_conn.c 			const struct sk_buff *skb,
skb               341 net/netfilter/ipvs/ip_vs_conn.c 	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
skb               441 net/netfilter/ipvs/ip_vs_conn.c 			 const struct sk_buff *skb,
skb               446 net/netfilter/ipvs/ip_vs_conn.c 	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
skb                73 net/netfilter/ipvs/ip_vs_core.c 	tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb                79 net/netfilter/ipvs/ip_vs_core.c 	udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               134 net/netfilter/ipvs/ip_vs_core.c ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
skb               148 net/netfilter/ipvs/ip_vs_core.c 		s->cnt.inbytes += skb->len;
skb               155 net/netfilter/ipvs/ip_vs_core.c 		s->cnt.inbytes += skb->len;
skb               161 net/netfilter/ipvs/ip_vs_core.c 		s->cnt.inbytes += skb->len;
skb               170 net/netfilter/ipvs/ip_vs_core.c ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
skb               184 net/netfilter/ipvs/ip_vs_core.c 		s->cnt.outbytes += skb->len;
skb               191 net/netfilter/ipvs/ip_vs_core.c 		s->cnt.outbytes += skb->len;
skb               197 net/netfilter/ipvs/ip_vs_core.c 		s->cnt.outbytes += skb->len;
skb               234 net/netfilter/ipvs/ip_vs_core.c 		const struct sk_buff *skb,
skb               238 net/netfilter/ipvs/ip_vs_core.c 		pd->pp->state_transition(cp, direction, skb, pd);
skb               243 net/netfilter/ipvs/ip_vs_core.c 			      struct sk_buff *skb, int protocol,
skb               252 net/netfilter/ipvs/ip_vs_core.c 		return p->pe->fill_param(p, skb);
skb               266 net/netfilter/ipvs/ip_vs_core.c 		    struct sk_buff *skb, __be16 src_port, __be16 dst_port,
skb               344 net/netfilter/ipvs/ip_vs_core.c 		if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
skb               365 net/netfilter/ipvs/ip_vs_core.c 			dest = sched->schedule(svc, skb, iph);
skb               384 net/netfilter/ipvs/ip_vs_core.c 				    IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
skb               413 net/netfilter/ipvs/ip_vs_core.c 			    skb->mark);
skb               453 net/netfilter/ipvs/ip_vs_core.c ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
skb               469 net/netfilter/ipvs/ip_vs_core.c 	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
skb               492 net/netfilter/ipvs/ip_vs_core.c 		IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
skb               500 net/netfilter/ipvs/ip_vs_core.c 	if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
skb               504 net/netfilter/ipvs/ip_vs_core.c 				     svc->af, skb, iph);
skb               508 net/netfilter/ipvs/ip_vs_core.c 			IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
skb               520 net/netfilter/ipvs/ip_vs_core.c 		return ip_vs_sched_persist(svc, skb, cport, vport, ignored,
skb               540 net/netfilter/ipvs/ip_vs_core.c 		dest = sched->schedule(svc, skb, iph);
skb               563 net/netfilter/ipvs/ip_vs_core.c 				    flags, dest, skb->mark);
skb               597 net/netfilter/ipvs/ip_vs_core.c int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
skb               604 net/netfilter/ipvs/ip_vs_core.c 	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
skb               631 net/netfilter/ipvs/ip_vs_core.c 					    NULL, skb->mark);
skb               637 net/netfilter/ipvs/ip_vs_core.c 		ip_vs_in_stats(cp, skb);
skb               640 net/netfilter/ipvs/ip_vs_core.c 		ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
skb               643 net/netfilter/ipvs/ip_vs_core.c 		ret = cp->packet_xmit(skb, cp, pd->pp, iph);
skb               675 net/netfilter/ipvs/ip_vs_core.c 		if (!skb->dev)
skb               676 net/netfilter/ipvs/ip_vs_core.c 			skb->dev = net->loopback_dev;
skb               677 net/netfilter/ipvs/ip_vs_core.c 		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
skb               680 net/netfilter/ipvs/ip_vs_core.c 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
skb               710 net/netfilter/ipvs/ip_vs_core.c __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
skb               712 net/netfilter/ipvs/ip_vs_core.c 	return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
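
ip_vs_checksum_complete() above folds a full software checksum over the packet from the given offset; callers such as handle_response_icmp() (excerpted further below) only pay that cost when hardware has not already validated the packet. Both steps combined as a sketch:

	static bool example_csum_ok(struct sk_buff *skb, int offset)
	{
		if (skb_csum_unnecessary(skb))
			return true;	/* already verified (hw or earlier) */

		return csum_fold(skb_checksum(skb, offset,
					      skb->len - offset, 0)) == 0;
	}
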
skb               725 net/netfilter/ipvs/ip_vs_core.c 				     struct sk_buff *skb, u_int32_t user)
skb               730 net/netfilter/ipvs/ip_vs_core.c 	err = ip_defrag(ipvs->net, skb, user);
skb               733 net/netfilter/ipvs/ip_vs_core.c 		ip_send_check(ip_hdr(skb));
skb               739 net/netfilter/ipvs/ip_vs_core.c 				 struct sk_buff *skb, unsigned int hooknum)
skb               748 net/netfilter/ipvs/ip_vs_core.c 		struct dst_entry *dst = skb_dst(skb);
skb               751 net/netfilter/ipvs/ip_vs_core.c 		    ip6_route_me_harder(ipvs->net, skb) != 0)
skb               755 net/netfilter/ipvs/ip_vs_core.c 		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
skb               756 net/netfilter/ipvs/ip_vs_core.c 		    ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
skb               766 net/netfilter/ipvs/ip_vs_core.c void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               769 net/netfilter/ipvs/ip_vs_core.c 	struct iphdr *iph	 = ip_hdr(skb);
skb               771 net/netfilter/ipvs/ip_vs_core.c 	struct icmphdr *icmph	 = (struct icmphdr *)(skb_network_header(skb) +
skb               800 net/netfilter/ipvs/ip_vs_core.c 	icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
skb               801 net/netfilter/ipvs/ip_vs_core.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               804 net/netfilter/ipvs/ip_vs_core.c 		IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
skb               807 net/netfilter/ipvs/ip_vs_core.c 		IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
skb               812 net/netfilter/ipvs/ip_vs_core.c void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               815 net/netfilter/ipvs/ip_vs_core.c 	struct ipv6hdr *iph	 = ipv6_hdr(skb);
skb               823 net/netfilter/ipvs/ip_vs_core.c 	ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
skb               824 net/netfilter/ipvs/ip_vs_core.c 	icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
skb               826 net/netfilter/ipvs/ip_vs_core.c 	ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
skb               828 net/netfilter/ipvs/ip_vs_core.c 	protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
skb               841 net/netfilter/ipvs/ip_vs_core.c 		__be16 *ports = (void *)(skb_network_header(skb) + offs);
skb               854 net/netfilter/ipvs/ip_vs_core.c 					      skb->len - icmp_offset,
skb               856 net/netfilter/ipvs/ip_vs_core.c 	skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
skb               857 net/netfilter/ipvs/ip_vs_core.c 	skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
skb               858 net/netfilter/ipvs/ip_vs_core.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb               861 net/netfilter/ipvs/ip_vs_core.c 		IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
skb               865 net/netfilter/ipvs/ip_vs_core.c 		IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
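
The csum_start/csum_offset/CHECKSUM_PARTIAL triple set up in ip_vs_nat_icmp_v6() defers the final checksum to the transmit path: summing starts csum_start bytes past skb->head and the 16-bit result is stored csum_offset bytes into that area (offsetof(struct icmp6hdr, icmp6_cksum) is 2). A toy sketch of that bookkeeping, assuming Ethernet plus a plain IPv6 header for the example offsets:

    #include <stddef.h>
    #include <stdio.h>

    /* Toy CHECKSUM_PARTIAL descriptor: the driver sums from
     * head+csum_start to the end of the packet and stores the result
     * at head+csum_start+csum_offset. */
    struct csum_hint { size_t csum_start; size_t csum_offset; };

    static struct csum_hint icmp6_csum_hint(size_t network_off, size_t icmp_off)
    {
        struct csum_hint h = {
            .csum_start  = network_off + icmp_off,
            .csum_offset = 2,   /* icmp6_cksum follows type and code */
        };
        return h;
    }

    int main(void)
    {
        struct csum_hint h = icmp6_csum_hint(14, 40); /* eth hdr + IPv6 hdr */

        printf("sum from offset %zu, store result at +%zu\n",
               h.csum_start, h.csum_offset);
        return 0;
    }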
skb               874 net/netfilter/ipvs/ip_vs_core.c static int handle_response_icmp(int af, struct sk_buff *skb,
skb               887 net/netfilter/ipvs/ip_vs_core.c 	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
skb               897 net/netfilter/ipvs/ip_vs_core.c 	if (skb_ensure_writable(skb, offset))
skb               902 net/netfilter/ipvs/ip_vs_core.c 		ip_vs_nat_icmp_v6(skb, pp, cp, 1);
skb               905 net/netfilter/ipvs/ip_vs_core.c 		ip_vs_nat_icmp(skb, pp, cp, 1);
skb               907 net/netfilter/ipvs/ip_vs_core.c 	if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
skb               911 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_out_stats(cp, skb);
skb               913 net/netfilter/ipvs/ip_vs_core.c 	skb->ipvs_property = 1;
skb               915 net/netfilter/ipvs/ip_vs_core.c 		ip_vs_notrack(skb);
skb               917 net/netfilter/ipvs/ip_vs_core.c 		ip_vs_update_conntrack(skb, cp, 0);
skb               933 net/netfilter/ipvs/ip_vs_core.c static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
skb               948 net/netfilter/ipvs/ip_vs_core.c 	if (ip_is_fragment(ip_hdr(skb))) {
skb               949 net/netfilter/ipvs/ip_vs_core.c 		if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
skb               953 net/netfilter/ipvs/ip_vs_core.c 	iph = ip_hdr(skb);
skb               955 net/netfilter/ipvs/ip_vs_core.c 	ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
skb               979 net/netfilter/ipvs/ip_vs_core.c 	cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
skb               992 net/netfilter/ipvs/ip_vs_core.c 	IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
skb               995 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
skb               999 net/netfilter/ipvs/ip_vs_core.c 			     ipvs, AF_INET, skb, &ciph);
skb              1004 net/netfilter/ipvs/ip_vs_core.c 	return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
skb              1009 net/netfilter/ipvs/ip_vs_core.c static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
skb              1021 net/netfilter/ipvs/ip_vs_core.c 	ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph);
skb              1046 net/netfilter/ipvs/ip_vs_core.c 	if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, ipvsh->len + sizeof(_icmph),
skb              1056 net/netfilter/ipvs/ip_vs_core.c 			     ipvs, AF_INET6, skb, &ciph);
skb              1062 net/netfilter/ipvs/ip_vs_core.c 	return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
skb              1071 net/netfilter/ipvs/ip_vs_core.c static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
skb              1074 net/netfilter/ipvs/ip_vs_core.c 	sch = skb_header_pointer(skb, nh_len + sizeof(struct sctphdr),
skb              1083 net/netfilter/ipvs/ip_vs_core.c static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
skb              1087 net/netfilter/ipvs/ip_vs_core.c 	th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
skb              1093 net/netfilter/ipvs/ip_vs_core.c static inline bool is_new_conn(const struct sk_buff *skb,
skb              1100 net/netfilter/ipvs/ip_vs_core.c 		th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
skb              1108 net/netfilter/ipvs/ip_vs_core.c 		sch = skb_header_pointer(skb, iph->len + sizeof(struct sctphdr),
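
is_sctp_abort(), is_tcp_reset() and is_new_conn() all lean on the skb_header_pointer() contract: given an offset, a length and caller-supplied stack storage, it returns a usable pointer to the header (copying into the storage when the skb data is not linear there) or NULL when the packet is too short. A hedged userspace sketch of that contract over a flat buffer (pkt_header_pointer and the toy structs are illustrative; the real helper avoids the copy when the data is linear):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Toy packet: a bounds-checked byte buffer standing in for an skb. */
    struct pkt { const uint8_t *data; size_t len; };

    /* Return a pointer to len bytes at offset, copied into *buf;
     * NULL if the packet is too short -- the skb_header_pointer()
     * contract.  This sketch always copies, for simplicity. */
    static void *pkt_header_pointer(const struct pkt *p, size_t offset,
                                    size_t len, void *buf)
    {
        if (offset + len > p->len)
            return NULL;
        memcpy(buf, p->data + offset, len);
        return buf;
    }

    struct tcp_flags { uint8_t doff_res; uint8_t flags; }; /* toy header tail */

    int main(void)
    {
        uint8_t raw[40] = { 0 };
        struct pkt p = { raw, sizeof(raw) };
        struct tcp_flags _th, *th;

        raw[33] = 0x04;                 /* RST bit in a toy TCP header */
        th = pkt_header_pointer(&p, 32, sizeof(_th), &_th);
        if (th && (th->flags & 0x04))
            puts("RST set");
        return 0;
    }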
skb              1150 net/netfilter/ipvs/ip_vs_core.c 				      struct sk_buff *skb,
skb              1185 net/netfilter/ipvs/ip_vs_core.c 		if (ip_vs_conn_fill_param_persist(svc, skb, iph->protocol,
skb              1243 net/netfilter/ipvs/ip_vs_core.c 					      int af, struct sk_buff *skb,
skb              1253 net/netfilter/ipvs/ip_vs_core.c 	pptr = frag_safe_skb_hp(skb, iph->len,
skb              1268 net/netfilter/ipvs/ip_vs_core.c 				cp = pe->conn_out(svc, dest, skb, iph,
skb              1279 net/netfilter/ipvs/ip_vs_core.c handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
skb              1285 net/netfilter/ipvs/ip_vs_core.c 	IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
skb              1287 net/netfilter/ipvs/ip_vs_core.c 	if (skb_ensure_writable(skb, iph->len))
skb              1292 net/netfilter/ipvs/ip_vs_core.c 	    !SNAT_CALL(pp->snat_handler, skb, pp, cp, iph))
skb              1297 net/netfilter/ipvs/ip_vs_core.c 		ipv6_hdr(skb)->saddr = cp->vaddr.in6;
skb              1301 net/netfilter/ipvs/ip_vs_core.c 		ip_hdr(skb)->saddr = cp->vaddr.ip;
skb              1302 net/netfilter/ipvs/ip_vs_core.c 		ip_send_check(ip_hdr(skb));
skb              1320 net/netfilter/ipvs/ip_vs_core.c 	if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
skb              1323 net/netfilter/ipvs/ip_vs_core.c 	IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
skb              1325 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_out_stats(cp, skb);
skb              1326 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
skb              1327 net/netfilter/ipvs/ip_vs_core.c 	skb->ipvs_property = 1;
skb              1329 net/netfilter/ipvs/ip_vs_core.c 		ip_vs_notrack(skb);
skb              1331 net/netfilter/ipvs/ip_vs_core.c 		ip_vs_update_conntrack(skb, cp, 0);
skb              1339 net/netfilter/ipvs/ip_vs_core.c 	kfree_skb(skb);
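
In the NAT branch of handle_response() the source address is rewritten to the virtual address and ip_send_check() then recomputes the IPv4 header checksum from scratch. A self-contained sketch of that recompute step (toy byte-array header rather than struct iphdr):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Recompute the IPv4 header checksum in place: zero the checksum
     * field, one's-complement-sum the header as 16-bit words, fold,
     * complement, store -- what ip_send_check() does after the source
     * address has been rewritten. */
    static void iph_send_check(uint8_t *iph, size_t ihl_bytes)
    {
        uint32_t sum = 0;
        size_t i;

        iph[10] = iph[11] = 0;          /* checksum field at offset 10 */
        for (i = 0; i < ihl_bytes; i += 2)
            sum += (uint32_t)iph[i] << 8 | iph[i + 1];
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        sum = ~sum & 0xffff;
        iph[10] = sum >> 8;
        iph[11] = sum & 0xff;
    }

    int main(void)
    {
        uint8_t iph[20] = { 0x45, 0, 0, 20 };   /* minimal toy header */

        memcpy(&iph[12], (uint8_t[]){ 10, 0, 0, 7 }, 4); /* rewrite saddr */
        iph_send_check(iph, sizeof(iph));
        printf("new csum: %02x%02x\n", iph[10], iph[11]);
        return 0;
    }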
skb              1348 net/netfilter/ipvs/ip_vs_core.c ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
skb              1359 net/netfilter/ipvs/ip_vs_core.c 	if (skb->ipvs_property)
skb              1362 net/netfilter/ipvs/ip_vs_core.c 	sk = skb_to_full_sk(skb);
skb              1371 net/netfilter/ipvs/ip_vs_core.c 	if (unlikely(!skb_dst(skb)))
skb              1377 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_fill_iph_skb(af, skb, false, &iph);
skb              1382 net/netfilter/ipvs/ip_vs_core.c 			int verdict = ip_vs_out_icmp_v6(ipvs, skb, &related,
skb              1392 net/netfilter/ipvs/ip_vs_core.c 			int verdict = ip_vs_out_icmp(ipvs, skb, &related, hooknum);
skb              1407 net/netfilter/ipvs/ip_vs_core.c 		if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
skb              1408 net/netfilter/ipvs/ip_vs_core.c 			if (ip_vs_gather_frags(ipvs, skb,
skb              1412 net/netfilter/ipvs/ip_vs_core.c 			ip_vs_fill_iph_skb(AF_INET, skb, false, &iph);
skb              1419 net/netfilter/ipvs/ip_vs_core.c 			     ipvs, af, skb, &iph);
skb              1424 net/netfilter/ipvs/ip_vs_core.c 		return handle_response(af, skb, pd, cp, &iph, hooknum);
skb              1435 net/netfilter/ipvs/ip_vs_core.c 			cp = __ip_vs_rs_conn_out(hooknum, ipvs, af, skb, &iph);
skb              1437 net/netfilter/ipvs/ip_vs_core.c 				return handle_response(af, skb, pd, cp, &iph,
skb              1448 net/netfilter/ipvs/ip_vs_core.c 		pptr = frag_safe_skb_hp(skb, iph.len,
skb              1462 net/netfilter/ipvs/ip_vs_core.c 				  && !is_tcp_reset(skb, iph.len))
skb              1464 net/netfilter/ipvs/ip_vs_core.c 					&& !is_sctp_abort(skb,
skb              1468 net/netfilter/ipvs/ip_vs_core.c 					if (!skb->dev)
skb              1469 net/netfilter/ipvs/ip_vs_core.c 						skb->dev = ipvs->net->loopback_dev;
skb              1470 net/netfilter/ipvs/ip_vs_core.c 					icmpv6_send(skb,
skb              1476 net/netfilter/ipvs/ip_vs_core.c 					icmp_send(skb,
skb              1485 net/netfilter/ipvs/ip_vs_core.c 	IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
skb              1500 net/netfilter/ipvs/ip_vs_core.c ip_vs_reply4(void *priv, struct sk_buff *skb,
skb              1503 net/netfilter/ipvs/ip_vs_core.c 	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
skb              1511 net/netfilter/ipvs/ip_vs_core.c ip_vs_local_reply4(void *priv, struct sk_buff *skb,
skb              1514 net/netfilter/ipvs/ip_vs_core.c 	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
skb              1525 net/netfilter/ipvs/ip_vs_core.c ip_vs_reply6(void *priv, struct sk_buff *skb,
skb              1528 net/netfilter/ipvs/ip_vs_core.c 	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
skb              1536 net/netfilter/ipvs/ip_vs_core.c ip_vs_local_reply6(void *priv, struct sk_buff *skb,
skb              1539 net/netfilter/ipvs/ip_vs_core.c 	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
skb              1545 net/netfilter/ipvs/ip_vs_core.c ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
skb              1558 net/netfilter/ipvs/ip_vs_core.c 		if (!pp->conn_schedule(ipvs, af, skb, pd, verdict, cpp, iph))
skb              1564 net/netfilter/ipvs/ip_vs_core.c 		IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
skb              1569 net/netfilter/ipvs/ip_vs_core.c 			IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
skb              1580 net/netfilter/ipvs/ip_vs_core.c static int ipvs_udp_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
skb              1587 net/netfilter/ipvs/ip_vs_core.c 	udph = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
skb              1597 net/netfilter/ipvs/ip_vs_core.c 		gueh = skb_header_pointer(skb, offset, sizeof(_gueh), &_gueh);
skb              1615 net/netfilter/ipvs/ip_vs_core.c static int ipvs_gre_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
skb              1622 net/netfilter/ipvs/ip_vs_core.c 	greh = skb_header_pointer(skb, offset, sizeof(_greh), &_greh);
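
ipvs_gre_decap() must know how long the GRE header is before it can look at the tunnelled packet: the base header is 4 bytes, and the checksum, key and sequence-number fields each add 4 more when their flag bit is set, the same arithmetic the kernel's gre_calc_hlen() performs. A sketch with locally defined toy flag constants:

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_GRE_CSUM 0x80   /* C bit in the first header byte */
    #define TOY_GRE_KEY  0x20   /* K bit */
    #define TOY_GRE_SEQ  0x10   /* S bit */

    /* Base GRE header is 4 bytes; checksum, key and sequence fields
     * are each 4 more when their flag is set. */
    static unsigned int toy_gre_hlen(uint8_t flags_byte)
    {
        unsigned int hlen = 4;

        if (flags_byte & TOY_GRE_CSUM)
            hlen += 4;
        if (flags_byte & TOY_GRE_KEY)
            hlen += 4;
        if (flags_byte & TOY_GRE_SEQ)
            hlen += 4;
        return hlen;
    }

    int main(void)
    {
        printf("plain GRE: %u bytes\n", toy_gre_hlen(0));      /* 4  */
        printf("key+seq:   %u bytes\n", toy_gre_hlen(0x30));   /* 12 */
        return 0;
    }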
skb              1653 net/netfilter/ipvs/ip_vs_core.c ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
skb              1670 net/netfilter/ipvs/ip_vs_core.c 	if (ip_is_fragment(ip_hdr(skb))) {
skb              1671 net/netfilter/ipvs/ip_vs_core.c 		if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
skb              1675 net/netfilter/ipvs/ip_vs_core.c 	iph = ip_hdr(skb);
skb              1677 net/netfilter/ipvs/ip_vs_core.c 	ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
skb              1701 net/netfilter/ipvs/ip_vs_core.c 	cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
skb              1714 net/netfilter/ipvs/ip_vs_core.c 		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
skb              1721 net/netfilter/ipvs/ip_vs_core.c 		cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
skb              1728 net/netfilter/ipvs/ip_vs_core.c 		   (skb_rtable(skb)->rt_flags & RTCF_LOCAL)) {
skb              1737 net/netfilter/ipvs/ip_vs_core.c 			ulen = ipvs_udp_decap(ipvs, skb, offset2, AF_INET,
skb              1740 net/netfilter/ipvs/ip_vs_core.c 			ulen = ipvs_gre_decap(ipvs, skb, offset2, AF_INET,
skb              1746 net/netfilter/ipvs/ip_vs_core.c 			cih = skb_header_pointer(skb, offset, sizeof(_ciph),
skb              1766 net/netfilter/ipvs/ip_vs_core.c 	IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
skb              1770 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !ipip, &ciph);
skb              1777 net/netfilter/ipvs/ip_vs_core.c 			     ipvs, AF_INET, skb, &ciph);
skb              1785 net/netfilter/ipvs/ip_vs_core.c 		if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
skb              1793 net/netfilter/ipvs/ip_vs_core.c 	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
skb              1813 net/netfilter/ipvs/ip_vs_core.c 			if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
skb              1816 net/netfilter/ipvs/ip_vs_core.c 			skb_reset_network_header(skb);
skb              1818 net/netfilter/ipvs/ip_vs_core.c 				&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
skb              1819 net/netfilter/ipvs/ip_vs_core.c 			ipv4_update_pmtu(skb, ipvs->net, mtu, 0, 0);
skb              1838 net/netfilter/ipvs/ip_vs_core.c 		if (pskb_pull(skb, offset2) == NULL)
skb              1840 net/netfilter/ipvs/ip_vs_core.c 		skb_reset_network_header(skb);
skb              1842 net/netfilter/ipvs/ip_vs_core.c 			&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
skb              1844 net/netfilter/ipvs/ip_vs_core.c 		icmp_send(skb, type, code, info);
skb              1846 net/netfilter/ipvs/ip_vs_core.c 		ip_vs_out_stats(cp, skb);
skb              1849 net/netfilter/ipvs/ip_vs_core.c 		consume_skb(skb);
skb              1855 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_in_stats(cp, skb);
skb              1859 net/netfilter/ipvs/ip_vs_core.c 	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
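
Most of ip_vs_in_icmp() operates on the packet embedded inside the ICMP error: past the outer IP header and the 8-byte ICMP header sits a copy of the offending datagram's header plus at least its first 8 bytes, which is where the inner protocol and ports are read from. A hedged sketch of locating that inner header in a flat buffer:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Given a buffer holding an IPv4 ICMP error, return the offset of
     * the embedded (inner) IP header, or -1 if truncated. */
    static long inner_iph_offset(const uint8_t *pkt, size_t len)
    {
        size_t outer_ihl, off;

        if (len < 20)
            return -1;
        outer_ihl = (pkt[0] & 0x0f) * 4;    /* outer IHL in bytes */
        off = outer_ihl + 8;                /* skip 8-byte ICMP header */
        if (off + 20 > len)                 /* need a full inner header */
            return -1;
        return (long)off;
    }

    int main(void)
    {
        uint8_t pkt[56] = { 0x45 };     /* outer IHL=5 -> inner at 28 */
        long off;

        pkt[28] = 0x45;
        pkt[28 + 9] = 6;                /* inner protocol = TCP */
        off = inner_iph_offset(pkt, sizeof(pkt));
        if (off >= 0)
            printf("inner header at %ld, proto %u\n",
                   off, (unsigned)pkt[off + 9]);
        return 0;
    }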
skb              1871 net/netfilter/ipvs/ip_vs_core.c static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
skb              1885 net/netfilter/ipvs/ip_vs_core.c 	ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph);
skb              1911 net/netfilter/ipvs/ip_vs_core.c 	if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, offset, true, &ciph))
skb              1923 net/netfilter/ipvs/ip_vs_core.c 	IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
skb              1930 net/netfilter/ipvs/ip_vs_core.c 			     ipvs, AF_INET6, skb, &ciph);
skb              1938 net/netfilter/ipvs/ip_vs_core.c 		if (!ip_vs_try_to_schedule(ipvs, AF_INET6, skb, pd, &v, &cp, &ciph))
skb              1952 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_in_stats(cp, skb);
skb              1960 net/netfilter/ipvs/ip_vs_core.c 	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum, &ciph);
skb              1978 net/netfilter/ipvs/ip_vs_core.c ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
skb              1989 net/netfilter/ipvs/ip_vs_core.c 	if (skb->ipvs_property)
skb              1997 net/netfilter/ipvs/ip_vs_core.c 	if (unlikely((skb->pkt_type != PACKET_HOST &&
skb              1999 net/netfilter/ipvs/ip_vs_core.c 		     !skb_dst(skb))) {
skb              2000 net/netfilter/ipvs/ip_vs_core.c 		ip_vs_fill_iph_skb(af, skb, false, &iph);
skb              2003 net/netfilter/ipvs/ip_vs_core.c 			      skb->pkt_type, iph.protocol,
skb              2011 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_fill_iph_skb(af, skb, false, &iph);
skb              2014 net/netfilter/ipvs/ip_vs_core.c 	sk = skb_to_full_sk(skb);
skb              2026 net/netfilter/ipvs/ip_vs_core.c 			int verdict = ip_vs_in_icmp_v6(ipvs, skb, &related,
skb              2036 net/netfilter/ipvs/ip_vs_core.c 			int verdict = ip_vs_in_icmp(ipvs, skb, &related,
skb              2051 net/netfilter/ipvs/ip_vs_core.c 			skb->ipvs_property = 1;
skb              2060 net/netfilter/ipvs/ip_vs_core.c 			     ipvs, af, skb, &iph);
skb              2063 net/netfilter/ipvs/ip_vs_core.c 	if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
skb              2069 net/netfilter/ipvs/ip_vs_core.c 			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
skb              2071 net/netfilter/ipvs/ip_vs_core.c 			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
skb              2096 net/netfilter/ipvs/ip_vs_core.c 		if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph))
skb              2100 net/netfilter/ipvs/ip_vs_core.c 	IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet");
skb              2123 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_in_stats(cp, skb);
skb              2124 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
skb              2126 net/netfilter/ipvs/ip_vs_core.c 		ret = cp->packet_xmit(skb, cp, pp, &iph);
skb              2162 net/netfilter/ipvs/ip_vs_core.c ip_vs_remote_request4(void *priv, struct sk_buff *skb,
skb              2165 net/netfilter/ipvs/ip_vs_core.c 	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
skb              2173 net/netfilter/ipvs/ip_vs_core.c ip_vs_local_request4(void *priv, struct sk_buff *skb,
skb              2176 net/netfilter/ipvs/ip_vs_core.c 	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
skb              2186 net/netfilter/ipvs/ip_vs_core.c ip_vs_remote_request6(void *priv, struct sk_buff *skb,
skb              2189 net/netfilter/ipvs/ip_vs_core.c 	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
skb              2197 net/netfilter/ipvs/ip_vs_core.c ip_vs_local_request6(void *priv, struct sk_buff *skb,
skb              2200 net/netfilter/ipvs/ip_vs_core.c 	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
skb              2216 net/netfilter/ipvs/ip_vs_core.c ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
skb              2222 net/netfilter/ipvs/ip_vs_core.c 	if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
skb              2229 net/netfilter/ipvs/ip_vs_core.c 	return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
skb              2234 net/netfilter/ipvs/ip_vs_core.c ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb,
skb              2241 net/netfilter/ipvs/ip_vs_core.c 	ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
skb              2249 net/netfilter/ipvs/ip_vs_core.c 	return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
skb              2961 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
skb              2964 net/netfilter/ipvs/ip_vs_ctl.c 	struct nlattr *nl_stats = nla_nest_start_noflag(skb, container_type);
skb              2969 net/netfilter/ipvs/ip_vs_ctl.c 	if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) ||
skb              2970 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) ||
skb              2971 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) ||
skb              2972 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
skb              2974 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
skb              2976 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) ||
skb              2977 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) ||
skb              2978 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) ||
skb              2979 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, (u32)kstats->inbps) ||
skb              2980 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, (u32)kstats->outbps))
skb              2982 net/netfilter/ipvs/ip_vs_ctl.c 	nla_nest_end(skb, nl_stats);
skb              2987 net/netfilter/ipvs/ip_vs_ctl.c 	nla_nest_cancel(skb, nl_stats);
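
ip_vs_genl_fill_stats() follows the standard netlink nesting discipline: nla_nest_start_noflag() reserves a container attribute, the nla_put_*() calls append members, nla_nest_end() patches the container's length once the size is known, and nla_nest_cancel() rolls the message back if any put fails. A minimal sketch of that patch-or-roll-back pattern over a plain byte buffer (toy TLV layout, not the real netlink attribute format):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct msg { uint8_t buf[256]; size_t len; };

    /* Start a nested TLV: emit type plus a placeholder length and
     * remember where the nest began. */
    static size_t nest_start(struct msg *m, uint16_t type)
    {
        size_t mark = m->len;

        memcpy(m->buf + m->len, &type, 2);
        m->len += 4;                    /* 2 bytes type + 2 bytes length */
        return mark;
    }

    /* Close the nest by patching the recorded length field. */
    static void nest_end(struct msg *m, size_t mark)
    {
        uint16_t tlen = (uint16_t)(m->len - mark);

        memcpy(m->buf + mark + 2, &tlen, 2);
    }

    /* Roll the message back to before the nest started. */
    static void nest_cancel(struct msg *m, size_t mark)
    {
        m->len = mark;
    }

    static int put_u32(struct msg *m, uint16_t type, uint32_t v)
    {
        uint16_t tlen = 8;

        if (m->len + 8 > sizeof(m->buf))
            return -1;                  /* no room: caller cancels */
        memcpy(m->buf + m->len, &type, 2);
        memcpy(m->buf + m->len + 2, &tlen, 2);
        memcpy(m->buf + m->len + 4, &v, 4);
        m->len += 8;
        return 0;
    }

    int main(void)
    {
        struct msg m = { .len = 0 };
        size_t nest = nest_start(&m, 1);

        if (put_u32(&m, 2, 1234) || put_u32(&m, 3, 5678))
            nest_cancel(&m, nest);      /* mirrors nla_put_failure */
        else
            nest_end(&m, nest);
        printf("message length: %zu\n", m.len);
        return 0;
    }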
skb              2991 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type,
skb              2994 net/netfilter/ipvs/ip_vs_ctl.c 	struct nlattr *nl_stats = nla_nest_start_noflag(skb, container_type);
skb              2999 net/netfilter/ipvs/ip_vs_ctl.c 	if (nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CONNS, kstats->conns,
skb              3001 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts,
skb              3003 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts,
skb              3005 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
skb              3007 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
skb              3009 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CPS, kstats->cps,
skb              3011 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps,
skb              3013 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps,
skb              3015 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps,
skb              3017 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps,
skb              3020 net/netfilter/ipvs/ip_vs_ctl.c 	nla_nest_end(skb, nl_stats);
skb              3025 net/netfilter/ipvs/ip_vs_ctl.c 	nla_nest_cancel(skb, nl_stats);
skb              3029 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_fill_service(struct sk_buff *skb,
skb              3040 net/netfilter/ipvs/ip_vs_ctl.c 	nl_service = nla_nest_start_noflag(skb, IPVS_CMD_ATTR_SERVICE);
skb              3044 net/netfilter/ipvs/ip_vs_ctl.c 	if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af))
skb              3047 net/netfilter/ipvs/ip_vs_ctl.c 		if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark))
skb              3050 net/netfilter/ipvs/ip_vs_ctl.c 		if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) ||
skb              3051 net/netfilter/ipvs/ip_vs_ctl.c 		    nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) ||
skb              3052 net/netfilter/ipvs/ip_vs_ctl.c 		    nla_put_be16(skb, IPVS_SVC_ATTR_PORT, svc->port))
skb              3059 net/netfilter/ipvs/ip_vs_ctl.c 	if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
skb              3060 net/netfilter/ipvs/ip_vs_ctl.c 	    (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
skb              3061 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
skb              3062 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
skb              3063 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_be32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
skb              3066 net/netfilter/ipvs/ip_vs_ctl.c 	if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &kstats))
skb              3068 net/netfilter/ipvs/ip_vs_ctl.c 	if (ip_vs_genl_fill_stats64(skb, IPVS_SVC_ATTR_STATS64, &kstats))
skb              3071 net/netfilter/ipvs/ip_vs_ctl.c 	nla_nest_end(skb, nl_service);
skb              3076 net/netfilter/ipvs/ip_vs_ctl.c 	nla_nest_cancel(skb, nl_service);
skb              3080 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_dump_service(struct sk_buff *skb,
skb              3086 net/netfilter/ipvs/ip_vs_ctl.c 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              3092 net/netfilter/ipvs/ip_vs_ctl.c 	if (ip_vs_genl_fill_service(skb, svc) < 0)
skb              3095 net/netfilter/ipvs/ip_vs_ctl.c 	genlmsg_end(skb, hdr);
skb              3099 net/netfilter/ipvs/ip_vs_ctl.c 	genlmsg_cancel(skb, hdr);
skb              3103 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_dump_services(struct sk_buff *skb,
skb              3109 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
skb              3117 net/netfilter/ipvs/ip_vs_ctl.c 			if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
skb              3128 net/netfilter/ipvs/ip_vs_ctl.c 			if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
skb              3139 net/netfilter/ipvs/ip_vs_ctl.c 	return skb->len;
skb              3245 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
skb              3250 net/netfilter/ipvs/ip_vs_ctl.c 	nl_dest = nla_nest_start_noflag(skb, IPVS_CMD_ATTR_DEST);
skb              3254 net/netfilter/ipvs/ip_vs_ctl.c 	if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
skb              3255 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
skb              3256 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
skb              3259 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
skb              3261 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u8(skb, IPVS_DEST_ATTR_TUN_TYPE,
skb              3263 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_be16(skb, IPVS_DEST_ATTR_TUN_PORT,
skb              3265 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u16(skb, IPVS_DEST_ATTR_TUN_FLAGS,
skb              3267 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
skb              3268 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
skb              3269 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
skb              3271 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
skb              3273 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
skb              3275 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u16(skb, IPVS_DEST_ATTR_ADDR_FAMILY, dest->af))
skb              3278 net/netfilter/ipvs/ip_vs_ctl.c 	if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &kstats))
skb              3280 net/netfilter/ipvs/ip_vs_ctl.c 	if (ip_vs_genl_fill_stats64(skb, IPVS_DEST_ATTR_STATS64, &kstats))
skb              3283 net/netfilter/ipvs/ip_vs_ctl.c 	nla_nest_end(skb, nl_dest);
skb              3288 net/netfilter/ipvs/ip_vs_ctl.c 	nla_nest_cancel(skb, nl_dest);
skb              3292 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
skb              3297 net/netfilter/ipvs/ip_vs_ctl.c 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              3303 net/netfilter/ipvs/ip_vs_ctl.c 	if (ip_vs_genl_fill_dest(skb, dest) < 0)
skb              3306 net/netfilter/ipvs/ip_vs_ctl.c 	genlmsg_end(skb, hdr);
skb              3310 net/netfilter/ipvs/ip_vs_ctl.c 	genlmsg_cancel(skb, hdr);
skb              3314 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_dump_dests(struct sk_buff *skb,
skb              3322 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
skb              3340 net/netfilter/ipvs/ip_vs_ctl.c 		if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) {
skb              3352 net/netfilter/ipvs/ip_vs_ctl.c 	return skb->len;
skb              3420 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __u32 state,
skb              3425 net/netfilter/ipvs/ip_vs_ctl.c 	nl_daemon = nla_nest_start_noflag(skb, IPVS_CMD_ATTR_DAEMON);
skb              3429 net/netfilter/ipvs/ip_vs_ctl.c 	if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
skb              3430 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, c->mcast_ifn) ||
skb              3431 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, c->syncid) ||
skb              3432 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u16(skb, IPVS_DAEMON_ATTR_SYNC_MAXLEN, c->sync_maxlen) ||
skb              3433 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u16(skb, IPVS_DAEMON_ATTR_MCAST_PORT, c->mcast_port) ||
skb              3434 net/netfilter/ipvs/ip_vs_ctl.c 	    nla_put_u8(skb, IPVS_DAEMON_ATTR_MCAST_TTL, c->mcast_ttl))
skb              3438 net/netfilter/ipvs/ip_vs_ctl.c 		if (nla_put_in6_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP6,
skb              3444 net/netfilter/ipvs/ip_vs_ctl.c 		    nla_put_in_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP,
skb              3447 net/netfilter/ipvs/ip_vs_ctl.c 	nla_nest_end(skb, nl_daemon);
skb              3452 net/netfilter/ipvs/ip_vs_ctl.c 	nla_nest_cancel(skb, nl_daemon);
skb              3456 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state,
skb              3461 net/netfilter/ipvs/ip_vs_ctl.c 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              3467 net/netfilter/ipvs/ip_vs_ctl.c 	if (ip_vs_genl_fill_daemon(skb, state, c))
skb              3470 net/netfilter/ipvs/ip_vs_ctl.c 	genlmsg_end(skb, hdr);
skb              3474 net/netfilter/ipvs/ip_vs_ctl.c 	genlmsg_cancel(skb, hdr);
skb              3478 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
skb              3481 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
skb              3486 net/netfilter/ipvs/ip_vs_ctl.c 		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
skb              3494 net/netfilter/ipvs/ip_vs_ctl.c 		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
skb              3504 net/netfilter/ipvs/ip_vs_ctl.c 	return skb->len;
skb              3599 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
skb              3602 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
skb              3624 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
skb              3631 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
skb              3753 net/netfilter/ipvs/ip_vs_ctl.c static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
skb              3758 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
skb               213 net/netfilter/ipvs/ip_vs_dh.c ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb                21 net/netfilter/ipvs/ip_vs_fo.c ip_vs_fo_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb                66 net/netfilter/ipvs/ip_vs_ftp.c static char *ip_vs_ftp_data_ptr(struct sk_buff *skb, struct ip_vs_iphdr *ipvsh)
skb                68 net/netfilter/ipvs/ip_vs_ftp.c 	struct tcphdr *th = (struct tcphdr *)((char *)skb->data + ipvsh->len);
skb               249 net/netfilter/ipvs/ip_vs_ftp.c 			 struct sk_buff *skb, int *diff,
skb               270 net/netfilter/ipvs/ip_vs_ftp.c 	if (skb_ensure_writable(skb, skb->len))
skb               274 net/netfilter/ipvs/ip_vs_ftp.c 		data = ip_vs_ftp_data_ptr(skb, ipvsh);
skb               275 net/netfilter/ipvs/ip_vs_ftp.c 		data_limit = skb_tail_pointer(skb);
skb               291 net/netfilter/ipvs/ip_vs_ftp.c 		data = ip_vs_ftp_data_ptr(skb, ipvsh);
skb               292 net/netfilter/ipvs/ip_vs_ftp.c 		data_limit = skb_tail_pointer(skb);
skb               334 net/netfilter/ipvs/ip_vs_ftp.c 				      cp->dest, skb->mark);
skb               364 net/netfilter/ipvs/ip_vs_ftp.c 	ct = nf_ct_get(skb, &ctinfo);
skb               374 net/netfilter/ipvs/ip_vs_ftp.c 		mangled = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
skb               380 net/netfilter/ipvs/ip_vs_ftp.c 			ip_vs_nfct_expect_related(skb, ct, n_cp,
skb               382 net/netfilter/ipvs/ip_vs_ftp.c 			if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               383 net/netfilter/ipvs/ip_vs_ftp.c 				skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               419 net/netfilter/ipvs/ip_vs_ftp.c 			struct sk_buff *skb, int *diff,
skb               436 net/netfilter/ipvs/ip_vs_ftp.c 	if (skb_ensure_writable(skb, skb->len))
skb               439 net/netfilter/ipvs/ip_vs_ftp.c 	data = data_start = ip_vs_ftp_data_ptr(skb, ipvsh);
skb               440 net/netfilter/ipvs/ip_vs_ftp.c 	data_limit = skb_tail_pointer(skb);
skb               539 net/netfilter/ipvs/ip_vs_ftp.c 					      skb->mark);
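
The ip_vs_ftp lines rewrite addresses carried as text on the FTP control connection (for example the `227 Entering Passive Mode (a,b,c,d,p1,p2)` reply), which is why the handlers need the whole skb writable and report a length delta through *diff. A standalone sketch of decoding that textual tuple (parse_pasv is an illustrative parser, not the kernel helper):

    #include <stdio.h>
    #include <stdint.h>

    /* Parse the "a,b,c,d,p1,p2" tuple from a 227 PASV reply into an
     * IPv4 address and TCP port.  Returns 0 on success. */
    static int parse_pasv(const char *line, uint32_t *addr, uint16_t *port)
    {
        unsigned int a, b, c, d, p1, p2;

        if (sscanf(line, "227 Entering Passive Mode (%u,%u,%u,%u,%u,%u)",
                   &a, &b, &c, &d, &p1, &p2) != 6)
            return -1;
        if (a > 255 || b > 255 || c > 255 || d > 255 || p1 > 255 || p2 > 255)
            return -1;
        *addr = a << 24 | b << 16 | c << 8 | d;
        *port = (uint16_t)(p1 << 8 | p2);
        return 0;
    }

    int main(void)
    {
        uint32_t addr;
        uint16_t port;

        if (!parse_pasv("227 Entering Passive Mode (192,168,0,9,4,1)",
                        &addr, &port))
            printf("addr %08x port %u\n", addr, port);  /* port 1025 */
        return 0;
    }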
skb               481 net/netfilter/ipvs/ip_vs_lblc.c ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb               645 net/netfilter/ipvs/ip_vs_lblcr.c ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb                24 net/netfilter/ipvs/ip_vs_lc.c ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
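
ip_vs_lc_schedule() picks the real server with the least load, where IPVS weighs one active connection as heavily as 256 inactive ones (ip_vs_dest_conn_overhead() shifts the active count left by 8 before adding the inactive count). A sketch of that comparison over a plain array of destinations:

    #include <stdio.h>

    struct dest { const char *name; int activeconns; int inactconns; int weight; };

    /* Same weighting as ip_vs_dest_conn_overhead(): one active
     * connection costs as much as 256 inactive ones. */
    static int conn_overhead(const struct dest *d)
    {
        return (d->activeconns << 8) + d->inactconns;
    }

    /* Least-connection pick: skip zero-weight (quiesced) servers and
     * keep the lowest overhead. */
    static const struct dest *lc_schedule(const struct dest *tbl, int n)
    {
        const struct dest *least = NULL;
        int i;

        for (i = 0; i < n; i++) {
            if (tbl[i].weight <= 0)
                continue;
            if (!least || conn_overhead(&tbl[i]) < conn_overhead(least))
                least = &tbl[i];
        }
        return least;
    }

    int main(void)
    {
        struct dest tbl[] = {
            { "rs1", 3, 10, 1 },    /* overhead 778 */
            { "rs2", 2, 100, 1 },   /* overhead 612: lowest, wins */
            { "rs3", 1, 0, 0 },     /* weight 0: never picked */
        };
        const struct dest *d = lc_schedule(tbl, 3);

        printf("picked %s\n", d ? d->name : "none");
        return 0;
    }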
skb               446 net/netfilter/ipvs/ip_vs_mh.c ip_vs_mh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
skb               459 net/netfilter/ipvs/ip_vs_mh.c 		ports = skb_header_pointer(skb, iph->len, sizeof(_ports),
skb               475 net/netfilter/ipvs/ip_vs_mh.c ip_vs_mh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb               488 net/netfilter/ipvs/ip_vs_mh.c 		port = ip_vs_mh_get_port(skb, iph);
skb                71 net/netfilter/ipvs/ip_vs_nfct.c ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
skb                74 net/netfilter/ipvs/ip_vs_nfct.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               132 net/netfilter/ipvs/ip_vs_nfct.c int ip_vs_confirm_conntrack(struct sk_buff *skb)
skb               134 net/netfilter/ipvs/ip_vs_nfct.c 	return nf_conntrack_confirm(skb);
skb               211 net/netfilter/ipvs/ip_vs_nfct.c void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
skb                53 net/netfilter/ipvs/ip_vs_nq.c ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb                25 net/netfilter/ipvs/ip_vs_ovf.c ip_vs_ovf_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb                67 net/netfilter/ipvs/ip_vs_pe_sip.c ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
skb                74 net/netfilter/ipvs/ip_vs_pe_sip.c 	retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
skb                84 net/netfilter/ipvs/ip_vs_pe_sip.c 	if (dataoff >= skb->len)
skb                86 net/netfilter/ipvs/ip_vs_pe_sip.c 	retc = skb_linearize(skb);
skb                89 net/netfilter/ipvs/ip_vs_pe_sip.c 	dptr = skb->data + dataoff;
skb                90 net/netfilter/ipvs/ip_vs_pe_sip.c 	datalen = skb->len - dataoff;
skb               150 net/netfilter/ipvs/ip_vs_pe_sip.c 		   struct sk_buff *skb,
skb               156 net/netfilter/ipvs/ip_vs_pe_sip.c 		return ip_vs_new_conn_out(svc, dest, skb, iph, dport, cport);
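
The ip_vs_pe_sip lines locate the SIP payload behind the transport header (hence dataoff, dptr and datalen) so the persistence engine can key connections on the Call-ID header; in the kernel the actual extraction goes through the nf_conntrack SIP parser. Purely as an illustration of what is being keyed on, a naive standalone Call-ID scanner:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>    /* strncasecmp */

    /* Find the value of the Call-ID header in a SIP message.  The
     * kernel does not use this logic; it is only a sketch of the key
     * the SIP persistence engine hashes on. */
    static int find_call_id(const char *msg, const char **val, size_t *len)
    {
        const char *p = msg;

        while ((p = strstr(p, "\r\n")) != NULL) {
            p += 2;
            if (strncasecmp(p, "Call-ID:", 8) == 0) {
                p += 8;
                while (*p == ' ')
                    p++;
                *val = p;
                *len = strcspn(p, "\r\n");
                return 0;
            }
        }
        return -1;
    }

    int main(void)
    {
        const char *msg = "INVITE sip:bob@example.org SIP/2.0\r\n"
                          "Call-ID: a84b4c76e66710\r\n\r\n";
        const char *id;
        size_t n;

        if (!find_call_id(msg, &id, &n))
            printf("Call-ID = %.*s\n", (int)n, id);
        return 0;
    }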
skb               216 net/netfilter/ipvs/ip_vs_proto.c 			     const struct sk_buff *skb,
skb               223 net/netfilter/ipvs/ip_vs_proto.c 	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
skb               231 net/netfilter/ipvs/ip_vs_proto.c 		pptr = skb_header_pointer(skb, offset + ih->ihl*4,
skb               248 net/netfilter/ipvs/ip_vs_proto.c 			     const struct sk_buff *skb,
skb               255 net/netfilter/ipvs/ip_vs_proto.c 	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
skb               263 net/netfilter/ipvs/ip_vs_proto.c 		pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
skb               281 net/netfilter/ipvs/ip_vs_proto.c 			  const struct sk_buff *skb,
skb               287 net/netfilter/ipvs/ip_vs_proto.c 		ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg);
skb               290 net/netfilter/ipvs/ip_vs_proto.c 		ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
skb                55 net/netfilter/ipvs/ip_vs_proto_ah_esp.c ah_esp_conn_in_get(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb,
skb                81 net/netfilter/ipvs/ip_vs_proto_ah_esp.c ah_esp_conn_out_get(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb,
skb               103 net/netfilter/ipvs/ip_vs_proto_ah_esp.c ah_esp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
skb                13 net/netfilter/ipvs/ip_vs_proto_sctp.c sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
skb                16 net/netfilter/ipvs/ip_vs_proto_sctp.c sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
skb                27 net/netfilter/ipvs/ip_vs_proto_sctp.c 		sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
skb                29 net/netfilter/ipvs/ip_vs_proto_sctp.c 			sch = skb_header_pointer(skb, iph->len + sizeof(_sctph),
skb                41 net/netfilter/ipvs/ip_vs_proto_sctp.c 			skb, iph->len, sizeof(_ports), &_ports);
skb                50 net/netfilter/ipvs/ip_vs_proto_sctp.c 		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
skb                53 net/netfilter/ipvs/ip_vs_proto_sctp.c 		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
skb                70 net/netfilter/ipvs/ip_vs_proto_sctp.c 		*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
skb                73 net/netfilter/ipvs/ip_vs_proto_sctp.c 				*verdict = ip_vs_leave(svc, skb, pd, iph);
skb                83 net/netfilter/ipvs/ip_vs_proto_sctp.c static void sctp_nat_csum(struct sk_buff *skb, struct sctphdr *sctph,
skb                86 net/netfilter/ipvs/ip_vs_proto_sctp.c 	sctph->checksum = sctp_compute_cksum(skb, sctphoff);
skb                87 net/netfilter/ipvs/ip_vs_proto_sctp.c 	skb->ip_summed = CHECKSUM_UNNECESSARY;
skb                91 net/netfilter/ipvs/ip_vs_proto_sctp.c sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               104 net/netfilter/ipvs/ip_vs_proto_sctp.c 	if (skb_ensure_writable(skb, sctphoff + sizeof(*sctph)))
skb               111 net/netfilter/ipvs/ip_vs_proto_sctp.c 		if (!sctp_csum_check(cp->af, skb, pp))
skb               115 net/netfilter/ipvs/ip_vs_proto_sctp.c 		ret = ip_vs_app_pkt_out(cp, skb, iph);
skb               123 net/netfilter/ipvs/ip_vs_proto_sctp.c 	sctph = (void *) skb_network_header(skb) + sctphoff;
skb               127 net/netfilter/ipvs/ip_vs_proto_sctp.c 	    skb->ip_summed == CHECKSUM_PARTIAL) {
skb               129 net/netfilter/ipvs/ip_vs_proto_sctp.c 		sctp_nat_csum(skb, sctph, sctphoff);
skb               131 net/netfilter/ipvs/ip_vs_proto_sctp.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               138 net/netfilter/ipvs/ip_vs_proto_sctp.c sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               151 net/netfilter/ipvs/ip_vs_proto_sctp.c 	if (skb_ensure_writable(skb, sctphoff + sizeof(*sctph)))
skb               158 net/netfilter/ipvs/ip_vs_proto_sctp.c 		if (!sctp_csum_check(cp->af, skb, pp))
skb               162 net/netfilter/ipvs/ip_vs_proto_sctp.c 		ret = ip_vs_app_pkt_in(cp, skb, iph);
skb               170 net/netfilter/ipvs/ip_vs_proto_sctp.c 	sctph = (void *) skb_network_header(skb) + sctphoff;
skb               174 net/netfilter/ipvs/ip_vs_proto_sctp.c 	    (skb->ip_summed == CHECKSUM_PARTIAL &&
skb               175 net/netfilter/ipvs/ip_vs_proto_sctp.c 	     !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
skb               177 net/netfilter/ipvs/ip_vs_proto_sctp.c 		sctp_nat_csum(skb, sctph, sctphoff);
skb               178 net/netfilter/ipvs/ip_vs_proto_sctp.c 	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               179 net/netfilter/ipvs/ip_vs_proto_sctp.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               186 net/netfilter/ipvs/ip_vs_proto_sctp.c sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
skb               197 net/netfilter/ipvs/ip_vs_proto_sctp.c 		sctphoff = ip_hdrlen(skb);
skb               199 net/netfilter/ipvs/ip_vs_proto_sctp.c 	sh = (struct sctphdr *)(skb->data + sctphoff);
skb               201 net/netfilter/ipvs/ip_vs_proto_sctp.c 	val = sctp_compute_cksum(skb, sctphoff);
skb               205 net/netfilter/ipvs/ip_vs_proto_sctp.c 		IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
skb               379 net/netfilter/ipvs/ip_vs_proto_sctp.c 		int direction, const struct sk_buff *skb)
skb               387 net/netfilter/ipvs/ip_vs_proto_sctp.c 	ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
skb               389 net/netfilter/ipvs/ip_vs_proto_sctp.c 	ihl = ip_hdrlen(skb);
skb               393 net/netfilter/ipvs/ip_vs_proto_sctp.c 	sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch);
skb               414 net/netfilter/ipvs/ip_vs_proto_sctp.c 			sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4),
skb               475 net/netfilter/ipvs/ip_vs_proto_sctp.c 		const struct sk_buff *skb, struct ip_vs_proto_data *pd)
skb               478 net/netfilter/ipvs/ip_vs_proto_sctp.c 	set_sctp_state(pd, cp, direction, skb);
skb                32 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
skb                35 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
skb                49 net/netfilter/ipvs/ip_vs_proto_tcp.c 		th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
skb                57 net/netfilter/ipvs/ip_vs_proto_tcp.c 			skb, iph->len, sizeof(_ports), &_ports);
skb                68 net/netfilter/ipvs/ip_vs_proto_tcp.c 		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
skb                71 net/netfilter/ipvs/ip_vs_proto_tcp.c 		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
skb                90 net/netfilter/ipvs/ip_vs_proto_tcp.c 		*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
skb                93 net/netfilter/ipvs/ip_vs_proto_tcp.c 				*verdict = ip_vs_leave(svc, skb, pd, iph);
skb               147 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               159 net/netfilter/ipvs/ip_vs_proto_tcp.c 	oldlen = skb->len - tcphoff;
skb               162 net/netfilter/ipvs/ip_vs_proto_tcp.c 	if (skb_ensure_writable(skb, tcphoff + sizeof(*tcph)))
skb               169 net/netfilter/ipvs/ip_vs_proto_tcp.c 		if (!tcp_csum_check(cp->af, skb, pp))
skb               173 net/netfilter/ipvs/ip_vs_proto_tcp.c 		if (!(ret = ip_vs_app_pkt_out(cp, skb, iph)))
skb               177 net/netfilter/ipvs/ip_vs_proto_tcp.c 			oldlen = skb->len - tcphoff;
skb               182 net/netfilter/ipvs/ip_vs_proto_tcp.c 	tcph = (void *)skb_network_header(skb) + tcphoff;
skb               186 net/netfilter/ipvs/ip_vs_proto_tcp.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               189 net/netfilter/ipvs/ip_vs_proto_tcp.c 					htons(skb->len - tcphoff));
skb               194 net/netfilter/ipvs/ip_vs_proto_tcp.c 		if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               195 net/netfilter/ipvs/ip_vs_proto_tcp.c 			skb->ip_summed = cp->app ?
skb               200 net/netfilter/ipvs/ip_vs_proto_tcp.c 		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
skb               205 net/netfilter/ipvs/ip_vs_proto_tcp.c 						      skb->len - tcphoff,
skb               206 net/netfilter/ipvs/ip_vs_proto_tcp.c 						      cp->protocol, skb->csum);
skb               211 net/netfilter/ipvs/ip_vs_proto_tcp.c 							skb->len - tcphoff,
skb               213 net/netfilter/ipvs/ip_vs_proto_tcp.c 							skb->csum);
skb               214 net/netfilter/ipvs/ip_vs_proto_tcp.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               225 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               237 net/netfilter/ipvs/ip_vs_proto_tcp.c 	oldlen = skb->len - tcphoff;
skb               240 net/netfilter/ipvs/ip_vs_proto_tcp.c 	if (skb_ensure_writable(skb, tcphoff + sizeof(*tcph)))
skb               247 net/netfilter/ipvs/ip_vs_proto_tcp.c 		if (!tcp_csum_check(cp->af, skb, pp))
skb               254 net/netfilter/ipvs/ip_vs_proto_tcp.c 		if (!(ret = ip_vs_app_pkt_in(cp, skb, iph)))
skb               258 net/netfilter/ipvs/ip_vs_proto_tcp.c 			oldlen = skb->len - tcphoff;
skb               263 net/netfilter/ipvs/ip_vs_proto_tcp.c 	tcph = (void *)skb_network_header(skb) + tcphoff;
skb               269 net/netfilter/ipvs/ip_vs_proto_tcp.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               272 net/netfilter/ipvs/ip_vs_proto_tcp.c 					htons(skb->len - tcphoff));
skb               277 net/netfilter/ipvs/ip_vs_proto_tcp.c 		if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               278 net/netfilter/ipvs/ip_vs_proto_tcp.c 			skb->ip_summed = cp->app ?
skb               283 net/netfilter/ipvs/ip_vs_proto_tcp.c 		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
skb               288 net/netfilter/ipvs/ip_vs_proto_tcp.c 						      skb->len - tcphoff,
skb               289 net/netfilter/ipvs/ip_vs_proto_tcp.c 						      cp->protocol, skb->csum);
skb               294 net/netfilter/ipvs/ip_vs_proto_tcp.c 							skb->len - tcphoff,
skb               296 net/netfilter/ipvs/ip_vs_proto_tcp.c 							skb->csum);
skb               297 net/netfilter/ipvs/ip_vs_proto_tcp.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               304 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
skb               313 net/netfilter/ipvs/ip_vs_proto_tcp.c 		tcphoff = ip_hdrlen(skb);
skb               315 net/netfilter/ipvs/ip_vs_proto_tcp.c 	switch (skb->ip_summed) {
skb               317 net/netfilter/ipvs/ip_vs_proto_tcp.c 		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
skb               322 net/netfilter/ipvs/ip_vs_proto_tcp.c 			if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb               323 net/netfilter/ipvs/ip_vs_proto_tcp.c 					    &ipv6_hdr(skb)->daddr,
skb               324 net/netfilter/ipvs/ip_vs_proto_tcp.c 					    skb->len - tcphoff,
skb               325 net/netfilter/ipvs/ip_vs_proto_tcp.c 					    ipv6_hdr(skb)->nexthdr,
skb               326 net/netfilter/ipvs/ip_vs_proto_tcp.c 					    skb->csum)) {
skb               327 net/netfilter/ipvs/ip_vs_proto_tcp.c 				IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
skb               333 net/netfilter/ipvs/ip_vs_proto_tcp.c 			if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
skb               334 net/netfilter/ipvs/ip_vs_proto_tcp.c 					      ip_hdr(skb)->daddr,
skb               335 net/netfilter/ipvs/ip_vs_proto_tcp.c 					      skb->len - tcphoff,
skb               336 net/netfilter/ipvs/ip_vs_proto_tcp.c 					      ip_hdr(skb)->protocol,
skb               337 net/netfilter/ipvs/ip_vs_proto_tcp.c 					      skb->csum)) {
skb               338 net/netfilter/ipvs/ip_vs_proto_tcp.c 				IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
skb               587 net/netfilter/ipvs/ip_vs_proto_tcp.c 		     const struct sk_buff *skb,
skb               593 net/netfilter/ipvs/ip_vs_proto_tcp.c 	int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
skb               595 net/netfilter/ipvs/ip_vs_proto_tcp.c 	int ihl = ip_hdrlen(skb);
skb               598 net/netfilter/ipvs/ip_vs_proto_tcp.c 	th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
skb                28 net/netfilter/ipvs/ip_vs_proto_udp.c udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
skb                31 net/netfilter/ipvs/ip_vs_proto_udp.c udp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
skb                42 net/netfilter/ipvs/ip_vs_proto_udp.c 		uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
skb                47 net/netfilter/ipvs/ip_vs_proto_udp.c 			skb, iph->len, sizeof(_ports), &_ports);
skb                56 net/netfilter/ipvs/ip_vs_proto_udp.c 		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
skb                59 net/netfilter/ipvs/ip_vs_proto_udp.c 		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
skb                78 net/netfilter/ipvs/ip_vs_proto_udp.c 		*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
skb                81 net/netfilter/ipvs/ip_vs_proto_udp.c 				*verdict = ip_vs_leave(svc, skb, pd, iph);
skb               136 net/netfilter/ipvs/ip_vs_proto_udp.c udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               148 net/netfilter/ipvs/ip_vs_proto_udp.c 	oldlen = skb->len - udphoff;
skb               151 net/netfilter/ipvs/ip_vs_proto_udp.c 	if (skb_ensure_writable(skb, udphoff + sizeof(*udph)))
skb               158 net/netfilter/ipvs/ip_vs_proto_udp.c 		if (!udp_csum_check(cp->af, skb, pp))
skb               164 net/netfilter/ipvs/ip_vs_proto_udp.c 		if (!(ret = ip_vs_app_pkt_out(cp, skb, iph)))
skb               168 net/netfilter/ipvs/ip_vs_proto_udp.c 			oldlen = skb->len - udphoff;
skb               173 net/netfilter/ipvs/ip_vs_proto_udp.c 	udph = (void *)skb_network_header(skb) + udphoff;
skb               179 net/netfilter/ipvs/ip_vs_proto_udp.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               182 net/netfilter/ipvs/ip_vs_proto_udp.c 					htons(skb->len - udphoff));
skb               187 net/netfilter/ipvs/ip_vs_proto_udp.c 		if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               188 net/netfilter/ipvs/ip_vs_proto_udp.c 			skb->ip_summed = cp->app ?
skb               193 net/netfilter/ipvs/ip_vs_proto_udp.c 		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
skb               198 net/netfilter/ipvs/ip_vs_proto_udp.c 						      skb->len - udphoff,
skb               199 net/netfilter/ipvs/ip_vs_proto_udp.c 						      cp->protocol, skb->csum);
skb               204 net/netfilter/ipvs/ip_vs_proto_udp.c 							skb->len - udphoff,
skb               206 net/netfilter/ipvs/ip_vs_proto_udp.c 							skb->csum);
skb               209 net/netfilter/ipvs/ip_vs_proto_udp.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               219 net/netfilter/ipvs/ip_vs_proto_udp.c udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
skb               231 net/netfilter/ipvs/ip_vs_proto_udp.c 	oldlen = skb->len - udphoff;
skb               234 net/netfilter/ipvs/ip_vs_proto_udp.c 	if (skb_ensure_writable(skb, udphoff + sizeof(*udph)))
skb               241 net/netfilter/ipvs/ip_vs_proto_udp.c 		if (!udp_csum_check(cp->af, skb, pp))
skb               248 net/netfilter/ipvs/ip_vs_proto_udp.c 		if (!(ret = ip_vs_app_pkt_in(cp, skb, iph)))
skb               252 net/netfilter/ipvs/ip_vs_proto_udp.c 			oldlen = skb->len - udphoff;
skb               257 net/netfilter/ipvs/ip_vs_proto_udp.c 	udph = (void *)skb_network_header(skb) + udphoff;
skb               263 net/netfilter/ipvs/ip_vs_proto_udp.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               266 net/netfilter/ipvs/ip_vs_proto_udp.c 					htons(skb->len - udphoff));
skb               271 net/netfilter/ipvs/ip_vs_proto_udp.c 		if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               272 net/netfilter/ipvs/ip_vs_proto_udp.c 			skb->ip_summed = cp->app ?
skb               277 net/netfilter/ipvs/ip_vs_proto_udp.c 		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
skb               282 net/netfilter/ipvs/ip_vs_proto_udp.c 						      skb->len - udphoff,
skb               283 net/netfilter/ipvs/ip_vs_proto_udp.c 						      cp->protocol, skb->csum);
skb               288 net/netfilter/ipvs/ip_vs_proto_udp.c 							skb->len - udphoff,
skb               290 net/netfilter/ipvs/ip_vs_proto_udp.c 							skb->csum);
skb               293 net/netfilter/ipvs/ip_vs_proto_udp.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               300 net/netfilter/ipvs/ip_vs_proto_udp.c udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
skb               310 net/netfilter/ipvs/ip_vs_proto_udp.c 		udphoff = ip_hdrlen(skb);
skb               312 net/netfilter/ipvs/ip_vs_proto_udp.c 	uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
skb               317 net/netfilter/ipvs/ip_vs_proto_udp.c 		switch (skb->ip_summed) {
skb               319 net/netfilter/ipvs/ip_vs_proto_udp.c 			skb->csum = skb_checksum(skb, udphoff,
skb               320 net/netfilter/ipvs/ip_vs_proto_udp.c 						 skb->len - udphoff, 0);
skb               325 net/netfilter/ipvs/ip_vs_proto_udp.c 				if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
skb               326 net/netfilter/ipvs/ip_vs_proto_udp.c 						    &ipv6_hdr(skb)->daddr,
skb               327 net/netfilter/ipvs/ip_vs_proto_udp.c 						    skb->len - udphoff,
skb               328 net/netfilter/ipvs/ip_vs_proto_udp.c 						    ipv6_hdr(skb)->nexthdr,
skb               329 net/netfilter/ipvs/ip_vs_proto_udp.c 						    skb->csum)) {
skb               330 net/netfilter/ipvs/ip_vs_proto_udp.c 					IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
skb               336 net/netfilter/ipvs/ip_vs_proto_udp.c 				if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
skb               337 net/netfilter/ipvs/ip_vs_proto_udp.c 						      ip_hdr(skb)->daddr,
skb               338 net/netfilter/ipvs/ip_vs_proto_udp.c 						      skb->len - udphoff,
skb               339 net/netfilter/ipvs/ip_vs_proto_udp.c 						      ip_hdr(skb)->protocol,
skb               340 net/netfilter/ipvs/ip_vs_proto_udp.c 						      skb->csum)) {
skb               341 net/netfilter/ipvs/ip_vs_proto_udp.c 					IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
skb               453 net/netfilter/ipvs/ip_vs_proto_udp.c 		     const struct sk_buff *skb,
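
udp_csum_check(), like tcp_csum_check() above it, validates the transport checksum against the IPv4 pseudo-header: source and destination addresses, protocol and transport length are summed together with the segment, and a valid packet folds to zero, which is the test that csum_tcpudp_magic() feeds. A self-contained sketch of that verification:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
    {
        while (len > 1) { sum += (uint32_t)p[0] << 8 | p[1]; p += 2; len -= 2; }
        if (len) sum += (uint32_t)p[0] << 8;
        return sum;
    }

    /* Sum the IPv4 pseudo-header plus the transport segment and fold;
     * the complemented result is 0 exactly when the checksum is good. */
    static uint16_t check_l4_csum(uint32_t saddr, uint32_t daddr, uint8_t proto,
                                  const uint8_t *seg, size_t seg_len)
    {
        uint32_t sum = 0;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += proto;
        sum += (uint32_t)seg_len;
        sum = sum16(seg, seg_len, sum);
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;          /* 0 == checksum OK */
    }

    int main(void)
    {
        /* 8-byte UDP header, no payload; fill in the checksum first,
         * then verify that the whole thing folds to zero. */
        uint8_t udp[8] = { 0x00, 0x35, 0x00, 0x35, 0x00, 0x08, 0x00, 0x00 };
        uint32_t sa = 0x0a000001, da = 0x0a000002;
        uint16_t want = check_l4_csum(sa, da, 17, udp, sizeof(udp));

        udp[6] = want >> 8;
        udp[7] = want & 0xff;
        printf("verify: %s\n",
               check_l4_csum(sa, da, 17, udp, sizeof(udp)) == 0 ? "ok" : "bad");
        return 0;
    }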
skb                53 net/netfilter/ipvs/ip_vs_rr.c ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb                57 net/netfilter/ipvs/ip_vs_sed.c ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb               277 net/netfilter/ipvs/ip_vs_sh.c ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
skb               290 net/netfilter/ipvs/ip_vs_sh.c 		ports = skb_header_pointer(skb, iph->len, sizeof(_ports),
skb               309 net/netfilter/ipvs/ip_vs_sh.c ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb               322 net/netfilter/ipvs/ip_vs_sh.c 		port = ip_vs_sh_get_port(skb, iph);
skb                29 net/netfilter/ipvs/ip_vs_wlc.c ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
skb               160 net/netfilter/ipvs/ip_vs_wrr.c ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
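
ip_vs_wrr_schedule() implements interleaved weighted round-robin: the list is swept repeatedly, each full sweep lowers a current-weight threshold by the gcd of all weights, and a server is eligible whenever its weight reaches the threshold, so picks land in proportion to weight. A compact sketch of that loop (plain array instead of the kernel's dest list; it assumes at least one positive weight):

    #include <stdio.h>

    struct dest { const char *name; int weight; };

    struct wrr { int i; int cw; int mw; int di; };  /* scheduler state */

    static int gcd2(int a, int b)
    {
        while (b) { int t = a % b; a = b; b = t; }
        return a;
    }

    /* Sweep the list; on each wrap lower the current weight by the
     * gcd, resetting to the max weight when it hits zero.  A server
     * is picked when its weight reaches the current threshold. */
    static const struct dest *wrr_next(struct wrr *s, const struct dest *tbl, int n)
    {
        for (;;) {
            s->i = (s->i + 1) % n;
            if (s->i == 0) {
                s->cw -= s->di;
                if (s->cw <= 0)
                    s->cw = s->mw;
            }
            if (tbl[s->i].weight >= s->cw)
                return &tbl[s->i];
        }
    }

    int main(void)
    {
        struct dest tbl[] = { { "rs1", 4 }, { "rs2", 2 }, { "rs3", 1 } };
        struct wrr s = { .i = -1, .cw = 0, .mw = 4,
                         .di = gcd2(gcd2(4, 2), 1) };

        for (int k = 0; k < 7; k++)
            printf("%s ", wrr_next(&s, tbl, 3)->name);
        putchar('\n');      /* rs1 4x, rs2 2x, rs3 1x per 7-pick cycle */
        return 0;
    }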
skb               107 net/netfilter/ipvs/ip_vs_xmit.c __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
skb               109 net/netfilter/ipvs/ip_vs_xmit.c 	if (IP6CB(skb)->frag_max_size) {
skb               113 net/netfilter/ipvs/ip_vs_xmit.c 		if (IP6CB(skb)->frag_max_size > mtu)
skb               116 net/netfilter/ipvs/ip_vs_xmit.c 	else if (skb->len > mtu && !skb_is_gso(skb)) {
skb               165 net/netfilter/ipvs/ip_vs_xmit.c static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
skb               177 net/netfilter/ipvs/ip_vs_xmit.c 		int addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
skb               180 net/netfilter/ipvs/ip_vs_xmit.c 			(!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
skb               183 net/netfilter/ipvs/ip_vs_xmit.c 			(struct rt6_info *)skb_dst(skb));
skb               187 net/netfilter/ipvs/ip_vs_xmit.c 		source_is_loopback = ipv4_is_loopback(ip_hdr(skb)->saddr);
skb               188 net/netfilter/ipvs/ip_vs_xmit.c 		old_rt_is_local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
skb               205 net/netfilter/ipvs/ip_vs_xmit.c static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
skb               207 net/netfilter/ipvs/ip_vs_xmit.c 	struct sock *sk = skb->sk;
skb               208 net/netfilter/ipvs/ip_vs_xmit.c 	struct rtable *ort = skb_rtable(skb);
skb               210 net/netfilter/ipvs/ip_vs_xmit.c 	if (!skb->dev && sk && sk_fullsock(sk))
skb               217 net/netfilter/ipvs/ip_vs_xmit.c 					  struct sk_buff *skb, int mtu)
skb               223 net/netfilter/ipvs/ip_vs_xmit.c 		if (unlikely(__mtu_check_toobig_v6(skb, mtu))) {
skb               224 net/netfilter/ipvs/ip_vs_xmit.c 			if (!skb->dev)
skb               225 net/netfilter/ipvs/ip_vs_xmit.c 				skb->dev = net->loopback_dev;
skb               228 net/netfilter/ipvs/ip_vs_xmit.c 				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb               230 net/netfilter/ipvs/ip_vs_xmit.c 				  &ipv6_hdr(skb)->saddr);
skb               242 net/netfilter/ipvs/ip_vs_xmit.c 		if (unlikely(ip_hdr(skb)->frag_off & htons(IP_DF) &&
skb               243 net/netfilter/ipvs/ip_vs_xmit.c 			     skb->len > mtu && !skb_is_gso(skb) &&
skb               245 net/netfilter/ipvs/ip_vs_xmit.c 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
skb               248 net/netfilter/ipvs/ip_vs_xmit.c 				  &ip_hdr(skb)->saddr);
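
ensure_mtu_is_adequate() applies classic path-MTU rules: an IPv4 packet over the route MTU is only bounced with ICMP_FRAG_NEEDED when its DF bit is set and it is not a GSO packet (which the stack will segment later anyway). A compact sketch of that decision with a toy DF constant:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_IP_DF 0x4000    /* Don't Fragment flag in frag_off */

    /* Mirror the IPv4 branch of the MTU check above: too big only
     * counts when DF is set and the packet is not GSO. */
    static bool v4_mtu_too_big(uint16_t frag_off, size_t len, bool is_gso,
                               unsigned int mtu)
    {
        return (frag_off & TOY_IP_DF) && len > mtu && !is_gso;
    }

    int main(void)
    {
        printf("%d\n", v4_mtu_too_big(TOY_IP_DF, 1600, false, 1500)); /* 1 */
        printf("%d\n", v4_mtu_too_big(0, 1600, false, 1500));  /* 0: may frag */
        printf("%d\n", v4_mtu_too_big(TOY_IP_DF, 1600, true, 1500)); /* 0: GSO */
        return 0;
    }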
skb               258 net/netfilter/ipvs/ip_vs_xmit.c 				 struct sk_buff *skb)
skb               264 net/netfilter/ipvs/ip_vs_xmit.c 		struct dst_entry *dst = skb_dst(skb);
skb               267 net/netfilter/ipvs/ip_vs_xmit.c 		if (ipv6_hdr(skb)->hop_limit <= 1) {
skb               268 net/netfilter/ipvs/ip_vs_xmit.c 			struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
skb               271 net/netfilter/ipvs/ip_vs_xmit.c 			skb->dev = dst->dev;
skb               272 net/netfilter/ipvs/ip_vs_xmit.c 			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
skb               280 net/netfilter/ipvs/ip_vs_xmit.c 		if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
skb               283 net/netfilter/ipvs/ip_vs_xmit.c 		ipv6_hdr(skb)->hop_limit--;
skb               287 net/netfilter/ipvs/ip_vs_xmit.c 		if (ip_hdr(skb)->ttl <= 1) {
skb               290 net/netfilter/ipvs/ip_vs_xmit.c 			icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
skb               295 net/netfilter/ipvs/ip_vs_xmit.c 		if (skb_ensure_writable(skb, sizeof(struct iphdr)))
skb               299 net/netfilter/ipvs/ip_vs_xmit.c 		ip_decrease_ttl(ip_hdr(skb));
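
decrement_ttl(), indexed above, applies the forwarding rule for both address families: a packet whose TTL or hop limit would expire is answered with an ICMP time-exceeded error and dropped; otherwise the header is made writable before the field is decremented. A sketch of the IPv4 half, assuming standard kernel headers:

#include <linux/icmp.h>
#include <linux/skbuff.h>
#include <net/icmp.h>
#include <net/ip.h>

/* IPv4 TTL handling in the style indexed above.  Returns true when
 * the packet may still be forwarded.
 */
static bool ttl_forward_ok(struct sk_buff *skb)
{
	if (ip_hdr(skb)->ttl <= 1) {
		/* tell the sender the packet expired in transit */
		icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
		return false;
	}
	/* skb may be cloned or read-only; get a private header copy */
	if (skb_ensure_writable(skb, sizeof(struct iphdr)))
		return false;
	/* decrements ttl and patches the header checksum incrementally */
	ip_decrease_ttl(ip_hdr(skb));
	return true;
}
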
skb               307 net/netfilter/ipvs/ip_vs_xmit.c __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
skb               363 net/netfilter/ipvs/ip_vs_xmit.c 	if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
skb               377 net/netfilter/ipvs/ip_vs_xmit.c 	if (!decrement_ttl(ipvs, skb_af, skb))
skb               390 net/netfilter/ipvs/ip_vs_xmit.c 			    skb->ip_summed == CHECKSUM_PARTIAL)
skb               403 net/netfilter/ipvs/ip_vs_xmit.c 		maybe_update_pmtu(skb_af, skb, mtu);
skb               406 net/netfilter/ipvs/ip_vs_xmit.c 	if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
skb               409 net/netfilter/ipvs/ip_vs_xmit.c 	skb_dst_drop(skb);
skb               412 net/netfilter/ipvs/ip_vs_xmit.c 			skb_dst_set_noref(skb, &rt->dst);
skb               414 net/netfilter/ipvs/ip_vs_xmit.c 			skb_dst_set(skb, dst_clone(&rt->dst));
skb               416 net/netfilter/ipvs/ip_vs_xmit.c 		skb_dst_set(skb, &rt->dst);
skb               426 net/netfilter/ipvs/ip_vs_xmit.c 	dst_link_failure(skb);
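
The tail of __ip_vs_get_out_rt() above shows the standard way to rebind an skb to a freshly looked-up route: drop the old dst, then attach the new one either with a reference (dst_clone()) or, when the dst is known to outlive the packet in the current RCU section, without one (skb_dst_set_noref()). A sketch, with the noref decision left to the caller:

#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>

/* Rebind skb to rt.  The noref variant is only legal when the caller
 * guarantees the dst outlives the skb, e.g. a per-destination cache
 * used inside an RCU read-side section.
 */
static void rebind_dst(struct sk_buff *skb, struct rtable *rt, bool noref)
{
	skb_dst_drop(skb);	/* release the previous route, if any */
	if (noref)
		skb_dst_set_noref(skb, &rt->dst);
	else
		skb_dst_set(skb, dst_clone(&rt->dst));
}
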
skb               472 net/netfilter/ipvs/ip_vs_xmit.c __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
skb               528 net/netfilter/ipvs/ip_vs_xmit.c 	if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
skb               542 net/netfilter/ipvs/ip_vs_xmit.c 	if (!decrement_ttl(ipvs, skb_af, skb))
skb               556 net/netfilter/ipvs/ip_vs_xmit.c 			    skb->ip_summed == CHECKSUM_PARTIAL)
skb               570 net/netfilter/ipvs/ip_vs_xmit.c 		maybe_update_pmtu(skb_af, skb, mtu);
skb               573 net/netfilter/ipvs/ip_vs_xmit.c 	if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
skb               576 net/netfilter/ipvs/ip_vs_xmit.c 	skb_dst_drop(skb);
skb               579 net/netfilter/ipvs/ip_vs_xmit.c 			skb_dst_set_noref(skb, &rt->dst);
skb               581 net/netfilter/ipvs/ip_vs_xmit.c 			skb_dst_set(skb, dst_clone(&rt->dst));
skb               583 net/netfilter/ipvs/ip_vs_xmit.c 		skb_dst_set(skb, &rt->dst);
skb               597 net/netfilter/ipvs/ip_vs_xmit.c 	if (!skb->dev)
skb               598 net/netfilter/ipvs/ip_vs_xmit.c 		skb->dev = skb_dst(skb)->dev;
skb               600 net/netfilter/ipvs/ip_vs_xmit.c 	dst_link_failure(skb);
skb               607 net/netfilter/ipvs/ip_vs_xmit.c static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
skb               612 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ipvs_property = 1;
skb               614 net/netfilter/ipvs/ip_vs_xmit.c 		ret = ip_vs_confirm_conntrack(skb);
skb               616 net/netfilter/ipvs/ip_vs_xmit.c 		nf_reset_ct(skb);
skb               617 net/netfilter/ipvs/ip_vs_xmit.c 		skb_forward_csum(skb);
skb               628 net/netfilter/ipvs/ip_vs_xmit.c static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
skb               633 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb->dev)
skb               634 net/netfilter/ipvs/ip_vs_xmit.c 		skb_orphan(skb);
skb               638 net/netfilter/ipvs/ip_vs_xmit.c static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
skb               643 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ipvs_property = 1;
skb               645 net/netfilter/ipvs/ip_vs_xmit.c 		ip_vs_notrack(skb);
skb               647 net/netfilter/ipvs/ip_vs_xmit.c 		ip_vs_update_conntrack(skb, cp, 1);
skb               654 net/netfilter/ipvs/ip_vs_xmit.c 		ip_vs_drop_early_demux_sk(skb);
skb               657 net/netfilter/ipvs/ip_vs_xmit.c 		skb_forward_csum(skb);
skb               658 net/netfilter/ipvs/ip_vs_xmit.c 		NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
skb               659 net/netfilter/ipvs/ip_vs_xmit.c 			NULL, skb_dst(skb)->dev, dst_output);
skb               667 net/netfilter/ipvs/ip_vs_xmit.c static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
skb               672 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ipvs_property = 1;
skb               674 net/netfilter/ipvs/ip_vs_xmit.c 		ip_vs_notrack(skb);
skb               676 net/netfilter/ipvs/ip_vs_xmit.c 		ip_vs_drop_early_demux_sk(skb);
skb               677 net/netfilter/ipvs/ip_vs_xmit.c 		skb_forward_csum(skb);
skb               678 net/netfilter/ipvs/ip_vs_xmit.c 		NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
skb               679 net/netfilter/ipvs/ip_vs_xmit.c 			NULL, skb_dst(skb)->dev, dst_output);
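
Both send_or_cont variants above end the same way: the packet is re-injected at NF_INET_LOCAL_OUT so the remaining output-chain hooks still run, with dst_output() as the continuation once every hook accepts. A sketch of that transmit tail (net and pf supplied by the caller):

#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <net/dst.h>

/* Hand the skb to the LOCAL_OUT hook chain; dst_output() finishes
 * delivery along the dst already attached to the skb.
 */
static int xmit_via_local_out(u8 pf, struct net *net, struct sk_buff *skb)
{
	/* CHECKSUM_COMPLETE is meaningless once the packet is re-routed */
	skb_forward_csum(skb);
	return NF_HOOK(pf, NF_INET_LOCAL_OUT, net, NULL, skb,
		       NULL, skb_dst(skb)->dev, dst_output);
}
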
skb               690 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb               694 net/netfilter/ipvs/ip_vs_xmit.c 	return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
skb               704 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb               707 net/netfilter/ipvs/ip_vs_xmit.c 	struct iphdr  *iph = ip_hdr(skb);
skb               711 net/netfilter/ipvs/ip_vs_xmit.c 	if (__ip_vs_get_out_rt(cp->ipvs, cp->af, skb, NULL, iph->daddr,
skb               718 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ignore_df = 1;
skb               720 net/netfilter/ipvs/ip_vs_xmit.c 	ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
skb               726 net/netfilter/ipvs/ip_vs_xmit.c 	kfree_skb(skb);
skb               733 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb               736 net/netfilter/ipvs/ip_vs_xmit.c 	struct ipv6hdr *iph = ipv6_hdr(skb);
skb               740 net/netfilter/ipvs/ip_vs_xmit.c 	if (__ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, NULL,
skb               746 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ignore_df = 1;
skb               748 net/netfilter/ipvs/ip_vs_xmit.c 	ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
skb               754 net/netfilter/ipvs/ip_vs_xmit.c 	kfree_skb(skb);
skb               765 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb               777 net/netfilter/ipvs/ip_vs_xmit.c 		p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
skb               784 net/netfilter/ipvs/ip_vs_xmit.c 	was_input = rt_is_input_route(skb_rtable(skb));
skb               785 net/netfilter/ipvs/ip_vs_xmit.c 	local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
skb               791 net/netfilter/ipvs/ip_vs_xmit.c 	rt = skb_rtable(skb);
skb               799 net/netfilter/ipvs/ip_vs_xmit.c 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               802 net/netfilter/ipvs/ip_vs_xmit.c 			IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, ipvsh->off,
skb               812 net/netfilter/ipvs/ip_vs_xmit.c 		IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, ipvsh->off,
skb               819 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_ensure_writable(skb, sizeof(struct iphdr)))
skb               822 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
skb               826 net/netfilter/ipvs/ip_vs_xmit.c 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
skb               828 net/netfilter/ipvs/ip_vs_xmit.c 	ip_hdr(skb)->daddr = cp->daddr.ip;
skb               829 net/netfilter/ipvs/ip_vs_xmit.c 	ip_send_check(ip_hdr(skb));
skb               831 net/netfilter/ipvs/ip_vs_xmit.c 	IP_VS_DBG_PKT(10, AF_INET, pp, skb, ipvsh->off, "After DNAT");
skb               838 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ignore_df = 1;
skb               840 net/netfilter/ipvs/ip_vs_xmit.c 	rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
skb               846 net/netfilter/ipvs/ip_vs_xmit.c 	kfree_skb(skb);
skb               853 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb               864 net/netfilter/ipvs/ip_vs_xmit.c 		p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
skb               871 net/netfilter/ipvs/ip_vs_xmit.c 	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
skb               879 net/netfilter/ipvs/ip_vs_xmit.c 	rt = (struct rt6_info *) skb_dst(skb);
skb               887 net/netfilter/ipvs/ip_vs_xmit.c 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               890 net/netfilter/ipvs/ip_vs_xmit.c 			IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, ipvsh->off,
skb               899 net/netfilter/ipvs/ip_vs_xmit.c 	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
skb               901 net/netfilter/ipvs/ip_vs_xmit.c 		IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, ipvsh->off,
skb               908 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
skb               911 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
skb               915 net/netfilter/ipvs/ip_vs_xmit.c 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
skb               917 net/netfilter/ipvs/ip_vs_xmit.c 	ipv6_hdr(skb)->daddr = cp->daddr.in6;
skb               919 net/netfilter/ipvs/ip_vs_xmit.c 	IP_VS_DBG_PKT(10, AF_INET6, pp, skb, ipvsh->off, "After DNAT");
skb               926 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ignore_df = 1;
skb               928 net/netfilter/ipvs/ip_vs_xmit.c 	rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
skb               935 net/netfilter/ipvs/ip_vs_xmit.c 	kfree_skb(skb);
skb               947 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
skb               959 net/netfilter/ipvs/ip_vs_xmit.c 	ip_vs_drop_early_demux_sk(skb);
skb               961 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
skb               962 net/netfilter/ipvs/ip_vs_xmit.c 		new_skb = skb_realloc_headroom(skb, max_headroom);
skb               965 net/netfilter/ipvs/ip_vs_xmit.c 		if (skb->sk)
skb               966 net/netfilter/ipvs/ip_vs_xmit.c 			skb_set_owner_w(new_skb, skb->sk);
skb               967 net/netfilter/ipvs/ip_vs_xmit.c 		consume_skb(skb);
skb               968 net/netfilter/ipvs/ip_vs_xmit.c 		skb = new_skb;
skb               973 net/netfilter/ipvs/ip_vs_xmit.c 		old_ipv6h = ipv6_hdr(skb);
skb               986 net/netfilter/ipvs/ip_vs_xmit.c 		old_iph = ip_hdr(skb);
skb              1003 net/netfilter/ipvs/ip_vs_xmit.c 	return skb;
skb              1005 net/netfilter/ipvs/ip_vs_xmit.c 	kfree_skb(skb);
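
ip_vs_prepare_tunneled_skb(), indexed above, has to guarantee headroom for the outer headers before encapsulation. The pattern: if the skb is too shallow or cloned, reallocate, migrate socket ownership so memory accounting stays correct, and release the old buffer with consume_skb() since it was replaced rather than dropped. A sketch under those assumptions:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Ensure max_headroom bytes of headroom.  On failure the skb is freed
 * and NULL is returned; on success the (possibly replaced) skb is
 * returned.
 */
static struct sk_buff *make_tunnel_room(struct sk_buff *skb,
					unsigned int max_headroom)
{
	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, max_headroom);

		if (!nskb) {
			kfree_skb(skb);
			return NULL;
		}
		if (skb->sk)	/* keep socket memory accounting intact */
			skb_set_owner_w(nskb, skb->sk);
		consume_skb(skb);	/* replaced, not dropped */
		skb = nskb;
	}
	return skb;
}
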
skb              1022 net/netfilter/ipvs/ip_vs_xmit.c ipvs_gue_encap(struct net *net, struct sk_buff *skb,
skb              1026 net/netfilter/ipvs/ip_vs_xmit.c 	__be16 sport = udp_flow_src_port(net, skb, 0, 0, false);
skb              1034 net/netfilter/ipvs/ip_vs_xmit.c 	    skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1041 net/netfilter/ipvs/ip_vs_xmit.c 	skb_push(skb, hdrlen);
skb              1043 net/netfilter/ipvs/ip_vs_xmit.c 	gueh = (struct guehdr *)skb->data;
skb              1055 net/netfilter/ipvs/ip_vs_xmit.c 		u16 csum_start = skb_checksum_start_offset(skb);
skb              1068 net/netfilter/ipvs/ip_vs_xmit.c 		pd[1] = htons(csum_start + skb->csum_offset);
skb              1070 net/netfilter/ipvs/ip_vs_xmit.c 		if (!skb_is_gso(skb)) {
skb              1071 net/netfilter/ipvs/ip_vs_xmit.c 			skb->ip_summed = CHECKSUM_NONE;
skb              1072 net/netfilter/ipvs/ip_vs_xmit.c 			skb->encapsulation = 0;
skb              1079 net/netfilter/ipvs/ip_vs_xmit.c 	skb_push(skb, sizeof(struct udphdr));
skb              1080 net/netfilter/ipvs/ip_vs_xmit.c 	skb_reset_transport_header(skb);
skb              1082 net/netfilter/ipvs/ip_vs_xmit.c 	udph = udp_hdr(skb);
skb              1087 net/netfilter/ipvs/ip_vs_xmit.c 	udph->len = htons(skb->len);
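
After the GUE header, ipvs_gue_encap() pushes the outer UDP header in front of it, as the last few entries show. The same three steps apply to any UDP-based encapsulation: push room, reset the transport header so udp_hdr() points at it, and fill in the fields. A sketch; port selection, e.g. via udp_flow_src_port(), is left to the caller:

#include <linux/skbuff.h>
#include <linux/udp.h>

/* Prepend the outer UDP header of an encapsulated packet. */
static void push_outer_udp(struct sk_buff *skb, __be16 sport, __be16 dport)
{
	struct udphdr *udph;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	udph = udp_hdr(skb);
	udph->source = sport;
	udph->dest   = dport;
	udph->len    = htons(skb->len);	/* covers header plus payload */
	udph->check  = 0;		/* set later, e.g. by udp_set_csum() */
}
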
skb              1096 net/netfilter/ipvs/ip_vs_xmit.c ipvs_gre_encap(struct net *net, struct sk_buff *skb,
skb              1108 net/netfilter/ipvs/ip_vs_xmit.c 	gre_build_header(skb, hdrlen, tflags, proto, 0, 0);
skb              1133 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1154 net/netfilter/ipvs/ip_vs_xmit.c 	local = __ip_vs_get_out_rt(ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
skb              1162 net/netfilter/ipvs/ip_vs_xmit.c 		return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
skb              1164 net/netfilter/ipvs/ip_vs_xmit.c 	rt = skb_rtable(skb);
skb              1179 net/netfilter/ipvs/ip_vs_xmit.c 		    skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1198 net/netfilter/ipvs/ip_vs_xmit.c 	skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
skb              1201 net/netfilter/ipvs/ip_vs_xmit.c 	if (IS_ERR(skb))
skb              1212 net/netfilter/ipvs/ip_vs_xmit.c 		    skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1222 net/netfilter/ipvs/ip_vs_xmit.c 	if (iptunnel_handle_offloads(skb, gso_type))
skb              1225 net/netfilter/ipvs/ip_vs_xmit.c 	skb->transport_header = skb->network_header;
skb              1227 net/netfilter/ipvs/ip_vs_xmit.c 	skb_set_inner_ipproto(skb, next_protocol);
skb              1232 net/netfilter/ipvs/ip_vs_xmit.c 		if (ipvs_gue_encap(net, skb, cp, &next_protocol))
skb              1239 net/netfilter/ipvs/ip_vs_xmit.c 		udp_set_csum(!check, skb, saddr, cp->daddr.ip, skb->len);
skb              1241 net/netfilter/ipvs/ip_vs_xmit.c 		ipvs_gre_encap(net, skb, cp, &next_protocol);
skb              1243 net/netfilter/ipvs/ip_vs_xmit.c 	skb_push(skb, sizeof(struct iphdr));
skb              1244 net/netfilter/ipvs/ip_vs_xmit.c 	skb_reset_network_header(skb);
skb              1245 net/netfilter/ipvs/ip_vs_xmit.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb              1250 net/netfilter/ipvs/ip_vs_xmit.c 	iph			=	ip_hdr(skb);
skb              1259 net/netfilter/ipvs/ip_vs_xmit.c 	ip_select_ident(net, skb, NULL);
skb              1262 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ignore_df = 1;
skb              1264 net/netfilter/ipvs/ip_vs_xmit.c 	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
skb              1266 net/netfilter/ipvs/ip_vs_xmit.c 		ip_local_out(net, skb->sk, skb);
skb              1268 net/netfilter/ipvs/ip_vs_xmit.c 		kfree_skb(skb);
skb              1275 net/netfilter/ipvs/ip_vs_xmit.c 	if (!IS_ERR(skb))
skb              1276 net/netfilter/ipvs/ip_vs_xmit.c 		kfree_skb(skb);
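
The body of ip_vs_tunnel_xmit() above builds the outer IPv4 header by hand and sends through ip_local_out(). A sketch of just the header construction; the field sources (tos, ttl, addresses, protocol) are illustrative parameters here, and ip_local_out() finalizes tot_len and the header checksum:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/ip.h>

/* Build the outer IPv4 header of a tunnel packet. */
static void build_outer_iph(struct net *net, struct sk_buff *skb,
			    __u8 tos, __u8 ttl, __u8 protocol,
			    __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	/* stale inner-header control-block state must not leak out */
	memset(&IPCB(skb)->opt, 0, sizeof(IPCB(skb)->opt));

	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr) >> 2;
	iph->tos      = tos;
	iph->frag_off = 0;
	iph->ttl      = ttl;
	iph->protocol = protocol;
	iph->saddr    = saddr;
	iph->daddr    = daddr;
	ip_select_ident(net, skb, NULL);	/* choose the outer IP ID */
}
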
skb              1283 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1303 net/netfilter/ipvs/ip_vs_xmit.c 	local = __ip_vs_get_out_rt_v6(ipvs, cp->af, skb, cp->dest,
skb              1312 net/netfilter/ipvs/ip_vs_xmit.c 		return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
skb              1314 net/netfilter/ipvs/ip_vs_xmit.c 	rt = (struct rt6_info *) skb_dst(skb);
skb              1329 net/netfilter/ipvs/ip_vs_xmit.c 		    skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1346 net/netfilter/ipvs/ip_vs_xmit.c 	skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
skb              1349 net/netfilter/ipvs/ip_vs_xmit.c 	if (IS_ERR(skb))
skb              1360 net/netfilter/ipvs/ip_vs_xmit.c 		    skb->ip_summed == CHECKSUM_PARTIAL) {
skb              1370 net/netfilter/ipvs/ip_vs_xmit.c 	if (iptunnel_handle_offloads(skb, gso_type))
skb              1373 net/netfilter/ipvs/ip_vs_xmit.c 	skb->transport_header = skb->network_header;
skb              1375 net/netfilter/ipvs/ip_vs_xmit.c 	skb_set_inner_ipproto(skb, next_protocol);
skb              1380 net/netfilter/ipvs/ip_vs_xmit.c 		if (ipvs_gue_encap(net, skb, cp, &next_protocol))
skb              1387 net/netfilter/ipvs/ip_vs_xmit.c 		udp6_set_csum(!check, skb, &saddr, &cp->daddr.in6, skb->len);
skb              1389 net/netfilter/ipvs/ip_vs_xmit.c 		ipvs_gre_encap(net, skb, cp, &next_protocol);
skb              1391 net/netfilter/ipvs/ip_vs_xmit.c 	skb_push(skb, sizeof(struct ipv6hdr));
skb              1392 net/netfilter/ipvs/ip_vs_xmit.c 	skb_reset_network_header(skb);
skb              1393 net/netfilter/ipvs/ip_vs_xmit.c 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
skb              1398 net/netfilter/ipvs/ip_vs_xmit.c 	iph			=	ipv6_hdr(skb);
skb              1409 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ignore_df = 1;
skb              1411 net/netfilter/ipvs/ip_vs_xmit.c 	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
skb              1413 net/netfilter/ipvs/ip_vs_xmit.c 		ip6_local_out(net, skb->sk, skb);
skb              1415 net/netfilter/ipvs/ip_vs_xmit.c 		kfree_skb(skb);
skb              1422 net/netfilter/ipvs/ip_vs_xmit.c 	if (!IS_ERR(skb))
skb              1423 net/netfilter/ipvs/ip_vs_xmit.c 		kfree_skb(skb);
skb              1435 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1442 net/netfilter/ipvs/ip_vs_xmit.c 	local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
skb              1449 net/netfilter/ipvs/ip_vs_xmit.c 		return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
skb              1451 net/netfilter/ipvs/ip_vs_xmit.c 	ip_send_check(ip_hdr(skb));
skb              1454 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ignore_df = 1;
skb              1456 net/netfilter/ipvs/ip_vs_xmit.c 	ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
skb              1462 net/netfilter/ipvs/ip_vs_xmit.c 	kfree_skb(skb);
skb              1469 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1476 net/netfilter/ipvs/ip_vs_xmit.c 	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
skb              1485 net/netfilter/ipvs/ip_vs_xmit.c 		return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
skb              1488 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ignore_df = 1;
skb              1490 net/netfilter/ipvs/ip_vs_xmit.c 	ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
skb              1496 net/netfilter/ipvs/ip_vs_xmit.c 	kfree_skb(skb);
skb              1508 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1524 net/netfilter/ipvs/ip_vs_xmit.c 			rc = cp->packet_xmit(skb, cp, pp, iph);
skb              1535 net/netfilter/ipvs/ip_vs_xmit.c 	was_input = rt_is_input_route(skb_rtable(skb));
skb              1541 net/netfilter/ipvs/ip_vs_xmit.c 	local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip, rt_mode,
skb              1545 net/netfilter/ipvs/ip_vs_xmit.c 	rt = skb_rtable(skb);
skb              1554 net/netfilter/ipvs/ip_vs_xmit.c 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1574 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_ensure_writable(skb, offset))
skb              1577 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
skb              1580 net/netfilter/ipvs/ip_vs_xmit.c 	ip_vs_nat_icmp(skb, pp, cp, 0);
skb              1583 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ignore_df = 1;
skb              1585 net/netfilter/ipvs/ip_vs_xmit.c 	rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
skb              1589 net/netfilter/ipvs/ip_vs_xmit.c 	kfree_skb(skb);
skb              1598 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb              1614 net/netfilter/ipvs/ip_vs_xmit.c 			rc = cp->packet_xmit(skb, cp, pp, ipvsh);
skb              1630 net/netfilter/ipvs/ip_vs_xmit.c 	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
skb              1634 net/netfilter/ipvs/ip_vs_xmit.c 	rt = (struct rt6_info *) skb_dst(skb);
skb              1642 net/netfilter/ipvs/ip_vs_xmit.c 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1654 net/netfilter/ipvs/ip_vs_xmit.c 	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
skb              1663 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_ensure_writable(skb, offset))
skb              1666 net/netfilter/ipvs/ip_vs_xmit.c 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
skb              1669 net/netfilter/ipvs/ip_vs_xmit.c 	ip_vs_nat_icmp_v6(skb, pp, cp, 0);
skb              1672 net/netfilter/ipvs/ip_vs_xmit.c 	skb->ignore_df = 1;
skb              1674 net/netfilter/ipvs/ip_vs_xmit.c 	rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
skb              1678 net/netfilter/ipvs/ip_vs_xmit.c 	kfree_skb(skb);
skb                40 net/netfilter/nf_conntrack_amanda.c unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
skb                89 net/netfilter/nf_conntrack_amanda.c static int amanda_help(struct sk_buff *skb,
skb               109 net/netfilter/nf_conntrack_amanda.c 	nf_ct_refresh(ct, skb, master_timeout * HZ);
skb               113 net/netfilter/nf_conntrack_amanda.c 	if (dataoff >= skb->len) {
skb               114 net/netfilter/nf_conntrack_amanda.c 		net_err_ratelimited("amanda_help: skblen = %u\n", skb->len);
skb               118 net/netfilter/nf_conntrack_amanda.c 	start = skb_find_text(skb, dataoff, skb->len,
skb               124 net/netfilter/nf_conntrack_amanda.c 	stop = skb_find_text(skb, start, skb->len,
skb               131 net/netfilter/nf_conntrack_amanda.c 		off = skb_find_text(skb, start, stop, search[i].ts);
skb               137 net/netfilter/nf_conntrack_amanda.c 		if (skb_copy_bits(skb, off, pbuf, len))
skb               148 net/netfilter/nf_conntrack_amanda.c 			nf_ct_helper_log(skb, ct, "cannot alloc expectation");
skb               160 net/netfilter/nf_conntrack_amanda.c 			ret = nf_nat_amanda(skb, ctinfo, protoff,
skb               163 net/netfilter/nf_conntrack_amanda.c 			nf_ct_helper_log(skb, ct, "cannot add expectation");
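
The Amanda helper above parses its control payload with the kernel textsearch API: skb_find_text() walks even non-linear skbs and returns UINT_MAX when the pattern is absent. A sketch, assuming the four-argument skb_find_text() of recent kernels; real helpers prepare the ts_config once at module init rather than per packet:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/textsearch.h>

/* Find "CONNECT " in the payload starting at dataoff; returns the
 * offset, or a negative value if absent or on setup failure.
 */
static int find_connect_token(struct sk_buff *skb, unsigned int dataoff)
{
	struct ts_config *ts;
	unsigned int pos;

	ts = textsearch_prepare("kmp", "CONNECT ", 8, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(ts))
		return PTR_ERR(ts);

	pos = skb_find_text(skb, dataoff, skb->len, ts);
	textsearch_destroy(ts);

	return pos == UINT_MAX ? -1 : (int)pos;
}
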
skb                18 net/netfilter/nf_conntrack_broadcast.c int nf_conntrack_broadcast_help(struct sk_buff *skb,
skb                24 net/netfilter/nf_conntrack_broadcast.c 	struct iphdr *iph = ip_hdr(skb);
skb                25 net/netfilter/nf_conntrack_broadcast.c 	struct rtable *rt = skb_rtable(skb);
skb                31 net/netfilter/nf_conntrack_broadcast.c 	if (skb->sk == NULL || !net_eq(nf_ct_net(ct), sock_net(skb->sk)))
skb                74 net/netfilter/nf_conntrack_broadcast.c 	nf_ct_refresh(ct, skb, timeout * HZ);
skb               221 net/netfilter/nf_conntrack_core.c static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
skb               230 net/netfilter/nf_conntrack_core.c 	inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
skb               240 net/netfilter/nf_conntrack_core.c nf_ct_get_tuple(const struct sk_buff *skb,
skb               268 net/netfilter/nf_conntrack_core.c 	ap = skb_header_pointer(skb, nhoff, size, _addrs);
skb               289 net/netfilter/nf_conntrack_core.c 		return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
skb               292 net/netfilter/nf_conntrack_core.c 		return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
skb               295 net/netfilter/nf_conntrack_core.c 		return gre_pkt_to_tuple(skb, dataoff, net, tuple);
skb               299 net/netfilter/nf_conntrack_core.c 		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
skb               302 net/netfilter/nf_conntrack_core.c 		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
skb               306 net/netfilter/nf_conntrack_core.c 		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
skb               310 net/netfilter/nf_conntrack_core.c 		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
skb               319 net/netfilter/nf_conntrack_core.c static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
skb               326 net/netfilter/nf_conntrack_core.c 	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
skb               340 net/netfilter/nf_conntrack_core.c 	if (dataoff > skb->len) {
skb               342 net/netfilter/nf_conntrack_core.c 			 nhoff, iph->ihl << 2, skb->len);
skb               349 net/netfilter/nf_conntrack_core.c static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
skb               357 net/netfilter/nf_conntrack_core.c 	if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
skb               362 net/netfilter/nf_conntrack_core.c 	protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
skb               377 net/netfilter/nf_conntrack_core.c static int get_l4proto(const struct sk_buff *skb,
skb               382 net/netfilter/nf_conntrack_core.c 		return ipv4_get_l4proto(skb, nhoff, l4num);
skb               385 net/netfilter/nf_conntrack_core.c 		return ipv6_get_l4proto(skb, nhoff, l4num);
skb               394 net/netfilter/nf_conntrack_core.c bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
skb               401 net/netfilter/nf_conntrack_core.c 	protoff = get_l4proto(skb, nhoff, l3num, &protonum);
skb               405 net/netfilter/nf_conntrack_core.c 	return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
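
nf_ct_get_tuple_ports(), indexed above, exploits the fact that every port-based transport protocol the tracker supports (TCP, UDP, UDP-Lite, SCTP, DCCP) begins with a source and a destination port, so one four-byte skb_header_pointer() read covers them all. A sketch of that trick; the struct name is illustrative:

#include <linux/skbuff.h>
#include <linux/types.h>

/* Shared 4-byte prefix of the port-based transport headers. */
struct ports_prefix {
	__be16 sport;
	__be16 dport;
};

static bool get_ports(const struct sk_buff *skb, unsigned int dataoff,
		      __be16 *sport, __be16 *dport)
{
	struct ports_prefix _hdr;
	const struct ports_prefix *hdr;

	/* copies from paged data into _hdr when the bytes are not linear */
	hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	*sport = hdr->sport;
	*dport = hdr->dport;
	return true;
}
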
skb               899 net/netfilter/nf_conntrack_core.c static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
skb               907 net/netfilter/nf_conntrack_core.c 	struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
skb               917 net/netfilter/nf_conntrack_core.c 			nf_ct_set(skb, ct, oldinfo);
skb               928 net/netfilter/nf_conntrack_core.c __nf_conntrack_confirm(struct sk_buff *skb)
skb               942 net/netfilter/nf_conntrack_core.c 	ct = nf_ct_get(skb, &ctinfo);
skb              1039 net/netfilter/nf_conntrack_core.c 	ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
skb              1433 net/netfilter/nf_conntrack_core.c 	       struct sk_buff *skb,
skb              1450 net/netfilter/nf_conntrack_core.c 	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
skb              1524 net/netfilter/nf_conntrack_core.c 		  struct sk_buff *skb,
skb              1537 net/netfilter/nf_conntrack_core.c 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
skb              1545 net/netfilter/nf_conntrack_core.c 	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
skb              1550 net/netfilter/nf_conntrack_core.c 				   skb, dataoff, hash);
skb              1574 net/netfilter/nf_conntrack_core.c 	nf_ct_set(skb, ct, ctinfo);
skb              1587 net/netfilter/nf_conntrack_core.c 			 struct sk_buff *skb,
skb              1595 net/netfilter/nf_conntrack_core.c 		ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
skb              1598 net/netfilter/nf_conntrack_core.c 		ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
skb              1611 net/netfilter/nf_conntrack_core.c static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
skb              1619 net/netfilter/nf_conntrack_core.c 	nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
skb              1625 net/netfilter/nf_conntrack_core.c 				      struct sk_buff *skb,
skb              1632 net/netfilter/nf_conntrack_core.c 		return nf_conntrack_tcp_packet(ct, skb, dataoff,
skb              1635 net/netfilter/nf_conntrack_core.c 		return nf_conntrack_udp_packet(ct, skb, dataoff,
skb              1638 net/netfilter/nf_conntrack_core.c 		return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
skb              1641 net/netfilter/nf_conntrack_core.c 		return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
skb              1645 net/netfilter/nf_conntrack_core.c 		return nf_conntrack_udplite_packet(ct, skb, dataoff,
skb              1650 net/netfilter/nf_conntrack_core.c 		return nf_conntrack_sctp_packet(ct, skb, dataoff,
skb              1655 net/netfilter/nf_conntrack_core.c 		return nf_conntrack_dccp_packet(ct, skb, dataoff,
skb              1660 net/netfilter/nf_conntrack_core.c 		return nf_conntrack_gre_packet(ct, skb, dataoff,
skb              1665 net/netfilter/nf_conntrack_core.c 	return generic_packet(ct, skb, ctinfo);
skb              1669 net/netfilter/nf_conntrack_core.c nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
skb              1676 net/netfilter/nf_conntrack_core.c 	tmpl = nf_ct_get(skb, &ctinfo);
skb              1684 net/netfilter/nf_conntrack_core.c 		skb->_nfct = 0;
skb              1688 net/netfilter/nf_conntrack_core.c 	dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
skb              1698 net/netfilter/nf_conntrack_core.c 		ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
skb              1705 net/netfilter/nf_conntrack_core.c 		if (skb->_nfct)
skb              1709 net/netfilter/nf_conntrack_core.c 	ret = resolve_normal_ct(tmpl, skb, dataoff,
skb              1718 net/netfilter/nf_conntrack_core.c 	ct = nf_ct_get(skb, &ctinfo);
skb              1726 net/netfilter/nf_conntrack_core.c 	ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
skb              1732 net/netfilter/nf_conntrack_core.c 		skb->_nfct = 0;
skb              1783 net/netfilter/nf_conntrack_core.c 			  const struct sk_buff *skb,
skb              1799 net/netfilter/nf_conntrack_core.c 		nf_ct_acct_update(ct, ctinfo, skb->len);
skb              1805 net/netfilter/nf_conntrack_core.c 		     const struct sk_buff *skb)
skb              1807 net/netfilter/nf_conntrack_core.c 	nf_ct_acct_update(ct, ctinfo, skb->len);
skb              1820 net/netfilter/nf_conntrack_core.c int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
skb              1823 net/netfilter/nf_conntrack_core.c 	if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
skb              1824 net/netfilter/nf_conntrack_core.c 	    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
skb              1865 net/netfilter/nf_conntrack_core.c static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
skb              1871 net/netfilter/nf_conntrack_core.c 	ct = nf_ct_get(skb, &ctinfo);
skb              1882 net/netfilter/nf_conntrack_core.c static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
skb              1896 net/netfilter/nf_conntrack_core.c 	dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
skb              1900 net/netfilter/nf_conntrack_core.c 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
skb              1931 net/netfilter/nf_conntrack_core.c 	nf_ct_set(skb, ct, ctinfo);
skb              1938 net/netfilter/nf_conntrack_core.c 	    nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
skb              1943 net/netfilter/nf_conntrack_core.c 	    nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
skb              1953 net/netfilter/nf_conntrack_core.c static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
skb              1970 net/netfilter/nf_conntrack_core.c 		protoff = skb_network_offset(skb) + ip_hdrlen(skb);
skb              1977 net/netfilter/nf_conntrack_core.c 		pnum = ipv6_hdr(skb)->nexthdr;
skb              1978 net/netfilter/nf_conntrack_core.c 		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
skb              1990 net/netfilter/nf_conntrack_core.c 	    !nf_is_loopback_packet(skb)) {
skb              1991 net/netfilter/nf_conntrack_core.c 		if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
skb              1998 net/netfilter/nf_conntrack_core.c 	return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
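
nf_confirm_cthelper(), indexed above, first has to locate the transport header: simple arithmetic for IPv4, an extension-header walk for IPv6 that also rejects non-first fragments. A sketch of that lookup, mirroring the frag_off idiom the conntrack code uses:

#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* Return the transport-header offset and protocol, or -1 if the
 * header chain cannot be parsed (or is a non-first IPv6 fragment).
 */
static int l4_offset(const struct sk_buff *skb, u8 l3num, u8 *l4num)
{
	if (l3num == NFPROTO_IPV4) {
		*l4num = ip_hdr(skb)->protocol;
		return skb_network_offset(skb) + ip_hdrlen(skb);
	}
	if (l3num == NFPROTO_IPV6) {
		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
		__be16 frag_off;
		int protoff;

		protoff = ipv6_skip_exthdr(skb, skb_network_offset(skb) +
					   sizeof(struct ipv6hdr),
					   &nexthdr, &frag_off);
		if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
			return -1;
		*l4num = nexthdr;
		return protoff;
	}
	return -1;
}
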
skb              2001 net/netfilter/nf_conntrack_core.c static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
skb              2007 net/netfilter/nf_conntrack_core.c 	ct = nf_ct_get(skb, &ctinfo);
skb              2012 net/netfilter/nf_conntrack_core.c 		err = __nf_conntrack_update(net, skb, ct, ctinfo);
skb              2017 net/netfilter/nf_conntrack_core.c 	return nf_confirm_cthelper(skb, ct, ctinfo);
skb              2021 net/netfilter/nf_conntrack_core.c 				       const struct sk_buff *skb)
skb              2029 net/netfilter/nf_conntrack_core.c 	ct = nf_ct_get(skb, &ctinfo);
skb              2036 net/netfilter/nf_conntrack_core.c 	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
skb              2037 net/netfilter/nf_conntrack_core.c 			       NFPROTO_IPV4, dev_net(skb->dev),
skb              2041 net/netfilter/nf_conntrack_core.c 	hash = nf_conntrack_find_get(dev_net(skb->dev),
skb                50 net/netfilter/nf_conntrack_ftp.c unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
skb               350 net/netfilter/nf_conntrack_ftp.c 			  struct sk_buff *skb)
skb               373 net/netfilter/nf_conntrack_ftp.c static int help(struct sk_buff *skb,
skb               401 net/netfilter/nf_conntrack_ftp.c 	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
skb               407 net/netfilter/nf_conntrack_ftp.c 	if (dataoff >= skb->len) {
skb               409 net/netfilter/nf_conntrack_ftp.c 			 skb->len);
skb               412 net/netfilter/nf_conntrack_ftp.c 	datalen = skb->len - dataoff;
skb               415 net/netfilter/nf_conntrack_ftp.c 	fb_ptr = skb_header_pointer(skb, dataoff, datalen, ftp_buffer);
skb               462 net/netfilter/nf_conntrack_ftp.c 		nf_ct_helper_log(skb, ct, "partial matching of `%s'",
skb               477 net/netfilter/nf_conntrack_ftp.c 		nf_ct_helper_log(skb, ct, "cannot alloc expectation");
skb               524 net/netfilter/nf_conntrack_ftp.c 		ret = nf_nat_ftp(skb, ctinfo, search[dir][i].ftptype,
skb               529 net/netfilter/nf_conntrack_ftp.c 			nf_ct_helper_log(skb, ct, "cannot add expectation");
skb               542 net/netfilter/nf_conntrack_ftp.c 		update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
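
The FTP helper above opens with the preamble common to all TCP-based conntrack helpers: read the TCP header through skb_header_pointer(), compute the payload offset from doff, and ignore segments that carry no payload. A sketch:

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Return the offset of the TCP payload, or -1 when there is none. */
static int tcp_payload_offset(const struct sk_buff *skb, unsigned int protoff)
{
	struct tcphdr _tcph;
	const struct tcphdr *th;
	unsigned int dataoff;

	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (!th)
		return -1;

	dataoff = protoff + th->doff * 4;	/* doff counts 32-bit words */
	if (dataoff >= skb->len)
		return -1;			/* header-only segment */
	return dataoff;
}
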
skb                53 net/netfilter/nf_conntrack_h323_main.c int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
skb                58 net/netfilter/nf_conntrack_h323_main.c int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
skb                63 net/netfilter/nf_conntrack_h323_main.c int (*set_sig_addr_hook) (struct sk_buff *skb,
skb                68 net/netfilter/nf_conntrack_h323_main.c int (*set_ras_addr_hook) (struct sk_buff *skb,
skb                73 net/netfilter/nf_conntrack_h323_main.c int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
skb                82 net/netfilter/nf_conntrack_h323_main.c int (*nat_t120_hook) (struct sk_buff *skb,
skb                89 net/netfilter/nf_conntrack_h323_main.c int (*nat_h245_hook) (struct sk_buff *skb,
skb                96 net/netfilter/nf_conntrack_h323_main.c int (*nat_callforwarding_hook) (struct sk_buff *skb,
skb               103 net/netfilter/nf_conntrack_h323_main.c int (*nat_q931_hook) (struct sk_buff *skb,
skb               118 net/netfilter/nf_conntrack_h323_main.c static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
skb               133 net/netfilter/nf_conntrack_h323_main.c 	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
skb               141 net/netfilter/nf_conntrack_h323_main.c 	tcpdatalen = skb->len - tcpdataoff;
skb               147 net/netfilter/nf_conntrack_h323_main.c 		tpkt = skb_header_pointer(skb, tcpdataoff, tcpdatalen,
skb               255 net/netfilter/nf_conntrack_h323_main.c static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
skb               305 net/netfilter/nf_conntrack_h323_main.c 		ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
skb               328 net/netfilter/nf_conntrack_h323_main.c static int expect_t120(struct sk_buff *skb,
skb               364 net/netfilter/nf_conntrack_h323_main.c 		ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr,
skb               379 net/netfilter/nf_conntrack_h323_main.c static int process_h245_channel(struct sk_buff *skb,
skb               390 net/netfilter/nf_conntrack_h323_main.c 		ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
skb               399 net/netfilter/nf_conntrack_h323_main.c 		ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
skb               408 net/netfilter/nf_conntrack_h323_main.c static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
skb               421 net/netfilter/nf_conntrack_h323_main.c 		ret = process_h245_channel(skb, ct, ctinfo,
skb               440 net/netfilter/nf_conntrack_h323_main.c 		    process_h245_channel(skb, ct, ctinfo,
skb               459 net/netfilter/nf_conntrack_h323_main.c 		ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff,
skb               469 net/netfilter/nf_conntrack_h323_main.c static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
skb               487 net/netfilter/nf_conntrack_h323_main.c 		ret = process_h245_channel(skb, ct, ctinfo,
skb               507 net/netfilter/nf_conntrack_h323_main.c 			ret = expect_rtp_rtcp(skb, ct, ctinfo,
skb               517 net/netfilter/nf_conntrack_h323_main.c 			ret = expect_rtp_rtcp(skb, ct, ctinfo,
skb               528 net/netfilter/nf_conntrack_h323_main.c 		ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff,
skb               538 net/netfilter/nf_conntrack_h323_main.c static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
skb               547 net/netfilter/nf_conntrack_h323_main.c 			return process_olc(skb, ct, ctinfo,
skb               557 net/netfilter/nf_conntrack_h323_main.c 			return process_olca(skb, ct, ctinfo,
skb               573 net/netfilter/nf_conntrack_h323_main.c static int h245_help(struct sk_buff *skb, unsigned int protoff,
skb               586 net/netfilter/nf_conntrack_h323_main.c 	pr_debug("nf_ct_h245: skblen = %u\n", skb->len);
skb               591 net/netfilter/nf_conntrack_h323_main.c 	while (get_tpkt_data(skb, protoff, ct, ctinfo,
skb               608 net/netfilter/nf_conntrack_h323_main.c 		if (process_h245(skb, ct, ctinfo, protoff,
skb               618 net/netfilter/nf_conntrack_h323_main.c 	nf_ct_helper_log(skb, ct, "cannot process H.245 message");
skb               667 net/netfilter/nf_conntrack_h323_main.c static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
skb               701 net/netfilter/nf_conntrack_h323_main.c 		ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr,
skb               780 net/netfilter/nf_conntrack_h323_main.c static int expect_callforwarding(struct sk_buff *skb,
skb               824 net/netfilter/nf_conntrack_h323_main.c 		ret = nat_callforwarding(skb, ct, ctinfo,
skb               840 net/netfilter/nf_conntrack_h323_main.c static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
skb               856 net/netfilter/nf_conntrack_h323_main.c 		ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
skb               872 net/netfilter/nf_conntrack_h323_main.c 		ret = set_h225_addr(skb, protoff, data, dataoff,
skb               889 net/netfilter/nf_conntrack_h323_main.c 		ret = set_h225_addr(skb, protoff, data, dataoff,
skb               899 net/netfilter/nf_conntrack_h323_main.c 			ret = process_olc(skb, ct, ctinfo,
skb               910 net/netfilter/nf_conntrack_h323_main.c static int process_callproceeding(struct sk_buff *skb,
skb               923 net/netfilter/nf_conntrack_h323_main.c 		ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
skb               931 net/netfilter/nf_conntrack_h323_main.c 			ret = process_olc(skb, ct, ctinfo,
skb               942 net/netfilter/nf_conntrack_h323_main.c static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
skb               954 net/netfilter/nf_conntrack_h323_main.c 		ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
skb               962 net/netfilter/nf_conntrack_h323_main.c 			ret = process_olc(skb, ct, ctinfo,
skb               973 net/netfilter/nf_conntrack_h323_main.c static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
skb               985 net/netfilter/nf_conntrack_h323_main.c 		ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
skb               993 net/netfilter/nf_conntrack_h323_main.c 			ret = process_olc(skb, ct, ctinfo,
skb              1004 net/netfilter/nf_conntrack_h323_main.c static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
skb              1017 net/netfilter/nf_conntrack_h323_main.c 			return expect_callforwarding(skb, ct, ctinfo,
skb              1025 net/netfilter/nf_conntrack_h323_main.c 		ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
skb              1033 net/netfilter/nf_conntrack_h323_main.c 			ret = process_olc(skb, ct, ctinfo,
skb              1044 net/netfilter/nf_conntrack_h323_main.c static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
skb              1056 net/netfilter/nf_conntrack_h323_main.c 		ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
skb              1064 net/netfilter/nf_conntrack_h323_main.c 			ret = process_olc(skb, ct, ctinfo,
skb              1075 net/netfilter/nf_conntrack_h323_main.c static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
skb              1086 net/netfilter/nf_conntrack_h323_main.c 		ret = process_setup(skb, ct, ctinfo, protoff, data, dataoff,
skb              1090 net/netfilter/nf_conntrack_h323_main.c 		ret = process_callproceeding(skb, ct, ctinfo,
skb              1096 net/netfilter/nf_conntrack_h323_main.c 		ret = process_connect(skb, ct, ctinfo, protoff, data, dataoff,
skb              1100 net/netfilter/nf_conntrack_h323_main.c 		ret = process_alerting(skb, ct, ctinfo, protoff, data, dataoff,
skb              1104 net/netfilter/nf_conntrack_h323_main.c 		ret = process_facility(skb, ct, ctinfo, protoff, data, dataoff,
skb              1108 net/netfilter/nf_conntrack_h323_main.c 		ret = process_progress(skb, ct, ctinfo, protoff, data, dataoff,
skb              1122 net/netfilter/nf_conntrack_h323_main.c 			ret = process_h245(skb, ct, ctinfo,
skb              1133 net/netfilter/nf_conntrack_h323_main.c static int q931_help(struct sk_buff *skb, unsigned int protoff,
skb              1146 net/netfilter/nf_conntrack_h323_main.c 	pr_debug("nf_ct_q931: skblen = %u\n", skb->len);
skb              1151 net/netfilter/nf_conntrack_h323_main.c 	while (get_tpkt_data(skb, protoff, ct, ctinfo,
skb              1167 net/netfilter/nf_conntrack_h323_main.c 		if (process_q931(skb, ct, ctinfo, protoff,
skb              1177 net/netfilter/nf_conntrack_h323_main.c 	nf_ct_helper_log(skb, ct, "cannot process Q.931 message");
skb              1208 net/netfilter/nf_conntrack_h323_main.c static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
skb              1215 net/netfilter/nf_conntrack_h323_main.c 	uh = skb_header_pointer(skb, protoff, sizeof(_uh), &_uh);
skb              1219 net/netfilter/nf_conntrack_h323_main.c 	if (dataoff >= skb->len)
skb              1221 net/netfilter/nf_conntrack_h323_main.c 	*datalen = skb->len - dataoff;
skb              1222 net/netfilter/nf_conntrack_h323_main.c 	return skb_header_pointer(skb, dataoff, *datalen, h323_buffer);
skb              1245 net/netfilter/nf_conntrack_h323_main.c static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
skb              1284 net/netfilter/nf_conntrack_h323_main.c 		ret = nat_q931(skb, ct, ctinfo, protoff, data,
skb              1302 net/netfilter/nf_conntrack_h323_main.c static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
skb              1314 net/netfilter/nf_conntrack_h323_main.c 		return set_ras_addr(skb, ct, ctinfo, protoff, data,
skb              1319 net/netfilter/nf_conntrack_h323_main.c static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
skb              1363 net/netfilter/nf_conntrack_h323_main.c static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
skb              1374 net/netfilter/nf_conntrack_h323_main.c 	ret = expect_q931(skb, ct, ctinfo, protoff, data,
skb              1383 net/netfilter/nf_conntrack_h323_main.c 		ret = set_ras_addr(skb, ct, ctinfo, protoff, data,
skb              1399 net/netfilter/nf_conntrack_h323_main.c static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
skb              1415 net/netfilter/nf_conntrack_h323_main.c 		ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
skb              1430 net/netfilter/nf_conntrack_h323_main.c 		nf_ct_refresh(ct, skb, info->timeout * HZ);
skb              1450 net/netfilter/nf_conntrack_h323_main.c static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
skb              1465 net/netfilter/nf_conntrack_h323_main.c 		ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
skb              1478 net/netfilter/nf_conntrack_h323_main.c 	nf_ct_refresh(ct, skb, 30 * HZ);
skb              1483 net/netfilter/nf_conntrack_h323_main.c static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
skb              1505 net/netfilter/nf_conntrack_h323_main.c 		return set_h225_addr(skb, protoff, data, 0,
skb              1518 net/netfilter/nf_conntrack_h323_main.c 		return set_h225_addr(skb, protoff, data, 0,
skb              1527 net/netfilter/nf_conntrack_h323_main.c static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
skb              1550 net/netfilter/nf_conntrack_h323_main.c 			return set_sig_addr(skb, ct, ctinfo, protoff, data,
skb              1575 net/netfilter/nf_conntrack_h323_main.c static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
skb              1587 net/netfilter/nf_conntrack_h323_main.c 		return set_ras_addr(skb, ct, ctinfo, protoff, data,
skb              1592 net/netfilter/nf_conntrack_h323_main.c static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
skb              1631 net/netfilter/nf_conntrack_h323_main.c static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
skb              1645 net/netfilter/nf_conntrack_h323_main.c 		ret = set_ras_addr(skb, ct, ctinfo, protoff, data,
skb              1654 net/netfilter/nf_conntrack_h323_main.c 		ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
skb              1664 net/netfilter/nf_conntrack_h323_main.c static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
skb              1671 net/netfilter/nf_conntrack_h323_main.c 		return process_grq(skb, ct, ctinfo, protoff, data,
skb              1674 net/netfilter/nf_conntrack_h323_main.c 		return process_gcf(skb, ct, ctinfo, protoff, data,
skb              1677 net/netfilter/nf_conntrack_h323_main.c 		return process_rrq(skb, ct, ctinfo, protoff, data,
skb              1680 net/netfilter/nf_conntrack_h323_main.c 		return process_rcf(skb, ct, ctinfo, protoff, data,
skb              1683 net/netfilter/nf_conntrack_h323_main.c 		return process_urq(skb, ct, ctinfo, protoff, data,
skb              1686 net/netfilter/nf_conntrack_h323_main.c 		return process_arq(skb, ct, ctinfo, protoff, data,
skb              1689 net/netfilter/nf_conntrack_h323_main.c 		return process_acf(skb, ct, ctinfo, protoff, data,
skb              1692 net/netfilter/nf_conntrack_h323_main.c 		return process_lrq(skb, ct, ctinfo, protoff, data,
skb              1695 net/netfilter/nf_conntrack_h323_main.c 		return process_lcf(skb, ct, ctinfo, protoff, data,
skb              1698 net/netfilter/nf_conntrack_h323_main.c 		return process_irr(skb, ct, ctinfo, protoff, data,
skb              1708 net/netfilter/nf_conntrack_h323_main.c static int ras_help(struct sk_buff *skb, unsigned int protoff,
skb              1716 net/netfilter/nf_conntrack_h323_main.c 	pr_debug("nf_ct_ras: skblen = %u\n", skb->len);
skb              1721 net/netfilter/nf_conntrack_h323_main.c 	data = get_udp_data(skb, protoff, &datalen);
skb              1737 net/netfilter/nf_conntrack_h323_main.c 	if (process_ras(skb, ct, ctinfo, protoff, &data, &ras) < 0)
skb              1746 net/netfilter/nf_conntrack_h323_main.c 	nf_ct_helper_log(skb, ct, "cannot process RAS message");
skb               370 net/netfilter/nf_conntrack_helper.c void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
skb               389 net/netfilter/nf_conntrack_helper.c 	nf_log_packet(nf_ct_net(ct), nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
skb               483 net/netfilter/nf_conntrack_helper.c 		       int (*help)(struct sk_buff *skb, unsigned int protoff,
skb                33 net/netfilter/nf_conntrack_irc.c unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
skb               106 net/netfilter/nf_conntrack_irc.c static int help(struct sk_buff *skb, unsigned int protoff,
skb               134 net/netfilter/nf_conntrack_irc.c 	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
skb               140 net/netfilter/nf_conntrack_irc.c 	if (dataoff >= skb->len)
skb               144 net/netfilter/nf_conntrack_irc.c 	ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
skb               149 net/netfilter/nf_conntrack_irc.c 	data_limit = ib_ptr + skb->len - dataoff;
skb               161 net/netfilter/nf_conntrack_irc.c 		iph = ip_hdr(skb);
skb               198 net/netfilter/nf_conntrack_irc.c 				nf_ct_helper_log(skb, ct,
skb               212 net/netfilter/nf_conntrack_irc.c 				ret = nf_nat_irc(skb, ctinfo, protoff,
skb               217 net/netfilter/nf_conntrack_irc.c 				nf_ct_helper_log(skb, ct,
skb                39 net/netfilter/nf_conntrack_netbios_ns.c static int netbios_ns_help(struct sk_buff *skb, unsigned int protoff,
skb                43 net/netfilter/nf_conntrack_netbios_ns.c 	return nf_conntrack_broadcast_help(skb, ct, ctinfo, timeout);
skb                59 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
skb                66 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
skb                69 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
skb                73 net/netfilter/nf_conntrack_netlink.c 		ret = l4proto->tuple_to_nlattr(skb, tuple);
skb                75 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb                83 net/netfilter/nf_conntrack_netlink.c static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
skb                86 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
skb                87 net/netfilter/nf_conntrack_netlink.c 	    nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
skb                92 net/netfilter/nf_conntrack_netlink.c static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
skb                95 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
skb                96 net/netfilter/nf_conntrack_netlink.c 	    nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
skb               101 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
skb               107 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
skb               113 net/netfilter/nf_conntrack_netlink.c 		ret = ipv4_tuple_to_nlattr(skb, tuple);
skb               116 net/netfilter/nf_conntrack_netlink.c 		ret = ipv6_tuple_to_nlattr(skb, tuple);
skb               120 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb               128 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_tuples(struct sk_buff *skb,
skb               135 net/netfilter/nf_conntrack_netlink.c 	ret = ctnetlink_dump_tuples_ip(skb, tuple);
skb               139 net/netfilter/nf_conntrack_netlink.c 		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
skb               145 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
skb               150 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be16(skb, attrtype, htons(zone->id)))
skb               158 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
skb               160 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
skb               168 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
skb               172 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
skb               180 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
skb               190 net/netfilter/nf_conntrack_netlink.c 	nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
skb               194 net/netfilter/nf_conntrack_netlink.c 	ret = l4proto->to_nlattr(skb, nest_proto, ct);
skb               196 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_proto);
skb               204 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
skb               218 net/netfilter/nf_conntrack_netlink.c 	nest_helper = nla_nest_start(skb, CTA_HELP);
skb               221 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
skb               225 net/netfilter/nf_conntrack_netlink.c 		helper->to_nlattr(skb, ct);
skb               227 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_helper);
skb               236 net/netfilter/nf_conntrack_netlink.c dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
skb               252 net/netfilter/nf_conntrack_netlink.c 	nest_count = nla_nest_start(skb, attr);
skb               256 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
skb               258 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
skb               262 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_count);
skb               271 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
skb               278 net/netfilter/nf_conntrack_netlink.c 	if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
skb               280 net/netfilter/nf_conntrack_netlink.c 	if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
skb               287 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
skb               296 net/netfilter/nf_conntrack_netlink.c 	nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
skb               300 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
skb               302 net/netfilter/nf_conntrack_netlink.c 	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
skb               306 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_count);
skb               315 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
skb               317 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
skb               329 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
skb               340 net/netfilter/nf_conntrack_netlink.c 	nest_secctx = nla_nest_start(skb, CTA_SECCTX);
skb               344 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
skb               346 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_secctx);
skb               368 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
skb               379 net/netfilter/nf_conntrack_netlink.c 			return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
skb               393 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
skb               400 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
skb               403 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
skb               405 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb               414 net/netfilter/nf_conntrack_netlink.c dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
skb               418 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, type);
skb               422 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
skb               424 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
skb               426 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
skb               430 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb               438 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
skb               448 net/netfilter/nf_conntrack_netlink.c 	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
skb               452 net/netfilter/nf_conntrack_netlink.c 	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
skb               462 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
skb               470 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
skb               474 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
skb               475 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
skb               476 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
skb               479 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb               487 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
skb               491 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_ID, id))
skb               499 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
skb               501 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
skb               510 net/netfilter/nf_conntrack_netlink.c ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
skb               520 net/netfilter/nf_conntrack_netlink.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
skb               531 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
skb               534 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
skb               536 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
skb               539 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb               541 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
skb               544 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
skb               546 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
skb               549 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb               551 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
skb               555 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_status(skb, ct) < 0 ||
skb               556 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_acct(skb, ct, type) < 0 ||
skb               557 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
skb               558 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
skb               559 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_mark(skb, ct) < 0 ||
skb               560 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_secctx(skb, ct) < 0 ||
skb               561 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_labels(skb, ct) < 0 ||
skb               562 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_id(skb, ct) < 0 ||
skb               563 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_use(skb, ct) < 0 ||
skb               564 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_master(skb, ct) < 0 ||
skb               565 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
skb               566 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
skb               570 net/netfilter/nf_conntrack_netlink.c 	    (ctnetlink_dump_timeout(skb, ct) < 0 ||
skb               571 net/netfilter/nf_conntrack_netlink.c 	     ctnetlink_dump_protoinfo(skb, ct) < 0))
skb               574 net/netfilter/nf_conntrack_netlink.c 	nlmsg_end(skb, nlh);
skb               575 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb               579 net/netfilter/nf_conntrack_netlink.c 	nlmsg_cancel(skb, nlh);
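
ctnetlink_fill_info() above also shows the message-level framing that every fill function in this listing repeats: nlmsg_put() reserves the netlink header, nlmsg_end() commits it once all attributes are in, and nlmsg_cancel() rolls the whole message back when the skb runs out of tailroom. A compressed sketch of that pattern; MY_MSG_TYPE and MY_ATTR are hypothetical numbers, not real CTA_* values.

#include <net/netlink.h>

enum { MY_MSG_TYPE = 0x10, MY_ATTR = 1 };	/* hypothetical */

static int my_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 value)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, 0, 0);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_be32(skb, MY_ATTR, htonl(value)))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);	/* commit: final length is known now */
	return skb->len;

nla_put_failure:
	nlmsg_cancel(skb, nlh);	/* truncate skb back to the message start */
	return -EMSGSIZE;
}
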
skb               688 net/netfilter/nf_conntrack_netlink.c 	struct sk_buff *skb;
skb               710 net/netfilter/nf_conntrack_netlink.c 	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
skb               711 net/netfilter/nf_conntrack_netlink.c 	if (skb == NULL)
skb               715 net/netfilter/nf_conntrack_netlink.c 	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
skb               726 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
skb               729 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
skb               731 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
skb               734 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb               736 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
skb               739 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
skb               741 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
skb               744 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb               746 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
skb               750 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_id(skb, ct) < 0)
skb               753 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_status(skb, ct) < 0)
skb               757 net/netfilter/nf_conntrack_netlink.c 		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
skb               758 net/netfilter/nf_conntrack_netlink.c 		    ctnetlink_dump_timestamp(skb, ct) < 0)
skb               761 net/netfilter/nf_conntrack_netlink.c 		if (ctnetlink_dump_timeout(skb, ct) < 0)
skb               765 net/netfilter/nf_conntrack_netlink.c 		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
skb               769 net/netfilter/nf_conntrack_netlink.c 		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
skb               774 net/netfilter/nf_conntrack_netlink.c 		    && ctnetlink_dump_secctx(skb, ct) < 0)
skb               778 net/netfilter/nf_conntrack_netlink.c 		     ctnetlink_dump_labels(skb, ct) < 0)
skb               782 net/netfilter/nf_conntrack_netlink.c 		    ctnetlink_dump_master(skb, ct) < 0)
skb               786 net/netfilter/nf_conntrack_netlink.c 		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
skb               790 net/netfilter/nf_conntrack_netlink.c 		    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
skb               796 net/netfilter/nf_conntrack_netlink.c 	    && ctnetlink_dump_mark(skb, ct) < 0)
skb               799 net/netfilter/nf_conntrack_netlink.c 	nlmsg_end(skb, nlh);
skb               800 net/netfilter/nf_conntrack_netlink.c 	err = nfnetlink_send(skb, net, item->portid, group, item->report,
skb               808 net/netfilter/nf_conntrack_netlink.c 	nlmsg_cancel(skb, nlh);
skb               810 net/netfilter/nf_conntrack_netlink.c 	kfree_skb(skb);
skb               904 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
skb               906 net/netfilter/nf_conntrack_netlink.c 	struct net *net = sock_net(skb->sk);
skb               958 net/netfilter/nf_conntrack_netlink.c 			ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
skb               993 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb              1253 net/netfilter/nf_conntrack_netlink.c 				   struct sk_buff *skb,
skb              1279 net/netfilter/nf_conntrack_netlink.c 						 NETLINK_CB(skb).portid,
skb              1306 net/netfilter/nf_conntrack_netlink.c 	nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
skb              1313 net/netfilter/nf_conntrack_netlink.c 				   struct sk_buff *skb,
skb              1335 net/netfilter/nf_conntrack_netlink.c 		return netlink_dump_start(ctnl, skb, nlh, &c);
skb              1368 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
skb              1375 net/netfilter/nf_conntrack_netlink.c 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
skb              1396 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
skb              1406 net/netfilter/nf_conntrack_netlink.c 	struct net *net = sock_net(skb->sk);
skb              1433 net/netfilter/nf_conntrack_netlink.c 			res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
skb              1458 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb              1462 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
skb              1464 net/netfilter/nf_conntrack_netlink.c 	return ctnetlink_dump_list(skb, cb, true);
skb              1468 net/netfilter/nf_conntrack_netlink.c 				  struct sk_buff *skb,
skb              1478 net/netfilter/nf_conntrack_netlink.c 		return netlink_dump_start(ctnl, skb, nlh, &c);
skb              1485 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
skb              1487 net/netfilter/nf_conntrack_netlink.c 	return ctnetlink_dump_list(skb, cb, false);
skb              1491 net/netfilter/nf_conntrack_netlink.c 					struct sk_buff *skb,
skb              1501 net/netfilter/nf_conntrack_netlink.c 		return netlink_dump_start(ctnl, skb, nlh, &c);
skb              2098 net/netfilter/nf_conntrack_netlink.c 				   struct sk_buff *skb,
skb              2167 net/netfilter/nf_conntrack_netlink.c 						      ct, NETLINK_CB(skb).portid,
skb              2189 net/netfilter/nf_conntrack_netlink.c 						      ct, NETLINK_CB(skb).portid,
skb              2199 net/netfilter/nf_conntrack_netlink.c ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
skb              2208 net/netfilter/nf_conntrack_netlink.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
skb              2217 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
skb              2218 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
skb              2219 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
skb              2220 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
skb              2221 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
skb              2223 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
skb              2224 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
skb              2225 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
skb              2226 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
skb              2230 net/netfilter/nf_conntrack_netlink.c 	nlmsg_end(skb, nlh);
skb              2231 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb              2235 net/netfilter/nf_conntrack_netlink.c 	nlmsg_cancel(skb, nlh);
skb              2240 net/netfilter/nf_conntrack_netlink.c ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              2243 net/netfilter/nf_conntrack_netlink.c 	struct net *net = sock_net(skb->sk);
skb              2255 net/netfilter/nf_conntrack_netlink.c 		if (ctnetlink_ct_stat_cpu_fill_info(skb,
skb              2256 net/netfilter/nf_conntrack_netlink.c 						    NETLINK_CB(cb->skb).portid,
skb              2263 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb              2267 net/netfilter/nf_conntrack_netlink.c 				 struct sk_buff *skb,
skb              2276 net/netfilter/nf_conntrack_netlink.c 		return netlink_dump_start(ctnl, skb, nlh, &c);
skb              2283 net/netfilter/nf_conntrack_netlink.c ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
skb              2292 net/netfilter/nf_conntrack_netlink.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
skb              2301 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
skb              2304 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max)))
skb              2307 net/netfilter/nf_conntrack_netlink.c 	nlmsg_end(skb, nlh);
skb              2308 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb              2312 net/netfilter/nf_conntrack_netlink.c 	nlmsg_cancel(skb, nlh);
skb              2317 net/netfilter/nf_conntrack_netlink.c 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              2328 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
skb              2331 net/netfilter/nf_conntrack_netlink.c 					  sock_net(skb->sk));
skb              2335 net/netfilter/nf_conntrack_netlink.c 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
skb              2398 net/netfilter/nf_conntrack_netlink.c static struct nf_conn *ctnetlink_glue_get_ct(const struct sk_buff *skb,
skb              2401 net/netfilter/nf_conntrack_netlink.c 	return nf_ct_get(skb, ctinfo);
skb              2404 net/netfilter/nf_conntrack_netlink.c static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
skb              2411 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
skb              2414 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
skb              2416 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
skb              2419 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb              2421 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
skb              2424 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
skb              2426 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
skb              2429 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb              2431 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
skb              2435 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_id(skb, ct) < 0)
skb              2438 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_status(skb, ct) < 0)
skb              2441 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_timeout(skb, ct) < 0)
skb              2444 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_protoinfo(skb, ct) < 0)
skb              2447 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_helpinfo(skb, ct) < 0)
skb              2451 net/netfilter/nf_conntrack_netlink.c 	if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
skb              2454 net/netfilter/nf_conntrack_netlink.c 	if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
skb              2458 net/netfilter/nf_conntrack_netlink.c 	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
skb              2461 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_ct_synproxy(skb, ct) < 0)
skb              2465 net/netfilter/nf_conntrack_netlink.c 	if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
skb              2468 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_labels(skb, ct) < 0)
skb              2477 net/netfilter/nf_conntrack_netlink.c ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
skb              2483 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, ct_attr);
skb              2487 net/netfilter/nf_conntrack_netlink.c 	if (__ctnetlink_glue_build(skb, ct) < 0)
skb              2490 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb              2492 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo)))
skb              2627 net/netfilter/nf_conntrack_netlink.c static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
skb              2633 net/netfilter/nf_conntrack_netlink.c 	nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
skb              2650 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
skb              2656 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, type);
skb              2659 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_tuples(skb, tuple) < 0)
skb              2661 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb              2669 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
skb              2683 net/netfilter/nf_conntrack_netlink.c 	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
skb              2688 net/netfilter/nf_conntrack_netlink.c 	ret = ctnetlink_dump_tuples_ip(skb, &m);
skb              2691 net/netfilter/nf_conntrack_netlink.c 		ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
skb              2698 net/netfilter/nf_conntrack_netlink.c 	nla_nest_end(skb, nest_parms);
skb              2728 net/netfilter/nf_conntrack_netlink.c ctnetlink_exp_dump_expect(struct sk_buff *skb,
skb              2743 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
skb              2745 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
skb              2747 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_exp_dump_tuple(skb,
skb              2755 net/netfilter/nf_conntrack_netlink.c 		nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT);
skb              2759 net/netfilter/nf_conntrack_netlink.c 		if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
skb              2767 net/netfilter/nf_conntrack_netlink.c 		if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
skb              2770 net/netfilter/nf_conntrack_netlink.c 	        nla_nest_end(skb, nest_parms);
skb              2773 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
skb              2774 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
skb              2775 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
skb              2776 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
skb              2784 net/netfilter/nf_conntrack_netlink.c 		    nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
skb              2789 net/netfilter/nf_conntrack_netlink.c 	    nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
skb              2799 net/netfilter/nf_conntrack_netlink.c ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
skb              2807 net/netfilter/nf_conntrack_netlink.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
skb              2816 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
skb              2819 net/netfilter/nf_conntrack_netlink.c 	nlmsg_end(skb, nlh);
skb              2820 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb              2824 net/netfilter/nf_conntrack_netlink.c 	nlmsg_cancel(skb, nlh);
skb              2836 net/netfilter/nf_conntrack_netlink.c 	struct sk_buff *skb;
skb              2853 net/netfilter/nf_conntrack_netlink.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb              2854 net/netfilter/nf_conntrack_netlink.c 	if (skb == NULL)
skb              2858 net/netfilter/nf_conntrack_netlink.c 	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
skb              2867 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
skb              2870 net/netfilter/nf_conntrack_netlink.c 	nlmsg_end(skb, nlh);
skb              2871 net/netfilter/nf_conntrack_netlink.c 	nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
skb              2875 net/netfilter/nf_conntrack_netlink.c 	nlmsg_cancel(skb, nlh);
skb              2877 net/netfilter/nf_conntrack_netlink.c 	kfree_skb(skb);
skb              2891 net/netfilter/nf_conntrack_netlink.c ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
skb              2893 net/netfilter/nf_conntrack_netlink.c 	struct net *net = sock_net(skb->sk);
skb              2915 net/netfilter/nf_conntrack_netlink.c 			if (ctnetlink_exp_fill_info(skb,
skb              2916 net/netfilter/nf_conntrack_netlink.c 						    NETLINK_CB(cb->skb).portid,
skb              2936 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb              2940 net/netfilter/nf_conntrack_netlink.c ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
skb              2962 net/netfilter/nf_conntrack_netlink.c 		if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
skb              2982 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb              2986 net/netfilter/nf_conntrack_netlink.c 				 struct sk_buff *skb,
skb              3025 net/netfilter/nf_conntrack_netlink.c 	err = netlink_dump_start(ctnl, skb, nlh, &c);
skb              3032 net/netfilter/nf_conntrack_netlink.c 				struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              3046 net/netfilter/nf_conntrack_netlink.c 			return ctnetlink_dump_exp_ct(net, ctnl, skb, nlh, cda,
skb              3053 net/netfilter/nf_conntrack_netlink.c 			return netlink_dump_start(ctnl, skb, nlh, &c);
skb              3094 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
skb              3101 net/netfilter/nf_conntrack_netlink.c 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
skb              3130 net/netfilter/nf_conntrack_netlink.c 				struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              3168 net/netfilter/nf_conntrack_netlink.c 			nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
skb              3180 net/netfilter/nf_conntrack_netlink.c 					 NETLINK_CB(skb).portid,
skb              3185 net/netfilter/nf_conntrack_netlink.c 					 NETLINK_CB(skb).portid,
skb              3383 net/netfilter/nf_conntrack_netlink.c 				struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              3415 net/netfilter/nf_conntrack_netlink.c 						      NETLINK_CB(skb).portid,
skb              3430 net/netfilter/nf_conntrack_netlink.c ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
skb              3439 net/netfilter/nf_conntrack_netlink.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
skb              3448 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
skb              3449 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
skb              3450 net/netfilter/nf_conntrack_netlink.c 	    nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
skb              3453 net/netfilter/nf_conntrack_netlink.c 	nlmsg_end(skb, nlh);
skb              3454 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb              3458 net/netfilter/nf_conntrack_netlink.c 	nlmsg_cancel(skb, nlh);
skb              3463 net/netfilter/nf_conntrack_netlink.c ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              3466 net/netfilter/nf_conntrack_netlink.c 	struct net *net = sock_net(skb->sk);
skb              3478 net/netfilter/nf_conntrack_netlink.c 		if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
skb              3485 net/netfilter/nf_conntrack_netlink.c 	return skb->len;
skb              3489 net/netfilter/nf_conntrack_netlink.c 				  struct sk_buff *skb,
skb              3498 net/netfilter/nf_conntrack_netlink.c 		return netlink_dump_start(ctnl, skb, nlh, &c);
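
The GET and stat handlers above all branch on NLM_F_DUMP and hand control to netlink_dump_start(), which keeps invoking the supplied .dump callback with fresh skbs until it returns 0. A minimal sketch of that handoff, with my_dump_fn and my_get_handler as hypothetical names:

#include <linux/netlink.h>

/* Return skb->len to be called again with another skb, or 0 once the
 * table has been fully dumped. */
static int my_dump_fn(struct sk_buff *skb, struct netlink_callback *cb)
{
	return 0;	/* nothing to dump in this sketch */
}

static int my_get_handler(struct sock *ctnl, struct sk_buff *skb,
			  const struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = my_dump_fn,
		};

		return netlink_dump_start(ctnl, skb, nlh, &c);
	}
	return 0;	/* non-dump requests handled elsewhere */
}
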
skb                49 net/netfilter/nf_conntrack_pptp.c (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
skb                56 net/netfilter/nf_conntrack_pptp.c (*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
skb               274 net/netfilter/nf_conntrack_pptp.c pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
skb               388 net/netfilter/nf_conntrack_pptp.c 		return nf_nat_pptp_inbound(skb, ct, ctinfo,
skb               402 net/netfilter/nf_conntrack_pptp.c pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
skb               484 net/netfilter/nf_conntrack_pptp.c 		return nf_nat_pptp_outbound(skb, ct, ctinfo,
skb               515 net/netfilter/nf_conntrack_pptp.c conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
skb               527 net/netfilter/nf_conntrack_pptp.c 	unsigned int tcplen = skb->len - protoff;
skb               546 net/netfilter/nf_conntrack_pptp.c 	tcph = skb_header_pointer(skb, nexthdr_off, sizeof(_tcph), &_tcph);
skb               551 net/netfilter/nf_conntrack_pptp.c 	pptph = skb_header_pointer(skb, nexthdr_off, sizeof(_pptph), &_pptph);
skb               566 net/netfilter/nf_conntrack_pptp.c 	ctlh = skb_header_pointer(skb, nexthdr_off, sizeof(_ctlh), &_ctlh);
skb               579 net/netfilter/nf_conntrack_pptp.c 	pptpReq = skb_header_pointer(skb, nexthdr_off, reqlen, &_pptpReq);
skb               592 net/netfilter/nf_conntrack_pptp.c 		ret = pptp_outbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct,
skb               596 net/netfilter/nf_conntrack_pptp.c 		ret = pptp_inbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct,
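
conntrack_pptp_help() above reads the TCP header, the PPTP header and the control header through skb_header_pointer(), which copies the requested bytes into a caller-provided stack buffer whenever they are not linearly available in the skb. A minimal sketch of that access pattern; nothing here is PPTP-specific:

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Return the TCP header at @protoff, or NULL if the packet is too
 * short; @buf is only used when the header spans skb fragments. */
static const struct tcphdr *peek_tcp_header(const struct sk_buff *skb,
					    unsigned int protoff,
					    struct tcphdr *buf)
{
	return skb_header_pointer(skb, protoff, sizeof(*buf), buf);
}

Callers then derive the payload offset the same way the helper above does, protoff plus th->doff * 4, before peeking at the next protocol layer.
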
skb                51 net/netfilter/nf_conntrack_proto.c void nf_l4proto_log_invalid(const struct sk_buff *skb,
skb                67 net/netfilter/nf_conntrack_proto.c 	nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
skb                74 net/netfilter/nf_conntrack_proto.c void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
skb                90 net/netfilter/nf_conntrack_proto.c 	nf_l4proto_log_invalid(skb, net, nf_ct_l3num(ct),
skb               124 net/netfilter/nf_conntrack_proto.c unsigned int nf_confirm(struct sk_buff *skb, unsigned int protoff,
skb               137 net/netfilter/nf_conntrack_proto.c 			ret = helper->help(skb,
skb               146 net/netfilter/nf_conntrack_proto.c 	    !nf_is_loopback_packet(skb)) {
skb               147 net/netfilter/nf_conntrack_proto.c 		if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
skb               154 net/netfilter/nf_conntrack_proto.c 	return nf_conntrack_confirm(skb);
skb               159 net/netfilter/nf_conntrack_proto.c 				 struct sk_buff *skb,
skb               165 net/netfilter/nf_conntrack_proto.c 	ct = nf_ct_get(skb, &ctinfo);
skb               167 net/netfilter/nf_conntrack_proto.c 		return nf_conntrack_confirm(skb);
skb               169 net/netfilter/nf_conntrack_proto.c 	return nf_confirm(skb,
skb               170 net/netfilter/nf_conntrack_proto.c 			  skb_network_offset(skb) + ip_hdrlen(skb),
skb               175 net/netfilter/nf_conntrack_proto.c 				      struct sk_buff *skb,
skb               178 net/netfilter/nf_conntrack_proto.c 	return nf_conntrack_in(skb, state);
skb               182 net/netfilter/nf_conntrack_proto.c 					 struct sk_buff *skb,
skb               185 net/netfilter/nf_conntrack_proto.c 	if (ip_is_fragment(ip_hdr(skb))) { /* IP_NODEFRAG setsockopt set */
skb               189 net/netfilter/nf_conntrack_proto.c 		tmpl = nf_ct_get(skb, &ctinfo);
skb               194 net/netfilter/nf_conntrack_proto.c 			skb->_nfct = 0;
skb               200 net/netfilter/nf_conntrack_proto.c 	return nf_conntrack_in(skb, state);
skb               364 net/netfilter/nf_conntrack_proto.c 				 struct sk_buff *skb,
skb               369 net/netfilter/nf_conntrack_proto.c 	unsigned char pnum = ipv6_hdr(skb)->nexthdr;
skb               373 net/netfilter/nf_conntrack_proto.c 	ct = nf_ct_get(skb, &ctinfo);
skb               375 net/netfilter/nf_conntrack_proto.c 		return nf_conntrack_confirm(skb);
skb               377 net/netfilter/nf_conntrack_proto.c 	protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
skb               381 net/netfilter/nf_conntrack_proto.c 		return nf_conntrack_confirm(skb);
skb               384 net/netfilter/nf_conntrack_proto.c 	return nf_confirm(skb, protoff, ct, ctinfo);
skb               388 net/netfilter/nf_conntrack_proto.c 				      struct sk_buff *skb,
skb               391 net/netfilter/nf_conntrack_proto.c 	return nf_conntrack_in(skb, state);
skb               395 net/netfilter/nf_conntrack_proto.c 					 struct sk_buff *skb,
skb               398 net/netfilter/nf_conntrack_proto.c 	return nf_conntrack_in(skb, state);
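
The IPv6 confirm hook above cannot assume the transport header follows the fixed header; it walks the extension-header chain with ipv6_skip_exthdr() first. A minimal sketch of that walk, applying the same two checks as the listing (unparseable chain, non-first fragment):

#include <net/ipv6.h>

static int transport_offset_v6(struct sk_buff *skb, u8 *proto)
{
	__be16 frag_off;
	int protoff;

	*proto = ipv6_hdr(skb)->nexthdr;
	protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
				   proto, &frag_off);
	if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
		return -1;	/* bad chain or non-first fragment */
	return protoff;
}
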
skb               384 net/netfilter/nf_conntrack_proto_dccp.c dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
skb               416 net/netfilter/nf_conntrack_proto_dccp.c 	nf_ct_l4proto_log_invalid(skb, ct, "%s", msg);
skb               430 net/netfilter/nf_conntrack_proto_dccp.c 		       struct sk_buff *skb, unsigned int dataoff,
skb               433 net/netfilter/nf_conntrack_proto_dccp.c 	unsigned int dccp_len = skb->len - dataoff;
skb               454 net/netfilter/nf_conntrack_proto_dccp.c 	    nf_checksum_partial(skb, state->hook, dataoff, cscov,
skb               466 net/netfilter/nf_conntrack_proto_dccp.c 	nf_l4proto_log_invalid(skb, state->net, state->pf,
skb               471 net/netfilter/nf_conntrack_proto_dccp.c int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
skb               482 net/netfilter/nf_conntrack_proto_dccp.c 	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
skb               486 net/netfilter/nf_conntrack_proto_dccp.c 	if (dccp_error(dh, skb, dataoff, state))
skb               490 net/netfilter/nf_conntrack_proto_dccp.c 	if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh))
skb               496 net/netfilter/nf_conntrack_proto_dccp.c 		nf_ct_kill_acct(ct, ctinfo, skb);
skb               545 net/netfilter/nf_conntrack_proto_dccp.c 		nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid packet");
skb               549 net/netfilter/nf_conntrack_proto_dccp.c 		nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid state transition");
skb               564 net/netfilter/nf_conntrack_proto_dccp.c 	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
skb               591 net/netfilter/nf_conntrack_proto_dccp.c static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
skb               597 net/netfilter/nf_conntrack_proto_dccp.c 	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP);
skb               600 net/netfilter/nf_conntrack_proto_dccp.c 	if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) ||
skb               601 net/netfilter/nf_conntrack_proto_dccp.c 	    nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
skb               603 net/netfilter/nf_conntrack_proto_dccp.c 	    nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
skb               607 net/netfilter/nf_conntrack_proto_dccp.c 	nla_nest_end(skb, nest_parms);
skb               699 net/netfilter/nf_conntrack_proto_dccp.c dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
skb               705 net/netfilter/nf_conntrack_proto_dccp.c 		if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
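
dccp_timeout_obj_to_nlattr() above (and the sctp variant later in this listing) exports a per-state timeout table by looping nla_put_be32() over the array, converting jiffies back to seconds on the way out. A generic sketch; the attribute numbering starting at 1 (0 conventionally being an UNSPEC slot) is an assumption of the sketch, not the real CTA_TIMEOUT_* layout:

#include <net/netlink.h>

static int timeouts_to_nlattr(struct sk_buff *skb,
			      const unsigned int *timeouts, int count)
{
	int i;

	for (i = 1; i < count; i++) {
		/* stored internally in jiffies, exported in seconds */
		if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
			return -EMSGSIZE;
	}
	return 0;
}
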
skb                41 net/netfilter/nf_conntrack_proto_generic.c generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
skb                45 net/netfilter/nf_conntrack_proto_generic.c 	if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)))
skb               164 net/netfilter/nf_conntrack_proto_gre.c bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
skb               174 net/netfilter/nf_conntrack_proto_gre.c 	grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
skb               183 net/netfilter/nf_conntrack_proto_gre.c 	pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
skb               216 net/netfilter/nf_conntrack_proto_gre.c 			    struct sk_buff *skb,
skb               239 net/netfilter/nf_conntrack_proto_gre.c 		nf_ct_refresh_acct(ct, ctinfo, skb,
skb               245 net/netfilter/nf_conntrack_proto_gre.c 		nf_ct_refresh_acct(ct, ctinfo, skb,
skb               280 net/netfilter/nf_conntrack_proto_gre.c gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
skb               284 net/netfilter/nf_conntrack_proto_gre.c 	if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
skb               286 net/netfilter/nf_conntrack_proto_gre.c 	    nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED,
skb                25 net/netfilter/nf_conntrack_proto_icmp.c bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
skb                31 net/netfilter/nf_conntrack_proto_icmp.c 	hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
skb                69 net/netfilter/nf_conntrack_proto_icmp.c 			     struct sk_buff *skb,
skb                99 net/netfilter/nf_conntrack_proto_icmp.c 	nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
skb               104 net/netfilter/nf_conntrack_proto_icmp.c int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
skb               118 net/netfilter/nf_conntrack_proto_icmp.c 	WARN_ON(skb_nfct(skb));
skb               119 net/netfilter/nf_conntrack_proto_icmp.c 	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
skb               122 net/netfilter/nf_conntrack_proto_icmp.c 	if (!nf_ct_get_tuplepr(skb, dataoff,
skb               171 net/netfilter/nf_conntrack_proto_icmp.c 			nf_l4proto_log_invalid(skb, state->net, state->pf,
skb               176 net/netfilter/nf_conntrack_proto_icmp.c 			nf_l4proto_log_invalid(skb, state->net, state->pf,
skb               190 net/netfilter/nf_conntrack_proto_icmp.c 	nf_ct_set(skb, ct, ctinfo);
skb               194 net/netfilter/nf_conntrack_proto_icmp.c static void icmp_error_log(const struct sk_buff *skb,
skb               198 net/netfilter/nf_conntrack_proto_icmp.c 	nf_l4proto_log_invalid(skb, state->net, state->pf,
skb               204 net/netfilter/nf_conntrack_proto_icmp.c 			      struct sk_buff *skb, unsigned int dataoff,
skb               212 net/netfilter/nf_conntrack_proto_icmp.c 	icmph = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
skb               214 net/netfilter/nf_conntrack_proto_icmp.c 		icmp_error_log(skb, state, "short packet");
skb               221 net/netfilter/nf_conntrack_proto_icmp.c 	    nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
skb               222 net/netfilter/nf_conntrack_proto_icmp.c 		icmp_error_log(skb, state, "bad hw icmp checksum");
skb               233 net/netfilter/nf_conntrack_proto_icmp.c 		icmp_error_log(skb, state, "invalid icmp type");
skb               246 net/netfilter/nf_conntrack_proto_icmp.c 	outer_daddr.ip = ip_hdr(skb)->daddr;
skb               249 net/netfilter/nf_conntrack_proto_icmp.c 	return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
skb               258 net/netfilter/nf_conntrack_proto_icmp.c static int icmp_tuple_to_nlattr(struct sk_buff *skb,
skb               261 net/netfilter/nf_conntrack_proto_icmp.c 	if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) ||
skb               262 net/netfilter/nf_conntrack_proto_icmp.c 	    nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) ||
skb               263 net/netfilter/nf_conntrack_proto_icmp.c 	    nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code))
skb               331 net/netfilter/nf_conntrack_proto_icmp.c icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
skb               335 net/netfilter/nf_conntrack_proto_icmp.c 	if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)))
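
icmp_error() above validates the ICMP checksum only once, at PRE_ROUTING, and nf_ip_checksum() skips the arithmetic entirely when the skb already carries a verified checksum. A sketch of that gate, assuming conntrack's usual hook placement:

#include <linux/in.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

/* True when the checksum must be treated as bad; at any later hook the
 * packet was already vetted on the way in. */
static bool icmp_csum_bad(struct sk_buff *skb, unsigned int hook,
			  unsigned int dataoff)
{
	return hook == NF_INET_PRE_ROUTING &&
	       nf_ip_checksum(skb, hook, dataoff, IPPROTO_ICMP) != 0;
}
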
skb                29 net/netfilter/nf_conntrack_proto_icmpv6.c bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
skb                37 net/netfilter/nf_conntrack_proto_icmpv6.c 	hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
skb                86 net/netfilter/nf_conntrack_proto_icmpv6.c 			       struct sk_buff *skb,
skb               117 net/netfilter/nf_conntrack_proto_icmpv6.c 	nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
skb               123 net/netfilter/nf_conntrack_proto_icmpv6.c static void icmpv6_error_log(const struct sk_buff *skb,
skb               127 net/netfilter/nf_conntrack_proto_icmpv6.c 	nf_l4proto_log_invalid(skb, state->net, state->pf,
skb               132 net/netfilter/nf_conntrack_proto_icmpv6.c 			      struct sk_buff *skb,
skb               141 net/netfilter/nf_conntrack_proto_icmpv6.c 	icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
skb               143 net/netfilter/nf_conntrack_proto_icmpv6.c 		icmpv6_error_log(skb, state, "short packet");
skb               149 net/netfilter/nf_conntrack_proto_icmpv6.c 	    nf_ip6_checksum(skb, state->hook, dataoff, IPPROTO_ICMPV6)) {
skb               150 net/netfilter/nf_conntrack_proto_icmpv6.c 		icmpv6_error_log(skb, state, "ICMPv6 checksum failed");
skb               157 net/netfilter/nf_conntrack_proto_icmpv6.c 		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
skb               165 net/netfilter/nf_conntrack_proto_icmpv6.c 	memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr,
skb               168 net/netfilter/nf_conntrack_proto_icmpv6.c 	return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
skb               176 net/netfilter/nf_conntrack_proto_icmpv6.c static int icmpv6_tuple_to_nlattr(struct sk_buff *skb,
skb               179 net/netfilter/nf_conntrack_proto_icmpv6.c 	if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) ||
skb               180 net/netfilter/nf_conntrack_proto_icmpv6.c 	    nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) ||
skb               181 net/netfilter/nf_conntrack_proto_icmpv6.c 	    nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code))
skb               250 net/netfilter/nf_conntrack_proto_icmpv6.c icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
skb               254 net/netfilter/nf_conntrack_proto_icmpv6.c 	if (nla_put_be32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)))
skb               154 net/netfilter/nf_conntrack_proto_sctp.c #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count)	\
skb               156 net/netfilter/nf_conntrack_proto_sctp.c 	(offset) < (skb)->len &&					\
skb               157 net/netfilter/nf_conntrack_proto_sctp.c 	((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch)));	\
skb               162 net/netfilter/nf_conntrack_proto_sctp.c 			   const struct sk_buff *skb,
skb               172 net/netfilter/nf_conntrack_proto_sctp.c 	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
skb               270 net/netfilter/nf_conntrack_proto_sctp.c sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
skb               280 net/netfilter/nf_conntrack_proto_sctp.c 	for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) {
skb               298 net/netfilter/nf_conntrack_proto_sctp.c 			ih = skb_header_pointer(skb, offset + sizeof(_sch),
skb               325 net/netfilter/nf_conntrack_proto_sctp.c static bool sctp_error(struct sk_buff *skb,
skb               332 net/netfilter/nf_conntrack_proto_sctp.c 	if (skb->len < dataoff + sizeof(struct sctphdr)) {
skb               338 net/netfilter/nf_conntrack_proto_sctp.c 	    skb->ip_summed == CHECKSUM_NONE) {
skb               339 net/netfilter/nf_conntrack_proto_sctp.c 		if (skb_ensure_writable(skb, dataoff + sizeof(*sh))) {
skb               343 net/netfilter/nf_conntrack_proto_sctp.c 		sh = (const struct sctphdr *)(skb->data + dataoff);
skb               344 net/netfilter/nf_conntrack_proto_sctp.c 		if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
skb               348 net/netfilter/nf_conntrack_proto_sctp.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               352 net/netfilter/nf_conntrack_proto_sctp.c 	nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_SCTP, "%s", logmsg);
skb               358 net/netfilter/nf_conntrack_proto_sctp.c 			     struct sk_buff *skb,
skb               373 net/netfilter/nf_conntrack_proto_sctp.c 	if (sctp_error(skb, dataoff, state))
skb               376 net/netfilter/nf_conntrack_proto_sctp.c 	sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
skb               380 net/netfilter/nf_conntrack_proto_sctp.c 	if (do_basic_checks(ct, skb, dataoff, map) != 0)
skb               390 net/netfilter/nf_conntrack_proto_sctp.c 		if (!sctp_new(ct, skb, sh, dataoff))
skb               409 net/netfilter/nf_conntrack_proto_sctp.c 	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
skb               458 net/netfilter/nf_conntrack_proto_sctp.c 			ih = skb_header_pointer(skb, offset + sizeof(_sch),
skb               477 net/netfilter/nf_conntrack_proto_sctp.c 	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
skb               514 net/netfilter/nf_conntrack_proto_sctp.c static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
skb               520 net/netfilter/nf_conntrack_proto_sctp.c 	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_SCTP);
skb               524 net/netfilter/nf_conntrack_proto_sctp.c 	if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) ||
skb               525 net/netfilter/nf_conntrack_proto_sctp.c 	    nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
skb               527 net/netfilter/nf_conntrack_proto_sctp.c 	    nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY,
skb               533 net/netfilter/nf_conntrack_proto_sctp.c 	nla_nest_end(skb, nest_parms);
skb               616 net/netfilter/nf_conntrack_proto_sctp.c sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
skb               622 net/netfilter/nf_conntrack_proto_sctp.c 	        if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
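
The for_each_sctp_chunk() macro above hides a walk over chunk headers: each step re-fetches the header with skb_header_pointer() and advances by the chunk length rounded up to a 4-byte boundary. The same loop written out explicitly, with a guard against zero-length chunks added for safety (the real code bounds the walk through its validity checks):

#include <linux/skbuff.h>
#include <linux/sctp.h>

static void walk_chunks(const struct sk_buff *skb, unsigned int offset)
{
	struct sctp_chunkhdr _sch;
	const struct sctp_chunkhdr *sch;

	while (offset < skb->len &&
	       (sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch))) {
		/* inspect sch->type / sch->flags / sch->length here */
		if (ntohs(sch->length) < sizeof(_sch))
			break;	/* malformed chunk; avoid spinning */
		offset += (ntohs(sch->length) + 3) & ~3U;	/* 4-byte pad */
	}
}
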
skb               341 net/netfilter/nf_conntrack_proto_tcp.c static void tcp_options(const struct sk_buff *skb,
skb               353 net/netfilter/nf_conntrack_proto_tcp.c 	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
skb               398 net/netfilter/nf_conntrack_proto_tcp.c static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
skb               409 net/netfilter/nf_conntrack_proto_tcp.c 	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
skb               465 net/netfilter/nf_conntrack_proto_tcp.c 			  const struct sk_buff *skb,
skb               486 net/netfilter/nf_conntrack_proto_tcp.c 	end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
skb               489 net/netfilter/nf_conntrack_proto_tcp.c 		tcp_sack(skb, dataoff, tcph, &sack);
skb               521 net/netfilter/nf_conntrack_proto_tcp.c 			tcp_options(skb, dataoff, tcph, sender);
skb               566 net/netfilter/nf_conntrack_proto_tcp.c 		tcp_options(skb, dataoff, tcph, sender);
skb               678 net/netfilter/nf_conntrack_proto_tcp.c 			nf_ct_l4proto_log_invalid(skb, ct,
skb               714 net/netfilter/nf_conntrack_proto_tcp.c static void tcp_error_log(const struct sk_buff *skb,
skb               718 net/netfilter/nf_conntrack_proto_tcp.c 	nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_TCP, "%s", msg);
skb               723 net/netfilter/nf_conntrack_proto_tcp.c 		      struct sk_buff *skb,
skb               727 net/netfilter/nf_conntrack_proto_tcp.c 	unsigned int tcplen = skb->len - dataoff;
skb               732 net/netfilter/nf_conntrack_proto_tcp.c 		tcp_error_log(skb, state, "truncated packet");
skb               743 net/netfilter/nf_conntrack_proto_tcp.c 	    nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
skb               744 net/netfilter/nf_conntrack_proto_tcp.c 		tcp_error_log(skb, state, "bad checksum");
skb               751 net/netfilter/nf_conntrack_proto_tcp.c 		tcp_error_log(skb, state, "invalid tcp flag combination");
skb               758 net/netfilter/nf_conntrack_proto_tcp.c static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
skb               781 net/netfilter/nf_conntrack_proto_tcp.c 			segment_seq_plus_len(ntohl(th->seq), skb->len,
skb               789 net/netfilter/nf_conntrack_proto_tcp.c 		tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
skb               801 net/netfilter/nf_conntrack_proto_tcp.c 			segment_seq_plus_len(ntohl(th->seq), skb->len,
skb               838 net/netfilter/nf_conntrack_proto_tcp.c 			    struct sk_buff *skb,
skb               853 net/netfilter/nf_conntrack_proto_tcp.c 	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
skb               857 net/netfilter/nf_conntrack_proto_tcp.c 	if (tcp_error(th, skb, dataoff, state))
skb               860 net/netfilter/nf_conntrack_proto_tcp.c 	if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))
skb               950 net/netfilter/nf_conntrack_proto_tcp.c 		    segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
skb               965 net/netfilter/nf_conntrack_proto_tcp.c 			tcp_options(skb, dataoff, th, &seen);
skb               984 net/netfilter/nf_conntrack_proto_tcp.c 		nf_ct_l4proto_log_invalid(skb, ct, "invalid packet ignored in "
skb              1007 net/netfilter/nf_conntrack_proto_tcp.c 		nf_ct_l4proto_log_invalid(skb, ct, "invalid state");
skb              1022 net/netfilter/nf_conntrack_proto_tcp.c 			nf_ct_l4proto_log_invalid(skb, ct, "challenge-ack ignored");
skb              1047 net/netfilter/nf_conntrack_proto_tcp.c 				nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
skb              1092 net/netfilter/nf_conntrack_proto_tcp.c 			   skb, dataoff, th)) {
skb              1142 net/netfilter/nf_conntrack_proto_tcp.c 			nf_ct_kill_acct(ct, ctinfo, skb);
skb              1161 net/netfilter/nf_conntrack_proto_tcp.c 	nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
skb              1187 net/netfilter/nf_conntrack_proto_tcp.c static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
skb              1194 net/netfilter/nf_conntrack_proto_tcp.c 	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP);
skb              1198 net/netfilter/nf_conntrack_proto_tcp.c 	if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
skb              1199 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
skb              1201 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
skb              1206 net/netfilter/nf_conntrack_proto_tcp.c 	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
skb              1211 net/netfilter/nf_conntrack_proto_tcp.c 	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
skb              1216 net/netfilter/nf_conntrack_proto_tcp.c 	nla_nest_end(skb, nest_parms);
skb              1371 net/netfilter/nf_conntrack_proto_tcp.c tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
skb              1375 net/netfilter/nf_conntrack_proto_tcp.c 	if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
skb              1377 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
skb              1379 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
skb              1381 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
skb              1383 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
skb              1385 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
skb              1387 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
skb              1389 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
skb              1391 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
skb              1393 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
skb              1395 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
skb                37 net/netfilter/nf_conntrack_proto_udp.c static void udp_error_log(const struct sk_buff *skb,
skb                41 net/netfilter/nf_conntrack_proto_udp.c 	nf_l4proto_log_invalid(skb, state->net, state->pf,
skb                45 net/netfilter/nf_conntrack_proto_udp.c static bool udp_error(struct sk_buff *skb,
skb                49 net/netfilter/nf_conntrack_proto_udp.c 	unsigned int udplen = skb->len - dataoff;
skb                54 net/netfilter/nf_conntrack_proto_udp.c 	hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
skb                56 net/netfilter/nf_conntrack_proto_udp.c 		udp_error_log(skb, state, "short packet");
skb                62 net/netfilter/nf_conntrack_proto_udp.c 		udp_error_log(skb, state, "truncated/malformed packet");
skb                76 net/netfilter/nf_conntrack_proto_udp.c 	    nf_checksum(skb, state->hook, dataoff, IPPROTO_UDP, state->pf)) {
skb                77 net/netfilter/nf_conntrack_proto_udp.c 		udp_error_log(skb, state, "bad checksum");
skb                86 net/netfilter/nf_conntrack_proto_udp.c 			    struct sk_buff *skb,
skb                93 net/netfilter/nf_conntrack_proto_udp.c 	if (udp_error(skb, dataoff, state))
skb               113 net/netfilter/nf_conntrack_proto_udp.c 		nf_ct_refresh_acct(ct, ctinfo, skb, extra);
skb               119 net/netfilter/nf_conntrack_proto_udp.c 		nf_ct_refresh_acct(ct, ctinfo, skb,
skb               126 net/netfilter/nf_conntrack_proto_udp.c static void udplite_error_log(const struct sk_buff *skb,
skb               130 net/netfilter/nf_conntrack_proto_udp.c 	nf_l4proto_log_invalid(skb, state->net, state->pf,
skb               134 net/netfilter/nf_conntrack_proto_udp.c static bool udplite_error(struct sk_buff *skb,
skb               138 net/netfilter/nf_conntrack_proto_udp.c 	unsigned int udplen = skb->len - dataoff;
skb               144 net/netfilter/nf_conntrack_proto_udp.c 	hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
skb               146 net/netfilter/nf_conntrack_proto_udp.c 		udplite_error_log(skb, state, "short packet");
skb               154 net/netfilter/nf_conntrack_proto_udp.c 		udplite_error_log(skb, state, "invalid checksum coverage");
skb               160 net/netfilter/nf_conntrack_proto_udp.c 		udplite_error_log(skb, state, "checksum missing");
skb               167 net/netfilter/nf_conntrack_proto_udp.c 	    nf_checksum_partial(skb, state->hook, dataoff, cscov, IPPROTO_UDP,
skb               169 net/netfilter/nf_conntrack_proto_udp.c 		udplite_error_log(skb, state, "bad checksum");
skb               178 net/netfilter/nf_conntrack_proto_udp.c 				struct sk_buff *skb,
skb               185 net/netfilter/nf_conntrack_proto_udp.c 	if (udplite_error(skb, dataoff, state))
skb               195 net/netfilter/nf_conntrack_proto_udp.c 		nf_ct_refresh_acct(ct, ctinfo, skb,
skb               201 net/netfilter/nf_conntrack_proto_udp.c 		nf_ct_refresh_acct(ct, ctinfo, skb,
skb               238 net/netfilter/nf_conntrack_proto_udp.c udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
skb               242 net/netfilter/nf_conntrack_proto_udp.c 	if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
skb               244 net/netfilter/nf_conntrack_proto_udp.c 	    nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED,
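
udp_error() above is the template the other _error() helpers follow: bounds-check the header, cross-check the protocol's own length field against what the skb actually holds, then verify the checksum. A condensed sketch of the first two checks; the nf_checksum() step is left out here since the listing shows it verbatim:

#include <linux/skbuff.h>
#include <linux/udp.h>

/* True when the UDP header at @dataoff is unusable. */
static bool udp_header_bogus(const struct sk_buff *skb,
			     unsigned int dataoff)
{
	unsigned int udplen = skb->len - dataoff;
	const struct udphdr *hdr;
	struct udphdr _hdr;

	hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
	if (!hdr)
		return true;	/* truncated header */
	if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr))
		return true;	/* length field disagrees with the skb */
	return false;
}
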
skb                62 net/netfilter/nf_conntrack_sane.c static int help(struct sk_buff *skb,
skb                85 net/netfilter/nf_conntrack_sane.c 	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
skb                91 net/netfilter/nf_conntrack_sane.c 	if (dataoff >= skb->len)
skb                94 net/netfilter/nf_conntrack_sane.c 	datalen = skb->len - dataoff;
skb                97 net/netfilter/nf_conntrack_sane.c 	sb_ptr = skb_header_pointer(skb, dataoff, datalen, sane_buffer);
skb               142 net/netfilter/nf_conntrack_sane.c 		nf_ct_helper_log(skb, ct, "cannot alloc expectation");
skb               157 net/netfilter/nf_conntrack_sane.c 		nf_ct_helper_log(skb, ct, "cannot add expectation");
skb                60 net/netfilter/nf_conntrack_seqadj.c void nf_ct_tcp_seqadj_set(struct sk_buff *skb,
skb                69 net/netfilter/nf_conntrack_seqadj.c 	th = (struct tcphdr *)(skb_network_header(skb) + ip_hdrlen(skb));
skb                75 net/netfilter/nf_conntrack_seqadj.c static void nf_ct_sack_block_adjust(struct sk_buff *skb,
skb                85 net/netfilter/nf_conntrack_seqadj.c 		sack = (void *)skb->data + sackoff;
skb               106 net/netfilter/nf_conntrack_seqadj.c 		inet_proto_csum_replace4(&tcph->check, skb,
skb               108 net/netfilter/nf_conntrack_seqadj.c 		inet_proto_csum_replace4(&tcph->check, skb,
skb               117 net/netfilter/nf_conntrack_seqadj.c static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
skb               122 net/netfilter/nf_conntrack_seqadj.c 	struct tcphdr *tcph = (void *)skb->data + protoff;
skb               129 net/netfilter/nf_conntrack_seqadj.c 	if (skb_ensure_writable(skb, optend))
skb               132 net/netfilter/nf_conntrack_seqadj.c 	tcph = (void *)skb->data + protoff;
skb               137 net/netfilter/nf_conntrack_seqadj.c 		unsigned char *op = skb->data + optoff;
skb               154 net/netfilter/nf_conntrack_seqadj.c 				nf_ct_sack_block_adjust(skb, tcph, optoff + 2,
skb               164 net/netfilter/nf_conntrack_seqadj.c int nf_ct_seq_adjust(struct sk_buff *skb,
skb               179 net/netfilter/nf_conntrack_seqadj.c 	if (skb_ensure_writable(skb, protoff + sizeof(*tcph)))
skb               182 net/netfilter/nf_conntrack_seqadj.c 	tcph = (void *)skb->data + protoff;
skb               190 net/netfilter/nf_conntrack_seqadj.c 	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
skb               205 net/netfilter/nf_conntrack_seqadj.c 	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack,
skb               212 net/netfilter/nf_conntrack_seqadj.c 	res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
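
nf_ct_seq_adjust() above never recomputes the whole TCP checksum after shifting sequence numbers; inet_proto_csum_replace4() patches it incrementally from the old and new 32-bit values alone. A minimal sketch of one such rewrite, assuming the header was already made writable with skb_ensure_writable() as in the listing:

#include <linux/tcp.h>
#include <net/checksum.h>

static void rewrite_seq(struct sk_buff *skb, struct tcphdr *tcph, s32 off)
{
	__be32 newseq = htonl(ntohl(tcph->seq) + off);

	/* incremental fixup: only the changed 4 bytes enter the sum;
	 * 'false' because seq is not part of the pseudo-header */
	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
	tcph->seq = newseq;
}
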
skb               846 net/netfilter/nf_conntrack_sip.c static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
skb               855 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               871 net/netfilter/nf_conntrack_sip.c 		struct net_device *dev = skb_dst(skb)->dev;
skb               951 net/netfilter/nf_conntrack_sip.c 		    !hooks->sdp_port(skb, protoff, dataoff, dptr, datalen,
skb               973 net/netfilter/nf_conntrack_sip.c 		ret = hooks->sdp_media(skb, protoff, dataoff, dptr,
skb              1026 net/netfilter/nf_conntrack_sip.c static int process_sdp(struct sk_buff *skb, unsigned int protoff,
skb              1032 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1083 net/netfilter/nf_conntrack_sip.c 			nf_ct_helper_log(skb, ct, "wrong port %u", port);
skb              1097 net/netfilter/nf_conntrack_sip.c 			nf_ct_helper_log(skb, ct, "cannot parse SDP message");
skb              1101 net/netfilter/nf_conntrack_sip.c 		ret = set_expected_rtp_rtcp(skb, protoff, dataoff,
skb              1106 net/netfilter/nf_conntrack_sip.c 			nf_ct_helper_log(skb, ct,
skb              1113 net/netfilter/nf_conntrack_sip.c 			ret = hooks->sdp_addr(skb, protoff, dataoff,
skb              1119 net/netfilter/nf_conntrack_sip.c 				nf_ct_helper_log(skb, ct, "cannot mangle SDP");
skb              1129 net/netfilter/nf_conntrack_sip.c 		ret = hooks->sdp_session(skb, protoff, dataoff,
skb              1135 net/netfilter/nf_conntrack_sip.c static int process_invite_response(struct sk_buff *skb, unsigned int protoff,
skb              1141 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1146 net/netfilter/nf_conntrack_sip.c 		return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
skb              1152 net/netfilter/nf_conntrack_sip.c static int process_update_response(struct sk_buff *skb, unsigned int protoff,
skb              1158 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1163 net/netfilter/nf_conntrack_sip.c 		return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
skb              1169 net/netfilter/nf_conntrack_sip.c static int process_prack_response(struct sk_buff *skb, unsigned int protoff,
skb              1175 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1180 net/netfilter/nf_conntrack_sip.c 		return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
skb              1186 net/netfilter/nf_conntrack_sip.c static int process_invite_request(struct sk_buff *skb, unsigned int protoff,
skb              1192 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1197 net/netfilter/nf_conntrack_sip.c 	ret = process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
skb              1203 net/netfilter/nf_conntrack_sip.c static int process_bye_request(struct sk_buff *skb, unsigned int protoff,
skb              1209 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1219 net/netfilter/nf_conntrack_sip.c static int process_register_request(struct sk_buff *skb, unsigned int protoff,
skb              1225 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1257 net/netfilter/nf_conntrack_sip.c 		nf_ct_helper_log(skb, ct, "cannot parse contact");
skb              1273 net/netfilter/nf_conntrack_sip.c 		nf_ct_helper_log(skb, ct, "cannot parse expires");
skb              1284 net/netfilter/nf_conntrack_sip.c 		nf_ct_helper_log(skb, ct, "cannot alloc expectation");
skb              1300 net/netfilter/nf_conntrack_sip.c 		ret = hooks->expect(skb, protoff, dataoff, dptr, datalen,
skb              1304 net/netfilter/nf_conntrack_sip.c 			nf_ct_helper_log(skb, ct, "cannot add expectation");
skb              1317 net/netfilter/nf_conntrack_sip.c static int process_register_response(struct sk_buff *skb, unsigned int protoff,
skb              1323 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1361 net/netfilter/nf_conntrack_sip.c 			nf_ct_helper_log(skb, ct, "cannot parse contact");
skb              1379 net/netfilter/nf_conntrack_sip.c 			nf_ct_helper_log(skb, ct, "cannot parse expires");
skb              1403 net/netfilter/nf_conntrack_sip.c static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
skb              1408 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1416 net/netfilter/nf_conntrack_sip.c 		nf_ct_helper_log(skb, ct, "cannot get code");
skb              1422 net/netfilter/nf_conntrack_sip.c 		nf_ct_helper_log(skb, ct, "cannot parse cseq");
skb              1427 net/netfilter/nf_conntrack_sip.c 		nf_ct_helper_log(skb, ct, "cannot get cseq");
skb              1441 net/netfilter/nf_conntrack_sip.c 		return handler->response(skb, protoff, dataoff, dptr, datalen,
skb              1447 net/netfilter/nf_conntrack_sip.c static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
skb              1452 net/netfilter/nf_conntrack_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb              1488 net/netfilter/nf_conntrack_sip.c 			nf_ct_helper_log(skb, ct, "cannot parse cseq");
skb              1493 net/netfilter/nf_conntrack_sip.c 			nf_ct_helper_log(skb, ct, "cannot get cseq");
skb              1497 net/netfilter/nf_conntrack_sip.c 		return handler->request(skb, protoff, dataoff, dptr, datalen,
skb              1503 net/netfilter/nf_conntrack_sip.c static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
skb              1511 net/netfilter/nf_conntrack_sip.c 		ret = process_sip_request(skb, protoff, dataoff, dptr, datalen);
skb              1513 net/netfilter/nf_conntrack_sip.c 		ret = process_sip_response(skb, protoff, dataoff, dptr, datalen);
skb              1517 net/netfilter/nf_conntrack_sip.c 		if (hooks && !hooks->msg(skb, protoff, dataoff,
skb              1519 net/netfilter/nf_conntrack_sip.c 			nf_ct_helper_log(skb, ct, "cannot NAT SIP message");
skb              1527 net/netfilter/nf_conntrack_sip.c static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
skb              1544 net/netfilter/nf_conntrack_sip.c 	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
skb              1548 net/netfilter/nf_conntrack_sip.c 	if (dataoff >= skb->len)
skb              1551 net/netfilter/nf_conntrack_sip.c 	nf_ct_refresh(ct, skb, sip_timeout * HZ);
skb              1553 net/netfilter/nf_conntrack_sip.c 	if (unlikely(skb_linearize(skb)))
skb              1556 net/netfilter/nf_conntrack_sip.c 	dptr = skb->data + dataoff;
skb              1557 net/netfilter/nf_conntrack_sip.c 	datalen = skb->len - dataoff;
skb              1587 net/netfilter/nf_conntrack_sip.c 		ret = process_sip_msg(skb, ct, protoff, dataoff,
skb              1605 net/netfilter/nf_conntrack_sip.c 			hooks->seq_adjust(skb, protoff, tdiff);
skb              1611 net/netfilter/nf_conntrack_sip.c static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
skb              1619 net/netfilter/nf_conntrack_sip.c 	if (dataoff >= skb->len)
skb              1622 net/netfilter/nf_conntrack_sip.c 	nf_ct_refresh(ct, skb, sip_timeout * HZ);
skb              1624 net/netfilter/nf_conntrack_sip.c 	if (unlikely(skb_linearize(skb)))
skb              1627 net/netfilter/nf_conntrack_sip.c 	dptr = skb->data + dataoff;
skb              1628 net/netfilter/nf_conntrack_sip.c 	datalen = skb->len - dataoff;
skb              1632 net/netfilter/nf_conntrack_sip.c 	return process_sip_msg(skb, ct, protoff, dataoff, &dptr, &datalen);
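
sip_help_tcp() and sip_help_udp() above linearize the skb, point dptr at the SIP payload, and hand it to process_sip_msg(), which dispatches to process_sip_request() or process_sip_response(). The split is made on the SIP version prefix at the start of the message; a hedged userspace illustration of that classification (sip_is_response() is an invented name for this sketch):

#include <stdio.h>
#include <string.h>
#include <strings.h>

static int sip_is_response(const char *dptr, size_t datalen)
{
	/* responses start with the protocol version, requests with a method */
	static const char version[] = "SIP/2.0";

	return datalen >= strlen(version) &&
	       strncasecmp(dptr, version, strlen(version)) == 0;
}

int main(void)
{
	const char *msg = "SIP/2.0 200 OK\r\n";

	printf("%s\n", sip_is_response(msg, strlen(msg)) ?
	       "response" : "request");
	return 0;
}
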
skb                28 net/netfilter/nf_conntrack_snmp.c int (*nf_nat_snmp_hook)(struct sk_buff *skb,
skb                34 net/netfilter/nf_conntrack_snmp.c static int snmp_conntrack_help(struct sk_buff *skb, unsigned int protoff,
skb                40 net/netfilter/nf_conntrack_snmp.c 	nf_conntrack_broadcast_help(skb, ct, ctinfo, timeout);
skb                44 net/netfilter/nf_conntrack_snmp.c 		return nf_nat_snmp(skb, protoff, ct, ctinfo);
skb                35 net/netfilter/nf_conntrack_tftp.c unsigned int (*nf_nat_tftp_hook)(struct sk_buff *skb,
skb                40 net/netfilter/nf_conntrack_tftp.c static int tftp_help(struct sk_buff *skb,
skb                52 net/netfilter/nf_conntrack_tftp.c 	tfh = skb_header_pointer(skb, protoff + sizeof(struct udphdr),
skb                66 net/netfilter/nf_conntrack_tftp.c 			nf_ct_helper_log(skb, ct, "cannot alloc expectation");
skb                80 net/netfilter/nf_conntrack_tftp.c 			ret = nf_nat_tftp(skb, ctinfo, exp);
skb                82 net/netfilter/nf_conntrack_tftp.c 			nf_ct_helper_log(skb, ct, "cannot add expectation");
skb                16 net/netfilter/nf_dup_netdev.c static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
skb                18 net/netfilter/nf_dup_netdev.c 	if (skb_mac_header_was_set(skb))
skb                19 net/netfilter/nf_dup_netdev.c 		skb_push(skb, skb->mac_len);
skb                21 net/netfilter/nf_dup_netdev.c 	skb->dev = dev;
skb                22 net/netfilter/nf_dup_netdev.c 	dev_queue_xmit(skb);
skb                31 net/netfilter/nf_dup_netdev.c 		kfree_skb(pkt->skb);
skb                35 net/netfilter/nf_dup_netdev.c 	nf_do_netdev_egress(pkt->skb, dev);
skb                42 net/netfilter/nf_dup_netdev.c 	struct sk_buff *skb;
skb                48 net/netfilter/nf_dup_netdev.c 	skb = skb_clone(pkt->skb, GFP_ATOMIC);
skb                49 net/netfilter/nf_dup_netdev.c 	if (skb)
skb                50 net/netfilter/nf_dup_netdev.c 		nf_do_netdev_egress(skb, dev);
skb               347 net/netfilter/nf_flow_table_core.c static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
skb               352 net/netfilter/nf_flow_table_core.c 	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
skb               353 net/netfilter/nf_flow_table_core.c 	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
skb               356 net/netfilter/nf_flow_table_core.c 	tcph = (void *)(skb_network_header(skb) + thoff);
skb               357 net/netfilter/nf_flow_table_core.c 	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
skb               362 net/netfilter/nf_flow_table_core.c static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
skb               367 net/netfilter/nf_flow_table_core.c 	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
skb               368 net/netfilter/nf_flow_table_core.c 	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
skb               371 net/netfilter/nf_flow_table_core.c 	udph = (void *)(skb_network_header(skb) + thoff);
skb               372 net/netfilter/nf_flow_table_core.c 	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
skb               373 net/netfilter/nf_flow_table_core.c 		inet_proto_csum_replace2(&udph->check, skb, port,
skb               382 net/netfilter/nf_flow_table_core.c static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
skb               387 net/netfilter/nf_flow_table_core.c 		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
skb               391 net/netfilter/nf_flow_table_core.c 		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
skb               400 net/netfilter/nf_flow_table_core.c 		      struct sk_buff *skb, unsigned int thoff,
skb               406 net/netfilter/nf_flow_table_core.c 	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
skb               407 net/netfilter/nf_flow_table_core.c 	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
skb               410 net/netfilter/nf_flow_table_core.c 	hdr = (void *)(skb_network_header(skb) + thoff);
skb               427 net/netfilter/nf_flow_table_core.c 	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
skb               432 net/netfilter/nf_flow_table_core.c 		      struct sk_buff *skb, unsigned int thoff,
skb               438 net/netfilter/nf_flow_table_core.c 	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
skb               439 net/netfilter/nf_flow_table_core.c 	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
skb               442 net/netfilter/nf_flow_table_core.c 	hdr = (void *)(skb_network_header(skb) + thoff);
skb               459 net/netfilter/nf_flow_table_core.c 	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
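
nf_flow_nat_port_udp() above only touches udph->check when it is nonzero or the skb is CHECKSUM_PARTIAL, because a zero UDP checksum on IPv4 means "no checksum carried"; for the same reason a rewritten checksum must never come out as zero. A small sketch of that rule, assuming update16() is the 16-bit variant of the incremental update shown earlier (all names here are illustrative):

#include <stdbool.h>
#include <stdint.h>

static uint16_t update16(uint16_t check, uint16_t from, uint16_t to)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~from + to;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static void udp_nat_port(uint16_t *check, uint16_t *port,
			 uint16_t new_port, bool csum_partial)
{
	if (*check || csum_partial) {		/* 0 means "no checksum" on IPv4 */
		*check = update16(*check, *port, new_port);
		if (!*check)
			*check = 0xffff;	/* keep a carried checksum nonzero */
	}
	*port = new_port;
}
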
skb                11 net/netfilter/nf_flow_table_inet.c nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
skb                14 net/netfilter/nf_flow_table_inet.c 	switch (skb->protocol) {
skb                16 net/netfilter/nf_flow_table_inet.c 		return nf_flow_offload_ip_hook(priv, skb, state);
skb                18 net/netfilter/nf_flow_table_inet.c 		return nf_flow_offload_ipv6_hook(priv, skb, state);
skb                20 net/netfilter/nf_flow_table_ip.c 			       struct sk_buff *skb, unsigned int thoff)
skb                27 net/netfilter/nf_flow_table_ip.c 	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
skb                30 net/netfilter/nf_flow_table_ip.c 	tcph = (void *)(skb_network_header(skb) + thoff);
skb                39 net/netfilter/nf_flow_table_ip.c static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
skb                44 net/netfilter/nf_flow_table_ip.c 	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
skb                45 net/netfilter/nf_flow_table_ip.c 	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
skb                48 net/netfilter/nf_flow_table_ip.c 	tcph = (void *)(skb_network_header(skb) + thoff);
skb                49 net/netfilter/nf_flow_table_ip.c 	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
skb                54 net/netfilter/nf_flow_table_ip.c static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
skb                59 net/netfilter/nf_flow_table_ip.c 	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
skb                60 net/netfilter/nf_flow_table_ip.c 	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
skb                63 net/netfilter/nf_flow_table_ip.c 	udph = (void *)(skb_network_header(skb) + thoff);
skb                64 net/netfilter/nf_flow_table_ip.c 	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
skb                65 net/netfilter/nf_flow_table_ip.c 		inet_proto_csum_replace4(&udph->check, skb, addr,
skb                74 net/netfilter/nf_flow_table_ip.c static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
skb                80 net/netfilter/nf_flow_table_ip.c 		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
skb                84 net/netfilter/nf_flow_table_ip.c 		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
skb                92 net/netfilter/nf_flow_table_ip.c static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
skb               114 net/netfilter/nf_flow_table_ip.c 	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
skb               117 net/netfilter/nf_flow_table_ip.c static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
skb               139 net/netfilter/nf_flow_table_ip.c 	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
skb               142 net/netfilter/nf_flow_table_ip.c static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
skb               145 net/netfilter/nf_flow_table_ip.c 	struct iphdr *iph = ip_hdr(skb);
skb               148 net/netfilter/nf_flow_table_ip.c 	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
skb               149 net/netfilter/nf_flow_table_ip.c 	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
skb               152 net/netfilter/nf_flow_table_ip.c 	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
skb               153 net/netfilter/nf_flow_table_ip.c 	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
skb               164 net/netfilter/nf_flow_table_ip.c static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
skb               171 net/netfilter/nf_flow_table_ip.c 	if (!pskb_may_pull(skb, sizeof(*iph)))
skb               174 net/netfilter/nf_flow_table_ip.c 	iph = ip_hdr(skb);
skb               189 net/netfilter/nf_flow_table_ip.c 	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
skb               192 net/netfilter/nf_flow_table_ip.c 	iph = ip_hdr(skb);
skb               193 net/netfilter/nf_flow_table_ip.c 	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
skb               207 net/netfilter/nf_flow_table_ip.c static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
skb               209 net/netfilter/nf_flow_table_ip.c 	if (skb->len <= mtu)
skb               212 net/netfilter/nf_flow_table_ip.c 	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
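
nf_flow_exceeds_mtu() above is the fast-path fit test: a packet within the cached MTU passes outright, and GSO packets are instead validated per segment via skb_gso_validate_network_len(). A rough standalone rendering (struct pkt and the gso_size comparison are simplifying assumptions; real segment validation accounts for header lengths too):

#include <stdbool.h>
#include <stddef.h>

struct pkt {
	size_t len;		/* total packet length */
	size_t gso_size;	/* 0 if not a GSO packet */
};

static bool flow_exceeds_mtu(const struct pkt *p, size_t mtu)
{
	if (p->len <= mtu)
		return false;
	if (p->gso_size && p->gso_size <= mtu)	/* approximation: segments fit */
		return false;
	return true;
}
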
skb               226 net/netfilter/nf_flow_table_ip.c static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
skb               230 net/netfilter/nf_flow_table_ip.c 	skb_orphan(skb);
skb               231 net/netfilter/nf_flow_table_ip.c 	skb_dst_set_noref(skb, dst);
skb               232 net/netfilter/nf_flow_table_ip.c 	dst_output(state->net, state->sk, skb);
skb               237 net/netfilter/nf_flow_table_ip.c nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
skb               251 net/netfilter/nf_flow_table_ip.c 	if (skb->protocol != htons(ETH_P_IP))
skb               254 net/netfilter/nf_flow_table_ip.c 	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
skb               266 net/netfilter/nf_flow_table_ip.c 	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
skb               269 net/netfilter/nf_flow_table_ip.c 	if (skb_try_make_writable(skb, sizeof(*iph)))
skb               272 net/netfilter/nf_flow_table_ip.c 	thoff = ip_hdr(skb)->ihl * 4;
skb               273 net/netfilter/nf_flow_table_ip.c 	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
skb               281 net/netfilter/nf_flow_table_ip.c 	if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
skb               285 net/netfilter/nf_flow_table_ip.c 	iph = ip_hdr(skb);
skb               287 net/netfilter/nf_flow_table_ip.c 	skb->tstamp = 0;
skb               290 net/netfilter/nf_flow_table_ip.c 		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
skb               291 net/netfilter/nf_flow_table_ip.c 		IPCB(skb)->iif = skb->dev->ifindex;
skb               292 net/netfilter/nf_flow_table_ip.c 		IPCB(skb)->flags = IPSKB_FORWARDED;
skb               293 net/netfilter/nf_flow_table_ip.c 		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
skb               296 net/netfilter/nf_flow_table_ip.c 	skb->dev = outdev;
skb               298 net/netfilter/nf_flow_table_ip.c 	skb_dst_set_noref(skb, &rt->dst);
skb               299 net/netfilter/nf_flow_table_ip.c 	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
skb               305 net/netfilter/nf_flow_table_ip.c static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
skb               311 net/netfilter/nf_flow_table_ip.c 	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
skb               312 net/netfilter/nf_flow_table_ip.c 	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
skb               315 net/netfilter/nf_flow_table_ip.c 	tcph = (void *)(skb_network_header(skb) + thoff);
skb               316 net/netfilter/nf_flow_table_ip.c 	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
skb               322 net/netfilter/nf_flow_table_ip.c static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
skb               328 net/netfilter/nf_flow_table_ip.c 	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
skb               329 net/netfilter/nf_flow_table_ip.c 	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
skb               332 net/netfilter/nf_flow_table_ip.c 	udph = (void *)(skb_network_header(skb) + thoff);
skb               333 net/netfilter/nf_flow_table_ip.c 	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
skb               334 net/netfilter/nf_flow_table_ip.c 		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
skb               343 net/netfilter/nf_flow_table_ip.c static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
skb               349 net/netfilter/nf_flow_table_ip.c 		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
skb               353 net/netfilter/nf_flow_table_ip.c 		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
skb               362 net/netfilter/nf_flow_table_ip.c 			     struct sk_buff *skb, struct ipv6hdr *ip6h,
skb               383 net/netfilter/nf_flow_table_ip.c 	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
skb               387 net/netfilter/nf_flow_table_ip.c 			     struct sk_buff *skb, struct ipv6hdr *ip6h,
skb               408 net/netfilter/nf_flow_table_ip.c 	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
skb               412 net/netfilter/nf_flow_table_ip.c 			    struct sk_buff *skb,
skb               415 net/netfilter/nf_flow_table_ip.c 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               419 net/netfilter/nf_flow_table_ip.c 	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
skb               420 net/netfilter/nf_flow_table_ip.c 	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
skb               423 net/netfilter/nf_flow_table_ip.c 	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
skb               424 net/netfilter/nf_flow_table_ip.c 	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
skb               430 net/netfilter/nf_flow_table_ip.c static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
skb               437 net/netfilter/nf_flow_table_ip.c 	if (!pskb_may_pull(skb, sizeof(*ip6h)))
skb               440 net/netfilter/nf_flow_table_ip.c 	ip6h = ipv6_hdr(skb);
skb               450 net/netfilter/nf_flow_table_ip.c 	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
skb               453 net/netfilter/nf_flow_table_ip.c 	ip6h = ipv6_hdr(skb);
skb               454 net/netfilter/nf_flow_table_ip.c 	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
skb               468 net/netfilter/nf_flow_table_ip.c nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
skb               481 net/netfilter/nf_flow_table_ip.c 	if (skb->protocol != htons(ETH_P_IPV6))
skb               484 net/netfilter/nf_flow_table_ip.c 	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
skb               496 net/netfilter/nf_flow_table_ip.c 	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
skb               499 net/netfilter/nf_flow_table_ip.c 	if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
skb               508 net/netfilter/nf_flow_table_ip.c 	if (skb_try_make_writable(skb, sizeof(*ip6h)))
skb               511 net/netfilter/nf_flow_table_ip.c 	if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
skb               515 net/netfilter/nf_flow_table_ip.c 	ip6h = ipv6_hdr(skb);
skb               517 net/netfilter/nf_flow_table_ip.c 	skb->tstamp = 0;
skb               520 net/netfilter/nf_flow_table_ip.c 		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
skb               521 net/netfilter/nf_flow_table_ip.c 		IP6CB(skb)->iif = skb->dev->ifindex;
skb               522 net/netfilter/nf_flow_table_ip.c 		IP6CB(skb)->flags = IP6SKB_FORWARDED;
skb               523 net/netfilter/nf_flow_table_ip.c 		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
skb               526 net/netfilter/nf_flow_table_ip.c 	skb->dev = outdev;
skb               528 net/netfilter/nf_flow_table_ip.c 	skb_dst_set_noref(skb, &rt->dst);
skb               529 net/netfilter/nf_flow_table_ip.c 	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
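
The lookups driving both hooks start with nf_flow_tuple_ip()/nf_flow_tuple_ipv6(): pull the network header, derive the transport offset (ihl * 4 in the IPv4 case), then read the port pair at that offset. A hypothetical userspace version of the IPv4 case, keeping values in network byte order as the fragments above do (struct flow_tuple and flow_tuple_ip() are names invented for this sketch):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct flow_tuple {
	uint32_t saddr, daddr;	/* network byte order */
	uint16_t sport, dport;
	uint8_t  proto;
};

static int flow_tuple_ip(const uint8_t *pkt, size_t len, struct flow_tuple *t)
{
	size_t thoff;

	if (len < 20 || (pkt[0] >> 4) != 4)	/* minimal, version-4 header */
		return -1;
	thoff = (pkt[0] & 0x0f) * 4;		/* ihl * 4, as in the kernel */
	if (thoff < 20 || len < thoff + 4)
		return -1;

	t->proto = pkt[9];
	if (t->proto != 6 && t->proto != 17)	/* TCP/UDP only, like the flow table */
		return -1;
	memcpy(&t->saddr, pkt + 12, 4);
	memcpy(&t->daddr, pkt + 16, 4);
	memcpy(&t->sport, pkt + thoff, 2);
	memcpy(&t->dport, pkt + thoff + 2, 2);
	return 0;
}

int main(void)
{
	/* 20-byte IPv4/UDP header followed by ports 0x1234 -> 0x0035 */
	uint8_t pkt[24] = { 0x45, 0, 0, 0, 0, 0, 0, 0, 64, 17 };
	struct flow_tuple t;

	pkt[20] = 0x12; pkt[21] = 0x34; pkt[22] = 0x00; pkt[23] = 0x35;
	return flow_tuple_ip(pkt, sizeof(pkt), &t);
}
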
skb               218 net/netfilter/nf_log.c 		   const struct sk_buff *skb,
skb               238 net/netfilter/nf_log.c 		logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
skb               247 net/netfilter/nf_log.c 		  const struct sk_buff *skb,
skb               262 net/netfilter/nf_log.c 		logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
skb                21 net/netfilter/nf_log_common.c int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
skb                37 net/netfilter/nf_log_common.c 	uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
skb                39 net/netfilter/nf_log_common.c 		nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
skb                53 net/netfilter/nf_log_common.c int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
skb                67 net/netfilter/nf_log_common.c 	th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
skb                69 net/netfilter/nf_log_common.c 		nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
skb               113 net/netfilter/nf_log_common.c 		op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
skb               151 net/netfilter/nf_log_common.c 			  unsigned int hooknum, const struct sk_buff *skb,
skb               164 net/netfilter/nf_log_common.c 	physindev = nf_bridge_get_physindev(skb);
skb               167 net/netfilter/nf_log_common.c 	physoutdev = nf_bridge_get_physoutdev(skb);
skb               178 net/netfilter/nf_log_common.c 		     const struct sk_buff *skb,
skb               186 net/netfilter/nf_log_common.c 		nf_log_packet(net, NFPROTO_IPV4, hooknum, skb, in, out,
skb               190 net/netfilter/nf_log_common.c 		nf_log_packet(net, NFPROTO_IPV6, hooknum, skb, in, out,
skb               195 net/netfilter/nf_log_common.c 		nf_log_packet(net, NFPROTO_ARP, hooknum, skb, in, out,
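
nf_log_dump_tcp_header() above fetches the bytes after the fixed TCP header with skb_header_pointer() and walks them option by option; the seqadj and synproxy entries elsewhere in this listing do the same walk to find SACK and timestamp options. The walk itself as a hedged standalone sketch (walk_tcp_options() is an invented name):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static void walk_tcp_options(const uint8_t *op, size_t optlen)
{
	size_t i = 0;

	while (i < optlen) {
		uint8_t kind = op[i];

		if (kind == 0)			/* EOL: end of option list */
			break;
		if (kind == 1) {		/* NOP: one byte, no length field */
			i++;
			continue;
		}
		if (i + 1 >= optlen || op[i + 1] < 2 || i + op[i + 1] > optlen)
			break;			/* malformed length: stop */
		printf("option kind %u, len %u\n", kind, op[i + 1]);
		i += op[i + 1];
	}
}

int main(void)
{
	/* MSS(2,4,0x05b4), NOP, NOP, SACK-permitted(4,2) */
	const uint8_t opts[] = { 2, 4, 0x05, 0xb4, 1, 1, 4, 2 };

	walk_tcp_options(opts, sizeof(opts));
	return 0;
}
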
skb                17 net/netfilter/nf_log_netdev.c 				 const struct sk_buff *skb,
skb                23 net/netfilter/nf_log_netdev.c 	nf_log_l2packet(net, pf, skb->protocol, hooknum, skb, in, out,
skb                28 net/netfilter/nf_nat_amanda.c static unsigned int help(struct sk_buff *skb,
skb                61 net/netfilter/nf_nat_amanda.c 		nf_ct_helper_log(skb, exp->master, "all ports in use");
skb                66 net/netfilter/nf_nat_amanda.c 	if (!nf_nat_mangle_udp_packet(skb, exp->master, ctinfo,
skb                69 net/netfilter/nf_nat_amanda.c 		nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
skb                55 net/netfilter/nf_nat_core.c static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
skb                87 net/netfilter/nf_nat_core.c static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
skb               121 net/netfilter/nf_nat_core.c static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
skb               129 net/netfilter/nf_nat_core.c 	ct = nf_ct_get(skb, &ctinfo);
skb               142 net/netfilter/nf_nat_core.c 		nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl);
skb               145 net/netfilter/nf_nat_core.c 		nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl);
skb               150 net/netfilter/nf_nat_core.c int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
skb               155 net/netfilter/nf_nat_core.c 	struct sock *sk = skb->sk;
skb               158 net/netfilter/nf_nat_core.c 	err = xfrm_decode_session(skb, &fl, family);
skb               162 net/netfilter/nf_nat_core.c 	dst = skb_dst(skb);
skb               175 net/netfilter/nf_nat_core.c 	skb_dst_drop(skb);
skb               176 net/netfilter/nf_nat_core.c 	skb_dst_set(skb, dst);
skb               179 net/netfilter/nf_nat_core.c 	hh_len = skb_dst(skb)->dev->hard_header_len;
skb               180 net/netfilter/nf_nat_core.c 	if (skb_headroom(skb) < hh_len &&
skb               181 net/netfilter/nf_nat_core.c 	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
skb               698 net/netfilter/nf_nat_core.c 			   struct sk_buff *skb)
skb               716 net/netfilter/nf_nat_core.c 		verdict = nf_nat_manip_pkt(skb, ct, mtype, dir);
skb               723 net/netfilter/nf_nat_core.c nf_nat_inet_fn(void *priv, struct sk_buff *skb,
skb               732 net/netfilter/nf_nat_core.c 	ct = nf_ct_get(skb, &ctinfo);
skb               761 net/netfilter/nf_nat_core.c 				ret = e->hooks[i].hook(e->hooks[i].priv, skb,
skb               789 net/netfilter/nf_nat_core.c 	return nf_nat_packet(ct, ctinfo, state->hook, skb);
skb               792 net/netfilter/nf_nat_core.c 	nf_ct_kill_acct(ct, ctinfo, skb);
skb                63 net/netfilter/nf_nat_ftp.c static unsigned int nf_nat_ftp(struct sk_buff *skb,
skb               104 net/netfilter/nf_nat_ftp.c 		nf_ct_helper_log(skb, ct, "all ports in use");
skb               115 net/netfilter/nf_nat_ftp.c 	if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
skb               122 net/netfilter/nf_nat_ftp.c 	nf_ct_helper_log(skb, ct, "cannot mangle packet");
skb                25 net/netfilter/nf_nat_helper.c static void mangle_contents(struct sk_buff *skb,
skb                34 net/netfilter/nf_nat_helper.c 	SKB_LINEAR_ASSERT(skb);
skb                35 net/netfilter/nf_nat_helper.c 	data = skb_network_header(skb) + dataoff;
skb                40 net/netfilter/nf_nat_helper.c 		skb_tail_pointer(skb) - (skb_network_header(skb) + dataoff +
skb                49 net/netfilter/nf_nat_helper.c 			 "%u from %u bytes\n", rep_len - match_len, skb->len);
skb                50 net/netfilter/nf_nat_helper.c 		skb_put(skb, rep_len - match_len);
skb                53 net/netfilter/nf_nat_helper.c 			 "%u from %u bytes\n", match_len - rep_len, skb->len);
skb                54 net/netfilter/nf_nat_helper.c 		__skb_trim(skb, skb->len + rep_len - match_len);
skb                57 net/netfilter/nf_nat_helper.c 	if (nf_ct_l3num((struct nf_conn *)skb_nfct(skb)) == NFPROTO_IPV4) {
skb                59 net/netfilter/nf_nat_helper.c 		ip_hdr(skb)->tot_len = htons(skb->len);
skb                60 net/netfilter/nf_nat_helper.c 		ip_send_check(ip_hdr(skb));
skb                62 net/netfilter/nf_nat_helper.c 		ipv6_hdr(skb)->payload_len =
skb                63 net/netfilter/nf_nat_helper.c 			htons(skb->len - sizeof(struct ipv6hdr));
skb                67 net/netfilter/nf_nat_helper.c static bool enlarge_skb(struct sk_buff *skb, unsigned int extra)
skb                69 net/netfilter/nf_nat_helper.c 	if (skb->len + extra > 65535)
skb                72 net/netfilter/nf_nat_helper.c 	if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
skb                86 net/netfilter/nf_nat_helper.c bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
skb                98 net/netfilter/nf_nat_helper.c 	if (skb_ensure_writable(skb, skb->len))
skb               102 net/netfilter/nf_nat_helper.c 	    rep_len - match_len > skb_tailroom(skb) &&
skb               103 net/netfilter/nf_nat_helper.c 	    !enlarge_skb(skb, rep_len - match_len))
skb               106 net/netfilter/nf_nat_helper.c 	tcph = (void *)skb->data + protoff;
skb               108 net/netfilter/nf_nat_helper.c 	oldlen = skb->len - protoff;
skb               109 net/netfilter/nf_nat_helper.c 	mangle_contents(skb, protoff + tcph->doff*4,
skb               112 net/netfilter/nf_nat_helper.c 	datalen = skb->len - protoff;
skb               114 net/netfilter/nf_nat_helper.c 	nf_nat_csum_recalc(skb, nf_ct_l3num(ct), IPPROTO_TCP,
skb               136 net/netfilter/nf_nat_helper.c nf_nat_mangle_udp_packet(struct sk_buff *skb,
skb               148 net/netfilter/nf_nat_helper.c 	if (skb_ensure_writable(skb, skb->len))
skb               152 net/netfilter/nf_nat_helper.c 	    rep_len - match_len > skb_tailroom(skb) &&
skb               153 net/netfilter/nf_nat_helper.c 	    !enlarge_skb(skb, rep_len - match_len))
skb               156 net/netfilter/nf_nat_helper.c 	udph = (void *)skb->data + protoff;
skb               158 net/netfilter/nf_nat_helper.c 	oldlen = skb->len - protoff;
skb               159 net/netfilter/nf_nat_helper.c 	mangle_contents(skb, protoff + sizeof(*udph),
skb               163 net/netfilter/nf_nat_helper.c 	datalen = skb->len - protoff;
skb               167 net/netfilter/nf_nat_helper.c 	if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
skb               170 net/netfilter/nf_nat_helper.c 	nf_nat_csum_recalc(skb, nf_ct_l3num(ct), IPPROTO_UDP,
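
mangle_contents() above edits payload bytes of a linearized skb in place: shift the tail so a match_len region can carry rep_len bytes, copy the replacement in, then grow or trim the skb and patch the IP length field; the TCP/UDP wrappers finish with nf_nat_csum_recalc(). A userspace sketch of the buffer surgery only (mangle() and the FTP-style sample are assumptions of this sketch):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

static size_t mangle(uint8_t *buf, size_t len, size_t cap,
		     size_t matchoff, size_t match_len,
		     const uint8_t *rep, size_t rep_len)
{
	size_t newlen = len - match_len + rep_len;

	if (matchoff + match_len > len || newlen > cap)
		return 0;			/* replacement would not fit */

	/* move everything after the match, like the kernel's tail shift */
	memmove(buf + matchoff + rep_len,
		buf + matchoff + match_len,
		len - matchoff - match_len);
	memcpy(buf + matchoff, rep, rep_len);	/* drop in the replacement */
	return newlen;
}

int main(void)
{
	uint8_t buf[64] = "PORT 10,0,0,1,4,1\r\n";
	size_t n = mangle(buf, 19, sizeof(buf), 5, 12,
			  (const uint8_t *)"192,168,0,9,4,1", 15);

	printf("%zu: %s", n, (char *)buf);
	return 0;
}
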
skb                32 net/netfilter/nf_nat_irc.c static unsigned int help(struct sk_buff *skb,
skb                66 net/netfilter/nf_nat_irc.c 		nf_ct_helper_log(skb, ct, "all ports in use");
skb                88 net/netfilter/nf_nat_irc.c 	if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
skb                90 net/netfilter/nf_nat_irc.c 		nf_ct_helper_log(skb, ct, "cannot mangle packet");
skb                16 net/netfilter/nf_nat_masquerade.c nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
skb                29 net/netfilter/nf_nat_masquerade.c 	ct = nf_ct_get(skb, &ctinfo);
skb                40 net/netfilter/nf_nat_masquerade.c 	rt = skb_rtable(skb);
skb                41 net/netfilter/nf_nat_masquerade.c 	nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
skb               159 net/netfilter/nf_nat_masquerade.c nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
skb               168 net/netfilter/nf_nat_masquerade.c 	ct = nf_ct_get(skb, &ctinfo);
skb               173 net/netfilter/nf_nat_masquerade.c 				   &ipv6_hdr(skb)->daddr, 0, &src) < 0)
skb                33 net/netfilter/nf_nat_proto.c static void nf_csum_update(struct sk_buff *skb,
skb                39 net/netfilter/nf_nat_proto.c __udp_manip_pkt(struct sk_buff *skb,
skb                56 net/netfilter/nf_nat_proto.c 		nf_csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
skb                57 net/netfilter/nf_nat_proto.c 		inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
skb                65 net/netfilter/nf_nat_proto.c static bool udp_manip_pkt(struct sk_buff *skb,
skb                72 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
skb                75 net/netfilter/nf_nat_proto.c 	hdr = (struct udphdr *)(skb->data + hdroff);
skb                76 net/netfilter/nf_nat_proto.c 	__udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check);
skb                81 net/netfilter/nf_nat_proto.c static bool udplite_manip_pkt(struct sk_buff *skb,
skb                89 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
skb                92 net/netfilter/nf_nat_proto.c 	hdr = (struct udphdr *)(skb->data + hdroff);
skb                93 net/netfilter/nf_nat_proto.c 	__udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, true);
skb                99 net/netfilter/nf_nat_proto.c sctp_manip_pkt(struct sk_buff *skb,
skb               112 net/netfilter/nf_nat_proto.c 	if (skb->len >= hdroff + sizeof(*hdr))
skb               115 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, hdroff + hdrsize))
skb               118 net/netfilter/nf_nat_proto.c 	hdr = (struct sctphdr *)(skb->data + hdroff);
skb               131 net/netfilter/nf_nat_proto.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               132 net/netfilter/nf_nat_proto.c 		hdr->checksum = sctp_compute_cksum(skb, hdroff);
skb               133 net/netfilter/nf_nat_proto.c 		skb->ip_summed = CHECKSUM_NONE;
skb               141 net/netfilter/nf_nat_proto.c tcp_manip_pkt(struct sk_buff *skb,
skb               153 net/netfilter/nf_nat_proto.c 	if (skb->len >= hdroff + sizeof(struct tcphdr))
skb               156 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, hdroff + hdrsize))
skb               159 net/netfilter/nf_nat_proto.c 	hdr = (struct tcphdr *)(skb->data + hdroff);
skb               177 net/netfilter/nf_nat_proto.c 	nf_csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
skb               178 net/netfilter/nf_nat_proto.c 	inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
skb               183 net/netfilter/nf_nat_proto.c dccp_manip_pkt(struct sk_buff *skb,
skb               193 net/netfilter/nf_nat_proto.c 	if (skb->len >= hdroff + sizeof(struct dccp_hdr))
skb               196 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, hdroff + hdrsize))
skb               199 net/netfilter/nf_nat_proto.c 	hdr = (struct dccp_hdr *)(skb->data + hdroff);
skb               215 net/netfilter/nf_nat_proto.c 	nf_csum_update(skb, iphdroff, &hdr->dccph_checksum, tuple, maniptype);
skb               216 net/netfilter/nf_nat_proto.c 	inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
skb               223 net/netfilter/nf_nat_proto.c icmp_manip_pkt(struct sk_buff *skb,
skb               230 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
skb               233 net/netfilter/nf_nat_proto.c 	hdr = (struct icmphdr *)(skb->data + hdroff);
skb               247 net/netfilter/nf_nat_proto.c 	inet_proto_csum_replace2(&hdr->checksum, skb,
skb               254 net/netfilter/nf_nat_proto.c icmpv6_manip_pkt(struct sk_buff *skb,
skb               261 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
skb               264 net/netfilter/nf_nat_proto.c 	hdr = (struct icmp6hdr *)(skb->data + hdroff);
skb               265 net/netfilter/nf_nat_proto.c 	nf_csum_update(skb, iphdroff, &hdr->icmp6_cksum, tuple, maniptype);
skb               268 net/netfilter/nf_nat_proto.c 		inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
skb               278 net/netfilter/nf_nat_proto.c gre_manip_pkt(struct sk_buff *skb,
skb               289 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, hdroff + sizeof(*pgreh) - 8))
skb               292 net/netfilter/nf_nat_proto.c 	greh = (void *)skb->data + hdroff;
skb               317 net/netfilter/nf_nat_proto.c static bool l4proto_manip_pkt(struct sk_buff *skb,
skb               324 net/netfilter/nf_nat_proto.c 		return tcp_manip_pkt(skb, iphdroff, hdroff,
skb               327 net/netfilter/nf_nat_proto.c 		return udp_manip_pkt(skb, iphdroff, hdroff,
skb               330 net/netfilter/nf_nat_proto.c 		return udplite_manip_pkt(skb, iphdroff, hdroff,
skb               333 net/netfilter/nf_nat_proto.c 		return sctp_manip_pkt(skb, iphdroff, hdroff,
skb               336 net/netfilter/nf_nat_proto.c 		return icmp_manip_pkt(skb, iphdroff, hdroff,
skb               339 net/netfilter/nf_nat_proto.c 		return icmpv6_manip_pkt(skb, iphdroff, hdroff,
skb               342 net/netfilter/nf_nat_proto.c 		return dccp_manip_pkt(skb, iphdroff, hdroff,
skb               345 net/netfilter/nf_nat_proto.c 		return gre_manip_pkt(skb, iphdroff, hdroff,
skb               353 net/netfilter/nf_nat_proto.c static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
skb               361 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, iphdroff + sizeof(*iph)))
skb               364 net/netfilter/nf_nat_proto.c 	iph = (void *)skb->data + iphdroff;
skb               367 net/netfilter/nf_nat_proto.c 	if (!l4proto_manip_pkt(skb, iphdroff, hdroff, target, maniptype))
skb               369 net/netfilter/nf_nat_proto.c 	iph = (void *)skb->data + iphdroff;
skb               381 net/netfilter/nf_nat_proto.c static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
skb               392 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, iphdroff + sizeof(*ipv6h)))
skb               395 net/netfilter/nf_nat_proto.c 	ipv6h = (void *)skb->data + iphdroff;
skb               397 net/netfilter/nf_nat_proto.c 	hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h),
skb               403 net/netfilter/nf_nat_proto.c 	    !l4proto_manip_pkt(skb, iphdroff, hdroff, target, maniptype))
skb               407 net/netfilter/nf_nat_proto.c 	ipv6h = (void *)skb->data + iphdroff;
skb               419 net/netfilter/nf_nat_proto.c unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
skb               430 net/netfilter/nf_nat_proto.c 		if (nf_nat_ipv6_manip_pkt(skb, 0, &target, mtype))
skb               434 net/netfilter/nf_nat_proto.c 		if (nf_nat_ipv4_manip_pkt(skb, 0, &target, mtype))
skb               445 net/netfilter/nf_nat_proto.c static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
skb               450 net/netfilter/nf_nat_proto.c 	struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
skb               460 net/netfilter/nf_nat_proto.c 	inet_proto_csum_replace4(check, skb, oldip, newip, true);
skb               463 net/netfilter/nf_nat_proto.c static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
skb               469 net/netfilter/nf_nat_proto.c 	const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff);
skb               479 net/netfilter/nf_nat_proto.c 	inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
skb               484 net/netfilter/nf_nat_proto.c static void nf_csum_update(struct sk_buff *skb,
skb               491 net/netfilter/nf_nat_proto.c 		nf_nat_ipv4_csum_update(skb, iphdroff, check, t, maniptype);
skb               494 net/netfilter/nf_nat_proto.c 		nf_nat_ipv6_csum_update(skb, iphdroff, check, t, maniptype);
skb               499 net/netfilter/nf_nat_proto.c static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
skb               503 net/netfilter/nf_nat_proto.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               504 net/netfilter/nf_nat_proto.c 		const struct iphdr *iph = ip_hdr(skb);
skb               506 net/netfilter/nf_nat_proto.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb               507 net/netfilter/nf_nat_proto.c 		skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
skb               508 net/netfilter/nf_nat_proto.c 			ip_hdrlen(skb);
skb               509 net/netfilter/nf_nat_proto.c 		skb->csum_offset = (void *)check - data;
skb               513 net/netfilter/nf_nat_proto.c 		inet_proto_csum_replace2(check, skb,
skb               519 net/netfilter/nf_nat_proto.c static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
skb               523 net/netfilter/nf_nat_proto.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               524 net/netfilter/nf_nat_proto.c 		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               526 net/netfilter/nf_nat_proto.c 		skb->ip_summed = CHECKSUM_PARTIAL;
skb               527 net/netfilter/nf_nat_proto.c 		skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
skb               528 net/netfilter/nf_nat_proto.c 			(data - (void *)skb->data);
skb               529 net/netfilter/nf_nat_proto.c 		skb->csum_offset = (void *)check - data;
skb               533 net/netfilter/nf_nat_proto.c 		inet_proto_csum_replace2(check, skb,
skb               539 net/netfilter/nf_nat_proto.c void nf_nat_csum_recalc(struct sk_buff *skb,
skb               545 net/netfilter/nf_nat_proto.c 		nf_nat_ipv4_csum_recalc(skb, proto, data, check,
skb               550 net/netfilter/nf_nat_proto.c 		nf_nat_ipv6_csum_recalc(skb, proto, data, check,
skb               559 net/netfilter/nf_nat_proto.c int nf_nat_icmp_reply_translation(struct sk_buff *skb,
skb               570 net/netfilter/nf_nat_proto.c 	unsigned int hdrlen = ip_hdrlen(skb);
skb               576 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
skb               578 net/netfilter/nf_nat_proto.c 	if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
skb               581 net/netfilter/nf_nat_proto.c 	inside = (void *)skb->data + hdrlen;
skb               601 net/netfilter/nf_nat_proto.c 	if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
skb               605 net/netfilter/nf_nat_proto.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               607 net/netfilter/nf_nat_proto.c 		inside = (void *)skb->data + hdrlen;
skb               610 net/netfilter/nf_nat_proto.c 			csum_fold(skb_checksum(skb, hdrlen,
skb               611 net/netfilter/nf_nat_proto.c 					       skb->len - hdrlen, 0));
skb               617 net/netfilter/nf_nat_proto.c 	if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
skb               625 net/netfilter/nf_nat_proto.c nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
skb               631 net/netfilter/nf_nat_proto.c 	ct = nf_ct_get(skb, &ctinfo);
skb               636 net/netfilter/nf_nat_proto.c 		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
skb               637 net/netfilter/nf_nat_proto.c 			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
skb               645 net/netfilter/nf_nat_proto.c 	return nf_nat_inet_fn(priv, skb, state);
skb               649 net/netfilter/nf_nat_proto.c nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
skb               653 net/netfilter/nf_nat_proto.c 	__be32 daddr = ip_hdr(skb)->daddr;
skb               655 net/netfilter/nf_nat_proto.c 	ret = nf_nat_ipv4_fn(priv, skb, state);
skb               656 net/netfilter/nf_nat_proto.c 	if (ret == NF_ACCEPT && daddr != ip_hdr(skb)->daddr)
skb               657 net/netfilter/nf_nat_proto.c 		skb_dst_drop(skb);
skb               663 net/netfilter/nf_nat_proto.c nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
skb               673 net/netfilter/nf_nat_proto.c 	ret = nf_nat_ipv4_fn(priv, skb, state);
skb               678 net/netfilter/nf_nat_proto.c 	if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
skb               681 net/netfilter/nf_nat_proto.c 	ct = nf_ct_get(skb, &ctinfo);
skb               690 net/netfilter/nf_nat_proto.c 			err = nf_xfrm_me_harder(state->net, skb, AF_INET);
skb               700 net/netfilter/nf_nat_proto.c nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
skb               708 net/netfilter/nf_nat_proto.c 	ret = nf_nat_ipv4_fn(priv, skb, state);
skb               712 net/netfilter/nf_nat_proto.c 	ct = nf_ct_get(skb, &ctinfo);
skb               718 net/netfilter/nf_nat_proto.c 			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
skb               723 net/netfilter/nf_nat_proto.c 		else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
skb               727 net/netfilter/nf_nat_proto.c 			err = nf_xfrm_me_harder(state->net, skb, AF_INET);
skb               781 net/netfilter/nf_nat_proto.c int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
skb               798 net/netfilter/nf_nat_proto.c 	if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
skb               800 net/netfilter/nf_nat_proto.c 	if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6))
skb               803 net/netfilter/nf_nat_proto.c 	inside = (void *)skb->data + hdrlen;
skb               823 net/netfilter/nf_nat_proto.c 	if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6),
skb               827 net/netfilter/nf_nat_proto.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               828 net/netfilter/nf_nat_proto.c 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               830 net/netfilter/nf_nat_proto.c 		inside = (void *)skb->data + hdrlen;
skb               834 net/netfilter/nf_nat_proto.c 					skb->len - hdrlen, IPPROTO_ICMPV6,
skb               835 net/netfilter/nf_nat_proto.c 					skb_checksum(skb, hdrlen,
skb               836 net/netfilter/nf_nat_proto.c 						     skb->len - hdrlen, 0));
skb               841 net/netfilter/nf_nat_proto.c 	if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
skb               849 net/netfilter/nf_nat_proto.c nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
skb               858 net/netfilter/nf_nat_proto.c 	ct = nf_ct_get(skb, &ctinfo);
skb               868 net/netfilter/nf_nat_proto.c 		nexthdr = ipv6_hdr(skb)->nexthdr;
skb               869 net/netfilter/nf_nat_proto.c 		hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
skb               873 net/netfilter/nf_nat_proto.c 			if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
skb               882 net/netfilter/nf_nat_proto.c 	return nf_nat_inet_fn(priv, skb, state);
skb               886 net/netfilter/nf_nat_proto.c nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
skb               890 net/netfilter/nf_nat_proto.c 	struct in6_addr daddr = ipv6_hdr(skb)->daddr;
skb               892 net/netfilter/nf_nat_proto.c 	ret = nf_nat_ipv6_fn(priv, skb, state);
skb               894 net/netfilter/nf_nat_proto.c 	    ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
skb               895 net/netfilter/nf_nat_proto.c 		skb_dst_drop(skb);
skb               901 net/netfilter/nf_nat_proto.c nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
skb               911 net/netfilter/nf_nat_proto.c 	ret = nf_nat_ipv6_fn(priv, skb, state);
skb               916 net/netfilter/nf_nat_proto.c 	if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
skb               918 net/netfilter/nf_nat_proto.c 	ct = nf_ct_get(skb, &ctinfo);
skb               927 net/netfilter/nf_nat_proto.c 			err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
skb               938 net/netfilter/nf_nat_proto.c nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
skb               946 net/netfilter/nf_nat_proto.c 	ret = nf_nat_ipv6_fn(priv, skb, state);
skb               950 net/netfilter/nf_nat_proto.c 	ct = nf_ct_get(skb, &ctinfo);
skb               956 net/netfilter/nf_nat_proto.c 			err = nf_ip6_route_me_harder(state->net, skb);
skb               961 net/netfilter/nf_nat_proto.c 		else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
skb               965 net/netfilter/nf_nat_proto.c 			err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
skb                28 net/netfilter/nf_nat_redirect.c nf_nat_redirect_ipv4(struct sk_buff *skb,
skb                40 net/netfilter/nf_nat_redirect.c 	ct = nf_ct_get(skb, &ctinfo);
skb                51 net/netfilter/nf_nat_redirect.c 		indev = __in_dev_get_rcu(skb->dev);
skb                81 net/netfilter/nf_nat_redirect.c nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
skb                89 net/netfilter/nf_nat_redirect.c 	ct = nf_ct_get(skb, &ctinfo);
skb                97 net/netfilter/nf_nat_redirect.c 		idev = __in6_dev_get(skb->dev);
skb                34 net/netfilter/nf_nat_sip.c static unsigned int mangle_packet(struct sk_buff *skb, unsigned int protoff,
skb                41 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb                46 net/netfilter/nf_nat_sip.c 		th = (struct tcphdr *)(skb->data + protoff);
skb                50 net/netfilter/nf_nat_sip.c 		if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
skb                58 net/netfilter/nf_nat_sip.c 		if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
skb                65 net/netfilter/nf_nat_sip.c 	*dptr = skb->data + dataoff;
skb                92 net/netfilter/nf_nat_sip.c static int map_addr(struct sk_buff *skb, unsigned int protoff,
skb                99 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               123 net/netfilter/nf_nat_sip.c 	return mangle_packet(skb, protoff, dataoff, dptr, datalen,
skb               127 net/netfilter/nf_nat_sip.c static int map_sip_addr(struct sk_buff *skb, unsigned int protoff,
skb               133 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               141 net/netfilter/nf_nat_sip.c 	return map_addr(skb, protoff, dataoff, dptr, datalen,
skb               145 net/netfilter/nf_nat_sip.c static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
skb               150 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               164 net/netfilter/nf_nat_sip.c 		    !map_addr(skb, protoff, dataoff, dptr, datalen,
skb               166 net/netfilter/nf_nat_sip.c 			nf_ct_helper_log(skb, ct, "cannot mangle SIP message");
skb               200 net/netfilter/nf_nat_sip.c 		if (!map_addr(skb, protoff, dataoff, dptr, datalen,
skb               202 net/netfilter/nf_nat_sip.c 			nf_ct_helper_log(skb, ct, "cannot mangle Via header");
skb               218 net/netfilter/nf_nat_sip.c 			if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
skb               220 net/netfilter/nf_nat_sip.c 				nf_ct_helper_log(skb, ct, "cannot mangle maddr");
skb               235 net/netfilter/nf_nat_sip.c 			if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
skb               237 net/netfilter/nf_nat_sip.c 				nf_ct_helper_log(skb, ct, "cannot mangle received");
skb               251 net/netfilter/nf_nat_sip.c 			if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
skb               253 net/netfilter/nf_nat_sip.c 				nf_ct_helper_log(skb, ct, "cannot mangle rport");
skb               267 net/netfilter/nf_nat_sip.c 		if (!map_addr(skb, protoff, dataoff, dptr, datalen,
skb               270 net/netfilter/nf_nat_sip.c 			nf_ct_helper_log(skb, ct, "cannot mangle contact");
skb               275 net/netfilter/nf_nat_sip.c 	if (!map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_FROM) ||
skb               276 net/netfilter/nf_nat_sip.c 	    !map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_TO)) {
skb               277 net/netfilter/nf_nat_sip.c 		nf_ct_helper_log(skb, ct, "cannot mangle SIP from/to");
skb               285 net/netfilter/nf_nat_sip.c 		if (skb_ensure_writable(skb, skb->len)) {
skb               286 net/netfilter/nf_nat_sip.c 			nf_ct_helper_log(skb, ct, "cannot mangle packet");
skb               290 net/netfilter/nf_nat_sip.c 		uh = (void *)skb->data + protoff;
skb               293 net/netfilter/nf_nat_sip.c 		if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, protoff,
skb               295 net/netfilter/nf_nat_sip.c 			nf_ct_helper_log(skb, ct, "cannot mangle packet");
skb               303 net/netfilter/nf_nat_sip.c static void nf_nat_sip_seq_adjust(struct sk_buff *skb, unsigned int protoff,
skb               307 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               313 net/netfilter/nf_nat_sip.c 	th = (struct tcphdr *)(skb->data + protoff);
skb               373 net/netfilter/nf_nat_sip.c static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
skb               381 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               427 net/netfilter/nf_nat_sip.c 		nf_ct_helper_log(skb, ct, "all ports in use for SIP");
skb               434 net/netfilter/nf_nat_sip.c 		if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
skb               436 net/netfilter/nf_nat_sip.c 			nf_ct_helper_log(skb, ct, "cannot mangle packet");
skb               447 net/netfilter/nf_nat_sip.c static int mangle_content_len(struct sk_buff *skb, unsigned int protoff,
skb               452 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               470 net/netfilter/nf_nat_sip.c 	return mangle_packet(skb, protoff, dataoff, dptr, datalen,
skb               474 net/netfilter/nf_nat_sip.c static int mangle_sdp_packet(struct sk_buff *skb, unsigned int protoff,
skb               483 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               489 net/netfilter/nf_nat_sip.c 	return mangle_packet(skb, protoff, dataoff, dptr, datalen,
skb               493 net/netfilter/nf_nat_sip.c static unsigned int nf_nat_sdp_addr(struct sk_buff *skb, unsigned int protoff,
skb               502 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               507 net/netfilter/nf_nat_sip.c 	if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen,
skb               511 net/netfilter/nf_nat_sip.c 	return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
skb               514 net/netfilter/nf_nat_sip.c static unsigned int nf_nat_sdp_port(struct sk_buff *skb, unsigned int protoff,
skb               525 net/netfilter/nf_nat_sip.c 	if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
skb               529 net/netfilter/nf_nat_sip.c 	return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
skb               532 net/netfilter/nf_nat_sip.c static unsigned int nf_nat_sdp_session(struct sk_buff *skb, unsigned int protoff,
skb               539 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               545 net/netfilter/nf_nat_sip.c 	if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff,
skb               549 net/netfilter/nf_nat_sip.c 	switch (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff,
skb               566 net/netfilter/nf_nat_sip.c 	return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
skb               571 net/netfilter/nf_nat_sip.c static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
skb               581 net/netfilter/nf_nat_sip.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               634 net/netfilter/nf_nat_sip.c 		nf_ct_helper_log(skb, ct, "all ports in use for SDP media");
skb               640 net/netfilter/nf_nat_sip.c 	    !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
skb               642 net/netfilter/nf_nat_sip.c 		nf_ct_helper_log(skb, ct, "cannot mangle SDP message");
skb                23 net/netfilter/nf_nat_tftp.c static unsigned int help(struct sk_buff *skb,
skb                34 net/netfilter/nf_nat_tftp.c 		nf_ct_helper_log(skb, exp->master, "cannot add expectation");
skb                49 net/netfilter/nf_queue.c static void nf_queue_entry_release_br_nf_refs(struct sk_buff *skb)
skb                52 net/netfilter/nf_queue.c 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb                57 net/netfilter/nf_queue.c 		physdev = nf_bridge_get_physindev(skb);
skb                60 net/netfilter/nf_queue.c 		physdev = nf_bridge_get_physoutdev(skb);
skb                79 net/netfilter/nf_queue.c 	nf_queue_entry_release_br_nf_refs(entry->skb);
skb                83 net/netfilter/nf_queue.c static void nf_queue_entry_get_br_nf_refs(struct sk_buff *skb)
skb                86 net/netfilter/nf_queue.c 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb                91 net/netfilter/nf_queue.c 		physdev = nf_bridge_get_physindev(skb);
skb                94 net/netfilter/nf_queue.c 		physdev = nf_bridge_get_physoutdev(skb);
skb               113 net/netfilter/nf_queue.c 	nf_queue_entry_get_br_nf_refs(entry->skb);
skb               129 net/netfilter/nf_queue.c static void nf_ip_saveroute(const struct sk_buff *skb,
skb               135 net/netfilter/nf_queue.c 		const struct iphdr *iph = ip_hdr(skb);
skb               140 net/netfilter/nf_queue.c 		rt_info->mark = skb->mark;
skb               144 net/netfilter/nf_queue.c static void nf_ip6_saveroute(const struct sk_buff *skb,
skb               150 net/netfilter/nf_queue.c 		const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               154 net/netfilter/nf_queue.c 		rt_info->mark = skb->mark;
skb               158 net/netfilter/nf_queue.c static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
skb               192 net/netfilter/nf_queue.c 	if (skb_dst(skb) && !skb_dst_force(skb)) {
skb               198 net/netfilter/nf_queue.c 		.skb	= skb,
skb               208 net/netfilter/nf_queue.c 		nf_ip_saveroute(skb, entry);
skb               211 net/netfilter/nf_queue.c 		nf_ip6_saveroute(skb, entry);
skb               230 net/netfilter/nf_queue.c int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
skb               235 net/netfilter/nf_queue.c 	ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
skb               240 net/netfilter/nf_queue.c 		kfree_skb(skb);
skb               247 net/netfilter/nf_queue.c static unsigned int nf_iterate(struct sk_buff *skb,
skb               258 net/netfilter/nf_queue.c 		verdict = nf_hook_entry_hookfn(hook, skb, state);
skb               296 net/netfilter/nf_queue.c 	struct sk_buff *skb = entry->skb;
skb               311 net/netfilter/nf_queue.c 		kfree_skb(skb);
skb               320 net/netfilter/nf_queue.c 		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
skb               323 net/netfilter/nf_queue.c 		if (nf_reroute(skb, entry) < 0)
skb               330 net/netfilter/nf_queue.c 		verdict = nf_iterate(skb, &entry->state, hooks, &i);
skb               337 net/netfilter/nf_queue.c 		entry->state.okfn(entry->state.net, entry->state.sk, skb);
skb               341 net/netfilter/nf_queue.c 		err = nf_queue(skb, &entry->state, i, verdict);
skb               348 net/netfilter/nf_queue.c 		kfree_skb(skb);
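
The nf_queue() excerpt passes "verdict >> NF_VERDICT_QBITS" down to __nf_queue(): the queue number lives in the upper bits of the verdict word, above the verdict code itself. A runnable illustration of that encoding, assuming only the installed Linux uapi header:

/* Build: cc -o verdict verdict.c (Linux uapi headers required). */
#include <stdio.h>
#include <linux/netfilter.h>	/* NF_QUEUE, NF_VERDICT_MASK, NF_VERDICT_QBITS */

int main(void)
{
	unsigned int verdict = NF_QUEUE_NR(7);	/* "queue this skb to queue 7" */

	printf("verdict code: %u (NF_QUEUE is %d)\n",
	       verdict & NF_VERDICT_MASK, NF_QUEUE);
	printf("queue number: %u\n", verdict >> NF_VERDICT_QBITS);
	return 0;
}
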
skb                28 net/netfilter/nf_synproxy_core.c synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
skb                34 net/netfilter/nf_synproxy_core.c 	ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
skb               180 net/netfilter/nf_synproxy_core.c synproxy_tstamp_adjust(struct sk_buff *skb, unsigned int protoff,
skb               194 net/netfilter/nf_synproxy_core.c 	if (skb_ensure_writable(skb, optend))
skb               198 net/netfilter/nf_synproxy_core.c 		unsigned char *op = skb->data + optoff;
skb               224 net/netfilter/nf_synproxy_core.c 				inet_proto_csum_replace4(&th->check, skb,
skb               414 net/netfilter/nf_synproxy_core.c synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr,
skb               419 net/netfilter/nf_synproxy_core.c 	skb_reset_network_header(skb);
skb               420 net/netfilter/nf_synproxy_core.c 	iph = skb_put(skb, sizeof(*iph));
skb               437 net/netfilter/nf_synproxy_core.c 		  const struct sk_buff *skb, struct sk_buff *nskb,
skb               447 net/netfilter/nf_synproxy_core.c 	skb_dst_set_noref(nskb, skb_dst(skb));
skb               466 net/netfilter/nf_synproxy_core.c 			    const struct sk_buff *skb, const struct tcphdr *th,
skb               475 net/netfilter/nf_synproxy_core.c 	iph = ip_hdr(skb);
skb               502 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
skb               509 net/netfilter/nf_synproxy_core.c 			 const struct sk_buff *skb, const struct tcphdr *th,
skb               518 net/netfilter/nf_synproxy_core.c 	iph = ip_hdr(skb);
skb               548 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
skb               555 net/netfilter/nf_synproxy_core.c 			 const struct sk_buff *skb, const struct tcphdr *th,
skb               563 net/netfilter/nf_synproxy_core.c 	iph = ip_hdr(skb);
skb               588 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
skb               593 net/netfilter/nf_synproxy_core.c 			 const struct sk_buff *skb, const struct tcphdr *th,
skb               601 net/netfilter/nf_synproxy_core.c 	iph = ip_hdr(skb);
skb               626 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
skb               632 net/netfilter/nf_synproxy_core.c 			 const struct sk_buff *skb, const struct tcphdr *th,
skb               638 net/netfilter/nf_synproxy_core.c 	mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1);
skb               651 net/netfilter/nf_synproxy_core.c 	synproxy_send_server_syn(net, skb, th, opts, recv_seq);
skb               657 net/netfilter/nf_synproxy_core.c ipv4_synproxy_hook(void *priv, struct sk_buff *skb,
skb               670 net/netfilter/nf_synproxy_core.c 	ct = nf_ct_get(skb, &ctinfo);
skb               678 net/netfilter/nf_synproxy_core.c 	if (nf_is_loopback_packet(skb) ||
skb               679 net/netfilter/nf_synproxy_core.c 	    ip_hdr(skb)->protocol != IPPROTO_TCP)
skb               682 net/netfilter/nf_synproxy_core.c 	thoff = ip_hdrlen(skb);
skb               683 net/netfilter/nf_synproxy_core.c 	th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
skb               710 net/netfilter/nf_synproxy_core.c 		if (!synproxy_parse_options(skb, thoff, th, &opts))
skb               719 net/netfilter/nf_synproxy_core.c 			if (synproxy_recv_client_ack(net, skb, th, &opts,
skb               722 net/netfilter/nf_synproxy_core.c 				consume_skb(skb);
skb               739 net/netfilter/nf_synproxy_core.c 		if (!synproxy_parse_options(skb, thoff, th, &opts))
skb               752 net/netfilter/nf_synproxy_core.c 		synproxy_send_server_ack(net, state, skb, th, &opts);
skb               758 net/netfilter/nf_synproxy_core.c 		synproxy_send_client_ack(net, skb, th, &opts);
skb               760 net/netfilter/nf_synproxy_core.c 		consume_skb(skb);
skb               766 net/netfilter/nf_synproxy_core.c 	synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
skb               813 net/netfilter/nf_synproxy_core.c synproxy_build_ip_ipv6(struct net *net, struct sk_buff *skb,
skb               819 net/netfilter/nf_synproxy_core.c 	skb_reset_network_header(skb);
skb               820 net/netfilter/nf_synproxy_core.c 	iph = skb_put(skb, sizeof(*iph));
skb               832 net/netfilter/nf_synproxy_core.c 		       const struct sk_buff *skb, struct sk_buff *nskb,
skb               852 net/netfilter/nf_synproxy_core.c 	security_skb_classify_flow((struct sk_buff *)skb,
skb               879 net/netfilter/nf_synproxy_core.c 				 const struct sk_buff *skb,
skb               889 net/netfilter/nf_synproxy_core.c 	iph = ipv6_hdr(skb);
skb               916 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp_ipv6(net, skb, nskb, skb_nfct(skb),
skb               923 net/netfilter/nf_synproxy_core.c synproxy_send_server_syn_ipv6(struct net *net, const struct sk_buff *skb,
skb               933 net/netfilter/nf_synproxy_core.c 	iph = ipv6_hdr(skb);
skb               963 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp_ipv6(net, skb, nskb, &snet->tmpl->ct_general,
skb               969 net/netfilter/nf_synproxy_core.c 			      const struct sk_buff *skb,
skb               978 net/netfilter/nf_synproxy_core.c 	iph = ipv6_hdr(skb);
skb              1003 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp_ipv6(net, skb, nskb, NULL, 0, niph, nth,
skb              1008 net/netfilter/nf_synproxy_core.c synproxy_send_client_ack_ipv6(struct net *net, const struct sk_buff *skb,
skb              1017 net/netfilter/nf_synproxy_core.c 	iph = ipv6_hdr(skb);
skb              1042 net/netfilter/nf_synproxy_core.c 	synproxy_send_tcp_ipv6(net, skb, nskb, skb_nfct(skb),
skb              1049 net/netfilter/nf_synproxy_core.c 			      const struct sk_buff *skb,
skb              1056 net/netfilter/nf_synproxy_core.c 	mss = nf_cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1);
skb              1069 net/netfilter/nf_synproxy_core.c 	synproxy_send_server_syn_ipv6(net, skb, th, opts, recv_seq);
skb              1075 net/netfilter/nf_synproxy_core.c ipv6_synproxy_hook(void *priv, struct sk_buff *skb,
skb              1090 net/netfilter/nf_synproxy_core.c 	ct = nf_ct_get(skb, &ctinfo);
skb              1098 net/netfilter/nf_synproxy_core.c 	if (nf_is_loopback_packet(skb))
skb              1101 net/netfilter/nf_synproxy_core.c 	nexthdr = ipv6_hdr(skb)->nexthdr;
skb              1102 net/netfilter/nf_synproxy_core.c 	thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
skb              1107 net/netfilter/nf_synproxy_core.c 	th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
skb              1134 net/netfilter/nf_synproxy_core.c 		if (!synproxy_parse_options(skb, thoff, th, &opts))
skb              1143 net/netfilter/nf_synproxy_core.c 			if (synproxy_recv_client_ack_ipv6(net, skb, th, &opts,
skb              1146 net/netfilter/nf_synproxy_core.c 				consume_skb(skb);
skb              1163 net/netfilter/nf_synproxy_core.c 		if (!synproxy_parse_options(skb, thoff, th, &opts))
skb              1176 net/netfilter/nf_synproxy_core.c 		synproxy_send_server_ack_ipv6(net, state, skb, th, &opts);
skb              1182 net/netfilter/nf_synproxy_core.c 		synproxy_send_client_ack_ipv6(net, skb, th, &opts);
skb              1184 net/netfilter/nf_synproxy_core.c 		consume_skb(skb);
skb              1190 net/netfilter/nf_synproxy_core.c 	synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
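
synproxy_parse_options() first copies the TCP option block with skb_header_pointer() (so it works even when the options are not in the linear skb area) and then walks kind/length pairs. A freestanding, runnable re-implementation of that walk; the TCPOPT_* values are the standard TCP option kinds, not taken from kernel headers:

#include <stdio.h>
#include <stdint.h>

#define TCPOPT_EOL	0	/* end of option list */
#define TCPOPT_NOP	1	/* 1-byte padding */
#define TCPOPT_MSS	2	/* kind, len=4, 16-bit MSS */

/* Walk a TCP option block: NOP is a single byte, EOL ends the scan,
 * every other option carries a length byte that must stay in bounds. */
static int parse_mss(const uint8_t *opt, int length, uint16_t *mss)
{
	while (length > 0) {
		int opcode = *opt++;
		int opsize;

		if (opcode == TCPOPT_EOL)
			return 0;
		if (opcode == TCPOPT_NOP) {
			length--;
			continue;
		}
		if (length < 2)
			return -1;
		opsize = *opt++;
		if (opsize < 2 || opsize > length)
			return -1;		/* malformed option */
		if (opcode == TCPOPT_MSS && opsize == 4)
			*mss = (uint16_t)(opt[0] << 8 | opt[1]);
		opt += opsize - 2;
		length -= opsize;
	}
	return 0;
}

int main(void)
{
	const uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4 };	/* NOP NOP MSS(1460) */
	uint16_t mss = 0;

	if (parse_mss(opts, sizeof(opts), &mss) == 0)
		printf("mss = %u\n", mss);	/* prints 1460 */
	return 0;
}
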
skb                88 net/netfilter/nf_tables_api.c 			 const struct sk_buff *skb,
skb               101 net/netfilter/nf_tables_api.c 	ctx->portid	= NETLINK_CB(skb).portid;
skb               595 net/netfilter/nf_tables_api.c static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
skb               603 net/netfilter/nf_tables_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
skb               612 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
skb               613 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
skb               614 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
skb               615 net/netfilter/nf_tables_api.c 	    nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
skb               619 net/netfilter/nf_tables_api.c 	nlmsg_end(skb, nlh);
skb               623 net/netfilter/nf_tables_api.c 	nlmsg_trim(skb, nlh);
skb               629 net/netfilter/nf_tables_api.c 	struct sk_buff *skb;
skb               636 net/netfilter/nf_tables_api.c 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
skb               637 net/netfilter/nf_tables_api.c 	if (skb == NULL)
skb               640 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,
skb               643 net/netfilter/nf_tables_api.c 		kfree_skb(skb);
skb               647 net/netfilter/nf_tables_api.c 	nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
skb               654 net/netfilter/nf_tables_api.c static int nf_tables_dump_tables(struct sk_buff *skb,
skb               660 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
skb               677 net/netfilter/nf_tables_api.c 		if (nf_tables_fill_table_info(skb, net,
skb               678 net/netfilter/nf_tables_api.c 					      NETLINK_CB(cb->skb).portid,
skb               684 net/netfilter/nf_tables_api.c 		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb               691 net/netfilter/nf_tables_api.c 	return skb->len;
skb               694 net/netfilter/nf_tables_api.c static int nft_netlink_dump_start_rcu(struct sock *nlsk, struct sk_buff *skb,
skb               704 net/netfilter/nf_tables_api.c 	err = netlink_dump_start(nlsk, skb, nlh, c);
skb               713 net/netfilter/nf_tables_api.c 			      struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               730 net/netfilter/nf_tables_api.c 		return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
skb               743 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_table_info(skb2, net, NETLINK_CB(skb).portid,
skb               749 net/netfilter/nf_tables_api.c 	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
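
Every nf_tables_fill_*_info() excerpt above follows the same netlink build convention: reserve the header with nlmsg_put(), emit attributes with the nla_put_*() family, commit with nlmsg_end(), and on any attribute failure roll the buffer back with nlmsg_trim() so a half-built message is never sent. A minimal kernel-context sketch of the idiom (not buildable standalone):

static int fill_info_sketch(struct sk_buff *skb, u32 portid, u32 seq,
			    int event, const struct nft_table *table)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, event,
			sizeof(struct nfgenmsg), 0);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
	    nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);		/* patch the final message length */
	return 0;

nla_put_failure:
	nlmsg_trim(skb, nlh);		/* discard the partial message */
	return -EMSGSIZE;
}
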
skb               898 net/netfilter/nf_tables_api.c 			      struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               925 net/netfilter/nf_tables_api.c 		nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
skb               956 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
skb              1069 net/netfilter/nf_tables_api.c 			      struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1080 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, 0, NULL, NULL, nla);
skb              1218 net/netfilter/nf_tables_api.c static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
skb              1240 net/netfilter/nf_tables_api.c 	nest = nla_nest_start_noflag(skb, NFTA_CHAIN_COUNTERS);
skb              1244 net/netfilter/nf_tables_api.c 	if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts),
skb              1246 net/netfilter/nf_tables_api.c 	    nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes),
skb              1250 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, nest);
skb              1257 net/netfilter/nf_tables_api.c static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
skb              1266 net/netfilter/nf_tables_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
skb              1275 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
skb              1277 net/netfilter/nf_tables_api.c 	if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle),
skb              1280 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name))
skb              1289 net/netfilter/nf_tables_api.c 		nest = nla_nest_start_noflag(skb, NFTA_CHAIN_HOOK);
skb              1292 net/netfilter/nf_tables_api.c 		if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
skb              1294 net/netfilter/nf_tables_api.c 		if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
skb              1297 net/netfilter/nf_tables_api.c 		    nla_put_string(skb, NFTA_HOOK_DEV, basechain->dev_name))
skb              1299 net/netfilter/nf_tables_api.c 		nla_nest_end(skb, nest);
skb              1301 net/netfilter/nf_tables_api.c 		if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
skb              1305 net/netfilter/nf_tables_api.c 		if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
skb              1310 net/netfilter/nf_tables_api.c 		if (nft_dump_stats(skb, stats))
skb              1314 net/netfilter/nf_tables_api.c 		    nla_put_be32(skb, NFTA_CHAIN_FLAGS,
skb              1319 net/netfilter/nf_tables_api.c 	if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
skb              1322 net/netfilter/nf_tables_api.c 	nlmsg_end(skb, nlh);
skb              1326 net/netfilter/nf_tables_api.c 	nlmsg_trim(skb, nlh);
skb              1332 net/netfilter/nf_tables_api.c 	struct sk_buff *skb;
skb              1339 net/netfilter/nf_tables_api.c 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
skb              1340 net/netfilter/nf_tables_api.c 	if (skb == NULL)
skb              1343 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
skb              1347 net/netfilter/nf_tables_api.c 		kfree_skb(skb);
skb              1351 net/netfilter/nf_tables_api.c 	nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
skb              1358 net/netfilter/nf_tables_api.c static int nf_tables_dump_chains(struct sk_buff *skb,
skb              1365 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
skb              1383 net/netfilter/nf_tables_api.c 			if (nf_tables_fill_chain_info(skb, net,
skb              1384 net/netfilter/nf_tables_api.c 						      NETLINK_CB(cb->skb).portid,
skb              1392 net/netfilter/nf_tables_api.c 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              1400 net/netfilter/nf_tables_api.c 	return skb->len;
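
The nf_tables_dump_*() callbacks all implement the netlink dump contract: fill as many records as fit, remember the position in cb->args[], and return skb->len so the core calls back with a fresh buffer; a pass that adds nothing returns 0 and ends the dump. A schematic version; struct item, item_list and fill_one() are hypothetical stand-ins for the dumped objects:

struct item {				/* hypothetical record type */
	struct list_head list;
};
static LIST_HEAD(item_list);		/* hypothetical object list */
static int fill_one(struct sk_buff *skb, const struct item *it,
		    u32 portid, u32 seq);	/* hypothetical per-record filler */

static int dump_sketch(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned int idx = 0, s_idx = cb->args[0];
	const struct item *it;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &item_list, list) {
		if (idx < s_idx)
			goto cont;	/* sent in an earlier pass */
		if (fill_one(skb, it, NETLINK_CB(cb->skb).portid,
			     cb->nlh->nlmsg_seq) < 0)
			break;		/* skb full: resume at idx next time */
		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;		/* non-zero: core will call us again */
}
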
skb              1405 net/netfilter/nf_tables_api.c 			      struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1423 net/netfilter/nf_tables_api.c 		return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
skb              1442 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_chain_info(skb2, net, NETLINK_CB(skb).portid,
skb              1448 net/netfilter/nf_tables_api.c 	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
skb              1879 net/netfilter/nf_tables_api.c 			      struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1952 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
skb              1970 net/netfilter/nf_tables_api.c 			      struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              2009 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
skb              2132 net/netfilter/nf_tables_api.c static int nf_tables_fill_expr_info(struct sk_buff *skb,
skb              2135 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_EXPR_NAME, expr->ops->type->name))
skb              2139 net/netfilter/nf_tables_api.c 		struct nlattr *data = nla_nest_start_noflag(skb,
skb              2143 net/netfilter/nf_tables_api.c 		if (expr->ops->dump(skb, expr) < 0)
skb              2145 net/netfilter/nf_tables_api.c 		nla_nest_end(skb, data);
skb              2148 net/netfilter/nf_tables_api.c 	return skb->len;
skb              2154 net/netfilter/nf_tables_api.c int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
skb              2159 net/netfilter/nf_tables_api.c 	nest = nla_nest_start_noflag(skb, attr);
skb              2162 net/netfilter/nf_tables_api.c 	if (nf_tables_fill_expr_info(skb, expr) < 0)
skb              2164 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, nest);
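
nft_expr_dump() wraps each expression in a nested attribute: nla_nest_start_noflag() writes a placeholder header, nla_nest_end() patches its length once the contents are in place, and nla_nest_cancel() unwinds on failure. A condensed sketch (the real function adds a second NFTA_EXPR_DATA nest around the ops->dump() output):

static int dump_expr_sketch(struct sk_buff *skb, unsigned int attr,
			    const struct nft_expr *expr)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, attr);
	if (!nest)
		return -ENOSPC;

	if (nla_put_string(skb, NFTA_EXPR_NAME, expr->ops->type->name) ||
	    expr->ops->dump(skb, expr) < 0) {
		nla_nest_cancel(skb, nest);	/* drop the half-built nest */
		return -ENOSPC;
	}

	nla_nest_end(skb, nest);	/* fix up the nest length */
	return 0;
}
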
skb              2340 net/netfilter/nf_tables_api.c static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
skb              2354 net/netfilter/nf_tables_api.c 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags);
skb              2363 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
skb              2365 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
skb              2367 net/netfilter/nf_tables_api.c 	if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle),
skb              2372 net/netfilter/nf_tables_api.c 		if (nla_put_be64(skb, NFTA_RULE_POSITION,
skb              2378 net/netfilter/nf_tables_api.c 	list = nla_nest_start_noflag(skb, NFTA_RULE_EXPRESSIONS);
skb              2382 net/netfilter/nf_tables_api.c 		if (nft_expr_dump(skb, NFTA_LIST_ELEM, expr) < 0)
skb              2385 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, list);
skb              2389 net/netfilter/nf_tables_api.c 		if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1,
skb              2394 net/netfilter/nf_tables_api.c 	nlmsg_end(skb, nlh);
skb              2398 net/netfilter/nf_tables_api.c 	nlmsg_trim(skb, nlh);
skb              2405 net/netfilter/nf_tables_api.c 	struct sk_buff *skb;
skb              2412 net/netfilter/nf_tables_api.c 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
skb              2413 net/netfilter/nf_tables_api.c 	if (skb == NULL)
skb              2416 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,
skb              2420 net/netfilter/nf_tables_api.c 		kfree_skb(skb);
skb              2424 net/netfilter/nf_tables_api.c 	nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
skb              2436 net/netfilter/nf_tables_api.c static int __nf_tables_dump_rules(struct sk_buff *skb,
skb              2442 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
skb              2456 net/netfilter/nf_tables_api.c 		if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
skb              2464 net/netfilter/nf_tables_api.c 		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              2473 net/netfilter/nf_tables_api.c static int nf_tables_dump_rules(struct sk_buff *skb,
skb              2481 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
skb              2505 net/netfilter/nf_tables_api.c 				__nf_tables_dump_rules(skb, &idx,
skb              2513 net/netfilter/nf_tables_api.c 			if (__nf_tables_dump_rules(skb, &idx, cb, table, chain))
skb              2524 net/netfilter/nf_tables_api.c 	return skb->len;
skb              2574 net/netfilter/nf_tables_api.c 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              2596 net/netfilter/nf_tables_api.c 		return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
skb              2621 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_rule_info(skb2, net, NETLINK_CB(skb).portid,
skb              2627 net/netfilter/nf_tables_api.c 	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
skb              2715 net/netfilter/nf_tables_api.c 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              2791 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
skb              2934 net/netfilter/nf_tables_api.c 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              2961 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
skb              3134 net/netfilter/nf_tables_api.c 				     const struct sk_buff *skb,
skb              3153 net/netfilter/nf_tables_api.c 	nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla);
skb              3297 net/netfilter/nf_tables_api.c static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
skb              3307 net/netfilter/nf_tables_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
skb              3317 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
skb              3319 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_SET_NAME, set->name))
skb              3321 net/netfilter/nf_tables_api.c 	if (nla_put_be64(skb, NFTA_SET_HANDLE, cpu_to_be64(set->handle),
skb              3325 net/netfilter/nf_tables_api.c 		if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
skb              3328 net/netfilter/nf_tables_api.c 	if (nla_put_be32(skb, NFTA_SET_KEY_TYPE, htonl(set->ktype)))
skb              3330 net/netfilter/nf_tables_api.c 	if (nla_put_be32(skb, NFTA_SET_KEY_LEN, htonl(set->klen)))
skb              3333 net/netfilter/nf_tables_api.c 		if (nla_put_be32(skb, NFTA_SET_DATA_TYPE, htonl(set->dtype)))
skb              3335 net/netfilter/nf_tables_api.c 		if (nla_put_be32(skb, NFTA_SET_DATA_LEN, htonl(set->dlen)))
skb              3339 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_SET_OBJ_TYPE, htonl(set->objtype)))
skb              3343 net/netfilter/nf_tables_api.c 	    nla_put_be64(skb, NFTA_SET_TIMEOUT,
skb              3348 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(set->gc_int)))
skb              3352 net/netfilter/nf_tables_api.c 		if (nla_put_be32(skb, NFTA_SET_POLICY, htonl(set->policy)))
skb              3356 net/netfilter/nf_tables_api.c 	if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
skb              3359 net/netfilter/nf_tables_api.c 	desc = nla_nest_start_noflag(skb, NFTA_SET_DESC);
skb              3363 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
skb              3365 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, desc);
skb              3367 net/netfilter/nf_tables_api.c 	nlmsg_end(skb, nlh);
skb              3371 net/netfilter/nf_tables_api.c 	nlmsg_trim(skb, nlh);
skb              3379 net/netfilter/nf_tables_api.c 	struct sk_buff *skb;
skb              3387 net/netfilter/nf_tables_api.c 	skb = nlmsg_new(NLMSG_GOODSIZE, gfp_flags);
skb              3388 net/netfilter/nf_tables_api.c 	if (skb == NULL)
skb              3391 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_set(skb, ctx, set, event, 0);
skb              3393 net/netfilter/nf_tables_api.c 		kfree_skb(skb);
skb              3397 net/netfilter/nf_tables_api.c 	nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, ctx->report,
skb              3404 net/netfilter/nf_tables_api.c static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
skb              3409 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
skb              3413 net/netfilter/nf_tables_api.c 		return skb->len;
skb              3443 net/netfilter/nf_tables_api.c 			if (nf_tables_fill_set(skb, &ctx_set, set,
skb              3450 net/netfilter/nf_tables_api.c 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              3460 net/netfilter/nf_tables_api.c 	return skb->len;
skb              3483 net/netfilter/nf_tables_api.c 			    struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              3495 net/netfilter/nf_tables_api.c 	err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, extack,
skb              3509 net/netfilter/nf_tables_api.c 		return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
skb              3530 net/netfilter/nf_tables_api.c 	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
skb              3555 net/netfilter/nf_tables_api.c 			    struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              3677 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
skb              3787 net/netfilter/nf_tables_api.c 			    struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              3803 net/netfilter/nf_tables_api.c 	err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, extack,
skb              3985 net/netfilter/nf_tables_api.c 				      const struct sk_buff *skb,
skb              4002 net/netfilter/nf_tables_api.c 	nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla);
skb              4006 net/netfilter/nf_tables_api.c static int nf_tables_fill_setelem(struct sk_buff *skb,
skb              4011 net/netfilter/nf_tables_api.c 	unsigned char *b = skb_tail_pointer(skb);
skb              4014 net/netfilter/nf_tables_api.c 	nest = nla_nest_start_noflag(skb, NFTA_LIST_ELEM);
skb              4018 net/netfilter/nf_tables_api.c 	if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, nft_set_ext_key(ext),
skb              4023 net/netfilter/nf_tables_api.c 	    nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
skb              4029 net/netfilter/nf_tables_api.c 	    nft_expr_dump(skb, NFTA_SET_ELEM_EXPR, nft_set_ext_expr(ext)) < 0)
skb              4033 net/netfilter/nf_tables_api.c 	    nla_put_string(skb, NFTA_SET_ELEM_OBJREF,
skb              4038 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_SET_ELEM_FLAGS,
skb              4043 net/netfilter/nf_tables_api.c 	    nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
skb              4057 net/netfilter/nf_tables_api.c 		if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION,
skb              4067 net/netfilter/nf_tables_api.c 		if (nla_put(skb, NFTA_SET_ELEM_USERDATA,
skb              4072 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, nest);
skb              4076 net/netfilter/nf_tables_api.c 	nlmsg_trim(skb, b);
skb              4083 net/netfilter/nf_tables_api.c 	struct sk_buff			*skb;
skb              4094 net/netfilter/nf_tables_api.c 	return nf_tables_fill_setelem(args->skb, set, elem);
skb              4102 net/netfilter/nf_tables_api.c static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
skb              4105 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
skb              4140 net/netfilter/nf_tables_api.c 	portid = NETLINK_CB(cb->skb).portid;
skb              4143 net/netfilter/nf_tables_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
skb              4153 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name))
skb              4155 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
skb              4158 net/netfilter/nf_tables_api.c 	nest = nla_nest_start_noflag(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
skb              4163 net/netfilter/nf_tables_api.c 	args.skb		= skb;
skb              4172 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, nest);
skb              4173 net/netfilter/nf_tables_api.c 	nlmsg_end(skb, nlh);
skb              4181 net/netfilter/nf_tables_api.c 	return skb->len;
skb              4203 net/netfilter/nf_tables_api.c static int nf_tables_fill_setelem_info(struct sk_buff *skb,
skb              4215 net/netfilter/nf_tables_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
skb              4225 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
skb              4227 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_SET_NAME, set->name))
skb              4230 net/netfilter/nf_tables_api.c 	nest = nla_nest_start_noflag(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
skb              4234 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_setelem(skb, set, elem);
skb              4238 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, nest);
skb              4240 net/netfilter/nf_tables_api.c 	nlmsg_end(skb, nlh);
skb              4244 net/netfilter/nf_tables_api.c 	nlmsg_trim(skb, nlh);
skb              4270 net/netfilter/nf_tables_api.c 	struct sk_buff *skb;
skb              4305 net/netfilter/nf_tables_api.c 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
skb              4306 net/netfilter/nf_tables_api.c 	if (skb == NULL)
skb              4309 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,
skb              4314 net/netfilter/nf_tables_api.c 	err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT);
skb              4321 net/netfilter/nf_tables_api.c 	kfree_skb(skb);
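
The set-element notifier above allocates with GFP_ATOMIC and unicasts with MSG_DONTWAIT because element updates can fire from contexts that must not sleep; the table and chain notifiers earlier in the file use GFP_KERNEL instead. A sketch of the notify shape, reusing the hypothetical fill_info_sketch() from above:

static void notify_sketch(const struct nft_ctx *ctx,
			  const struct nft_table *table, int event)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);	/* no sleeping here */
	if (!skb)
		goto err;

	err = fill_info_sketch(skb, ctx->portid, ctx->seq, event, table);
	if (err < 0) {
		kfree_skb(skb);		/* only we still own the skb here */
		goto err;
	}

	/* netlink frees the skb itself on delivery failure */
	if (nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT) < 0)
		goto err;
	return;
err:
	nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
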
skb              4329 net/netfilter/nf_tables_api.c 				struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              4339 net/netfilter/nf_tables_api.c 	err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, extack,
skb              4361 net/netfilter/nf_tables_api.c 		return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
skb              4383 net/netfilter/nf_tables_api.c 	struct sk_buff *skb;
skb              4389 net/netfilter/nf_tables_api.c 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
skb              4390 net/netfilter/nf_tables_api.c 	if (skb == NULL)
skb              4393 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags,
skb              4396 net/netfilter/nf_tables_api.c 		kfree_skb(skb);
skb              4400 net/netfilter/nf_tables_api.c 	nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
skb              4724 net/netfilter/nf_tables_api.c 				struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              4737 net/netfilter/nf_tables_api.c 	err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, extack,
skb              4921 net/netfilter/nf_tables_api.c 				struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              4931 net/netfilter/nf_tables_api.c 	err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, extack,
skb              5136 net/netfilter/nf_tables_api.c static int nft_object_dump(struct sk_buff *skb, unsigned int attr,
skb              5141 net/netfilter/nf_tables_api.c 	nest = nla_nest_start_noflag(skb, attr);
skb              5144 net/netfilter/nf_tables_api.c 	if (obj->ops->dump(skb, obj, reset) < 0)
skb              5146 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, nest);
skb              5216 net/netfilter/nf_tables_api.c 			    struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              5258 net/netfilter/nf_tables_api.c 		nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
skb              5263 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
skb              5310 net/netfilter/nf_tables_api.c static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
skb              5319 net/netfilter/nf_tables_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
skb              5328 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) ||
skb              5329 net/netfilter/nf_tables_api.c 	    nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) ||
skb              5330 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
skb              5331 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
skb              5332 net/netfilter/nf_tables_api.c 	    nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset) ||
skb              5333 net/netfilter/nf_tables_api.c 	    nla_put_be64(skb, NFTA_OBJ_HANDLE, cpu_to_be64(obj->handle),
skb              5337 net/netfilter/nf_tables_api.c 	nlmsg_end(skb, nlh);
skb              5341 net/netfilter/nf_tables_api.c 	nlmsg_trim(skb, nlh);
skb              5350 net/netfilter/nf_tables_api.c static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
skb              5356 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
skb              5387 net/netfilter/nf_tables_api.c 			if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
skb              5395 net/netfilter/nf_tables_api.c 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              5404 net/netfilter/nf_tables_api.c 	return skb->len;
skb              5447 net/netfilter/nf_tables_api.c 			    struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              5470 net/netfilter/nf_tables_api.c 		return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
skb              5497 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid,
skb              5503 net/netfilter/nf_tables_api.c 	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
skb              5520 net/netfilter/nf_tables_api.c 			    struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              5561 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
skb              5570 net/netfilter/nf_tables_api.c 	struct sk_buff *skb;
skb              5577 net/netfilter/nf_tables_api.c 	skb = nlmsg_new(NLMSG_GOODSIZE, gfp);
skb              5578 net/netfilter/nf_tables_api.c 	if (skb == NULL)
skb              5581 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_obj_info(skb, net, portid, seq, event, 0, family,
skb              5584 net/netfilter/nf_tables_api.c 		kfree_skb(skb);
skb              5588 net/netfilter/nf_tables_api.c 	nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp);
skb              5813 net/netfilter/nf_tables_api.c 				  struct sk_buff *skb,
skb              5856 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
skb              5936 net/netfilter/nf_tables_api.c 				  struct sk_buff *skb,
skb              5978 net/netfilter/nf_tables_api.c 	nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
skb              5983 net/netfilter/nf_tables_api.c static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
skb              5994 net/netfilter/nf_tables_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
skb              6003 net/netfilter/nf_tables_api.c 	if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) ||
skb              6004 net/netfilter/nf_tables_api.c 	    nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
skb              6005 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
skb              6006 net/netfilter/nf_tables_api.c 	    nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle),
skb              6010 net/netfilter/nf_tables_api.c 	nest = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK);
skb              6013 net/netfilter/nf_tables_api.c 	if (nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_NUM, htonl(flowtable->hooknum)) ||
skb              6014 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(flowtable->priority)))
skb              6017 net/netfilter/nf_tables_api.c 	nest_devs = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK_DEVS);
skb              6025 net/netfilter/nf_tables_api.c 		    nla_put_string(skb, NFTA_DEVICE_NAME, dev->name))
skb              6028 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, nest_devs);
skb              6029 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, nest);
skb              6031 net/netfilter/nf_tables_api.c 	nlmsg_end(skb, nlh);
skb              6035 net/netfilter/nf_tables_api.c 	nlmsg_trim(skb, nlh);
skb              6043 net/netfilter/nf_tables_api.c static int nf_tables_dump_flowtable(struct sk_buff *skb,
skb              6049 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
skb              6073 net/netfilter/nf_tables_api.c 			if (nf_tables_fill_flowtable_info(skb, net, NETLINK_CB(cb->skb).portid,
skb              6080 net/netfilter/nf_tables_api.c 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb              6089 net/netfilter/nf_tables_api.c 	return skb->len;
skb              6129 net/netfilter/nf_tables_api.c 				  struct sk_buff *skb,
skb              6151 net/netfilter/nf_tables_api.c 		return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
skb              6171 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_flowtable_info(skb2, net, NETLINK_CB(skb).portid,
skb              6178 net/netfilter/nf_tables_api.c 	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
skb              6188 net/netfilter/nf_tables_api.c 	struct sk_buff *skb;
skb              6195 net/netfilter/nf_tables_api.c 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
skb              6196 net/netfilter/nf_tables_api.c 	if (skb == NULL)
skb              6199 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid,
skb              6203 net/netfilter/nf_tables_api.c 		kfree_skb(skb);
skb              6207 net/netfilter/nf_tables_api.c 	nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
skb              6223 net/netfilter/nf_tables_api.c static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
skb              6231 net/netfilter/nf_tables_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 0);
skb              6240 net/netfilter/nf_tables_api.c 	if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq)) ||
skb              6241 net/netfilter/nf_tables_api.c 	    nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) ||
skb              6242 net/netfilter/nf_tables_api.c 	    nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current)))
skb              6245 net/netfilter/nf_tables_api.c 	nlmsg_end(skb, nlh);
skb              6249 net/netfilter/nf_tables_api.c 	nlmsg_trim(skb, nlh);
skb              6295 net/netfilter/nf_tables_api.c static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb,
skb              6298 net/netfilter/nf_tables_api.c 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
skb              6310 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
skb              6317 net/netfilter/nf_tables_api.c 	nfnetlink_send(skb2, net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
skb              6321 net/netfilter/nf_tables_api.c 	nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
skb              6326 net/netfilter/nf_tables_api.c 			    struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              6337 net/netfilter/nf_tables_api.c 	err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
skb              6342 net/netfilter/nf_tables_api.c 	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
skb              6784 net/netfilter/nf_tables_api.c static int nf_tables_commit(struct net *net, struct sk_buff *skb)
skb              6964 net/netfilter/nf_tables_api.c 	nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
skb              7149 net/netfilter/nf_tables_api.c static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
skb              7375 net/netfilter/nf_tables_api.c int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg)
skb              7382 net/netfilter/nf_tables_api.c 	return nla_put_be32(skb, attr, htonl(reg));
skb              7528 net/netfilter/nf_tables_api.c int nft_verdict_dump(struct sk_buff *skb, int type, const struct nft_verdict *v)
skb              7532 net/netfilter/nf_tables_api.c 	nest = nla_nest_start_noflag(skb, type);
skb              7536 net/netfilter/nf_tables_api.c 	if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(v->code)))
skb              7542 net/netfilter/nf_tables_api.c 		if (nla_put_string(skb, NFTA_VERDICT_CHAIN,
skb              7546 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, nest);
skb              7571 net/netfilter/nf_tables_api.c static int nft_value_dump(struct sk_buff *skb, const struct nft_data *data,
skb              7574 net/netfilter/nf_tables_api.c 	return nla_put(skb, NFTA_DATA_VALUE, len, data->data);
skb              7640 net/netfilter/nf_tables_api.c int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
skb              7646 net/netfilter/nf_tables_api.c 	nest = nla_nest_start_noflag(skb, attr);
skb              7652 net/netfilter/nf_tables_api.c 		err = nft_value_dump(skb, data, len);
skb              7655 net/netfilter/nf_tables_api.c 		err = nft_verdict_dump(skb, NFTA_DATA_VERDICT, &data->verdict);
skb              7662 net/netfilter/nf_tables_api.c 	nla_nest_end(skb, nest);
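
nft_verdict_dump() above only emits NFTA_VERDICT_CHAIN for verdict codes that actually reference a chain. A sketch of that branch, close to the excerpted function:

static int verdict_dump_sketch(struct sk_buff *skb, int type,
			       const struct nft_verdict *v)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, type);
	if (!nest)
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(v->code)))
		goto nla_put_failure;

	switch (v->code) {
	case NFT_JUMP:
	case NFT_GOTO:
		/* only jump/goto verdicts carry a target chain */
		if (nla_put_string(skb, NFTA_VERDICT_CHAIN, v->chain->name))
			goto nla_put_failure;
		break;
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	return -1;		/* caller trims the message */
}
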
skb                30 net/netfilter/nf_tables_core.c 	if (!info->trace || !pkt->skb->nf_trace)
skb                66 net/netfilter/nf_tables_core.c 	const struct sk_buff *skb = pkt->skb;
skb                71 net/netfilter/nf_tables_core.c 		ptr = skb_network_header(skb);
skb                75 net/netfilter/nf_tables_core.c 		ptr = skb_network_header(skb) + pkt->xt.thoff;
skb                80 net/netfilter/nf_tables_core.c 	if (unlikely(ptr + priv->len > skb_tail_pointer(skb)))
skb               111 net/netfilter/nf_tables_core.c 		stats->bytes += pkt->skb->len;
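
The nf_tables_core.c lines show the payload fast path: form a pointer into the linear skb data and refuse the copy when it would run past skb_tail_pointer(); the real evaluation then falls back to the slower skb_copy_bits()-based expression path. A kernel-context sketch:

static bool payload_copy_sketch(void *dest, const struct sk_buff *skb,
				unsigned int base_off, unsigned int off,
				unsigned int len)
{
	const unsigned char *ptr;

	ptr = skb_network_header(skb) + base_off + off;
	if (unlikely(ptr + len > skb_tail_pointer(skb)))
		return false;	/* not linear: caller takes the slow path */

	memcpy(dest, ptr, len);
	return true;
}
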
skb                28 net/netfilter/nf_tables_trace.c static int trace_fill_id(struct sk_buff *nlskb, struct sk_buff *skb)
skb                38 net/netfilter/nf_tables_trace.c 	id = (__be32)jhash_2words(hash32_ptr(skb), skb_get_hash(skb),
skb                39 net/netfilter/nf_tables_trace.c 				  skb->skb_iif);
skb                45 net/netfilter/nf_tables_trace.c 			     const struct sk_buff *skb,
skb                54 net/netfilter/nf_tables_trace.c 	if (!nla || skb_copy_bits(skb, off, nla_data(nla), len))
skb                61 net/netfilter/nf_tables_trace.c 				   const struct sk_buff *skb)
skb                68 net/netfilter/nf_tables_trace.c 	off = skb_mac_header(skb) - skb->data;
skb                72 net/netfilter/nf_tables_trace.c 	if (skb_copy_bits(skb, off, &veth, ETH_HLEN))
skb                75 net/netfilter/nf_tables_trace.c 	veth.h_vlan_proto = skb->vlan_proto;
skb                76 net/netfilter/nf_tables_trace.c 	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
skb                77 net/netfilter/nf_tables_trace.c 	veth.h_vlan_encapsulated_proto = skb->protocol;
skb               112 net/netfilter/nf_tables_trace.c 	const struct sk_buff *skb = pkt->skb;
skb               113 net/netfilter/nf_tables_trace.c 	int off = skb_network_offset(skb);
skb               116 net/netfilter/nf_tables_trace.c 	nh_end = pkt->tprot_set ? pkt->xt.thoff : skb->len;
skb               117 net/netfilter/nf_tables_trace.c 	len = min_t(unsigned int, nh_end - skb_network_offset(skb),
skb               119 net/netfilter/nf_tables_trace.c 	if (trace_fill_header(nlskb, NFTA_TRACE_NETWORK_HEADER, skb, off, len))
skb               123 net/netfilter/nf_tables_trace.c 		len = min_t(unsigned int, skb->len - pkt->xt.thoff,
skb               125 net/netfilter/nf_tables_trace.c 		if (trace_fill_header(nlskb, NFTA_TRACE_TRANSPORT_HEADER, skb,
skb               130 net/netfilter/nf_tables_trace.c 	if (!skb_mac_header_was_set(skb))
skb               133 net/netfilter/nf_tables_trace.c 	if (skb_vlan_tag_get(skb))
skb               134 net/netfilter/nf_tables_trace.c 		return nf_trace_fill_ll_header(nlskb, skb);
skb               136 net/netfilter/nf_tables_trace.c 	off = skb_mac_header(skb) - skb->data;
skb               139 net/netfilter/nf_tables_trace.c 				 skb, off, len);
skb               188 net/netfilter/nf_tables_trace.c 	struct sk_buff *skb;
skb               217 net/netfilter/nf_tables_trace.c 	skb = nlmsg_new(size, GFP_ATOMIC);
skb               218 net/netfilter/nf_tables_trace.c 	if (!skb)
skb               222 net/netfilter/nf_tables_trace.c 	nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct nfgenmsg), 0);
skb               231 net/netfilter/nf_tables_trace.c 	if (nla_put_be32(skb, NFTA_TRACE_NFPROTO, htonl(nft_pf(pkt))))
skb               234 net/netfilter/nf_tables_trace.c 	if (nla_put_be32(skb, NFTA_TRACE_TYPE, htonl(info->type)))
skb               237 net/netfilter/nf_tables_trace.c 	if (trace_fill_id(skb, pkt->skb))
skb               240 net/netfilter/nf_tables_trace.c 	if (nla_put_string(skb, NFTA_TRACE_CHAIN, info->chain->name))
skb               243 net/netfilter/nf_tables_trace.c 	if (nla_put_string(skb, NFTA_TRACE_TABLE, info->chain->table->name))
skb               246 net/netfilter/nf_tables_trace.c 	if (nf_trace_fill_rule_info(skb, info))
skb               255 net/netfilter/nf_tables_trace.c 		if (nft_verdict_dump(skb, NFTA_TRACE_VERDICT, info->verdict))
skb               259 net/netfilter/nf_tables_trace.c 		if (nla_put_be32(skb, NFTA_TRACE_POLICY,
skb               265 net/netfilter/nf_tables_trace.c 	if (pkt->skb->mark &&
skb               266 net/netfilter/nf_tables_trace.c 	    nla_put_be32(skb, NFTA_TRACE_MARK, htonl(pkt->skb->mark)))
skb               270 net/netfilter/nf_tables_trace.c 		if (nf_trace_fill_dev_info(skb, nft_in(pkt), nft_out(pkt)))
skb               273 net/netfilter/nf_tables_trace.c 		if (nf_trace_fill_pkt_info(skb, pkt))
skb               278 net/netfilter/nf_tables_trace.c 	nlmsg_end(skb, nlh);
skb               279 net/netfilter/nf_tables_trace.c 	nfnetlink_send(skb, nft_net(pkt), 0, NFNLGRP_NFTRACE, 0, GFP_ATOMIC);
skb               284 net/netfilter/nf_tables_trace.c 	kfree_skb(skb);
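
nf_trace_fill_ll_header() rebuilds an 802.1Q header for the trace message because ingress processing moves the VLAN tag out of the packet data and into skb metadata (skb->vlan_proto and the value behind skb_vlan_tag_get()). A runnable illustration of the on-wire layout being reconstructed; the struct mirrors the kernel's vlan_ethhdr but is declared locally:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>		/* htons */

/* Same shape as the kernel's struct vlan_ethhdr. */
struct vlan_ethhdr_demo {
	uint8_t  h_dest[6];
	uint8_t  h_source[6];
	uint16_t h_vlan_proto;			/* 0x8100, network order */
	uint16_t h_vlan_TCI;			/* PCP/DEI/VLAN ID */
	uint16_t h_vlan_encapsulated_proto;	/* original ethertype */
};

int main(void)
{
	struct vlan_ethhdr_demo veth = {
		.h_vlan_proto = htons(0x8100),
		.h_vlan_TCI = htons(100),		/* VLAN ID 100 */
		.h_vlan_encapsulated_proto = htons(0x0800), /* IPv4 */
	};

	/* 18 bytes on the wire versus 14 for an untagged frame */
	printf("tagged header: %zu bytes\n", sizeof(veth));
	return 0;
}
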
skb               138 net/netfilter/nfnetlink.c int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
skb               141 net/netfilter/nfnetlink.c 	return nlmsg_notify(net->nfnl, skb, portid, group, echo, flags);
skb               151 net/netfilter/nfnetlink.c int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
skb               154 net/netfilter/nfnetlink.c 	return netlink_unicast(net->nfnl, skb, portid, flags);
skb               159 net/netfilter/nfnetlink.c static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               162 net/netfilter/nfnetlink.c 	struct net *net = sock_net(skb->sk);
skb               218 net/netfilter/nfnetlink.c 			err = nc->call_rcu(net, net->nfnl, skb, nlh,
skb               229 net/netfilter/nfnetlink.c 				err = nc->call(net, net->nfnl, skb, nlh,
skb               280 net/netfilter/nfnetlink.c static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
skb               285 net/netfilter/nfnetlink.c 		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err,
skb               297 net/netfilter/nfnetlink.c static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               300 net/netfilter/nfnetlink.c 	struct sk_buff *oskb = skb;
skb               301 net/netfilter/nfnetlink.c 	struct net *net = sock_net(skb->sk);
skb               310 net/netfilter/nfnetlink.c 		return netlink_ack(skb, nlh, -EINVAL, NULL);
skb               314 net/netfilter/nfnetlink.c 	skb = netlink_skb_clone(oskb, GFP_KERNEL);
skb               315 net/netfilter/nfnetlink.c 	if (!skb)
skb               331 net/netfilter/nfnetlink.c 			return kfree_skb(skb);
skb               338 net/netfilter/nfnetlink.c 		return kfree_skb(skb);
skb               344 net/netfilter/nfnetlink.c 		return kfree_skb(skb);
skb               351 net/netfilter/nfnetlink.c 		return kfree_skb(skb);
skb               356 net/netfilter/nfnetlink.c 	while (skb->len >= nlmsg_total_size(0)) {
skb               367 net/netfilter/nfnetlink.c 		nlh = nlmsg_hdr(skb);
skb               371 net/netfilter/nfnetlink.c 		    skb->len < nlh->nlmsg_len ||
skb               433 net/netfilter/nfnetlink.c 				err = nc->call_batch(net, net->nfnl, skb, nlh,
skb               473 net/netfilter/nfnetlink.c 		if (msglen > skb->len)
skb               474 net/netfilter/nfnetlink.c 			msglen = skb->len;
skb               475 net/netfilter/nfnetlink.c 		skb_pull(skb, msglen);
skb               481 net/netfilter/nfnetlink.c 		kfree_skb(skb);
skb               500 net/netfilter/nfnetlink.c 	kfree_skb(skb);
skb               508 net/netfilter/nfnetlink.c static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
skb               520 net/netfilter/nfnetlink.c 	if (msglen > skb->len)
skb               521 net/netfilter/nfnetlink.c 		msglen = skb->len;
skb               523 net/netfilter/nfnetlink.c 	if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
skb               529 net/netfilter/nfnetlink.c 		netlink_ack(skb, nlh, err, NULL);
skb               536 net/netfilter/nfnetlink.c 	skb_pull(skb, msglen);
skb               543 net/netfilter/nfnetlink.c 	nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
skb               546 net/netfilter/nfnetlink.c static void nfnetlink_rcv(struct sk_buff *skb)
skb               548 net/netfilter/nfnetlink.c 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
skb               550 net/netfilter/nfnetlink.c 	if (skb->len < NLMSG_HDRLEN ||
skb               552 net/netfilter/nfnetlink.c 	    skb->len < nlh->nlmsg_len)
skb               555 net/netfilter/nfnetlink.c 	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
skb               556 net/netfilter/nfnetlink.c 		netlink_ack(skb, nlh, -EPERM, NULL);
skb               561 net/netfilter/nfnetlink.c 		nfnetlink_rcv_skb_batch(skb, nlh);
skb               563 net/netfilter/nfnetlink.c 		netlink_rcv_skb(skb, nfnetlink_rcv_msg);
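
nfnetlink_rcv_batch() consumes one message per loop iteration: it stops once less than a bare header remains, clamps msglen to what is left in the buffer, and advances with skb_pull(). The same walk over a plain byte buffer, runnable with the uapi netlink header:

#include <stdio.h>
#include <linux/netlink.h>	/* struct nlmsghdr, NLMSG_HDRLEN, NLMSG_ALIGN */

/* Step through back-to-back netlink messages the way the batch
 * receive loop does. */
static void walk(const unsigned char *buf, size_t len)
{
	while (len >= NLMSG_HDRLEN) {
		const struct nlmsghdr *nlh = (const struct nlmsghdr *)buf;
		size_t msglen;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > len)
			break;		/* truncated or corrupt message */
		printf("type=%u len=%u\n", nlh->nlmsg_type, nlh->nlmsg_len);

		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > len)
			msglen = len;	/* final message may be unpadded */
		buf += msglen;
		len -= msglen;
	}
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

	nlh->nlmsg_len = NLMSG_HDRLEN + 4;	/* header plus 4 payload bytes */
	nlh->nlmsg_type = 16;
	walk(buf, NLMSG_ALIGN(nlh->nlmsg_len));
	return 0;
}
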
skb                48 net/netfilter/nfnetlink_acct.c 			 struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               131 net/netfilter/nfnetlink_acct.c nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
skb               141 net/netfilter/nfnetlink_acct.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
skb               150 net/netfilter/nfnetlink_acct.c 	if (nla_put_string(skb, NFACCT_NAME, acct->name))
skb               164 net/netfilter/nfnetlink_acct.c 	if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts),
skb               166 net/netfilter/nfnetlink_acct.c 	    nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes),
skb               168 net/netfilter/nfnetlink_acct.c 	    nla_put_be32(skb, NFACCT_USE, htonl(refcount_read(&acct->refcnt))))
skb               173 net/netfilter/nfnetlink_acct.c 		if (nla_put_be32(skb, NFACCT_FLAGS, htonl(old_flags)) ||
skb               174 net/netfilter/nfnetlink_acct.c 		    nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota),
skb               178 net/netfilter/nfnetlink_acct.c 	nlmsg_end(skb, nlh);
skb               179 net/netfilter/nfnetlink_acct.c 	return skb->len;
skb               183 net/netfilter/nfnetlink_acct.c 	nlmsg_cancel(skb, nlh);
skb               188 net/netfilter/nfnetlink_acct.c nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               190 net/netfilter/nfnetlink_acct.c 	struct net *net = sock_net(skb->sk);
skb               213 net/netfilter/nfnetlink_acct.c 		if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
skb               224 net/netfilter/nfnetlink_acct.c 	return skb->len;
skb               268 net/netfilter/nfnetlink_acct.c 			 struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               284 net/netfilter/nfnetlink_acct.c 		return netlink_dump_start(nfnl, skb, nlh, &c);
skb               303 net/netfilter/nfnetlink_acct.c 		ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).portid,
skb               311 net/netfilter/nfnetlink_acct.c 		ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
skb               341 net/netfilter/nfnetlink_acct.c 			 struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               438 net/netfilter/nfnetlink_acct.c void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
skb               441 net/netfilter/nfnetlink_acct.c 	atomic64_add(skb->len, &nfacct->bytes);
skb               448 net/netfilter/nfnetlink_acct.c 	struct sk_buff *skb;
skb               450 net/netfilter/nfnetlink_acct.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb               451 net/netfilter/nfnetlink_acct.c 	if (skb == NULL)
skb               454 net/netfilter/nfnetlink_acct.c 	ret = nfnl_acct_fill_info(skb, 0, 0, NFNL_MSG_ACCT_OVERQUOTA, 0,
skb               457 net/netfilter/nfnetlink_acct.c 		kfree_skb(skb);
skb               460 net/netfilter/nfnetlink_acct.c 	netlink_broadcast(net->nfnl, skb, 0, NFNLGRP_ACCT_QUOTA,
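
nfnl_acct_update() charges both counters with atomic64 operations, so concurrent CPUs account packets without any lock; the dump side reads them back (and, as far as the GET-and-zero command goes, swaps them out atomically). A userspace analogue using C11 atomics; the names are illustrative only:

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

/* Lock-free accounting pair, mirroring the pkts/bytes counters that
 * nfnl_acct_update() bumps once per packet. */
struct acct_demo {
	atomic_uint_least64_t pkts;
	atomic_uint_least64_t bytes;
};

static void acct_update(struct acct_demo *a, uint64_t skb_len)
{
	atomic_fetch_add(&a->bytes, skb_len);
	atomic_fetch_add(&a->pkts, 1);
}

int main(void)
{
	struct acct_demo a = { 0 };

	acct_update(&a, 1500);
	acct_update(&a, 40);
	printf("pkts=%llu bytes=%llu\n",
	       (unsigned long long)atomic_load(&a.pkts),
	       (unsigned long long)atomic_load(&a.bytes));
	return 0;
}
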
skb                41 net/netfilter/nfnetlink_cthelper.c nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
skb               111 net/netfilter/nfnetlink_cthelper.c nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct)
skb               116 net/netfilter/nfnetlink_cthelper.c 	    nla_put(skb, CTA_HELP_INFO, help->helper->data_len, &help->data))
skb               412 net/netfilter/nfnetlink_cthelper.c 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               460 net/netfilter/nfnetlink_cthelper.c nfnl_cthelper_dump_tuple(struct sk_buff *skb,
skb               465 net/netfilter/nfnetlink_cthelper.c 	nest_parms = nla_nest_start(skb, NFCTH_TUPLE);
skb               469 net/netfilter/nfnetlink_cthelper.c 	if (nla_put_be16(skb, NFCTH_TUPLE_L3PROTONUM,
skb               473 net/netfilter/nfnetlink_cthelper.c 	if (nla_put_u8(skb, NFCTH_TUPLE_L4PROTONUM, helper->tuple.dst.protonum))
skb               476 net/netfilter/nfnetlink_cthelper.c 	nla_nest_end(skb, nest_parms);
skb               484 net/netfilter/nfnetlink_cthelper.c nfnl_cthelper_dump_policy(struct sk_buff *skb,
skb               490 net/netfilter/nfnetlink_cthelper.c 	nest_parms1 = nla_nest_start(skb, NFCTH_POLICY);
skb               494 net/netfilter/nfnetlink_cthelper.c 	if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
skb               499 net/netfilter/nfnetlink_cthelper.c 		nest_parms2 = nla_nest_start(skb, (NFCTH_POLICY_SET + i));
skb               503 net/netfilter/nfnetlink_cthelper.c 		if (nla_put_string(skb, NFCTH_POLICY_NAME,
skb               507 net/netfilter/nfnetlink_cthelper.c 		if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_MAX,
skb               511 net/netfilter/nfnetlink_cthelper.c 		if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_TIMEOUT,
skb               515 net/netfilter/nfnetlink_cthelper.c 		nla_nest_end(skb, nest_parms2);
skb               517 net/netfilter/nfnetlink_cthelper.c 	nla_nest_end(skb, nest_parms1);
skb               525 net/netfilter/nfnetlink_cthelper.c nfnl_cthelper_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
skb               534 net/netfilter/nfnetlink_cthelper.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
skb               543 net/netfilter/nfnetlink_cthelper.c 	if (nla_put_string(skb, NFCTH_NAME, helper->name))
skb               546 net/netfilter/nfnetlink_cthelper.c 	if (nla_put_be32(skb, NFCTH_QUEUE_NUM, htonl(helper->queue_num)))
skb               549 net/netfilter/nfnetlink_cthelper.c 	if (nfnl_cthelper_dump_tuple(skb, helper) < 0)
skb               552 net/netfilter/nfnetlink_cthelper.c 	if (nfnl_cthelper_dump_policy(skb, helper) < 0)
skb               555 net/netfilter/nfnetlink_cthelper.c 	if (nla_put_be32(skb, NFCTH_PRIV_DATA_LEN, htonl(helper->data_len)))
skb               563 net/netfilter/nfnetlink_cthelper.c 	if (nla_put_be32(skb, NFCTH_STATUS, htonl(status)))
skb               566 net/netfilter/nfnetlink_cthelper.c 	nlmsg_end(skb, nlh);
skb               567 net/netfilter/nfnetlink_cthelper.c 	return skb->len;
skb               571 net/netfilter/nfnetlink_cthelper.c 	nlmsg_cancel(skb, nlh);
skb               576 net/netfilter/nfnetlink_cthelper.c nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
skb               596 net/netfilter/nfnetlink_cthelper.c 			if (nfnl_cthelper_fill_info(skb,
skb               597 net/netfilter/nfnetlink_cthelper.c 					    NETLINK_CB(cb->skb).portid,
skb               612 net/netfilter/nfnetlink_cthelper.c 	return skb->len;
skb               616 net/netfilter/nfnetlink_cthelper.c 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               635 net/netfilter/nfnetlink_cthelper.c 		return netlink_dump_start(nfnl, skb, nlh, &c);
skb               666 net/netfilter/nfnetlink_cthelper.c 		ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
skb               675 net/netfilter/nfnetlink_cthelper.c 		ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
skb               687 net/netfilter/nfnetlink_cthelper.c 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
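
nfnl_cthelper_dump_policy() encodes an array inside netlink attributes: it announces the element count in NFCTH_POLICY_SET_NUM, then nests one element per index with the attribute type derived as NFCTH_POLICY_SET + i. A condensed sketch of that idiom, following the excerpted function:

static int dump_policy_sketch(struct sk_buff *skb,
			      const struct nf_conntrack_helper *helper)
{
	struct nlattr *nest, *elem;
	unsigned int i;

	nest = nla_nest_start(skb, NFCTH_POLICY);
	if (!nest)
		goto nla_put_failure;
	if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
			 htonl(helper->expect_class_max + 1)))
		goto nla_put_failure;

	for (i = 0; i < helper->expect_class_max + 1; i++) {
		/* one nest per element, type derived from the index */
		elem = nla_nest_start(skb, NFCTH_POLICY_SET + i);
		if (!elem)
			goto nla_put_failure;
		if (nla_put_string(skb, NFCTH_POLICY_NAME,
				   helper->expect_policy[i].name))
			goto nla_put_failure;
		nla_nest_end(skb, elem);
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	return -1;
}
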
skb                75 net/netfilter/nfnetlink_cttimeout.c 				 struct sk_buff *skb,
skb               159 net/netfilter/nfnetlink_cttimeout.c ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
skb               170 net/netfilter/nfnetlink_cttimeout.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
skb               179 net/netfilter/nfnetlink_cttimeout.c 	if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) ||
skb               180 net/netfilter/nfnetlink_cttimeout.c 	    nla_put_be16(skb, CTA_TIMEOUT_L3PROTO,
skb               182 net/netfilter/nfnetlink_cttimeout.c 	    nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto) ||
skb               183 net/netfilter/nfnetlink_cttimeout.c 	    nla_put_be32(skb, CTA_TIMEOUT_USE,
skb               187 net/netfilter/nfnetlink_cttimeout.c 	nest_parms = nla_nest_start(skb, CTA_TIMEOUT_DATA);
skb               191 net/netfilter/nfnetlink_cttimeout.c 	ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->timeout.data);
skb               195 net/netfilter/nfnetlink_cttimeout.c 	nla_nest_end(skb, nest_parms);
skb               197 net/netfilter/nfnetlink_cttimeout.c 	nlmsg_end(skb, nlh);
skb               198 net/netfilter/nfnetlink_cttimeout.c 	return skb->len;
skb               202 net/netfilter/nfnetlink_cttimeout.c 	nlmsg_cancel(skb, nlh);
skb               207 net/netfilter/nfnetlink_cttimeout.c ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               209 net/netfilter/nfnetlink_cttimeout.c 	struct net *net = sock_net(skb->sk);
skb               227 net/netfilter/nfnetlink_cttimeout.c 		if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
skb               238 net/netfilter/nfnetlink_cttimeout.c 	return skb->len;
skb               242 net/netfilter/nfnetlink_cttimeout.c 				 struct sk_buff *skb,
skb               255 net/netfilter/nfnetlink_cttimeout.c 		return netlink_dump_start(ctnl, skb, nlh, &c);
skb               274 net/netfilter/nfnetlink_cttimeout.c 		ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).portid,
skb               282 net/netfilter/nfnetlink_cttimeout.c 		ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
skb               313 net/netfilter/nfnetlink_cttimeout.c 				 struct sk_buff *skb,
skb               345 net/netfilter/nfnetlink_cttimeout.c 				 struct sk_buff *skb,
skb               379 net/netfilter/nfnetlink_cttimeout.c cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
skb               391 net/netfilter/nfnetlink_cttimeout.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
skb               400 net/netfilter/nfnetlink_cttimeout.c 	if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l3num)) ||
skb               401 net/netfilter/nfnetlink_cttimeout.c 	    nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto))
skb               404 net/netfilter/nfnetlink_cttimeout.c 	nest_parms = nla_nest_start(skb, CTA_TIMEOUT_DATA);
skb               408 net/netfilter/nfnetlink_cttimeout.c 	ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
skb               412 net/netfilter/nfnetlink_cttimeout.c 	nla_nest_end(skb, nest_parms);
skb               414 net/netfilter/nfnetlink_cttimeout.c 	nlmsg_end(skb, nlh);
skb               415 net/netfilter/nfnetlink_cttimeout.c 	return skb->len;
skb               419 net/netfilter/nfnetlink_cttimeout.c 	nlmsg_cancel(skb, nlh);
skb               424 net/netfilter/nfnetlink_cttimeout.c 				 struct sk_buff *skb,
skb               493 net/netfilter/nfnetlink_cttimeout.c 	ret = cttimeout_default_fill_info(net, skb2, NETLINK_CB(skb).portid,
skb               503 net/netfilter/nfnetlink_cttimeout.c 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
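
ctnl_timeout_fill_info() and cttimeout_default_fill_info() above share the usual netlink fill shape: open with nlmsg_put(), append attributes, then either commit with nlmsg_end() or roll the partial message back with nlmsg_cancel(). A minimal sketch, assuming hypothetical MY_MSG_TYPE and MY_ATTR_NAME values:

#include <net/netlink.h>

#define MY_MSG_TYPE	0x10	/* hypothetical; must be >= NLMSG_MIN_TYPE */

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_NAME,
};

static int fill_info(struct sk_buff *skb, u32 portid, u32 seq)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, 0, 0);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_string(skb, MY_ATTR_NAME, "example"))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);	/* fixes up nlmsg_len */
	return skb->len;

nla_put_failure:
	nlmsg_cancel(skb, nlh);	/* trims the skb back to before nlh */
	return -EMSGSIZE;
}
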
skb                62 net/netfilter/nfnetlink_log.c 	struct sk_buff *skb;		/* pre-allocated skb */
skb               226 net/netfilter/nfnetlink_log.c 	if (inst->skb)
skb               325 net/netfilter/nfnetlink_log.c 	struct sk_buff *skb;
skb               332 net/netfilter/nfnetlink_log.c 	skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
skb               333 net/netfilter/nfnetlink_log.c 	if (!skb) {
skb               338 net/netfilter/nfnetlink_log.c 			skb = alloc_skb(pkt_size, GFP_ATOMIC);
skb               342 net/netfilter/nfnetlink_log.c 	return skb;
skb               349 net/netfilter/nfnetlink_log.c 		struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
skb               354 net/netfilter/nfnetlink_log.c 			      inst->skb->len, skb_tailroom(inst->skb))) {
skb               355 net/netfilter/nfnetlink_log.c 			kfree_skb(inst->skb);
skb               359 net/netfilter/nfnetlink_log.c 	nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
skb               363 net/netfilter/nfnetlink_log.c 	inst->skb = NULL;
skb               372 net/netfilter/nfnetlink_log.c 	if (inst->skb)
skb               382 net/netfilter/nfnetlink_log.c 	if (inst->skb)
skb               388 net/netfilter/nfnetlink_log.c static u32 nfulnl_get_bridge_size(const struct sk_buff *skb)
skb               392 net/netfilter/nfnetlink_log.c 	if (!skb_mac_header_was_set(skb))
skb               395 net/netfilter/nfnetlink_log.c 	if (skb_vlan_tag_present(skb)) {
skb               401 net/netfilter/nfnetlink_log.c 	if (skb->network_header > skb->mac_header)
skb               402 net/netfilter/nfnetlink_log.c 		size += nla_total_size(skb->network_header - skb->mac_header);
skb               407 net/netfilter/nfnetlink_log.c static int nfulnl_put_bridge(struct nfulnl_instance *inst, const struct sk_buff *skb)
skb               409 net/netfilter/nfnetlink_log.c 	if (!skb_mac_header_was_set(skb))
skb               412 net/netfilter/nfnetlink_log.c 	if (skb_vlan_tag_present(skb)) {
skb               415 net/netfilter/nfnetlink_log.c 		nest = nla_nest_start(inst->skb, NFULA_VLAN);
skb               419 net/netfilter/nfnetlink_log.c 		if (nla_put_be16(inst->skb, NFULA_VLAN_TCI, htons(skb->vlan_tci)) ||
skb               420 net/netfilter/nfnetlink_log.c 		    nla_put_be16(inst->skb, NFULA_VLAN_PROTO, skb->vlan_proto))
skb               423 net/netfilter/nfnetlink_log.c 		nla_nest_end(inst->skb, nest);
skb               426 net/netfilter/nfnetlink_log.c 	if (skb->mac_header < skb->network_header) {
skb               427 net/netfilter/nfnetlink_log.c 		int len = (int)(skb->network_header - skb->mac_header);
skb               429 net/netfilter/nfnetlink_log.c 		if (nla_put(inst->skb, NFULA_L2HDR, len, skb_mac_header(skb)))
skb               444 net/netfilter/nfnetlink_log.c 			const struct sk_buff *skb,
skb               457 net/netfilter/nfnetlink_log.c 	sk_buff_data_t old_tail = inst->skb->tail;
skb               461 net/netfilter/nfnetlink_log.c 	nlh = nlmsg_put(inst->skb, 0, 0,
skb               472 net/netfilter/nfnetlink_log.c 	pmsg.hw_protocol	= skb->protocol;
skb               475 net/netfilter/nfnetlink_log.c 	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
skb               479 net/netfilter/nfnetlink_log.c 	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
skb               484 net/netfilter/nfnetlink_log.c 		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
skb               492 net/netfilter/nfnetlink_log.c 			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
skb               498 net/netfilter/nfnetlink_log.c 			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
skb               506 net/netfilter/nfnetlink_log.c 			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
skb               510 net/netfilter/nfnetlink_log.c 			physindev = nf_bridge_get_physindev(skb);
skb               512 net/netfilter/nfnetlink_log.c 			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
skb               521 net/netfilter/nfnetlink_log.c 		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
skb               529 net/netfilter/nfnetlink_log.c 			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
skb               535 net/netfilter/nfnetlink_log.c 			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
skb               543 net/netfilter/nfnetlink_log.c 			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
skb               547 net/netfilter/nfnetlink_log.c 			physoutdev = nf_bridge_get_physoutdev(skb);
skb               549 net/netfilter/nfnetlink_log.c 			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
skb               556 net/netfilter/nfnetlink_log.c 	if (skb->mark &&
skb               557 net/netfilter/nfnetlink_log.c 	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
skb               560 net/netfilter/nfnetlink_log.c 	if (indev && skb->dev &&
skb               561 net/netfilter/nfnetlink_log.c 	    skb->mac_header != skb->network_header) {
skb               566 net/netfilter/nfnetlink_log.c 		len = dev_parse_header(skb, phw.hw_addr);
skb               569 net/netfilter/nfnetlink_log.c 			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
skb               574 net/netfilter/nfnetlink_log.c 	if (indev && skb_mac_header_was_set(skb)) {
skb               575 net/netfilter/nfnetlink_log.c 		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
skb               576 net/netfilter/nfnetlink_log.c 		    nla_put_be16(inst->skb, NFULA_HWLEN,
skb               577 net/netfilter/nfnetlink_log.c 				 htons(skb->dev->hard_header_len)))
skb               580 net/netfilter/nfnetlink_log.c 		hwhdrp = skb_mac_header(skb);
skb               582 net/netfilter/nfnetlink_log.c 		if (skb->dev->type == ARPHRD_SIT)
skb               585 net/netfilter/nfnetlink_log.c 		if (hwhdrp >= skb->head &&
skb               586 net/netfilter/nfnetlink_log.c 		    nla_put(inst->skb, NFULA_HWHEADER,
skb               587 net/netfilter/nfnetlink_log.c 			    skb->dev->hard_header_len, hwhdrp))
skb               591 net/netfilter/nfnetlink_log.c 	if (hooknum <= NF_INET_FORWARD && skb->tstamp) {
skb               593 net/netfilter/nfnetlink_log.c 		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
skb               597 net/netfilter/nfnetlink_log.c 		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
skb               602 net/netfilter/nfnetlink_log.c 	sk = skb->sk;
skb               612 net/netfilter/nfnetlink_log.c 			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
skb               613 net/netfilter/nfnetlink_log.c 			    nla_put_be32(inst->skb, NFULA_GID, gid))
skb               621 net/netfilter/nfnetlink_log.c 	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
skb               626 net/netfilter/nfnetlink_log.c 	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
skb               630 net/netfilter/nfnetlink_log.c 	if (ct && nfnl_ct->build(inst->skb, ct, ctinfo,
skb               635 net/netfilter/nfnetlink_log.c 	    nfulnl_put_bridge(inst, skb) < 0)
skb               642 net/netfilter/nfnetlink_log.c 		if (skb_tailroom(inst->skb) < nla_total_size(data_len))
skb               645 net/netfilter/nfnetlink_log.c 		nla = skb_put(inst->skb, nla_total_size(data_len));
skb               649 net/netfilter/nfnetlink_log.c 		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
skb               653 net/netfilter/nfnetlink_log.c 	nlh->nlmsg_len = inst->skb->tail - old_tail;
skb               677 net/netfilter/nfnetlink_log.c 		  const struct sk_buff *skb,
skb               725 net/netfilter/nfnetlink_log.c 	if (in && skb_mac_header_was_set(skb)) {
skb               726 net/netfilter/nfnetlink_log.c 		size += nla_total_size(skb->dev->hard_header_len)
skb               740 net/netfilter/nfnetlink_log.c 			ct = nfnl_ct->get_ct(skb, &ctinfo);
skb               746 net/netfilter/nfnetlink_log.c 		size += nfulnl_get_bridge_size(skb);
skb               767 net/netfilter/nfnetlink_log.c 		if (data_len > skb->len)
skb               768 net/netfilter/nfnetlink_log.c 			data_len = skb->len;
skb               778 net/netfilter/nfnetlink_log.c 	if (inst->skb && size > skb_tailroom(inst->skb)) {
skb               784 net/netfilter/nfnetlink_log.c 	if (!inst->skb) {
skb               785 net/netfilter/nfnetlink_log.c 		inst->skb = nfulnl_alloc_skb(net, inst->peer_portid,
skb               787 net/netfilter/nfnetlink_log.c 		if (!inst->skb)
skb               793 net/netfilter/nfnetlink_log.c 	__build_packet_message(log, inst, skb, data_len, pf,
skb               849 net/netfilter/nfnetlink_log.c 			      struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               873 net/netfilter/nfnetlink_log.c 			      struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               900 net/netfilter/nfnetlink_log.c 	if (inst && inst->peer_portid != NETLINK_CB(skb).portid) {
skb               936 net/netfilter/nfnetlink_log.c 					       NETLINK_CB(skb).portid,
skb               937 net/netfilter/nfnetlink_log.c 					       sk_user_ns(NETLINK_CB(skb).sk));
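
The nfnetlink_log lines above show its batching policy: log entries are appended to a per-instance skb, and the batch is flushed once the next entry no longer fits the tailroom. A sketch of that policy, with hypothetical buf_inst/send_batch() standing in for struct nfulnl_instance and its flush path:

#include <linux/skbuff.h>
#include <net/netlink.h>

struct buf_inst {
	struct sk_buff *skb;	/* pre-allocated batch buffer, may be NULL */
};

/* hypothetical: unicasts inst->skb to the listener and NULLs it */
void send_batch(struct buf_inst *inst);

static struct sk_buff *get_buffer(struct buf_inst *inst, unsigned int size)
{
	/* the next message would not fit: flush what has been batched */
	if (inst->skb && size > skb_tailroom(inst->skb))
		send_batch(inst);

	if (!inst->skb)
		inst->skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);

	return inst->skb;	/* NULL on allocation failure */
}
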
skb                31 net/netfilter/nfnetlink_osf.c static inline int nf_osf_ttl(const struct sk_buff *skb,
skb                34 net/netfilter/nfnetlink_osf.c 	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
skb                35 net/netfilter/nfnetlink_osf.c 	const struct iphdr *ip = ip_hdr(skb);
skb                64 net/netfilter/nfnetlink_osf.c static bool nf_osf_match_one(const struct sk_buff *skb,
skb                75 net/netfilter/nfnetlink_osf.c 	if (ctx->totlen != f->ss || !nf_osf_ttl(skb, ttl_check, f->ttl))
skb               166 net/netfilter/nfnetlink_osf.c 						const struct sk_buff *skb,
skb               173 net/netfilter/nfnetlink_osf.c 	tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph);
skb               187 net/netfilter/nfnetlink_osf.c 		ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
skb               195 net/netfilter/nfnetlink_osf.c nf_osf_match(const struct sk_buff *skb, u_int8_t family,
skb               200 net/netfilter/nfnetlink_osf.c 	const struct iphdr *ip = ip_hdr(skb);
skb               212 net/netfilter/nfnetlink_osf.c 	tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
skb               225 net/netfilter/nfnetlink_osf.c 		if (!nf_osf_match_one(skb, f, ttl_check, &ctx))
skb               233 net/netfilter/nfnetlink_osf.c 			nf_log_packet(net, family, hooknum, skb,
skb               247 net/netfilter/nfnetlink_osf.c 		nf_log_packet(net, family, hooknum, skb, in, out, NULL,
skb               259 net/netfilter/nfnetlink_osf.c bool nf_osf_find(const struct sk_buff *skb,
skb               263 net/netfilter/nfnetlink_osf.c 	const struct iphdr *ip = ip_hdr(skb);
skb               273 net/netfilter/nfnetlink_osf.c 	tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
skb               279 net/netfilter/nfnetlink_osf.c 		if (!nf_osf_match_one(skb, f, ttl_check, &ctx))
skb               296 net/netfilter/nfnetlink_osf.c 				 struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               343 net/netfilter/nfnetlink_osf.c 				    struct sk_buff *skb,
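
nf_osf_hdr_ctx_init() above reads the TCP header through skb_header_pointer(), which copies into a caller-supplied buffer whenever the requested range is not linear in the skb. A minimal sketch, assuming an IPv4 packet whose transport header follows directly after the IP header:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/ip.h>

static const struct tcphdr *peek_tcp(const struct sk_buff *skb,
				     struct tcphdr *buf)
{
	/* returns a pointer into the skb, or into *buf, or NULL if short */
	return skb_header_pointer(skb, ip_hdrlen(skb), sizeof(*buf), buf);
}
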
skb               237 net/netfilter/nfnetlink_queue.c 			err = ct_hook->update(entry->state.net, entry->skb);
skb               279 net/netfilter/nfnetlink_queue.c static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
skb               289 net/netfilter/nfnetlink_queue.c 		if (nla_put_be32(skb, NFQA_UID,
skb               292 net/netfilter/nfnetlink_queue.c 		if (nla_put_be32(skb, NFQA_GID,
skb               304 net/netfilter/nfnetlink_queue.c static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
skb               308 net/netfilter/nfnetlink_queue.c 	if (!skb || !sk_fullsock(skb->sk))
skb               311 net/netfilter/nfnetlink_queue.c 	read_lock_bh(&skb->sk->sk_callback_lock);
skb               313 net/netfilter/nfnetlink_queue.c 	if (skb->secmark)
skb               314 net/netfilter/nfnetlink_queue.c 		security_secid_to_secctx(skb->secmark, secdata, &seclen);
skb               316 net/netfilter/nfnetlink_queue.c 	read_unlock_bh(&skb->sk->sk_callback_lock);
skb               323 net/netfilter/nfnetlink_queue.c 	struct sk_buff *entskb = entry->skb;
skb               340 net/netfilter/nfnetlink_queue.c static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
skb               342 net/netfilter/nfnetlink_queue.c 	struct sk_buff *entskb = entry->skb;
skb               350 net/netfilter/nfnetlink_queue.c 		nest = nla_nest_start(skb, NFQA_VLAN);
skb               354 net/netfilter/nfnetlink_queue.c 		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
skb               355 net/netfilter/nfnetlink_queue.c 		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
skb               358 net/netfilter/nfnetlink_queue.c 		nla_nest_end(skb, nest);
skb               364 net/netfilter/nfnetlink_queue.c 		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
skb               382 net/netfilter/nfnetlink_queue.c 	struct sk_buff *skb;
skb               387 net/netfilter/nfnetlink_queue.c 	struct sk_buff *entskb = entry->skb;
skb               466 net/netfilter/nfnetlink_queue.c 	skb = alloc_skb(size, GFP_ATOMIC);
skb               467 net/netfilter/nfnetlink_queue.c 	if (!skb) {
skb               472 net/netfilter/nfnetlink_queue.c 	nlh = nlmsg_put(skb, 0, 0,
skb               477 net/netfilter/nfnetlink_queue.c 		kfree_skb(skb);
skb               485 net/netfilter/nfnetlink_queue.c 	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
skb               494 net/netfilter/nfnetlink_queue.c 		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
skb               501 net/netfilter/nfnetlink_queue.c 			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
skb               505 net/netfilter/nfnetlink_queue.c 			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
skb               513 net/netfilter/nfnetlink_queue.c 			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
skb               519 net/netfilter/nfnetlink_queue.c 			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
skb               528 net/netfilter/nfnetlink_queue.c 		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
skb               535 net/netfilter/nfnetlink_queue.c 			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
skb               539 net/netfilter/nfnetlink_queue.c 			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
skb               547 net/netfilter/nfnetlink_queue.c 			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
skb               553 net/netfilter/nfnetlink_queue.c 			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
skb               561 net/netfilter/nfnetlink_queue.c 	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
skb               573 net/netfilter/nfnetlink_queue.c 			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
skb               578 net/netfilter/nfnetlink_queue.c 	if (nfqnl_put_bridge(entry, skb) < 0)
skb               588 net/netfilter/nfnetlink_queue.c 		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
skb               593 net/netfilter/nfnetlink_queue.c 	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
skb               596 net/netfilter/nfnetlink_queue.c 	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
skb               599 net/netfilter/nfnetlink_queue.c 	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
skb               603 net/netfilter/nfnetlink_queue.c 	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
skb               606 net/netfilter/nfnetlink_queue.c 	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
skb               612 net/netfilter/nfnetlink_queue.c 		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
skb               615 net/netfilter/nfnetlink_queue.c 		nla = skb_put(skb, sizeof(*nla));
skb               619 net/netfilter/nfnetlink_queue.c 		if (skb_zerocopy(skb, entskb, data_len, hlen))
skb               623 net/netfilter/nfnetlink_queue.c 	nlh->nlmsg_len = skb->len;
skb               626 net/netfilter/nfnetlink_queue.c 	return skb;
skb               630 net/netfilter/nfnetlink_queue.c 	kfree_skb(skb);
skb               642 net/netfilter/nfnetlink_queue.c 	const struct nf_conn *ct = (void *)skb_nfct(entry->skb);
skb               724 net/netfilter/nfnetlink_queue.c static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
skb               726 net/netfilter/nfnetlink_queue.c 	if (nf_bridge_info_get(skb))
skb               727 net/netfilter/nfnetlink_queue.c 		__skb_push(skb, skb->network_header - skb->mac_header);
skb               730 net/netfilter/nfnetlink_queue.c static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
skb               732 net/netfilter/nfnetlink_queue.c 	if (nf_bridge_info_get(skb))
skb               733 net/netfilter/nfnetlink_queue.c 		__skb_pull(skb, skb->network_header - skb->mac_header);
skb               748 net/netfilter/nfnetlink_queue.c 			   struct sk_buff *skb, struct nf_queue_entry *entry)
skb               753 net/netfilter/nfnetlink_queue.c 	nf_bridge_adjust_segmented_data(skb);
skb               755 net/netfilter/nfnetlink_queue.c 	if (skb->next == NULL) { /* last packet, no need to copy entry */
skb               756 net/netfilter/nfnetlink_queue.c 		struct sk_buff *gso_skb = entry->skb;
skb               757 net/netfilter/nfnetlink_queue.c 		entry->skb = skb;
skb               760 net/netfilter/nfnetlink_queue.c 			entry->skb = gso_skb;
skb               764 net/netfilter/nfnetlink_queue.c 	skb_mark_not_on_list(skb);
skb               768 net/netfilter/nfnetlink_queue.c 		entry_seg->skb = skb;
skb               781 net/netfilter/nfnetlink_queue.c 	struct sk_buff *skb, *segs;
skb               794 net/netfilter/nfnetlink_queue.c 	skb = entry->skb;
skb               798 net/netfilter/nfnetlink_queue.c 		skb->protocol = htons(ETH_P_IP);
skb               801 net/netfilter/nfnetlink_queue.c 		skb->protocol = htons(ETH_P_IPV6);
skb               805 net/netfilter/nfnetlink_queue.c 	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
skb               808 net/netfilter/nfnetlink_queue.c 	nf_bridge_adjust_skb_data(skb);
skb               809 net/netfilter/nfnetlink_queue.c 	segs = skb_gso_segment(skb, 0);
skb               833 net/netfilter/nfnetlink_queue.c 		kfree_skb(skb);
skb               837 net/netfilter/nfnetlink_queue.c 	nf_bridge_adjust_segmented_data(skb);
skb               847 net/netfilter/nfnetlink_queue.c 		if (pskb_trim(e->skb, data_len))
skb               852 net/netfilter/nfnetlink_queue.c 		if (diff > skb_tailroom(e->skb)) {
skb               853 net/netfilter/nfnetlink_queue.c 			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
skb               857 net/netfilter/nfnetlink_queue.c 			kfree_skb(e->skb);
skb               858 net/netfilter/nfnetlink_queue.c 			e->skb = nskb;
skb               860 net/netfilter/nfnetlink_queue.c 		skb_put(e->skb, diff);
skb               862 net/netfilter/nfnetlink_queue.c 	if (skb_ensure_writable(e->skb, data_len))
skb               864 net/netfilter/nfnetlink_queue.c 	skb_copy_to_linear_data(e->skb, data, data_len);
skb               865 net/netfilter/nfnetlink_queue.c 	e->skb->ip_summed = CHECKSUM_NONE;
skb               906 net/netfilter/nfnetlink_queue.c 	physinif = nf_bridge_get_physinif(entry->skb);
skb               907 net/netfilter/nfnetlink_queue.c 	physoutif = nf_bridge_get_physoutif(entry->skb);
skb              1060 net/netfilter/nfnetlink_queue.c 				    struct sk_buff *skb,
skb              1075 net/netfilter/nfnetlink_queue.c 					NETLINK_CB(skb).portid);
skb              1102 net/netfilter/nfnetlink_queue.c 			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
skb              1117 net/netfilter/nfnetlink_queue.c 	ct = nfnl_ct->get_ct(entry->skb, ctinfo);
skb              1126 net/netfilter/nfnetlink_queue.c 				      NETLINK_CB(entry->skb).portid,
skb              1147 net/netfilter/nfnetlink_queue.c 		__vlan_hwaccel_put_tag(entry->skb,
skb              1153 net/netfilter/nfnetlink_queue.c 		int mac_header_len = entry->skb->network_header -
skb              1154 net/netfilter/nfnetlink_queue.c 			entry->skb->mac_header;
skb              1159 net/netfilter/nfnetlink_queue.c 			memcpy(skb_mac_header(entry->skb),
skb              1168 net/netfilter/nfnetlink_queue.c 			      struct sk_buff *skb,
skb              1186 net/netfilter/nfnetlink_queue.c 					NETLINK_CB(skb).portid);
skb              1216 net/netfilter/nfnetlink_queue.c 		int diff = payload_len - entry->skb->len;
skb              1223 net/netfilter/nfnetlink_queue.c 			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
skb              1227 net/netfilter/nfnetlink_queue.c 		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
skb              1234 net/netfilter/nfnetlink_queue.c 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1255 net/netfilter/nfnetlink_queue.c 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
skb              1313 net/netfilter/nfnetlink_queue.c 	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
skb              1326 net/netfilter/nfnetlink_queue.c 						NETLINK_CB(skb).portid);
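
The nfqnl_mangle() lines above (pskb_trim/skb_copy_expand/skb_put) implement payload replacement from a verdict message: shrink in place, or grow and re-allocate when tailroom is short, then overwrite the linear data and drop any checksum-offload state. A condensed sketch of that flow:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int mangle(struct sk_buff **pskb, const void *data, int data_len)
{
	struct sk_buff *e = *pskb;
	int diff = data_len - e->len;

	if (diff < 0) {
		if (pskb_trim(e, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (diff > skb_tailroom(e)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(e, skb_headroom(e), diff,
					       GFP_ATOMIC);
			if (!nskb)
				return -ENOMEM;
			kfree_skb(e);
			*pskb = e = nskb;
		}
		skb_put(e, diff);
	}
	if (skb_ensure_writable(e, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e, data, data_len);
	e->ip_summed = CHECKSUM_NONE;	/* payload changed under us */
	return 0;
}
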
skb               105 net/netfilter/nft_bitwise.c static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               109 net/netfilter/nft_bitwise.c 	if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg))
skb               111 net/netfilter/nft_bitwise.c 	if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg))
skb               113 net/netfilter/nft_bitwise.c 	if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
skb               116 net/netfilter/nft_bitwise.c 	if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask,
skb               120 net/netfilter/nft_bitwise.c 	if (nft_data_dump(skb, NFTA_BITWISE_XOR, &priv->xor,
skb               150 net/netfilter/nft_byteorder.c static int nft_byteorder_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               154 net/netfilter/nft_byteorder.c 	if (nft_dump_register(skb, NFTA_BYTEORDER_SREG, priv->sreg))
skb               156 net/netfilter/nft_byteorder.c 	if (nft_dump_register(skb, NFTA_BYTEORDER_DREG, priv->dreg))
skb               158 net/netfilter/nft_byteorder.c 	if (nla_put_be32(skb, NFTA_BYTEORDER_OP, htonl(priv->op)))
skb               160 net/netfilter/nft_byteorder.c 	if (nla_put_be32(skb, NFTA_BYTEORDER_LEN, htonl(priv->len)))
skb               162 net/netfilter/nft_byteorder.c 	if (nla_put_be32(skb, NFTA_BYTEORDER_SIZE, htonl(priv->size)))
skb                15 net/netfilter/nft_chain_filter.c 				      struct sk_buff *skb,
skb                20 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
skb                21 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo_ipv4(&pkt, skb);
skb                59 net/netfilter/nft_chain_filter.c static unsigned int nft_do_chain_arp(void *priv, struct sk_buff *skb,
skb                64 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
skb                65 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo_unspec(&pkt, skb);
skb                99 net/netfilter/nft_chain_filter.c 				      struct sk_buff *skb,
skb               104 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
skb               105 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo_ipv6(&pkt, skb);
skb               143 net/netfilter/nft_chain_filter.c static unsigned int nft_do_chain_inet(void *priv, struct sk_buff *skb,
skb               148 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
skb               152 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv4(&pkt, skb);
skb               155 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv6(&pkt, skb);
skb               199 net/netfilter/nft_chain_filter.c 		    struct sk_buff *skb,
skb               204 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
skb               206 net/netfilter/nft_chain_filter.c 	switch (eth_hdr(skb)->h_proto) {
skb               208 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv4_validate(&pkt, skb);
skb               211 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv6_validate(&pkt, skb);
skb               214 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_unspec(&pkt, skb);
skb               254 net/netfilter/nft_chain_filter.c static unsigned int nft_do_chain_netdev(void *priv, struct sk_buff *skb,
skb               259 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
skb               261 net/netfilter/nft_chain_filter.c 	switch (skb->protocol) {
skb               263 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv4_validate(&pkt, skb);
skb               266 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv6_validate(&pkt, skb);
skb               269 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_unspec(&pkt, skb);
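
nft_do_chain_netdev() above dispatches on skb->protocol to pick the matching layer-3 pktinfo initializer, defaulting to the unspec variant. The shape of that dispatch, with hypothetical decode_*() helpers standing in for nft_set_pktinfo_*():

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* hypothetical per-family decoders */
void decode_ipv4(struct sk_buff *skb);
void decode_ipv6(struct sk_buff *skb);
void decode_unspec(struct sk_buff *skb);

static void dispatch(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		decode_ipv4(skb);
		break;
	case htons(ETH_P_IPV6):
		decode_ipv6(skb);
		break;
	default:
		decode_unspec(skb);
		break;
	}
}
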
skb                10 net/netfilter/nft_chain_nat.c static unsigned int nft_nat_do_chain(void *priv, struct sk_buff *skb,
skb                15 net/netfilter/nft_chain_nat.c 	nft_set_pktinfo(&pkt, skb, state);
skb                20 net/netfilter/nft_chain_nat.c 		nft_set_pktinfo_ipv4(&pkt, skb);
skb                25 net/netfilter/nft_chain_nat.c 		nft_set_pktinfo_ipv6(&pkt, skb);
skb                17 net/netfilter/nft_chain_route.c 					 struct sk_buff *skb,
skb                28 net/netfilter/nft_chain_route.c 	nft_set_pktinfo(&pkt, skb, state);
skb                29 net/netfilter/nft_chain_route.c 	nft_set_pktinfo_ipv4(&pkt, skb);
skb                31 net/netfilter/nft_chain_route.c 	mark = skb->mark;
skb                32 net/netfilter/nft_chain_route.c 	iph = ip_hdr(skb);
skb                39 net/netfilter/nft_chain_route.c 		iph = ip_hdr(skb);
skb                43 net/netfilter/nft_chain_route.c 		    skb->mark != mark ||
skb                45 net/netfilter/nft_chain_route.c 			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
skb                66 net/netfilter/nft_chain_route.c 					 struct sk_buff *skb,
skb                76 net/netfilter/nft_chain_route.c 	nft_set_pktinfo(&pkt, skb, state);
skb                77 net/netfilter/nft_chain_route.c 	nft_set_pktinfo_ipv6(&pkt, skb);
skb                80 net/netfilter/nft_chain_route.c 	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
skb                81 net/netfilter/nft_chain_route.c 	memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
skb                82 net/netfilter/nft_chain_route.c 	mark = skb->mark;
skb                83 net/netfilter/nft_chain_route.c 	hop_limit = ipv6_hdr(skb)->hop_limit;
skb                86 net/netfilter/nft_chain_route.c 	flowlabel = *((u32 *)ipv6_hdr(skb));
skb                90 net/netfilter/nft_chain_route.c 	    (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
skb                91 net/netfilter/nft_chain_route.c 	     memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
skb                92 net/netfilter/nft_chain_route.c 	     skb->mark != mark ||
skb                93 net/netfilter/nft_chain_route.c 	     ipv6_hdr(skb)->hop_limit != hop_limit ||
skb                94 net/netfilter/nft_chain_route.c 	     flowlabel != *((u32 *)ipv6_hdr(skb)))) {
skb                95 net/netfilter/nft_chain_route.c 		err = nf_ip6_route_me_harder(state->net, skb);
skb               116 net/netfilter/nft_chain_route.c 					struct sk_buff *skb,
skb               123 net/netfilter/nft_chain_route.c 		return nf_route_table_hook4(priv, skb, state);
skb               125 net/netfilter/nft_chain_route.c 		return nf_route_table_hook6(priv, skb, state);
skb               127 net/netfilter/nft_chain_route.c 		nft_set_pktinfo(&pkt, skb, state);
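
The nf_route_table_hook4() lines above snapshot the routing-relevant fields before the chain runs and only re-route when the chain changed one of them. A sketch under that assumption, with a hypothetical run_chain() standing in for nft_do_chain():

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip.h>
#include <net/route.h>

unsigned int run_chain(struct sk_buff *skb);	/* hypothetical */

static unsigned int route_hook(struct net *net, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	__be32 saddr = iph->saddr, daddr = iph->daddr;
	u32 mark = skb->mark;
	u8 tos = iph->tos;
	unsigned int verdict = run_chain(skb);

	iph = ip_hdr(skb);	/* the chain may have reallocated headers */
	if (verdict == NF_ACCEPT &&
	    (iph->saddr != saddr || iph->daddr != daddr ||
	     skb->mark != mark || iph->tos != tos)) {
		if (ip_route_me_harder(net, skb, RTN_UNSPEC))
			verdict = NF_DROP;
	}
	return verdict;
}
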
skb                99 net/netfilter/nft_cmp.c static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               103 net/netfilter/nft_cmp.c 	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
skb               105 net/netfilter/nft_cmp.c 	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
skb               108 net/netfilter/nft_cmp.c 	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
skb               204 net/netfilter/nft_cmp.c static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               209 net/netfilter/nft_cmp.c 	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
skb               211 net/netfilter/nft_cmp.c 	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(NFT_CMP_EQ)))
skb               215 net/netfilter/nft_cmp.c 	if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
skb                73 net/netfilter/nft_compat.c 	struct sk_buff *skb = pkt->skb;
skb                78 net/netfilter/nft_compat.c 	ret = target->target(skb, &pkt->xt);
skb                99 net/netfilter/nft_compat.c 	struct sk_buff *skb = pkt->skb;
skb               104 net/netfilter/nft_compat.c 	ret = target->target(skb, &pkt->xt);
skb               269 net/netfilter/nft_compat.c static int nft_extension_dump_info(struct sk_buff *skb, int attr,
skb               276 net/netfilter/nft_compat.c 	nla = nla_reserve(skb, attr, aligned_size);
skb               287 net/netfilter/nft_compat.c static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               292 net/netfilter/nft_compat.c 	if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
skb               293 net/netfilter/nft_compat.c 	    nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
skb               294 net/netfilter/nft_compat.c 	    nft_extension_dump_info(skb, NFTA_TARGET_INFO, info,
skb               334 net/netfilter/nft_compat.c 	struct sk_buff *skb = pkt->skb;
skb               339 net/netfilter/nft_compat.c 	ret = match->match(skb, (struct xt_action_param *)&pkt->xt);
skb               516 net/netfilter/nft_compat.c static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
skb               521 net/netfilter/nft_compat.c 	if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
skb               522 net/netfilter/nft_compat.c 	    nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
skb               523 net/netfilter/nft_compat.c 	    nft_extension_dump_info(skb, NFTA_MATCH_INFO, info,
skb               533 net/netfilter/nft_compat.c static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               535 net/netfilter/nft_compat.c 	return __nft_match_dump(skb, expr, nft_expr_priv(expr));
skb               538 net/netfilter/nft_compat.c static int nft_match_large_dump(struct sk_buff *skb, const struct nft_expr *e)
skb               542 net/netfilter/nft_compat.c 	return __nft_match_dump(skb, e, priv->info);
skb               570 net/netfilter/nft_compat.c nfnl_compat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
skb               579 net/netfilter/nft_compat.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
skb               588 net/netfilter/nft_compat.c 	if (nla_put_string(skb, NFTA_COMPAT_NAME, name) ||
skb               589 net/netfilter/nft_compat.c 	    nla_put_be32(skb, NFTA_COMPAT_REV, htonl(rev)) ||
skb               590 net/netfilter/nft_compat.c 	    nla_put_be32(skb, NFTA_COMPAT_TYPE, htonl(target)))
skb               593 net/netfilter/nft_compat.c 	nlmsg_end(skb, nlh);
skb               594 net/netfilter/nft_compat.c 	return skb->len;
skb               598 net/netfilter/nft_compat.c 	nlmsg_cancel(skb, nlh);
skb               603 net/netfilter/nft_compat.c 			       struct sk_buff *skb, const struct nlmsghdr *nlh,
skb               661 net/netfilter/nft_compat.c 	if (nfnl_compat_fill_info(skb2, NETLINK_CB(skb).portid,
skb               671 net/netfilter/nft_compat.c 	ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
skb                36 net/netfilter/nft_connlimit.c 	ct = nf_ct_get(pkt->skb, &ctinfo);
skb                40 net/netfilter/nft_connlimit.c 	} else if (!nf_ct_get_tuplepr(pkt->skb, skb_network_offset(pkt->skb),
skb                93 net/netfilter/nft_connlimit.c static int nft_connlimit_do_dump(struct sk_buff *skb,
skb                96 net/netfilter/nft_connlimit.c 	if (nla_put_be32(skb, NFTA_CONNLIMIT_COUNT, htonl(priv->limit)))
skb                99 net/netfilter/nft_connlimit.c 	    nla_put_be32(skb, NFTA_CONNLIMIT_FLAGS, htonl(NFT_CONNLIMIT_F_INV)))
skb               134 net/netfilter/nft_connlimit.c static int nft_connlimit_obj_dump(struct sk_buff *skb,
skb               139 net/netfilter/nft_connlimit.c 	return nft_connlimit_do_dump(skb, priv);
skb               174 net/netfilter/nft_connlimit.c static int nft_connlimit_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               178 net/netfilter/nft_connlimit.c 	return nft_connlimit_do_dump(skb, priv);
skb                41 net/netfilter/nft_counter.c 	this_cpu->bytes += pkt->skb->len;
skb               140 net/netfilter/nft_counter.c static int nft_counter_do_dump(struct sk_buff *skb,
skb               148 net/netfilter/nft_counter.c 	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes),
skb               150 net/netfilter/nft_counter.c 	    nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets),
skb               163 net/netfilter/nft_counter.c static int nft_counter_obj_dump(struct sk_buff *skb,
skb               168 net/netfilter/nft_counter.c 	return nft_counter_do_dump(skb, priv, reset);
skb               203 net/netfilter/nft_counter.c static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               207 net/netfilter/nft_counter.c 	return nft_counter_do_dump(skb, priv, false);
skb                71 net/netfilter/nft_ct.c 	ct = nf_ct_get(pkt->skb, &ctinfo);
skb               239 net/netfilter/nft_ct.c 	struct sk_buff *skb = pkt->skb;
skb               244 net/netfilter/nft_ct.c 	ct = nf_ct_get(skb, &ctinfo);
skb               275 net/netfilter/nft_ct.c 	nf_ct_set(skb, ct, IP_CT_NEW);
skb               284 net/netfilter/nft_ct.c 	struct sk_buff *skb = pkt->skb;
skb               291 net/netfilter/nft_ct.c 	ct = nf_ct_get(skb, &ctinfo);
skb               636 net/netfilter/nft_ct.c static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               640 net/netfilter/nft_ct.c 	if (nft_dump_register(skb, NFTA_CT_DREG, priv->dreg))
skb               642 net/netfilter/nft_ct.c 	if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
skb               654 net/netfilter/nft_ct.c 		if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
skb               662 net/netfilter/nft_ct.c 		    nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
skb               675 net/netfilter/nft_ct.c static int nft_ct_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               679 net/netfilter/nft_ct.c 	if (nft_dump_register(skb, NFTA_CT_SREG, priv->sreg))
skb               681 net/netfilter/nft_ct.c 	if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
skb               687 net/netfilter/nft_ct.c 		    nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
skb               766 net/netfilter/nft_ct.c 	struct sk_buff *skb = pkt->skb;
skb               770 net/netfilter/nft_ct.c 	ct = nf_ct_get(pkt->skb, &ctinfo);
skb               775 net/netfilter/nft_ct.c 	nf_ct_set(skb, ct, IP_CT_UNTRACKED);
skb               831 net/netfilter/nft_ct.c 	struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
skb               857 net/netfilter/nft_ct.c 		nf_ct_refresh(ct, pkt->skb, values[0]);
skb               927 net/netfilter/nft_ct.c static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
skb               935 net/netfilter/nft_ct.c 	if (nla_put_u8(skb, NFTA_CT_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
skb               936 net/netfilter/nft_ct.c 	    nla_put_be16(skb, NFTA_CT_TIMEOUT_L3PROTO, htons(timeout->l3num)))
skb               939 net/netfilter/nft_ct.c 	nest_params = nla_nest_start(skb, NFTA_CT_TIMEOUT_DATA);
skb               943 net/netfilter/nft_ct.c 	ret = timeout->l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data);
skb               946 net/netfilter/nft_ct.c 	nla_nest_end(skb, nest_params);
skb              1067 net/netfilter/nft_ct.c 	struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
skb              1102 net/netfilter/nft_ct.c static int nft_ct_helper_obj_dump(struct sk_buff *skb,
skb              1120 net/netfilter/nft_ct.c 	if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
skb              1123 net/netfilter/nft_ct.c 	if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
skb              1126 net/netfilter/nft_ct.c 	if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
skb              1195 net/netfilter/nft_ct.c static int nft_ct_expect_obj_dump(struct sk_buff *skb,
skb              1200 net/netfilter/nft_ct.c 	if (nla_put_be16(skb, NFTA_CT_EXPECT_L3PROTO, htons(priv->l3num)) ||
skb              1201 net/netfilter/nft_ct.c 	    nla_put_u8(skb, NFTA_CT_EXPECT_L4PROTO, priv->l4proto) ||
skb              1202 net/netfilter/nft_ct.c 	    nla_put_be16(skb, NFTA_CT_EXPECT_DPORT, priv->dport) ||
skb              1203 net/netfilter/nft_ct.c 	    nla_put_u32(skb, NFTA_CT_EXPECT_TIMEOUT, priv->timeout) ||
skb              1204 net/netfilter/nft_ct.c 	    nla_put_u8(skb, NFTA_CT_EXPECT_SIZE, priv->size))
skb              1222 net/netfilter/nft_ct.c 	ct = nf_ct_get(pkt->skb, &ctinfo);
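
Nearly every nft_ct.c line above starts from the same lookup: nf_ct_get() returns the conntrack entry attached to the skb (or NULL) and reports its state through ctinfo. A minimal sketch:

#include <net/netfilter/nf_conntrack.h>

static bool is_established(const struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

	return ct && ctinfo == IP_CT_ESTABLISHED;
}
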
skb                47 net/netfilter/nft_dup_netdev.c static int nft_dup_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                51 net/netfilter/nft_dup_netdev.c 	if (nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
skb               284 net/netfilter/nft_dynset.c static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               289 net/netfilter/nft_dynset.c 	if (nft_dump_register(skb, NFTA_DYNSET_SREG_KEY, priv->sreg_key))
skb               292 net/netfilter/nft_dynset.c 	    nft_dump_register(skb, NFTA_DYNSET_SREG_DATA, priv->sreg_data))
skb               294 net/netfilter/nft_dynset.c 	if (nla_put_be32(skb, NFTA_DYNSET_OP, htonl(priv->op)))
skb               296 net/netfilter/nft_dynset.c 	if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
skb               298 net/netfilter/nft_dynset.c 	if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
skb               302 net/netfilter/nft_dynset.c 	if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
skb               304 net/netfilter/nft_dynset.c 	if (nla_put_be32(skb, NFTA_DYNSET_FLAGS, htonl(flags)))
skb                45 net/netfilter/nft_exthdr.c 	err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
skb                55 net/netfilter/nft_exthdr.c 	if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
skb                70 net/netfilter/nft_exthdr.c static int ipv4_find_option(struct net *net, struct sk_buff *skb,
skb                81 net/netfilter/nft_exthdr.c 	iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
skb                94 net/netfilter/nft_exthdr.c 	if (skb_copy_bits(skb, start, opt->__data, optlen))
skb               135 net/netfilter/nft_exthdr.c 	struct sk_buff *skb = pkt->skb;
skb               139 net/netfilter/nft_exthdr.c 	if (skb->protocol != htons(ETH_P_IP))
skb               142 net/netfilter/nft_exthdr.c 	err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
skb               152 net/netfilter/nft_exthdr.c 	if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
skb               168 net/netfilter/nft_exthdr.c 	tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buffer);
skb               176 net/netfilter/nft_exthdr.c 	return skb_header_pointer(pkt->skb, pkt->xt.thoff, *tcphdr_len, buffer);
skb               253 net/netfilter/nft_exthdr.c 		if (skb_ensure_writable(pkt->skb,
skb               282 net/netfilter/nft_exthdr.c 			inet_proto_csum_replace2(&tcph->check, pkt->skb,
skb               293 net/netfilter/nft_exthdr.c 			inet_proto_csum_replace4(&tcph->check, pkt->skb,
skb               434 net/netfilter/nft_exthdr.c static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv)
skb               436 net/netfilter/nft_exthdr.c 	if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
skb               438 net/netfilter/nft_exthdr.c 	if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
skb               440 net/netfilter/nft_exthdr.c 	if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
skb               442 net/netfilter/nft_exthdr.c 	if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags)))
skb               444 net/netfilter/nft_exthdr.c 	if (nla_put_be32(skb, NFTA_EXTHDR_OP, htonl(priv->op)))
skb               452 net/netfilter/nft_exthdr.c static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               456 net/netfilter/nft_exthdr.c 	if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg))
skb               459 net/netfilter/nft_exthdr.c 	return nft_exthdr_dump_common(skb, priv);
skb               462 net/netfilter/nft_exthdr.c static int nft_exthdr_dump_set(struct sk_buff *skb, const struct nft_expr *expr)
skb               466 net/netfilter/nft_exthdr.c 	if (nft_dump_register(skb, NFTA_EXTHDR_SREG, priv->sreg))
skb               469 net/netfilter/nft_exthdr.c 	return nft_exthdr_dump_common(skb, priv);
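
The nft_exthdr set path above patches TCP option fields in place and repairs the checksum incrementally with inet_proto_csum_replace2() instead of recomputing it over the whole segment. A sketch for a 16-bit field, assuming field already points into a writable header:

#include <linux/tcp.h>
#include <net/checksum.h>

static void rewrite16(struct sk_buff *skb, struct tcphdr *tcph,
		      __be16 *field, __be16 newval)
{
	/* pseudohdr=false: option bytes are not in the pseudo header */
	inet_proto_csum_replace2(&tcph->check, skb, *field, newval, false);
	*field = newval;
}
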
skb               118 net/netfilter/nft_fib.c int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               122 net/netfilter/nft_fib.c 	if (nft_dump_register(skb, NFTA_FIB_DREG, priv->dreg))
skb               125 net/netfilter/nft_fib.c 	if (nla_put_be32(skb, NFTA_FIB_RESULT, htonl(priv->result)))
skb               128 net/netfilter/nft_fib.c 	if (nla_put_be32(skb, NFTA_FIB_FLAGS, htonl(priv->flags)))
skb                27 net/netfilter/nft_fib_netdev.c 	switch (ntohs(pkt->skb->protocol)) {
skb                27 net/netfilter/nft_flow_offload.c 	struct dst_entry *this_dst = skb_dst(pkt->skb);
skb                53 net/netfilter/nft_flow_offload.c static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
skb                55 net/netfilter/nft_flow_offload.c 	if (skb_sec_path(skb))
skb                61 net/netfilter/nft_flow_offload.c 		opt = &(IPCB(skb)->opt);
skb                84 net/netfilter/nft_flow_offload.c 	if (nft_flow_offload_skip(pkt->skb, nft_pf(pkt)))
skb                87 net/netfilter/nft_flow_offload.c 	ct = nf_ct_get(pkt->skb, &ctinfo);
skb                93 net/netfilter/nft_flow_offload.c 		tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
skb               203 net/netfilter/nft_flow_offload.c static int nft_flow_offload_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               207 net/netfilter/nft_flow_offload.c 	if (nla_put_string(skb, NFTA_FLOW_TABLE_NAME, priv->flowtable->name))
skb                32 net/netfilter/nft_fwd_netdev.c 	skb_set_redirected(pkt->skb, true);
skb                57 net/netfilter/nft_fwd_netdev.c static int nft_fwd_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                61 net/netfilter/nft_fwd_netdev.c 	if (nft_dump_register(skb, NFTA_FWD_SREG_DEV, priv->sreg_dev))
skb                94 net/netfilter/nft_fwd_netdev.c 	struct sk_buff *skb = pkt->skb;
skb               102 net/netfilter/nft_fwd_netdev.c 		if (skb->protocol != htons(ETH_P_IP)) {
skb               106 net/netfilter/nft_fwd_netdev.c 		if (skb_try_make_writable(skb, sizeof(*iph))) {
skb               110 net/netfilter/nft_fwd_netdev.c 		iph = ip_hdr(skb);
skb               118 net/netfilter/nft_fwd_netdev.c 		if (skb->protocol != htons(ETH_P_IPV6)) {
skb               122 net/netfilter/nft_fwd_netdev.c 		if (skb_try_make_writable(skb, sizeof(*ip6h))) {
skb               126 net/netfilter/nft_fwd_netdev.c 		ip6h = ipv6_hdr(skb);
skb               140 net/netfilter/nft_fwd_netdev.c 	skb->dev = dev;
skb               141 net/netfilter/nft_fwd_netdev.c 	neigh_xmit(neigh_table, dev, addr, skb);
skb               181 net/netfilter/nft_fwd_netdev.c static int nft_fwd_neigh_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               185 net/netfilter/nft_fwd_netdev.c 	if (nft_dump_register(skb, NFTA_FWD_SREG_DEV, priv->sreg_dev) ||
skb               186 net/netfilter/nft_fwd_netdev.c 	    nft_dump_register(skb, NFTA_FWD_SREG_ADDR, priv->sreg_addr) ||
skb               187 net/netfilter/nft_fwd_netdev.c 	    nla_put_be32(skb, NFTA_FWD_NFPROTO, htonl(priv->nfproto)))
skb                51 net/netfilter/nft_hash.c 	struct sk_buff *skb = pkt->skb;
skb                54 net/netfilter/nft_hash.c 	h = reciprocal_scale(__skb_get_hash_symmetric(skb), priv->modulus);
skb               142 net/netfilter/nft_hash.c static int nft_jhash_dump(struct sk_buff *skb,
skb               147 net/netfilter/nft_hash.c 	if (nft_dump_register(skb, NFTA_HASH_SREG, priv->sreg))
skb               149 net/netfilter/nft_hash.c 	if (nft_dump_register(skb, NFTA_HASH_DREG, priv->dreg))
skb               151 net/netfilter/nft_hash.c 	if (nla_put_be32(skb, NFTA_HASH_LEN, htonl(priv->len)))
skb               153 net/netfilter/nft_hash.c 	if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
skb               156 net/netfilter/nft_hash.c 	    nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
skb               159 net/netfilter/nft_hash.c 		if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
skb               161 net/netfilter/nft_hash.c 	if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_JENKINS)))
skb               169 net/netfilter/nft_hash.c static int nft_symhash_dump(struct sk_buff *skb,
skb               174 net/netfilter/nft_hash.c 	if (nft_dump_register(skb, NFTA_HASH_DREG, priv->dreg))
skb               176 net/netfilter/nft_hash.c 	if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
skb               179 net/netfilter/nft_hash.c 		if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
skb               181 net/netfilter/nft_hash.c 	if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_SYM)))
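
Both nft_hash variants above reduce a 32-bit hash to a bucket with reciprocal_scale(), which maps onto [0, modulus) without a division, then add the configured offset. A minimal sketch using jhash():

#include <linux/jhash.h>
#include <linux/kernel.h>

static u32 bucket(const void *key, u32 len, u32 seed, u32 modulus, u32 offset)
{
	u32 h = jhash(key, len, seed);

	return reciprocal_scale(h, modulus) + offset;
}
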
skb                84 net/netfilter/nft_immediate.c static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                88 net/netfilter/nft_immediate.c 	if (nft_dump_register(skb, NFTA_IMMEDIATE_DREG, priv->dreg))
skb                91 net/netfilter/nft_immediate.c 	return nft_data_dump(skb, NFTA_IMMEDIATE_DATA, &priv->data,
skb               104 net/netfilter/nft_limit.c static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
skb               110 net/netfilter/nft_limit.c 	if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(limit->rate),
skb               112 net/netfilter/nft_limit.c 	    nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs),
skb               114 net/netfilter/nft_limit.c 	    nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
skb               115 net/netfilter/nft_limit.c 	    nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)) ||
skb               116 net/netfilter/nft_limit.c 	    nla_put_be32(skb, NFTA_LIMIT_FLAGS, htonl(flags)))
skb               162 net/netfilter/nft_limit.c static int nft_limit_pkts_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               166 net/netfilter/nft_limit.c 	return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
skb               183 net/netfilter/nft_limit.c 	u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
skb               198 net/netfilter/nft_limit.c static int nft_limit_bytes_dump(struct sk_buff *skb,
skb               203 net/netfilter/nft_limit.c 	return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
skb               264 net/netfilter/nft_limit.c static int nft_limit_obj_pkts_dump(struct sk_buff *skb,
skb               270 net/netfilter/nft_limit.c 	return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
skb               287 net/netfilter/nft_limit.c 	u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
skb               302 net/netfilter/nft_limit.c static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
skb               308 net/netfilter/nft_limit.c 	return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
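
The byte-rate limiter above charges each packet a token cost proportional to its length; div64_u64() is used because the 64-bit product cannot go through a plain divide on 32-bit architectures. A sketch of just that computation:

#include <linux/math64.h>

static u64 byte_cost(u64 nsecs, u64 rate, unsigned int pkt_len)
{
	/* tokens consumed = nsecs * len / rate, all in 64-bit */
	return div64_u64(nsecs * pkt_len, rate);
}
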
skb                29 net/netfilter/nft_log.c static bool audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
skb                34 net/netfilter/nft_log.c 	ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_iph), &_iph);
skb                44 net/netfilter/nft_log.c static bool audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
skb                51 net/netfilter/nft_log.c 	ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
skb                56 net/netfilter/nft_log.c 	ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), &nexthdr, &frag_off);
skb                66 net/netfilter/nft_log.c 	struct sk_buff *skb = pkt->skb;
skb                77 net/netfilter/nft_log.c 	audit_log_format(ab, "mark=%#x", skb->mark);
skb                81 net/netfilter/nft_log.c 		switch (eth_hdr(skb)->h_proto) {
skb                83 net/netfilter/nft_log.c 			fam = audit_ip4(ab, skb) ? NFPROTO_IPV4 : -1;
skb                86 net/netfilter/nft_log.c 			fam = audit_ip6(ab, skb) ? NFPROTO_IPV6 : -1;
skb                91 net/netfilter/nft_log.c 		fam = audit_ip4(ab, skb) ? NFPROTO_IPV4 : -1;
skb                94 net/netfilter/nft_log.c 		fam = audit_ip6(ab, skb) ? NFPROTO_IPV6 : -1;
skb               116 net/netfilter/nft_log.c 	nf_log_packet(nft_net(pkt), nft_pf(pkt), nft_hook(pkt), pkt->skb,
skb               226 net/netfilter/nft_log.c static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               232 net/netfilter/nft_log.c 		if (nla_put_string(skb, NFTA_LOG_PREFIX, priv->prefix))
skb               236 net/netfilter/nft_log.c 		if (nla_put_be32(skb, NFTA_LOG_LEVEL, htonl(li->u.log.level)))
skb               240 net/netfilter/nft_log.c 			if (nla_put_be32(skb, NFTA_LOG_FLAGS,
skb               246 net/netfilter/nft_log.c 		if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
skb               250 net/netfilter/nft_log.c 			if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
skb               255 net/netfilter/nft_log.c 			if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
skb               143 net/netfilter/nft_lookup.c static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               148 net/netfilter/nft_lookup.c 	if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name))
skb               150 net/netfilter/nft_lookup.c 	if (nft_dump_register(skb, NFTA_LOOKUP_SREG, priv->sreg))
skb               153 net/netfilter/nft_lookup.c 		if (nft_dump_register(skb, NFTA_LOOKUP_DREG, priv->dreg))
skb               155 net/netfilter/nft_lookup.c 	if (nla_put_be32(skb, NFTA_LOOKUP_FLAGS, htonl(flags)))
skb                80 net/netfilter/nft_masq.c static int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                85 net/netfilter/nft_masq.c 	    nla_put_be32(skb, NFTA_MASQ_FLAGS, htonl(priv->flags)))
skb                89 net/netfilter/nft_masq.c 		if (nft_dump_register(skb, NFTA_MASQ_REG_PROTO_MIN,
skb                91 net/netfilter/nft_masq.c 		    nft_dump_register(skb, NFTA_MASQ_REG_PROTO_MAX,
skb               117 net/netfilter/nft_masq.c 	regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
skb               163 net/netfilter/nft_masq.c 	regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
skb                64 net/netfilter/nft_meta.c 	const struct sk_buff *skb = pkt->skb;
skb                71 net/netfilter/nft_meta.c 		*dest = skb->len;
skb                74 net/netfilter/nft_meta.c 		nft_reg_store16(dest, (__force u16)skb->protocol);
skb                85 net/netfilter/nft_meta.c 		*dest = skb->priority;
skb                88 net/netfilter/nft_meta.c 		*dest = skb->mark;
skb               113 net/netfilter/nft_meta.c 		sk = skb_to_full_sk(skb);
skb               130 net/netfilter/nft_meta.c 		sk = skb_to_full_sk(skb);
skb               147 net/netfilter/nft_meta.c 		const struct dst_entry *dst = skb_dst(skb);
skb               157 net/netfilter/nft_meta.c 		*dest = skb->secmark;
skb               161 net/netfilter/nft_meta.c 		if (skb->pkt_type != PACKET_LOOPBACK) {
skb               162 net/netfilter/nft_meta.c 			nft_reg_store8(dest, skb->pkt_type);
skb               168 net/netfilter/nft_meta.c 			if (ipv4_is_multicast(ip_hdr(skb)->daddr))
skb               177 net/netfilter/nft_meta.c 			switch (skb->protocol) {
skb               179 net/netfilter/nft_meta.c 				int noff = skb_network_offset(skb);
skb               182 net/netfilter/nft_meta.c 				iph = skb_header_pointer(skb, noff,
skb               222 net/netfilter/nft_meta.c 		sk = skb_to_full_sk(skb);
skb               236 net/netfilter/nft_meta.c 		nft_reg_store8(dest, secpath_exists(skb));
skb               274 net/netfilter/nft_meta.c 	struct sk_buff *skb = pkt->skb;
skb               281 net/netfilter/nft_meta.c 		skb->mark = value;
skb               284 net/netfilter/nft_meta.c 		skb->priority = value;
skb               289 net/netfilter/nft_meta.c 		if (skb->pkt_type != value8 &&
skb               291 net/netfilter/nft_meta.c 		    skb_pkt_type_ok(skb->pkt_type))
skb               292 net/netfilter/nft_meta.c 			skb->pkt_type = value8;
skb               297 net/netfilter/nft_meta.c 		skb->nf_trace = !!value8;
skb               301 net/netfilter/nft_meta.c 		skb->secmark = value;
skb               490 net/netfilter/nft_meta.c int nft_meta_get_dump(struct sk_buff *skb,
skb               495 net/netfilter/nft_meta.c 	if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
skb               497 net/netfilter/nft_meta.c 	if (nft_dump_register(skb, NFTA_META_DREG, priv->dreg))
skb               506 net/netfilter/nft_meta.c int nft_meta_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               510 net/netfilter/nft_meta.c 	if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
skb               512 net/netfilter/nft_meta.c 	if (nft_dump_register(skb, NFTA_META_SREG, priv->sreg))
skb               642 net/netfilter/nft_meta.c 	struct sk_buff *skb = pkt->skb;
skb               644 net/netfilter/nft_meta.c 	skb->secmark = priv->secid;
skb               672 net/netfilter/nft_meta.c static int nft_secmark_obj_dump(struct sk_buff *skb, struct nft_object *obj,
skb               678 net/netfilter/nft_meta.c 	if (nla_put_string(skb, NFTA_SECMARK_CTX, priv->ctx))
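
The nft_*_dump() callbacks indexed above (nft_lookup, nft_masq, nft_meta) all share one shape. A minimal sketch of that shape, assuming a hypothetical EXAMPLE_* attribute enum and priv struct; nft_expr_priv(), nla_put_be32() and nft_dump_register() are the real helpers:

	/* Sketch only: emit one netlink attribute per nla_put_*() call and
	 * bail to a common cancel label when the message skb fills up, so
	 * userspace can retry the dump with a larger buffer. */
	static int example_expr_dump(struct sk_buff *skb, const struct nft_expr *expr)
	{
		const struct example_priv *priv = nft_expr_priv(expr); /* hypothetical */

		if (nla_put_be32(skb, EXAMPLE_ATTR_KEY, htonl(priv->key)))
			goto nla_put_failure;
		if (nft_dump_register(skb, EXAMPLE_ATTR_DREG, priv->dreg))
			goto nla_put_failure;
		return 0;

	nla_put_failure:
		return -1;
	}
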
skb                39 net/netfilter/nft_nat.c 	struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo);
skb               205 net/netfilter/nft_nat.c static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               211 net/netfilter/nft_nat.c 		if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_SNAT)))
skb               215 net/netfilter/nft_nat.c 		if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_DNAT)))
skb               220 net/netfilter/nft_nat.c 	if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family)))
skb               224 net/netfilter/nft_nat.c 		if (nft_dump_register(skb, NFTA_NAT_REG_ADDR_MIN,
skb               226 net/netfilter/nft_nat.c 		    nft_dump_register(skb, NFTA_NAT_REG_ADDR_MAX,
skb               232 net/netfilter/nft_nat.c 		if (nft_dump_register(skb, NFTA_NAT_REG_PROTO_MIN,
skb               234 net/netfilter/nft_nat.c 		    nft_dump_register(skb, NFTA_NAT_REG_PROTO_MAX,
skb               240 net/netfilter/nft_nat.c 		if (nla_put_be32(skb, NFTA_NAT_FLAGS, htonl(priv->flags)))
skb                76 net/netfilter/nft_numgen.c static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
skb                79 net/netfilter/nft_numgen.c 	if (nft_dump_register(skb, NFTA_NG_DREG, dreg))
skb                81 net/netfilter/nft_numgen.c 	if (nla_put_be32(skb, NFTA_NG_MODULUS, htonl(modulus)))
skb                83 net/netfilter/nft_numgen.c 	if (nla_put_be32(skb, NFTA_NG_TYPE, htonl(type)))
skb                85 net/netfilter/nft_numgen.c 	if (nla_put_be32(skb, NFTA_NG_OFFSET, htonl(offset)))
skb                94 net/netfilter/nft_numgen.c static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                98 net/netfilter/nft_numgen.c 	return nft_ng_dump(skb, priv->dreg, priv->modulus, NFT_NG_INCREMENTAL,
skb               149 net/netfilter/nft_numgen.c static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               153 net/netfilter/nft_numgen.c 	return nft_ng_dump(skb, priv->dreg, priv->modulus, NFT_NG_RANDOM,
skb                50 net/netfilter/nft_objref.c static int nft_objref_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                54 net/netfilter/nft_objref.c 	if (nla_put_string(skb, NFTA_OBJREF_IMM_NAME, obj->key.name) ||
skb                55 net/netfilter/nft_objref.c 	    nla_put_be32(skb, NFTA_OBJREF_IMM_TYPE,
skb               155 net/netfilter/nft_objref.c static int nft_objref_map_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               159 net/netfilter/nft_objref.c 	if (nft_dump_register(skb, NFTA_OBJREF_SET_SREG, priv->sreg) ||
skb               160 net/netfilter/nft_objref.c 	    nla_put_string(skb, NFTA_OBJREF_SET_NAME, priv->set->name))
skb                25 net/netfilter/nft_osf.c 	struct sk_buff *skb = pkt->skb;
skb                31 net/netfilter/nft_osf.c 	tcp = skb_header_pointer(skb, ip_hdrlen(skb),
skb                42 net/netfilter/nft_osf.c 	if (!nf_osf_find(skb, nf_osf_fingers, priv->ttl, &data)) {
skb                90 net/netfilter/nft_osf.c static int nft_osf_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                94 net/netfilter/nft_osf.c 	if (nla_put_u8(skb, NFTA_OSF_TTL, priv->ttl))
skb                97 net/netfilter/nft_osf.c 	if (nla_put_be32(skb, NFTA_OSF_FLAGS, ntohl(priv->flags)))
skb               100 net/netfilter/nft_osf.c 	if (nft_dump_register(skb, NFTA_OSF_DREG, priv->dreg))
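
nft_osf reads the TCP header through skb_header_pointer(), an idiom that recurs throughout the entries below; a hedged sketch (the wrapper is ours, the helper is real):

	/* Sketch: skb_header_pointer() returns a pointer into the linear skb
	 * data when the bytes are already contiguous, otherwise it copies them
	 * into the caller's stack buffer and returns that. NULL means the
	 * packet is too short -- callers above treat that as no-match. */
	static const struct tcphdr *peek_tcp(const struct sk_buff *skb,
					     unsigned int thoff, struct tcphdr *buf)
	{
		return skb_header_pointer(skb, thoff, sizeof(*buf), buf);
	}
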
skb                28 net/netfilter/nft_payload.c nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
skb                30 net/netfilter/nft_payload.c 	int mac_off = skb_mac_header(skb) - skb->data;
skb                38 net/netfilter/nft_payload.c 		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
skb                41 net/netfilter/nft_payload.c 		veth.h_vlan_proto = skb->vlan_proto;
skb                56 net/netfilter/nft_payload.c 	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
skb                57 net/netfilter/nft_payload.c 	veth.h_vlan_encapsulated_proto = skb->protocol;
skb                70 net/netfilter/nft_payload.c 	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
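
nft_payload_copy_vlan() above exists because hardware-accelerated VLAN keeps the tag in skb metadata rather than in the payload, so link-layer reads must see a synthetic tagged header. A standalone sketch of that reconstruction (struct layout ours; the field sources mirror the excerpts):

	#include <stdint.h>
	#include <string.h>

	struct tagged_eth {			/* what the reader should see */
		uint8_t  dst[6], src[6];
		uint16_t vlan_proto;		/* e.g. 802.1Q, network order */
		uint16_t tci;			/* priority/DEI/VID, network order */
		uint16_t encap_proto;		/* original ethertype */
	} __attribute__((packed));

	static void rebuild_tagged_header(struct tagged_eth *out,
					  const uint8_t eth[14],	/* untagged hdr */
					  uint16_t vlan_proto_be,	/* skb->vlan_proto */
					  uint16_t tci_be)		/* skb_vlan_tag_get() */
	{
		memcpy(out->dst, eth, 6);
		memcpy(out->src, eth + 6, 6);
		out->vlan_proto = vlan_proto_be;
		out->tci = tci_be;
		memcpy(&out->encap_proto, eth + 12, 2);	/* skb->protocol */
	}
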
skb                78 net/netfilter/nft_payload.c 	const struct sk_buff *skb = pkt->skb;
skb                85 net/netfilter/nft_payload.c 		if (!skb_mac_header_was_set(skb))
skb                88 net/netfilter/nft_payload.c 		if (skb_vlan_tag_present(skb)) {
skb                89 net/netfilter/nft_payload.c 			if (!nft_payload_copy_vlan(dest, skb,
skb                94 net/netfilter/nft_payload.c 		offset = skb_mac_header(skb) - skb->data;
skb                97 net/netfilter/nft_payload.c 		offset = skb_network_offset(skb);
skb               109 net/netfilter/nft_payload.c 	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
skb               142 net/netfilter/nft_payload.c static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               146 net/netfilter/nft_payload.c 	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
skb               147 net/netfilter/nft_payload.c 	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
skb               148 net/netfilter/nft_payload.c 	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
skb               149 net/netfilter/nft_payload.c 	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
skb               402 net/netfilter/nft_payload.c static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
skb               406 net/netfilter/nft_payload.c 	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
skb               414 net/netfilter/nft_payload.c 				     struct sk_buff *skb,
skb               422 net/netfilter/nft_payload.c 		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
skb               440 net/netfilter/nft_payload.c 				     struct sk_buff *skb,
skb               449 net/netfilter/nft_payload.c 	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
skb               452 net/netfilter/nft_payload.c 	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
skb               458 net/netfilter/nft_payload.c 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb               460 net/netfilter/nft_payload.c 		if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb               461 net/netfilter/nft_payload.c 			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
skb               469 net/netfilter/nft_payload.c 	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
skb               470 net/netfilter/nft_payload.c 	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
skb               476 net/netfilter/nft_payload.c static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
skb               481 net/netfilter/nft_payload.c 	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
skb               485 net/netfilter/nft_payload.c 	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
skb               486 net/netfilter/nft_payload.c 	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
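
nft_payload_csum_inet() and nft_payload_l4csum_update() above apply RFC 1624 incremental checksum update: subtract the old bytes' sum, add the new, never rescan the packet. A self-contained illustration of the arithmetic (plain C, not kernel code; prints 0x5432):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t fold(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	/* HC' = ~(~HC + ~m + m')  (RFC 1624, eqn. 3) */
	static uint16_t csum_update(uint16_t old_csum, uint16_t old_word,
				    uint16_t new_word)
	{
		uint32_t sum = (uint16_t)~old_csum;

		sum += (uint16_t)~old_word;
		sum += new_word;
		return ~fold(sum);
	}

	int main(void)
	{
		/* one-word "packet": checksum 0xedcb covers 0x1234; after the
		 * word becomes 0xabcd the checksum must become 0x5432 */
		printf("0x%04x\n", csum_update(0xedcb, 0x1234, 0xabcd));
		return 0;
	}
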
skb               497 net/netfilter/nft_payload.c 	struct sk_buff *skb = pkt->skb;
skb               504 net/netfilter/nft_payload.c 		if (!skb_mac_header_was_set(skb))
skb               506 net/netfilter/nft_payload.c 		offset = skb_mac_header(skb) - skb->data;
skb               509 net/netfilter/nft_payload.c 		offset = skb_network_offset(skb);
skb               525 net/netfilter/nft_payload.c 	     skb->ip_summed != CHECKSUM_PARTIAL)) {
skb               526 net/netfilter/nft_payload.c 		fsum = skb_checksum(skb, offset, priv->len, 0);
skb               530 net/netfilter/nft_payload.c 		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
skb               534 net/netfilter/nft_payload.c 		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
skb               538 net/netfilter/nft_payload.c 	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
skb               539 net/netfilter/nft_payload.c 	    skb_store_bits(skb, offset, src, priv->len) < 0)
skb               585 net/netfilter/nft_payload.c static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               589 net/netfilter/nft_payload.c 	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
skb               590 net/netfilter/nft_payload.c 	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
skb               591 net/netfilter/nft_payload.c 	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
skb               592 net/netfilter/nft_payload.c 	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
skb               593 net/netfilter/nft_payload.c 	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
skb               594 net/netfilter/nft_payload.c 	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
skb               596 net/netfilter/nft_payload.c 	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
skb                42 net/netfilter/nft_queue.c 			queue = nfqueue_hash(pkt->skb, queue,
skb               130 net/netfilter/nft_queue.c static int nft_queue_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               134 net/netfilter/nft_queue.c 	if (nla_put_be16(skb, NFTA_QUEUE_NUM, htons(priv->queuenum)) ||
skb               135 net/netfilter/nft_queue.c 	    nla_put_be16(skb, NFTA_QUEUE_TOTAL, htons(priv->queues_total)) ||
skb               136 net/netfilter/nft_queue.c 	    nla_put_be16(skb, NFTA_QUEUE_FLAGS, htons(priv->flags)))
skb               146 net/netfilter/nft_queue.c nft_queue_sreg_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               150 net/netfilter/nft_queue.c 	if (nft_dump_register(skb, NFTA_QUEUE_SREG_QNUM, priv->sreg_qnum) ||
skb               151 net/netfilter/nft_queue.c 	    nla_put_be16(skb, NFTA_QUEUE_FLAGS, htons(priv->flags)))
skb                22 net/netfilter/nft_quota.c 				 const struct sk_buff *skb)
skb                24 net/netfilter/nft_quota.c 	return atomic64_add_return(skb->len, &priv->consumed) >=
skb                37 net/netfilter/nft_quota.c 	if (nft_overquota(priv, pkt->skb) ^ nft_quota_invert(priv))
skb                56 net/netfilter/nft_quota.c 	overquota = nft_overquota(priv, pkt->skb);
skb               121 net/netfilter/nft_quota.c static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
skb               140 net/netfilter/nft_quota.c 	if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(quota),
skb               142 net/netfilter/nft_quota.c 	    nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
skb               144 net/netfilter/nft_quota.c 	    nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
skb               157 net/netfilter/nft_quota.c static int nft_quota_obj_dump(struct sk_buff *skb, struct nft_object *obj,
skb               162 net/netfilter/nft_quota.c 	return nft_quota_do_dump(skb, priv, reset);
skb               201 net/netfilter/nft_quota.c static int nft_quota_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               205 net/netfilter/nft_quota.c 	return nft_quota_do_dump(skb, priv, false);
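
nft_overquota() above is just add-then-compare on a 64-bit counter; the same pattern in portable C11 atomics:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct quota {
		uint64_t limit;			/* bytes allowed */
		_Atomic uint64_t consumed;	/* bytes seen so far */
	};

	/* Sketch: add the packet length first, then test the post-add total,
	 * mirroring atomic64_add_return(...) >= priv->quota above. */
	static bool overquota(struct quota *q, uint64_t pkt_len)
	{
		return atomic_fetch_add(&q->consumed, pkt_len) + pkt_len >= q->limit;
	}
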
skb               117 net/netfilter/nft_range.c static int nft_range_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               121 net/netfilter/nft_range.c 	if (nft_dump_register(skb, NFTA_RANGE_SREG, priv->sreg))
skb               123 net/netfilter/nft_range.c 	if (nla_put_be32(skb, NFTA_RANGE_OP, htonl(priv->op)))
skb               126 net/netfilter/nft_range.c 	if (nft_data_dump(skb, NFTA_RANGE_FROM_DATA, &priv->data_from,
skb               128 net/netfilter/nft_range.c 	    nft_data_dump(skb, NFTA_RANGE_TO_DATA, &priv->data_to,
skb                82 net/netfilter/nft_redir.c static int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                87 net/netfilter/nft_redir.c 		if (nft_dump_register(skb, NFTA_REDIR_REG_PROTO_MIN,
skb                90 net/netfilter/nft_redir.c 		if (nft_dump_register(skb, NFTA_REDIR_REG_PROTO_MAX,
skb                96 net/netfilter/nft_redir.c 	    nla_put_be32(skb, NFTA_REDIR_FLAGS, htonl(priv->flags)))
skb               123 net/netfilter/nft_redir.c 	regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr, nft_hook(pkt));
skb               172 net/netfilter/nft_redir.c 		nf_nat_redirect_ipv6(pkt->skb, &range, nft_hook(pkt));
skb                62 net/netfilter/nft_reject.c int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb                66 net/netfilter/nft_reject.c 	if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
skb                71 net/netfilter/nft_reject.c 		if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
skb                27 net/netfilter/nft_reject_inet.c 			nf_send_unreach(pkt->skb, priv->icmp_code,
skb                31 net/netfilter/nft_reject_inet.c 			nf_send_reset(nft_net(pkt), pkt->skb, nft_hook(pkt));
skb                34 net/netfilter/nft_reject_inet.c 			nf_send_unreach(pkt->skb,
skb                43 net/netfilter/nft_reject_inet.c 			nf_send_unreach6(nft_net(pkt), pkt->skb,
skb                47 net/netfilter/nft_reject_inet.c 			nf_send_reset6(nft_net(pkt), pkt->skb, nft_hook(pkt));
skb                50 net/netfilter/nft_reject_inet.c 			nf_send_unreach6(nft_net(pkt), pkt->skb,
skb                93 net/netfilter/nft_reject_inet.c static int nft_reject_inet_dump(struct sk_buff *skb,
skb                98 net/netfilter/nft_reject_inet.c 	if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
skb               104 net/netfilter/nft_reject_inet.c 		if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
skb                24 net/netfilter/nft_rt.c 	const struct sk_buff *skb = pkt->skb;
skb                32 net/netfilter/nft_rt.c 		fl.u.ip4.daddr = ip_hdr(skb)->saddr;
skb                36 net/netfilter/nft_rt.c 		fl.u.ip6.daddr = ipv6_hdr(skb)->saddr;
skb                58 net/netfilter/nft_rt.c 	const struct sk_buff *skb = pkt->skb;
skb                62 net/netfilter/nft_rt.c 	dst = skb_dst(skb);
skb                77 net/netfilter/nft_rt.c 						ip_hdr(skb)->daddr);
skb                84 net/netfilter/nft_rt.c 					 &ipv6_hdr(skb)->daddr),
skb               149 net/netfilter/nft_rt.c static int nft_rt_get_dump(struct sk_buff *skb,
skb               154 net/netfilter/nft_rt.c 	if (nla_put_be32(skb, NFTA_RT_KEY, htonl(priv->key)))
skb               156 net/netfilter/nft_rt.c 	if (nft_dump_register(skb, NFTA_RT_DREG, priv->dreg))
skb                22 net/netfilter/nft_socket.c 	struct sk_buff *skb = pkt->skb;
skb                23 net/netfilter/nft_socket.c 	struct sock *sk = skb->sk;
skb                32 net/netfilter/nft_socket.c 			sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, nft_in(pkt));
skb                36 net/netfilter/nft_socket.c 			sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, nft_in(pkt));
skb                67 net/netfilter/nft_socket.c 	if (sk != skb->sk)
skb               114 net/netfilter/nft_socket.c static int nft_socket_dump(struct sk_buff *skb,
skb               119 net/netfilter/nft_socket.c 	if (nla_put_u32(skb, NFTA_SOCKET_KEY, htonl(priv->key)))
skb               121 net/netfilter/nft_socket.c 	if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg))
skb                54 net/netfilter/nft_synproxy.c 	struct sk_buff *skb = pkt->skb;
skb                59 net/netfilter/nft_synproxy.c 		synproxy_send_client_synack(net, skb, tcp, opts);
skb                60 net/netfilter/nft_synproxy.c 		consume_skb(skb);
skb                64 net/netfilter/nft_synproxy.c 		if (synproxy_recv_client_ack(net, skb, tcp, opts,
skb                66 net/netfilter/nft_synproxy.c 			consume_skb(skb);
skb                85 net/netfilter/nft_synproxy.c 	struct sk_buff *skb = pkt->skb;
skb                90 net/netfilter/nft_synproxy.c 		synproxy_send_client_synack_ipv6(net, skb, tcp, opts);
skb                91 net/netfilter/nft_synproxy.c 		consume_skb(skb);
skb                95 net/netfilter/nft_synproxy.c 		if (synproxy_recv_client_ack_ipv6(net, skb, tcp, opts,
skb                97 net/netfilter/nft_synproxy.c 			consume_skb(skb);
skb               111 net/netfilter/nft_synproxy.c 	struct sk_buff *skb = pkt->skb;
skb               121 net/netfilter/nft_synproxy.c 	if (nf_ip_checksum(skb, nft_hook(pkt), thoff, IPPROTO_TCP)) {
skb               126 net/netfilter/nft_synproxy.c 	tcp = skb_header_pointer(skb, pkt->xt.thoff,
skb               134 net/netfilter/nft_synproxy.c 	if (!synproxy_parse_options(skb, thoff, tcp, &opts)) {
skb               139 net/netfilter/nft_synproxy.c 	switch (skb->protocol) {
skb               228 net/netfilter/nft_synproxy.c static int nft_synproxy_do_dump(struct sk_buff *skb, struct nft_synproxy *priv)
skb               230 net/netfilter/nft_synproxy.c 	if (nla_put_be16(skb, NFTA_SYNPROXY_MSS, htons(priv->info.mss)) ||
skb               231 net/netfilter/nft_synproxy.c 	    nla_put_u8(skb, NFTA_SYNPROXY_WSCALE, priv->info.wscale) ||
skb               232 net/netfilter/nft_synproxy.c 	    nla_put_be32(skb, NFTA_SYNPROXY_FLAGS, htonl(priv->info.options)))
skb               273 net/netfilter/nft_synproxy.c static int nft_synproxy_dump(struct sk_buff *skb, const struct nft_expr *expr)
skb               277 net/netfilter/nft_synproxy.c 	return nft_synproxy_do_dump(skb, priv);
skb               314 net/netfilter/nft_synproxy.c static int nft_synproxy_obj_dump(struct sk_buff *skb,
skb               319 net/netfilter/nft_synproxy.c 	return nft_synproxy_do_dump(skb, priv);
skb                26 net/netfilter/nft_tproxy.c 	struct sk_buff *skb = pkt->skb;
skb                27 net/netfilter/nft_tproxy.c 	const struct iphdr *iph = ip_hdr(skb);
skb                33 net/netfilter/nft_tproxy.c 	hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
skb                43 net/netfilter/nft_tproxy.c 	sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
skb                46 net/netfilter/nft_tproxy.c 				   skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
skb                50 net/netfilter/nft_tproxy.c 	taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr);
skb                60 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_handle_time_wait4(nft_net(pkt), skb, taddr, tport, sk);
skb                65 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
skb                68 net/netfilter/nft_tproxy.c 					   skb->dev, NF_TPROXY_LOOKUP_LISTENER);
skb                72 net/netfilter/nft_tproxy.c 		nf_tproxy_assign_sock(skb, sk);
skb                83 net/netfilter/nft_tproxy.c 	struct sk_buff *skb = pkt->skb;
skb                84 net/netfilter/nft_tproxy.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               100 net/netfilter/nft_tproxy.c 	hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
skb               110 net/netfilter/nft_tproxy.c 	sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff, l4proto,
skb               117 net/netfilter/nft_tproxy.c 	taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr);
skb               127 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_handle_time_wait6(skb, l4proto, thoff,
skb               136 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff,
skb               144 net/netfilter/nft_tproxy.c 		nf_tproxy_assign_sock(skb, sk);
skb               266 net/netfilter/nft_tproxy.c static int nft_tproxy_dump(struct sk_buff *skb,
skb               271 net/netfilter/nft_tproxy.c 	if (nla_put_be32(skb, NFTA_TPROXY_FAMILY, htonl(priv->family)))
skb               275 net/netfilter/nft_tproxy.c 	    nft_dump_register(skb, NFTA_TPROXY_REG_ADDR, priv->sreg_addr))
skb               279 net/netfilter/nft_tproxy.c 	    nft_dump_register(skb, NFTA_TPROXY_REG_PORT, priv->sreg_port))
skb                29 net/netfilter/nft_tunnel.c 	tun_info = skb_tunnel_info(pkt->skb);
skb               109 net/netfilter/nft_tunnel.c static int nft_tunnel_get_dump(struct sk_buff *skb,
skb               114 net/netfilter/nft_tunnel.c 	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
skb               116 net/netfilter/nft_tunnel.c 	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
skb               118 net/netfilter/nft_tunnel.c 	if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
skb               435 net/netfilter/nft_tunnel.c 	struct sk_buff *skb = pkt->skb;
skb               437 net/netfilter/nft_tunnel.c 	skb_dst_drop(skb);
skb               439 net/netfilter/nft_tunnel.c 	skb_dst_set(skb, (struct dst_entry *) priv->md);
skb               442 net/netfilter/nft_tunnel.c static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
skb               447 net/netfilter/nft_tunnel.c 		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
skb               451 net/netfilter/nft_tunnel.c 		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC, &info->key.u.ipv6.src) < 0 ||
skb               452 net/netfilter/nft_tunnel.c 		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST, &info->key.u.ipv6.dst) < 0 ||
skb               453 net/netfilter/nft_tunnel.c 		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL, info->key.label))
skb               456 net/netfilter/nft_tunnel.c 		nla_nest_end(skb, nest);
skb               458 net/netfilter/nft_tunnel.c 		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
skb               462 net/netfilter/nft_tunnel.c 		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC, info->key.u.ipv4.src) < 0 ||
skb               463 net/netfilter/nft_tunnel.c 		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST, info->key.u.ipv4.dst) < 0)
skb               466 net/netfilter/nft_tunnel.c 		nla_nest_end(skb, nest);
skb               472 net/netfilter/nft_tunnel.c static int nft_tunnel_opts_dump(struct sk_buff *skb,
skb               478 net/netfilter/nft_tunnel.c 	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
skb               483 net/netfilter/nft_tunnel.c 		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
skb               489 net/netfilter/nft_tunnel.c 			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
skb               494 net/netfilter/nft_tunnel.c 			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
skb               496 net/netfilter/nft_tunnel.c 			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
skb               502 net/netfilter/nft_tunnel.c 	nla_nest_end(skb, nest);
skb               507 net/netfilter/nft_tunnel.c static int nft_tunnel_ports_dump(struct sk_buff *skb,
skb               510 net/netfilter/nft_tunnel.c 	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
skb               511 net/netfilter/nft_tunnel.c 	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
skb               517 net/netfilter/nft_tunnel.c static int nft_tunnel_flags_dump(struct sk_buff *skb,
skb               529 net/netfilter/nft_tunnel.c 	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
skb               535 net/netfilter/nft_tunnel.c static int nft_tunnel_obj_dump(struct sk_buff *skb,
skb               541 net/netfilter/nft_tunnel.c 	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
skb               543 net/netfilter/nft_tunnel.c 	    nft_tunnel_ip_dump(skb, info) < 0 ||
skb               544 net/netfilter/nft_tunnel.c 	    nft_tunnel_ports_dump(skb, info) < 0 ||
skb               545 net/netfilter/nft_tunnel.c 	    nft_tunnel_flags_dump(skb, info) < 0 ||
skb               546 net/netfilter/nft_tunnel.c 	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
skb               547 net/netfilter/nft_tunnel.c 	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
skb               548 net/netfilter/nft_tunnel.c 	    nft_tunnel_opts_dump(skb, priv) < 0)
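
nft_tunnel_ip_dump() and nft_tunnel_opts_dump() above use nested netlink attributes; a minimal sketch of the open/fill/close pattern with hypothetical attribute ids (nla_nest_start_noflag(), nla_put_in_addr(), nla_nest_cancel() and nla_nest_end() are the real helpers):

	static int dump_addrs(struct sk_buff *skb, __be32 src, __be32 dst)
	{
		struct nlattr *nest;

		nest = nla_nest_start_noflag(skb, EXAMPLE_ATTR_IP); /* hypothetical id */
		if (!nest)
			return -1;
		if (nla_put_in_addr(skb, EXAMPLE_ATTR_IP_SRC, src) < 0 ||
		    nla_put_in_addr(skb, EXAMPLE_ATTR_IP_DST, dst) < 0) {
			nla_nest_cancel(skb, nest);	/* roll back the partial nest */
			return -1;
		}
		nla_nest_end(skb, nest);		/* patch in the final length */
		return 0;
	}
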
skb               162 net/netfilter/nft_xfrm.c 	const struct sec_path *sp = skb_sec_path(pkt->skb);
skb               178 net/netfilter/nft_xfrm.c 	const struct dst_entry *dst = skb_dst(pkt->skb);
skb               213 net/netfilter/nft_xfrm.c static int nft_xfrm_get_dump(struct sk_buff *skb,
skb               218 net/netfilter/nft_xfrm.c 	if (nft_dump_register(skb, NFTA_XFRM_DREG, priv->dreg))
skb               221 net/netfilter/nft_xfrm.c 	if (nla_put_be32(skb, NFTA_XFRM_KEY, htonl(priv->key)))
skb               223 net/netfilter/nft_xfrm.c 	if (nla_put_u8(skb, NFTA_XFRM_DIR, priv->dir))
skb               225 net/netfilter/nft_xfrm.c 	if (nla_put_be32(skb, NFTA_XFRM_SPNUM, htonl(priv->spnum)))
skb                10 net/netfilter/utils.c __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
skb                13 net/netfilter/utils.c 	const struct iphdr *iph = ip_hdr(skb);
skb                16 net/netfilter/utils.c 	switch (skb->ip_summed) {
skb                21 net/netfilter/utils.c 		    !csum_fold(skb->csum)) ||
skb                23 net/netfilter/utils.c 				       skb->len - dataoff, protocol,
skb                24 net/netfilter/utils.c 				       skb->csum)) {
skb                25 net/netfilter/utils.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb                31 net/netfilter/utils.c 			skb->csum = 0;
skb                33 net/netfilter/utils.c 			skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
skb                34 net/netfilter/utils.c 						       skb->len - dataoff,
skb                36 net/netfilter/utils.c 		csum = __skb_checksum_complete(skb);
skb                43 net/netfilter/utils.c static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
skb                47 net/netfilter/utils.c 	const struct iphdr *iph = ip_hdr(skb);
skb                50 net/netfilter/utils.c 	switch (skb->ip_summed) {
skb                52 net/netfilter/utils.c 		if (len == skb->len - dataoff)
skb                53 net/netfilter/utils.c 			return nf_ip_checksum(skb, hook, dataoff, protocol);
skb                56 net/netfilter/utils.c 		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
skb                57 net/netfilter/utils.c 					       skb->len - dataoff, 0);
skb                58 net/netfilter/utils.c 		skb->ip_summed = CHECKSUM_NONE;
skb                59 net/netfilter/utils.c 		return __skb_checksum_complete_head(skb, dataoff + len);
skb                64 net/netfilter/utils.c __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
skb                67 net/netfilter/utils.c 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb                70 net/netfilter/utils.c 	switch (skb->ip_summed) {
skb                75 net/netfilter/utils.c 				     skb->len - dataoff, protocol,
skb                76 net/netfilter/utils.c 				     csum_sub(skb->csum,
skb                77 net/netfilter/utils.c 					      skb_checksum(skb, 0,
skb                79 net/netfilter/utils.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb                84 net/netfilter/utils.c 		skb->csum = ~csum_unfold(
skb                86 net/netfilter/utils.c 					     skb->len - dataoff,
skb                89 net/netfilter/utils.c 						      skb_checksum(skb, 0,
skb                91 net/netfilter/utils.c 		csum = __skb_checksum_complete(skb);
skb                97 net/netfilter/utils.c static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
skb               101 net/netfilter/utils.c 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb               105 net/netfilter/utils.c 	switch (skb->ip_summed) {
skb               107 net/netfilter/utils.c 		if (len == skb->len - dataoff)
skb               108 net/netfilter/utils.c 			return nf_ip6_checksum(skb, hook, dataoff, protocol);
skb               111 net/netfilter/utils.c 		hsum = skb_checksum(skb, 0, dataoff, 0);
skb               112 net/netfilter/utils.c 		skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
skb               114 net/netfilter/utils.c 							 skb->len - dataoff,
skb               117 net/netfilter/utils.c 		skb->ip_summed = CHECKSUM_NONE;
skb               118 net/netfilter/utils.c 		return __skb_checksum_complete_head(skb, dataoff + len);
skb               123 net/netfilter/utils.c __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
skb               131 net/netfilter/utils.c 		csum = nf_ip_checksum(skb, hook, dataoff, protocol);
skb               134 net/netfilter/utils.c 		csum = nf_ip6_checksum(skb, hook, dataoff, protocol);
skb               142 net/netfilter/utils.c __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
skb               150 net/netfilter/utils.c 		csum = nf_ip_checksum_partial(skb, hook, dataoff, len,
skb               154 net/netfilter/utils.c 		csum = nf_ip6_checksum_partial(skb, hook, dataoff, len,
skb               182 net/netfilter/utils.c static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
skb               188 net/netfilter/utils.c 		const struct iphdr *iph = ip_hdr(skb);
skb               191 net/netfilter/utils.c 		      skb->mark == rt_info->mark &&
skb               194 net/netfilter/utils.c 			return ip_route_me_harder(entry->state.net, skb,
skb               201 net/netfilter/utils.c int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
skb               208 net/netfilter/utils.c 		ret = nf_ip_reroute(skb, entry);
skb               213 net/netfilter/utils.c 			ret = v6ops->reroute(skb, entry);
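
The utils.c entries above verify the standard Internet checksum over an IPv4 pseudo-header plus the transport payload; a standalone version of that computation (host-order inputs, illustration only):

	#include <stddef.h>
	#include <stdint.h>

	static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
	{
		while (len > 1) {
			sum += (uint32_t)p[0] << 8 | p[1];
			p += 2;
			len -= 2;
		}
		if (len)			/* odd trailing byte, zero-padded */
			sum += (uint32_t)p[0] << 8;
		return sum;
	}

	/* Sketch of csum_tcpudp_magic() + skb_checksum(): pseudo-header
	 * (addresses, protocol, L4 length) plus the payload, folded and
	 * complemented. A valid received segment re-sums to zero. */
	static uint16_t l4_checksum(uint32_t saddr, uint32_t daddr, uint8_t proto,
				    const uint8_t *l4, size_t l4len)
	{
		uint32_t sum = (saddr >> 16) + (saddr & 0xffff)
			     + (daddr >> 16) + (daddr & 0xffff)
			     + proto + (uint32_t)l4len;

		sum = sum16(l4, l4len, sum);
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}
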
skb                31 net/netfilter/xt_AUDIT.c static bool audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
skb                36 net/netfilter/xt_AUDIT.c 	ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_iph), &_iph);
skb                46 net/netfilter/xt_AUDIT.c static bool audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
skb                53 net/netfilter/xt_AUDIT.c 	ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
skb                58 net/netfilter/xt_AUDIT.c 	ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), &nexthdr, &frag_off);
skb                67 net/netfilter/xt_AUDIT.c audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                78 net/netfilter/xt_AUDIT.c 	audit_log_format(ab, "mark=%#x", skb->mark);
skb                82 net/netfilter/xt_AUDIT.c 		switch (eth_hdr(skb)->h_proto) {
skb                84 net/netfilter/xt_AUDIT.c 			fam = audit_ip4(ab, skb) ? NFPROTO_IPV4 : -1;
skb                87 net/netfilter/xt_AUDIT.c 			fam = audit_ip6(ab, skb) ? NFPROTO_IPV6 : -1;
skb                92 net/netfilter/xt_AUDIT.c 		fam = audit_ip4(ab, skb) ? NFPROTO_IPV4 : -1;
skb                95 net/netfilter/xt_AUDIT.c 		fam = audit_ip6(ab, skb) ? NFPROTO_IPV6 : -1;
skb               109 net/netfilter/xt_AUDIT.c audit_tg_ebt(struct sk_buff *skb, const struct xt_action_param *par)
skb               111 net/netfilter/xt_AUDIT.c 	audit_tg(skb, par);
skb                26 net/netfilter/xt_CHECKSUM.c checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                28 net/netfilter/xt_CHECKSUM.c 	if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb))
skb                29 net/netfilter/xt_CHECKSUM.c 		skb_checksum_help(skb);
skb                29 net/netfilter/xt_CLASSIFY.c classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                33 net/netfilter/xt_CLASSIFY.c 	skb->priority = clinfo->priority;
skb                32 net/netfilter/xt_CONNSECMARK.c static void secmark_save(const struct sk_buff *skb)
skb                34 net/netfilter/xt_CONNSECMARK.c 	if (skb->secmark) {
skb                38 net/netfilter/xt_CONNSECMARK.c 		ct = nf_ct_get(skb, &ctinfo);
skb                40 net/netfilter/xt_CONNSECMARK.c 			ct->secmark = skb->secmark;
skb                50 net/netfilter/xt_CONNSECMARK.c static void secmark_restore(struct sk_buff *skb)
skb                52 net/netfilter/xt_CONNSECMARK.c 	if (!skb->secmark) {
skb                56 net/netfilter/xt_CONNSECMARK.c 		ct = nf_ct_get(skb, &ctinfo);
skb                58 net/netfilter/xt_CONNSECMARK.c 			skb->secmark = ct->secmark;
skb                63 net/netfilter/xt_CONNSECMARK.c connsecmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                69 net/netfilter/xt_CONNSECMARK.c 		secmark_save(skb);
skb                73 net/netfilter/xt_CONNSECMARK.c 		secmark_restore(skb);
skb                20 net/netfilter/xt_CT.c static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
skb                23 net/netfilter/xt_CT.c 	if (skb->_nfct != 0)
skb                28 net/netfilter/xt_CT.c 		nf_ct_set(skb, ct, IP_CT_NEW);
skb                30 net/netfilter/xt_CT.c 		nf_ct_set(skb, ct, IP_CT_UNTRACKED);
skb                36 net/netfilter/xt_CT.c static unsigned int xt_ct_target_v0(struct sk_buff *skb,
skb                42 net/netfilter/xt_CT.c 	return xt_ct_target(skb, ct);
skb                45 net/netfilter/xt_CT.c static unsigned int xt_ct_target_v1(struct sk_buff *skb,
skb                51 net/netfilter/xt_CT.c 	return xt_ct_target(skb, ct);
skb               344 net/netfilter/xt_CT.c notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb               347 net/netfilter/xt_CT.c 	if (skb->_nfct != 0)
skb               350 net/netfilter/xt_CT.c 	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
skb                28 net/netfilter/xt_DSCP.c dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                31 net/netfilter/xt_DSCP.c 	u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
skb                34 net/netfilter/xt_DSCP.c 		if (skb_ensure_writable(skb, sizeof(struct iphdr)))
skb                37 net/netfilter/xt_DSCP.c 		ipv4_change_dsfield(ip_hdr(skb),
skb                46 net/netfilter/xt_DSCP.c dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb                49 net/netfilter/xt_DSCP.c 	u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
skb                52 net/netfilter/xt_DSCP.c 		if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
skb                55 net/netfilter/xt_DSCP.c 		ipv6_change_dsfield(ipv6_hdr(skb),
skb                72 net/netfilter/xt_DSCP.c tos_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                75 net/netfilter/xt_DSCP.c 	struct iphdr *iph = ip_hdr(skb);
skb                82 net/netfilter/xt_DSCP.c 		if (skb_ensure_writable(skb, sizeof(struct iphdr)))
skb                84 net/netfilter/xt_DSCP.c 		iph = ip_hdr(skb);
skb                92 net/netfilter/xt_DSCP.c tos_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb                95 net/netfilter/xt_DSCP.c 	struct ipv6hdr *iph = ipv6_hdr(skb);
skb               102 net/netfilter/xt_DSCP.c 		if (skb_ensure_writable(skb, sizeof(struct iphdr)))
skb               104 net/netfilter/xt_DSCP.c 		iph = ipv6_hdr(skb);
skb                26 net/netfilter/xt_HL.c ttl_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                32 net/netfilter/xt_HL.c 	if (skb_ensure_writable(skb, sizeof(*iph)))
skb                35 net/netfilter/xt_HL.c 	iph = ip_hdr(skb);
skb                66 net/netfilter/xt_HL.c hl_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb                72 net/netfilter/xt_HL.c 	if (skb_ensure_writable(skb, sizeof(*ip6h)))
skb                75 net/netfilter/xt_HL.c 	ip6h = ipv6_hdr(skb);
skb                77 net/netfilter/xt_HMARK.c hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
skb                82 net/netfilter/xt_HMARK.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb               132 net/netfilter/xt_HMARK.c hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
skb               142 net/netfilter/xt_HMARK.c 	if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
skb               149 net/netfilter/xt_HMARK.c static int get_inner6_hdr(const struct sk_buff *skb, int *offset)
skb               153 net/netfilter/xt_HMARK.c 	icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6);
skb               165 net/netfilter/xt_HMARK.c hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
skb               174 net/netfilter/xt_HMARK.c 	ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb));
skb               175 net/netfilter/xt_HMARK.c 	nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
skb               182 net/netfilter/xt_HMARK.c 	if (get_inner6_hdr(skb, &nhoff)) {
skb               183 net/netfilter/xt_HMARK.c 		ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6);
skb               188 net/netfilter/xt_HMARK.c 		nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
skb               206 net/netfilter/xt_HMARK.c 	hmark_set_tuple_ports(skb, nhoff, t, info);
skb               211 net/netfilter/xt_HMARK.c hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par)
skb               219 net/netfilter/xt_HMARK.c 		if (hmark_ct_set_htuple(skb, &t, info) < 0)
skb               222 net/netfilter/xt_HMARK.c 		if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0)
skb               226 net/netfilter/xt_HMARK.c 	skb->mark = hmark_hash(&t, info);
skb               231 net/netfilter/xt_HMARK.c static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff)
skb               237 net/netfilter/xt_HMARK.c 	icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih);
skb               254 net/netfilter/xt_HMARK.c hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
skb               258 net/netfilter/xt_HMARK.c 	int nhoff = skb_network_offset(skb);
skb               260 net/netfilter/xt_HMARK.c 	ip = (struct iphdr *) (skb->data + nhoff);
skb               263 net/netfilter/xt_HMARK.c 		if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) {
skb               264 net/netfilter/xt_HMARK.c 			ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip);
skb               286 net/netfilter/xt_HMARK.c 	hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info);
skb               292 net/netfilter/xt_HMARK.c hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
skb               300 net/netfilter/xt_HMARK.c 		if (hmark_ct_set_htuple(skb, &t, info) < 0)
skb               303 net/netfilter/xt_HMARK.c 		if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0)
skb               307 net/netfilter/xt_HMARK.c 	skb->mark = hmark_hash(&t, info);
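
hmark_tg_v4()/v6() above hash the masked tuple and fold the result into a mark range; the arithmetic, with a toy mixer standing in for the kernel's jhash_3words():

	#include <stdint.h>

	struct tuple {
		uint32_t src, dst;	/* addresses, already masked */
		uint32_t ports_proto;	/* sport<<16 | dport, mixed with proto */
	};

	static uint32_t mix(uint32_t h)	/* toy 32-bit finalizer, not jhash */
	{
		h ^= h >> 16; h *= 0x7feb352d;
		h ^= h >> 15; h *= 0x846ca68b;
		return h ^ (h >> 16);
	}

	/* Sketch: skb->mark = offset + hash % modulus (modulus != 0) */
	static uint32_t hmark_mark(const struct tuple *t, uint32_t seed,
				   uint32_t modulus, uint32_t offset)
	{
		uint32_t h = mix(t->src ^ mix(t->dst ^ mix(t->ports_proto ^ seed)));

		return offset + h % modulus;
	}
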
skb               166 net/netfilter/xt_IDLETIMER.c static unsigned int idletimer_tg_target(struct sk_buff *skb,
skb                42 net/netfilter/xt_LED.c led_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                29 net/netfilter/xt_LOG.c log_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                39 net/netfilter/xt_LOG.c 	nf_log_packet(net, xt_family(par), xt_hooknum(par), skb, xt_in(par),
skb                35 net/netfilter/xt_MASQUERADE.c masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                45 net/netfilter/xt_MASQUERADE.c 	return nf_nat_masquerade_ipv4(skb, xt_hooknum(par), &range,
skb                56 net/netfilter/xt_MASQUERADE.c masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb                58 net/netfilter/xt_MASQUERADE.c 	return nf_nat_masquerade_ipv6(skb, par->targinfo, xt_out(par));
skb                19 net/netfilter/xt_NETMAP.c netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb                28 net/netfilter/xt_NETMAP.c 	ct = nf_ct_get(skb, &ctinfo);
skb                35 net/netfilter/xt_NETMAP.c 		new_addr.in6 = ipv6_hdr(skb)->daddr;
skb                37 net/netfilter/xt_NETMAP.c 		new_addr.in6 = ipv6_hdr(skb)->saddr;
skb                69 net/netfilter/xt_NETMAP.c netmap_tg4(struct sk_buff *skb, const struct xt_action_param *par)
skb                81 net/netfilter/xt_NETMAP.c 	ct = nf_ct_get(skb, &ctinfo);
skb                87 net/netfilter/xt_NETMAP.c 		new_ip = ip_hdr(skb)->daddr & ~netmask;
skb                89 net/netfilter/xt_NETMAP.c 		new_ip = ip_hdr(skb)->saddr & ~netmask;
skb                21 net/netfilter/xt_NFLOG.c nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                36 net/netfilter/xt_NFLOG.c 	nf_log_packet(net, xt_family(par), xt_hooknum(par), skb, xt_in(par),
skb                29 net/netfilter/xt_NFQUEUE.c nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                37 net/netfilter/xt_NFQUEUE.c nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
skb                43 net/netfilter/xt_NFQUEUE.c 		queue = nfqueue_hash(skb, queue, info->queues_total,
skb                50 net/netfilter/xt_NFQUEUE.c nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
skb                53 net/netfilter/xt_NFQUEUE.c 	unsigned int ret = nfqueue_tg_v1(skb, par);
skb                86 net/netfilter/xt_NFQUEUE.c nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
skb                98 net/netfilter/xt_NFQUEUE.c 			queue = nfqueue_hash(skb, queue, info->queues_total,
skb                94 net/netfilter/xt_RATEEST.c xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb               100 net/netfilter/xt_RATEEST.c 	stats->bytes += skb->len;
skb                29 net/netfilter/xt_REDIRECT.c redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb                31 net/netfilter/xt_REDIRECT.c 	return nf_nat_redirect_ipv6(skb, par->targinfo, xt_hooknum(par));
skb                66 net/netfilter/xt_REDIRECT.c redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
skb                68 net/netfilter/xt_REDIRECT.c 	return nf_nat_redirect_ipv4(skb, par->targinfo, xt_hooknum(par));
skb                29 net/netfilter/xt_SECMARK.c secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                42 net/netfilter/xt_SECMARK.c 	skb->secmark = secmark;
skb                44 net/netfilter/xt_TCPMSS.c 				    const struct sk_buff *skb,
skb                54 net/netfilter/xt_TCPMSS.c 		fl4->daddr = ip_hdr(skb)->saddr;
skb                59 net/netfilter/xt_TCPMSS.c 		fl6->daddr = ipv6_hdr(skb)->saddr;
skb                71 net/netfilter/xt_TCPMSS.c tcpmss_mangle_packet(struct sk_buff *skb,
skb                89 net/netfilter/xt_TCPMSS.c 	if (skb_ensure_writable(skb, skb->len))
skb                92 net/netfilter/xt_TCPMSS.c 	len = skb->len - tcphoff;
skb                96 net/netfilter/xt_TCPMSS.c 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
skb               104 net/netfilter/xt_TCPMSS.c 		unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
skb               105 net/netfilter/xt_TCPMSS.c 		unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu);
skb               133 net/netfilter/xt_TCPMSS.c 			inet_proto_csum_replace2(&tcph->check, skb,
skb               154 net/netfilter/xt_TCPMSS.c 	if (skb_tailroom(skb) < TCPOLEN_MSS) {
skb               155 net/netfilter/xt_TCPMSS.c 		if (pskb_expand_head(skb, 0,
skb               156 net/netfilter/xt_TCPMSS.c 				     TCPOLEN_MSS - skb_tailroom(skb),
skb               159 net/netfilter/xt_TCPMSS.c 		tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
skb               162 net/netfilter/xt_TCPMSS.c 	skb_put(skb, TCPOLEN_MSS);
skb               179 net/netfilter/xt_TCPMSS.c 	inet_proto_csum_replace2(&tcph->check, skb,
skb               186 net/netfilter/xt_TCPMSS.c 	inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);
skb               190 net/netfilter/xt_TCPMSS.c 	inet_proto_csum_replace2(&tcph->check, skb,
skb               196 net/netfilter/xt_TCPMSS.c tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
skb               198 net/netfilter/xt_TCPMSS.c 	struct iphdr *iph = ip_hdr(skb);
skb               202 net/netfilter/xt_TCPMSS.c 	ret = tcpmss_mangle_packet(skb, par,
skb               209 net/netfilter/xt_TCPMSS.c 		iph = ip_hdr(skb);
skb               219 net/netfilter/xt_TCPMSS.c tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb               221 net/netfilter/xt_TCPMSS.c 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               228 net/netfilter/xt_TCPMSS.c 	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
skb               231 net/netfilter/xt_TCPMSS.c 	ret = tcpmss_mangle_packet(skb, par,
skb               238 net/netfilter/xt_TCPMSS.c 		ipv6h = ipv6_hdr(skb);
skb               241 net/netfilter/xt_TCPMSS.c 		if (skb->ip_summed == CHECKSUM_COMPLETE)
skb               242 net/netfilter/xt_TCPMSS.c 			skb->csum = csum_add(csum_sub(skb->csum, oldlen),
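
tcpmss_mangle_packet() above walks TCP options to find and clamp the MSS; the walk on its own (checksum patching via inet_proto_csum_replace2() left out for brevity):

	#include <stddef.h>
	#include <stdint.h>

	/* Sketch: opts points at the first option byte, optlen = doff*4 - 20.
	 * Returns 0 if an MSS option was found (clamping it if needed),
	 * 1 if absent, -1 on a malformed option list. */
	static int clamp_mss(uint8_t *opts, size_t optlen, uint16_t max_mss)
	{
		size_t i = 0;

		while (i < optlen) {
			if (opts[i] == 0)		/* EOL */
				break;
			if (opts[i] == 1) {		/* NOP: one byte, no length */
				i++;
				continue;
			}
			if (i + 1 >= optlen || opts[i + 1] < 2)
				return -1;
			if (opts[i] == 2 && opts[i + 1] == 4 && i + 4 <= optlen) {
				uint16_t mss = (uint16_t)opts[i + 2] << 8 | opts[i + 3];

				if (mss > max_mss) {
					opts[i + 2] = max_mss >> 8;
					opts[i + 3] = max_mss & 0xff;
				}
				return 0;
			}
			i += opts[i + 1];
		}
		return 1;
	}
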
skb                29 net/netfilter/xt_TCPOPTSTRIP.c tcpoptstrip_mangle_packet(struct sk_buff *skb,
skb                44 net/netfilter/xt_TCPOPTSTRIP.c 	tcph = skb_header_pointer(skb, tcphoff, sizeof(_th), &_th);
skb                52 net/netfilter/xt_TCPOPTSTRIP.c 	if (skb_ensure_writable(skb, tcphoff + tcp_hdrlen))
skb                56 net/netfilter/xt_TCPOPTSTRIP.c 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
skb                79 net/netfilter/xt_TCPOPTSTRIP.c 			inet_proto_csum_replace2(&tcph->check, skb, htons(o),
skb                89 net/netfilter/xt_TCPOPTSTRIP.c tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
skb                91 net/netfilter/xt_TCPOPTSTRIP.c 	return tcpoptstrip_mangle_packet(skb, par, ip_hdrlen(skb));
skb                96 net/netfilter/xt_TCPOPTSTRIP.c tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb                98 net/netfilter/xt_TCPOPTSTRIP.c 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
skb               104 net/netfilter/xt_TCPOPTSTRIP.c 	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
skb               108 net/netfilter/xt_TCPOPTSTRIP.c 	return tcpoptstrip_mangle_packet(skb, par, tcphoff);
skb                37 net/netfilter/xt_TEE.c tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
skb                42 net/netfilter/xt_TEE.c 	nf_dup_ipv4(xt_net(par), skb, xt_hooknum(par), &info->gw.in, oif);
skb                49 net/netfilter/xt_TEE.c tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
skb                54 net/netfilter/xt_TEE.c 	nf_dup_ipv6(xt_net(par), skb, xt_hooknum(par), &info->gw.in6, oif);
skb                36 net/netfilter/xt_TPROXY.c tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
skb                39 net/netfilter/xt_TPROXY.c 	const struct iphdr *iph = ip_hdr(skb);
skb                43 net/netfilter/xt_TPROXY.c 	hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
skb                51 net/netfilter/xt_TPROXY.c 	sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
skb                54 net/netfilter/xt_TPROXY.c 				   skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
skb                56 net/netfilter/xt_TPROXY.c 	laddr = nf_tproxy_laddr4(skb, laddr, iph->daddr);
skb                63 net/netfilter/xt_TPROXY.c 		sk = nf_tproxy_handle_time_wait4(net, skb, laddr, lport, sk);
skb                67 net/netfilter/xt_TPROXY.c 		sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
skb                70 net/netfilter/xt_TPROXY.c 					   skb->dev, NF_TPROXY_LOOKUP_LISTENER);
skb                76 net/netfilter/xt_TPROXY.c 		skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
skb                80 net/netfilter/xt_TPROXY.c 			 &laddr, ntohs(lport), skb->mark);
skb                82 net/netfilter/xt_TPROXY.c 		nf_tproxy_assign_sock(skb, sk);
skb                88 net/netfilter/xt_TPROXY.c 		 &iph->daddr, ntohs(hp->dest), skb->mark);
skb                93 net/netfilter/xt_TPROXY.c tproxy_tg4_v0(struct sk_buff *skb, const struct xt_action_param *par)
skb                97 net/netfilter/xt_TPROXY.c 	return tproxy_tg4(xt_net(par), skb, tgi->laddr, tgi->lport,
skb               102 net/netfilter/xt_TPROXY.c tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par)
skb               106 net/netfilter/xt_TPROXY.c 	return tproxy_tg4(xt_net(par), skb, tgi->laddr.ip, tgi->lport,
skb               113 net/netfilter/xt_TPROXY.c tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
skb               115 net/netfilter/xt_TPROXY.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               124 net/netfilter/xt_TPROXY.c 	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
skb               130 net/netfilter/xt_TPROXY.c 	hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
skb               140 net/netfilter/xt_TPROXY.c 	sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto,
skb               145 net/netfilter/xt_TPROXY.c 	laddr = nf_tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr);
skb               152 net/netfilter/xt_TPROXY.c 		sk = nf_tproxy_handle_time_wait6(skb, tproto, thoff,
skb               161 net/netfilter/xt_TPROXY.c 		sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff,
skb               170 net/netfilter/xt_TPROXY.c 		skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
skb               174 net/netfilter/xt_TPROXY.c 			 laddr, ntohs(lport), skb->mark);
skb               176 net/netfilter/xt_TPROXY.c 		nf_tproxy_assign_sock(skb, sk);
skb               182 net/netfilter/xt_TPROXY.c 		 &iph->daddr, ntohs(hp->dest), skb->mark);
skb                26 net/netfilter/xt_TRACE.c trace_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                28 net/netfilter/xt_TRACE.c 	skb->nf_trace = 1;
skb                89 net/netfilter/xt_addrtype.c 	const struct sk_buff *skb, const struct xt_addrtype_info_v1 *info)
skb                91 net/netfilter/xt_addrtype.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               111 net/netfilter/xt_addrtype.c addrtype_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
skb               115 net/netfilter/xt_addrtype.c 	const struct iphdr *iph = ip_hdr(skb);
skb               129 net/netfilter/xt_addrtype.c addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
skb               144 net/netfilter/xt_addrtype.c 		return addrtype_mt6(net, dev, skb, info);
skb               146 net/netfilter/xt_addrtype.c 	iph = ip_hdr(skb);
skb                89 net/netfilter/xt_bpf.c static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                93 net/netfilter/xt_bpf.c 	return BPF_PROG_RUN(info->filter, skb);
skb                96 net/netfilter/xt_bpf.c static bool bpf_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
skb               100 net/netfilter/xt_bpf.c 	return !!bpf_prog_run_save_cb(info->filter, (struct sk_buff *) skb);
skb               101 net/netfilter/xt_cgroup.c cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
skb               104 net/netfilter/xt_cgroup.c 	struct sock *sk = skb->sk;
skb               109 net/netfilter/xt_cgroup.c 	return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^
skb               113 net/netfilter/xt_cgroup.c static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
skb               116 net/netfilter/xt_cgroup.c 	struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
skb               118 net/netfilter/xt_cgroup.c 	struct sock *sk = skb->sk;
skb               131 net/netfilter/xt_cgroup.c static bool cgroup_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
skb               134 net/netfilter/xt_cgroup.c 	struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
skb               136 net/netfilter/xt_cgroup.c 	struct sock *sk = skb->sk;
skb                60 net/netfilter/xt_cluster.c xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family)
skb                66 net/netfilter/xt_cluster.c 		is_multicast = ipv4_is_multicast(ip_hdr(skb)->daddr);
skb                69 net/netfilter/xt_cluster.c 		is_multicast = ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr);
skb                79 net/netfilter/xt_cluster.c xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                81 net/netfilter/xt_cluster.c 	struct sk_buff *pskb = (struct sk_buff *)skb;
skb               104 net/netfilter/xt_cluster.c 	if (!xt_cluster_is_multicast_addr(skb, xt_family(par)) &&
skb               105 net/netfilter/xt_cluster.c 	    skb->pkt_type == PACKET_MULTICAST) {
skb               109 net/netfilter/xt_cluster.c 	ct = nf_ct_get(skb, &ctinfo);
skb                20 net/netfilter/xt_comment.c comment_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                21 net/netfilter/xt_connbytes.c connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                32 net/netfilter/xt_connbytes.c 	ct = nf_ct_get(skb, &ctinfo);
skb                20 net/netfilter/xt_connlabel.c connlabel_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                28 net/netfilter/xt_connlabel.c 	ct = nf_ct_get(skb, &ctinfo);
skb                30 net/netfilter/xt_connlimit.c connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                42 net/netfilter/xt_connlimit.c 	ct = nf_ct_get(skb, &ctinfo);
skb                46 net/netfilter/xt_connlimit.c 	} else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
skb                52 net/netfilter/xt_connlimit.c 		const struct ipv6hdr *iph = ipv6_hdr(skb);
skb                64 net/netfilter/xt_connlimit.c 		const struct iphdr *iph = ip_hdr(skb);
skb                27 net/netfilter/xt_connmark.c connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
skb                34 net/netfilter/xt_connmark.c 	ct = nf_ct_get(skb, &ctinfo);
skb                52 net/netfilter/xt_connmark.c 		new_targetmark = (skb->mark & info->nfmask);
skb                72 net/netfilter/xt_connmark.c 		newmark = (skb->mark & ~info->nfmask) ^
skb                74 net/netfilter/xt_connmark.c 		skb->mark = newmark;
skb                81 net/netfilter/xt_connmark.c connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                91 net/netfilter/xt_connmark.c 	return connmark_tg_shift(skb, &info2);
skb                95 net/netfilter/xt_connmark.c connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
skb                99 net/netfilter/xt_connmark.c 	return connmark_tg_shift(skb, info);
skb               119 net/netfilter/xt_connmark.c connmark_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb               125 net/netfilter/xt_connmark.c 	ct = nf_ct_get(skb, &ctinfo);
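
connmark_tg_shift() above and the tproxy targets earlier both update marks with clear-then-xor, leaving bits outside the mask untouched:

	#include <stdint.h>

	/* Sketch: (mark & ~mask) ^ value, as in the excerpts above. With
	 * value restricted to masked bits this behaves like set-under-mask;
	 * the xor form additionally allows flipping selected bits. */
	static inline uint32_t apply_mark(uint32_t mark, uint32_t mask,
					  uint32_t value)
	{
		return (mark & ~mask) ^ value;
	}
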
skb               162 net/netfilter/xt_conntrack.c conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
skb               170 net/netfilter/xt_conntrack.c 	ct = nf_ct_get(skb, &ctinfo);
skb               243 net/netfilter/xt_conntrack.c conntrack_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
skb               247 net/netfilter/xt_conntrack.c 	return conntrack_mt(skb, par, info->state_mask, info->status_mask);
skb               251 net/netfilter/xt_conntrack.c conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
skb               255 net/netfilter/xt_conntrack.c 	return conntrack_mt(skb, par, info->state_mask, info->status_mask);
skb               259 net/netfilter/xt_conntrack.c conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par)
skb               263 net/netfilter/xt_conntrack.c 	return conntrack_mt(skb, par, info->state_mask, info->status_mask);
skb                33 net/netfilter/xt_cpu.c static bool cpu_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                35 net/netfilter/xt_dccp.c 		 const struct sk_buff *skb,
skb                53 net/netfilter/xt_dccp.c 	op = skb_header_pointer(skb, protoff + optoff, optlen, dccp_optbuf);
skb                89 net/netfilter/xt_dccp.c match_option(u_int8_t option, const struct sk_buff *skb, unsigned int protoff,
skb                92 net/netfilter/xt_dccp.c 	return dccp_find_option(option, skb, protoff, dh, hotdrop);
skb                96 net/netfilter/xt_dccp.c dccp_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb               105 net/netfilter/xt_dccp.c 	dh = skb_header_pointer(skb, par->thoff, sizeof(_dh), &_dh);
skb               119 net/netfilter/xt_dccp.c 		&& DCCHECK(match_option(info->option, skb, par->thoff, dh,
skb                19 net/netfilter/xt_devgroup.c static bool devgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                25 net/netfilter/xt_dscp.c dscp_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                28 net/netfilter/xt_dscp.c 	u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
skb                34 net/netfilter/xt_dscp.c dscp_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                37 net/netfilter/xt_dscp.c 	u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
skb                52 net/netfilter/xt_dscp.c static bool tos_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                57 net/netfilter/xt_dscp.c 		return ((ip_hdr(skb)->tos & info->tos_mask) ==
skb                60 net/netfilter/xt_dscp.c 		return ((ipv6_get_dsfield(ipv6_hdr(skb)) & info->tos_mask) ==
skb                27 net/netfilter/xt_ecn.c static bool match_tcp(const struct sk_buff *skb, struct xt_action_param *par)
skb                36 net/netfilter/xt_ecn.c 	th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
skb                63 net/netfilter/xt_ecn.c static inline bool match_ip(const struct sk_buff *skb,
skb                66 net/netfilter/xt_ecn.c 	return ((ip_hdr(skb)->tos & XT_ECN_IP_MASK) == einfo->ip_ect) ^
skb                70 net/netfilter/xt_ecn.c static bool ecn_mt4(const struct sk_buff *skb, struct xt_action_param *par)
skb                74 net/netfilter/xt_ecn.c 	if (info->operation & XT_ECN_OP_MATCH_IP && !match_ip(skb, info))
skb                78 net/netfilter/xt_ecn.c 	    !match_tcp(skb, par))
skb               104 net/netfilter/xt_ecn.c static inline bool match_ipv6(const struct sk_buff *skb,
skb               107 net/netfilter/xt_ecn.c 	return (((ipv6_hdr(skb)->flow_lbl[0] >> 4) & XT_ECN_IP_MASK) ==
skb               112 net/netfilter/xt_ecn.c static bool ecn_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb               116 net/netfilter/xt_ecn.c 	if (info->operation & XT_ECN_OP_MATCH_IP && !match_ipv6(skb, info))
skb               120 net/netfilter/xt_ecn.c 	    !match_tcp(skb, par))
skb                36 net/netfilter/xt_esp.c static bool esp_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                46 net/netfilter/xt_esp.c 	eh = skb_header_pointer(skb, par->thoff, sizeof(_esp), &_esp);
skb               639 net/netfilter/xt_hashlimit.c 		   const struct sk_buff *skb, unsigned int protoff)
skb               650 net/netfilter/xt_hashlimit.c 			dst->ip.dst = maskl(ip_hdr(skb)->daddr,
skb               653 net/netfilter/xt_hashlimit.c 			dst->ip.src = maskl(ip_hdr(skb)->saddr,
skb               659 net/netfilter/xt_hashlimit.c 		nexthdr = ip_hdr(skb)->protocol;
skb               667 net/netfilter/xt_hashlimit.c 			memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
skb               672 net/netfilter/xt_hashlimit.c 			memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr,
skb               680 net/netfilter/xt_hashlimit.c 		nexthdr = ipv6_hdr(skb)->nexthdr;
skb               681 net/netfilter/xt_hashlimit.c 		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
skb               694 net/netfilter/xt_hashlimit.c 		ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports),
skb               725 net/netfilter/xt_hashlimit.c hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
skb               735 net/netfilter/xt_hashlimit.c 	if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
skb               760 net/netfilter/xt_hashlimit.c 		cost = (cfg->mode & XT_HASHLIMIT_BYTES) ? skb->len : 1;
skb               774 net/netfilter/xt_hashlimit.c 		cost = hashlimit_byte_cost(skb->len, dh);
skb               798 net/netfilter/xt_hashlimit.c hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
skb               809 net/netfilter/xt_hashlimit.c 	return hashlimit_mt_common(skb, par, hinfo, &cfg, 1);
skb               813 net/netfilter/xt_hashlimit.c hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
skb               824 net/netfilter/xt_hashlimit.c 	return hashlimit_mt_common(skb, par, hinfo, &cfg, 2);
skb               828 net/netfilter/xt_hashlimit.c hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb               833 net/netfilter/xt_hashlimit.c 	return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
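
The hashlimit lines above charge either skb->len or 1 per packet, which is the whole difference between rationing bandwidth and rationing packet rate in a token bucket. A toy bucket (struct and names invented here, far simpler than hashlimit's fixed-point internals) makes the shape visible:

	#include <linux/types.h>
	#include <linux/minmax.h>

	/* Toy token bucket: refill by elapsed jiffies, cap the burst,
	 * then try to spend `cost` (bytes in byte mode, 1 per packet). */
	struct toy_bucket {
		u64 credit;		/* tokens available now */
		u64 credit_cap;		/* burst ceiling */
		u64 rate;		/* tokens gained per jiffy */
		unsigned long prev;	/* jiffies at last refill */
	};

	static bool toy_bucket_charge(struct toy_bucket *b, u64 cost,
				      unsigned long now)
	{
		b->credit = min(b->credit + (now - b->prev) * b->rate,
				b->credit_cap);
		b->prev = now;
		if (b->credit < cost)
			return false;	/* over limit: no match */
		b->credit -= cost;
		return true;
	}
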
skb                24 net/netfilter/xt_helper.c helper_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                33 net/netfilter/xt_helper.c 	ct = nf_ct_get(skb, &ctinfo);
skb                25 net/netfilter/xt_hl.c static bool ttl_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                28 net/netfilter/xt_hl.c 	const u8 ttl = ip_hdr(skb)->ttl;
skb                44 net/netfilter/xt_hl.c static bool hl_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                47 net/netfilter/xt_hl.c 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
skb                40 net/netfilter/xt_ipcomp.c static bool comp_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                50 net/netfilter/xt_ipcomp.c 	chdr = skb_header_pointer(skb, par->thoff, sizeof(_comphdr), &_comphdr);
skb                17 net/netfilter/xt_iprange.c iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
skb                20 net/netfilter/xt_iprange.c 	const struct iphdr *iph = ip_hdr(skb);
skb                66 net/netfilter/xt_iprange.c iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                69 net/netfilter/xt_iprange.c 	const struct ipv6hdr *iph = ipv6_hdr(skb);
skb                49 net/netfilter/xt_ipvs.c ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                61 net/netfilter/xt_ipvs.c 		match = skb->ipvs_property ^
skb                67 net/netfilter/xt_ipvs.c 	if (!skb->ipvs_property) {
skb                72 net/netfilter/xt_ipvs.c 	ip_vs_fill_iph_skb(family, skb, true, &iph);
skb                90 net/netfilter/xt_ipvs.c 	cp = pp->conn_out_get(ipvs, family, skb, &iph);
skb               118 net/netfilter/xt_ipvs.c 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb                79 net/netfilter/xt_l2tp.c static bool l2tp_udp_mt(const struct sk_buff *skb, struct xt_action_param *par, u16 thoff)
skb                95 net/netfilter/xt_l2tp.c 	lh = skb_header_pointer(skb, offs, 2, &lhbuf);
skb               113 net/netfilter/xt_l2tp.c 		lh = skb_header_pointer(skb, offs + 4, 4, &lhbuf);
skb               123 net/netfilter/xt_l2tp.c 		lh = skb_header_pointer(skb, offs + 2, 4, &lhbuf);
skb               139 net/netfilter/xt_l2tp.c static bool l2tp_ip_mt(const struct sk_buff *skb, struct xt_action_param *par, u16 thoff)
skb               147 net/netfilter/xt_l2tp.c 	lh = skb_header_pointer(skb, thoff, sizeof(lhbuf), &lhbuf);
skb               155 net/netfilter/xt_l2tp.c 		lh = skb_header_pointer(skb, thoff + 8, sizeof(lhbuf),
skb               170 net/netfilter/xt_l2tp.c static bool l2tp_mt4(const struct sk_buff *skb, struct xt_action_param *par)
skb               172 net/netfilter/xt_l2tp.c 	struct iphdr *iph = ip_hdr(skb);
skb               178 net/netfilter/xt_l2tp.c 		return l2tp_udp_mt(skb, par, par->thoff);
skb               180 net/netfilter/xt_l2tp.c 		return l2tp_ip_mt(skb, par, par->thoff);
skb               187 net/netfilter/xt_l2tp.c static bool l2tp_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb               193 net/netfilter/xt_l2tp.c 	ipproto = ipv6_find_hdr(skb, &thoff, -1, &fragoff, NULL);
skb               200 net/netfilter/xt_l2tp.c 		return l2tp_udp_mt(skb, par, thoff);
skb               202 net/netfilter/xt_l2tp.c 		return l2tp_ip_mt(skb, par, thoff);
skb                21 net/netfilter/xt_length.c length_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                24 net/netfilter/xt_length.c 	u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len);
skb                30 net/netfilter/xt_length.c length_mt6(const struct sk_buff *skb, struct xt_action_param *par)
skb                33 net/netfilter/xt_length.c 	const u_int16_t pktlen = ntohs(ipv6_hdr(skb)->payload_len) +
skb                65 net/netfilter/xt_limit.c limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                25 net/netfilter/xt_mac.c static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                30 net/netfilter/xt_mac.c 	if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER)
skb                32 net/netfilter/xt_mac.c 	if (skb_mac_header(skb) < skb->head)
skb                34 net/netfilter/xt_mac.c 	if (skb_mac_header(skb) + ETH_HLEN > skb->data)
skb                36 net/netfilter/xt_mac.c 	ret  = ether_addr_equal(eth_hdr(skb)->h_source, info->srcaddr);
skb                26 net/netfilter/xt_mark.c mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
skb                30 net/netfilter/xt_mark.c 	skb->mark = (skb->mark & ~info->mask) ^ info->mark;
skb                35 net/netfilter/xt_mark.c mark_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                39 net/netfilter/xt_mark.c 	return ((skb->mark & info->mask) == info->mark) ^ info->invert;
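
mark_mt() above also shows how xtables matches implement "!" negation without a second code path: compute the raw predicate, then XOR it with the user-supplied invert flag. The same shape recurs in xt_dscp.c and xt_ecn.c earlier in this index. As a standalone, illustrative-only expression:

	#include <linux/types.h>

	/* Masked compare with rule-level inversion folded in via XOR. */
	static inline bool match_masked(u32 value, u32 mask, u32 want,
					bool invert)
	{
		return ((value & mask) == want) ^ invert;
	}
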
skb                86 net/netfilter/xt_multiport.c multiport_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                95 net/netfilter/xt_multiport.c 	pptr = skb_header_pointer(skb, par->thoff, sizeof(_ports), _ports);
skb                52 net/netfilter/xt_nat.c xt_snat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
skb                59 net/netfilter/xt_nat.c 	ct = nf_ct_get(skb, &ctinfo);
skb                69 net/netfilter/xt_nat.c xt_dnat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
skb                76 net/netfilter/xt_nat.c 	ct = nf_ct_get(skb, &ctinfo);
skb                85 net/netfilter/xt_nat.c xt_snat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
skb                92 net/netfilter/xt_nat.c 	ct = nf_ct_get(skb, &ctinfo);
skb               104 net/netfilter/xt_nat.c xt_dnat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
skb               111 net/netfilter/xt_nat.c 	ct = nf_ct_get(skb, &ctinfo);
skb               122 net/netfilter/xt_nat.c xt_snat_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
skb               128 net/netfilter/xt_nat.c 	ct = nf_ct_get(skb, &ctinfo);
skb               137 net/netfilter/xt_nat.c xt_dnat_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
skb               143 net/netfilter/xt_nat.c 	ct = nf_ct_get(skb, &ctinfo);
skb                21 net/netfilter/xt_nfacct.c static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                26 net/netfilter/xt_nfacct.c 	nfnl_acct_update(skb, info->nfacct);
skb                28 net/netfilter/xt_osf.c xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
skb                30 net/netfilter/xt_osf.c 	return nf_osf_match(skb, xt_family(p), xt_hooknum(p), xt_in(p),
skb                63 net/netfilter/xt_owner.c owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                67 net/netfilter/xt_owner.c 	struct sock *sk = skb_to_full_sk(skb);
skb                24 net/netfilter/xt_physdev.c physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                34 net/netfilter/xt_physdev.c 	if (!nf_bridge_info_exists(skb)) {
skb                54 net/netfilter/xt_physdev.c 	physdev = nf_bridge_get_physoutdev(skb);
skb                62 net/netfilter/xt_physdev.c 	physdev = nf_bridge_get_physindev(skb);
skb                23 net/netfilter/xt_pkttype.c pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                28 net/netfilter/xt_pkttype.c 	if (skb->pkt_type != PACKET_LOOPBACK)
skb                29 net/netfilter/xt_pkttype.c 		type = skb->pkt_type;
skb                31 net/netfilter/xt_pkttype.c 	    ipv4_is_multicast(ip_hdr(skb)->daddr))
skb                52 net/netfilter/xt_policy.c match_policy_in(const struct sk_buff *skb, const struct xt_policy_info *info,
skb                56 net/netfilter/xt_policy.c 	const struct sec_path *sp = skb_sec_path(skb);
skb                82 net/netfilter/xt_policy.c match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info,
skb                86 net/netfilter/xt_policy.c 	const struct dst_entry *dst = skb_dst(skb);
skb               111 net/netfilter/xt_policy.c policy_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb               117 net/netfilter/xt_policy.c 		ret = match_policy_in(skb, info, xt_family(par));
skb               119 net/netfilter/xt_policy.c 		ret = match_policy_out(skb, info, xt_family(par));
skb                27 net/netfilter/xt_quota.c quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                34 net/netfilter/xt_quota.c 	if (priv->quota >= skb->len) {
skb                35 net/netfilter/xt_quota.c 		priv->quota -= skb->len;
skb                15 net/netfilter/xt_rateest.c xt_rateest_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                22 net/netfilter/xt_realm.c realm_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                25 net/netfilter/xt_realm.c 	const struct dst_entry *dst = skb_dst(skb);
skb               233 net/netfilter/xt_recent.c recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb               245 net/netfilter/xt_recent.c 		const struct iphdr *iph = ip_hdr(skb);
skb               254 net/netfilter/xt_recent.c 		const struct ipv6hdr *iph = ipv6_hdr(skb);
skb               266 net/netfilter/xt_recent.c 	    (!skb->sk || !net_eq(net, sock_net(skb->sk))))
skb                40 net/netfilter/xt_sctp.c match_packet(const struct sk_buff *skb,
skb                60 net/netfilter/xt_sctp.c 		sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
skb                74 net/netfilter/xt_sctp.c 		pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
skb               103 net/netfilter/xt_sctp.c 	} while (offset < skb->len);
skb               119 net/netfilter/xt_sctp.c sctp_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb               130 net/netfilter/xt_sctp.c 	sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh);
skb               144 net/netfilter/xt_sctp.c 		SCCHECK(match_packet(skb, par->thoff + sizeof(_sh),
skb                29 net/netfilter/xt_set.c match_set(ip_set_id_t index, const struct sk_buff *skb,
skb                33 net/netfilter/xt_set.c 	if (ip_set_test(index, skb, par, opt))
skb                54 net/netfilter/xt_set.c set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
skb                62 net/netfilter/xt_set.c 	return match_set(info->match_set.index, skb, par, &opt,
skb               118 net/netfilter/xt_set.c set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
skb               129 net/netfilter/xt_set.c 	return match_set(info->match_set.index, skb, par, &opt,
skb               166 net/netfilter/xt_set.c set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
skb               179 net/netfilter/xt_set.c 	return match_set(info->match_set.index, skb, par, &opt,
skb               189 net/netfilter/xt_set.c set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
skb               202 net/netfilter/xt_set.c 	return match_set(info->match_set.index, skb, par, &opt,
skb               212 net/netfilter/xt_set.c set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
skb               224 net/netfilter/xt_set.c 		ip_set_add(info->add_set.index, skb, par, &add_opt);
skb               226 net/netfilter/xt_set.c 		ip_set_del(info->del_set.index, skb, par, &del_opt);
skb               287 net/netfilter/xt_set.c set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
skb               299 net/netfilter/xt_set.c 		ip_set_add(info->add_set.index, skb, par, &add_opt);
skb               301 net/netfilter/xt_set.c 		ip_set_del(info->del_set.index, skb, par, &del_opt);
skb               358 net/netfilter/xt_set.c set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
skb               374 net/netfilter/xt_set.c 		ip_set_add(info->add_set.index, skb, par, &add_opt);
skb               376 net/netfilter/xt_set.c 		ip_set_del(info->del_set.index, skb, par, &del_opt);
skb               389 net/netfilter/xt_set.c set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
skb               409 net/netfilter/xt_set.c 		ip_set_add(info->add_set.index, skb, par, &add_opt);
skb               411 net/netfilter/xt_set.c 		ip_set_del(info->del_set.index, skb, par, &del_opt);
skb               416 net/netfilter/xt_set.c 		ret = match_set(info->map_set.index, skb, par, &map_opt,
skb               421 net/netfilter/xt_set.c 			skb->mark = (skb->mark & ~MOPT(map_opt,skbmarkmask))
skb               424 net/netfilter/xt_set.c 			skb->priority = MOPT(map_opt, skbprio);
skb               426 net/netfilter/xt_set.c 		    skb->dev &&
skb               427 net/netfilter/xt_set.c 		    skb->dev->real_num_tx_queues > MOPT(map_opt, skbqueue))
skb               428 net/netfilter/xt_set.c 			skb_set_queue_mapping(skb, MOPT(map_opt, skbqueue));
skb                49 net/netfilter/xt_socket.c socket_match(const struct sk_buff *skb, struct xt_action_param *par,
skb                52 net/netfilter/xt_socket.c 	struct sk_buff *pskb = (struct sk_buff *)skb;
skb                53 net/netfilter/xt_socket.c 	struct sock *sk = skb->sk;
skb                59 net/netfilter/xt_socket.c 		sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par));
skb                82 net/netfilter/xt_socket.c 		if (sk != skb->sk)
skb                93 net/netfilter/xt_socket.c socket_mt4_v0(const struct sk_buff *skb, struct xt_action_param *par)
skb                99 net/netfilter/xt_socket.c 	return socket_match(skb, par, &xt_info_v0);
skb               103 net/netfilter/xt_socket.c socket_mt4_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
skb               105 net/netfilter/xt_socket.c 	return socket_match(skb, par, par->matchinfo);
skb               110 net/netfilter/xt_socket.c socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
skb               113 net/netfilter/xt_socket.c 	struct sk_buff *pskb = (struct sk_buff *)skb;
skb               114 net/netfilter/xt_socket.c 	struct sock *sk = skb->sk;
skb               120 net/netfilter/xt_socket.c 		sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par));
skb               143 net/netfilter/xt_socket.c 		if (sk != skb->sk)
skb                21 net/netfilter/xt_state.c state_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                26 net/netfilter/xt_state.c 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
skb                29 net/netfilter/xt_statistic.c statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                24 net/netfilter/xt_string.c string_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                31 net/netfilter/xt_string.c 	return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
skb                25 net/netfilter/xt_tcpmss.c tcpmss_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                36 net/netfilter/xt_tcpmss.c 	th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
skb                49 net/netfilter/xt_tcpmss.c 	op = skb_header_pointer(skb, par->thoff + sizeof(*th), optlen, _opt);
skb                33 net/netfilter/xt_tcpudp.c 		const struct sk_buff *skb,
skb                50 net/netfilter/xt_tcpudp.c 	op = skb_header_pointer(skb, protoff + sizeof(struct tcphdr),
skb                66 net/netfilter/xt_tcpudp.c static bool tcp_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                87 net/netfilter/xt_tcpudp.c 	th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
skb               112 net/netfilter/xt_tcpudp.c 		if (!tcp_find_option(tcpinfo->option, skb, par->thoff,
skb               129 net/netfilter/xt_tcpudp.c static bool udp_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb               139 net/netfilter/xt_tcpudp.c 	uh = skb_header_pointer(skb, par->thoff, sizeof(_udph), &_udph);
skb               158 net/netfilter/xt_time.c time_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                18 net/netfilter/xt_u32.c 			 const struct sk_buff *skb)
skb                39 net/netfilter/xt_u32.c 		if (skb->len < 4 || pos > skb->len - 4)
skb                42 net/netfilter/xt_u32.c 		if (skb_copy_bits(skb, pos, &n, sizeof(n)) < 0)
skb                65 net/netfilter/xt_u32.c 				if (at + 4 < at || skb->len < at + 4 ||
skb                66 net/netfilter/xt_u32.c 				    pos > skb->len - at - 4)
skb                69 net/netfilter/xt_u32.c 				if (skb_copy_bits(skb, at + pos, &n,
skb                90 net/netfilter/xt_u32.c static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par)
skb                95 net/netfilter/xt_u32.c 	ret = u32_match_it(data, skb);
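
The xt_u32.c checks above are careful about unsigned arithmetic: testing skb->len < 4 before pos > skb->len - 4 prevents the subtraction from wrapping, and skb_copy_bits() then handles non-linear data. A sketch of that read under an assumed helper name:

	#include <linux/skbuff.h>

	/* Bounds-checked read of a big-endian u32 at offset pos. */
	static bool example_read_be32(const struct sk_buff *skb,
				      unsigned int pos, u32 *val)
	{
		__be32 n;

		if (skb->len < 4 || pos > skb->len - 4)
			return false;		/* out of range */
		if (skb_copy_bits(skb, pos, &n, sizeof(n)) < 0)
			return false;		/* copy failed */
		*val = ntohl(n);
		return true;
	}
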
skb                38 net/netlabel/netlabel_calipso.c 	struct sk_buff *skb;
skb                98 net/netlabel/netlabel_calipso.c static int netlbl_calipso_add(struct sk_buff *skb, struct genl_info *info)
skb               108 net/netlabel/netlabel_calipso.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb               130 net/netlabel/netlabel_calipso.c static int netlbl_calipso_list(struct sk_buff *skb, struct genl_info *info)
skb               197 net/netlabel/netlabel_calipso.c 	data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
skb               203 net/netlabel/netlabel_calipso.c 	ret_val = nla_put_u32(cb_arg->skb, NLBL_CALIPSO_A_DOI, doi_def->doi);
skb               206 net/netlabel/netlabel_calipso.c 	ret_val = nla_put_u32(cb_arg->skb,
skb               212 net/netlabel/netlabel_calipso.c 	genlmsg_end(cb_arg->skb, data);
skb               216 net/netlabel/netlabel_calipso.c 	genlmsg_cancel(cb_arg->skb, data);
skb               230 net/netlabel/netlabel_calipso.c static int netlbl_calipso_listall(struct sk_buff *skb,
skb               237 net/netlabel/netlabel_calipso.c 	cb_arg.skb = skb;
skb               243 net/netlabel/netlabel_calipso.c 	return skb->len;
skb               279 net/netlabel/netlabel_calipso.c static int netlbl_calipso_remove(struct sk_buff *skb, struct genl_info *info)
skb               290 net/netlabel/netlabel_calipso.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb               621 net/netlabel/netlabel_calipso.c unsigned char *calipso_optptr(const struct sk_buff *skb)
skb               627 net/netlabel/netlabel_calipso.c 		ret_val = ops->skbuff_optptr(skb);
skb               663 net/netlabel/netlabel_calipso.c int calipso_skbuff_setattr(struct sk_buff *skb,
skb               671 net/netlabel/netlabel_calipso.c 		ret_val = ops->skbuff_setattr(skb, doi_def, secattr);
skb               684 net/netlabel/netlabel_calipso.c int calipso_skbuff_delattr(struct sk_buff *skb)
skb               690 net/netlabel/netlabel_calipso.c 		ret_val = ops->skbuff_delattr(skb);
skb               126 net/netlabel/netlabel_calipso.h unsigned char *calipso_optptr(const struct sk_buff *skb);
skb               129 net/netlabel/netlabel_calipso.h int calipso_skbuff_setattr(struct sk_buff *skb,
skb               132 net/netlabel/netlabel_calipso.h int calipso_skbuff_delattr(struct sk_buff *skb);
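
The calipso_* wrappers above (optptr/setattr/delattr) do no packet work themselves; they dispatch through an operations table registered by the IPv6 CALIPSO code, which keeps NetLabel usable even when IPv6 support is built as a module. A stripped-down sketch of that indirection (struct and names invented for illustration):

	#include <linux/skbuff.h>

	struct toy_calipso_ops {
		unsigned char *(*skbuff_optptr)(const struct sk_buff *skb);
		int (*skbuff_delattr)(struct sk_buff *skb);
	};

	static const struct toy_calipso_ops *toy_ops; /* set at register time */

	static unsigned char *toy_optptr(const struct sk_buff *skb)
	{
		/* fall back gracefully when IPv6/CALIPSO is absent */
		return toy_ops ? toy_ops->skbuff_optptr(skb) : NULL;
	}
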
skb                37 net/netlabel/netlabel_cipso_v4.c 	struct sk_buff *skb;
skb               403 net/netlabel/netlabel_cipso_v4.c static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
skb               413 net/netlabel/netlabel_cipso_v4.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb               448 net/netlabel/netlabel_cipso_v4.c static int netlbl_cipsov4_list(struct sk_buff *skb, struct genl_info *info)
skb               616 net/netlabel/netlabel_cipso_v4.c 	data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
skb               622 net/netlabel/netlabel_cipso_v4.c 	ret_val = nla_put_u32(cb_arg->skb, NLBL_CIPSOV4_A_DOI, doi_def->doi);
skb               625 net/netlabel/netlabel_cipso_v4.c 	ret_val = nla_put_u32(cb_arg->skb,
skb               631 net/netlabel/netlabel_cipso_v4.c 	genlmsg_end(cb_arg->skb, data);
skb               635 net/netlabel/netlabel_cipso_v4.c 	genlmsg_cancel(cb_arg->skb, data);
skb               649 net/netlabel/netlabel_cipso_v4.c static int netlbl_cipsov4_listall(struct sk_buff *skb,
skb               656 net/netlabel/netlabel_cipso_v4.c 	cb_arg.skb = skb;
skb               662 net/netlabel/netlabel_cipso_v4.c 	return skb->len;
skb               698 net/netlabel/netlabel_cipso_v4.c static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
skb               709 net/netlabel/netlabel_cipso_v4.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb              1275 net/netlabel/netlabel_kapi.c int netlbl_skbuff_setattr(struct sk_buff *skb,
skb              1289 net/netlabel/netlabel_kapi.c 		hdr4 = ip_hdr(skb);
skb              1298 net/netlabel/netlabel_kapi.c 			ret_val = cipso_v4_skbuff_setattr(skb, entry->cipso,
skb              1304 net/netlabel/netlabel_kapi.c 			ret_val = cipso_v4_skbuff_delattr(skb);
skb              1312 net/netlabel/netlabel_kapi.c 		hdr6 = ipv6_hdr(skb);
skb              1321 net/netlabel/netlabel_kapi.c 			ret_val = calipso_skbuff_setattr(skb, entry->calipso,
skb              1327 net/netlabel/netlabel_kapi.c 			ret_val = calipso_skbuff_delattr(skb);
skb              1356 net/netlabel/netlabel_kapi.c int netlbl_skbuff_getattr(const struct sk_buff *skb,
skb              1364 net/netlabel/netlabel_kapi.c 		ptr = cipso_v4_optptr(skb);
skb              1370 net/netlabel/netlabel_kapi.c 		ptr = calipso_optptr(skb);
skb              1377 net/netlabel/netlabel_kapi.c 	return netlbl_unlabel_getattr(skb, family, secattr);
skb              1393 net/netlabel/netlabel_kapi.c void netlbl_skbuff_err(struct sk_buff *skb, u16 family, int error, int gateway)
skb              1397 net/netlabel/netlabel_kapi.c 		if (cipso_v4_optptr(skb))
skb              1398 net/netlabel/netlabel_kapi.c 			cipso_v4_error(skb, error, gateway);
skb              1432 net/netlabel/netlabel_kapi.c int netlbl_cache_add(const struct sk_buff *skb, u16 family,
skb              1442 net/netlabel/netlabel_kapi.c 		ptr = cipso_v4_optptr(skb);
skb              1448 net/netlabel/netlabel_kapi.c 		ptr = calipso_optptr(skb);
skb                44 net/netlabel/netlabel_mgmt.c 	struct sk_buff *skb;
skb               280 net/netlabel/netlabel_mgmt.c static int netlbl_mgmt_listentry(struct sk_buff *skb,
skb               292 net/netlabel/netlabel_mgmt.c 		ret_val = nla_put_string(skb,
skb               298 net/netlabel/netlabel_mgmt.c 	ret_val = nla_put_u16(skb, NLBL_MGMT_A_FAMILY, entry->family);
skb               304 net/netlabel/netlabel_mgmt.c 		nla_a = nla_nest_start_noflag(skb, NLBL_MGMT_A_SELECTORLIST);
skb               312 net/netlabel/netlabel_mgmt.c 			nla_b = nla_nest_start_noflag(skb,
skb               318 net/netlabel/netlabel_mgmt.c 			ret_val = nla_put_in_addr(skb, NLBL_MGMT_A_IPV4ADDR,
skb               323 net/netlabel/netlabel_mgmt.c 			ret_val = nla_put_in_addr(skb, NLBL_MGMT_A_IPV4MASK,
skb               328 net/netlabel/netlabel_mgmt.c 			ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
skb               334 net/netlabel/netlabel_mgmt.c 				ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI,
skb               341 net/netlabel/netlabel_mgmt.c 			nla_nest_end(skb, nla_b);
skb               347 net/netlabel/netlabel_mgmt.c 			nla_b = nla_nest_start_noflag(skb,
skb               352 net/netlabel/netlabel_mgmt.c 			ret_val = nla_put_in6_addr(skb, NLBL_MGMT_A_IPV6ADDR,
skb               356 net/netlabel/netlabel_mgmt.c 			ret_val = nla_put_in6_addr(skb, NLBL_MGMT_A_IPV6MASK,
skb               361 net/netlabel/netlabel_mgmt.c 			ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
skb               368 net/netlabel/netlabel_mgmt.c 				ret_val = nla_put_u32(skb, NLBL_MGMT_A_CLPDOI,
skb               375 net/netlabel/netlabel_mgmt.c 			nla_nest_end(skb, nla_b);
skb               379 net/netlabel/netlabel_mgmt.c 		nla_nest_end(skb, nla_a);
skb               382 net/netlabel/netlabel_mgmt.c 		ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
skb               386 net/netlabel/netlabel_mgmt.c 		ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
skb               390 net/netlabel/netlabel_mgmt.c 		ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI,
skb               394 net/netlabel/netlabel_mgmt.c 		ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
skb               398 net/netlabel/netlabel_mgmt.c 		ret_val = nla_put_u32(skb, NLBL_MGMT_A_CLPDOI,
skb               421 net/netlabel/netlabel_mgmt.c static int netlbl_mgmt_add(struct sk_buff *skb, struct genl_info *info)
skb               437 net/netlabel/netlabel_mgmt.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb               452 net/netlabel/netlabel_mgmt.c static int netlbl_mgmt_remove(struct sk_buff *skb, struct genl_info *info)
skb               460 net/netlabel/netlabel_mgmt.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb               484 net/netlabel/netlabel_mgmt.c 	data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
skb               490 net/netlabel/netlabel_mgmt.c 	ret_val = netlbl_mgmt_listentry(cb_arg->skb, entry);
skb               495 net/netlabel/netlabel_mgmt.c 	genlmsg_end(cb_arg->skb, data);
skb               499 net/netlabel/netlabel_mgmt.c 	genlmsg_cancel(cb_arg->skb, data);
skb               514 net/netlabel/netlabel_mgmt.c static int netlbl_mgmt_listall(struct sk_buff *skb,
skb               522 net/netlabel/netlabel_mgmt.c 	cb_arg.skb = skb;
skb               532 net/netlabel/netlabel_mgmt.c 	return skb->len;
skb               545 net/netlabel/netlabel_mgmt.c static int netlbl_mgmt_adddef(struct sk_buff *skb, struct genl_info *info)
skb               560 net/netlabel/netlabel_mgmt.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb               575 net/netlabel/netlabel_mgmt.c static int netlbl_mgmt_removedef(struct sk_buff *skb, struct genl_info *info)
skb               579 net/netlabel/netlabel_mgmt.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb               595 net/netlabel/netlabel_mgmt.c static int netlbl_mgmt_listdef(struct sk_buff *skb, struct genl_info *info)
skb               649 net/netlabel/netlabel_mgmt.c static int netlbl_mgmt_protocols_cb(struct sk_buff *skb,
skb               656 net/netlabel/netlabel_mgmt.c 	data = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb               662 net/netlabel/netlabel_mgmt.c 	ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, protocol);
skb               666 net/netlabel/netlabel_mgmt.c 	genlmsg_end(skb, data);
skb               670 net/netlabel/netlabel_mgmt.c 	genlmsg_cancel(skb, data);
skb               683 net/netlabel/netlabel_mgmt.c static int netlbl_mgmt_protocols(struct sk_buff *skb,
skb               689 net/netlabel/netlabel_mgmt.c 		if (netlbl_mgmt_protocols_cb(skb,
skb               696 net/netlabel/netlabel_mgmt.c 		if (netlbl_mgmt_protocols_cb(skb,
skb               704 net/netlabel/netlabel_mgmt.c 		if (netlbl_mgmt_protocols_cb(skb,
skb               714 net/netlabel/netlabel_mgmt.c 	return skb->len;
skb               727 net/netlabel/netlabel_mgmt.c static int netlbl_mgmt_version(struct sk_buff *skb, struct genl_info *info)
skb                95 net/netlabel/netlabel_unlabeled.c 	struct sk_buff *skb;
skb               808 net/netlabel/netlabel_unlabeled.c static int netlbl_unlabel_accept(struct sk_buff *skb, struct genl_info *info)
skb               816 net/netlabel/netlabel_unlabeled.c 			netlbl_netlink_auditinfo(skb, &audit_info);
skb               835 net/netlabel/netlabel_unlabeled.c static int netlbl_unlabel_list(struct sk_buff *skb, struct genl_info *info)
skb               876 net/netlabel/netlabel_unlabeled.c static int netlbl_unlabel_staticadd(struct sk_buff *skb,
skb               899 net/netlabel/netlabel_unlabeled.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb               928 net/netlabel/netlabel_unlabeled.c static int netlbl_unlabel_staticadddef(struct sk_buff *skb,
skb               949 net/netlabel/netlabel_unlabeled.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb               977 net/netlabel/netlabel_unlabeled.c static int netlbl_unlabel_staticremove(struct sk_buff *skb,
skb               996 net/netlabel/netlabel_unlabeled.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb              1019 net/netlabel/netlabel_unlabeled.c static int netlbl_unlabel_staticremovedef(struct sk_buff *skb,
skb              1036 net/netlabel/netlabel_unlabeled.c 	netlbl_netlink_auditinfo(skb, &audit_info);
skb              1078 net/netlabel/netlabel_unlabeled.c 	data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
skb              1090 net/netlabel/netlabel_unlabeled.c 		ret_val = nla_put_string(cb_arg->skb,
skb              1101 net/netlabel/netlabel_unlabeled.c 		ret_val = nla_put_in_addr(cb_arg->skb,
skb              1108 net/netlabel/netlabel_unlabeled.c 		ret_val = nla_put_in_addr(cb_arg->skb,
skb              1116 net/netlabel/netlabel_unlabeled.c 		ret_val = nla_put_in6_addr(cb_arg->skb,
skb              1122 net/netlabel/netlabel_unlabeled.c 		ret_val = nla_put_in6_addr(cb_arg->skb,
skb              1134 net/netlabel/netlabel_unlabeled.c 	ret_val = nla_put(cb_arg->skb,
skb              1143 net/netlabel/netlabel_unlabeled.c 	genlmsg_end(cb_arg->skb, data);
skb              1147 net/netlabel/netlabel_unlabeled.c 	genlmsg_cancel(cb_arg->skb, data);
skb              1162 net/netlabel/netlabel_unlabeled.c static int netlbl_unlabel_staticlist(struct sk_buff *skb,
skb              1178 net/netlabel/netlabel_unlabeled.c 	cb_arg.skb = skb;
skb              1231 net/netlabel/netlabel_unlabeled.c 	return skb->len;
skb              1245 net/netlabel/netlabel_unlabeled.c static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
skb              1257 net/netlabel/netlabel_unlabeled.c 	cb_arg.skb = skb;
skb              1296 net/netlabel/netlabel_unlabeled.c 	return skb->len;
skb              1451 net/netlabel/netlabel_unlabeled.c int netlbl_unlabel_getattr(const struct sk_buff *skb,
skb              1458 net/netlabel/netlabel_unlabeled.c 	iface = netlbl_unlhsh_search_iface(skb->skb_iif);
skb              1469 net/netlabel/netlabel_unlabeled.c 	if (family == PF_INET6 && ip_hdr(skb)->version == 4)
skb              1478 net/netlabel/netlabel_unlabeled.c 		hdr4 = ip_hdr(skb);
skb              1491 net/netlabel/netlabel_unlabeled.c 		hdr6 = ipv6_hdr(skb);
skb               224 net/netlabel/netlabel_unlabeled.h int netlbl_unlabel_getattr(const struct sk_buff *skb,
skb                34 net/netlabel/netlabel_user.h static inline void netlbl_netlink_auditinfo(struct sk_buff *skb,
skb               154 net/netlink/af_netlink.c static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
skb               157 net/netlink/af_netlink.c 	unsigned int len = skb_end_offset(skb);
skb               164 net/netlink/af_netlink.c 	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
skb               165 net/netlink/af_netlink.c 	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
skb               166 net/netlink/af_netlink.c 	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
skb               168 net/netlink/af_netlink.c 	skb_put_data(new, skb->data, len);
skb               250 net/netlink/af_netlink.c static bool netlink_filter_tap(const struct sk_buff *skb)
skb               252 net/netlink/af_netlink.c 	struct sock *sk = skb->sk;
skb               272 net/netlink/af_netlink.c static int __netlink_deliver_tap_skb(struct sk_buff *skb,
skb               276 net/netlink/af_netlink.c 	struct sock *sk = skb->sk;
skb               284 net/netlink/af_netlink.c 	if (is_vmalloc_addr(skb->head))
skb               285 net/netlink/af_netlink.c 		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
skb               287 net/netlink/af_netlink.c 		nskb = skb_clone(skb, GFP_ATOMIC);
skb               303 net/netlink/af_netlink.c static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
skb               308 net/netlink/af_netlink.c 	if (!netlink_filter_tap(skb))
skb               312 net/netlink/af_netlink.c 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
skb               318 net/netlink/af_netlink.c static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
skb               325 net/netlink/af_netlink.c 		__netlink_deliver_tap(skb, nn);
skb               331 net/netlink/af_netlink.c 				       struct sk_buff *skb)
skb               334 net/netlink/af_netlink.c 		netlink_deliver_tap(sock_net(dst), skb);
skb               361 net/netlink/af_netlink.c static void netlink_skb_destructor(struct sk_buff *skb)
skb               363 net/netlink/af_netlink.c 	if (is_vmalloc_addr(skb->head)) {
skb               364 net/netlink/af_netlink.c 		if (!skb->cloned ||
skb               365 net/netlink/af_netlink.c 		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
skb               366 net/netlink/af_netlink.c 			vfree(skb->head);
skb               368 net/netlink/af_netlink.c 		skb->head = NULL;
skb               370 net/netlink/af_netlink.c 	if (skb->sk != NULL)
skb               371 net/netlink/af_netlink.c 		sock_rfree(skb);
skb               374 net/netlink/af_netlink.c static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
skb               376 net/netlink/af_netlink.c 	WARN_ON(skb->sk != NULL);
skb               377 net/netlink/af_netlink.c 	skb->sk = sk;
skb               378 net/netlink/af_netlink.c 	skb->destructor = netlink_skb_destructor;
skb               379 net/netlink/af_netlink.c 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
skb               380 net/netlink/af_netlink.c 	sk_mem_charge(sk, skb->truesize);
skb               391 net/netlink/af_netlink.c 		kfree_skb(nlk->cb.skb);
skb               871 net/netlink/af_netlink.c bool netlink_ns_capable(const struct sk_buff *skb,
skb               874 net/netlink/af_netlink.c 	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
skb               887 net/netlink/af_netlink.c bool netlink_capable(const struct sk_buff *skb, int cap)
skb               889 net/netlink/af_netlink.c 	return netlink_ns_capable(skb, &init_user_ns, cap);
skb               903 net/netlink/af_netlink.c bool netlink_net_capable(const struct sk_buff *skb, int cap)
skb               905 net/netlink/af_netlink.c 	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
skb              1171 net/netlink/af_netlink.c 	struct sk_buff *skb;
skb              1184 net/netlink/af_netlink.c 	skb = __build_skb(data, size);
skb              1185 net/netlink/af_netlink.c 	if (skb == NULL)
skb              1188 net/netlink/af_netlink.c 		skb->destructor = netlink_skb_destructor;
skb              1190 net/netlink/af_netlink.c 	return skb;
skb              1203 net/netlink/af_netlink.c int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
skb              1217 net/netlink/af_netlink.c 			kfree_skb(skb);
skb              1234 net/netlink/af_netlink.c 			kfree_skb(skb);
skb              1239 net/netlink/af_netlink.c 	netlink_skb_set_owner_r(skb, sk);
skb              1243 net/netlink/af_netlink.c static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
skb              1245 net/netlink/af_netlink.c 	int len = skb->len;
skb              1247 net/netlink/af_netlink.c 	netlink_deliver_tap(sock_net(sk), skb);
skb              1249 net/netlink/af_netlink.c 	skb_queue_tail(&sk->sk_receive_queue, skb);
skb              1254 net/netlink/af_netlink.c int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
skb              1256 net/netlink/af_netlink.c 	int len = __netlink_sendskb(sk, skb);
skb              1262 net/netlink/af_netlink.c void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
skb              1264 net/netlink/af_netlink.c 	kfree_skb(skb);
skb              1268 net/netlink/af_netlink.c static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
skb              1272 net/netlink/af_netlink.c 	WARN_ON(skb->sk != NULL);
skb              1273 net/netlink/af_netlink.c 	delta = skb->end - skb->tail;
skb              1274 net/netlink/af_netlink.c 	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
skb              1275 net/netlink/af_netlink.c 		return skb;
skb              1277 net/netlink/af_netlink.c 	if (skb_shared(skb)) {
skb              1278 net/netlink/af_netlink.c 		struct sk_buff *nskb = skb_clone(skb, allocation);
skb              1280 net/netlink/af_netlink.c 			return skb;
skb              1281 net/netlink/af_netlink.c 		consume_skb(skb);
skb              1282 net/netlink/af_netlink.c 		skb = nskb;
skb              1285 net/netlink/af_netlink.c 	pskb_expand_head(skb, 0, -delta,
skb              1288 net/netlink/af_netlink.c 	return skb;
skb              1291 net/netlink/af_netlink.c static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
skb              1299 net/netlink/af_netlink.c 		ret = skb->len;
skb              1300 net/netlink/af_netlink.c 		netlink_skb_set_owner_r(skb, sk);
skb              1301 net/netlink/af_netlink.c 		NETLINK_CB(skb).sk = ssk;
skb              1302 net/netlink/af_netlink.c 		netlink_deliver_tap_kernel(sk, ssk, skb);
skb              1303 net/netlink/af_netlink.c 		nlk->netlink_rcv(skb);
skb              1304 net/netlink/af_netlink.c 		consume_skb(skb);
skb              1306 net/netlink/af_netlink.c 		kfree_skb(skb);
skb              1312 net/netlink/af_netlink.c int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
skb              1319 net/netlink/af_netlink.c 	skb = netlink_trim(skb, gfp_any());
skb              1325 net/netlink/af_netlink.c 		kfree_skb(skb);
skb              1329 net/netlink/af_netlink.c 		return netlink_unicast_kernel(sk, skb, ssk);
skb              1331 net/netlink/af_netlink.c 	if (sk_filter(sk, skb)) {
skb              1332 net/netlink/af_netlink.c 		err = skb->len;
skb              1333 net/netlink/af_netlink.c 		kfree_skb(skb);
skb              1338 net/netlink/af_netlink.c 	err = netlink_attachskb(sk, skb, &timeo, ssk);
skb              1344 net/netlink/af_netlink.c 	return netlink_sendskb(sk, skb);
skb              1367 net/netlink/af_netlink.c bool netlink_strict_get_check(struct sk_buff *skb)
skb              1369 net/netlink/af_netlink.c 	const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);
skb              1375 net/netlink/af_netlink.c static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
skb              1381 net/netlink/af_netlink.c 		netlink_skb_set_owner_r(skb, sk);
skb              1382 net/netlink/af_netlink.c 		__netlink_sendskb(sk, skb);
skb              1398 net/netlink/af_netlink.c 	struct sk_buff *skb, *skb2;
skb              1399 net/netlink/af_netlink.c 	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
skb              1435 net/netlink/af_netlink.c 		if (skb_shared(p->skb)) {
skb              1436 net/netlink/af_netlink.c 			p->skb2 = skb_clone(p->skb, p->allocation);
skb              1438 net/netlink/af_netlink.c 			p->skb2 = skb_get(p->skb);
skb              1481 net/netlink/af_netlink.c int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
skb              1483 net/netlink/af_netlink.c 	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
skb              1490 net/netlink/af_netlink.c 	skb = netlink_trim(skb, allocation);
skb              1501 net/netlink/af_netlink.c 	info.skb = skb;
skb              1513 net/netlink/af_netlink.c 	consume_skb(skb);
skb              1532 net/netlink/af_netlink.c int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
skb              1535 net/netlink/af_netlink.c 	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
skb              1825 net/netlink/af_netlink.c static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
skb              1829 net/netlink/af_netlink.c 	info.group = NETLINK_CB(skb).dst_group;
skb              1834 net/netlink/af_netlink.c 					 struct sk_buff *skb)
skb              1836 net/netlink/af_netlink.c 	if (!NETLINK_CB(skb).nsid_is_set)
skb              1840 net/netlink/af_netlink.c 		 &NETLINK_CB(skb).nsid);
skb              1850 net/netlink/af_netlink.c 	struct sk_buff *skb;
skb              1893 net/netlink/af_netlink.c 	skb = netlink_alloc_large_skb(len, dst_group);
skb              1894 net/netlink/af_netlink.c 	if (skb == NULL)
skb              1897 net/netlink/af_netlink.c 	NETLINK_CB(skb).portid	= nlk->portid;
skb              1898 net/netlink/af_netlink.c 	NETLINK_CB(skb).dst_group = dst_group;
skb              1899 net/netlink/af_netlink.c 	NETLINK_CB(skb).creds	= scm.creds;
skb              1900 net/netlink/af_netlink.c 	NETLINK_CB(skb).flags	= netlink_skb_flags;
skb              1903 net/netlink/af_netlink.c 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
skb              1904 net/netlink/af_netlink.c 		kfree_skb(skb);
skb              1908 net/netlink/af_netlink.c 	err = security_netlink_send(sk, skb);
skb              1910 net/netlink/af_netlink.c 		kfree_skb(skb);
skb              1915 net/netlink/af_netlink.c 		refcount_inc(&skb->users);
skb              1916 net/netlink/af_netlink.c 		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
skb              1918 net/netlink/af_netlink.c 	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
skb              1933 net/netlink/af_netlink.c 	struct sk_buff *skb, *data_skb;
skb              1941 net/netlink/af_netlink.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb              1942 net/netlink/af_netlink.c 	if (skb == NULL)
skb              1945 net/netlink/af_netlink.c 	data_skb = skb;
skb              1948 net/netlink/af_netlink.c 	if (unlikely(skb_shinfo(skb)->frag_list)) {
skb              1960 net/netlink/af_netlink.c 			data_skb = skb_shinfo(skb)->frag_list;
skb              1982 net/netlink/af_netlink.c 		addr->nl_pid	= NETLINK_CB(skb).portid;
skb              1983 net/netlink/af_netlink.c 		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
skb              1988 net/netlink/af_netlink.c 		netlink_cmsg_recv_pktinfo(msg, skb);
skb              1990 net/netlink/af_netlink.c 		netlink_cmsg_listen_all_nsid(sk, msg, skb);
skb              1993 net/netlink/af_netlink.c 	scm.creds = *NETLINK_CREDS(skb);
skb              1997 net/netlink/af_netlink.c 	skb_free_datagram(sk, skb);
skb              2166 net/netlink/af_netlink.c __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
skb              2171 net/netlink/af_netlink.c 	nlh = skb_put(skb, NLMSG_ALIGN(size));
skb              2193 net/netlink/af_netlink.c 	struct sk_buff *skb = NULL;
skb              2219 net/netlink/af_netlink.c 		skb = alloc_skb(alloc_size,
skb              2223 net/netlink/af_netlink.c 	if (!skb) {
skb              2225 net/netlink/af_netlink.c 		skb = alloc_skb(alloc_size, GFP_KERNEL);
skb              2227 net/netlink/af_netlink.c 	if (!skb)
skb              2240 net/netlink/af_netlink.c 	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
skb              2241 net/netlink/af_netlink.c 	netlink_skb_set_owner_r(skb, sk);
skb              2245 net/netlink/af_netlink.c 		nlk->dump_done_errno = cb->dump(skb, cb);
skb              2250 net/netlink/af_netlink.c 	    skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
skb              2253 net/netlink/af_netlink.c 		if (sk_filter(sk, skb))
skb              2254 net/netlink/af_netlink.c 			kfree_skb(skb);
skb              2256 net/netlink/af_netlink.c 			__netlink_sendskb(sk, skb);
skb              2260 net/netlink/af_netlink.c 	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
skb              2273 net/netlink/af_netlink.c 		if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack._msg))
skb              2274 net/netlink/af_netlink.c 			nlmsg_end(skb, nlh);
skb              2277 net/netlink/af_netlink.c 	if (sk_filter(sk, skb))
skb              2278 net/netlink/af_netlink.c 		kfree_skb(skb);
skb              2280 net/netlink/af_netlink.c 		__netlink_sendskb(sk, skb);
skb              2287 net/netlink/af_netlink.c 	skb = cb->skb;
skb              2290 net/netlink/af_netlink.c 	consume_skb(skb);
skb              2295 net/netlink/af_netlink.c 	kfree_skb(skb);
skb              2299 net/netlink/af_netlink.c int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
skb              2308 net/netlink/af_netlink.c 	refcount_inc(&skb->users);
skb              2310 net/netlink/af_netlink.c 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
skb              2337 net/netlink/af_netlink.c 	cb->skb = skb;
skb              2339 net/netlink/af_netlink.c 	nlk2 = nlk_sk(NETLINK_CB(skb).sk);
skb              2371 net/netlink/af_netlink.c 	kfree_skb(skb);
skb              2379 net/netlink/af_netlink.c 	struct sk_buff *skb;
skb              2412 net/netlink/af_netlink.c 	skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
skb              2413 net/netlink/af_netlink.c 	if (!skb) {
skb              2419 net/netlink/af_netlink.c 	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
skb              2427 net/netlink/af_netlink.c 			WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
skb              2435 net/netlink/af_netlink.c 				WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
skb              2440 net/netlink/af_netlink.c 				WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
skb              2446 net/netlink/af_netlink.c 	nlmsg_end(skb, rep);
skb              2448 net/netlink/af_netlink.c 	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
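
The netlink_ack() lines above are the canonical kernel-to-user reply: allocate with nlmsg_new(), open a header with __nlmsg_put(), append attributes, close the message, and netlink_unicast() it back to the sender's portid. A minimal sketch of a reply along those lines (names illustrative, error handling trimmed):

	#include <net/netlink.h>

	static void example_reply(struct sock *sk, struct sk_buff *in_skb)
	{
		struct sk_buff *skb;
		struct nlmsghdr *nlh;

		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
		if (skb == NULL)
			return;

		nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, 0, 0);
		if (nlh == NULL) {
			nlmsg_free(skb);
			return;
		}
		nlmsg_end(skb, nlh);

		/* deliver to the portid recorded in the request skb */
		netlink_unicast(sk, skb, NETLINK_CB(in_skb).portid,
				MSG_DONTWAIT);
	}
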
skb              2452 net/netlink/af_netlink.c int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
skb              2460 net/netlink/af_netlink.c 	while (skb->len >= nlmsg_total_size(0)) {
skb              2464 net/netlink/af_netlink.c 		nlh = nlmsg_hdr(skb);
skb              2467 net/netlink/af_netlink.c 		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
skb              2478 net/netlink/af_netlink.c 		err = cb(skb, nlh, &extack);
skb              2484 net/netlink/af_netlink.c 			netlink_ack(skb, nlh, err, &extack);
skb              2488 net/netlink/af_netlink.c 		if (msglen > skb->len)
skb              2489 net/netlink/af_netlink.c 			msglen = skb->len;
skb              2490 net/netlink/af_netlink.c 		skb_pull(skb, msglen);
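
netlink_rcv_skb() above encodes the receive-side framing rules: records are NLMSG_ALIGN-padded, a header shorter than NLMSG_HDRLEN or longer than the remaining skb is malformed, and skb_pull() advances to the next record. The same walk as a compact sketch (function name assumed):

	#include <net/netlink.h>

	static void example_walk_msgs(struct sk_buff *skb)
	{
		while (skb->len >= nlmsg_total_size(0)) {
			struct nlmsghdr *nlh = nlmsg_hdr(skb);
			int msglen;

			if (nlh->nlmsg_len < NLMSG_HDRLEN ||
			    skb->len < nlh->nlmsg_len)
				return;	/* malformed: stop parsing */

			/* ... dispatch on nlh->nlmsg_type here ... */

			msglen = NLMSG_ALIGN(nlh->nlmsg_len);
			if (msglen > skb->len)
				msglen = skb->len;
			skb_pull(skb, msglen);
		}
	}
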
skb              2506 net/netlink/af_netlink.c int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
skb              2515 net/netlink/af_netlink.c 			refcount_inc(&skb->users);
skb              2521 net/netlink/af_netlink.c 		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
skb              2527 net/netlink/af_netlink.c 		err2 = nlmsg_unicast(sk, skb, portid);
skb                42 net/netlink/af_netlink.h 	void			(*netlink_rcv)(struct sk_buff *skb);
skb                23 net/netlink/diag.c static int sk_diag_put_flags(struct sock *sk, struct sk_buff *skb)
skb                41 net/netlink/diag.c 	return nla_put_u32(skb, NETLINK_DIAG_FLAGS, flags);
skb                44 net/netlink/diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
skb                52 net/netlink/diag.c 	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
skb                70 net/netlink/diag.c 	    sk_diag_dump_groups(sk, skb))
skb                74 net/netlink/diag.c 	    sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO))
skb                78 net/netlink/diag.c 	    sk_diag_put_flags(sk, skb))
skb                81 net/netlink/diag.c 	nlmsg_end(skb, nlh);
skb                85 net/netlink/diag.c 	nlmsg_cancel(skb, nlh);
skb                89 net/netlink/diag.c static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb                94 net/netlink/diag.c 	struct net *net = sock_net(skb->sk);
skb               136 net/netlink/diag.c 		if (sk_diag_fill(sk, skb, req,
skb               137 net/netlink/diag.c 				 NETLINK_CB(cb->skb).portid,
skb               166 net/netlink/diag.c 		if (sk_diag_fill(sk, skb, req,
skb               167 net/netlink/diag.c 				 NETLINK_CB(cb->skb).portid,
skb               184 net/netlink/diag.c static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               196 net/netlink/diag.c 			err = __netlink_diag_dump(skb, cb, i, s_num);
skb               206 net/netlink/diag.c 		err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
skb               209 net/netlink/diag.c 	return err < 0 ? err : skb->len;
skb               224 net/netlink/diag.c static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
skb               227 net/netlink/diag.c 	struct net *net = sock_net(skb->sk);
skb               237 net/netlink/diag.c 		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
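
The diag code above also illustrates the dump contract: a dumpit callback returns skb->len while more records may follow (the core then re-invokes it with a fresh skb) and stashes its cursor in cb->args[]. A toy dumpit under those assumptions (record source and the count of 4 are invented):

	#include <net/netlink.h>

	static int example_dumpit(struct sk_buff *skb,
				  struct netlink_callback *cb)
	{
		unsigned long idx;

		for (idx = cb->args[0]; idx < 4; idx++) {
			struct nlmsghdr *nlh;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLMSG_MIN_TYPE,
					0, NLM_F_MULTI);
			if (nlh == NULL)
				break;	/* skb full: resume at idx */
			nlmsg_end(skb, nlh);
		}
		cb->args[0] = idx;	/* cursor survives across calls */
		return skb->len;	/* 0 once nothing was added: done */
	}
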
skb               441 net/netlink/genetlink.c void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
skb               447 net/netlink/genetlink.c 	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
skb               475 net/netlink/genetlink.c static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
skb               482 net/netlink/genetlink.c 	rc = ops->dumpit(skb, cb);
skb               502 net/netlink/genetlink.c 			       struct sk_buff *skb,
skb               507 net/netlink/genetlink.c 	struct net *net = sock_net(skb->sk);
skb               526 net/netlink/genetlink.c 	    !netlink_capable(skb, CAP_NET_ADMIN))
skb               530 net/netlink/genetlink.c 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
skb               572 net/netlink/genetlink.c 			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
skb               583 net/netlink/genetlink.c 			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
skb               614 net/netlink/genetlink.c 	info.snd_portid = NETLINK_CB(skb).portid;
skb               624 net/netlink/genetlink.c 		err = family->pre_doit(ops, skb, &info);
skb               629 net/netlink/genetlink.c 	err = ops->doit(skb, &info);
skb               632 net/netlink/genetlink.c 		family->post_doit(ops, skb, &info);
skb               641 net/netlink/genetlink.c static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               654 net/netlink/genetlink.c 	err = genl_family_rcv_msg(family, skb, nlh, extack);
skb               662 net/netlink/genetlink.c static void genl_rcv(struct sk_buff *skb)
skb               665 net/netlink/genetlink.c 	netlink_rcv_skb(skb, &genl_rcv_msg);
skb               676 net/netlink/genetlink.c 			  u32 flags, struct sk_buff *skb, u8 cmd)
skb               680 net/netlink/genetlink.c 	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
skb               684 net/netlink/genetlink.c 	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
skb               685 net/netlink/genetlink.c 	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
skb               686 net/netlink/genetlink.c 	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
skb               687 net/netlink/genetlink.c 	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
skb               688 net/netlink/genetlink.c 	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
skb               695 net/netlink/genetlink.c 		nla_ops = nla_nest_start_noflag(skb, CTRL_ATTR_OPS);
skb               711 net/netlink/genetlink.c 			nest = nla_nest_start_noflag(skb, i + 1);
skb               715 net/netlink/genetlink.c 			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
skb               716 net/netlink/genetlink.c 			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
skb               719 net/netlink/genetlink.c 			nla_nest_end(skb, nest);
skb               722 net/netlink/genetlink.c 		nla_nest_end(skb, nla_ops);
skb               729 net/netlink/genetlink.c 		nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
skb               739 net/netlink/genetlink.c 			nest = nla_nest_start_noflag(skb, i + 1);
skb               743 net/netlink/genetlink.c 			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
skb               745 net/netlink/genetlink.c 			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
skb               749 net/netlink/genetlink.c 			nla_nest_end(skb, nest);
skb               751 net/netlink/genetlink.c 		nla_nest_end(skb, nla_grps);
skb               754 net/netlink/genetlink.c 	genlmsg_end(skb, hdr);
skb               758 net/netlink/genetlink.c 	genlmsg_cancel(skb, hdr);
skb               765 net/netlink/genetlink.c 				struct sk_buff *skb, u8 cmd)
skb               771 net/netlink/genetlink.c 	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
skb               775 net/netlink/genetlink.c 	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
skb               776 net/netlink/genetlink.c 	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
skb               779 net/netlink/genetlink.c 	nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
skb               783 net/netlink/genetlink.c 	nest = nla_nest_start_noflag(skb, 1);
skb               787 net/netlink/genetlink.c 	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
skb               788 net/netlink/genetlink.c 	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
skb               792 net/netlink/genetlink.c 	nla_nest_end(skb, nest);
skb               793 net/netlink/genetlink.c 	nla_nest_end(skb, nla_grps);
skb               795 net/netlink/genetlink.c 	genlmsg_end(skb, hdr);
skb               799 net/netlink/genetlink.c 	genlmsg_cancel(skb, hdr);
skb               803 net/netlink/genetlink.c static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
skb               807 net/netlink/genetlink.c 	struct net *net = sock_net(skb->sk);
skb               818 net/netlink/genetlink.c 		if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
skb               820 net/netlink/genetlink.c 				   skb, CTRL_CMD_NEWFAMILY) < 0) {
skb               827 net/netlink/genetlink.c 	return skb->len;
skb               833 net/netlink/genetlink.c 	struct sk_buff *skb;
skb               836 net/netlink/genetlink.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb               837 net/netlink/genetlink.c 	if (skb == NULL)
skb               840 net/netlink/genetlink.c 	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
skb               842 net/netlink/genetlink.c 		nlmsg_free(skb);
skb               846 net/netlink/genetlink.c 	return skb;
skb               854 net/netlink/genetlink.c 	struct sk_buff *skb;
skb               857 net/netlink/genetlink.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb               858 net/netlink/genetlink.c 	if (skb == NULL)
skb               862 net/netlink/genetlink.c 				   seq, 0, skb, cmd);
skb               864 net/netlink/genetlink.c 		nlmsg_free(skb);
skb               868 net/netlink/genetlink.c 	return skb;
skb               877 net/netlink/genetlink.c static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
skb              1110 net/netlink/genetlink.c static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
skb              1120 net/netlink/genetlink.c 			tmp = skb_clone(skb, flags);
skb              1136 net/netlink/genetlink.c 	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
skb              1143 net/netlink/genetlink.c 	kfree_skb(skb);
skb              1148 net/netlink/genetlink.c 			    struct sk_buff *skb, u32 portid,
skb              1154 net/netlink/genetlink.c 	return genlmsg_mcast(skb, portid, group, flags);
skb              1158 net/netlink/genetlink.c void genl_notify(const struct genl_family *family, struct sk_buff *skb,
skb              1171 net/netlink/genetlink.c 	nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
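The ctrl_fill_info() lines above show the standard genetlink fill pattern: open the message with genlmsg_put(), emit flat attributes, group related ones with nla_nest_start_noflag()/nla_nest_end(), then seal with genlmsg_end() or unwind with genlmsg_cancel(). A minimal sketch of that pattern follows; demo_family and the DEMO_ATTR_* names are hypothetical, not from the source.

	#include <net/genetlink.h>

	enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_ID, DEMO_ATTR_NAME, DEMO_ATTR_NESTED };
	static struct genl_family demo_family;	/* hypothetical family */

	static int demo_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
				  u32 flags, u8 cmd)
	{
		struct nlattr *nest;
		void *hdr;

		hdr = genlmsg_put(skb, portid, seq, &demo_family, flags, cmd);
		if (!hdr)
			return -EMSGSIZE;

		if (nla_put_u32(skb, DEMO_ATTR_ID, 42))
			goto cancel;

		nest = nla_nest_start_noflag(skb, DEMO_ATTR_NESTED);
		if (!nest)
			goto cancel;
		if (nla_put_string(skb, DEMO_ATTR_NAME, "demo")) {
			nla_nest_cancel(skb, nest);
			goto cancel;
		}
		nla_nest_end(skb, nest);

		genlmsg_end(skb, hdr);
		return 0;

	cancel:
		genlmsg_cancel(skb, hdr);	/* removes the partial message */
		return -EMSGSIZE;
	}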
skb               237 net/netrom/af_netrom.c 	struct sk_buff *skb;
skb               249 net/netrom/af_netrom.c 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
skb               250 net/netrom/af_netrom.c 		if (skb->sk != sk) { /* A pending connection */
skb               252 net/netrom/af_netrom.c 			sock_set_flag(skb->sk, SOCK_DEAD);
skb               253 net/netrom/af_netrom.c 			nr_start_heartbeat(skb->sk);
skb               254 net/netrom/af_netrom.c 			nr_sk(skb->sk)->state = NR_STATE_0;
skb               257 net/netrom/af_netrom.c 		kfree_skb(skb);
skb               746 net/netrom/af_netrom.c 	struct sk_buff *skb;
skb               772 net/netrom/af_netrom.c 		skb = skb_dequeue(&sk->sk_receive_queue);
skb               773 net/netrom/af_netrom.c 		if (skb)
skb               793 net/netrom/af_netrom.c 	newsk = skb->sk;
skb               797 net/netrom/af_netrom.c 	kfree_skb(skb);
skb               839 net/netrom/af_netrom.c int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
skb               850 net/netrom/af_netrom.c 	skb_orphan(skb);
skb               856 net/netrom/af_netrom.c 	src  = (ax25_address *)(skb->data + 0);
skb               857 net/netrom/af_netrom.c 	dest = (ax25_address *)(skb->data + 7);
skb               859 net/netrom/af_netrom.c 	circuit_index      = skb->data[15];
skb               860 net/netrom/af_netrom.c 	circuit_id         = skb->data[16];
skb               861 net/netrom/af_netrom.c 	peer_circuit_index = skb->data[17];
skb               862 net/netrom/af_netrom.c 	peer_circuit_id    = skb->data[18];
skb               863 net/netrom/af_netrom.c 	frametype          = skb->data[19] & 0x0F;
skb               864 net/netrom/af_netrom.c 	flags              = skb->data[19] & 0xF0;
skb               871 net/netrom/af_netrom.c 		skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
skb               872 net/netrom/af_netrom.c 		skb_reset_transport_header(skb);
skb               874 net/netrom/af_netrom.c 		return nr_rx_ip(skb, dev);
skb               899 net/netrom/af_netrom.c 		skb_reset_transport_header(skb);
skb               901 net/netrom/af_netrom.c 		if (frametype == NR_CONNACK && skb->len == 22)
skb               906 net/netrom/af_netrom.c 		ret = nr_process_rx_frame(sk, skb);
skb               927 net/netrom/af_netrom.c 			nr_transmit_reset(skb, 1);
skb               934 net/netrom/af_netrom.c 	user = (ax25_address *)(skb->data + 21);
skb               938 net/netrom/af_netrom.c 		nr_transmit_refusal(skb, 0);
skb               946 net/netrom/af_netrom.c 	window = skb->data[20];
skb               949 net/netrom/af_netrom.c 	skb->sk             = make;
skb               950 net/netrom/af_netrom.c 	skb->destructor     = sock_efree;
skb               976 net/netrom/af_netrom.c 	if (skb->len == 37) {
skb               977 net/netrom/af_netrom.c 		timeout = skb->data[36] * 256 + skb->data[35];
skb               994 net/netrom/af_netrom.c 	skb_queue_head(&sk->sk_receive_queue, skb);
skb              1017 net/netrom/af_netrom.c 	struct sk_buff *skb;
skb              1073 net/netrom/af_netrom.c 	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
skb              1076 net/netrom/af_netrom.c 	skb_reserve(skb, size - len);
skb              1077 net/netrom/af_netrom.c 	skb_reset_transport_header(skb);
skb              1083 net/netrom/af_netrom.c 	asmptr = skb_push(skb, NR_TRANSPORT_LEN);
skb              1096 net/netrom/af_netrom.c 	skb_put(skb, len);
skb              1099 net/netrom/af_netrom.c 	if (memcpy_from_msg(skb_transport_header(skb), msg, len)) {
skb              1100 net/netrom/af_netrom.c 		kfree_skb(skb);
skb              1106 net/netrom/af_netrom.c 		kfree_skb(skb);
skb              1111 net/netrom/af_netrom.c 	nr_output(sk, skb);	/* Shove it onto the queue */
skb              1125 net/netrom/af_netrom.c 	struct sk_buff *skb;
skb              1140 net/netrom/af_netrom.c 	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
skb              1145 net/netrom/af_netrom.c 	skb_reset_transport_header(skb);
skb              1146 net/netrom/af_netrom.c 	copied     = skb->len;
skb              1153 net/netrom/af_netrom.c 	er = skb_copy_datagram_msg(skb, 0, msg, copied);
skb              1155 net/netrom/af_netrom.c 		skb_free_datagram(sk, skb);
skb              1163 net/netrom/af_netrom.c 		skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
skb              1168 net/netrom/af_netrom.c 	skb_free_datagram(sk, skb);
skb              1193 net/netrom/af_netrom.c 		struct sk_buff *skb;
skb              1198 net/netrom/af_netrom.c 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
skb              1199 net/netrom/af_netrom.c 			amount = skb->len;
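nr_recvmsg() above (af_netrom.c lines 1125-1168) follows the stock datagram receive sequence of this kernel generation: dequeue, truncate to the caller's buffer, copy, release. A minimal generic sketch; demo_recvmsg is an illustrative name:

	#include <linux/socket.h>
	#include <linux/skbuff.h>
	#include <net/sock.h>

	static int demo_recvmsg(struct sock *sk, struct msghdr *msg,
				size_t size, int flags)
	{
		struct sk_buff *skb;
		size_t copied;
		int err;

		skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
					flags & MSG_DONTWAIT, &err);
		if (!skb)
			return err;

		copied = skb->len;
		if (copied > size) {
			copied = size;			/* caller's buffer is smaller */
			msg->msg_flags |= MSG_TRUNC;
		}

		err = skb_copy_datagram_msg(skb, 0, msg, copied);
		skb_free_datagram(sk, skb);		/* always release the datagram */
		return err ? err : copied;
	}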
skb                40 net/netrom/nr_dev.c int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
skb                50 net/netrom/nr_dev.c 	stats->rx_bytes += skb->len;
skb                52 net/netrom/nr_dev.c 	skb->protocol = htons(ETH_P_IP);
skb                55 net/netrom/nr_dev.c 	skb->dev      = dev;
skb                56 net/netrom/nr_dev.c 	skb->mac_header = skb->network_header;
skb                57 net/netrom/nr_dev.c 	skb_reset_network_header(skb);
skb                58 net/netrom/nr_dev.c 	skb->pkt_type = PACKET_HOST;
skb                60 net/netrom/nr_dev.c 	netif_rx(skb);
skb                65 net/netrom/nr_dev.c static int nr_header(struct sk_buff *skb, struct net_device *dev,
skb                69 net/netrom/nr_dev.c 	unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
skb               139 net/netrom/nr_dev.c static netdev_tx_t nr_xmit(struct sk_buff *skb, struct net_device *dev)
skb               142 net/netrom/nr_dev.c 	unsigned int len = skb->len;
skb               144 net/netrom/nr_dev.c 	if (!nr_route_frame(skb, NULL)) {
skb               145 net/netrom/nr_dev.c 		kfree_skb(skb);
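nr_rx_ip() above hands a decapsulated frame to the IP stack: retag the buffer with the receiving device and protocol, realign the header offsets, then feed it to netif_rx(). A condensed sketch (demo_rx is illustrative):

	#include <linux/if_ether.h>
	#include <linux/if_packet.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int demo_rx(struct sk_buff *skb, struct net_device *dev)
	{
		skb->protocol = htons(ETH_P_IP);	/* payload is raw IP */
		skb->dev      = dev;
		skb->pkt_type = PACKET_HOST;
		skb_reset_network_header(skb);		/* IP header starts at data */

		return netif_rx(skb);
	}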
skb                29 net/netrom/nr_in.c static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
skb                31 net/netrom/nr_in.c 	struct sk_buff *skbo, *skbn = skb;
skb                34 net/netrom/nr_in.c 	skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
skb                39 net/netrom/nr_in.c 		nr->fraglen += skb->len;
skb                40 net/netrom/nr_in.c 		skb_queue_tail(&nr->frag_queue, skb);
skb                45 net/netrom/nr_in.c 		nr->fraglen += skb->len;
skb                46 net/netrom/nr_in.c 		skb_queue_tail(&nr->frag_queue, skb);
skb                71 net/netrom/nr_in.c static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
skb                80 net/netrom/nr_in.c 		nr->your_index = skb->data[17];
skb                81 net/netrom/nr_in.c 		nr->your_id    = skb->data[18];
skb                88 net/netrom/nr_in.c 		nr->window     = skb->data[20];
skb               115 net/netrom/nr_in.c static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
skb               146 net/netrom/nr_in.c static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype)
skb               155 net/netrom/nr_in.c 	nr = skb->data[18];
skb               156 net/netrom/nr_in.c 	ns = skb->data[17];
skb               227 net/netrom/nr_in.c 		skb_queue_head(&nrom->reseq_queue, skb);
skb               277 net/netrom/nr_in.c int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb)
skb               285 net/netrom/nr_in.c 	frametype = skb->data[19];
skb               289 net/netrom/nr_in.c 		queued = nr_state1_machine(sk, skb, frametype);
skb               292 net/netrom/nr_in.c 		queued = nr_state2_machine(sk, skb, frametype);
skb               295 net/netrom/nr_in.c 		queued = nr_state3_machine(sk, skb, frametype);
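nr_queue_rx_frame() above parks fragments on nr->frag_queue until the final piece arrives, then copies them back-to-back into one linear skb of nr->fraglen bytes. That reassembly step, sketched with the queue and total length passed explicitly (demo_reassemble is illustrative):

	#include <linux/skbuff.h>

	static struct sk_buff *demo_reassemble(struct sk_buff_head *frag_queue,
					       unsigned int fraglen)
	{
		struct sk_buff *skbn, *skbo;

		skbn = alloc_skb(fraglen, GFP_ATOMIC);
		if (!skbn)
			return NULL;

		/* drain in arrival order; each fragment is consumed */
		while ((skbo = skb_dequeue(frag_queue)) != NULL) {
			skb_copy_from_linear_data(skbo,
						  skb_put(skbn, skbo->len),
						  skbo->len);
			kfree_skb(skbo);
		}

		return skbn;
	}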
skb                30 net/netrom/nr_loopback.c int nr_loopback_queue(struct sk_buff *skb)
skb                34 net/netrom/nr_loopback.c 	if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) {
skb                35 net/netrom/nr_loopback.c 		skb_copy_from_linear_data(skb, skb_put(skbn, skb->len), skb->len);
skb                44 net/netrom/nr_loopback.c 	kfree_skb(skb);
skb                50 net/netrom/nr_loopback.c 	struct sk_buff *skb;
skb                54 net/netrom/nr_loopback.c 	if ((skb = skb_dequeue(&loopback_queue)) != NULL) {
skb                55 net/netrom/nr_loopback.c 		nr_dest = (ax25_address *)(skb->data + 7);
skb                59 net/netrom/nr_loopback.c 		if (dev == NULL || nr_rx_frame(skb, dev) == 0)
skb                60 net/netrom/nr_loopback.c 			kfree_skb(skb);
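nr_loopback_queue() above duplicates the outgoing frame into a GFP_ATOMIC copy, queues it, and lets a timer re-inject it through nr_rx_frame(); the caller's skb is consumed either way. Sketched (demo_loopback_queue is illustrative):

	#include <linux/skbuff.h>

	static int demo_loopback_queue(struct sk_buff_head *queue,
				       struct sk_buff *skb)
	{
		struct sk_buff *skbn = alloc_skb(skb->len, GFP_ATOMIC);

		if (skbn) {
			skb_copy_from_linear_data(skb, skb_put(skbn, skb->len),
						  skb->len);
			skb_queue_tail(queue, skbn);	/* timer drains later */
		}

		kfree_skb(skb);		/* original always consumed */
		return 1;
	}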
skb                32 net/netrom/nr_out.c void nr_output(struct sock *sk, struct sk_buff *skb)
skb                38 net/netrom/nr_out.c 	if (skb->len - NR_TRANSPORT_LEN > NR_MAX_PACKET_SIZE) {
skb                40 net/netrom/nr_out.c 		skb_copy_from_linear_data(skb, transport, NR_TRANSPORT_LEN);
skb                41 net/netrom/nr_out.c 		skb_pull(skb, NR_TRANSPORT_LEN);
skb                43 net/netrom/nr_out.c 		frontlen = skb_headroom(skb);
skb                45 net/netrom/nr_out.c 		while (skb->len > 0) {
skb                51 net/netrom/nr_out.c 			len = (NR_MAX_PACKET_SIZE > skb->len) ? skb->len : NR_MAX_PACKET_SIZE;
skb                54 net/netrom/nr_out.c 			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
skb                55 net/netrom/nr_out.c 			skb_pull(skb, len);
skb                61 net/netrom/nr_out.c 			if (skb->len > 0)
skb                67 net/netrom/nr_out.c 		kfree_skb(skb);
skb                69 net/netrom/nr_out.c 		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
skb                79 net/netrom/nr_out.c static void nr_send_iframe(struct sock *sk, struct sk_buff *skb)
skb                83 net/netrom/nr_out.c 	if (skb == NULL)
skb                86 net/netrom/nr_out.c 	skb->data[2] = nr->vs;
skb                87 net/netrom/nr_out.c 	skb->data[3] = nr->vr;
skb                90 net/netrom/nr_out.c 		skb->data[4] |= NR_CHOKE_FLAG;
skb                94 net/netrom/nr_out.c 	nr_transmit_buffer(sk, skb);
skb                99 net/netrom/nr_out.c 	struct sk_buff *skb, *skbn;
skb               102 net/netrom/nr_out.c 	if ((skb = skb_peek(&nr->ack_queue)) == NULL)
skb               105 net/netrom/nr_out.c 	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
skb               125 net/netrom/nr_out.c 	struct sk_buff *skb, *skbn;
skb               153 net/netrom/nr_out.c 	skb = skb_dequeue(&sk->sk_write_queue);
skb               156 net/netrom/nr_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb               157 net/netrom/nr_out.c 			skb_queue_head(&sk->sk_write_queue, skb);
skb               173 net/netrom/nr_out.c 		skb_queue_tail(&nr->ack_queue, skb);
skb               176 net/netrom/nr_out.c 		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
skb               185 net/netrom/nr_out.c void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
skb               193 net/netrom/nr_out.c 	dptr = skb_push(skb, NR_NETWORK_LEN);
skb               209 net/netrom/nr_out.c 	if (!nr_route_frame(skb, NULL)) {
skb               210 net/netrom/nr_out.c 		kfree_skb(skb);
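nr_output() above splits an oversized write into NR_MAX_PACKET_SIZE pieces, each copied into a fresh skb with the original headroom preserved for the headers pushed back later. The loop, sketched; DEMO_MAX_PACKET stands in for the NET/ROM constant and error handling is reduced to a bail-out:

	#include <linux/kernel.h>
	#include <linux/skbuff.h>
	#include <net/sock.h>

	#define DEMO_MAX_PACKET	236	/* stand-in for NR_MAX_PACKET_SIZE */

	static void demo_fragment(struct sock *sk, struct sk_buff *skb,
				  unsigned int frontlen)
	{
		int err;

		while (skb->len > 0) {
			int len = min_t(int, DEMO_MAX_PACKET, skb->len);
			struct sk_buff *skbn;

			skbn = sock_alloc_send_skb(sk, frontlen + len, 0, &err);
			if (!skbn)
				break;

			skb_reserve(skbn, frontlen);	/* room for headers */
			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
			skb_pull(skb, len);		/* bytes consumed */
			skb_queue_tail(&sk->sk_write_queue, skbn);
		}

		kfree_skb(skb);		/* everything has been copied out */
	}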
skb               748 net/netrom/nr_route.c int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
skb               760 net/netrom/nr_route.c 	nr_src  = (ax25_address *)(skb->data + 0);
skb               761 net/netrom/nr_route.c 	nr_dest = (ax25_address *)(skb->data + 7);
skb               773 net/netrom/nr_route.c 			ret = nr_loopback_queue(skb);
skb               775 net/netrom/nr_route.c 			ret = nr_rx_frame(skb, dev);
skb               784 net/netrom/nr_route.c 	if (skb->data[14] == 1) {
skb               810 net/netrom/nr_route.c 	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
skb               816 net/netrom/nr_route.c 	kfree_skb(skb);
skb               817 net/netrom/nr_route.c 	skb=skbn;
skb               818 net/netrom/nr_route.c 	skb->data[14]--;
skb               820 net/netrom/nr_route.c 	dptr  = skb_push(skb, 1);
skb               824 net/netrom/nr_route.c 	nr_neigh->ax25 = ax25_send_frame(skb, 256,
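The tail of nr_route_frame() above guarantees headroom for the next hop's hard header by trading the skb for an expanded copy. Sketched; on allocation failure the caller keeps the original (demo_expand is illustrative):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static struct sk_buff *demo_expand(struct sk_buff *skb,
					   struct net_device *dev)
	{
		struct sk_buff *skbn;

		if (skb_headroom(skb) >= dev->hard_header_len)
			return skb;	/* enough room already */

		skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC);
		if (!skbn)
			return NULL;	/* caller still owns skb */

		kfree_skb(skb);
		return skbn;
	}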
skb                49 net/netrom/nr_subr.c 	struct sk_buff *skb;
skb                56 net/netrom/nr_subr.c 			skb = skb_dequeue(&nrom->ack_queue);
skb                57 net/netrom/nr_subr.c 			kfree_skb(skb);
skb                70 net/netrom/nr_subr.c 	struct sk_buff *skb, *skb_prev = NULL;
skb                72 net/netrom/nr_subr.c 	while ((skb = skb_dequeue(&nr_sk(sk)->ack_queue)) != NULL) {
skb                74 net/netrom/nr_subr.c 			skb_queue_head(&sk->sk_write_queue, skb);
skb                76 net/netrom/nr_subr.c 			skb_append(skb_prev, skb, &sk->sk_write_queue);
skb                77 net/netrom/nr_subr.c 		skb_prev = skb;
skb               122 net/netrom/nr_subr.c 	struct sk_buff *skb;
skb               144 net/netrom/nr_subr.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
skb               150 net/netrom/nr_subr.c 	skb_reserve(skb, NR_NETWORK_LEN);
skb               152 net/netrom/nr_subr.c 	dptr = skb_put(skb, skb_tailroom(skb));
skb               205 net/netrom/nr_subr.c 	nr_transmit_buffer(sk, skb);
skb               211 net/netrom/nr_subr.c void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
skb               226 net/netrom/nr_subr.c 	skb_copy_from_linear_data_offset(skb, 7, dptr, AX25_ADDR_LEN);
skb               232 net/netrom/nr_subr.c 	skb_copy_from_linear_data(skb, dptr, AX25_ADDR_LEN);
skb               243 net/netrom/nr_subr.c 		*dptr++ = skb->data[15];
skb               244 net/netrom/nr_subr.c 		*dptr++ = skb->data[16];
skb               246 net/netrom/nr_subr.c 		*dptr++ = skb->data[15];
skb               247 net/netrom/nr_subr.c 		*dptr++ = skb->data[16];
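nr_requeue_frames() above (nr_subr.c line 70 onward) moves every unacknowledged frame from the ack queue back to the front of the write queue in its original order, tracking the previously re-inserted skb so skb_append() can chain behind it. Sketched (demo_requeue is illustrative):

	#include <linux/skbuff.h>
	#include <net/sock.h>

	static void demo_requeue(struct sock *sk, struct sk_buff_head *ack_queue)
	{
		struct sk_buff *skb, *skb_prev = NULL;

		while ((skb = skb_dequeue(ack_queue)) != NULL) {
			if (skb_prev == NULL)
				skb_queue_head(&sk->sk_write_queue, skb);
			else
				skb_append(skb_prev, skb, &sk->sk_write_queue);
			skb_prev = skb;
		}
	}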
skb               486 net/nfc/core.c int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
skb               492 net/nfc/core.c 		 dev_name(&dev->dev), target_idx, skb->len);
skb               498 net/nfc/core.c 		kfree_skb(skb);
skb               505 net/nfc/core.c 			kfree_skb(skb);
skb               512 net/nfc/core.c 		rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb,
skb               519 net/nfc/core.c 		rc = dev->ops->tm_send(dev, skb);
skb               522 net/nfc/core.c 		kfree_skb(skb);
skb               653 net/nfc/core.c int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb)
skb               657 net/nfc/core.c 		kfree_skb(skb);
skb               661 net/nfc/core.c 	return nfc_llcp_data_received(dev, skb);
skb               713 net/nfc/core.c 	struct sk_buff *skb;
skb               719 net/nfc/core.c 	skb = sock_alloc_send_skb(sk, total_size, flags & MSG_DONTWAIT, err);
skb               720 net/nfc/core.c 	if (skb)
skb               721 net/nfc/core.c 		skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
skb               723 net/nfc/core.c 	return skb;
skb               734 net/nfc/core.c 	struct sk_buff *skb;
skb               738 net/nfc/core.c 	skb = alloc_skb(total_size, gfp);
skb               740 net/nfc/core.c 	if (skb)
skb               741 net/nfc/core.c 		skb_reserve(skb, 1);
skb               743 net/nfc/core.c 	return skb;
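Both NFC allocators above follow one shape: size the buffer for payload plus the device's headroom and tailroom, then skb_reserve() the headroom so later skb_push() calls cannot underrun. Generic sketch (demo_alloc is illustrative):

	#include <linux/skbuff.h>

	static struct sk_buff *demo_alloc(unsigned int len, unsigned int headroom,
					  unsigned int tailroom, gfp_t gfp)
	{
		struct sk_buff *skb;

		skb = alloc_skb(len + headroom + tailroom, gfp);
		if (skb)
			skb_reserve(skb, headroom);	/* push space up front */

		return skb;
	}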
skb                49 net/nfc/digital.h 		     struct sk_buff *skb, struct digital_tg_mdaa_params *params,
skb                55 net/nfc/digital.h 				      struct sk_buff *skb, u16 timeout,
skb                59 net/nfc/digital.h 	return digital_send_cmd(ddev, DIGITAL_CMD_IN_SEND, skb, NULL, timeout,
skb                71 net/nfc/digital.h 				struct sk_buff *skb);
skb                73 net/nfc/digital.h 				struct sk_buff *skb);
skb                84 net/nfc/digital.h 			    struct nfc_target *target, struct sk_buff *skb,
skb                89 net/nfc/digital.h 			struct sk_buff *skb, u16 timeout,
skb                92 net/nfc/digital.h 	return digital_send_cmd(ddev, DIGITAL_CMD_TG_SEND, skb, NULL, timeout,
skb               112 net/nfc/digital.h int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb);
skb               125 net/nfc/digital.h void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init,
skb               128 net/nfc/digital.h static inline void digital_skb_add_crc_a(struct sk_buff *skb)
skb               130 net/nfc/digital.h 	digital_skb_add_crc(skb, crc_ccitt, CRC_A_INIT, 0, 0);
skb               133 net/nfc/digital.h static inline void digital_skb_add_crc_b(struct sk_buff *skb)
skb               135 net/nfc/digital.h 	digital_skb_add_crc(skb, crc_ccitt, CRC_B_INIT, 1, 0);
skb               138 net/nfc/digital.h static inline void digital_skb_add_crc_f(struct sk_buff *skb)
skb               140 net/nfc/digital.h 	digital_skb_add_crc(skb, crc_itu_t, CRC_F_INIT, 0, 1);
skb               143 net/nfc/digital.h static inline void digital_skb_add_crc_none(struct sk_buff *skb)
skb               148 net/nfc/digital.h int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func,
skb               151 net/nfc/digital.h static inline int digital_skb_check_crc_a(struct sk_buff *skb)
skb               153 net/nfc/digital.h 	return digital_skb_check_crc(skb, crc_ccitt, CRC_A_INIT, 0, 0);
skb               156 net/nfc/digital.h static inline int digital_skb_check_crc_b(struct sk_buff *skb)
skb               158 net/nfc/digital.h 	return digital_skb_check_crc(skb, crc_ccitt, CRC_B_INIT, 1, 0);
skb               161 net/nfc/digital.h static inline int digital_skb_check_crc_f(struct sk_buff *skb)
skb               163 net/nfc/digital.h 	return digital_skb_check_crc(skb, crc_itu_t, CRC_F_INIT, 0, 1);
skb               166 net/nfc/digital.h static inline int digital_skb_check_crc_none(struct sk_buff *skb)
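The inline wrappers above specialize one routine by CRC function, initial value, and the invert/swap flags. A sketch of the plain CRC_A case only (no invert, no swap), assuming the ISO/IEC 14443-3 initial value 0x6363 behind CRC_A_INIT:

	#include <linux/crc-ccitt.h>
	#include <linux/skbuff.h>

	static void demo_add_crc_a(struct sk_buff *skb)
	{
		u16 crc = crc_ccitt(0x6363, skb->data, skb->len);

		/* appended little-endian, as digital_skb_add_crc() does */
		skb_put_u8(skb, crc & 0xFF);
		skb_put_u8(skb, (crc >> 8) & 0xFF);
	}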
skb                45 net/nfc/digital_core.c 	struct sk_buff *skb;
skb                47 net/nfc/digital_core.c 	skb = alloc_skb(len + ddev->tx_headroom + ddev->tx_tailroom,
skb                49 net/nfc/digital_core.c 	if (skb)
skb                50 net/nfc/digital_core.c 		skb_reserve(skb, ddev->tx_headroom);
skb                52 net/nfc/digital_core.c 	return skb;
skb                55 net/nfc/digital_core.c void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init,
skb                60 net/nfc/digital_core.c 	crc = crc_func(init, skb->data, skb->len);
skb                68 net/nfc/digital_core.c 	skb_put_u8(skb, crc & 0xFF);
skb                69 net/nfc/digital_core.c 	skb_put_u8(skb, (crc >> 8) & 0xFF);
skb                72 net/nfc/digital_core.c int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func,
skb                78 net/nfc/digital_core.c 	if (skb->len <= 2)
skb                81 net/nfc/digital_core.c 	crc = crc_func(crc_init, skb->data, skb->len - 2);
skb                89 net/nfc/digital_core.c 	rc = (skb->data[skb->len - 2] - (crc & 0xFF)) +
skb                90 net/nfc/digital_core.c 	     (skb->data[skb->len - 1] - ((crc >> 8) & 0xFF));
skb                95 net/nfc/digital_core.c 	skb_trim(skb, skb->len - 2);
skb               228 net/nfc/digital_core.c 		     struct sk_buff *skb, struct digital_tg_mdaa_params *params,
skb               240 net/nfc/digital_core.c 	cmd->req = skb;
skb               311 net/nfc/digital_core.c 	int (*check_crc)(struct sk_buff *skb);
skb               312 net/nfc/digital_core.c 	void (*add_crc)(struct sk_buff *skb);
skb               648 net/nfc/digital_core.c static int digital_tg_send(struct nfc_dev *dev, struct sk_buff *skb)
skb               652 net/nfc/digital_core.c 	return digital_tg_send_dep_res(ddev, skb);
skb               694 net/nfc/digital_core.c 			   struct sk_buff *skb, data_exchange_cb_t cb,
skb               709 net/nfc/digital_core.c 		rc = digital_in_send_dep_req(ddev, target, skb, data_exch);
skb               715 net/nfc/digital_core.c 		rc = digital_in_iso_dep_push_sod(ddev, skb);
skb               720 net/nfc/digital_core.c 	ddev->skb_add_crc(skb);
skb               722 net/nfc/digital_core.c 	rc = digital_in_send_cmd(ddev, skb, 500, digital_in_send_complete,
skb               172 net/nfc/digital_dep.c 				     struct sk_buff *skb)
skb               174 net/nfc/digital_dep.c 	skb_push(skb, sizeof(u8));
skb               176 net/nfc/digital_dep.c 	skb->data[0] = skb->len;
skb               179 net/nfc/digital_dep.c 		*(u8 *)skb_push(skb, sizeof(u8)) = DIGITAL_NFC_DEP_NFCA_SOD_SB;
skb               183 net/nfc/digital_dep.c 				    struct sk_buff *skb)
skb               187 net/nfc/digital_dep.c 	if (skb->len < 2)
skb               191 net/nfc/digital_dep.c 		skb_pull(skb, sizeof(u8));
skb               193 net/nfc/digital_dep.c 	size = skb->data[0];
skb               194 net/nfc/digital_dep.c 	if (size != skb->len)
skb               197 net/nfc/digital_dep.c 	skb_pull(skb, sizeof(u8));
skb               203 net/nfc/digital_dep.c digital_send_dep_data_prep(struct nfc_digital_dev *ddev, struct sk_buff *skb,
skb               209 net/nfc/digital_dep.c 	if (skb->len > ddev->remote_payload_max) {
skb               220 net/nfc/digital_dep.c 		skb_put_data(new_skb, skb->data, ddev->remote_payload_max);
skb               221 net/nfc/digital_dep.c 		skb_pull(skb, ddev->remote_payload_max);
skb               223 net/nfc/digital_dep.c 		ddev->chaining_skb = skb;
skb               227 net/nfc/digital_dep.c 		new_skb = skb;
skb               365 net/nfc/digital_dep.c 	struct sk_buff *skb;
skb               370 net/nfc/digital_dep.c 	skb = digital_skb_alloc(ddev, sizeof(*psl_req));
skb               371 net/nfc/digital_dep.c 	if (!skb)
skb               374 net/nfc/digital_dep.c 	skb_put(skb, sizeof(*psl_req));
skb               376 net/nfc/digital_dep.c 	psl_req = (struct digital_psl_req *)skb->data;
skb               390 net/nfc/digital_dep.c 	digital_skb_push_dep_sod(ddev, skb);
skb               392 net/nfc/digital_dep.c 	ddev->skb_add_crc(skb);
skb               394 net/nfc/digital_dep.c 	rc = digital_in_send_cmd(ddev, skb, ddev->dep_rwt,
skb               397 net/nfc/digital_dep.c 		kfree_skb(skb);
skb               478 net/nfc/digital_dep.c 	struct sk_buff *skb;
skb               491 net/nfc/digital_dep.c 	skb = digital_skb_alloc(ddev, size);
skb               492 net/nfc/digital_dep.c 	if (!skb)
skb               495 net/nfc/digital_dep.c 	skb_put(skb, sizeof(struct digital_atr_req));
skb               497 net/nfc/digital_dep.c 	atr_req = (struct digital_atr_req *)skb->data;
skb               517 net/nfc/digital_dep.c 		skb_put_data(skb, gb, gb_len);
skb               520 net/nfc/digital_dep.c 	digital_skb_push_dep_sod(ddev, skb);
skb               522 net/nfc/digital_dep.c 	ddev->skb_add_crc(skb);
skb               524 net/nfc/digital_dep.c 	rc = digital_in_send_cmd(ddev, skb, DIGITAL_ATR_RES_RWT,
skb               527 net/nfc/digital_dep.c 		kfree_skb(skb);
skb               536 net/nfc/digital_dep.c 	struct sk_buff *skb;
skb               539 net/nfc/digital_dep.c 	skb = digital_skb_alloc(ddev, 1);
skb               540 net/nfc/digital_dep.c 	if (!skb)
skb               543 net/nfc/digital_dep.c 	skb_push(skb, sizeof(struct digital_dep_req_res));
skb               545 net/nfc/digital_dep.c 	dep_req = (struct digital_dep_req_res *)skb->data;
skb               552 net/nfc/digital_dep.c 	digital_skb_push_dep_sod(ddev, skb);
skb               554 net/nfc/digital_dep.c 	ddev->skb_add_crc(skb);
skb               556 net/nfc/digital_dep.c 	ddev->saved_skb = pskb_copy(skb, GFP_KERNEL);
skb               558 net/nfc/digital_dep.c 	rc = digital_in_send_cmd(ddev, skb, ddev->dep_rwt,
skb               561 net/nfc/digital_dep.c 		kfree_skb(skb);
skb               573 net/nfc/digital_dep.c 	struct sk_buff *skb;
skb               576 net/nfc/digital_dep.c 	skb = digital_skb_alloc(ddev, 1);
skb               577 net/nfc/digital_dep.c 	if (!skb)
skb               580 net/nfc/digital_dep.c 	skb_push(skb, sizeof(struct digital_dep_req_res));
skb               582 net/nfc/digital_dep.c 	dep_req = (struct digital_dep_req_res *)skb->data;
skb               589 net/nfc/digital_dep.c 	digital_skb_push_dep_sod(ddev, skb);
skb               591 net/nfc/digital_dep.c 	ddev->skb_add_crc(skb);
skb               593 net/nfc/digital_dep.c 	rc = digital_in_send_cmd(ddev, skb, ddev->dep_rwt,
skb               596 net/nfc/digital_dep.c 		kfree_skb(skb);
skb               605 net/nfc/digital_dep.c 	struct sk_buff *skb;
skb               608 net/nfc/digital_dep.c 	skb = digital_skb_alloc(ddev, 1);
skb               609 net/nfc/digital_dep.c 	if (!skb)
skb               612 net/nfc/digital_dep.c 	skb_push(skb, sizeof(struct digital_dep_req_res));
skb               614 net/nfc/digital_dep.c 	dep_req = (struct digital_dep_req_res *)skb->data;
skb               620 net/nfc/digital_dep.c 	digital_skb_push_dep_sod(ddev, skb);
skb               622 net/nfc/digital_dep.c 	ddev->skb_add_crc(skb);
skb               624 net/nfc/digital_dep.c 	rc = digital_in_send_cmd(ddev, skb, ddev->dep_rwt,
skb               627 net/nfc/digital_dep.c 		kfree_skb(skb);
skb               636 net/nfc/digital_dep.c 	struct sk_buff *skb;
skb               644 net/nfc/digital_dep.c 	skb = digital_skb_alloc(ddev, 1);
skb               645 net/nfc/digital_dep.c 	if (!skb)
skb               648 net/nfc/digital_dep.c 	skb_put_u8(skb, rtox);
skb               650 net/nfc/digital_dep.c 	skb_push(skb, sizeof(struct digital_dep_req_res));
skb               652 net/nfc/digital_dep.c 	dep_req = (struct digital_dep_req_res *)skb->data;
skb               659 net/nfc/digital_dep.c 	digital_skb_push_dep_sod(ddev, skb);
skb               661 net/nfc/digital_dep.c 	ddev->skb_add_crc(skb);
skb               663 net/nfc/digital_dep.c 	rc = digital_in_send_cmd(ddev, skb, rwt_int,
skb               666 net/nfc/digital_dep.c 		kfree_skb(skb);
skb               908 net/nfc/digital_dep.c 			    struct nfc_target *target, struct sk_buff *skb,
skb               915 net/nfc/digital_dep.c 	skb_push(skb, sizeof(struct digital_dep_req_res));
skb               917 net/nfc/digital_dep.c 	dep_req = (struct digital_dep_req_res *)skb->data;
skb               928 net/nfc/digital_dep.c 	tmp_skb = digital_send_dep_data_prep(ddev, skb, dep_req, data_exch);
skb               941 net/nfc/digital_dep.c 		if (tmp_skb != skb)
skb               985 net/nfc/digital_dep.c 	struct sk_buff *skb;
skb               988 net/nfc/digital_dep.c 	skb = digital_skb_alloc(ddev, 1);
skb               989 net/nfc/digital_dep.c 	if (!skb)
skb               992 net/nfc/digital_dep.c 	skb_push(skb, sizeof(struct digital_dep_req_res));
skb               994 net/nfc/digital_dep.c 	dep_res = (struct digital_dep_req_res *)skb->data;
skb              1004 net/nfc/digital_dep.c 		skb_put_data(skb, &ddev->did, sizeof(ddev->did));
skb              1010 net/nfc/digital_dep.c 	digital_skb_push_dep_sod(ddev, skb);
skb              1012 net/nfc/digital_dep.c 	ddev->skb_add_crc(skb);
skb              1014 net/nfc/digital_dep.c 	ddev->saved_skb = pskb_copy(skb, GFP_KERNEL);
skb              1016 net/nfc/digital_dep.c 	rc = digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req,
skb              1019 net/nfc/digital_dep.c 		kfree_skb(skb);
skb              1030 net/nfc/digital_dep.c 	struct sk_buff *skb;
skb              1033 net/nfc/digital_dep.c 	skb = digital_skb_alloc(ddev, 1);
skb              1034 net/nfc/digital_dep.c 	if (!skb)
skb              1037 net/nfc/digital_dep.c 	skb_push(skb, sizeof(struct digital_dep_req_res));
skb              1039 net/nfc/digital_dep.c 	dep_res = (struct digital_dep_req_res *)skb->data;
skb              1048 net/nfc/digital_dep.c 		skb_put_data(skb, &ddev->did, sizeof(ddev->did));
skb              1051 net/nfc/digital_dep.c 	digital_skb_push_dep_sod(ddev, skb);
skb              1053 net/nfc/digital_dep.c 	ddev->skb_add_crc(skb);
skb              1055 net/nfc/digital_dep.c 	rc = digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req,
skb              1058 net/nfc/digital_dep.c 		kfree_skb(skb);
skb              1298 net/nfc/digital_dep.c int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb)
skb              1304 net/nfc/digital_dep.c 	skb_push(skb, sizeof(struct digital_dep_req_res));
skb              1306 net/nfc/digital_dep.c 	dep_res = (struct digital_dep_req_res *)skb->data;
skb              1315 net/nfc/digital_dep.c 		skb_put_data(skb, &ddev->did, sizeof(ddev->did));
skb              1323 net/nfc/digital_dep.c 	tmp_skb = digital_send_dep_data_prep(ddev, skb, dep_res, NULL);
skb              1336 net/nfc/digital_dep.c 		if (tmp_skb != skb)
skb              1370 net/nfc/digital_dep.c 	struct sk_buff *skb;
skb              1373 net/nfc/digital_dep.c 	skb = digital_skb_alloc(ddev, sizeof(struct digital_psl_res));
skb              1374 net/nfc/digital_dep.c 	if (!skb)
skb              1377 net/nfc/digital_dep.c 	skb_put(skb, sizeof(struct digital_psl_res));
skb              1379 net/nfc/digital_dep.c 	psl_res = (struct digital_psl_res *)skb->data;
skb              1385 net/nfc/digital_dep.c 	digital_skb_push_dep_sod(ddev, skb);
skb              1387 net/nfc/digital_dep.c 	ddev->skb_add_crc(skb);
skb              1391 net/nfc/digital_dep.c 	rc = digital_tg_send_cmd(ddev, skb, 0, digital_tg_send_psl_res_complete,
skb              1394 net/nfc/digital_dep.c 		kfree_skb(skb);
skb              1494 net/nfc/digital_dep.c 	struct sk_buff *skb;
skb              1503 net/nfc/digital_dep.c 	skb = digital_skb_alloc(ddev, sizeof(struct digital_atr_res) + gb_len);
skb              1504 net/nfc/digital_dep.c 	if (!skb)
skb              1507 net/nfc/digital_dep.c 	skb_put(skb, sizeof(struct digital_atr_res));
skb              1508 net/nfc/digital_dep.c 	atr_res = (struct digital_atr_res *)skb->data;
skb              1522 net/nfc/digital_dep.c 		skb_put(skb, gb_len);
skb              1528 net/nfc/digital_dep.c 	digital_skb_push_dep_sod(ddev, skb);
skb              1530 net/nfc/digital_dep.c 	ddev->skb_add_crc(skb);
skb              1534 net/nfc/digital_dep.c 	rc = digital_tg_send_cmd(ddev, skb, 999,
skb              1537 net/nfc/digital_dep.c 		kfree_skb(skb);
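Nearly every sender in digital_dep.c above repeats one choreography: allocate via digital_skb_alloc(), push the DEP header in front of the payload, prepend the start-of-data byte, append the technology CRC, and submit with a response window. Sketched as it would sit inside that file (where digital_skb_push_dep_sod() and struct digital_dep_req_res are visible); the header fill is elided and demo_recv_dep_res is a hypothetical nfc_digital_cmd_complete_t callback:

	static void demo_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
				      struct sk_buff *resp)
	{
		/* handle the response (or error-encoded skb) here */
	}

	static int demo_send_dep_req(struct nfc_digital_dev *ddev)
	{
		struct sk_buff *skb;
		int rc;

		skb = digital_skb_alloc(ddev, 1);	/* payload-sized */
		if (!skb)
			return -ENOMEM;

		skb_push(skb, sizeof(struct digital_dep_req_res));
		/* ... fill the DEP header fields here ... */

		digital_skb_push_dep_sod(ddev, skb);	/* NFC-A SoD byte */
		ddev->skb_add_crc(skb);			/* technology CRC */

		rc = digital_in_send_cmd(ddev, skb, ddev->dep_rwt,
					 demo_recv_dep_res, NULL);
		if (rc)
			kfree_skb(skb);		/* send path did not take it */

		return rc;
	}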
skb               168 net/nfc/digital_technology.c 				struct sk_buff *skb)
skb               173 net/nfc/digital_technology.c 	if (skb->len < 1)
skb               176 net/nfc/digital_technology.c 	pcb = *skb->data;
skb               190 net/nfc/digital_technology.c 	skb_pull(skb, 1);
skb               196 net/nfc/digital_technology.c 				struct sk_buff *skb)
skb               202 net/nfc/digital_technology.c 	if (skb->len + 3 > ddev->target_fsc)
skb               205 net/nfc/digital_technology.c 	skb_push(skb, 1);
skb               207 net/nfc/digital_technology.c 	*skb->data = DIGITAL_ISO_DEP_I_PCB | ddev->curr_nfc_dep_pni;
skb               255 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb               257 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, 2);
skb               258 net/nfc/digital_technology.c 	if (!skb)
skb               261 net/nfc/digital_technology.c 	skb_put_u8(skb, DIGITAL_RATS_BYTE1);
skb               262 net/nfc/digital_technology.c 	skb_put_u8(skb, DIGITAL_RATS_PARAM);
skb               264 net/nfc/digital_technology.c 	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_ats,
skb               267 net/nfc/digital_technology.c 		kfree_skb(skb);
skb               345 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb               350 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, sizeof(struct digital_sel_req));
skb               351 net/nfc/digital_technology.c 	if (!skb)
skb               354 net/nfc/digital_technology.c 	skb_put(skb, sizeof(struct digital_sel_req));
skb               355 net/nfc/digital_technology.c 	sel_req = (struct digital_sel_req *)skb->data;
skb               375 net/nfc/digital_technology.c 		digital_skb_add_crc_a(skb);
skb               378 net/nfc/digital_technology.c 	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sel_res,
skb               382 net/nfc/digital_technology.c 		kfree_skb(skb);
skb               446 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb               454 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, 2);
skb               455 net/nfc/digital_technology.c 	if (!skb)
skb               465 net/nfc/digital_technology.c 	skb_put_u8(skb, sel_cmd);
skb               466 net/nfc/digital_technology.c 	skb_put_u8(skb, DIGITAL_SDD_REQ_SEL_PAR);
skb               468 net/nfc/digital_technology.c 	return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
skb               519 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb               532 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, 1);
skb               533 net/nfc/digital_technology.c 	if (!skb)
skb               536 net/nfc/digital_technology.c 	skb_put_u8(skb, DIGITAL_CMD_SENS_REQ);
skb               538 net/nfc/digital_technology.c 	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sens_res, NULL);
skb               540 net/nfc/digital_technology.c 		kfree_skb(skb);
skb               613 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb               616 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, sizeof(*attrib_req));
skb               617 net/nfc/digital_technology.c 	if (!skb)
skb               620 net/nfc/digital_technology.c 	attrib_req = skb_put(skb, sizeof(*attrib_req));
skb               633 net/nfc/digital_technology.c 	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_attrib_res,
skb               636 net/nfc/digital_technology.c 		kfree_skb(skb);
skb               707 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb               720 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, sizeof(*sensb_req));
skb               721 net/nfc/digital_technology.c 	if (!skb)
skb               724 net/nfc/digital_technology.c 	sensb_req = skb_put(skb, sizeof(*sensb_req));
skb               730 net/nfc/digital_technology.c 	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sensb_res,
skb               733 net/nfc/digital_technology.c 		kfree_skb(skb);
skb               795 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb               810 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, size);
skb               811 net/nfc/digital_technology.c 	if (!skb)
skb               814 net/nfc/digital_technology.c 	skb_put(skb, size);
skb               816 net/nfc/digital_technology.c 	sensf_req = (struct digital_sensf_req *)skb->data;
skb               823 net/nfc/digital_technology.c 	*(u8 *)skb_push(skb, 1) = size + 1;
skb               826 net/nfc/digital_technology.c 		digital_skb_add_crc_f(skb);
skb               828 net/nfc/digital_technology.c 	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sensf_res,
skb               831 net/nfc/digital_technology.c 		kfree_skb(skb);
skb               886 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb               899 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, sizeof(*req));
skb               900 net/nfc/digital_technology.c 	if (!skb)
skb               903 net/nfc/digital_technology.c 	skb_put(skb, sizeof(*req) - sizeof(req->mask)); /* No mask */
skb               904 net/nfc/digital_technology.c 	req = (struct digital_iso15693_inv_req *)skb->data;
skb               915 net/nfc/digital_technology.c 	rc = digital_in_send_cmd(ddev, skb, 30,
skb               918 net/nfc/digital_technology.c 		kfree_skb(skb);
skb               925 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb               928 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, 1);
skb               929 net/nfc/digital_technology.c 	if (!skb)
skb               932 net/nfc/digital_technology.c 	skb_put_u8(skb, DIGITAL_SEL_RES_NFC_DEP);
skb               935 net/nfc/digital_technology.c 		digital_skb_add_crc_a(skb);
skb               940 net/nfc/digital_technology.c 		kfree_skb(skb);
skb               944 net/nfc/digital_technology.c 	rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_atr_req,
skb               947 net/nfc/digital_technology.c 		kfree_skb(skb);
skb               984 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb               988 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, sizeof(struct digital_sdd_res));
skb               989 net/nfc/digital_technology.c 	if (!skb)
skb               992 net/nfc/digital_technology.c 	skb_put(skb, sizeof(struct digital_sdd_res));
skb               993 net/nfc/digital_technology.c 	sdd_res = (struct digital_sdd_res *)skb->data;
skb              1005 net/nfc/digital_technology.c 		kfree_skb(skb);
skb              1009 net/nfc/digital_technology.c 	rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sel_req,
skb              1012 net/nfc/digital_technology.c 		kfree_skb(skb);
skb              1048 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb              1052 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, 2);
skb              1053 net/nfc/digital_technology.c 	if (!skb)
skb              1056 net/nfc/digital_technology.c 	sens_res = skb_put(skb, 2);
skb              1064 net/nfc/digital_technology.c 		kfree_skb(skb);
skb              1068 net/nfc/digital_technology.c 	rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sdd_req,
skb              1071 net/nfc/digital_technology.c 		kfree_skb(skb);
skb              1120 net/nfc/digital_technology.c 	struct sk_buff *skb;
skb              1130 net/nfc/digital_technology.c 	skb = digital_skb_alloc(ddev, size);
skb              1131 net/nfc/digital_technology.c 	if (!skb)
skb              1134 net/nfc/digital_technology.c 	skb_put(skb, size);
skb              1136 net/nfc/digital_technology.c 	sensf_res = (struct digital_sensf_res *)skb->data;
skb              1156 net/nfc/digital_technology.c 	*(u8 *)skb_push(skb, sizeof(u8)) = size + 1;
skb              1159 net/nfc/digital_technology.c 		digital_skb_add_crc_f(skb);
skb              1161 net/nfc/digital_technology.c 	rc = digital_tg_send_cmd(ddev, skb, 300,
skb              1164 net/nfc/digital_technology.c 		kfree_skb(skb);
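The SENSF frames above are length-prefixed with a single byte that counts itself, which is why the source writes size + 1 after the skb_push(); CRC_F is then appended unless the driver adds CRCs in hardware. The framing step, sketched (demo_frame_sensf is illustrative):

	static void demo_frame_sensf(struct sk_buff *skb)
	{
		u8 len = skb->len + 1;		/* prefix byte counts itself */

		*(u8 *)skb_push(skb, 1) = len;
		digital_skb_add_crc_f(skb);	/* skipped when HW adds CRC */
	}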
skb                39 net/nfc/hci/command.c static void nfc_hci_execute_cb(void *context, struct sk_buff *skb, int err)
skb                47 net/nfc/hci/command.c 		hcp_ew->result_skb = skb;
skb                49 net/nfc/hci/command.c 		kfree_skb(skb);
skb                57 net/nfc/hci/command.c 			       struct sk_buff **skb)
skb                82 net/nfc/hci/command.c 		if (skb)
skb                83 net/nfc/hci/command.c 			*skb = hcp_ew.result_skb;
skb               113 net/nfc/hci/command.c 		     const u8 *param, size_t param_len, struct sk_buff **skb)
skb               123 net/nfc/hci/command.c 	return nfc_hci_execute_cmd(hdev, pipe, cmd, param, param_len, skb);
skb               177 net/nfc/hci/command.c 		      struct sk_buff **skb)
skb               182 net/nfc/hci/command.c 				&idx, 1, skb);
skb               188 net/nfc/hci/command.c 	struct sk_buff *skb;
skb               194 net/nfc/hci/command.c 				NULL, 0, &skb);
skb               200 net/nfc/hci/command.c 		kfree_skb(skb);
skb               217 net/nfc/hci/command.c 	struct sk_buff *skb;
skb               230 net/nfc/hci/command.c 				      (u8 *) &params, sizeof(params), &skb);
skb               234 net/nfc/hci/command.c 	resp = (struct hci_create_pipe_resp *)skb->data;
skb               236 net/nfc/hci/command.c 	kfree_skb(skb);
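nfc_hci_execute_cb() and nfc_hci_execute_cmd() above turn the asynchronous HCP send into a synchronous call: the caller parks on a completion, and the callback stores the result skb (or frees it on error) before waking the waiter. The core of that wrapper, sketched with illustrative demo_ names:

	#include <linux/completion.h>
	#include <linux/skbuff.h>

	struct demo_ew {
		struct completion done;
		struct sk_buff *result_skb;
		int exec_result;
	};

	static void demo_execute_cb(void *context, struct sk_buff *skb, int err)
	{
		struct demo_ew *ew = context;

		ew->exec_result = err;
		if (err == 0)
			ew->result_skb = skb;	/* handed to the waiter */
		else
			kfree_skb(skb);

		complete(&ew->done);		/* wake the synchronous caller */
	}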
skb                68 net/nfc/hci/core.c 	struct sk_buff *skb;
skb                98 net/nfc/hci/core.c 	while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) {
skb                99 net/nfc/hci/core.c 		r = nfc_llc_xmit_from_hci(hdev->llc, skb);
skb               101 net/nfc/hci/core.c 			kfree_skb(skb);
skb               130 net/nfc/hci/core.c 	struct sk_buff *skb;
skb               136 net/nfc/hci/core.c 	while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
skb               137 net/nfc/hci/core.c 		pipe = skb->data[0];
skb               138 net/nfc/hci/core.c 		skb_pull(skb, NFC_HCI_HCP_PACKET_HEADER_LEN);
skb               139 net/nfc/hci/core.c 		message = (struct hcp_message *)skb->data;
skb               142 net/nfc/hci/core.c 		skb_pull(skb, NFC_HCI_HCP_MESSAGE_HEADER_LEN);
skb               144 net/nfc/hci/core.c 		nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, skb);
skb               149 net/nfc/hci/core.c 				     struct sk_buff *skb)
skb               155 net/nfc/hci/core.c 					  skb, err);
skb               157 net/nfc/hci/core.c 		kfree_skb(skb);
skb               166 net/nfc/hci/core.c 			   struct sk_buff *skb)
skb               171 net/nfc/hci/core.c 		kfree_skb(skb);
skb               175 net/nfc/hci/core.c 	__nfc_hci_cmd_completion(hdev, nfc_hci_result_to_errno(result), skb);
skb               182 net/nfc/hci/core.c 			  struct sk_buff *skb)
skb               201 net/nfc/hci/core.c 		if (skb->len != 5) {
skb               205 net/nfc/hci/core.c 		create_info = (struct hci_create_pipe_resp *)skb->data;
skb               229 net/nfc/hci/core.c 		if (skb->len != 1) {
skb               233 net/nfc/hci/core.c 		delete_info = (struct hci_delete_pipe_noti *)skb->data;
skb               244 net/nfc/hci/core.c 		if (skb->len != 1) {
skb               248 net/nfc/hci/core.c 		cleared_info = (struct hci_all_pipe_cleared_noti *)skb->data;
skb               258 net/nfc/hci/core.c 		hdev->ops->cmd_received(hdev, pipe, cmd, skb);
skb               264 net/nfc/hci/core.c 	kfree_skb(skb);
skb               382 net/nfc/hci/core.c 			    struct sk_buff *skb)
skb               399 net/nfc/hci/core.c 		r = hdev->ops->event_received(hdev, pipe, event, skb);
skb               406 net/nfc/hci/core.c 		if (skb->len < 1) {	/* no status data? */
skb               411 net/nfc/hci/core.c 		if (skb->data[0] == 3) {
skb               421 net/nfc/hci/core.c 		if (skb->data[0] != 0) {
skb               435 net/nfc/hci/core.c 	kfree_skb(skb);
skb               466 net/nfc/hci/core.c 	struct sk_buff *skb = NULL;
skb               479 net/nfc/hci/core.c 			      NFC_HCI_ADMIN_SESSION_IDENTITY, &skb);
skb               483 net/nfc/hci/core.c 	if (skb->len && skb->len == strlen(hdev->init_data.session_id) &&
skb               484 net/nfc/hci/core.c 		(memcmp(hdev->init_data.session_id, skb->data,
skb               485 net/nfc/hci/core.c 			   skb->len) == 0) && hdev->ops->load_session) {
skb               515 net/nfc/hci/core.c 	kfree_skb(skb);
skb               523 net/nfc/hci/core.c 	struct sk_buff *skb;
skb               526 net/nfc/hci/core.c 			      NFC_HCI_ID_MGMT_VERSION_SW, &skb);
skb               534 net/nfc/hci/core.c 	if (skb->len != 3) {
skb               535 net/nfc/hci/core.c 		kfree_skb(skb);
skb               539 net/nfc/hci/core.c 	hdev->sw_romlib = (skb->data[0] & 0xf0) >> 4;
skb               540 net/nfc/hci/core.c 	hdev->sw_patch = skb->data[0] & 0x0f;
skb               541 net/nfc/hci/core.c 	hdev->sw_flashlib_major = skb->data[1];
skb               542 net/nfc/hci/core.c 	hdev->sw_flashlib_minor = skb->data[2];
skb               544 net/nfc/hci/core.c 	kfree_skb(skb);
skb               547 net/nfc/hci/core.c 			      NFC_HCI_ID_MGMT_VERSION_HW, &skb);
skb               551 net/nfc/hci/core.c 	if (skb->len != 3) {
skb               552 net/nfc/hci/core.c 		kfree_skb(skb);
skb               556 net/nfc/hci/core.c 	hdev->hw_derivative = (skb->data[0] & 0xe0) >> 5;
skb               557 net/nfc/hci/core.c 	hdev->hw_version = skb->data[0] & 0x1f;
skb               558 net/nfc/hci/core.c 	hdev->hw_mpw = (skb->data[1] & 0xc0) >> 6;
skb               559 net/nfc/hci/core.c 	hdev->hw_software = skb->data[1] & 0x3f;
skb               560 net/nfc/hci/core.c 	hdev->hw_bsid = skb->data[2];
skb               562 net/nfc/hci/core.c 	kfree_skb(skb);
skb               699 net/nfc/hci/core.c static void hci_transceive_cb(void *context, struct sk_buff *skb, int err)
skb               711 net/nfc/hci/core.c 			skb_trim(skb, skb->len - 1); /* RF Err ind */
skb               713 net/nfc/hci/core.c 		hdev->async_cb(hdev->async_cb_context, skb, err);
skb               717 net/nfc/hci/core.c 			kfree_skb(skb);
skb               723 net/nfc/hci/core.c 			  struct sk_buff *skb, data_exchange_cb_t cb,
skb               735 net/nfc/hci/core.c 			r = hdev->ops->im_transceive(hdev, target, skb, cb,
skb               741 net/nfc/hci/core.c 		*(u8 *)skb_push(skb, 1) = 0;	/* CTR, see spec:10.2.2.1 */
skb               748 net/nfc/hci/core.c 					   NFC_HCI_WR_XCHG_DATA, skb->data,
skb               749 net/nfc/hci/core.c 					   skb->len, hci_transceive_cb, hdev);
skb               753 net/nfc/hci/core.c 			r = hdev->ops->im_transceive(hdev, target, skb, cb,
skb               763 net/nfc/hci/core.c 	kfree_skb(skb);
skb               768 net/nfc/hci/core.c static int hci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
skb               773 net/nfc/hci/core.c 		kfree_skb(skb);
skb               777 net/nfc/hci/core.c 	return hdev->ops->tm_send(hdev, skb);
skb               854 net/nfc/hci/core.c static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb)
skb               864 net/nfc/hci/core.c 	packet = (struct hcp_packet *)skb->data;
skb               866 net/nfc/hci/core.c 		skb_queue_tail(&hdev->rx_hcp_frags, skb);
skb               873 net/nfc/hci/core.c 		skb_queue_tail(&hdev->rx_hcp_frags, skb);
skb               900 net/nfc/hci/core.c 		hcp_skb = skb;
skb              1085 net/nfc/hci/core.c void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
skb              1087 net/nfc/hci/core.c 	nfc_llc_rcv_from_drv(hdev->llc, skb);
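The dispatch loop in nfc_hci_msg_rx_work() above (core.c lines 130-144) peels the one-byte HCP packet header to learn the pipe, then the one-byte message header for type and instruction. Sketched as if compiled inside net/nfc/hci/, where the hcp.h header lengths and nfc_hci_hcp_message_rx() are visible; the bitfield accessors are written out explicitly (top two bits type, low six instruction, per the HCP framing):

	static void demo_msg_rx(struct nfc_hci_dev *hdev)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
			u8 pipe, type, instruction;

			pipe = skb->data[0];
			skb_pull(skb, NFC_HCI_HCP_PACKET_HEADER_LEN);

			type        = (skb->data[0] & 0xc0) >> 6;
			instruction = skb->data[0] & 0x3f;
			skb_pull(skb, NFC_HCI_HCP_MESSAGE_HEADER_LEN);

			nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, skb);
		}
	}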
skb                77 net/nfc/hci/hci.h 			    u8 instruction, struct sk_buff *skb);
skb                46 net/nfc/hci/hcp.c 		struct sk_buff *skb;
skb                61 net/nfc/hci/hcp.c 		skb = alloc_skb(skb_len, GFP_KERNEL);
skb                62 net/nfc/hci/hcp.c 		if (skb == NULL) {
skb                66 net/nfc/hci/hcp.c 		skb_reserve(skb, ndev->tx_headroom);
skb                68 net/nfc/hci/hcp.c 		skb_put(skb, NFC_HCI_HCP_PACKET_HEADER_LEN + data_link_len);
skb                71 net/nfc/hci/hcp.c 		packet = (struct hcp_packet *)skb->data;
skb                90 net/nfc/hci/hcp.c 		skb_queue_tail(&cmd->msg_frags, skb);
skb               120 net/nfc/hci/hcp.c 			    u8 instruction, struct sk_buff *skb)
skb               124 net/nfc/hci/hcp.c 		nfc_hci_resp_received(hdev, instruction, skb);
skb               127 net/nfc/hci/hcp.c 		nfc_hci_cmd_received(hdev, pipe, instruction, skb);
skb               130 net/nfc/hci/hcp.c 		nfc_hci_event_received(hdev, pipe, instruction, skb);
skb               135 net/nfc/hci/hcp.c 		kfree_skb(skb);
skb               137 net/nfc/hci/llc.c void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
skb               139 net/nfc/hci/llc.c 	llc->ops->rcv_from_drv(llc, skb);
skb               142 net/nfc/hci/llc.c int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
skb               144 net/nfc/hci/llc.c 	return llc->ops->xmit_from_hci(llc, skb);
skb                23 net/nfc/hci/llc.h 	void (*rcv_from_drv) (struct nfc_llc *llc, struct sk_buff *skb);
skb                24 net/nfc/hci/llc.h 	int (*xmit_from_hci) (struct nfc_llc *llc, struct sk_buff *skb);
skb                60 net/nfc/hci/llc_nop.c static void llc_nop_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
skb                64 net/nfc/hci/llc_nop.c 	llc_nop->rcv_to_hci(llc_nop->hdev, skb);
skb                67 net/nfc/hci/llc_nop.c static int llc_nop_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
skb                71 net/nfc/hci/llc_nop.c 	return llc_nop->xmit_to_drv(llc_nop->hdev, skb);
skb               101 net/nfc/hci/llc_shdlc.c #define SHDLC_DUMP_SKB(info, skb)				  \
skb               105 net/nfc/hci/llc_shdlc.c 		       16, 1, skb->data, skb->len, 0);		  \
skb               129 net/nfc/hci/llc_shdlc.c 	struct sk_buff *skb;
skb               131 net/nfc/hci/llc_shdlc.c 	skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM +
skb               133 net/nfc/hci/llc_shdlc.c 	if (skb)
skb               134 net/nfc/hci/llc_shdlc.c 		skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM);
skb               136 net/nfc/hci/llc_shdlc.c 	return skb;
skb               144 net/nfc/hci/llc_shdlc.c 	struct sk_buff *skb;
skb               148 net/nfc/hci/llc_shdlc.c 	skb = llc_shdlc_alloc_skb(shdlc, 0);
skb               149 net/nfc/hci/llc_shdlc.c 	if (skb == NULL)
skb               152 net/nfc/hci/llc_shdlc.c 	*(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
skb               154 net/nfc/hci/llc_shdlc.c 	r = shdlc->xmit_to_drv(shdlc->hdev, skb);
skb               156 net/nfc/hci/llc_shdlc.c 	kfree_skb(skb);
skb               163 net/nfc/hci/llc_shdlc.c 				  struct sk_buff *skb,
skb               170 net/nfc/hci/llc_shdlc.c 	*(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
skb               172 net/nfc/hci/llc_shdlc.c 	r = shdlc->xmit_to_drv(shdlc->hdev, skb);
skb               174 net/nfc/hci/llc_shdlc.c 	kfree_skb(skb);
skb               185 net/nfc/hci/llc_shdlc.c 	struct sk_buff *skb;
skb               193 net/nfc/hci/llc_shdlc.c 		skb = skb_dequeue(&shdlc->ack_pending_q);
skb               194 net/nfc/hci/llc_shdlc.c 		kfree_skb(skb);
skb               208 net/nfc/hci/llc_shdlc.c 		skb = skb_peek(&shdlc->ack_pending_q);
skb               210 net/nfc/hci/llc_shdlc.c 		mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
skb               224 net/nfc/hci/llc_shdlc.c 				  struct sk_buff *skb, int ns, int nr)
skb               246 net/nfc/hci/llc_shdlc.c 	if (skb->len) {
skb               247 net/nfc/hci/llc_shdlc.c 		shdlc->rcv_to_hci(shdlc->hdev, skb);
skb               248 net/nfc/hci/llc_shdlc.c 		skb = NULL;
skb               260 net/nfc/hci/llc_shdlc.c 	kfree_skb(skb);
skb               275 net/nfc/hci/llc_shdlc.c 	struct sk_buff *skb;
skb               279 net/nfc/hci/llc_shdlc.c 	while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
skb               280 net/nfc/hci/llc_shdlc.c 		skb_pull(skb, 1);	/* remove control field */
skb               281 net/nfc/hci/llc_shdlc.c 		skb_queue_head(&shdlc->send_q, skb);
skb               288 net/nfc/hci/llc_shdlc.c 	struct sk_buff *skb;
skb               301 net/nfc/hci/llc_shdlc.c 				skb = skb_dequeue(&shdlc->ack_pending_q);
skb               302 net/nfc/hci/llc_shdlc.c 				kfree_skb(skb);
skb               314 net/nfc/hci/llc_shdlc.c 	struct sk_buff *skb;
skb               325 net/nfc/hci/llc_shdlc.c 				skb = llc_shdlc_alloc_skb(shdlc, 0);
skb               326 net/nfc/hci/llc_shdlc.c 				if (skb)
skb               327 net/nfc/hci/llc_shdlc.c 					skb_queue_tail(&shdlc->send_q, skb);
skb               366 net/nfc/hci/llc_shdlc.c 	struct sk_buff *skb;
skb               370 net/nfc/hci/llc_shdlc.c 	skb = llc_shdlc_alloc_skb(shdlc, 2);
skb               371 net/nfc/hci/llc_shdlc.c 	if (skb == NULL)
skb               374 net/nfc/hci/llc_shdlc.c 	skb_put_u8(skb, SHDLC_MAX_WINDOW);
skb               375 net/nfc/hci/llc_shdlc.c 	skb_put_u8(skb, SHDLC_SREJ_SUPPORT ? 1 : 0);
skb               377 net/nfc/hci/llc_shdlc.c 	return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
skb               382 net/nfc/hci/llc_shdlc.c 	struct sk_buff *skb;
skb               386 net/nfc/hci/llc_shdlc.c 	skb = llc_shdlc_alloc_skb(shdlc, 0);
skb               387 net/nfc/hci/llc_shdlc.c 	if (skb == NULL)
skb               390 net/nfc/hci/llc_shdlc.c 	return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
skb               394 net/nfc/hci/llc_shdlc.c 				  struct sk_buff *skb,
skb               412 net/nfc/hci/llc_shdlc.c 			if (skb->len > 0)
skb               413 net/nfc/hci/llc_shdlc.c 				w = skb->data[0];
skb               415 net/nfc/hci/llc_shdlc.c 			if (skb->len > 1)
skb               416 net/nfc/hci/llc_shdlc.c 				srej_support = skb->data[1] & 0x01 ? true :
skb               456 net/nfc/hci/llc_shdlc.c 	kfree_skb(skb);
skb               461 net/nfc/hci/llc_shdlc.c 	struct sk_buff *skb;
skb               471 net/nfc/hci/llc_shdlc.c 	while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
skb               472 net/nfc/hci/llc_shdlc.c 		control = skb->data[0];
skb               473 net/nfc/hci/llc_shdlc.c 		skb_pull(skb, 1);
skb               482 net/nfc/hci/llc_shdlc.c 			llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
skb               491 net/nfc/hci/llc_shdlc.c 			kfree_skb(skb);
skb               495 net/nfc/hci/llc_shdlc.c 			llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
skb               499 net/nfc/hci/llc_shdlc.c 			kfree_skb(skb);
skb               520 net/nfc/hci/llc_shdlc.c 	struct sk_buff *skb;
skb               541 net/nfc/hci/llc_shdlc.c 		skb = skb_dequeue(&shdlc->send_q);
skb               543 net/nfc/hci/llc_shdlc.c 		*(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
skb               548 net/nfc/hci/llc_shdlc.c 		SHDLC_DUMP_SKB("shdlc frame written", skb);
skb               550 net/nfc/hci/llc_shdlc.c 		r = shdlc->xmit_to_drv(shdlc->hdev, skb);
skb               559 net/nfc/hci/llc_shdlc.c 		*(unsigned long *)skb->cb = time_sent;
skb               561 net/nfc/hci/llc_shdlc.c 		skb_queue_tail(&shdlc->ack_pending_q, skb);
skb               725 net/nfc/hci/llc_shdlc.c static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb)
skb               727 net/nfc/hci/llc_shdlc.c 	if (skb == NULL) {
skb               731 net/nfc/hci/llc_shdlc.c 		SHDLC_DUMP_SKB("incoming frame", skb);
skb               732 net/nfc/hci/llc_shdlc.c 		skb_queue_tail(&shdlc->rcv_q, skb);
skb               805 net/nfc/hci/llc_shdlc.c static void llc_shdlc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
skb               809 net/nfc/hci/llc_shdlc.c 	llc_shdlc_recv_frame(shdlc, skb);
skb               812 net/nfc/hci/llc_shdlc.c static int llc_shdlc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
skb               816 net/nfc/hci/llc_shdlc.c 	skb_queue_tail(&shdlc->send_q, skb);
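
The llc_shdlc.c entries above revolve around the I-frame control byte built in
the send path (SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) | shdlc->nr) and taken
apart again in the receive path. A minimal userspace sketch of that encoding,
assuming the 0x80 head marker and the 3-bit ns/nr fields implied by the driver:

    #include <stdint.h>
    #include <stdio.h>

    #define SHDLC_CONTROL_HEAD_I 0x80 /* assumed value of the I-frame marker */

    /* pack the 3-bit send/receive sequence numbers into one control byte */
    static uint8_t shdlc_i_control(uint8_t ns, uint8_t nr)
    {
        return SHDLC_CONTROL_HEAD_I | ((ns & 0x07) << 3) | (nr & 0x07);
    }

    int main(void)
    {
        uint8_t c = shdlc_i_control(5, 2);

        /* recover the fields the way the receive path does */
        printf("control=0x%02x ns=%u nr=%u\n", c, (c >> 3) & 0x07, c & 0x07);
        return 0;
    }
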
skb               213 net/nfc/llcp.h 			       struct sk_buff *skb, u8 direction);
skb               229 net/nfc/llcp.h void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
skb               236 net/nfc/llcp.h void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
skb               314 net/nfc/llcp_commands.c 	struct sk_buff *skb;
skb               320 net/nfc/llcp_commands.c 	skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
skb               322 net/nfc/llcp_commands.c 	if (skb == NULL) {
skb               327 net/nfc/llcp_commands.c 	skb = llcp_add_header(skb, sock->dsap, sock->ssap, cmd);
skb               329 net/nfc/llcp_commands.c 	return skb;
skb               334 net/nfc/llcp_commands.c 	struct sk_buff *skb;
skb               348 net/nfc/llcp_commands.c 	skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0);
skb               349 net/nfc/llcp_commands.c 	if (skb == NULL)
skb               352 net/nfc/llcp_commands.c 	skb_queue_tail(&local->tx_queue, skb);
skb               359 net/nfc/llcp_commands.c 	struct sk_buff *skb;
skb               372 net/nfc/llcp_commands.c 	skb = alloc_skb(size, GFP_KERNEL);
skb               373 net/nfc/llcp_commands.c 	if (skb == NULL)
skb               376 net/nfc/llcp_commands.c 	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
skb               378 net/nfc/llcp_commands.c 	skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
skb               380 net/nfc/llcp_commands.c 	__net_timestamp(skb);
skb               382 net/nfc/llcp_commands.c 	nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX);
skb               384 net/nfc/llcp_commands.c 	return nfc_data_exchange(dev, local->target_idx, skb,
skb               391 net/nfc/llcp_commands.c 	struct sk_buff *skb;
skb               439 net/nfc/llcp_commands.c 	skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size);
skb               440 net/nfc/llcp_commands.c 	if (skb == NULL) {
skb               445 net/nfc/llcp_commands.c 	llcp_add_tlv(skb, service_name_tlv, service_name_tlv_length);
skb               446 net/nfc/llcp_commands.c 	llcp_add_tlv(skb, miux_tlv, miux_tlv_length);
skb               447 net/nfc/llcp_commands.c 	llcp_add_tlv(skb, rw_tlv, rw_tlv_length);
skb               449 net/nfc/llcp_commands.c 	skb_queue_tail(&local->tx_queue, skb);
skb               467 net/nfc/llcp_commands.c 	struct sk_buff *skb;
skb               500 net/nfc/llcp_commands.c 	skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
skb               501 net/nfc/llcp_commands.c 	if (skb == NULL) {
skb               506 net/nfc/llcp_commands.c 	llcp_add_tlv(skb, miux_tlv, miux_tlv_length);
skb               507 net/nfc/llcp_commands.c 	llcp_add_tlv(skb, rw_tlv, rw_tlv_length);
skb               509 net/nfc/llcp_commands.c 	skb_queue_tail(&local->tx_queue, skb);
skb               526 net/nfc/llcp_commands.c 	struct sk_buff *skb;
skb               541 net/nfc/llcp_commands.c 	skb = alloc_skb(size, GFP_KERNEL);
skb               542 net/nfc/llcp_commands.c 	if (skb == NULL)
skb               545 net/nfc/llcp_commands.c 	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
skb               547 net/nfc/llcp_commands.c 	skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL);
skb               549 net/nfc/llcp_commands.c 	return skb;
skb               557 net/nfc/llcp_commands.c 	struct sk_buff *skb;
skb               559 net/nfc/llcp_commands.c 	skb = nfc_llcp_allocate_snl(local, tlvs_len);
skb               560 net/nfc/llcp_commands.c 	if (IS_ERR(skb))
skb               561 net/nfc/llcp_commands.c 		return PTR_ERR(skb);
skb               564 net/nfc/llcp_commands.c 		skb_put_data(skb, sdp->tlv, sdp->tlv_len);
skb               571 net/nfc/llcp_commands.c 	skb_queue_tail(&local->tx_queue, skb);
skb               581 net/nfc/llcp_commands.c 	struct sk_buff *skb;
skb               583 net/nfc/llcp_commands.c 	skb = nfc_llcp_allocate_snl(local, tlvs_len);
skb               584 net/nfc/llcp_commands.c 	if (IS_ERR(skb))
skb               585 net/nfc/llcp_commands.c 		return PTR_ERR(skb);
skb               596 net/nfc/llcp_commands.c 		skb_put_data(skb, sdreq->tlv, sdreq->tlv_len);
skb               605 net/nfc/llcp_commands.c 	skb_queue_tail(&local->tx_queue, skb);
skb               612 net/nfc/llcp_commands.c 	struct sk_buff *skb;
skb               628 net/nfc/llcp_commands.c 	skb = alloc_skb(size, GFP_KERNEL);
skb               629 net/nfc/llcp_commands.c 	if (skb == NULL)
skb               632 net/nfc/llcp_commands.c 	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
skb               634 net/nfc/llcp_commands.c 	skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM);
skb               636 net/nfc/llcp_commands.c 	skb_put_data(skb, &reason, 1);
skb               638 net/nfc/llcp_commands.c 	skb_queue_head(&local->tx_queue, skb);
skb               792 net/nfc/llcp_commands.c 	struct sk_buff *skb;
skb               801 net/nfc/llcp_commands.c 	skb = llcp_allocate_pdu(sock, LLCP_PDU_RR, LLCP_SEQUENCE_SIZE);
skb               802 net/nfc/llcp_commands.c 	if (skb == NULL)
skb               805 net/nfc/llcp_commands.c 	skb_put(skb, LLCP_SEQUENCE_SIZE);
skb               807 net/nfc/llcp_commands.c 	skb->data[2] = sock->recv_n;
skb               809 net/nfc/llcp_commands.c 	skb_queue_head(&local->tx_queue, skb);
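
llcp_allocate_pdu(), nfc_llcp_allocate_snl() and nfc_llcp_send_dm() above all
funnel through llcp_add_header(), which prepends the 2-byte LLCP header. A
sketch of that packing under the NFC Forum LLCP layout (DSAP 6 bits, PTYPE 4
bits, SSAP 6 bits); the SAP and PTYPE values in main() are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* DSAP(6) | PTYPE(4) | SSAP(6), per the NFC Forum LLCP header layout */
    static void llcp_pack_header(uint8_t hdr[2], uint8_t dsap, uint8_t ssap,
                                 uint8_t ptype)
    {
        hdr[0] = (uint8_t)((dsap << 2) | (ptype >> 2));
        hdr[1] = (uint8_t)(((ptype & 0x03) << 6) | (ssap & 0x3f));
    }

    int main(void)
    {
        uint8_t hdr[2];

        llcp_pack_header(hdr, 0x10, 0x20, 0x03); /* hypothetical SAPs, UI ptype */
        printf("header: %02x %02x\n", hdr[0], hdr[1]);
        return 0;
    }
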
skb                21 net/nfc/llcp_core.c static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb);
skb               675 net/nfc/llcp_core.c 			       struct sk_buff *skb, u8 direction)
skb               688 net/nfc/llcp_core.c 			skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE,
skb               718 net/nfc/llcp_core.c 	struct sk_buff *skb;
skb               722 net/nfc/llcp_core.c 	skb = skb_dequeue(&local->tx_queue);
skb               723 net/nfc/llcp_core.c 	if (skb != NULL) {
skb               724 net/nfc/llcp_core.c 		sk = skb->sk;
skb               727 net/nfc/llcp_core.c 		if (llcp_sock == NULL && nfc_llcp_ptype(skb) == LLCP_PDU_I) {
skb               728 net/nfc/llcp_core.c 			kfree_skb(skb);
skb               731 net/nfc/llcp_core.c 			skb_queue_head(&local->tx_queue, skb);
skb               735 net/nfc/llcp_core.c 			u8 ptype = nfc_llcp_ptype(skb);
skb               740 net/nfc/llcp_core.c 					     16, 1, skb->data, skb->len, true);
skb               750 net/nfc/llcp_core.c 				copy_skb = skb_copy(skb, GFP_ATOMIC);
skb               752 net/nfc/llcp_core.c 			__net_timestamp(skb);
skb               754 net/nfc/llcp_core.c 			nfc_llcp_send_to_raw_sock(local, skb,
skb               758 net/nfc/llcp_core.c 						skb, nfc_llcp_recv, local);
skb               818 net/nfc/llcp_core.c static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
skb               820 net/nfc/llcp_core.c 	u8 *tlv = &skb->data[2], type, length;
skb               821 net/nfc/llcp_core.c 	size_t tlv_array_len = skb->len - LLCP_HEADER_SIZE, offset = 0;
skb               842 net/nfc/llcp_core.c 			     struct sk_buff *skb)
skb               848 net/nfc/llcp_core.c 	dsap = nfc_llcp_dsap(skb);
skb               849 net/nfc/llcp_core.c 	ssap = nfc_llcp_ssap(skb);
skb               851 net/nfc/llcp_core.c 	ui_cb = nfc_llcp_ui_skb_cb(skb);
skb               863 net/nfc/llcp_core.c 	skb_pull(skb, LLCP_HEADER_SIZE);
skb               864 net/nfc/llcp_core.c 	if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
skb               869 net/nfc/llcp_core.c 		skb_get(skb);
skb               878 net/nfc/llcp_core.c 				  struct sk_buff *skb)
skb               884 net/nfc/llcp_core.c 	dsap = nfc_llcp_dsap(skb);
skb               885 net/nfc/llcp_core.c 	ssap = nfc_llcp_ssap(skb);
skb               899 net/nfc/llcp_core.c 		sn = nfc_llcp_connect_sn(skb, &sn_len);
skb               970 net/nfc/llcp_core.c 	nfc_llcp_parse_connection_tlv(new_sock, &skb->data[LLCP_HEADER_SIZE],
skb               971 net/nfc/llcp_core.c 				      skb->len - LLCP_HEADER_SIZE);
skb              1028 net/nfc/llcp_core.c 			       struct sk_buff *skb)
skb              1034 net/nfc/llcp_core.c 	ptype = nfc_llcp_ptype(skb);
skb              1035 net/nfc/llcp_core.c 	dsap = nfc_llcp_dsap(skb);
skb              1036 net/nfc/llcp_core.c 	ssap = nfc_llcp_ssap(skb);
skb              1037 net/nfc/llcp_core.c 	ns = nfc_llcp_ns(skb);
skb              1038 net/nfc/llcp_core.c 	nr = nfc_llcp_nr(skb);
skb              1064 net/nfc/llcp_core.c 		skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE);
skb              1065 net/nfc/llcp_core.c 		if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
skb              1070 net/nfc/llcp_core.c 			skb_get(skb);
skb              1115 net/nfc/llcp_core.c 			       struct sk_buff *skb)
skb              1121 net/nfc/llcp_core.c 	dsap = nfc_llcp_dsap(skb);
skb              1122 net/nfc/llcp_core.c 	ssap = nfc_llcp_ssap(skb);
skb              1158 net/nfc/llcp_core.c static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
skb              1164 net/nfc/llcp_core.c 	dsap = nfc_llcp_dsap(skb);
skb              1165 net/nfc/llcp_core.c 	ssap = nfc_llcp_ssap(skb);
skb              1182 net/nfc/llcp_core.c 	nfc_llcp_parse_connection_tlv(llcp_sock, &skb->data[LLCP_HEADER_SIZE],
skb              1183 net/nfc/llcp_core.c 				      skb->len - LLCP_HEADER_SIZE);
skb              1191 net/nfc/llcp_core.c static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, struct sk_buff *skb)
skb              1197 net/nfc/llcp_core.c 	dsap = nfc_llcp_dsap(skb);
skb              1198 net/nfc/llcp_core.c 	ssap = nfc_llcp_ssap(skb);
skb              1199 net/nfc/llcp_core.c 	reason = skb->data[2];
skb              1229 net/nfc/llcp_core.c 			      struct sk_buff *skb)
skb              1241 net/nfc/llcp_core.c 	dsap = nfc_llcp_dsap(skb);
skb              1242 net/nfc/llcp_core.c 	ssap = nfc_llcp_ssap(skb);
skb              1251 net/nfc/llcp_core.c 	tlv = &skb->data[LLCP_HEADER_SIZE];
skb              1252 net/nfc/llcp_core.c 	tlv_len = skb->len - LLCP_HEADER_SIZE;
skb              1363 net/nfc/llcp_core.c static void nfc_llcp_recv_agf(struct nfc_llcp_local *local, struct sk_buff *skb)
skb              1369 net/nfc/llcp_core.c 	if (skb->len <= LLCP_HEADER_SIZE) {
skb              1374 net/nfc/llcp_core.c 	skb_pull(skb, LLCP_HEADER_SIZE);
skb              1376 net/nfc/llcp_core.c 	while (skb->len > LLCP_AGF_PDU_HEADER_SIZE) {
skb              1377 net/nfc/llcp_core.c 		pdu_len = skb->data[0] << 8 | skb->data[1];
skb              1379 net/nfc/llcp_core.c 		skb_pull(skb, LLCP_AGF_PDU_HEADER_SIZE);
skb              1381 net/nfc/llcp_core.c 		if (pdu_len < LLCP_HEADER_SIZE || pdu_len > skb->len) {
skb              1386 net/nfc/llcp_core.c 		ptype = nfc_llcp_ptype(skb);
skb              1397 net/nfc/llcp_core.c 		skb_put_data(new_skb, skb->data, pdu_len);
skb              1403 net/nfc/llcp_core.c 		skb_pull(skb, pdu_len);
skb              1407 net/nfc/llcp_core.c static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb)
skb              1411 net/nfc/llcp_core.c 	ptype = nfc_llcp_ptype(skb);
skb              1412 net/nfc/llcp_core.c 	dsap = nfc_llcp_dsap(skb);
skb              1413 net/nfc/llcp_core.c 	ssap = nfc_llcp_ssap(skb);
skb              1419 net/nfc/llcp_core.c 				     skb->data, skb->len, true);
skb              1428 net/nfc/llcp_core.c 		nfc_llcp_recv_ui(local, skb);
skb              1433 net/nfc/llcp_core.c 		nfc_llcp_recv_connect(local, skb);
skb              1438 net/nfc/llcp_core.c 		nfc_llcp_recv_disc(local, skb);
skb              1443 net/nfc/llcp_core.c 		nfc_llcp_recv_cc(local, skb);
skb              1448 net/nfc/llcp_core.c 		nfc_llcp_recv_dm(local, skb);
skb              1453 net/nfc/llcp_core.c 		nfc_llcp_recv_snl(local, skb);
skb              1460 net/nfc/llcp_core.c 		nfc_llcp_recv_hdlc(local, skb);
skb              1465 net/nfc/llcp_core.c 		nfc_llcp_recv_agf(local, skb);
skb              1474 net/nfc/llcp_core.c 	struct sk_buff *skb;
skb              1476 net/nfc/llcp_core.c 	skb = local->rx_pending;
skb              1477 net/nfc/llcp_core.c 	if (skb == NULL) {
skb              1482 net/nfc/llcp_core.c 	__net_timestamp(skb);
skb              1484 net/nfc/llcp_core.c 	nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_RX);
skb              1486 net/nfc/llcp_core.c 	nfc_llcp_rx_skb(local, skb);
skb              1493 net/nfc/llcp_core.c static void __nfc_llcp_recv(struct nfc_llcp_local *local, struct sk_buff *skb)
skb              1495 net/nfc/llcp_core.c 	local->rx_pending = skb;
skb              1500 net/nfc/llcp_core.c void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
skb              1510 net/nfc/llcp_core.c 	__nfc_llcp_recv(local, skb);
skb              1513 net/nfc/llcp_core.c int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
skb              1519 net/nfc/llcp_core.c 		kfree_skb(skb);
skb              1523 net/nfc/llcp_core.c 	__nfc_llcp_recv(local, skb);
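
nfc_llcp_recv_agf() above de-aggregates an AGF PDU: after the LLCP header,
each embedded PDU is preceded by a 2-byte big-endian length, and under- or
over-sized entries abort the walk. A self-contained rendition of that loop,
with LLCP_HEADER_SIZE assumed to be the 2-byte header:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LLCP_HEADER_SIZE 2 /* assumed: the 2-byte LLCP header */

    static void agf_walk(const uint8_t *p, size_t len)
    {
        while (len > 2) {
            size_t pdu_len = ((size_t)p[0] << 8) | p[1]; /* big-endian length */

            p += 2;
            len -= 2;
            if (pdu_len < LLCP_HEADER_SIZE || pdu_len > len) {
                fprintf(stderr, "malformed sub-PDU, dropping the rest\n");
                return;
            }
            printf("sub-PDU of %zu bytes\n", pdu_len);
            p += pdu_len;
            len -= pdu_len;
        }
    }

    int main(void)
    {
        /* two sub-PDUs: a bare 2-byte header, then a 3-byte PDU */
        const uint8_t agf[] = { 0x00, 0x02, 0x00, 0x00,
                                0x00, 0x03, 0x45, 0xa1, 0x00 };
        agf_walk(agf, sizeof(agf));
        return 0;
    }
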
skb               807 net/nfc/llcp_sock.c 	struct sk_buff *skb, *cskb;
skb               825 net/nfc/llcp_sock.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb               826 net/nfc/llcp_sock.c 	if (!skb) {
skb               836 net/nfc/llcp_sock.c 	rlen = skb->len;		/* real length of skb */
skb               839 net/nfc/llcp_sock.c 	cskb = skb;
skb               842 net/nfc/llcp_sock.c 			skb_queue_head(&sk->sk_receive_queue, skb);
skb               846 net/nfc/llcp_sock.c 	sock_recv_timestamp(msg, sk, skb);
skb               849 net/nfc/llcp_sock.c 		struct nfc_llcp_ui_cb *ui_cb = nfc_llcp_ui_skb_cb(skb);
skb               871 net/nfc/llcp_sock.c 			skb_pull(skb, copied);
skb               872 net/nfc/llcp_sock.c 			if (skb->len) {
skb               873 net/nfc/llcp_sock.c 				skb_queue_head(&sk->sk_receive_queue, skb);
skb               878 net/nfc/llcp_sock.c 		kfree_skb(skb);
skb               402 net/nfc/nci/core.c static void nci_nfcc_loopback_cb(void *context, struct sk_buff *skb, int err)
skb               413 net/nfc/nci/core.c 	conn_info->rx_skb = skb;
skb               424 net/nfc/nci/core.c 	struct sk_buff *skb;
skb               447 net/nfc/nci/core.c 	skb = nci_skb_alloc(ndev, NCI_DATA_HDR_SIZE + data_len, GFP_KERNEL);
skb               448 net/nfc/nci/core.c 	if (!skb)
skb               451 net/nfc/nci/core.c 	skb_reserve(skb, NCI_DATA_HDR_SIZE);
skb               452 net/nfc/nci/core.c 	skb_put_data(skb, data, data_len);
skb               455 net/nfc/nci/core.c 	loopback_data.data = skb;
skb               988 net/nfc/nci/core.c 			  struct sk_buff *skb,
skb               999 net/nfc/nci/core.c 	pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
skb              1013 net/nfc/nci/core.c 	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
skb              1020 net/nfc/nci/core.c static int nci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
skb              1025 net/nfc/nci/core.c 	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
skb              1276 net/nfc/nci/core.c int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
skb              1278 net/nfc/nci/core.c 	pr_debug("len %d\n", skb->len);
skb              1282 net/nfc/nci/core.c 		kfree_skb(skb);
skb              1287 net/nfc/nci/core.c 	skb_queue_tail(&ndev->rx_q, skb);
skb              1294 net/nfc/nci/core.c int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
skb              1296 net/nfc/nci/core.c 	pr_debug("len %d\n", skb->len);
skb              1299 net/nfc/nci/core.c 		kfree_skb(skb);
skb              1304 net/nfc/nci/core.c 	skb_orphan(skb);
skb              1307 net/nfc/nci/core.c 	nfc_send_to_raw_sock(ndev->nfc_dev, skb,
skb              1310 net/nfc/nci/core.c 	return ndev->ops->send(ndev, skb);
skb              1318 net/nfc/nci/core.c 	struct sk_buff *skb;
skb              1322 net/nfc/nci/core.c 	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
skb              1323 net/nfc/nci/core.c 	if (!skb) {
skb              1328 net/nfc/nci/core.c 	hdr = skb_put(skb, NCI_CTRL_HDR_SIZE);
skb              1337 net/nfc/nci/core.c 		skb_put_data(skb, payload, plen);
skb              1339 net/nfc/nci/core.c 	skb_queue_tail(&ndev->cmd_q, skb);
skb              1367 net/nfc/nci/core.c 			     struct sk_buff *skb, struct nci_driver_ops *ops,
skb              1376 net/nfc/nci/core.c 	return op->rsp(ndev, skb);
skb              1380 net/nfc/nci/core.c 			     struct sk_buff *skb, struct nci_driver_ops *ops,
skb              1389 net/nfc/nci/core.c 	return op->ntf(ndev, skb);
skb              1393 net/nfc/nci/core.c 			struct sk_buff *skb)
skb              1395 net/nfc/nci/core.c 	return nci_op_rsp_packet(ndev, opcode, skb, ndev->ops->prop_ops,
skb              1400 net/nfc/nci/core.c 			struct sk_buff *skb)
skb              1402 net/nfc/nci/core.c 	return nci_op_ntf_packet(ndev, opcode, skb, ndev->ops->prop_ops,
skb              1407 net/nfc/nci/core.c 			struct sk_buff *skb)
skb              1409 net/nfc/nci/core.c 	return nci_op_rsp_packet(ndev, opcode, skb, ndev->ops->core_ops,
skb              1414 net/nfc/nci/core.c 			struct sk_buff *skb)
skb              1416 net/nfc/nci/core.c 	return nci_op_ntf_packet(ndev, opcode, skb, ndev->ops->core_ops,
skb              1426 net/nfc/nci/core.c 	struct sk_buff *skb;
skb              1436 net/nfc/nci/core.c 		skb = skb_dequeue(&ndev->tx_q);
skb              1437 net/nfc/nci/core.c 		if (!skb)
skb              1446 net/nfc/nci/core.c 			 nci_pbf(skb->data),
skb              1447 net/nfc/nci/core.c 			 nci_conn_id(skb->data),
skb              1448 net/nfc/nci/core.c 			 nci_plen(skb->data));
skb              1450 net/nfc/nci/core.c 		nci_send_frame(ndev, skb);
skb              1462 net/nfc/nci/core.c 	struct sk_buff *skb;
skb              1464 net/nfc/nci/core.c 	while ((skb = skb_dequeue(&ndev->rx_q))) {
skb              1467 net/nfc/nci/core.c 		nfc_send_to_raw_sock(ndev->nfc_dev, skb,
skb              1471 net/nfc/nci/core.c 		switch (nci_mt(skb->data)) {
skb              1473 net/nfc/nci/core.c 			nci_rsp_packet(ndev, skb);
skb              1477 net/nfc/nci/core.c 			nci_ntf_packet(ndev, skb);
skb              1481 net/nfc/nci/core.c 			nci_rx_data_packet(ndev, skb);
skb              1485 net/nfc/nci/core.c 			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
skb              1486 net/nfc/nci/core.c 			kfree_skb(skb);
skb              1508 net/nfc/nci/core.c 	struct sk_buff *skb;
skb              1514 net/nfc/nci/core.c 		skb = skb_dequeue(&ndev->cmd_q);
skb              1515 net/nfc/nci/core.c 		if (!skb)
skb              1521 net/nfc/nci/core.c 			 nci_pbf(skb->data),
skb              1522 net/nfc/nci/core.c 			 nci_opcode_gid(nci_opcode(skb->data)),
skb              1523 net/nfc/nci/core.c 			 nci_opcode_oid(nci_opcode(skb->data)),
skb              1524 net/nfc/nci/core.c 			 nci_plen(skb->data));
skb              1526 net/nfc/nci/core.c 		nci_send_frame(ndev, skb);
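
The nci/core.c entries show the control-plane plumbing: nci_send_cmd() fills a
3-byte header that the rx/tx workqueues later decode with nci_mt(), nci_pbf(),
nci_opcode() and nci_plen(). A sketch of that header per the public NCI spec
(octet 0 = MT:3 | PBF:1 | GID:4, octet 1 = OID:6, octet 2 = payload length):

    #include <stdint.h>
    #include <stdio.h>

    static void nci_pack_ctrl_hdr(uint8_t hdr[3], uint8_t mt, uint8_t pbf,
                                  uint8_t gid, uint8_t oid, uint8_t plen)
    {
        hdr[0] = (uint8_t)(((mt & 0x07) << 5) | ((pbf & 0x01) << 4) | (gid & 0x0f));
        hdr[1] = oid & 0x3f;
        hdr[2] = plen;
    }

    int main(void)
    {
        uint8_t hdr[3];

        /* MT=1 is a command per the NCI spec; GID/OID 0 = core reset */
        nci_pack_ctrl_hdr(hdr, 1, 0 /* last segment */, 0x0, 0x00, 0);
        printf("%02x %02x %02x\n", hdr[0], hdr[1], hdr[2]);
        return 0;
    }
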
skb                26 net/nfc/nci/data.c void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
skb                35 net/nfc/nci/data.c 		kfree_skb(skb);
skb                42 net/nfc/nci/data.c 	pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
skb                50 net/nfc/nci/data.c 		cb(cb_context, skb, err);
skb                51 net/nfc/nci/data.c 	} else if (skb) {
skb                55 net/nfc/nci/data.c 		kfree_skb(skb);
skb                66 net/nfc/nci/data.c 				     struct sk_buff *skb,
skb                70 net/nfc/nci/data.c 	int plen = skb->len;
skb                72 net/nfc/nci/data.c 	hdr = skb_push(skb, NCI_DATA_HDR_SIZE);
skb                95 net/nfc/nci/data.c 				   struct sk_buff *skb) {
skb                97 net/nfc/nci/data.c 	int total_len = skb->len;
skb                98 net/nfc/nci/data.c 	unsigned char *data = skb->data;
skb               154 net/nfc/nci/data.c 	kfree_skb(skb);
skb               167 net/nfc/nci/data.c int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
skb               172 net/nfc/nci/data.c 	pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len);
skb               181 net/nfc/nci/data.c 	if (skb->len <= conn_info->max_pkt_payload_len) {
skb               183 net/nfc/nci/data.c 		nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
skb               185 net/nfc/nci/data.c 		skb_queue_tail(&ndev->tx_q, skb);
skb               188 net/nfc/nci/data.c 		rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
skb               201 net/nfc/nci/data.c 	kfree_skb(skb);
skb               211 net/nfc/nci/data.c 				 struct sk_buff *skb,
skb               226 net/nfc/nci/data.c 		if (skb_cow_head(skb, reassembly_len)) {
skb               229 net/nfc/nci/data.c 			kfree_skb(skb);
skb               230 net/nfc/nci/data.c 			skb = NULL;
skb               240 net/nfc/nci/data.c 		memcpy(skb_push(skb, reassembly_len),
skb               251 net/nfc/nci/data.c 		ndev->rx_data_reassembly = skb;
skb               258 net/nfc/nci/data.c 		err = nfc_tm_data_received(ndev->nfc_dev, skb);
skb               262 net/nfc/nci/data.c 		nci_data_exchange_complete(ndev, skb, conn_id, err);
skb               267 net/nfc/nci/data.c void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
skb               269 net/nfc/nci/data.c 	__u8 pbf = nci_pbf(skb->data);
skb               271 net/nfc/nci/data.c 	__u8 conn_id = nci_conn_id(skb->data);
skb               274 net/nfc/nci/data.c 	pr_debug("len %d\n", skb->len);
skb               277 net/nfc/nci/data.c 		 nci_pbf(skb->data),
skb               278 net/nfc/nci/data.c 		 nci_conn_id(skb->data),
skb               279 net/nfc/nci/data.c 		 nci_plen(skb->data));
skb               281 net/nfc/nci/data.c 	conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data));
skb               286 net/nfc/nci/data.c 	skb_pull(skb, NCI_DATA_HDR_SIZE);
skb               294 net/nfc/nci/data.c 		status = skb->data[skb->len - 1];
skb               295 net/nfc/nci/data.c 		skb_trim(skb, (skb->len - 1));
skb               298 net/nfc/nci/data.c 	nci_add_rx_data_frag(ndev, skb, pbf, conn_id, nci_to_errno(status));
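
nci_send_data() above sends payloads that fit max_pkt_payload_len as a single
NCI_PBF_LAST packet and otherwise defers to nci_queue_tx_data_frags(). A small
model of the resulting segmentation, using a hypothetical 251-byte maximum:

    #include <stdio.h>

    static void nci_plan_fragments(int total_len, int max_payload)
    {
        int off = 0;

        while (off < total_len) {
            int frag = total_len - off;
            int last = frag <= max_payload; /* final fragment gets PBF_LAST */

            if (!last)
                frag = max_payload;
            printf("frag @%d len=%d pbf=%s\n", off, frag, last ? "LAST" : "CONT");
            off += frag;
        }
    }

    int main(void)
    {
        nci_plan_fragments(700, 251); /* hypothetical max_pkt_payload_len */
        return 0;
    }
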
skb               146 net/nfc/nci/hci.c 	struct sk_buff *skb;
skb               155 net/nfc/nci/hci.c 	skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
skb               157 net/nfc/nci/hci.c 	if (!skb)
skb               160 net/nfc/nci/hci.c 	skb_reserve(skb, NCI_DATA_HDR_SIZE + 2);
skb               161 net/nfc/nci/hci.c 	*(u8 *)skb_push(skb, 1) = data_type;
skb               168 net/nfc/nci/hci.c 		    (skb->len + 1) >= data_len) {
skb               172 net/nfc/nci/hci.c 			len = conn_info->max_pkt_payload_len - skb->len - 1;
skb               175 net/nfc/nci/hci.c 		*(u8 *)skb_push(skb, 1) = cb;
skb               178 net/nfc/nci/hci.c 			skb_put_data(skb, data + i, len);
skb               180 net/nfc/nci/hci.c 		r = nci_send_data(ndev, conn_info->conn_id, skb);
skb               187 net/nfc/nci/hci.c 			skb = nci_skb_alloc(ndev,
skb               190 net/nfc/nci/hci.c 			if (!skb)
skb               193 net/nfc/nci/hci.c 			skb_reserve(skb, NCI_DATA_HDR_SIZE + 1);
skb               224 net/nfc/nci/hci.c 		     struct sk_buff **skb)
skb               253 net/nfc/nci/hci.c 		if (!r && skb)
skb               254 net/nfc/nci/hci.c 			*skb = conn_info->rx_skb;
skb               276 net/nfc/nci/hci.c 				   u8 event, struct sk_buff *skb)
skb               279 net/nfc/nci/hci.c 		ndev->ops->hci_event_received(ndev, pipe, event, skb);
skb               283 net/nfc/nci/hci.c 				 u8 cmd, struct sk_buff *skb)
skb               296 net/nfc/nci/hci.c 		if (skb->len != 5) {
skb               300 net/nfc/nci/hci.c 		create_info = (struct nci_hci_create_pipe_resp *)skb->data;
skb               326 net/nfc/nci/hci.c 		if (skb->len != 1) {
skb               330 net/nfc/nci/hci.c 		delete_info = (struct nci_hci_delete_pipe_noti *)skb->data;
skb               342 net/nfc/nci/hci.c 		if (skb->len != 1) {
skb               348 net/nfc/nci/hci.c 			(struct nci_hci_all_pipe_cleared_noti *)skb->data;
skb               357 net/nfc/nci/hci.c 		ndev->ops->hci_cmd_received(ndev, pipe, cmd, skb);
skb               362 net/nfc/nci/hci.c 	kfree_skb(skb);
skb               366 net/nfc/nci/hci.c 				  u8 result, struct sk_buff *skb)
skb               377 net/nfc/nci/hci.c 	conn_info->rx_skb = skb;
skb               387 net/nfc/nci/hci.c 				   u8 type, u8 instruction, struct sk_buff *skb)
skb               391 net/nfc/nci/hci.c 		nci_hci_resp_received(ndev, pipe, instruction, skb);
skb               394 net/nfc/nci/hci.c 		nci_hci_cmd_received(ndev, pipe, instruction, skb);
skb               397 net/nfc/nci/hci.c 		nci_hci_event_received(ndev, pipe, instruction, skb);
skb               402 net/nfc/nci/hci.c 		kfree_skb(skb);
skb               413 net/nfc/nci/hci.c 	struct sk_buff *skb;
skb               417 net/nfc/nci/hci.c 	while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
skb               418 net/nfc/nci/hci.c 		pipe = NCI_HCP_MSG_GET_PIPE(skb->data[0]);
skb               419 net/nfc/nci/hci.c 		skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
skb               420 net/nfc/nci/hci.c 		message = (struct nci_hcp_message *)skb->data;
skb               423 net/nfc/nci/hci.c 		skb_pull(skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
skb               426 net/nfc/nci/hci.c 				       type, instruction, skb);
skb               431 net/nfc/nci/hci.c 			      struct sk_buff *skb, int err)
skb               447 net/nfc/nci/hci.c 	packet = (struct nci_hcp_packet *)skb->data;
skb               449 net/nfc/nci/hci.c 		skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb);
skb               456 net/nfc/nci/hci.c 		skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb);
skb               483 net/nfc/nci/hci.c 		hcp_skb = skb;
skb               529 net/nfc/nci/hci.c 	struct sk_buff *skb;
skb               541 net/nfc/nci/hci.c 				   (u8 *)&params, sizeof(params), &skb);
skb               545 net/nfc/nci/hci.c 	resp = (struct nci_hci_create_pipe_resp *)skb->data;
skb               547 net/nfc/nci/hci.c 	kfree_skb(skb);
skb               611 net/nfc/nci/hci.c 		      struct sk_buff **skb)
skb               644 net/nfc/nci/hci.c 		if (!r && skb)
skb               645 net/nfc/nci/hci.c 			*skb = conn_info->rx_skb;
skb               723 net/nfc/nci/hci.c 	struct sk_buff *skb;
skb               749 net/nfc/nci/hci.c 			      NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY, &skb);
skb               753 net/nfc/nci/hci.c 	if (skb->len &&
skb               754 net/nfc/nci/hci.c 	    skb->len == strlen(ndev->hci_dev->init_data.session_id) &&
skb               755 net/nfc/nci/hci.c 	    !memcmp(ndev->hci_dev->init_data.session_id, skb->data, skb->len) &&
skb               777 net/nfc/nci/hci.c 	kfree_skb(skb);
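
The nci/hci.c entries parse HCP framing: a packet-header byte whose low 7 bits
carry the pipe id and whose top bit flags the final fragment of a chained
message, followed by a message header of 2-bit type and 6-bit instruction. A
decoding sketch; the bit positions follow the GET_PIPE-style accessors in the
excerpts and the ETSI HCI HCP layout, and should be read as assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define HCP_NO_CHAINING 0x80 /* assumed: set on the last fragment */

    static void hcp_decode(const uint8_t *pkt)
    {
        uint8_t pipe = pkt[0] & 0x7f;
        int last = !!(pkt[0] & HCP_NO_CHAINING);
        uint8_t type = pkt[1] >> 6;
        uint8_t instruction = pkt[1] & 0x3f;

        printf("pipe=%u last=%d type=%u instr=0x%02x\n",
               pipe, last, type, instruction);
    }

    int main(void)
    {
        const uint8_t pkt[] = { 0x81, 0x02 }; /* pipe 1, last, command 0x02 */

        hcp_decode(pkt);
        return 0;
    }
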
skb                31 net/nfc/nci/ntf.c 					     struct sk_buff *skb)
skb                33 net/nfc/nci/ntf.c 	struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
skb                66 net/nfc/nci/ntf.c 					      struct sk_buff *skb)
skb                68 net/nfc/nci/ntf.c 	__u8 status = skb->data[0];
skb                80 net/nfc/nci/ntf.c 						struct sk_buff *skb)
skb                82 net/nfc/nci/ntf.c 	struct nci_core_intf_error_ntf *ntf = (void *) skb->data;
skb               305 net/nfc/nci/ntf.c 				       struct sk_buff *skb)
skb               308 net/nfc/nci/ntf.c 	__u8 *data = skb->data;
skb               505 net/nfc/nci/ntf.c 					     struct sk_buff *skb)
skb               509 net/nfc/nci/ntf.c 	__u8 *data = skb->data;
skb               667 net/nfc/nci/ntf.c 					 struct sk_buff *skb)
skb               670 net/nfc/nci/ntf.c 	struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
skb               711 net/nfc/nci/ntf.c 					  struct sk_buff *skb)
skb               715 net/nfc/nci/ntf.c 				(struct nci_nfcee_discover_ntf *)skb->data;
skb               731 net/nfc/nci/ntf.c 					struct sk_buff *skb)
skb               736 net/nfc/nci/ntf.c void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
skb               738 net/nfc/nci/ntf.c 	__u16 ntf_opcode = nci_opcode(skb->data);
skb               741 net/nfc/nci/ntf.c 		 nci_pbf(skb->data),
skb               744 net/nfc/nci/ntf.c 		 nci_plen(skb->data));
skb               747 net/nfc/nci/ntf.c 	skb_pull(skb, NCI_CTRL_HDR_SIZE);
skb               750 net/nfc/nci/ntf.c 		if (nci_prop_ntf_packet(ndev, ntf_opcode, skb) == -ENOTSUPP) {
skb               760 net/nfc/nci/ntf.c 		nci_core_conn_credits_ntf_packet(ndev, skb);
skb               764 net/nfc/nci/ntf.c 		nci_core_generic_error_ntf_packet(ndev, skb);
skb               768 net/nfc/nci/ntf.c 		nci_core_conn_intf_error_ntf_packet(ndev, skb);
skb               772 net/nfc/nci/ntf.c 		nci_rf_discover_ntf_packet(ndev, skb);
skb               776 net/nfc/nci/ntf.c 		nci_rf_intf_activated_ntf_packet(ndev, skb);
skb               780 net/nfc/nci/ntf.c 		nci_rf_deactivate_ntf_packet(ndev, skb);
skb               784 net/nfc/nci/ntf.c 		nci_nfcee_discover_ntf_packet(ndev, skb);
skb               788 net/nfc/nci/ntf.c 		nci_nfcee_action_ntf_packet(ndev, skb);
skb               796 net/nfc/nci/ntf.c 	nci_core_ntf_packet(ndev, ntf_opcode, skb);
skb               798 net/nfc/nci/ntf.c 	kfree_skb(skb);
skb                28 net/nfc/nci/rsp.c static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
skb                30 net/nfc/nci/rsp.c 	struct nci_core_reset_rsp *rsp = (void *) skb->data;
skb                43 net/nfc/nci/rsp.c static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
skb                45 net/nfc/nci/rsp.c 	struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
skb                66 net/nfc/nci/rsp.c 	rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);
skb               110 net/nfc/nci/rsp.c 					   struct sk_buff *skb)
skb               112 net/nfc/nci/rsp.c 	struct nci_core_set_config_rsp *rsp = (void *) skb->data;
skb               120 net/nfc/nci/rsp.c 				       struct sk_buff *skb)
skb               122 net/nfc/nci/rsp.c 	__u8 status = skb->data[0];
skb               129 net/nfc/nci/rsp.c static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
skb               132 net/nfc/nci/rsp.c 	__u8 status = skb->data[0];
skb               160 net/nfc/nci/rsp.c 					  struct sk_buff *skb)
skb               162 net/nfc/nci/rsp.c 	__u8 status = skb->data[0];
skb               172 net/nfc/nci/rsp.c 					 struct sk_buff *skb)
skb               174 net/nfc/nci/rsp.c 	__u8 status = skb->data[0];
skb               188 net/nfc/nci/rsp.c 					  struct sk_buff *skb)
skb               192 net/nfc/nci/rsp.c 	if (skb->len != 2) {
skb               197 net/nfc/nci/rsp.c 	discover_rsp = (struct nci_nfcee_discover_rsp *)skb->data;
skb               205 net/nfc/nci/rsp.c 					  struct sk_buff *skb)
skb               207 net/nfc/nci/rsp.c 	__u8 status = skb->data[0];
skb               214 net/nfc/nci/rsp.c 					    struct sk_buff *skb)
skb               216 net/nfc/nci/rsp.c 	__u8 status = skb->data[0];
skb               223 net/nfc/nci/rsp.c 		rsp = (struct nci_core_conn_create_rsp *)skb->data;
skb               269 net/nfc/nci/rsp.c 					   struct sk_buff *skb)
skb               272 net/nfc/nci/rsp.c 	__u8 status = skb->data[0];
skb               286 net/nfc/nci/rsp.c void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
skb               288 net/nfc/nci/rsp.c 	__u16 rsp_opcode = nci_opcode(skb->data);
skb               294 net/nfc/nci/rsp.c 		 nci_pbf(skb->data),
skb               297 net/nfc/nci/rsp.c 		 nci_plen(skb->data));
skb               300 net/nfc/nci/rsp.c 	skb_pull(skb, NCI_CTRL_HDR_SIZE);
skb               303 net/nfc/nci/rsp.c 		if (nci_prop_rsp_packet(ndev, rsp_opcode, skb) == -ENOTSUPP) {
skb               313 net/nfc/nci/rsp.c 		nci_core_reset_rsp_packet(ndev, skb);
skb               317 net/nfc/nci/rsp.c 		nci_core_init_rsp_packet(ndev, skb);
skb               321 net/nfc/nci/rsp.c 		nci_core_set_config_rsp_packet(ndev, skb);
skb               325 net/nfc/nci/rsp.c 		nci_core_conn_create_rsp_packet(ndev, skb);
skb               329 net/nfc/nci/rsp.c 		nci_core_conn_close_rsp_packet(ndev, skb);
skb               333 net/nfc/nci/rsp.c 		nci_rf_disc_map_rsp_packet(ndev, skb);
skb               337 net/nfc/nci/rsp.c 		nci_rf_disc_rsp_packet(ndev, skb);
skb               341 net/nfc/nci/rsp.c 		nci_rf_disc_select_rsp_packet(ndev, skb);
skb               345 net/nfc/nci/rsp.c 		nci_rf_deactivate_rsp_packet(ndev, skb);
skb               349 net/nfc/nci/rsp.c 		nci_nfcee_discover_rsp_packet(ndev, skb);
skb               353 net/nfc/nci/rsp.c 		nci_nfcee_mode_set_rsp_packet(ndev, skb);
skb               361 net/nfc/nci/rsp.c 	nci_core_rsp_packet(ndev, rsp_opcode, skb);
skb               363 net/nfc/nci/rsp.c 	kfree_skb(skb);
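
nci_rsp_packet() and nci_ntf_packet() above share a shape: strip the 3-byte
control header, switch on the 16-bit opcode, fall back to the prop/core
handlers, then free the skb. The same dispatch can be written table-driven;
the handler name and the 0x4000 opcode (response MT and core-reset GID/OID
folded into the first two header octets) are assumptions for illustration:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef void (*nci_handler)(const uint8_t *payload, size_t len);

    static void core_reset_rsp(const uint8_t *p, size_t n)
    {
        printf("core reset rsp, status=0x%02x\n", n ? p[0] : 0xff);
    }

    static const struct { uint16_t opcode; nci_handler fn; } rsp_tbl[] = {
        { 0x4000, core_reset_rsp }, /* assumed: MT=rsp, GID=core, OID=reset */
    };

    static void dispatch(const uint8_t *pkt, size_t len)
    {
        uint16_t opcode = (uint16_t)((pkt[0] << 8) | pkt[1]);
        size_t i;

        for (i = 0; i < sizeof(rsp_tbl) / sizeof(rsp_tbl[0]); i++)
            if (rsp_tbl[i].opcode == opcode) {
                rsp_tbl[i].fn(pkt + 3, len - 3); /* skip 3-byte ctrl header */
                return;
            }
        printf("unknown rsp opcode 0x%04x\n", opcode);
    }

    int main(void)
    {
        const uint8_t pkt[] = { 0x40, 0x00, 0x01, 0x00 };

        dispatch(pkt, sizeof(pkt));
        return 0;
    }
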
skb                30 net/nfc/nci/spi.c static int __nci_spi_send(struct nci_spi *nspi, struct sk_buff *skb,
skb                38 net/nfc/nci/spi.c 	if (skb) {
skb                39 net/nfc/nci/spi.c 		t.tx_buf = skb->data;
skb                40 net/nfc/nci/spi.c 		t.len = skb->len;
skb                58 net/nfc/nci/spi.c 		 struct sk_buff *skb)
skb                60 net/nfc/nci/spi.c 	unsigned int payload_len = skb->len;
skb                66 net/nfc/nci/spi.c 	hdr = skb_push(skb, NCI_SPI_HDR_LEN);
skb                75 net/nfc/nci/spi.c 		crc = crc_ccitt(CRC_INIT, skb->data, skb->len);
skb                76 net/nfc/nci/spi.c 		skb_put_u8(skb, crc >> 8);
skb                77 net/nfc/nci/spi.c 		skb_put_u8(skb, crc & 0xFF);
skb                94 net/nfc/nci/spi.c 	ret = __nci_spi_send(nspi, skb, 0);
skb               107 net/nfc/nci/spi.c 	kfree_skb(skb);
skb               147 net/nfc/nci/spi.c 	struct sk_buff *skb;
skb               152 net/nfc/nci/spi.c 	skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
skb               155 net/nfc/nci/spi.c 	hdr = skb_push(skb, NCI_SPI_HDR_LEN);
skb               161 net/nfc/nci/spi.c 	crc = crc_ccitt(CRC_INIT, skb->data, skb->len);
skb               162 net/nfc/nci/spi.c 	skb_put_u8(skb, crc >> 8);
skb               163 net/nfc/nci/spi.c 	skb_put_u8(skb, crc & 0xFF);
skb               165 net/nfc/nci/spi.c 	ret = __nci_spi_send(nspi, skb, 0);
skb               167 net/nfc/nci/spi.c 	kfree_skb(skb);
skb               174 net/nfc/nci/spi.c 	struct sk_buff *skb;
skb               209 net/nfc/nci/spi.c 	skb = nci_skb_alloc(nspi->ndev, rx_len, GFP_KERNEL);
skb               210 net/nfc/nci/spi.c 	if (!skb)
skb               216 net/nfc/nci/spi.c 	rx.rx_buf = skb_put(skb, rx_len);
skb               228 net/nfc/nci/spi.c 		*(u8 *)skb_push(skb, 1) = resp_hdr[1];
skb               229 net/nfc/nci/spi.c 		*(u8 *)skb_push(skb, 1) = resp_hdr[0];
skb               232 net/nfc/nci/spi.c 	return skb;
skb               235 net/nfc/nci/spi.c 	kfree_skb(skb);
skb               240 net/nfc/nci/spi.c static int nci_spi_check_crc(struct sk_buff *skb)
skb               242 net/nfc/nci/spi.c 	u16 crc_data = (skb->data[skb->len - 2] << 8) |
skb               243 net/nfc/nci/spi.c 			skb->data[skb->len - 1];
skb               246 net/nfc/nci/spi.c 	ret = (crc_ccitt(CRC_INIT, skb->data, skb->len - NCI_SPI_CRC_LEN)
skb               249 net/nfc/nci/spi.c 	skb_trim(skb, skb->len - NCI_SPI_CRC_LEN);
skb               254 net/nfc/nci/spi.c static u8 nci_spi_get_ack(struct sk_buff *skb)
skb               258 net/nfc/nci/spi.c 	ret = skb->data[0] >> NCI_SPI_ACK_SHIFT;
skb               261 net/nfc/nci/spi.c 	skb_pull(skb, 2);
skb               279 net/nfc/nci/spi.c 	struct sk_buff *skb;
skb               282 net/nfc/nci/spi.c 	skb = __nci_spi_read(nspi);
skb               283 net/nfc/nci/spi.c 	if (!skb)
skb               287 net/nfc/nci/spi.c 		if (!nci_spi_check_crc(skb)) {
skb               295 net/nfc/nci/spi.c 		nspi->req_result = nci_spi_get_ack(skb);
skb               303 net/nfc/nci/spi.c 	if (!skb->len) {
skb               304 net/nfc/nci/spi.c 		kfree_skb(skb);
skb               305 net/nfc/nci/spi.c 		skb = NULL;
skb               314 net/nfc/nci/spi.c 	return skb;
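
nci_spi_send() and nci_spi_check_crc() above bracket each SPI frame with a CRC
appended high byte first. A standalone equivalent of the kernel's crc_ccitt()
(the reflected algorithm with polynomial 0x8408), with the 0xffff init value
the excerpts pass as CRC_INIT:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CRC_INIT 0xffff

    static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
    {
        while (len--) {
            crc ^= *buf++;
            for (int i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
        }
        return crc;
    }

    int main(void)
    {
        uint8_t frame[8] = { 0x01, 0x00, 0x03, 0xde, 0xad, 0xbe }; /* hdr+payload */
        uint16_t crc = crc_ccitt(CRC_INIT, frame, 6);

        frame[6] = crc >> 8;   /* like skb_put_u8(skb, crc >> 8) */
        frame[7] = crc & 0xff; /* like skb_put_u8(skb, crc & 0xFF) */
        printf("crc=0x%04x\n", crc);
        return 0;
    }
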
skb                53 net/nfc/nci/uart.c 	struct sk_buff *skb = nu->tx_skb;
skb                55 net/nfc/nci/uart.c 	if (!skb)
skb                56 net/nfc/nci/uart.c 		skb = skb_dequeue(&nu->tx_q);
skb                60 net/nfc/nci/uart.c 	return skb;
skb                87 net/nfc/nci/uart.c 	struct sk_buff *skb;
skb                95 net/nfc/nci/uart.c 	while ((skb = nci_uart_dequeue(nu))) {
skb                99 net/nfc/nci/uart.c 		len = tty->ops->write(tty, skb->data, skb->len);
skb               100 net/nfc/nci/uart.c 		skb_pull(skb, len);
skb               101 net/nfc/nci/uart.c 		if (skb->len) {
skb               102 net/nfc/nci/uart.c 			nu->tx_skb = skb;
skb               105 net/nfc/nci/uart.c 		kfree_skb(skb);
skb               312 net/nfc/nci/uart.c static int nci_uart_send(struct nci_uart *nu, struct sk_buff *skb)
skb               315 net/nfc/nci/uart.c 	skb_queue_tail(&nu->tx_q, skb);
skb               390 net/nfc/nci/uart.c static int nci_uart_default_recv(struct nci_uart *nu, struct sk_buff *skb)
skb               392 net/nfc/nci/uart.c 	return nci_recv_frame(nu->ndev, skb);
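
The uart entries show the partial-write protocol of the tx work: write as much
as the tty accepts, skb_pull() the sent bytes, and park the remainder in
nu->tx_skb until the next write wakeup. The same bookkeeping in miniature,
with a toy device that accepts at most 4 bytes per call:

    #include <stddef.h>
    #include <stdio.h>

    struct txq { const char *buf; size_t off, len; };

    /* toy "tty" write: accepts at most 4 bytes at a time */
    static size_t dev_write(const char *p, size_t n)
    {
        (void)p;
        return n < 4 ? n : 4;
    }

    static void tx_work(struct txq *q)
    {
        while (q->off < q->len) {
            size_t n = dev_write(q->buf + q->off, q->len - q->off);

            q->off += n;        /* like skb_pull(skb, len) */
            if (q->off < q->len)
                return;         /* like nu->tx_skb = skb: resume on wakeup */
        }
        printf("frame fully written\n");
    }

    int main(void)
    {
        struct txq q = { "0123456789ABCD", 0, 14 };

        do { tx_work(&q); } while (q.off < q.len); /* simulated wakeups */
        return 0;
    }
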
skb                66 net/nfc/netlink.c 	hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb               133 net/nfc/netlink.c static int nfc_genl_dump_targets(struct sk_buff *skb,
skb               153 net/nfc/netlink.c 		rc = nfc_genl_send_target(skb, &dev->targets[i], cb,
skb               165 net/nfc/netlink.c 	return skb->len;
skb               601 net/nfc/netlink.c static int nfc_genl_dump_devices(struct sk_buff *skb,
skb               628 net/nfc/netlink.c 		rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).portid,
skb               640 net/nfc/netlink.c 	return skb->len;
skb               723 net/nfc/netlink.c static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
skb               761 net/nfc/netlink.c static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info)
skb               782 net/nfc/netlink.c static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info)
skb               803 net/nfc/netlink.c static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
skb               844 net/nfc/netlink.c static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
skb               884 net/nfc/netlink.c static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info *info)
skb               911 net/nfc/netlink.c static int nfc_genl_deactivate_target(struct sk_buff *skb,
skb               936 net/nfc/netlink.c static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info)
skb               971 net/nfc/netlink.c static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
skb              1018 net/nfc/netlink.c static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
skb              1067 net/nfc/netlink.c static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
skb              1133 net/nfc/netlink.c static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
skb              1221 net/nfc/netlink.c static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
skb              1278 net/nfc/netlink.c static int nfc_genl_enable_se(struct sk_buff *skb, struct genl_info *info)
skb              1301 net/nfc/netlink.c static int nfc_genl_disable_se(struct sk_buff *skb, struct genl_info *info)
skb              1356 net/nfc/netlink.c static int nfc_genl_dump_ses(struct sk_buff *skb,
skb              1383 net/nfc/netlink.c 		rc = nfc_genl_send_se(skb, dev, NETLINK_CB(cb->skb).portid,
skb              1395 net/nfc/netlink.c 	return skb->len;
skb              1496 net/nfc/netlink.c static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
skb              1537 net/nfc/netlink.c static int nfc_genl_vendor_cmd(struct sk_buff *skb,
skb              1586 net/nfc/netlink.c static inline void *nfc_hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
skb              1590 net/nfc/netlink.c 	return genlmsg_put(skb, portid, seq, &nfc_genl_family, flags, cmd);
skb              1599 net/nfc/netlink.c 	struct sk_buff *skb;
skb              1602 net/nfc/netlink.c 	skb = nlmsg_new(approxlen + 100, gfp);
skb              1603 net/nfc/netlink.c 	if (!skb)
skb              1606 net/nfc/netlink.c 	hdr = nfc_hdr_put(skb, portid, seq, 0, NFC_CMD_VENDOR);
skb              1608 net/nfc/netlink.c 		kfree_skb(skb);
skb              1612 net/nfc/netlink.c 	if (nla_put_u32(skb, NFC_ATTR_DEVICE_INDEX, dev->idx))
skb              1614 net/nfc/netlink.c 	if (nla_put_u32(skb, NFC_ATTR_VENDOR_ID, oui))
skb              1616 net/nfc/netlink.c 	if (nla_put_u32(skb, NFC_ATTR_VENDOR_SUBCMD, subcmd))
skb              1619 net/nfc/netlink.c 	((void **)skb->cb)[0] = dev;
skb              1620 net/nfc/netlink.c 	((void **)skb->cb)[1] = hdr;
skb              1622 net/nfc/netlink.c 	return skb;
skb              1625 net/nfc/netlink.c 	kfree_skb(skb);
skb              1644 net/nfc/netlink.c int nfc_vendor_cmd_reply(struct sk_buff *skb)
skb              1646 net/nfc/netlink.c 	struct nfc_dev *dev = ((void **)skb->cb)[0];
skb              1647 net/nfc/netlink.c 	void *hdr = ((void **)skb->cb)[1];
skb              1650 net/nfc/netlink.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb              1653 net/nfc/netlink.c 		kfree_skb(skb);
skb              1657 net/nfc/netlink.c 	genlmsg_end(skb, hdr);
skb              1658 net/nfc/netlink.c 	return genlmsg_reply(skb, dev->cur_cmd_info);
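
The vendor-command helpers at the end of netlink.c stash two pointers in the
reply skb's fixed control buffer (((void **)skb->cb)[0] and [1]) and fetch
them back in nfc_vendor_cmd_reply() before clearing the area. A userspace
model of that scratch-space idiom, with a 48-byte array standing in for
skb->cb:

    #include <stdio.h>
    #include <string.h>

    struct fake_skb { char cb[48]; }; /* skb->cb is 48 bytes in the kernel */

    int main(void)
    {
        struct fake_skb skb;
        int dev = 7;
        const char *hdr = "genl-hdr";

        ((void **)skb.cb)[0] = &dev;       /* like ((void **)skb->cb)[0] = dev */
        ((void **)skb.cb)[1] = (void *)hdr;

        printf("dev=%d hdr=%s\n", *(int *)((void **)skb.cb)[0],
               (char *)((void **)skb.cb)[1]);
        memset(skb.cb, 0, sizeof(skb.cb)); /* cleared before the reply goes out */
        return 0;
    }
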
skb                53 net/nfc/nfc.h  int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
skb               144 net/nfc/nfc.h  int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
skb               131 net/nfc/rawsock.c static int rawsock_add_header(struct sk_buff *skb)
skb               133 net/nfc/rawsock.c 	*(u8 *)skb_push(skb, NFC_HEADER_SIZE) = 0;
skb               138 net/nfc/rawsock.c static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
skb               150 net/nfc/rawsock.c 	err = rawsock_add_header(skb);
skb               154 net/nfc/rawsock.c 	err = sock_queue_rcv_skb(sk, skb);
skb               169 net/nfc/rawsock.c 	kfree_skb(skb);
skb               181 net/nfc/rawsock.c 	struct sk_buff *skb;
skb               191 net/nfc/rawsock.c 	skb = skb_dequeue(&sk->sk_write_queue);
skb               194 net/nfc/rawsock.c 	rc = nfc_data_exchange(dev, target_idx, skb,
skb               206 net/nfc/rawsock.c 	struct sk_buff *skb;
skb               217 net/nfc/rawsock.c 	skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc);
skb               218 net/nfc/rawsock.c 	if (skb == NULL)
skb               221 net/nfc/rawsock.c 	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
skb               223 net/nfc/rawsock.c 		kfree_skb(skb);
skb               228 net/nfc/rawsock.c 	__skb_queue_tail(&sk->sk_write_queue, skb);
skb               243 net/nfc/rawsock.c 	struct sk_buff *skb;
skb               249 net/nfc/rawsock.c 	skb = skb_recv_datagram(sk, flags, noblock, &rc);
skb               250 net/nfc/rawsock.c 	if (!skb)
skb               253 net/nfc/rawsock.c 	copied = skb->len;
skb               259 net/nfc/rawsock.c 	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               261 net/nfc/rawsock.c 	skb_free_datagram(sk, skb);
skb               358 net/nfc/rawsock.c void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
skb               369 net/nfc/rawsock.c 			skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE,
skb                14 net/nsh/nsh.c  int nsh_push(struct sk_buff *skb, const struct nshhdr *pushed_nh)
skb                20 net/nsh/nsh.c  	if (skb->mac_len) {
skb                23 net/nsh/nsh.c  		next_proto = tun_p_from_eth_p(skb->protocol);
skb                29 net/nsh/nsh.c  	if (skb_cow_head(skb, length) < 0)
skb                32 net/nsh/nsh.c  	skb_push(skb, length);
skb                33 net/nsh/nsh.c  	nh = (struct nshhdr *)(skb->data);
skb                36 net/nsh/nsh.c  	skb_postpush_rcsum(skb, nh, length);
skb                38 net/nsh/nsh.c  	skb->protocol = htons(ETH_P_NSH);
skb                39 net/nsh/nsh.c  	skb_reset_mac_header(skb);
skb                40 net/nsh/nsh.c  	skb_reset_network_header(skb);
skb                41 net/nsh/nsh.c  	skb_reset_mac_len(skb);
skb                47 net/nsh/nsh.c  int nsh_pop(struct sk_buff *skb)
skb                53 net/nsh/nsh.c  	if (!pskb_may_pull(skb, NSH_BASE_HDR_LEN))
skb                55 net/nsh/nsh.c  	nh = (struct nshhdr *)(skb->data);
skb                60 net/nsh/nsh.c  	if (!pskb_may_pull(skb, length))
skb                66 net/nsh/nsh.c  	skb_pull_rcsum(skb, length);
skb                67 net/nsh/nsh.c  	skb_reset_mac_header(skb);
skb                68 net/nsh/nsh.c  	skb_reset_network_header(skb);
skb                69 net/nsh/nsh.c  	skb_reset_mac_len(skb);
skb                70 net/nsh/nsh.c  	skb->protocol = inner_proto;
skb                76 net/nsh/nsh.c  static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
skb                84 net/nsh/nsh.c  	skb_reset_network_header(skb);
skb                86 net/nsh/nsh.c  	nhoff = skb->network_header - skb->mac_header;
skb                87 net/nsh/nsh.c  	mac_len = skb->mac_len;
skb                89 net/nsh/nsh.c  	if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
skb                91 net/nsh/nsh.c  	nsh_len = nsh_hdr_len(nsh_hdr(skb));
skb                94 net/nsh/nsh.c  	if (unlikely(!pskb_may_pull(skb, nsh_len)))
skb                97 net/nsh/nsh.c  	proto = tun_p_to_eth_p(nsh_hdr(skb)->np);
skb               101 net/nsh/nsh.c  	__skb_pull(skb, nsh_len);
skb               103 net/nsh/nsh.c  	skb_reset_mac_header(skb);
skb               104 net/nsh/nsh.c  	skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
skb               105 net/nsh/nsh.c  	skb->protocol = proto;
skb               108 net/nsh/nsh.c  	segs = skb_mac_gso_segment(skb, features);
skb               110 net/nsh/nsh.c  		skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len,
skb               111 net/nsh/nsh.c  				     skb->network_header - nhoff,
skb               116 net/nsh/nsh.c  	for (skb = segs; skb; skb = skb->next) {
skb               117 net/nsh/nsh.c  		skb->protocol = htons(ETH_P_NSH);
skb               118 net/nsh/nsh.c  		__skb_push(skb, nsh_len);
skb               119 net/nsh/nsh.c  		skb_set_mac_header(skb, -nhoff);
skb               120 net/nsh/nsh.c  		skb->network_header = skb->mac_header + mac_len;
skb               121 net/nsh/nsh.c  		skb->mac_len = mac_len;
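
nsh_push(), nsh_pop() and nsh_gso_segment() above all lean on nsh_hdr_len(),
which scales the 6-bit length field of the NSH base header by 4. A sketch of
the RFC 8300 base-header fields; the struct here keeps host byte order for
simplicity, unlike the on-wire big-endian fields:

    #include <stdint.h>
    #include <stdio.h>

    struct nshhdr_s {
        uint16_t ver_flags_ttl_len; /* host order in this sketch */
        uint8_t  mdtype;
        uint8_t  np;                /* next protocol: 0x3 = Ethernet (RFC 8300) */
        uint32_t path_hdr;          /* SPI (24 bits) | SI (8 bits) */
    };

    static unsigned nsh_hdr_len(const struct nshhdr_s *nsh)
    {
        return (nsh->ver_flags_ttl_len & 0x3f) << 2; /* 4-byte words -> bytes */
    }

    int main(void)
    {
        struct nshhdr_s nh = {
            .ver_flags_ttl_len = 0x2, /* 2 words: base + service path header */
            .np = 0x3,
            .path_hdr = (42u << 8) | 255, /* hypothetical SPI 42, SI 255 */
        };

        printf("len=%u bytes, SPI=%u SI=%u\n", nsh_hdr_len(&nh),
               nh.path_hdr >> 8, nh.path_hdr & 0xff);
        return 0;
    }
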
skb                36 net/openvswitch/actions.c 	struct sk_buff *skb;
skb               123 net/openvswitch/actions.c static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
skb               134 net/openvswitch/actions.c 		da->skb = skb;
skb               153 net/openvswitch/actions.c static int clone_execute(struct datapath *dp, struct sk_buff *skb,
skb               159 net/openvswitch/actions.c static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
skb               163 net/openvswitch/actions.c static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
skb               168 net/openvswitch/actions.c 	err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
skb               169 net/openvswitch/actions.c 			    skb->mac_len,
skb               178 net/openvswitch/actions.c static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
skb               183 net/openvswitch/actions.c 	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
skb               192 net/openvswitch/actions.c static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
skb               199 net/openvswitch/actions.c 	stack = mpls_hdr(skb);
skb               201 net/openvswitch/actions.c 	err = skb_mpls_update_lse(skb, lse);
skb               209 net/openvswitch/actions.c static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
skb               213 net/openvswitch/actions.c 	err = skb_vlan_pop(skb);
skb               214 net/openvswitch/actions.c 	if (skb_vlan_tag_present(skb)) {
skb               223 net/openvswitch/actions.c static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
skb               226 net/openvswitch/actions.c 	if (skb_vlan_tag_present(skb)) {
skb               232 net/openvswitch/actions.c 	return skb_vlan_push(skb, vlan->vlan_tpid,
skb               248 net/openvswitch/actions.c static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
skb               254 net/openvswitch/actions.c 	err = skb_ensure_writable(skb, ETH_HLEN);
skb               258 net/openvswitch/actions.c 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
skb               260 net/openvswitch/actions.c 	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
skb               262 net/openvswitch/actions.c 	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
skb               265 net/openvswitch/actions.c 	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
skb               267 net/openvswitch/actions.c 	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
skb               268 net/openvswitch/actions.c 	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
skb               275 net/openvswitch/actions.c static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
skb               277 net/openvswitch/actions.c 	skb_pull_rcsum(skb, ETH_HLEN);
skb               278 net/openvswitch/actions.c 	skb_reset_mac_header(skb);
skb               279 net/openvswitch/actions.c 	skb_reset_mac_len(skb);
skb               287 net/openvswitch/actions.c static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
skb               293 net/openvswitch/actions.c 	if (skb_cow_head(skb, ETH_HLEN) < 0)
skb               296 net/openvswitch/actions.c 	skb_push(skb, ETH_HLEN);
skb               297 net/openvswitch/actions.c 	skb_reset_mac_header(skb);
skb               298 net/openvswitch/actions.c 	skb_reset_mac_len(skb);
skb               300 net/openvswitch/actions.c 	hdr = eth_hdr(skb);
skb               303 net/openvswitch/actions.c 	hdr->h_proto = skb->protocol;
skb               305 net/openvswitch/actions.c 	skb_postpush_rcsum(skb, hdr, ETH_HLEN);
skb               313 net/openvswitch/actions.c static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
skb               318 net/openvswitch/actions.c 	err = nsh_push(skb, nh);
skb               328 net/openvswitch/actions.c static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
skb               332 net/openvswitch/actions.c 	err = nsh_pop(skb);
skb               337 net/openvswitch/actions.c 	if (skb->protocol == htons(ETH_P_TEB))
skb               345 net/openvswitch/actions.c static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
skb               348 net/openvswitch/actions.c 	int transport_len = skb->len - skb_transport_offset(skb);
skb               355 net/openvswitch/actions.c 			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
skb               359 net/openvswitch/actions.c 			struct udphdr *uh = udp_hdr(skb);
skb               361 net/openvswitch/actions.c 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
skb               362 net/openvswitch/actions.c 				inet_proto_csum_replace4(&uh->check, skb,
skb               371 net/openvswitch/actions.c static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
skb               374 net/openvswitch/actions.c 	update_ip_l4_checksum(skb, nh, *addr, new_addr);
skb               376 net/openvswitch/actions.c 	skb_clear_hash(skb);
skb               380 net/openvswitch/actions.c static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
skb               383 net/openvswitch/actions.c 	int transport_len = skb->len - skb_transport_offset(skb);
skb               387 net/openvswitch/actions.c 			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
skb               391 net/openvswitch/actions.c 			struct udphdr *uh = udp_hdr(skb);
skb               393 net/openvswitch/actions.c 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
skb               394 net/openvswitch/actions.c 				inet_proto_csum_replace16(&uh->check, skb,
skb               402 net/openvswitch/actions.c 			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
skb               403 net/openvswitch/actions.c 						  skb, addr, new_addr, true);
skb               416 net/openvswitch/actions.c static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
skb               421 net/openvswitch/actions.c 		update_ipv6_checksum(skb, l4_proto, addr, new_addr);
skb               423 net/openvswitch/actions.c 	skb_clear_hash(skb);
skb               435 net/openvswitch/actions.c static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
skb               444 net/openvswitch/actions.c static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
skb               452 net/openvswitch/actions.c 	err = skb_ensure_writable(skb, skb_network_offset(skb) +
skb               457 net/openvswitch/actions.c 	nh = ip_hdr(skb);
skb               467 net/openvswitch/actions.c 			set_ip_addr(skb, nh, &nh->saddr, new_addr);
skb               475 net/openvswitch/actions.c 			set_ip_addr(skb, nh, &nh->daddr, new_addr);
skb               484 net/openvswitch/actions.c 		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
skb               496 net/openvswitch/actions.c static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
skb               503 net/openvswitch/actions.c 	err = skb_ensure_writable(skb, skb_network_offset(skb) +
skb               508 net/openvswitch/actions.c 	nh = ipv6_hdr(skb);
skb               521 net/openvswitch/actions.c 			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
skb               538 net/openvswitch/actions.c 				recalc_csum = (ipv6_find_hdr(skb, &offset,
skb               543 net/openvswitch/actions.c 			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
skb               567 net/openvswitch/actions.c static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
skb               585 net/openvswitch/actions.c 	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
skb               588 net/openvswitch/actions.c 	nh = nsh_hdr(skb);
skb               592 net/openvswitch/actions.c 	err = skb_ensure_writable(skb, skb_network_offset(skb) +
skb               597 net/openvswitch/actions.c 	nh = nsh_hdr(skb);
skb               598 net/openvswitch/actions.c 	skb_postpull_rcsum(skb, nh, length);
skb               626 net/openvswitch/actions.c 	skb_postpush_rcsum(skb, nh, length);
skb               631 net/openvswitch/actions.c static void set_tp_port(struct sk_buff *skb, __be16 *port,
skb               634 net/openvswitch/actions.c 	inet_proto_csum_replace2(check, skb, *port, new_port, false);
skb               638 net/openvswitch/actions.c static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
skb               646 net/openvswitch/actions.c 	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
skb               651 net/openvswitch/actions.c 	uh = udp_hdr(skb);
skb               656 net/openvswitch/actions.c 	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
skb               658 net/openvswitch/actions.c 			set_tp_port(skb, &uh->source, src, &uh->check);
skb               662 net/openvswitch/actions.c 			set_tp_port(skb, &uh->dest, dst, &uh->check);
skb               675 net/openvswitch/actions.c 	skb_clear_hash(skb);
skb               680 net/openvswitch/actions.c static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
skb               688 net/openvswitch/actions.c 	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
skb               693 net/openvswitch/actions.c 	th = tcp_hdr(skb);
skb               696 net/openvswitch/actions.c 		set_tp_port(skb, &th->source, src, &th->check);
skb               701 net/openvswitch/actions.c 		set_tp_port(skb, &th->dest, dst, &th->check);
skb               704 net/openvswitch/actions.c 	skb_clear_hash(skb);
skb               709 net/openvswitch/actions.c static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
skb               713 net/openvswitch/actions.c 	unsigned int sctphoff = skb_transport_offset(skb);
skb               718 net/openvswitch/actions.c 	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
skb               722 net/openvswitch/actions.c 	sh = sctp_hdr(skb);
skb               724 net/openvswitch/actions.c 	old_correct_csum = sctp_compute_cksum(skb, sctphoff);
skb               729 net/openvswitch/actions.c 	new_csum = sctp_compute_cksum(skb, sctphoff);
skb               734 net/openvswitch/actions.c 	skb_clear_hash(skb);
skb               741 net/openvswitch/actions.c static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               746 net/openvswitch/actions.c 	if (skb_cow_head(skb, data->l2_len) < 0) {
skb               747 net/openvswitch/actions.c 		kfree_skb(skb);
skb               751 net/openvswitch/actions.c 	__skb_dst_copy(skb, data->dst);
skb               752 net/openvswitch/actions.c 	*OVS_CB(skb) = data->cb;
skb               753 net/openvswitch/actions.c 	skb->inner_protocol = data->inner_protocol;
skb               755 net/openvswitch/actions.c 		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
skb               757 net/openvswitch/actions.c 		__vlan_hwaccel_clear_tag(skb);
skb               760 net/openvswitch/actions.c 	skb_push(skb, data->l2_len);
skb               761 net/openvswitch/actions.c 	memcpy(skb->data, &data->l2_data, data->l2_len);
skb               762 net/openvswitch/actions.c 	skb_postpush_rcsum(skb, skb->data, data->l2_len);
skb               763 net/openvswitch/actions.c 	skb_reset_mac_header(skb);
skb               765 net/openvswitch/actions.c 	if (eth_p_mpls(skb->protocol)) {
skb               766 net/openvswitch/actions.c 		skb->inner_network_header = skb->network_header;
skb               767 net/openvswitch/actions.c 		skb_set_network_header(skb, data->network_offset);
skb               768 net/openvswitch/actions.c 		skb_reset_mac_len(skb);
skb               771 net/openvswitch/actions.c 	ovs_vport_send(vport, skb, data->mac_proto);
skb               789 net/openvswitch/actions.c static void prepare_frag(struct vport *vport, struct sk_buff *skb,
skb               792 net/openvswitch/actions.c 	unsigned int hlen = skb_network_offset(skb);
skb               796 net/openvswitch/actions.c 	data->dst = skb->_skb_refdst;
skb               798 net/openvswitch/actions.c 	data->cb = *OVS_CB(skb);
skb               799 net/openvswitch/actions.c 	data->inner_protocol = skb->inner_protocol;
skb               801 net/openvswitch/actions.c 	if (skb_vlan_tag_present(skb))
skb               802 net/openvswitch/actions.c 		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
skb               805 net/openvswitch/actions.c 	data->vlan_proto = skb->vlan_proto;
skb               808 net/openvswitch/actions.c 	memcpy(&data->l2_data, skb->data, hlen);
skb               810 net/openvswitch/actions.c 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
skb               811 net/openvswitch/actions.c 	skb_pull(skb, hlen);
skb               815 net/openvswitch/actions.c 			 struct sk_buff *skb, u16 mru,
skb               820 net/openvswitch/actions.c 	if (eth_p_mpls(skb->protocol)) {
skb               821 net/openvswitch/actions.c 		orig_network_offset = skb_network_offset(skb);
skb               822 net/openvswitch/actions.c 		skb->network_header = skb->inner_network_header;
skb               825 net/openvswitch/actions.c 	if (skb_network_offset(skb) > MAX_L2_LEN) {
skb               834 net/openvswitch/actions.c 		prepare_frag(vport, skb, orig_network_offset,
skb               840 net/openvswitch/actions.c 		orig_dst = skb->_skb_refdst;
skb               841 net/openvswitch/actions.c 		skb_dst_set_noref(skb, &ovs_dst);
skb               842 net/openvswitch/actions.c 		IPCB(skb)->frag_max_size = mru;
skb               844 net/openvswitch/actions.c 		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
skb               854 net/openvswitch/actions.c 		prepare_frag(vport, skb, orig_network_offset,
skb               861 net/openvswitch/actions.c 		orig_dst = skb->_skb_refdst;
skb               862 net/openvswitch/actions.c 		skb_dst_set_noref(skb, &ovs_rt.dst);
skb               863 net/openvswitch/actions.c 		IP6CB(skb)->frag_max_size = mru;
skb               865 net/openvswitch/actions.c 		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
skb               876 net/openvswitch/actions.c 	kfree_skb(skb);
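
ovs_fragment() above saves the L2 state via prepare_frag(), points skb_dst at a stub route whose output hook is ovs_vport_output(), and then defers to the stock fragmenters. The IPv4 leg boils down to setting frag_max_size and calling ip_do_fragment() with a per-fragment callback; a stripped-down sketch, where a drop-only callback stands in for the real one that restores L2 state and transmits:

    #include <net/ip.h>

    /* Hypothetical per-fragment callback: ip_do_fragment() calls this
     * once per fragment it produces.  Here it just frees the fragment;
     * the real ovs_vport_output() re-adds the saved L2 header and sends.
     */
    static int drop_frag_output(struct net *net, struct sock *sk,
                                struct sk_buff *skb)
    {
            kfree_skb(skb);
            return 0;
    }

    static void fragment_ipv4(struct net *net, struct sk_buff *skb, u16 mru)
    {
            /* Cap fragment payloads at the MRU recorded for this flow. */
            IPCB(skb)->frag_max_size = mru;
            ip_do_fragment(net, skb->sk, skb, drop_frag_output);
    }
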
skb               879 net/openvswitch/actions.c static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
skb               885 net/openvswitch/actions.c 		u16 mru = OVS_CB(skb)->mru;
skb               886 net/openvswitch/actions.c 		u32 cutlen = OVS_CB(skb)->cutlen;
skb               889 net/openvswitch/actions.c 			if (skb->len - cutlen > ovs_mac_header_len(key))
skb               890 net/openvswitch/actions.c 				pskb_trim(skb, skb->len - cutlen);
skb               892 net/openvswitch/actions.c 				pskb_trim(skb, ovs_mac_header_len(key));
skb               896 net/openvswitch/actions.c 		           (skb->len <= mru + vport->dev->hard_header_len))) {
skb               897 net/openvswitch/actions.c 			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
skb               901 net/openvswitch/actions.c 			ovs_fragment(net, vport, skb, mru, key);
skb               903 net/openvswitch/actions.c 			kfree_skb(skb);
skb               906 net/openvswitch/actions.c 		kfree_skb(skb);
skb               910 net/openvswitch/actions.c static int output_userspace(struct datapath *dp, struct sk_buff *skb,
skb               921 net/openvswitch/actions.c 	upcall.mru = OVS_CB(skb)->mru;
skb               942 net/openvswitch/actions.c 				err = dev_fill_metadata_dst(vport->dev, skb);
skb               944 net/openvswitch/actions.c 					upcall.egress_tun_info = skb_tunnel_info(skb);
skb               960 net/openvswitch/actions.c 	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
skb               967 net/openvswitch/actions.c static int sample(struct datapath *dp, struct sk_buff *skb,
skb               985 net/openvswitch/actions.c 			consume_skb(skb);
skb               990 net/openvswitch/actions.c 	return clone_execute(dp, skb, key, 0, actions, rem, last,
skb               998 net/openvswitch/actions.c static int clone(struct datapath *dp, struct sk_buff *skb,
skb              1012 net/openvswitch/actions.c 	return clone_execute(dp, skb, key, 0, actions, rem, last,
skb              1016 net/openvswitch/actions.c static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
skb              1023 net/openvswitch/actions.c 	hash = skb_get_hash(skb);
skb              1031 net/openvswitch/actions.c static int execute_set_action(struct sk_buff *skb,
skb              1039 net/openvswitch/actions.c 		skb_dst_drop(skb);
skb              1041 net/openvswitch/actions.c 		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
skb              1051 net/openvswitch/actions.c static int execute_masked_set_action(struct sk_buff *skb,
skb              1059 net/openvswitch/actions.c 		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
skb              1061 net/openvswitch/actions.c 		flow_key->phy.priority = skb->priority;
skb              1065 net/openvswitch/actions.c 		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
skb              1066 net/openvswitch/actions.c 		flow_key->phy.skb_mark = skb->mark;
skb              1075 net/openvswitch/actions.c 		err = set_eth_addr(skb, flow_key, nla_data(a),
skb              1080 net/openvswitch/actions.c 		err = set_nsh(skb, flow_key, a);
skb              1084 net/openvswitch/actions.c 		err = set_ipv4(skb, flow_key, nla_data(a),
skb              1089 net/openvswitch/actions.c 		err = set_ipv6(skb, flow_key, nla_data(a),
skb              1094 net/openvswitch/actions.c 		err = set_tcp(skb, flow_key, nla_data(a),
skb              1099 net/openvswitch/actions.c 		err = set_udp(skb, flow_key, nla_data(a),
skb              1104 net/openvswitch/actions.c 		err = set_sctp(skb, flow_key, nla_data(a),
skb              1109 net/openvswitch/actions.c 		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
skb              1126 net/openvswitch/actions.c static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
skb              1135 net/openvswitch/actions.c 		err = ovs_flow_key_update(skb, key);
skb              1142 net/openvswitch/actions.c 	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
skb              1145 net/openvswitch/actions.c static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
skb              1160 net/openvswitch/actions.c 	if (skb->len <= arg->pkt_len) {
skb              1175 net/openvswitch/actions.c 	return clone_execute(dp, skb, key, 0, nla_data(actions),
skb              1180 net/openvswitch/actions.c static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
skb              1201 net/openvswitch/actions.c 				do_output(dp, skb, port, key);
skb              1207 net/openvswitch/actions.c 			clone = skb_clone(skb, GFP_ATOMIC);
skb              1210 net/openvswitch/actions.c 			OVS_CB(skb)->cutlen = 0;
skb              1217 net/openvswitch/actions.c 			if (skb->len > trunc->max_len)
skb              1218 net/openvswitch/actions.c 				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
skb              1223 net/openvswitch/actions.c 			output_userspace(dp, skb, key, a, attr,
skb              1224 net/openvswitch/actions.c 						     len, OVS_CB(skb)->cutlen);
skb              1225 net/openvswitch/actions.c 			OVS_CB(skb)->cutlen = 0;
skb              1229 net/openvswitch/actions.c 			execute_hash(skb, key, a);
skb              1233 net/openvswitch/actions.c 			err = push_mpls(skb, key, nla_data(a));
skb              1237 net/openvswitch/actions.c 			err = pop_mpls(skb, key, nla_get_be16(a));
skb              1241 net/openvswitch/actions.c 			err = push_vlan(skb, key, nla_data(a));
skb              1245 net/openvswitch/actions.c 			err = pop_vlan(skb, key);
skb              1251 net/openvswitch/actions.c 			err = execute_recirc(dp, skb, key, a, last);
skb              1263 net/openvswitch/actions.c 			err = execute_set_action(skb, key, nla_data(a));
skb              1268 net/openvswitch/actions.c 			err = execute_masked_set_action(skb, key, nla_data(a));
skb              1274 net/openvswitch/actions.c 			err = sample(dp, skb, key, a, last);
skb              1283 net/openvswitch/actions.c 				err = ovs_flow_key_update(skb, key);
skb              1288 net/openvswitch/actions.c 			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
skb              1297 net/openvswitch/actions.c 			err = ovs_ct_clear(skb, key);
skb              1301 net/openvswitch/actions.c 			err = push_eth(skb, key, nla_data(a));
skb              1305 net/openvswitch/actions.c 			err = pop_eth(skb, key);
skb              1316 net/openvswitch/actions.c 			err = push_nsh(skb, key, nh);
skb              1321 net/openvswitch/actions.c 			err = pop_nsh(skb, key);
skb              1325 net/openvswitch/actions.c 			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
skb              1326 net/openvswitch/actions.c 				consume_skb(skb);
skb              1334 net/openvswitch/actions.c 			err = clone(dp, skb, key, a, last);
skb              1344 net/openvswitch/actions.c 			err = execute_check_pkt_len(dp, skb, key, a, last);
skb              1353 net/openvswitch/actions.c 			kfree_skb(skb);
skb              1358 net/openvswitch/actions.c 	consume_skb(skb);
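
do_execute_actions() walks the flattened action list with the generic netlink iterators and dispatches on nla_type(). A minimal sketch of that loop; only the OVS_ACTION_ATTR_OUTPUT case is shown, and the handling is a placeholder:

    #include <net/netlink.h>
    #include <linux/openvswitch.h>

    /* Iterate nested action attributes the way do_execute_actions()
     * does: nla_for_each_attr() plus a switch on the attribute type.
     */
    static void walk_actions(const struct nlattr *attr, int len)
    {
            const struct nlattr *a;
            int rem;

            nla_for_each_attr(a, attr, len, rem) {
                    switch (nla_type(a)) {
                    case OVS_ACTION_ATTR_OUTPUT:
                            /* The payload is the output port as a u32. */
                            pr_debug("output to port %u\n", nla_get_u32(a));
                            break;
                    default:
                            break;
                    }
            }
    }
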
skb              1368 net/openvswitch/actions.c static int clone_execute(struct datapath *dp, struct sk_buff *skb,
skb              1376 net/openvswitch/actions.c 	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
skb              1377 net/openvswitch/actions.c 	if (!skb) {
skb              1397 net/openvswitch/actions.c 			err = do_execute_actions(dp, skb, clone,
skb              1404 net/openvswitch/actions.c 			ovs_dp_process_packet(skb, clone);
skb              1410 net/openvswitch/actions.c 	da = add_deferred_actions(skb, key, actions, len);
skb              1420 net/openvswitch/actions.c 		kfree_skb(skb);
skb              1446 net/openvswitch/actions.c 		struct sk_buff *skb = da->skb;
skb              1452 net/openvswitch/actions.c 			do_execute_actions(dp, skb, key, actions, actions_len);
skb              1454 net/openvswitch/actions.c 			ovs_dp_process_packet(skb, key);
skb              1462 net/openvswitch/actions.c int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
skb              1472 net/openvswitch/actions.c 		kfree_skb(skb);
skb              1477 net/openvswitch/actions.c 	OVS_CB(skb)->acts_origlen = acts->orig_len;
skb              1478 net/openvswitch/actions.c 	err = do_execute_actions(dp, skb, key,
skb               233 net/openvswitch/conntrack.c static void ovs_ct_update_key(const struct sk_buff *skb,
skb               243 net/openvswitch/conntrack.c 	ct = nf_ct_get(skb, &ctinfo);
skb               274 net/openvswitch/conntrack.c void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
skb               276 net/openvswitch/conntrack.c 	ovs_ct_update_key(skb, NULL, key, false, false);
skb               284 net/openvswitch/conntrack.c 		   const struct sw_flow_key *output, struct sk_buff *skb)
skb               286 net/openvswitch/conntrack.c 	if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
skb               290 net/openvswitch/conntrack.c 	    nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
skb               294 net/openvswitch/conntrack.c 	    nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
skb               298 net/openvswitch/conntrack.c 	    nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
skb               311 net/openvswitch/conntrack.c 			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
skb               322 net/openvswitch/conntrack.c 			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
skb               432 net/openvswitch/conntrack.c static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
skb               441 net/openvswitch/conntrack.c 	ct = nf_ct_get(skb, &ctinfo);
skb               455 net/openvswitch/conntrack.c 		protoff = ip_hdrlen(skb);
skb               458 net/openvswitch/conntrack.c 		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
skb               462 net/openvswitch/conntrack.c 		ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
skb               476 net/openvswitch/conntrack.c 	err = helper->help(skb, protoff, ct, ctinfo);
skb               485 net/openvswitch/conntrack.c 	    !nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
skb               494 net/openvswitch/conntrack.c 			    u16 zone, struct sk_buff *skb)
skb               496 net/openvswitch/conntrack.c 	struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
skb               502 net/openvswitch/conntrack.c 		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
skb               503 net/openvswitch/conntrack.c 		err = ip_defrag(net, skb, user);
skb               507 net/openvswitch/conntrack.c 		ovs_cb.mru = IPCB(skb)->frag_max_size;
skb               512 net/openvswitch/conntrack.c 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
skb               513 net/openvswitch/conntrack.c 		err = nf_ct_frag6_gather(net, skb, user);
skb               516 net/openvswitch/conntrack.c 				kfree_skb(skb);
skb               520 net/openvswitch/conntrack.c 		key->ip.proto = ipv6_hdr(skb)->nexthdr;
skb               521 net/openvswitch/conntrack.c 		ovs_cb.mru = IP6CB(skb)->frag_max_size;
skb               524 net/openvswitch/conntrack.c 		kfree_skb(skb);
skb               531 net/openvswitch/conntrack.c 	ovs_flow_key_update_l3l4(skb, key);
skb               534 net/openvswitch/conntrack.c 	skb_clear_hash(skb);
skb               535 net/openvswitch/conntrack.c 	skb->ignore_df = 1;
skb               536 net/openvswitch/conntrack.c 	*OVS_CB(skb) = ovs_cb;
skb               543 net/openvswitch/conntrack.c 		   u16 proto, const struct sk_buff *skb)
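
handle_fragments() above zeroes the IP control block, defragments in place, and keeps the resulting frag_max_size as the flow's MRU for later refragmentation. The IPv4 half is essentially one ip_defrag() call; a hedged sketch, where the zone-offset defrag user follows the usual conntrack convention (an assumption here, the listing only shows the ip_defrag() call itself):

    #include <net/ip.h>

    /* Hypothetical wrapper: reassemble an IPv4 packet in place.  On
     * success the skb holds the full datagram and *mru records the
     * largest fragment seen.  ip_defrag() consumes the skb on error.
     */
    static int defrag_ipv4(struct net *net, struct sk_buff *skb,
                           u16 zone, u16 *mru)
    {
            enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
            int err;

            memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
            err = ip_defrag(net, skb, user);
            if (err)
                    return err;

            *mru = IPCB(skb)->frag_max_size;
            return 0;
    }
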
skb               548 net/openvswitch/conntrack.c 	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
skb               607 net/openvswitch/conntrack.c 		     u8 l3num, struct sk_buff *skb, bool natted)
skb               613 net/openvswitch/conntrack.c 	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
skb               644 net/openvswitch/conntrack.c 	nf_ct_set(skb, ct, ovs_ct_get_info(h));
skb               652 net/openvswitch/conntrack.c 				struct sk_buff *skb,
skb               668 net/openvswitch/conntrack.c 		ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
skb               680 net/openvswitch/conntrack.c 			    struct sk_buff *skb)
skb               686 net/openvswitch/conntrack.c 	ct = nf_ct_get(skb, &ctinfo);
skb               688 net/openvswitch/conntrack.c 		ct = ovs_ct_executed(net, key, info, skb, &ct_executed);
skb               691 net/openvswitch/conntrack.c 		nf_ct_get(skb, &ctinfo);
skb               723 net/openvswitch/conntrack.c 		nf_ct_set(skb, NULL, 0);
skb               735 net/openvswitch/conntrack.c static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
skb               742 net/openvswitch/conntrack.c 	nh_off = skb_network_offset(skb);
skb               743 net/openvswitch/conntrack.c 	skb_pull_rcsum(skb, nh_off);
skb               755 net/openvswitch/conntrack.c 		    skb->protocol == htons(ETH_P_IP) &&
skb               756 net/openvswitch/conntrack.c 		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
skb               757 net/openvswitch/conntrack.c 			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
skb               762 net/openvswitch/conntrack.c 			   skb->protocol == htons(ETH_P_IPV6)) {
skb               764 net/openvswitch/conntrack.c 			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
skb               765 net/openvswitch/conntrack.c 			int hdrlen = ipv6_skip_exthdr(skb,
skb               770 net/openvswitch/conntrack.c 				if (!nf_nat_icmpv6_reply_translation(skb, ct,
skb               806 net/openvswitch/conntrack.c 	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
skb               808 net/openvswitch/conntrack.c 	skb_push(skb, nh_off);
skb               809 net/openvswitch/conntrack.c 	skb_postpush_rcsum(skb, skb->data, nh_off);
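
ovs_ct_nat_execute() here (and ovs_ct_execute() further down) bracket the netfilter call with skb_pull_rcsum()/skb_push() so that skb->data sits at the network header, as the NAT and conntrack cores expect, and skb_postpush_rcsum() keeps CHECKSUM_COMPLETE consistent on the way back. The reusable shape of that bracket, as a sketch with a hypothetical callback:

    #include <linux/skbuff.h>

    /* Run fn() with skb->data temporarily at the network header,
     * fixing up the receive checksum across the pull and push.
     */
    static int with_l3_at_data(struct sk_buff *skb,
                               int (*fn)(struct sk_buff *))
    {
            int nh_off = skb_network_offset(skb);
            int err;

            skb_pull_rcsum(skb, nh_off);
            err = fn(skb);
            skb_push(skb, nh_off);
            skb_postpush_rcsum(skb, skb->data, nh_off);
            return err;
    }
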
skb               815 net/openvswitch/conntrack.c 			       const struct sk_buff *skb,
skb               823 net/openvswitch/conntrack.c 			key->ipv4.addr.src = ip_hdr(skb)->saddr;
skb               825 net/openvswitch/conntrack.c 			memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
skb               831 net/openvswitch/conntrack.c 			src = udp_hdr(skb)->source;
skb               833 net/openvswitch/conntrack.c 			src = tcp_hdr(skb)->source;
skb               835 net/openvswitch/conntrack.c 			src = sctp_hdr(skb)->source;
skb               845 net/openvswitch/conntrack.c 			key->ipv4.addr.dst = ip_hdr(skb)->daddr;
skb               847 net/openvswitch/conntrack.c 			memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
skb               853 net/openvswitch/conntrack.c 			dst = udp_hdr(skb)->dest;
skb               855 net/openvswitch/conntrack.c 			dst = tcp_hdr(skb)->dest;
skb               857 net/openvswitch/conntrack.c 			dst = sctp_hdr(skb)->dest;
skb               868 net/openvswitch/conntrack.c 		      struct sk_buff *skb, struct nf_conn *ct,
skb               904 net/openvswitch/conntrack.c 	err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);
skb               913 net/openvswitch/conntrack.c 		err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
skb               919 net/openvswitch/conntrack.c 		ovs_nat_update_key(key, skb, maniptype);
skb               926 net/openvswitch/conntrack.c 		      struct sk_buff *skb, struct nf_conn *ct,
skb               941 net/openvswitch/conntrack.c 			   struct sk_buff *skb)
skb               948 net/openvswitch/conntrack.c 	bool cached = skb_nfct_cached(net, key, info, skb);
skb               963 net/openvswitch/conntrack.c 			if (skb_nfct(skb))
skb               964 net/openvswitch/conntrack.c 				nf_conntrack_put(skb_nfct(skb));
skb               966 net/openvswitch/conntrack.c 			nf_ct_set(skb, tmpl, IP_CT_NEW);
skb               969 net/openvswitch/conntrack.c 		err = nf_conntrack_in(skb, &state);
skb               980 net/openvswitch/conntrack.c 		ovs_ct_update_key(skb, info, key, true, true);
skb               983 net/openvswitch/conntrack.c 	ct = nf_ct_get(skb, &ctinfo);
skb               997 net/openvswitch/conntrack.c 		    ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
skb              1026 net/openvswitch/conntrack.c 		    ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
skb              1037 net/openvswitch/conntrack.c 			 struct sk_buff *skb)
skb              1048 net/openvswitch/conntrack.c 	exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
skb              1061 net/openvswitch/conntrack.c 		err = __ovs_ct_lookup(net, key, info, skb);
skb              1065 net/openvswitch/conntrack.c 		ct = (struct nf_conn *)skb_nfct(skb);
skb              1170 net/openvswitch/conntrack.c 			 struct sk_buff *skb)
skb              1176 net/openvswitch/conntrack.c 	err = __ovs_ct_lookup(net, key, info, skb);
skb              1181 net/openvswitch/conntrack.c 	ct = nf_ct_get(skb, &ctinfo);
skb              1239 net/openvswitch/conntrack.c 	if (nf_conntrack_confirm(skb) != NF_ACCEPT)
skb              1251 net/openvswitch/conntrack.c static int ovs_skb_network_trim(struct sk_buff *skb)
skb              1256 net/openvswitch/conntrack.c 	switch (skb->protocol) {
skb              1258 net/openvswitch/conntrack.c 		len = ntohs(ip_hdr(skb)->tot_len);
skb              1262 net/openvswitch/conntrack.c 			+ ntohs(ipv6_hdr(skb)->payload_len);
skb              1265 net/openvswitch/conntrack.c 		len = skb->len;
skb              1268 net/openvswitch/conntrack.c 	err = pskb_trim_rcsum(skb, len);
skb              1270 net/openvswitch/conntrack.c 		kfree_skb(skb);
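
ovs_skb_network_trim() exists because Ethernet padding past the L3 payload would otherwise be fed into conntrack and checksum updates. The pattern: take the length the network header claims, then pskb_trim_rcsum(). A sketch of the IPv4 case, assuming skb->data already points at the IP header as it does at this point in ovs_ct_execute():

    #include <net/ip.h>

    /* Trim an IPv4 skb to the datagram length in its header so that
     * trailing link-layer padding cannot corrupt later checksumming.
     */
    static int trim_to_ipv4_len(struct sk_buff *skb)
    {
            unsigned int len = ntohs(ip_hdr(skb)->tot_len);

            /* pskb_trim_rcsum() also patches CHECKSUM_COMPLETE state. */
            return pskb_trim_rcsum(skb, len);
    }
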
skb              1278 net/openvswitch/conntrack.c int ovs_ct_execute(struct net *net, struct sk_buff *skb,
skb              1286 net/openvswitch/conntrack.c 	nh_ofs = skb_network_offset(skb);
skb              1287 net/openvswitch/conntrack.c 	skb_pull_rcsum(skb, nh_ofs);
skb              1289 net/openvswitch/conntrack.c 	err = ovs_skb_network_trim(skb);
skb              1294 net/openvswitch/conntrack.c 		err = handle_fragments(net, key, info->zone.id, skb);
skb              1300 net/openvswitch/conntrack.c 		err = ovs_ct_commit(net, key, info, skb);
skb              1302 net/openvswitch/conntrack.c 		err = ovs_ct_lookup(net, key, info, skb);
skb              1304 net/openvswitch/conntrack.c 	skb_push(skb, nh_ofs);
skb              1305 net/openvswitch/conntrack.c 	skb_postpush_rcsum(skb, skb->data, nh_ofs);
skb              1307 net/openvswitch/conntrack.c 		kfree_skb(skb);
skb              1311 net/openvswitch/conntrack.c int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
skb              1313 net/openvswitch/conntrack.c 	if (skb_nfct(skb)) {
skb              1314 net/openvswitch/conntrack.c 		nf_conntrack_put(skb_nfct(skb));
skb              1315 net/openvswitch/conntrack.c 		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
skb              1316 net/openvswitch/conntrack.c 		ovs_ct_fill_key(skb, key);
skb              1712 net/openvswitch/conntrack.c 			       struct sk_buff *skb)
skb              1716 net/openvswitch/conntrack.c 	start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
skb              1721 net/openvswitch/conntrack.c 		if (nla_put_flag(skb, OVS_NAT_ATTR_SRC))
skb              1724 net/openvswitch/conntrack.c 		if (nla_put_flag(skb, OVS_NAT_ATTR_DST))
skb              1733 net/openvswitch/conntrack.c 			if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
skb              1737 net/openvswitch/conntrack.c 			     (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
skb              1742 net/openvswitch/conntrack.c 			if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
skb              1747 net/openvswitch/conntrack.c 			     (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
skb              1755 net/openvswitch/conntrack.c 	    (nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MIN,
skb              1758 net/openvswitch/conntrack.c 	      nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MAX,
skb              1763 net/openvswitch/conntrack.c 	    nla_put_flag(skb, OVS_NAT_ATTR_PERSISTENT))
skb              1766 net/openvswitch/conntrack.c 	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_HASH))
skb              1769 net/openvswitch/conntrack.c 	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_RANDOM))
skb              1772 net/openvswitch/conntrack.c 	nla_nest_end(skb, start);
skb              1779 net/openvswitch/conntrack.c 			  struct sk_buff *skb)
skb              1783 net/openvswitch/conntrack.c 	start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT);
skb              1787 net/openvswitch/conntrack.c 	if (ct_info->commit && nla_put_flag(skb, ct_info->force
skb              1792 net/openvswitch/conntrack.c 	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
skb              1795 net/openvswitch/conntrack.c 	    nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
skb              1800 net/openvswitch/conntrack.c 	    nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
skb              1804 net/openvswitch/conntrack.c 		if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
skb              1809 net/openvswitch/conntrack.c 	    nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask))
skb              1812 net/openvswitch/conntrack.c 		if (nla_put_string(skb, OVS_CT_ATTR_TIMEOUT, ct_info->timeout))
skb              1817 net/openvswitch/conntrack.c 	if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
skb              1820 net/openvswitch/conntrack.c 	nla_nest_end(skb, start);
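
Both ovs_ct_nat_to_attr() and ovs_ct_action_to_attr() above follow the standard nesting discipline: open with nla_nest_start_noflag(), roll the whole nest back if any put fails, close with nla_nest_end(). A minimal sketch of that shape using the same attribute constants:

    #include <net/netlink.h>
    #include <linux/openvswitch.h>

    /* Emit a nested NAT attribute; on buffer exhaustion the partially
     * written nest is rolled back with nla_nest_cancel().
     */
    static bool put_nat_attr(struct sk_buff *skb, bool snat)
    {
            struct nlattr *start;

            start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
            if (!start)
                    return false;

            if (snat && nla_put_flag(skb, OVS_NAT_ATTR_SRC))
                    goto cancel;

            nla_nest_end(skb, start);
            return true;

    cancel:
            nla_nest_cancel(skb, start);
            return false;
    }
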
skb              1906 net/openvswitch/conntrack.c 	struct sk_buff *skb;
skb              1908 net/openvswitch/conntrack.c 	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb              1909 net/openvswitch/conntrack.c 	if (!skb)
skb              1912 net/openvswitch/conntrack.c 	*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
skb              1917 net/openvswitch/conntrack.c 		nlmsg_free(skb);
skb              1922 net/openvswitch/conntrack.c 	return skb;
skb              2117 net/openvswitch/conntrack.c static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
skb              2122 net/openvswitch/conntrack.c 	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
skb              2151 net/openvswitch/conntrack.c static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
skb              2156 net/openvswitch/conntrack.c 	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
skb              2183 net/openvswitch/conntrack.c static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
skb              2189 net/openvswitch/conntrack.c 	struct net *net = sock_net(skb->sk);
skb                26 net/openvswitch/conntrack.h int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key);
skb                28 net/openvswitch/conntrack.h void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
skb                30 net/openvswitch/conntrack.h 		   const struct sw_flow_key *output, struct sk_buff *skb);
skb                57 net/openvswitch/conntrack.h 					struct sk_buff *skb)
skb                62 net/openvswitch/conntrack.h static inline int ovs_ct_execute(struct net *net, struct sk_buff *skb,
skb                66 net/openvswitch/conntrack.h 	kfree_skb(skb);
skb                70 net/openvswitch/conntrack.h static inline int ovs_ct_clear(struct sk_buff *skb,
skb                76 net/openvswitch/conntrack.h static inline void ovs_ct_fill_key(const struct sk_buff *skb,
skb                91 net/openvswitch/conntrack.h 				 struct sk_buff *skb)
skb                79 net/openvswitch/datapath.c 		       struct sk_buff *skb, struct genl_info *info)
skb                81 net/openvswitch/datapath.c 	genl_notify(family, skb, info, 0, GFP_KERNEL);
skb               216 net/openvswitch/datapath.c void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
skb               218 net/openvswitch/datapath.c 	const struct vport *p = OVS_CB(skb)->input_vport;
skb               236 net/openvswitch/datapath.c 		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
skb               237 net/openvswitch/datapath.c 		upcall.mru = OVS_CB(skb)->mru;
skb               238 net/openvswitch/datapath.c 		error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
skb               240 net/openvswitch/datapath.c 			kfree_skb(skb);
skb               242 net/openvswitch/datapath.c 			consume_skb(skb);
skb               247 net/openvswitch/datapath.c 	ovs_flow_stats_update(flow, key->tp.flags, skb);
skb               249 net/openvswitch/datapath.c 	error = ovs_execute_actions(dp, skb, sf_acts, key);
skb               264 net/openvswitch/datapath.c int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
skb               277 net/openvswitch/datapath.c 	if (!skb_is_gso(skb))
skb               278 net/openvswitch/datapath.c 		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
skb               280 net/openvswitch/datapath.c 		err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
skb               296 net/openvswitch/datapath.c static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
skb               301 net/openvswitch/datapath.c 	unsigned int gso_type = skb_shinfo(skb)->gso_type;
skb               306 net/openvswitch/datapath.c 	BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
skb               307 net/openvswitch/datapath.c 	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
skb               323 net/openvswitch/datapath.c 	skb = segs;
skb               325 net/openvswitch/datapath.c 		if (gso_type & SKB_GSO_UDP && skb != segs)
skb               328 net/openvswitch/datapath.c 		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
skb               332 net/openvswitch/datapath.c 	} while ((skb = skb->next));
skb               335 net/openvswitch/datapath.c 	skb = segs;
skb               337 net/openvswitch/datapath.c 		nskb = skb->next;
skb               339 net/openvswitch/datapath.c 			kfree_skb(skb);
skb               341 net/openvswitch/datapath.c 			consume_skb(skb);
skb               342 net/openvswitch/datapath.c 	} while ((skb = nskb));
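
queue_gso_packets() segments the oversized skb in software, upcalls each segment, and then makes a second pass over the list to free it. The segment-and-walk skeleton, with handle_segment() as a hypothetical per-segment consumer (the list is linked through skb->next exactly as in the loop above):

    #include <linux/err.h>
    #include <linux/netdevice.h>

    static int for_each_gso_segment(struct sk_buff *skb,
                                    int (*handle_segment)(struct sk_buff *))
    {
            struct sk_buff *segs, *nskb;
            int err = 0;

            segs = __skb_gso_segment(skb, NETIF_F_SG, false);
            if (IS_ERR(segs))
                    return PTR_ERR(segs);
            if (!segs)
                    return -EINVAL;

            /* Walk the segment list; keep going after a consumer error
             * so that every segment is still freed.
             */
            skb = segs;
            do {
                    if (!err)
                            err = handle_segment(skb);
                    nskb = skb->next;
                    kfree_skb(skb);
            } while ((skb = nskb));

            return err;
    }
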
skb               373 net/openvswitch/datapath.c static void pad_packet(struct datapath *dp, struct sk_buff *skb)
skb               376 net/openvswitch/datapath.c 		size_t plen = NLA_ALIGN(skb->len) - skb->len;
skb               379 net/openvswitch/datapath.c 			skb_put_zero(skb, plen);
skb               383 net/openvswitch/datapath.c static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
skb               400 net/openvswitch/datapath.c 	if (skb_vlan_tag_present(skb)) {
skb               401 net/openvswitch/datapath.c 		nskb = skb_clone(skb, GFP_ATOMIC);
skb               409 net/openvswitch/datapath.c 		skb = nskb;
skb               412 net/openvswitch/datapath.c 	if (nla_attr_size(skb->len) > USHRT_MAX) {
skb               418 net/openvswitch/datapath.c 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb               419 net/openvswitch/datapath.c 	    (err = skb_csum_hwoffload_help(skb, 0)))
skb               427 net/openvswitch/datapath.c 		hlen = skb_zerocopy_headlen(skb);
skb               429 net/openvswitch/datapath.c 		hlen = skb->len;
skb               432 net/openvswitch/datapath.c 			      OVS_CB(skb)->acts_origlen);
skb               499 net/openvswitch/datapath.c 				skb->len)) {
skb               512 net/openvswitch/datapath.c 	nla->nla_len = nla_attr_size(skb->len - cutlen);
skb               514 net/openvswitch/datapath.c 	err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
skb               527 net/openvswitch/datapath.c 		skb_tx_error(skb);
skb               533 net/openvswitch/datapath.c static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
skb               536 net/openvswitch/datapath.c 	struct net *net = sock_net(skb->sk);
skb               735 net/openvswitch/datapath.c 				   struct sk_buff *skb)
skb               744 net/openvswitch/datapath.c 	    nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
skb               749 net/openvswitch/datapath.c 	    nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
skb               755 net/openvswitch/datapath.c 	     nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
skb               763 net/openvswitch/datapath.c 				     struct sk_buff *skb, int skb_orig_len)
skb               778 net/openvswitch/datapath.c 	start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS);
skb               784 net/openvswitch/datapath.c 					  sf_acts->actions_len, skb);
skb               787 net/openvswitch/datapath.c 			nla_nest_end(skb, start);
skb               792 net/openvswitch/datapath.c 			nla_nest_cancel(skb, start);
skb               803 net/openvswitch/datapath.c 				  struct sk_buff *skb, u32 portid,
skb               806 net/openvswitch/datapath.c 	const int skb_orig_len = skb->len;
skb               810 net/openvswitch/datapath.c 	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
skb               817 net/openvswitch/datapath.c 	err = ovs_nla_put_identifier(flow, skb);
skb               822 net/openvswitch/datapath.c 		err = ovs_nla_put_masked_key(flow, skb);
skb               828 net/openvswitch/datapath.c 		err = ovs_nla_put_mask(flow, skb);
skb               833 net/openvswitch/datapath.c 	err = ovs_flow_cmd_fill_stats(flow, skb);
skb               838 net/openvswitch/datapath.c 		err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
skb               843 net/openvswitch/datapath.c 	genlmsg_end(skb, ovs_header);
skb               847 net/openvswitch/datapath.c 	genlmsg_cancel(skb, ovs_header);
skb               858 net/openvswitch/datapath.c 	struct sk_buff *skb;
skb               865 net/openvswitch/datapath.c 	skb = genlmsg_new(len, GFP_KERNEL);
skb               866 net/openvswitch/datapath.c 	if (!skb)
skb               869 net/openvswitch/datapath.c 	return skb;
skb               878 net/openvswitch/datapath.c 	struct sk_buff *skb;
skb               881 net/openvswitch/datapath.c 	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
skb               883 net/openvswitch/datapath.c 	if (IS_ERR_OR_NULL(skb))
skb               884 net/openvswitch/datapath.c 		return skb;
skb               886 net/openvswitch/datapath.c 	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
skb               890 net/openvswitch/datapath.c 		kfree_skb(skb);
skb               891 net/openvswitch/datapath.c 		skb = ERR_PTR(retval);
skb               893 net/openvswitch/datapath.c 	return skb;
skb               896 net/openvswitch/datapath.c static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
skb               898 net/openvswitch/datapath.c 	struct net *net = sock_net(skb->sk);
skb              1135 net/openvswitch/datapath.c static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
skb              1137 net/openvswitch/datapath.c 	struct net *net = sock_net(skb->sk);
skb              1237 net/openvswitch/datapath.c static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
skb              1241 net/openvswitch/datapath.c 	struct net *net = sock_net(skb->sk);
skb              1267 net/openvswitch/datapath.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
skb              1296 net/openvswitch/datapath.c static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
skb              1300 net/openvswitch/datapath.c 	struct net *net = sock_net(skb->sk);
skb              1322 net/openvswitch/datapath.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
skb              1363 net/openvswitch/datapath.c 			netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
skb              1375 net/openvswitch/datapath.c static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              1391 net/openvswitch/datapath.c 	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
skb              1408 net/openvswitch/datapath.c 		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
skb              1409 net/openvswitch/datapath.c 					   NETLINK_CB(cb->skb).portid,
skb              1418 net/openvswitch/datapath.c 	return skb->len;
skb              1483 net/openvswitch/datapath.c static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
skb              1491 net/openvswitch/datapath.c 	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
skb              1498 net/openvswitch/datapath.c 	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
skb              1503 net/openvswitch/datapath.c 	if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
skb              1507 net/openvswitch/datapath.c 	if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
skb              1512 net/openvswitch/datapath.c 	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
skb              1515 net/openvswitch/datapath.c 	genlmsg_end(skb, ovs_header);
skb              1519 net/openvswitch/datapath.c 	genlmsg_cancel(skb, ovs_header);
skb              1547 net/openvswitch/datapath.c static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
skb              1551 net/openvswitch/datapath.c 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
skb              1589 net/openvswitch/datapath.c static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
skb              1612 net/openvswitch/datapath.c 	ovs_dp_set_net(dp, sock_net(skb->sk));
skb              1667 net/openvswitch/datapath.c 				ovs_dp_reset_user_features(skb, info);
skb              1727 net/openvswitch/datapath.c static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
skb              1738 net/openvswitch/datapath.c 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
skb              1760 net/openvswitch/datapath.c static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
skb              1771 net/openvswitch/datapath.c 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
skb              1795 net/openvswitch/datapath.c static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
skb              1806 net/openvswitch/datapath.c 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
skb              1824 net/openvswitch/datapath.c static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              1826 net/openvswitch/datapath.c 	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
skb              1834 net/openvswitch/datapath.c 		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
skb              1844 net/openvswitch/datapath.c 	return skb->len;
skb              1893 net/openvswitch/datapath.c static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
skb              1901 net/openvswitch/datapath.c 	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
skb              1908 net/openvswitch/datapath.c 	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
skb              1909 net/openvswitch/datapath.c 	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
skb              1910 net/openvswitch/datapath.c 	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
skb              1912 net/openvswitch/datapath.c 	    nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
skb              1918 net/openvswitch/datapath.c 		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
skb              1923 net/openvswitch/datapath.c 	if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
skb              1928 net/openvswitch/datapath.c 	if (ovs_vport_get_upcall_portids(vport, skb))
skb              1931 net/openvswitch/datapath.c 	err = ovs_vport_get_options(vport, skb);
skb              1935 net/openvswitch/datapath.c 	genlmsg_end(skb, ovs_header);
skb              1941 net/openvswitch/datapath.c 	genlmsg_cancel(skb, ovs_header);
skb              1954 net/openvswitch/datapath.c 	struct sk_buff *skb;
skb              1957 net/openvswitch/datapath.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb              1958 net/openvswitch/datapath.c 	if (!skb)
skb              1961 net/openvswitch/datapath.c 	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
skb              1965 net/openvswitch/datapath.c 	return skb;
skb              2036 net/openvswitch/datapath.c static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
skb              2065 net/openvswitch/datapath.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
skb              2125 net/openvswitch/datapath.c static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
skb              2137 net/openvswitch/datapath.c 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
skb              2178 net/openvswitch/datapath.c static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
skb              2193 net/openvswitch/datapath.c 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
skb              2233 net/openvswitch/datapath.c static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
skb              2246 net/openvswitch/datapath.c 	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
skb              2264 net/openvswitch/datapath.c static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              2272 net/openvswitch/datapath.c 	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
skb              2283 net/openvswitch/datapath.c 			    ovs_vport_cmd_fill_info(vport, skb,
skb              2284 net/openvswitch/datapath.c 						    sock_net(skb->sk),
skb              2285 net/openvswitch/datapath.c 						    NETLINK_CB(cb->skb).portid,
skb              2302 net/openvswitch/datapath.c 	return skb->len;
skb               103 net/openvswitch/datapath.h #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
skb               223 net/openvswitch/datapath.h void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key);
skb               233 net/openvswitch/datapath.h int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
skb                60 net/openvswitch/flow.c 			   const struct sk_buff *skb)
skb                64 net/openvswitch/flow.c 	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
skb               174 net/openvswitch/flow.c static int check_header(struct sk_buff *skb, int len)
skb               176 net/openvswitch/flow.c 	if (unlikely(skb->len < len))
skb               178 net/openvswitch/flow.c 	if (unlikely(!pskb_may_pull(skb, len)))
skb               183 net/openvswitch/flow.c static bool arphdr_ok(struct sk_buff *skb)
skb               185 net/openvswitch/flow.c 	return pskb_may_pull(skb, skb_network_offset(skb) +
skb               189 net/openvswitch/flow.c static int check_iphdr(struct sk_buff *skb)
skb               191 net/openvswitch/flow.c 	unsigned int nh_ofs = skb_network_offset(skb);
skb               195 net/openvswitch/flow.c 	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
skb               199 net/openvswitch/flow.c 	ip_len = ip_hdrlen(skb);
skb               201 net/openvswitch/flow.c 		     skb->len < nh_ofs + ip_len))
skb               204 net/openvswitch/flow.c 	skb_set_transport_header(skb, nh_ofs + ip_len);
skb               208 net/openvswitch/flow.c static bool tcphdr_ok(struct sk_buff *skb)
skb               210 net/openvswitch/flow.c 	int th_ofs = skb_transport_offset(skb);
skb               213 net/openvswitch/flow.c 	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
skb               216 net/openvswitch/flow.c 	tcp_len = tcp_hdrlen(skb);
skb               218 net/openvswitch/flow.c 		     skb->len < th_ofs + tcp_len))
skb               224 net/openvswitch/flow.c static bool udphdr_ok(struct sk_buff *skb)
skb               226 net/openvswitch/flow.c 	return pskb_may_pull(skb, skb_transport_offset(skb) +
skb               230 net/openvswitch/flow.c static bool sctphdr_ok(struct sk_buff *skb)
skb               232 net/openvswitch/flow.c 	return pskb_may_pull(skb, skb_transport_offset(skb) +
skb               236 net/openvswitch/flow.c static bool icmphdr_ok(struct sk_buff *skb)
skb               238 net/openvswitch/flow.c 	return pskb_may_pull(skb, skb_transport_offset(skb) +
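
Every *hdr_ok() helper above reduces to the same two checks that check_header() spells out: the header must fit inside the packet, and it must be pullable into the linear area before the typed accessors (tcp_hdr(), udp_hdr(), ...) may be used. A generic sketch:

    #include <linux/skbuff.h>

    /* Validate that hdrlen bytes of header exist at offset and are in
     * (or can be pulled into) the skb's linear data area.
     */
    static int header_ok(struct sk_buff *skb, int offset, int hdrlen)
    {
            if (unlikely(skb->len < offset + hdrlen))
                    return -EINVAL;
            if (unlikely(!pskb_may_pull(skb, offset + hdrlen)))
                    return -ENOMEM;
            return 0;
    }
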
skb               242 net/openvswitch/flow.c static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
skb               246 net/openvswitch/flow.c 	unsigned int nh_ofs = skb_network_offset(skb);
skb               251 net/openvswitch/flow.c 	err = check_header(skb, nh_ofs + sizeof(*nh));
skb               255 net/openvswitch/flow.c 	nh = ipv6_hdr(skb);
skb               264 net/openvswitch/flow.c 	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
skb               284 net/openvswitch/flow.c 	skb_set_transport_header(skb, nh_ofs + nh_len);
skb               289 net/openvswitch/flow.c static bool icmp6hdr_ok(struct sk_buff *skb)
skb               291 net/openvswitch/flow.c 	return pskb_may_pull(skb, skb_transport_offset(skb) +
skb               301 net/openvswitch/flow.c static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
skb               304 net/openvswitch/flow.c 	struct vlan_head *vh = (struct vlan_head *)skb->data;
skb               309 net/openvswitch/flow.c 	if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
skb               312 net/openvswitch/flow.c 	if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
skb               316 net/openvswitch/flow.c 	vh = (struct vlan_head *)skb->data;
skb               321 net/openvswitch/flow.c 		int offset = skb->data - skb_mac_header(skb);
skb               325 net/openvswitch/flow.c 		__skb_push(skb, offset);
skb               326 net/openvswitch/flow.c 		err = __skb_vlan_pop(skb, &tci);
skb               327 net/openvswitch/flow.c 		__skb_pull(skb, offset);
skb               330 net/openvswitch/flow.c 		__vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
skb               332 net/openvswitch/flow.c 		__skb_pull(skb, sizeof(struct vlan_head));
skb               345 net/openvswitch/flow.c static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
skb               349 net/openvswitch/flow.c 	if (skb_vlan_tag_present(skb)) {
skb               350 net/openvswitch/flow.c 		key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK);
skb               351 net/openvswitch/flow.c 		key->eth.vlan.tpid = skb->vlan_proto;
skb               354 net/openvswitch/flow.c 		res = parse_vlan_tag(skb, &key->eth.vlan, true);
skb               360 net/openvswitch/flow.c 	res = parse_vlan_tag(skb, &key->eth.cvlan, false);
skb               367 net/openvswitch/flow.c static __be16 parse_ethertype(struct sk_buff *skb)
skb               379 net/openvswitch/flow.c 	proto = *(__be16 *) skb->data;
skb               380 net/openvswitch/flow.c 	__skb_pull(skb, sizeof(__be16));
skb               385 net/openvswitch/flow.c 	if (skb->len < sizeof(struct llc_snap_hdr))
skb               388 net/openvswitch/flow.c 	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
skb               391 net/openvswitch/flow.c 	llc = (struct llc_snap_hdr *) skb->data;
skb               397 net/openvswitch/flow.c 	__skb_pull(skb, sizeof(struct llc_snap_hdr));
skb               405 net/openvswitch/flow.c static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
skb               408 net/openvswitch/flow.c 	struct icmp6hdr *icmp = icmp6_hdr(skb);
skb               420 net/openvswitch/flow.c 		int icmp_len = skb->len - skb_transport_offset(skb);
skb               430 net/openvswitch/flow.c 		if (unlikely(skb_linearize(skb)))
skb               433 net/openvswitch/flow.c 		nd = (struct nd_msg *)skb_transport_header(skb);
skb               479 net/openvswitch/flow.c static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
skb               482 net/openvswitch/flow.c 	unsigned int nh_ofs = skb_network_offset(skb);
skb               486 net/openvswitch/flow.c 	err = check_header(skb, nh_ofs + NSH_BASE_HDR_LEN);
skb               490 net/openvswitch/flow.c 	nh = nsh_hdr(skb);
skb               497 net/openvswitch/flow.c 	err = check_header(skb, nh_ofs + length);
skb               501 net/openvswitch/flow.c 	nh = nsh_hdr(skb);
skb               532 net/openvswitch/flow.c static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
skb               541 net/openvswitch/flow.c 		error = check_iphdr(skb);
skb               546 net/openvswitch/flow.c 				skb->transport_header = skb->network_header;
skb               552 net/openvswitch/flow.c 		nh = ip_hdr(skb);
skb               567 net/openvswitch/flow.c 			skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
skb               574 net/openvswitch/flow.c 			if (tcphdr_ok(skb)) {
skb               575 net/openvswitch/flow.c 				struct tcphdr *tcp = tcp_hdr(skb);
skb               584 net/openvswitch/flow.c 			if (udphdr_ok(skb)) {
skb               585 net/openvswitch/flow.c 				struct udphdr *udp = udp_hdr(skb);
skb               592 net/openvswitch/flow.c 			if (sctphdr_ok(skb)) {
skb               593 net/openvswitch/flow.c 				struct sctphdr *sctp = sctp_hdr(skb);
skb               600 net/openvswitch/flow.c 			if (icmphdr_ok(skb)) {
skb               601 net/openvswitch/flow.c 				struct icmphdr *icmp = icmp_hdr(skb);
skb               615 net/openvswitch/flow.c 		bool arp_available = arphdr_ok(skb);
skb               617 net/openvswitch/flow.c 		arp = (struct arp_eth_header *)skb_network_header(skb);
skb               642 net/openvswitch/flow.c 		skb_set_inner_network_header(skb, skb->mac_len);
skb               646 net/openvswitch/flow.c 			error = check_header(skb, skb->mac_len + stack_len);
skb               650 net/openvswitch/flow.c 			memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);
skb               655 net/openvswitch/flow.c 			skb_set_inner_network_header(skb, skb->mac_len + stack_len);
skb               664 net/openvswitch/flow.c 		nh_len = parse_ipv6hdr(skb, key);
skb               672 net/openvswitch/flow.c 				skb->transport_header = skb->network_header;
skb               685 net/openvswitch/flow.c 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
skb               690 net/openvswitch/flow.c 			if (tcphdr_ok(skb)) {
skb               691 net/openvswitch/flow.c 				struct tcphdr *tcp = tcp_hdr(skb);
skb               699 net/openvswitch/flow.c 			if (udphdr_ok(skb)) {
skb               700 net/openvswitch/flow.c 				struct udphdr *udp = udp_hdr(skb);
skb               707 net/openvswitch/flow.c 			if (sctphdr_ok(skb)) {
skb               708 net/openvswitch/flow.c 				struct sctphdr *sctp = sctp_hdr(skb);
skb               715 net/openvswitch/flow.c 			if (icmp6hdr_ok(skb)) {
skb               716 net/openvswitch/flow.c 				error = parse_icmpv6(skb, key, nh_len);
skb               724 net/openvswitch/flow.c 		error = parse_nsh(skb, key);
skb               756 net/openvswitch/flow.c static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
skb               763 net/openvswitch/flow.c 	skb_reset_mac_header(skb);
skb               768 net/openvswitch/flow.c 		if (unlikely(eth_type_vlan(skb->protocol)))
skb               771 net/openvswitch/flow.c 		skb_reset_network_header(skb);
skb               772 net/openvswitch/flow.c 		key->eth.type = skb->protocol;
skb               774 net/openvswitch/flow.c 		eth = eth_hdr(skb);
skb               778 net/openvswitch/flow.c 		__skb_pull(skb, 2 * ETH_ALEN);
skb               783 net/openvswitch/flow.c 		if (unlikely(parse_vlan(skb, key)))
skb               786 net/openvswitch/flow.c 		key->eth.type = parse_ethertype(skb);
skb               795 net/openvswitch/flow.c 			skb->protocol = key->eth.cvlan.tpid;
skb               797 net/openvswitch/flow.c 			skb->protocol = key->eth.type;
skb               799 net/openvswitch/flow.c 		skb_reset_network_header(skb);
skb               800 net/openvswitch/flow.c 		__skb_push(skb, skb->data - skb_mac_header(skb));
skb               803 net/openvswitch/flow.c 	skb_reset_mac_len(skb);
skb               806 net/openvswitch/flow.c 	return key_extract_l3l4(skb, key);
skb               812 net/openvswitch/flow.c int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
skb               814 net/openvswitch/flow.c 	return key_extract_l3l4(skb, key);
skb               817 net/openvswitch/flow.c int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
skb               821 net/openvswitch/flow.c 	res = key_extract(skb, key);
skb               828 net/openvswitch/flow.c static int key_extract_mac_proto(struct sk_buff *skb)
skb               830 net/openvswitch/flow.c 	switch (skb->dev->type) {
skb               834 net/openvswitch/flow.c 		if (skb->protocol == htons(ETH_P_TEB))
skb               843 net/openvswitch/flow.c 			 struct sk_buff *skb, struct sw_flow_key *key)
skb               872 net/openvswitch/flow.c 	key->phy.priority = skb->priority;
skb               873 net/openvswitch/flow.c 	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
skb               874 net/openvswitch/flow.c 	key->phy.skb_mark = skb->mark;
skb               876 net/openvswitch/flow.c 	res = key_extract_mac_proto(skb);
skb               883 net/openvswitch/flow.c 		tc_ext = skb_ext_find(skb, TC_SKB_EXT);
skb               892 net/openvswitch/flow.c 	err = key_extract(skb, key);
skb               894 net/openvswitch/flow.c 		ovs_ct_fill_key(skb, key);   /* Must be after key_extract(). */
skb               899 net/openvswitch/flow.c 				   struct sk_buff *skb,
skb               924 net/openvswitch/flow.c 	skb->protocol = key->eth.type;
skb               925 net/openvswitch/flow.c 	err = key_extract(skb, key);
skb               272 net/openvswitch/flow.h int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
skb               273 net/openvswitch/flow.h int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key);
skb               275 net/openvswitch/flow.h 			 struct sk_buff *skb,
skb               279 net/openvswitch/flow.h 				   struct sk_buff *skb,
skb               840 net/openvswitch/flow_netlink.c static int vxlan_opt_to_nlattr(struct sk_buff *skb,
skb               846 net/openvswitch/flow_netlink.c 	nla = nla_nest_start_noflag(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
skb               850 net/openvswitch/flow_netlink.c 	if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
skb               853 net/openvswitch/flow_netlink.c 	nla_nest_end(skb, nla);
skb               857 net/openvswitch/flow_netlink.c static int __ip_tun_to_nlattr(struct sk_buff *skb,
skb               863 net/openvswitch/flow_netlink.c 	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
skb               868 net/openvswitch/flow_netlink.c 		return nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE)
skb               874 net/openvswitch/flow_netlink.c 		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
skb               878 net/openvswitch/flow_netlink.c 		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
skb               884 net/openvswitch/flow_netlink.c 		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
skb               888 net/openvswitch/flow_netlink.c 		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
skb               894 net/openvswitch/flow_netlink.c 	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
skb               896 net/openvswitch/flow_netlink.c 	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
skb               899 net/openvswitch/flow_netlink.c 	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
skb               902 net/openvswitch/flow_netlink.c 	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
skb               905 net/openvswitch/flow_netlink.c 	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
skb               908 net/openvswitch/flow_netlink.c 	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
skb               911 net/openvswitch/flow_netlink.c 	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
skb               915 net/openvswitch/flow_netlink.c 		    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
skb               919 net/openvswitch/flow_netlink.c 			 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
skb               922 net/openvswitch/flow_netlink.c 			 nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
skb               930 net/openvswitch/flow_netlink.c static int ip_tun_to_nlattr(struct sk_buff *skb,
skb               938 net/openvswitch/flow_netlink.c 	nla = nla_nest_start_noflag(skb, OVS_KEY_ATTR_TUNNEL);
skb               942 net/openvswitch/flow_netlink.c 	err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
skb               947 net/openvswitch/flow_netlink.c 	nla_nest_end(skb, nla);
skb               951 net/openvswitch/flow_netlink.c int ovs_nla_put_tunnel_info(struct sk_buff *skb,
skb               954 net/openvswitch/flow_netlink.c 	return __ip_tun_to_nlattr(skb, &tun_info->key,
skb              1931 net/openvswitch/flow_netlink.c static int ovs_nla_put_vlan(struct sk_buff *skb, const struct vlan_head *vh,
skb              1936 net/openvswitch/flow_netlink.c 	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
skb              1937 net/openvswitch/flow_netlink.c 	    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, vh->tci))
skb              1943 net/openvswitch/flow_netlink.c 			     struct sk_buff *skb)
skb              1947 net/openvswitch/flow_netlink.c 	start = nla_nest_start_noflag(skb, OVS_KEY_ATTR_NSH);
skb              1951 net/openvswitch/flow_netlink.c 	if (nla_put(skb, OVS_NSH_KEY_ATTR_BASE, sizeof(nsh->base), &nsh->base))
skb              1955 net/openvswitch/flow_netlink.c 		if (nla_put(skb, OVS_NSH_KEY_ATTR_MD1,
skb              1962 net/openvswitch/flow_netlink.c 	nla_nest_end(skb, start);
skb              1972 net/openvswitch/flow_netlink.c 			     struct sk_buff *skb)
skb              1979 net/openvswitch/flow_netlink.c 	if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
skb              1982 net/openvswitch/flow_netlink.c 	if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
skb              1985 net/openvswitch/flow_netlink.c 	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
skb              1994 net/openvswitch/flow_netlink.c 		if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
skb              2001 net/openvswitch/flow_netlink.c 			if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
skb              2007 net/openvswitch/flow_netlink.c 		if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
skb              2012 net/openvswitch/flow_netlink.c 	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
skb              2015 net/openvswitch/flow_netlink.c 	if (ovs_ct_put_key(swkey, output, skb))
skb              2019 net/openvswitch/flow_netlink.c 		nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
skb              2028 net/openvswitch/flow_netlink.c 			if (ovs_nla_put_vlan(skb, &output->eth.vlan, is_mask))
skb              2030 net/openvswitch/flow_netlink.c 			encap = nla_nest_start_noflag(skb, OVS_KEY_ATTR_ENCAP);
skb              2035 net/openvswitch/flow_netlink.c 				if (ovs_nla_put_vlan(skb, &output->eth.cvlan, is_mask))
skb              2037 net/openvswitch/flow_netlink.c 				in_encap = nla_nest_start_noflag(skb,
skb              2052 net/openvswitch/flow_netlink.c 				if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
skb              2059 net/openvswitch/flow_netlink.c 	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
skb              2073 net/openvswitch/flow_netlink.c 		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
skb              2086 net/openvswitch/flow_netlink.c 		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
skb              2100 net/openvswitch/flow_netlink.c 		if (nsh_key_to_nlattr(&output->nsh, is_mask, skb))
skb              2106 net/openvswitch/flow_netlink.c 		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
skb              2119 net/openvswitch/flow_netlink.c 		nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
skb              2133 net/openvswitch/flow_netlink.c 			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
skb              2139 net/openvswitch/flow_netlink.c 			if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
skb              2145 net/openvswitch/flow_netlink.c 			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
skb              2154 net/openvswitch/flow_netlink.c 			nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
skb              2164 net/openvswitch/flow_netlink.c 			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
skb              2174 net/openvswitch/flow_netlink.c 			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
skb              2186 net/openvswitch/flow_netlink.c 				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
skb              2200 net/openvswitch/flow_netlink.c 		nla_nest_end(skb, in_encap);
skb              2202 net/openvswitch/flow_netlink.c 		nla_nest_end(skb, encap);
skb              2212 net/openvswitch/flow_netlink.c 		    struct sk_buff *skb)
skb              2217 net/openvswitch/flow_netlink.c 	nla = nla_nest_start_noflag(skb, attr);
skb              2220 net/openvswitch/flow_netlink.c 	err = __ovs_nla_put_key(swkey, output, is_mask, skb);
skb              2223 net/openvswitch/flow_netlink.c 	nla_nest_end(skb, nla);
skb              2229 net/openvswitch/flow_netlink.c int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
skb              2232 net/openvswitch/flow_netlink.c 		return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
skb              2236 net/openvswitch/flow_netlink.c 			       OVS_FLOW_ATTR_KEY, false, skb);
skb              2240 net/openvswitch/flow_netlink.c int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
skb              2243 net/openvswitch/flow_netlink.c 				OVS_FLOW_ATTR_KEY, false, skb);
skb              2247 net/openvswitch/flow_netlink.c int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
skb              2250 net/openvswitch/flow_netlink.c 				OVS_FLOW_ATTR_MASK, true, skb);
skb              3237 net/openvswitch/flow_netlink.c 				 struct sk_buff *skb)
skb              3244 net/openvswitch/flow_netlink.c 	start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SAMPLE);
skb              3252 net/openvswitch/flow_netlink.c 	if (nla_put_u32(skb, OVS_SAMPLE_ATTR_PROBABILITY, arg->probability)) {
skb              3257 net/openvswitch/flow_netlink.c 	ac_start = nla_nest_start_noflag(skb, OVS_SAMPLE_ATTR_ACTIONS);
skb              3263 net/openvswitch/flow_netlink.c 	err = ovs_nla_put_actions(actions, rem, skb);
skb              3267 net/openvswitch/flow_netlink.c 		nla_nest_cancel(skb, ac_start);
skb              3268 net/openvswitch/flow_netlink.c 		nla_nest_cancel(skb, start);
skb              3270 net/openvswitch/flow_netlink.c 		nla_nest_end(skb, ac_start);
skb              3271 net/openvswitch/flow_netlink.c 		nla_nest_end(skb, start);
skb              3278 net/openvswitch/flow_netlink.c 				struct sk_buff *skb)
skb              3283 net/openvswitch/flow_netlink.c 	start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CLONE);
skb              3287 net/openvswitch/flow_netlink.c 	err = ovs_nla_put_actions(nla_data(attr), rem, skb);
skb              3290 net/openvswitch/flow_netlink.c 		nla_nest_cancel(skb, start);
skb              3292 net/openvswitch/flow_netlink.c 		nla_nest_end(skb, start);
skb              3298 net/openvswitch/flow_netlink.c 					struct sk_buff *skb)
skb              3305 net/openvswitch/flow_netlink.c 	start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CHECK_PKT_LEN);
skb              3315 net/openvswitch/flow_netlink.c 	if (nla_put_u16(skb, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, arg->pkt_len)) {
skb              3324 net/openvswitch/flow_netlink.c 	ac_start =  nla_nest_start_noflag(skb,
skb              3331 net/openvswitch/flow_netlink.c 	err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
skb              3333 net/openvswitch/flow_netlink.c 		nla_nest_cancel(skb, ac_start);
skb              3336 net/openvswitch/flow_netlink.c 		nla_nest_end(skb, ac_start);
skb              3343 net/openvswitch/flow_netlink.c 	ac_start =  nla_nest_start_noflag(skb,
skb              3350 net/openvswitch/flow_netlink.c 	err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
skb              3352 net/openvswitch/flow_netlink.c 		nla_nest_cancel(skb, ac_start);
skb              3355 net/openvswitch/flow_netlink.c 		nla_nest_end(skb, ac_start);
skb              3358 net/openvswitch/flow_netlink.c 	nla_nest_end(skb, start);
skb              3362 net/openvswitch/flow_netlink.c 	nla_nest_cancel(skb, start);
skb              3366 net/openvswitch/flow_netlink.c static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
skb              3378 net/openvswitch/flow_netlink.c 		start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SET);
skb              3382 net/openvswitch/flow_netlink.c 		err =  ip_tun_to_nlattr(skb, &tun_info->key,
skb              3388 net/openvswitch/flow_netlink.c 		nla_nest_end(skb, start);
skb              3392 net/openvswitch/flow_netlink.c 		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
skb              3401 net/openvswitch/flow_netlink.c 						struct sk_buff *skb)
skb              3410 net/openvswitch/flow_netlink.c 	nla = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SET);
skb              3414 net/openvswitch/flow_netlink.c 	if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
skb              3417 net/openvswitch/flow_netlink.c 	nla_nest_end(skb, nla);
skb              3421 net/openvswitch/flow_netlink.c int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
skb              3431 net/openvswitch/flow_netlink.c 			err = set_action_to_attr(a, skb);
skb              3437 net/openvswitch/flow_netlink.c 			err = masked_set_action_to_set_action_attr(a, skb);
skb              3443 net/openvswitch/flow_netlink.c 			err = sample_action_to_attr(a, skb);
skb              3449 net/openvswitch/flow_netlink.c 			err = ovs_ct_action_to_attr(nla_data(a), skb);
skb              3455 net/openvswitch/flow_netlink.c 			err = clone_action_to_attr(a, skb);
skb              3461 net/openvswitch/flow_netlink.c 			err = check_pkt_len_action_to_attr(a, skb);
skb              3467 net/openvswitch/flow_netlink.c 			if (nla_put(skb, type, nla_len(a), nla_data(a)))
skb                41 net/openvswitch/flow_netlink.h int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb);
skb                42 net/openvswitch/flow_netlink.h int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb);
skb                43 net/openvswitch/flow_netlink.h int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
skb                49 net/openvswitch/flow_netlink.h int ovs_nla_put_tunnel_info(struct sk_buff *skb,
skb                63 net/openvswitch/flow_netlink.h 			int len, struct sk_buff *skb);
skb                89 net/openvswitch/meter.c 	struct sk_buff *skb;
skb                92 net/openvswitch/meter.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb                93 net/openvswitch/meter.c 	if (!skb)
skb                96 net/openvswitch/meter.c 	*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
skb               100 net/openvswitch/meter.c 		nlmsg_free(skb);
skb               105 net/openvswitch/meter.c 	return skb;
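
The meter.c reply helper above follows the stock generic-netlink reply recipe: allocate a default-sized message, then place a genl header built from the request's portid and sequence number. A self-contained sketch under the assumption that the family is passed in explicitly (the real code references the OVS meter family directly); toy_reply_start() and the hdr out-parameter are illustrative names:

        #include <linux/err.h>
        #include <net/genetlink.h>

        /* Sketch of the nlmsg_new()+genlmsg_put() reply pattern above. */
        static struct sk_buff *toy_reply_start(struct genl_info *info,
                                               const struct genl_family *family,
                                               u8 cmd, void **hdr)
        {
                struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);

                if (!skb)
                        return ERR_PTR(-ENOMEM);

                *hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
                                   family, 0, cmd);
                if (!*hdr) {
                        nlmsg_free(skb);          /* header did not fit */
                        return ERR_PTR(-EMSGSIZE);
                }
                return skb;
        }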
skb               150 net/openvswitch/meter.c static int ovs_meter_cmd_features(struct sk_buff *skb, struct genl_info *info)
skb               268 net/openvswitch/meter.c static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
skb               296 net/openvswitch/meter.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
skb               337 net/openvswitch/meter.c static int ovs_meter_cmd_get(struct sk_buff *skb, struct genl_info *info)
skb               360 net/openvswitch/meter.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
skb               390 net/openvswitch/meter.c static int ovs_meter_cmd_del(struct sk_buff *skb, struct genl_info *info)
skb               412 net/openvswitch/meter.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
skb               442 net/openvswitch/meter.c bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
skb               474 net/openvswitch/meter.c 	meter->stats.n_bytes += skb->len;
skb               486 net/openvswitch/meter.c 	cost = (meter->kbps) ? skb->len * 8 : 1000;
skb               511 net/openvswitch/meter.c 		band->stats.n_bytes += skb->len;
skb                48 net/openvswitch/meter.h bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
skb                42 net/openvswitch/vport-geneve.c 			      struct sk_buff *skb)
skb                46 net/openvswitch/vport-geneve.c 	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, geneve_port->dst_port))
skb                34 net/openvswitch/vport-internal_dev.c internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
skb                38 net/openvswitch/vport-internal_dev.c 	len = skb->len;
skb                40 net/openvswitch/vport-internal_dev.c 	err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
skb               225 net/openvswitch/vport-internal_dev.c static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
skb               227 net/openvswitch/vport-internal_dev.c 	struct net_device *netdev = skb->dev;
skb               231 net/openvswitch/vport-internal_dev.c 		kfree_skb(skb);
skb               236 net/openvswitch/vport-internal_dev.c 	skb_dst_drop(skb);
skb               237 net/openvswitch/vport-internal_dev.c 	nf_reset_ct(skb);
skb               238 net/openvswitch/vport-internal_dev.c 	secpath_reset(skb);
skb               240 net/openvswitch/vport-internal_dev.c 	skb->pkt_type = PACKET_HOST;
skb               241 net/openvswitch/vport-internal_dev.c 	skb->protocol = eth_type_trans(skb, netdev);
skb               242 net/openvswitch/vport-internal_dev.c 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
skb               247 net/openvswitch/vport-internal_dev.c 	stats->rx_bytes += skb->len;
skb               250 net/openvswitch/vport-internal_dev.c 	netif_rx(skb);
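
internal_dev_recv() above is the textbook way to inject a frame that originated inside the kernel back into the stack: scrub state belonging to the previous context, reclassify the Ethernet header, and queue it with netif_rx(). A condensed sketch of just the skb handling (the stats bookkeeping and the carrier check are elided):

        #include <linux/etherdevice.h>
        #include <linux/netdevice.h>
        #include <net/dst.h>

        /* Sketch of the receive-side skb preparation in internal_dev_recv(). */
        static void toy_internal_recv(struct sk_buff *skb, struct net_device *dev)
        {
                skb_dst_drop(skb);                 /* forget the old route */
                skb->pkt_type = PACKET_HOST;
                skb->protocol = eth_type_trans(skb, dev); /* sets skb->dev, pulls ETH_HLEN */
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
                netif_rx(skb);
        }

skb_postpull_rcsum() keeps a CHECKSUM_COMPLETE value consistent after eth_type_trans() pulled the link-layer header out of the checksummed region.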
skb                29 net/openvswitch/vport-netdev.c static void netdev_port_receive(struct sk_buff *skb)
skb                33 net/openvswitch/vport-netdev.c 	vport = ovs_netdev_get_vport(skb->dev);
skb                37 net/openvswitch/vport-netdev.c 	if (unlikely(skb_warn_if_lro(skb)))
skb                43 net/openvswitch/vport-netdev.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb                44 net/openvswitch/vport-netdev.c 	if (unlikely(!skb))
skb                47 net/openvswitch/vport-netdev.c 	if (skb->dev->type == ARPHRD_ETHER) {
skb                48 net/openvswitch/vport-netdev.c 		skb_push(skb, ETH_HLEN);
skb                49 net/openvswitch/vport-netdev.c 		skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
skb                51 net/openvswitch/vport-netdev.c 	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
skb                54 net/openvswitch/vport-netdev.c 	kfree_skb(skb);
skb                60 net/openvswitch/vport-netdev.c 	struct sk_buff *skb = *pskb;
skb                62 net/openvswitch/vport-netdev.c 	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
skb                65 net/openvswitch/vport-netdev.c 	netdev_port_receive(skb);
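
The netdev vport attaches through the rx_handler hook; the *pskb indirection above exists so the handler can substitute a new skb (here via skb_share_check()) before telling the core what happened. A stripped-down sketch with the datapath hand-off elided; toy_handle_frame() is an illustrative name:

        #include <linux/netdevice.h>

        /* Hedged sketch of the rx_handler pattern in vport-netdev.c. */
        static rx_handler_result_t toy_handle_frame(struct sk_buff **pskb)
        {
                struct sk_buff *skb = *pskb;

                if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
                        return RX_HANDLER_PASS;    /* let the stack see loopback */

                skb = skb_share_check(skb, GFP_ATOMIC);
                if (unlikely(!skb))
                        return RX_HANDLER_CONSUMED;

                /* ... hand the frame to the datapath here ... */
                kfree_skb(skb);                    /* sketch: just drop it */
                return RX_HANDLER_CONSUMED;
        }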
skb                22 net/openvswitch/vport-vxlan.c static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
skb                27 net/openvswitch/vport-vxlan.c 	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
skb                33 net/openvswitch/vport-vxlan.c 		exts = nla_nest_start_noflag(skb, OVS_TUNNEL_ATTR_EXTENSION);
skb                38 net/openvswitch/vport-vxlan.c 		    nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
skb                41 net/openvswitch/vport-vxlan.c 		nla_nest_end(skb, exts);
skb               299 net/openvswitch/vport.c int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
skb               307 net/openvswitch/vport.c 	nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_OPTIONS);
skb               311 net/openvswitch/vport.c 	err = vport->ops->get_options(vport, skb);
skb               313 net/openvswitch/vport.c 		nla_nest_cancel(skb, nla);
skb               317 net/openvswitch/vport.c 	nla_nest_end(skb, nla);
skb               374 net/openvswitch/vport.c 				 struct sk_buff *skb)
skb               381 net/openvswitch/vport.c 		return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
skb               384 net/openvswitch/vport.c 		return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
skb               398 net/openvswitch/vport.c u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
skb               409 net/openvswitch/vport.c 	hash = skb_get_hash(skb);
skb               424 net/openvswitch/vport.c int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
skb               430 net/openvswitch/vport.c 	OVS_CB(skb)->input_vport = vport;
skb               431 net/openvswitch/vport.c 	OVS_CB(skb)->mru = 0;
skb               432 net/openvswitch/vport.c 	OVS_CB(skb)->cutlen = 0;
skb               433 net/openvswitch/vport.c 	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
skb               436 net/openvswitch/vport.c 		mark = skb->mark;
skb               437 net/openvswitch/vport.c 		skb_scrub_packet(skb, true);
skb               438 net/openvswitch/vport.c 		skb->mark = mark;
skb               443 net/openvswitch/vport.c 	error = ovs_flow_key_extract(tun_info, skb, &key);
skb               445 net/openvswitch/vport.c 		kfree_skb(skb);
skb               448 net/openvswitch/vport.c 	ovs_dp_process_packet(skb, &key);
skb               452 net/openvswitch/vport.c static int packet_length(const struct sk_buff *skb,
skb               455 net/openvswitch/vport.c 	int length = skb->len - dev->hard_header_len;
skb               457 net/openvswitch/vport.c 	if (!skb_vlan_tag_present(skb) &&
skb               458 net/openvswitch/vport.c 	    eth_type_vlan(skb->protocol))
skb               469 net/openvswitch/vport.c void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
skb               476 net/openvswitch/vport.c 			skb_reset_network_header(skb);
skb               477 net/openvswitch/vport.c 			skb_reset_mac_len(skb);
skb               478 net/openvswitch/vport.c 			skb->protocol = htons(ETH_P_TEB);
skb               492 net/openvswitch/vport.c 	if (unlikely(packet_length(skb, vport->dev) > mtu &&
skb               493 net/openvswitch/vport.c 		     !skb_is_gso(skb))) {
skb               496 net/openvswitch/vport.c 				     packet_length(skb, vport->dev), mtu);
skb               501 net/openvswitch/vport.c 	skb->dev = vport->dev;
skb               502 net/openvswitch/vport.c 	vport->ops->send(skb);
skb               506 net/openvswitch/vport.c 	kfree_skb(skb);
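
packet_length() and the check in ovs_vport_send() above implement a single rule: a frame may exceed the egress MTU only if GSO will segment it later. A sketch folding both into one predicate, including the VLAN adjustment for an untagged 802.1Q frame; toy_fits_mtu() is an illustrative name:

        #include <linux/if_vlan.h>
        #include <linux/netdevice.h>

        /* Sketch of the MTU check in ovs_vport_send(). */
        static bool toy_fits_mtu(const struct sk_buff *skb,
                                 const struct net_device *dev)
        {
                int len = skb->len - dev->hard_header_len;

                /* A VLAN frame not yet in the tag field carries 4 extra bytes. */
                if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
                        len -= VLAN_HLEN;

                return len <= dev->mtu || skb_is_gso(skb);
        }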
skb               131 net/openvswitch/vport.h 	netdev_tx_t (*send) (struct sk_buff *skb);
skb               187 net/openvswitch/vport.h void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto);
skb               183 net/packet/af_packet.c static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
skb               206 net/packet/af_packet.c static u16 packet_pick_tx_queue(struct sk_buff *skb);
skb               238 net/packet/af_packet.c static int packet_direct_xmit(struct sk_buff *skb)
skb               240 net/packet/af_packet.c 	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
skb               272 net/packet/af_packet.c static u16 packet_pick_tx_queue(struct sk_buff *skb)
skb               274 net/packet/af_packet.c 	struct net_device *dev = skb->dev;
skb               280 net/packet/af_packet.c 	skb->sender_cpu = cpu + 1;
skb               282 net/packet/af_packet.c 	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
skb               284 net/packet/af_packet.c 		queue_index = ops->ndo_select_queue(dev, skb, NULL);
skb               287 net/packet/af_packet.c 		queue_index = netdev_pick_tx(dev, skb, NULL);
skb               411 net/packet/af_packet.c static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
skb               414 net/packet/af_packet.c 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
skb               421 net/packet/af_packet.c 	if (ktime_to_timespec_cond(skb->tstamp, ts))
skb               428 net/packet/af_packet.c 				    struct sk_buff *skb)
skb               434 net/packet/af_packet.c 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
skb               952 net/packet/af_packet.c 	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
skb               964 net/packet/af_packet.c 	if (skb_vlan_tag_present(pkc->skb)) {
skb               965 net/packet/af_packet.c 		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
skb               966 net/packet/af_packet.c 		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
skb              1006 net/packet/af_packet.c 					    struct sk_buff *skb,
skb              1039 net/packet/af_packet.c 	pkc->skb = skb;
skb              1067 net/packet/af_packet.c 					    struct sk_buff *skb,
skb              1078 net/packet/af_packet.c 		return __packet_lookup_frame_in_block(po, skb, len);
skb              1229 net/packet/af_packet.c 				 const struct sk_buff *skb)
skb              1237 net/packet/af_packet.c 				   - (skb ? skb->truesize : 0);
skb              1262 net/packet/af_packet.c static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
skb              1266 net/packet/af_packet.c 	ret = __packet_rcv_has_room(po, skb);
skb              1297 net/packet/af_packet.c static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
skb              1303 net/packet/af_packet.c 	rxhash = skb_get_hash(skb);
skb              1318 net/packet/af_packet.c 				      struct sk_buff *skb,
skb              1321 net/packet/af_packet.c 	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
skb              1325 net/packet/af_packet.c 				    struct sk_buff *skb,
skb              1334 net/packet/af_packet.c 				     struct sk_buff *skb,
skb              1341 net/packet/af_packet.c 				     struct sk_buff *skb,
skb              1348 net/packet/af_packet.c 					  struct sk_buff *skb,
skb              1358 net/packet/af_packet.c 		room = packet_rcv_has_room(po, skb);
skb              1360 net/packet/af_packet.c 		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
skb              1369 net/packet/af_packet.c 		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
skb              1387 net/packet/af_packet.c 				    struct sk_buff *skb,
skb              1390 net/packet/af_packet.c 	return skb_get_queue_mapping(skb) % num;
skb              1394 net/packet/af_packet.c 				     struct sk_buff *skb,
skb              1403 net/packet/af_packet.c 		ret = bpf_prog_run_clear_cb(prog, skb) % num;
skb              1414 net/packet/af_packet.c static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
skb              1424 net/packet/af_packet.c 		kfree_skb(skb);
skb              1429 net/packet/af_packet.c 		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
skb              1430 net/packet/af_packet.c 		if (!skb)
skb              1436 net/packet/af_packet.c 		idx = fanout_demux_hash(f, skb, num);
skb              1439 net/packet/af_packet.c 		idx = fanout_demux_lb(f, skb, num);
skb              1442 net/packet/af_packet.c 		idx = fanout_demux_cpu(f, skb, num);
skb              1445 net/packet/af_packet.c 		idx = fanout_demux_rnd(f, skb, num);
skb              1448 net/packet/af_packet.c 		idx = fanout_demux_qm(f, skb, num);
skb              1451 net/packet/af_packet.c 		idx = fanout_demux_rollover(f, skb, 0, false, num);
skb              1455 net/packet/af_packet.c 		idx = fanout_demux_bpf(f, skb, num);
skb              1460 net/packet/af_packet.c 		idx = fanout_demux_rollover(f, skb, idx, true, num);
skb              1463 net/packet/af_packet.c 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
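
The fanout demux functions above all reduce to "map this skb to one of num sockets". The hash variant uses reciprocal_scale() to avoid a modulo; a sketch of that mapping, noting that the real fanout_demux_hash() uses __skb_get_hash_symmetric() so both directions of a flow land on the same socket, while the plain skb_get_hash() shown here gives no such guarantee:

        #include <linux/kernel.h>
        #include <linux/skbuff.h>

        /* Sketch of hash-based fanout demux, per fanout_demux_hash() above. */
        static unsigned int toy_demux_hash(struct sk_buff *skb, unsigned int num)
        {
                /* reciprocal_scale() maps a u32 uniformly onto [0, num). */
                return reciprocal_scale(skb_get_hash(skb), num);
        }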
skb              1776 net/packet/af_packet.c 					  struct sk_buff *skb)
skb              1785 net/packet/af_packet.c 	skb_reset_mac_header(skb);
skb              1786 net/packet/af_packet.c 	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
skb              1793 net/packet/af_packet.c static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
skb              1817 net/packet/af_packet.c 	if (skb->pkt_type == PACKET_LOOPBACK)
skb              1823 net/packet/af_packet.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb              1824 net/packet/af_packet.c 	if (skb == NULL)
skb              1828 net/packet/af_packet.c 	skb_dst_drop(skb);
skb              1831 net/packet/af_packet.c 	nf_reset_ct(skb);
skb              1833 net/packet/af_packet.c 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
skb              1835 net/packet/af_packet.c 	skb_push(skb, skb->data - skb_mac_header(skb));
skb              1843 net/packet/af_packet.c 	spkt->spkt_protocol = skb->protocol;
skb              1850 net/packet/af_packet.c 	if (sock_queue_rcv_skb(sk, skb) == 0)
skb              1854 net/packet/af_packet.c 	kfree_skb(skb);
skb              1859 net/packet/af_packet.c static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
skb              1861 net/packet/af_packet.c 	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
skb              1863 net/packet/af_packet.c 		skb_reset_mac_header(skb);
skb              1864 net/packet/af_packet.c 		skb->protocol = dev_parse_header_protocol(skb);
skb              1867 net/packet/af_packet.c 	skb_probe_transport_header(skb);
skb              1880 net/packet/af_packet.c 	struct sk_buff *skb = NULL;
skb              1932 net/packet/af_packet.c 	if (!skb) {
skb              1938 net/packet/af_packet.c 		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
skb              1939 net/packet/af_packet.c 		if (skb == NULL)
skb              1945 net/packet/af_packet.c 		skb_reserve(skb, reserved);
skb              1946 net/packet/af_packet.c 		skb_reset_network_header(skb);
skb              1950 net/packet/af_packet.c 			skb->data -= hhlen;
skb              1951 net/packet/af_packet.c 			skb->tail -= hhlen;
skb              1953 net/packet/af_packet.c 				skb_reset_network_header(skb);
skb              1955 net/packet/af_packet.c 		err = memcpy_from_msg(skb_put(skb, len), msg, len);
skb              1961 net/packet/af_packet.c 	if (!dev_validate_header(dev, skb->data, len)) {
skb              1966 net/packet/af_packet.c 	    !packet_extra_vlan_len_allowed(dev, skb)) {
skb              1978 net/packet/af_packet.c 	skb->protocol = proto;
skb              1979 net/packet/af_packet.c 	skb->dev = dev;
skb              1980 net/packet/af_packet.c 	skb->priority = sk->sk_priority;
skb              1981 net/packet/af_packet.c 	skb->mark = sk->sk_mark;
skb              1982 net/packet/af_packet.c 	skb->tstamp = sockc.transmit_time;
skb              1984 net/packet/af_packet.c 	skb_setup_tx_timestamp(skb, sockc.tsflags);
skb              1987 net/packet/af_packet.c 		skb->no_fcs = 1;
skb              1989 net/packet/af_packet.c 	packet_parse_headers(skb, sock);
skb              1991 net/packet/af_packet.c 	dev_queue_xmit(skb);
skb              1998 net/packet/af_packet.c 	kfree_skb(skb);
skb              2002 net/packet/af_packet.c static unsigned int run_filter(struct sk_buff *skb,
skb              2011 net/packet/af_packet.c 		res = bpf_prog_run_clear_cb(filter->prog, skb);
skb              2017 net/packet/af_packet.c static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
skb              2026 net/packet/af_packet.c 	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
skb              2044 net/packet/af_packet.c static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
skb              2050 net/packet/af_packet.c 	u8 *skb_head = skb->data;
skb              2051 net/packet/af_packet.c 	int skb_len = skb->len;
skb              2055 net/packet/af_packet.c 	if (skb->pkt_type == PACKET_LOOPBACK)
skb              2064 net/packet/af_packet.c 	skb->dev = dev;
skb              2075 net/packet/af_packet.c 			skb_push(skb, skb->data - skb_mac_header(skb));
skb              2076 net/packet/af_packet.c 		else if (skb->pkt_type == PACKET_OUTGOING) {
skb              2078 net/packet/af_packet.c 			skb_pull(skb, skb_network_offset(skb));
skb              2082 net/packet/af_packet.c 	snaplen = skb->len;
skb              2084 net/packet/af_packet.c 	res = run_filter(skb, sk, snaplen);
skb              2093 net/packet/af_packet.c 	if (skb_shared(skb)) {
skb              2094 net/packet/af_packet.c 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
skb              2098 net/packet/af_packet.c 		if (skb_head != skb->data) {
skb              2099 net/packet/af_packet.c 			skb->data = skb_head;
skb              2100 net/packet/af_packet.c 			skb->len = skb_len;
skb              2102 net/packet/af_packet.c 		consume_skb(skb);
skb              2103 net/packet/af_packet.c 		skb = nskb;
skb              2106 net/packet/af_packet.c 	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
skb              2108 net/packet/af_packet.c 	sll = &PACKET_SKB_CB(skb)->sa.ll;
skb              2110 net/packet/af_packet.c 	sll->sll_pkttype = skb->pkt_type;
skb              2116 net/packet/af_packet.c 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
skb              2121 net/packet/af_packet.c 	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
skb              2123 net/packet/af_packet.c 	if (pskb_trim(skb, snaplen))
skb              2126 net/packet/af_packet.c 	skb_set_owner_r(skb, sk);
skb              2127 net/packet/af_packet.c 	skb->dev = NULL;
skb              2128 net/packet/af_packet.c 	skb_dst_drop(skb);
skb              2131 net/packet/af_packet.c 	nf_reset_ct(skb);
skb              2135 net/packet/af_packet.c 	sock_skb_set_dropcount(sk, skb);
skb              2136 net/packet/af_packet.c 	__skb_queue_tail(&sk->sk_receive_queue, skb);
skb              2147 net/packet/af_packet.c 	if (skb_head != skb->data && skb_shared(skb)) {
skb              2148 net/packet/af_packet.c 		skb->data = skb_head;
skb              2149 net/packet/af_packet.c 		skb->len = skb_len;
skb              2153 net/packet/af_packet.c 		consume_skb(skb);
skb              2155 net/packet/af_packet.c 		kfree_skb(skb);
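
packet_rcv() above may rewrite skb->data before queueing, which is only legal on an skb it owns exclusively; hence the skb_shared()/skb_clone() dance. A sketch of that step in isolation (the real function additionally restores the saved data and len pointers on failure before dropping):

        #include <linux/skbuff.h>

        /* Sketch of the unshare step in packet_rcv(). */
        static struct sk_buff *toy_unshare(struct sk_buff *skb)
        {
                if (skb_shared(skb)) {
                        struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

                        if (!nskb)
                                return NULL;       /* caller drops the original */
                        consume_skb(skb);          /* release our reference */
                        skb = nskb;
                }
                return skb;                        /* now safe to modify */
        }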
skb              2159 net/packet/af_packet.c static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
skb              2166 net/packet/af_packet.c 	u8 *skb_head = skb->data;
skb              2167 net/packet/af_packet.c 	int skb_len = skb->len;
skb              2185 net/packet/af_packet.c 	if (skb->pkt_type == PACKET_LOOPBACK)
skb              2196 net/packet/af_packet.c 			skb_push(skb, skb->data - skb_mac_header(skb));
skb              2197 net/packet/af_packet.c 		else if (skb->pkt_type == PACKET_OUTGOING) {
skb              2199 net/packet/af_packet.c 			skb_pull(skb, skb_network_offset(skb));
skb              2203 net/packet/af_packet.c 	snaplen = skb->len;
skb              2205 net/packet/af_packet.c 	res = run_filter(skb, sk, snaplen);
skb              2210 net/packet/af_packet.c 	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
skb              2215 net/packet/af_packet.c 	if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              2217 net/packet/af_packet.c 	else if (skb->pkt_type != PACKET_OUTGOING &&
skb              2218 net/packet/af_packet.c 		 (skb->ip_summed == CHECKSUM_COMPLETE ||
skb              2219 net/packet/af_packet.c 		  skb_csum_unnecessary(skb)))
skb              2229 net/packet/af_packet.c 		unsigned int maclen = skb_network_offset(skb);
skb              2243 net/packet/af_packet.c 				if (skb_shared(skb)) {
skb              2244 net/packet/af_packet.c 					copy_skb = skb_clone(skb, GFP_ATOMIC);
skb              2246 net/packet/af_packet.c 					copy_skb = skb_get(skb);
skb              2247 net/packet/af_packet.c 					skb_head = skb->data;
skb              2273 net/packet/af_packet.c 	h.raw = packet_current_rx_frame(po, skb,
skb              2286 net/packet/af_packet.c 	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
skb              2310 net/packet/af_packet.c 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
skb              2312 net/packet/af_packet.c 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
skb              2319 net/packet/af_packet.c 		h.h1->tp_len = skb->len;
skb              2328 net/packet/af_packet.c 		h.h2->tp_len = skb->len;
skb              2334 net/packet/af_packet.c 		if (skb_vlan_tag_present(skb)) {
skb              2335 net/packet/af_packet.c 			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
skb              2336 net/packet/af_packet.c 			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
skb              2350 net/packet/af_packet.c 		h.h3->tp_len = skb->len;
skb              2364 net/packet/af_packet.c 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
skb              2367 net/packet/af_packet.c 	sll->sll_protocol = skb->protocol;
skb              2368 net/packet/af_packet.c 	sll->sll_pkttype = skb->pkt_type;
skb              2400 net/packet/af_packet.c 	if (skb_head != skb->data && skb_shared(skb)) {
skb              2401 net/packet/af_packet.c 		skb->data = skb_head;
skb              2402 net/packet/af_packet.c 		skb->len = skb_len;
skb              2406 net/packet/af_packet.c 		consume_skb(skb);
skb              2408 net/packet/af_packet.c 		kfree_skb(skb);
skb              2421 net/packet/af_packet.c static void tpacket_destruct_skb(struct sk_buff *skb)
skb              2423 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(skb->sk);
skb              2429 net/packet/af_packet.c 		ph = skb_zcopy_get_nouarg(skb);
skb              2432 net/packet/af_packet.c 		ts = __packet_set_timestamp(po, ph, skb);
skb              2439 net/packet/af_packet.c 	sock_wfree(skb);
skb              2471 net/packet/af_packet.c static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb              2484 net/packet/af_packet.c 	skb->protocol = proto;
skb              2485 net/packet/af_packet.c 	skb->dev = dev;
skb              2486 net/packet/af_packet.c 	skb->priority = po->sk.sk_priority;
skb              2487 net/packet/af_packet.c 	skb->mark = po->sk.sk_mark;
skb              2488 net/packet/af_packet.c 	skb->tstamp = sockc->transmit_time;
skb              2489 net/packet/af_packet.c 	skb_setup_tx_timestamp(skb, sockc->tsflags);
skb              2490 net/packet/af_packet.c 	skb_zcopy_set_nouarg(skb, ph.raw);
skb              2492 net/packet/af_packet.c 	skb_reserve(skb, hlen);
skb              2493 net/packet/af_packet.c 	skb_reset_network_header(skb);
skb              2498 net/packet/af_packet.c 		err = dev_hard_header(skb, dev, ntohs(proto), addr,
skb              2505 net/packet/af_packet.c 		skb_push(skb, dev->hard_header_len);
skb              2506 net/packet/af_packet.c 		skb_put(skb, copylen - dev->hard_header_len);
skb              2507 net/packet/af_packet.c 		err = skb_store_bits(skb, 0, data, hdrlen);
skb              2510 net/packet/af_packet.c 		if (!dev_validate_header(dev, skb->data, hdrlen))
skb              2521 net/packet/af_packet.c 	skb->data_len = to_write;
skb              2522 net/packet/af_packet.c 	skb->len += to_write;
skb              2523 net/packet/af_packet.c 	skb->truesize += to_write;
skb              2527 net/packet/af_packet.c 		nr_frags = skb_shinfo(skb)->nr_frags;
skb              2539 net/packet/af_packet.c 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
skb              2546 net/packet/af_packet.c 	packet_parse_headers(skb, sock);
skb              2621 net/packet/af_packet.c 	struct sk_buff *skb = NULL;
skb              2696 net/packet/af_packet.c 			if (need_wait && skb) {
skb              2708 net/packet/af_packet.c 		skb = NULL;
skb              2729 net/packet/af_packet.c 		skb = sock_alloc_send_skb(&po->sk,
skb              2734 net/packet/af_packet.c 		if (unlikely(skb == NULL)) {
skb              2740 net/packet/af_packet.c 		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
skb              2745 net/packet/af_packet.c 		    !packet_extra_vlan_len_allowed(dev, skb))
skb              2754 net/packet/af_packet.c 				kfree_skb(skb);
skb              2764 net/packet/af_packet.c 			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
skb              2768 net/packet/af_packet.c 			virtio_net_hdr_set_proto(skb, vnet_hdr);
skb              2771 net/packet/af_packet.c 		skb->destructor = tpacket_destruct_skb;
skb              2776 net/packet/af_packet.c 		err = po->xmit(skb);
skb              2782 net/packet/af_packet.c 				skb = NULL;
skb              2807 net/packet/af_packet.c 	kfree_skb(skb);
skb              2820 net/packet/af_packet.c 	struct sk_buff *skb;
skb              2826 net/packet/af_packet.c 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
skb              2828 net/packet/af_packet.c 	if (!skb)
skb              2831 net/packet/af_packet.c 	skb_reserve(skb, reserve);
skb              2832 net/packet/af_packet.c 	skb_put(skb, linear);
skb              2833 net/packet/af_packet.c 	skb->data_len = len - linear;
skb              2834 net/packet/af_packet.c 	skb->len += len - linear;
skb              2836 net/packet/af_packet.c 	return skb;
skb              2843 net/packet/af_packet.c 	struct sk_buff *skb;
skb              2921 net/packet/af_packet.c 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
skb              2923 net/packet/af_packet.c 	if (skb == NULL)
skb              2926 net/packet/af_packet.c 	skb_reset_network_header(skb);
skb              2930 net/packet/af_packet.c 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
skb              2934 net/packet/af_packet.c 		skb_reserve(skb, -reserve);
skb              2937 net/packet/af_packet.c 			skb_reset_network_header(skb);
skb              2941 net/packet/af_packet.c 	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
skb              2946 net/packet/af_packet.c 	    !dev_validate_header(dev, skb->data, len)) {
skb              2951 net/packet/af_packet.c 	skb_setup_tx_timestamp(skb, sockc.tsflags);
skb              2954 net/packet/af_packet.c 	    !packet_extra_vlan_len_allowed(dev, skb)) {
skb              2959 net/packet/af_packet.c 	skb->protocol = proto;
skb              2960 net/packet/af_packet.c 	skb->dev = dev;
skb              2961 net/packet/af_packet.c 	skb->priority = sk->sk_priority;
skb              2962 net/packet/af_packet.c 	skb->mark = sockc.mark;
skb              2963 net/packet/af_packet.c 	skb->tstamp = sockc.transmit_time;
skb              2966 net/packet/af_packet.c 		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
skb              2970 net/packet/af_packet.c 		virtio_net_hdr_set_proto(skb, &vnet_hdr);
skb              2973 net/packet/af_packet.c 	packet_parse_headers(skb, sock);
skb              2976 net/packet/af_packet.c 		skb->no_fcs = 1;
skb              2978 net/packet/af_packet.c 	err = po->xmit(skb);
skb              2987 net/packet/af_packet.c 	kfree_skb(skb);
skb              3317 net/packet/af_packet.c 	struct sk_buff *skb;
skb              3347 net/packet/af_packet.c 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
skb              3355 net/packet/af_packet.c 	if (skb == NULL)
skb              3361 net/packet/af_packet.c 		err = packet_rcv_vnet(msg, skb, &len);
skb              3371 net/packet/af_packet.c 	copied = skb->len;
skb              3377 net/packet/af_packet.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb              3382 net/packet/af_packet.c 		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
skb              3385 net/packet/af_packet.c 		origlen = PACKET_SKB_CB(skb)->sa.origlen;
skb              3387 net/packet/af_packet.c 		sll->sll_protocol = skb->protocol;
skb              3390 net/packet/af_packet.c 	sock_recv_ts_and_drops(msg, sk, skb);
skb              3403 net/packet/af_packet.c 			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
skb              3415 net/packet/af_packet.c 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
skb              3422 net/packet/af_packet.c 		if (skb->ip_summed == CHECKSUM_PARTIAL)
skb              3424 net/packet/af_packet.c 		else if (skb->pkt_type != PACKET_OUTGOING &&
skb              3425 net/packet/af_packet.c 			 (skb->ip_summed == CHECKSUM_COMPLETE ||
skb              3426 net/packet/af_packet.c 			  skb_csum_unnecessary(skb)))
skb              3430 net/packet/af_packet.c 		aux.tp_snaplen = skb->len;
skb              3432 net/packet/af_packet.c 		aux.tp_net = skb_network_offset(skb);
skb              3433 net/packet/af_packet.c 		if (skb_vlan_tag_present(skb)) {
skb              3434 net/packet/af_packet.c 			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
skb              3435 net/packet/af_packet.c 			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
skb              3448 net/packet/af_packet.c 	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
skb              3451 net/packet/af_packet.c 	skb_free_datagram(sk, skb);
skb              4129 net/packet/af_packet.c 		struct sk_buff *skb;
skb              4133 net/packet/af_packet.c 		skb = skb_peek(&sk->sk_receive_queue);
skb              4134 net/packet/af_packet.c 		if (skb)
skb              4135 net/packet/af_packet.c 			amount = skb->len;
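
The last af_packet.c lines are the SIOCINQ ioctl: report the length of the first queued packet, holding the receive-queue lock so the peeked skb cannot be freed underneath us. A sketch of that query:

        #include <net/sock.h>

        /* Sketch of the SIOCINQ-style peek at the end of af_packet.c. */
        static int toy_inq(struct sock *sk)
        {
                struct sk_buff *skb;
                int amount = 0;

                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue); /* does not dequeue */
                if (skb)
                        amount = skb->len;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return amount;
        }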
skb                98 net/packet/diag.c static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
skb               104 net/packet/diag.c 			PACKET_DIAG_RX_RING, skb);
skb               107 net/packet/diag.c 				PACKET_DIAG_TX_RING, skb);
skb               129 net/packet/diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
skb               139 net/packet/diag.c 	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
skb               151 net/packet/diag.c 			pdiag_put_info(po, skb))
skb               155 net/packet/diag.c 	    nla_put_u32(skb, PACKET_DIAG_UID,
skb               160 net/packet/diag.c 			pdiag_put_mclist(po, skb))
skb               164 net/packet/diag.c 			pdiag_put_rings_cfg(po, skb))
skb               168 net/packet/diag.c 			pdiag_put_fanout(po, skb))
skb               172 net/packet/diag.c 	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
skb               176 net/packet/diag.c 	    sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
skb               180 net/packet/diag.c 	nlmsg_end(skb, nlh);
skb               184 net/packet/diag.c 	nlmsg_cancel(skb, nlh);
skb               188 net/packet/diag.c static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               196 net/packet/diag.c 	net = sock_net(skb->sk);
skb               198 net/packet/diag.c 	may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
skb               207 net/packet/diag.c 		if (sk_diag_fill(sk, skb, req,
skb               209 net/packet/diag.c 				 sk_user_ns(NETLINK_CB(cb->skb).sk),
skb               210 net/packet/diag.c 				 NETLINK_CB(cb->skb).portid,
skb               221 net/packet/diag.c 	return skb->len;
skb               224 net/packet/diag.c static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
skb               227 net/packet/diag.c 	struct net *net = sock_net(skb->sk);
skb               242 net/packet/diag.c 		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
skb                40 net/packet/internal.h 	struct sk_buff	*skb;
skb               136 net/packet/internal.h 	int			(*xmit)(struct sk_buff *skb);
skb               117 net/phonet/af_phonet.c static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
skb               121 net/phonet/af_phonet.c 	u8 *media = skb_push(skb, 1);
skb               132 net/phonet/af_phonet.c static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr)
skb               134 net/phonet/af_phonet.c 	const u8 *media = skb_mac_header(skb);
skb               148 net/phonet/af_phonet.c static int pn_send(struct sk_buff *skb, struct net_device *dev,
skb               154 net/phonet/af_phonet.c 	if (skb->len + 2 > 0xffff /* Phonet length field limit */ ||
skb               155 net/phonet/af_phonet.c 	    skb->len + sizeof(struct phonethdr) > dev->mtu) {
skb               166 net/phonet/af_phonet.c 	skb_reset_transport_header(skb);
skb               167 net/phonet/af_phonet.c 	WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */
skb               168 net/phonet/af_phonet.c 	skb_push(skb, sizeof(struct phonethdr));
skb               169 net/phonet/af_phonet.c 	skb_reset_network_header(skb);
skb               170 net/phonet/af_phonet.c 	ph = pn_hdr(skb);
skb               174 net/phonet/af_phonet.c 	ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph));
skb               178 net/phonet/af_phonet.c 	skb->protocol = htons(ETH_P_PHONET);
skb               179 net/phonet/af_phonet.c 	skb->priority = 0;
skb               180 net/phonet/af_phonet.c 	skb->dev = dev;
skb               182 net/phonet/af_phonet.c 	if (skb->pkt_type == PACKET_LOOPBACK) {
skb               183 net/phonet/af_phonet.c 		skb_reset_mac_header(skb);
skb               184 net/phonet/af_phonet.c 		skb_orphan(skb);
skb               185 net/phonet/af_phonet.c 		err = (irq ? netif_rx(skb) : netif_rx_ni(skb)) ? -ENOBUFS : 0;
skb               187 net/phonet/af_phonet.c 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
skb               188 net/phonet/af_phonet.c 					NULL, NULL, skb->len);
skb               193 net/phonet/af_phonet.c 		err = dev_queue_xmit(skb);
skb               200 net/phonet/af_phonet.c 	kfree_skb(skb);
skb               207 net/phonet/af_phonet.c 	struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC);
skb               208 net/phonet/af_phonet.c 	if (skb == NULL)
skb               212 net/phonet/af_phonet.c 		skb->pkt_type = PACKET_LOOPBACK;
skb               214 net/phonet/af_phonet.c 	skb_reserve(skb, MAX_PHONET_HEADER);
skb               215 net/phonet/af_phonet.c 	__skb_put(skb, len);
skb               216 net/phonet/af_phonet.c 	skb_copy_to_linear_data(skb, data, len);
skb               217 net/phonet/af_phonet.c 	return pn_send(skb, dev, dst, src, res, 1);
skb               224 net/phonet/af_phonet.c int pn_skb_send(struct sock *sk, struct sk_buff *skb,
skb               249 net/phonet/af_phonet.c 		skb->pkt_type = PACKET_LOOPBACK;
skb               256 net/phonet/af_phonet.c 			skb->pkt_type = PACKET_LOOPBACK;
skb               272 net/phonet/af_phonet.c 	err = pn_send(skb, dev, dst, src, res, 0);
skb               277 net/phonet/af_phonet.c 	kfree_skb(skb);
skb               285 net/phonet/af_phonet.c static inline int can_respond(struct sk_buff *skb)
skb               291 net/phonet/af_phonet.c 	if (!pskb_may_pull(skb, 3))
skb               294 net/phonet/af_phonet.c 	ph = pn_hdr(skb);
skb               295 net/phonet/af_phonet.c 	if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5))
skb               300 net/phonet/af_phonet.c 	ph = pn_hdr(skb); /* re-acquires the pointer */
skb               301 net/phonet/af_phonet.c 	pm = pn_msg(skb);
skb               358 net/phonet/af_phonet.c static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
skb               367 net/phonet/af_phonet.c 	skb = skb_share_check(skb, GFP_ATOMIC);
skb               368 net/phonet/af_phonet.c 	if (!skb)
skb               372 net/phonet/af_phonet.c 	if (!pskb_pull(skb, sizeof(struct phonethdr)))
skb               376 net/phonet/af_phonet.c 	ph = pn_hdr(skb);
skb               381 net/phonet/af_phonet.c 	if ((len > skb->len) || pskb_trim(skb, len))
skb               383 net/phonet/af_phonet.c 	skb_reset_transport_header(skb);
skb               385 net/phonet/af_phonet.c 	pn_skb_get_dst_sockaddr(skb, &sa);
skb               389 net/phonet/af_phonet.c 		pn_deliver_sock_broadcast(net, skb);
skb               397 net/phonet/af_phonet.c 			return sk_receive_skb(sk, skb, 0);
skb               406 net/phonet/af_phonet.c 			return sk_receive_skb(sk, skb, 0);
skb               408 net/phonet/af_phonet.c 		if (can_respond(skb)) {
skb               409 net/phonet/af_phonet.c 			send_obj_unreachable(skb);
skb               410 net/phonet/af_phonet.c 			send_reset_indications(skb);
skb               412 net/phonet/af_phonet.c 	} else if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
skb               425 net/phonet/af_phonet.c 		__skb_push(skb, sizeof(struct phonethdr));
skb               426 net/phonet/af_phonet.c 		skb->dev = out_dev;
skb               434 net/phonet/af_phonet.c 		if (skb_cow_head(skb, out_dev->hard_header_len))
skb               437 net/phonet/af_phonet.c 		if (dev_hard_header(skb, out_dev, ETH_P_PHONET, NULL, NULL,
skb               438 net/phonet/af_phonet.c 					skb->len) < 0)
skb               440 net/phonet/af_phonet.c 		dev_queue_xmit(skb);
skb               448 net/phonet/af_phonet.c 	kfree_skb(skb);
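
phonet_rcv() above trusts nothing from the wire: it linearizes the header with pskb_may_pull(), then clamps the skb to the length the header advertises. A sketch of that validation step; hdr_len and claimed_len stand in for the actual Phonet header parsing:

        #include <linux/skbuff.h>

        /* Sketch of the length validation in phonet_rcv(). */
        static int toy_validate(struct sk_buff *skb, unsigned int hdr_len,
                                unsigned int claimed_len)
        {
                if (!pskb_may_pull(skb, hdr_len))  /* make the header linear */
                        return -EINVAL;
                if (claimed_len > skb->len || pskb_trim(skb, claimed_len))
                        return -EINVAL;            /* bogus length or trim failure */
                skb_reset_transport_header(skb);
                return 0;
        }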
skb                23 net/phonet/datagram.c static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);
skb                33 net/phonet/datagram.c 	struct sk_buff *skb;
skb                39 net/phonet/datagram.c 		skb = skb_peek(&sk->sk_receive_queue);
skb                40 net/phonet/datagram.c 		answ = skb ? skb->len : 0;
skb                76 net/phonet/datagram.c 	struct sk_buff *skb;
skb                92 net/phonet/datagram.c 	skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len,
skb                94 net/phonet/datagram.c 	if (skb == NULL)
skb                96 net/phonet/datagram.c 	skb_reserve(skb, MAX_PHONET_HEADER);
skb                98 net/phonet/datagram.c 	err = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
skb               100 net/phonet/datagram.c 		kfree_skb(skb);
skb               108 net/phonet/datagram.c 	err = pn_skb_send(sk, skb, target);
skb               117 net/phonet/datagram.c 	struct sk_buff *skb = NULL;
skb               126 net/phonet/datagram.c 	skb = skb_recv_datagram(sk, flags, noblock, &rval);
skb               127 net/phonet/datagram.c 	if (skb == NULL)
skb               130 net/phonet/datagram.c 	pn_skb_get_src_sockaddr(skb, &sa);
skb               132 net/phonet/datagram.c 	copylen = skb->len;
skb               138 net/phonet/datagram.c 	rval = skb_copy_datagram_msg(skb, 0, msg, copylen);
skb               144 net/phonet/datagram.c 	rval = (flags & MSG_TRUNC) ? skb->len : copylen;
skb               153 net/phonet/datagram.c 	skb_free_datagram(sk, skb);
skb               160 net/phonet/datagram.c static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
skb               162 net/phonet/datagram.c 	int err = sock_queue_rcv_skb(sk, skb);
skb               165 net/phonet/datagram.c 		kfree_skb(skb);
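
pn_sendmsg() above shows the standard datagram transmit recipe: allocate an skb charged to the socket (blocking as the flags allow), reserve protocol headroom, and copy the payload out of the message iterator. A sketch of the build step; MAX_PHONET_HEADER is the Phonet headroom constant assumed from <net/phonet/phonet.h>, toy_build_dgram() is an illustrative name:

        #include <net/sock.h>
        #include <net/phonet/phonet.h>

        /* Sketch of the datagram construction in pn_sendmsg(). */
        static struct sk_buff *toy_build_dgram(struct sock *sk, struct msghdr *msg,
                                               size_t len, int *err)
        {
                struct sk_buff *skb;

                skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len,
                                          msg->msg_flags & MSG_DONTWAIT, err);
                if (!skb)
                        return NULL;

                skb_reserve(skb, MAX_PHONET_HEADER); /* room for headers below us */
                *err = memcpy_from_msg(skb_put(skb, len), msg, len);
                if (*err) {
                        kfree_skb(skb);
                        return NULL;
                }
                return skb;                          /* ready for pn_skb_send() */
        }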
skb                33 net/phonet/pep-gprs.c static __be16 gprs_type_trans(struct sk_buff *skb)
skb                38 net/phonet/pep-gprs.c 	pvfc = skb_header_pointer(skb, 0, 1, &buf);
skb                75 net/phonet/pep-gprs.c static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
skb                79 net/phonet/pep-gprs.c 	__be16 protocol = gprs_type_trans(skb);
skb                86 net/phonet/pep-gprs.c 	if (skb_headroom(skb) & 3) {
skb                99 net/phonet/pep-gprs.c 		skb_shinfo(rskb)->frag_list = skb;
skb               100 net/phonet/pep-gprs.c 		rskb->len += skb->len;
skb               105 net/phonet/pep-gprs.c 		skb_walk_frags(skb, fs)
skb               107 net/phonet/pep-gprs.c 		skb->next = skb_shinfo(skb)->frag_list;
skb               108 net/phonet/pep-gprs.c 		skb_frag_list_init(skb);
skb               109 net/phonet/pep-gprs.c 		skb->len -= flen;
skb               110 net/phonet/pep-gprs.c 		skb->data_len -= flen;
skb               111 net/phonet/pep-gprs.c 		skb->truesize -= flen;
skb               113 net/phonet/pep-gprs.c 		skb = rskb;
skb               116 net/phonet/pep-gprs.c 	skb->protocol = protocol;
skb               117 net/phonet/pep-gprs.c 	skb_reset_mac_header(skb);
skb               118 net/phonet/pep-gprs.c 	skb->dev = dev;
skb               122 net/phonet/pep-gprs.c 		dev->stats.rx_bytes += skb->len;
skb               123 net/phonet/pep-gprs.c 		netif_rx(skb);
skb               124 net/phonet/pep-gprs.c 		skb = NULL;
skb               129 net/phonet/pep-gprs.c 	if (skb) {
skb               130 net/phonet/pep-gprs.c 		dev_kfree_skb(skb);
skb               139 net/phonet/pep-gprs.c 	struct sk_buff *skb;
skb               141 net/phonet/pep-gprs.c 	while ((skb = pep_read(sk)) != NULL) {
skb               142 net/phonet/pep-gprs.c 		skb_orphan(skb);
skb               143 net/phonet/pep-gprs.c 		gprs_recv(gp, skb);
skb               173 net/phonet/pep-gprs.c static netdev_tx_t gprs_xmit(struct sk_buff *skb, struct net_device *dev)
skb               179 net/phonet/pep-gprs.c 	switch (skb->protocol) {
skb               184 net/phonet/pep-gprs.c 		dev_kfree_skb(skb);
skb               188 net/phonet/pep-gprs.c 	skb_orphan(skb);
skb               189 net/phonet/pep-gprs.c 	skb_set_owner_w(skb, sk);
skb               190 net/phonet/pep-gprs.c 	len = skb->len;
skb               191 net/phonet/pep-gprs.c 	err = pep_write(sk, skb);
skb                45 net/phonet/pep.c static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
skb                55 net/phonet/pep.c 	ph = skb_header_pointer(skb, 0, 2, &h);
skb                56 net/phonet/pep.c 	if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
skb                64 net/phonet/pep.c 	data = skb_header_pointer(skb, 2, buflen, buf);
skb                65 net/phonet/pep.c 	__skb_pull(skb, 2 + ph->sb_len);
skb                72 net/phonet/pep.c 	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
skb                73 net/phonet/pep.c 	if (!skb)
skb                75 net/phonet/pep.c 	skb_set_owner_w(skb, sk);
skb                77 net/phonet/pep.c 	skb_reserve(skb, MAX_PNPIPE_HEADER);
skb                78 net/phonet/pep.c 	__skb_put(skb, len);
skb                79 net/phonet/pep.c 	skb_copy_to_linear_data(skb, payload, len);
skb                80 net/phonet/pep.c 	__skb_push(skb, sizeof(struct pnpipehdr));
skb                81 net/phonet/pep.c 	skb_reset_transport_header(skb);
skb                82 net/phonet/pep.c 	return skb;
skb                90 net/phonet/pep.c 	struct sk_buff *skb;
skb                93 net/phonet/pep.c 	skb = pep_alloc_skb(sk, data, len, priority);
skb                94 net/phonet/pep.c 	if (!skb)
skb                97 net/phonet/pep.c 	ph = pnp_hdr(skb);
skb               104 net/phonet/pep.c 	return pn_skb_send(sk, skb, &peer);
skb               112 net/phonet/pep.c 	struct sk_buff *skb;
skb               114 net/phonet/pep.c 	skb = pep_alloc_skb(sk, data, len, priority);
skb               115 net/phonet/pep.c 	if (!skb)
skb               118 net/phonet/pep.c 	ph = pnp_hdr(skb);
skb               123 net/phonet/pep.c 	return pn_skb_send(sk, skb, NULL);
skb               133 net/phonet/pep.c 	struct sk_buff *skb;
skb               135 net/phonet/pep.c 	skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
skb               136 net/phonet/pep.c 	if (!skb)
skb               139 net/phonet/pep.c 	ph = pnp_hdr(skb);
skb               144 net/phonet/pep.c 	return pn_skb_send(sk, skb, NULL);
skb               159 net/phonet/pep.c static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
skb               176 net/phonet/pep.c 	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
skb               180 net/phonet/pep.c static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
skb               185 net/phonet/pep.c 	return pep_reply(sk, skb, code, data, sizeof(data), priority);
skb               194 net/phonet/pep.c 	struct sk_buff *skb;
skb               203 net/phonet/pep.c 	skb = pep_alloc_skb(sk, data, 4, priority);
skb               204 net/phonet/pep.c 	if (!skb)
skb               207 net/phonet/pep.c 	ph = pnp_hdr(skb);
skb               214 net/phonet/pep.c 	return pn_skb_send(sk, skb, &dst);
skb               252 net/phonet/pep.c static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
skb               258 net/phonet/pep.c 	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
skb               261 net/phonet/pep.c 	hdr = pnp_hdr(skb);
skb               304 net/phonet/pep.c static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
skb               307 net/phonet/pep.c 	struct pnpipehdr *hdr = pnp_hdr(skb);
skb               311 net/phonet/pep.c 	__skb_pull(skb, sizeof(*hdr));
skb               314 net/phonet/pep.c 		u8 *data = pep_get_sb(skb, &type, &len, buf);
skb               333 net/phonet/pep.c static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
skb               336 net/phonet/pep.c 	struct pnpipehdr *hdr = pnp_hdr(skb);
skb               344 net/phonet/pep.c 		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
skb               348 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
skb               356 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
skb               374 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
skb               382 net/phonet/pep.c 		__skb_pull(skb, 4);
skb               387 net/phonet/pep.c 		__skb_pull(skb, 1);
skb               390 net/phonet/pep.c 		__skb_pull(skb, 3); /* Pipe data header */
skb               392 net/phonet/pep.c 			err = sock_queue_rcv_skb(sk, skb);
skb               409 net/phonet/pep.c 		pipe_rcv_status(sk, skb);
skb               413 net/phonet/pep.c 		err = pipe_rcv_created(sk, skb);
skb               417 net/phonet/pep.c 		err = pipe_rcv_created(sk, skb);
skb               447 net/phonet/pep.c 	kfree_skb(skb);
skb               451 net/phonet/pep.c 	skb->dev = NULL;
skb               452 net/phonet/pep.c 	skb_set_owner_r(skb, sk);
skb               453 net/phonet/pep.c 	skb_queue_tail(queue, skb);
skb               482 net/phonet/pep.c static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
skb               488 net/phonet/pep.c 	if (!pskb_pull(skb, sizeof(*hdr) + 4))
skb               491 net/phonet/pep.c 	hdr = pnp_hdr(skb);
skb               499 net/phonet/pep.c 		const u8 *data = pep_get_sb(skb, &type, &len, buf);
skb               524 net/phonet/pep.c static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
skb               526 net/phonet/pep.c 	struct pnpipehdr *hdr = pnp_hdr(skb);
skb               549 net/phonet/pep.c static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
skb               552 net/phonet/pep.c 	struct pnpipehdr *hdr = pnp_hdr(skb);
skb               557 net/phonet/pep.c 		__skb_pull(skb, 1);
skb               560 net/phonet/pep.c 		__skb_pull(skb, 3); /* Pipe data header */
skb               562 net/phonet/pep.c 			err = sock_queue_rcv_skb(sk, skb);
skb               575 net/phonet/pep.c 		skb->dev = NULL;
skb               576 net/phonet/pep.c 		skb_set_owner_r(skb, sk);
skb               577 net/phonet/pep.c 		skb_queue_tail(&sk->sk_receive_queue, skb);
skb               587 net/phonet/pep.c 		if (pep_connresp_rcv(sk, skb)) {
skb               603 net/phonet/pep.c 		if (pep_enableresp_rcv(sk, skb)) {
skb               617 net/phonet/pep.c 		pipe_rcv_status(sk, skb);
skb               620 net/phonet/pep.c 	kfree_skb(skb);
skb               654 net/phonet/pep.c static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
skb               662 net/phonet/pep.c 	if (!pskb_may_pull(skb, sizeof(*hdr)))
skb               665 net/phonet/pep.c 	hdr = pnp_hdr(skb);
skb               670 net/phonet/pep.c 	pn_skb_get_dst_sockaddr(skb, &dst);
skb               675 net/phonet/pep.c 		return sk_receive_skb(sknode, skb, 1);
skb               680 net/phonet/pep.c 			pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
skb               684 net/phonet/pep.c 		skb_queue_head(&sk->sk_receive_queue, skb);
skb               691 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
skb               695 net/phonet/pep.c 		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
skb               708 net/phonet/pep.c 			return pipe_handler_do_rcv(sk, skb);
skb               711 net/phonet/pep.c 	kfree_skb(skb);
skb               719 net/phonet/pep.c 	struct sk_buff *skb;
skb               721 net/phonet/pep.c 	skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
skb               722 net/phonet/pep.c 	if (!skb)
skb               725 net/phonet/pep.c 	ph = pnp_hdr(skb);
skb               730 net/phonet/pep.c 	return pn_skb_send(sk, skb, NULL);
skb               767 net/phonet/pep.c 	struct sk_buff *skb;
skb               775 net/phonet/pep.c 	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
skb               776 net/phonet/pep.c 	if (!skb)
skb               787 net/phonet/pep.c 	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
skb               790 net/phonet/pep.c 	hdr = pnp_hdr(skb);
skb               800 net/phonet/pep.c 		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
skb               810 net/phonet/pep.c 		const u8 *data = pep_get_sb(skb, &type, &len, buf);
skb               832 net/phonet/pep.c 		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
skb               840 net/phonet/pep.c 		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
skb               852 net/phonet/pep.c 	pn_skb_get_dst_sockaddr(skb, &dst);
skb               853 net/phonet/pep.c 	pn_skb_get_src_sockaddr(skb, &src);
skb               869 net/phonet/pep.c 	err = pep_accept_conn(newsk, skb);
skb               878 net/phonet/pep.c 	kfree_skb(skb);
skb              1081 net/phonet/pep.c static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
skb              1089 net/phonet/pep.c 		kfree_skb(skb);
skb              1093 net/phonet/pep.c 	skb_push(skb, 3 + pn->aligned);
skb              1094 net/phonet/pep.c 	skb_reset_transport_header(skb);
skb              1095 net/phonet/pep.c 	ph = pnp_hdr(skb);
skb              1103 net/phonet/pep.c 	err = pn_skb_send(sk, skb, NULL);
skb              1114 net/phonet/pep.c 	struct sk_buff *skb;
skb              1127 net/phonet/pep.c 	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
skb              1129 net/phonet/pep.c 	if (!skb)
skb              1132 net/phonet/pep.c 	skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
skb              1133 net/phonet/pep.c 	err = memcpy_from_msg(skb_put(skb, len), msg, len);
skb              1179 net/phonet/pep.c 	err = pipe_skb_send(sk, skb);
skb              1182 net/phonet/pep.c 	skb = NULL;
skb              1186 net/phonet/pep.c 	kfree_skb(skb);
skb              1197 net/phonet/pep.c int pep_write(struct sock *sk, struct sk_buff *skb)
skb              1203 net/phonet/pep.c 		return pipe_skb_send(sk, skb);
skb              1207 net/phonet/pep.c 		kfree_skb(skb);
skb              1210 net/phonet/pep.c 	skb_shinfo(rskb)->frag_list = skb;
skb              1211 net/phonet/pep.c 	rskb->len += skb->len;
skb              1216 net/phonet/pep.c 	skb_walk_frags(skb, fs)
skb              1218 net/phonet/pep.c 	skb->next = skb_shinfo(skb)->frag_list;
skb              1219 net/phonet/pep.c 	skb_frag_list_init(skb);
skb              1220 net/phonet/pep.c 	skb->len -= flen;
skb              1221 net/phonet/pep.c 	skb->data_len -= flen;
skb              1222 net/phonet/pep.c 	skb->truesize -= flen;
skb              1230 net/phonet/pep.c 	struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
skb              1234 net/phonet/pep.c 	return skb;
skb              1240 net/phonet/pep.c 	struct sk_buff *skb;
skb              1256 net/phonet/pep.c 		skb = skb_dequeue(&pn->ctrlreq_queue);
skb              1257 net/phonet/pep.c 		if (skb) {
skb              1258 net/phonet/pep.c 			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
skb              1267 net/phonet/pep.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
skb              1269 net/phonet/pep.c 	if (skb == NULL) {
skb              1281 net/phonet/pep.c 	if (skb->len > len)
skb              1284 net/phonet/pep.c 		len = skb->len;
skb              1286 net/phonet/pep.c 	err = skb_copy_datagram_msg(skb, 0, msg, len);
skb              1288 net/phonet/pep.c 		err = (flags & MSG_TRUNC) ? skb->len : len;
skb              1290 net/phonet/pep.c 	skb_free_datagram(sk, skb);
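
The net/phonet/pep.c entries above repeatedly show the canonical way to build a packet with protocol headroom: allocate payload plus header space, reserve the headroom, append the payload, then push the header in front and mark it as the transport header (compare pep_alloc_skb() and pipe_skb_send()). A minimal self-contained sketch of that pattern follows; EXAMPLE_HDR_LEN, struct example_hdr and example_alloc_skb() are hypothetical names, not Phonet code.

#include <linux/skbuff.h>

#define EXAMPLE_HDR_LEN 4                       /* hypothetical header size */

struct example_hdr {                            /* hypothetical 4-byte header */
        u8      type;
        u8      flags;
        __be16  len;
};

/* Allocate payload + headroom, reserve the headroom, append the payload,
 * then prepend the header and mark it as the transport header. */
static struct sk_buff *example_alloc_skb(const void *payload, int len,
                                         gfp_t priority)
{
        struct sk_buff *skb;
        struct example_hdr *ph;

        skb = alloc_skb(EXAMPLE_HDR_LEN + len, priority);
        if (!skb)
                return NULL;

        skb_reserve(skb, EXAMPLE_HDR_LEN);      /* room for the header */
        skb_put_data(skb, payload, len);        /* payload after the header */

        ph = skb_push(skb, EXAMPLE_HDR_LEN);    /* prepend the header */
        skb_reset_transport_header(skb);
        ph->type  = 0;
        ph->flags = 0;
        ph->len   = htons(len);
        return skb;
}
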
skb                22 net/phonet/pn_netlink.c static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
skb                27 net/phonet/pn_netlink.c 	struct sk_buff *skb;
skb                30 net/phonet/pn_netlink.c 	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
skb                32 net/phonet/pn_netlink.c 	if (skb == NULL)
skb                34 net/phonet/pn_netlink.c 	err = fill_addr(skb, dev, addr, 0, 0, event);
skb                37 net/phonet/pn_netlink.c 		kfree_skb(skb);
skb                40 net/phonet/pn_netlink.c 	rtnl_notify(skb, dev_net(dev), 0,
skb                51 net/phonet/pn_netlink.c static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb                54 net/phonet/pn_netlink.c 	struct net *net = sock_net(skb->sk);
skb                61 net/phonet/pn_netlink.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb                64 net/phonet/pn_netlink.c 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
skb                95 net/phonet/pn_netlink.c static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
skb               101 net/phonet/pn_netlink.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), 0);
skb               111 net/phonet/pn_netlink.c 	if (nla_put_u8(skb, IFA_LOCAL, addr))
skb               113 net/phonet/pn_netlink.c 	nlmsg_end(skb, nlh);
skb               117 net/phonet/pn_netlink.c 	nlmsg_cancel(skb, nlh);
skb               121 net/phonet/pn_netlink.c static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
skb               128 net/phonet/pn_netlink.c 	pndevs = phonet_device_list(sock_net(skb->sk));
skb               143 net/phonet/pn_netlink.c 			if (fill_addr(skb, pnd->netdev, addr << 2,
skb               144 net/phonet/pn_netlink.c 					 NETLINK_CB(cb->skb).portid,
skb               155 net/phonet/pn_netlink.c 	return skb->len;
skb               160 net/phonet/pn_netlink.c static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
skb               166 net/phonet/pn_netlink.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), 0);
skb               180 net/phonet/pn_netlink.c 	if (nla_put_u8(skb, RTA_DST, dst) ||
skb               181 net/phonet/pn_netlink.c 	    nla_put_u32(skb, RTA_OIF, dev->ifindex))
skb               183 net/phonet/pn_netlink.c 	nlmsg_end(skb, nlh);
skb               187 net/phonet/pn_netlink.c 	nlmsg_cancel(skb, nlh);
skb               193 net/phonet/pn_netlink.c 	struct sk_buff *skb;
skb               196 net/phonet/pn_netlink.c 	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
skb               198 net/phonet/pn_netlink.c 	if (skb == NULL)
skb               200 net/phonet/pn_netlink.c 	err = fill_route(skb, dev, dst, 0, 0, event);
skb               203 net/phonet/pn_netlink.c 		kfree_skb(skb);
skb               206 net/phonet/pn_netlink.c 	rtnl_notify(skb, dev_net(dev), 0,
skb               218 net/phonet/pn_netlink.c static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               221 net/phonet/pn_netlink.c 	struct net *net = sock_net(skb->sk);
skb               228 net/phonet/pn_netlink.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb               231 net/phonet/pn_netlink.c 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
skb               263 net/phonet/pn_netlink.c static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
skb               265 net/phonet/pn_netlink.c 	struct net *net = sock_net(skb->sk);
skb               275 net/phonet/pn_netlink.c 		if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid,
skb               284 net/phonet/pn_netlink.c 	return skb->len;
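
The fill_addr()/fill_route() entries above follow the standard rtnetlink fill pattern: nlmsg_put() opens the message in the skb, nla_put_*() appends attributes, nlmsg_end() commits it, and nlmsg_cancel() unwinds everything when the skb runs out of tailroom. A hedged sketch of the shape (EXAMPLE_ATTR_LOCAL and example_fill() are hypothetical):

#include <net/netlink.h>

#define EXAMPLE_ATTR_LOCAL 1                    /* hypothetical attribute type */

/* Open a netlink message, append one attribute, commit; cancel and
 * report -EMSGSIZE if the skb has no room left. */
static int example_fill(struct sk_buff *skb, u8 addr,
                        u32 portid, u32 seq, int event)
{
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, portid, seq, event, 0, 0);
        if (nlh == NULL)
                return -EMSGSIZE;

        if (nla_put_u8(skb, EXAMPLE_ATTR_LOCAL, addr))
                goto nla_put_failure;

        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}
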
skb               104 net/phonet/socket.c void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
skb               121 net/phonet/socket.c 			clone = skb_clone(skb, GFP_ATOMIC);
skb                85 net/psample/psample.c 					    NETLINK_CB(cb->skb).portid,
skb               210 net/psample/psample.c void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
skb               227 net/psample/psample.c 	data_len = min(skb->len, trunc_size);
skb               257 net/psample/psample.c 	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
skb               277 net/psample/psample.c 		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
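
The net/psample/psample.c entries above show the truncate-and-copy step of packet sampling: the copied length is capped at the truncation size, space is reserved for a data attribute in the notification skb, and skb_copy_bits() handles the (possibly non-linear) packet. A sketch under those assumptions; EXAMPLE_ATTR_DATA and example_put_sample() are hypothetical stand-ins, not the psample attribute names.

#include <net/netlink.h>

#define EXAMPLE_ATTR_DATA 2                     /* hypothetical attribute type */

/* Cap the copy at trunc_size, reserve a data attribute, and copy the
 * packet bytes into it; skb_copy_bits() copes with paged skbs. */
static int example_put_sample(struct sk_buff *nl_skb, struct sk_buff *pkt,
                              unsigned int trunc_size)
{
        unsigned int data_len = min(pkt->len, trunc_size);
        struct nlattr *nla;

        nla = nla_reserve(nl_skb, EXAMPLE_ATTR_DATA, data_len);
        if (!nla)
                return -EMSGSIZE;

        return skb_copy_bits(pkt, 0, nla_data(nla), data_len);
}
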
skb               131 net/qrtr/qrtr.c static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
skb               134 net/qrtr/qrtr.c static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
skb               175 net/qrtr/qrtr.c static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
skb               180 net/qrtr/qrtr.c 	size_t len = skb->len;
skb               183 net/qrtr/qrtr.c 	hdr = skb_push(skb, sizeof(*hdr));
skb               199 net/qrtr/qrtr.c 	skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
skb               203 net/qrtr/qrtr.c 		rc = node->ep->xmit(node->ep, skb);
skb               205 net/qrtr/qrtr.c 		kfree_skb(skb);
skb               256 net/qrtr/qrtr.c 	struct sk_buff *skb;
skb               265 net/qrtr/qrtr.c 	skb = netdev_alloc_skb(NULL, len);
skb               266 net/qrtr/qrtr.c 	if (!skb)
skb               269 net/qrtr/qrtr.c 	cb = (struct qrtr_cb *)skb->cb;
skb               317 net/qrtr/qrtr.c 	skb_put_data(skb, data + hdrlen, size);
skb               319 net/qrtr/qrtr.c 	skb_queue_tail(&node->rx_queue, skb);
skb               325 net/qrtr/qrtr.c 	kfree_skb(skb);
skb               343 net/qrtr/qrtr.c 	struct sk_buff *skb;
skb               345 net/qrtr/qrtr.c 	skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, GFP_KERNEL);
skb               346 net/qrtr/qrtr.c 	if (!skb)
skb               349 net/qrtr/qrtr.c 	skb_reserve(skb, QRTR_HDR_MAX_SIZE);
skb               350 net/qrtr/qrtr.c 	*pkt = skb_put_zero(skb, pkt_len);
skb               352 net/qrtr/qrtr.c 	return skb;
skb               368 net/qrtr/qrtr.c 	struct sk_buff *skb;
skb               370 net/qrtr/qrtr.c 	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
skb               375 net/qrtr/qrtr.c 		cb = (struct qrtr_cb *)skb->cb;
skb               386 net/qrtr/qrtr.c 			kfree_skb(skb);
skb               388 net/qrtr/qrtr.c 			if (sock_queue_rcv_skb(&ipc->sk, skb))
skb               389 net/qrtr/qrtr.c 				kfree_skb(skb);
skb               395 net/qrtr/qrtr.c 			skb = qrtr_alloc_ctrl_packet(&pkt);
skb               396 net/qrtr/qrtr.c 			if (!skb)
skb               403 net/qrtr/qrtr.c 			if (qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX,
skb               457 net/qrtr/qrtr.c 	struct sk_buff *skb;
skb               464 net/qrtr/qrtr.c 	skb = qrtr_alloc_ctrl_packet(&pkt);
skb               465 net/qrtr/qrtr.c 	if (skb) {
skb               467 net/qrtr/qrtr.c 		qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
skb               505 net/qrtr/qrtr.c 	struct sk_buff *skb;
skb               513 net/qrtr/qrtr.c 	skb = qrtr_alloc_ctrl_packet(&pkt);
skb               514 net/qrtr/qrtr.c 	if (skb) {
skb               519 net/qrtr/qrtr.c 		skb_set_owner_w(skb, &ipc->sk);
skb               520 net/qrtr/qrtr.c 		qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
skb               669 net/qrtr/qrtr.c static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
skb               677 net/qrtr/qrtr.c 	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
skb               678 net/qrtr/qrtr.c 		kfree_skb(skb);
skb               682 net/qrtr/qrtr.c 	cb = (struct qrtr_cb *)skb->cb;
skb               686 net/qrtr/qrtr.c 	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
skb               688 net/qrtr/qrtr.c 		kfree_skb(skb);
skb               698 net/qrtr/qrtr.c static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
skb               706 net/qrtr/qrtr.c 		skbn = skb_clone(skb, GFP_KERNEL);
skb               709 net/qrtr/qrtr.c 		skb_set_owner_w(skbn, skb->sk);
skb               714 net/qrtr/qrtr.c 	qrtr_local_enqueue(NULL, skb, type, from, to);
skb               728 net/qrtr/qrtr.c 	struct sk_buff *skb;
skb               784 net/qrtr/qrtr.c 	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
skb               786 net/qrtr/qrtr.c 	if (!skb)
skb               789 net/qrtr/qrtr.c 	skb_reserve(skb, QRTR_HDR_MAX_SIZE);
skb               791 net/qrtr/qrtr.c 	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
skb               793 net/qrtr/qrtr.c 		kfree_skb(skb);
skb               800 net/qrtr/qrtr.c 			kfree_skb(skb);
skb               805 net/qrtr/qrtr.c 		skb_copy_bits(skb, 0, &qrtr_type, 4);
skb               809 net/qrtr/qrtr.c 	rc = enqueue_fn(node, skb, type, &ipc->us, addr);
skb               825 net/qrtr/qrtr.c 	struct sk_buff *skb;
skb               836 net/qrtr/qrtr.c 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
skb               838 net/qrtr/qrtr.c 	if (!skb) {
skb               843 net/qrtr/qrtr.c 	copied = skb->len;
skb               849 net/qrtr/qrtr.c 	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
skb               855 net/qrtr/qrtr.c 		cb = (struct qrtr_cb *)skb->cb;
skb               863 net/qrtr/qrtr.c 	skb_free_datagram(sk, skb);
skb               933 net/qrtr/qrtr.c 	struct sk_buff *skb;
skb               948 net/qrtr/qrtr.c 		skb = skb_peek(&sk->sk_receive_queue);
skb               949 net/qrtr/qrtr.c 		if (skb)
skb               950 net/qrtr/qrtr.c 			len = skb->len;
skb              1074 net/qrtr/qrtr.c static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1081 net/qrtr/qrtr.c 	if (!netlink_capable(skb, CAP_NET_ADMIN))
skb              1084 net/qrtr/qrtr.c 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
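
The qrtr_bcast_enqueue() entries above illustrate the clone-per-receiver broadcast idiom: each remote node gets its own skb_clone() (the clones share the payload pages), skb_set_owner_w() keeps socket send-buffer accounting intact, and the original skb is consumed by the final local delivery. A minimal sketch, with struct example_node, example_node_list and the queue-tail delivery all hypothetical:

#include <linux/list.h>
#include <linux/skbuff.h>

struct example_node {                           /* hypothetical remote node */
        struct list_head item;
        struct sk_buff_head tx_queue;
};

static LIST_HEAD(example_node_list);            /* hypothetical node list */

/* Clone once per receiver; the original is consumed at the end. */
static void example_bcast(struct sk_buff *skb)
{
        struct example_node *node;
        struct sk_buff *skbn;

        list_for_each_entry(node, &example_node_list, item) {
                skbn = skb_clone(skb, GFP_KERNEL);
                if (!skbn)
                        break;
                if (skb->sk)
                        skb_set_owner_w(skbn, skb->sk); /* keep accounting */
                skb_queue_tail(&node->tx_queue, skbn);
        }
        consume_skb(skb);       /* stands in for the local enqueue */
}
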
skb                21 net/qrtr/qrtr.h 	int (*xmit)(struct qrtr_endpoint *ep, struct sk_buff *skb);
skb                40 net/qrtr/smd.c static int qcom_smd_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
skb                45 net/qrtr/smd.c 	rc = skb_linearize(skb);
skb                49 net/qrtr/smd.c 	rc = rpmsg_send(qdev->channel, skb->data, skb->len);
skb                53 net/qrtr/smd.c 		kfree_skb(skb);
skb                55 net/qrtr/smd.c 		consume_skb(skb);
skb                19 net/qrtr/tun.c static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
skb                23 net/qrtr/tun.c 	skb_queue_tail(&tun->queue, skb);
skb                53 net/qrtr/tun.c 	struct sk_buff *skb;
skb                56 net/qrtr/tun.c 	while (!(skb = skb_dequeue(&tun->queue))) {
skb                66 net/qrtr/tun.c 	count = min_t(size_t, iov_iter_count(to), skb->len);
skb                67 net/qrtr/tun.c 	if (copy_to_iter(skb->data, count, to) != count)
skb                70 net/qrtr/tun.c 	kfree_skb(skb);
skb               114 net/qrtr/tun.c 	struct sk_buff *skb;
skb               120 net/qrtr/tun.c 		skb = skb_dequeue(&tun->queue);
skb               121 net/qrtr/tun.c 		kfree_skb(skb);
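
The qrtr_tun_release() entries above show the standard teardown for an skb FIFO: dequeue until empty and free each buffer. Sketched below for completeness; in-tree code could equally call skb_queue_purge(), which is exactly this loop.

#include <linux/skbuff.h>

/* Drain the queue and free whatever is still pending. */
static void example_queue_flush(struct sk_buff_head *queue)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(queue)) != NULL)
                kfree_skb(skb);
}
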
skb                65 net/rds/tcp_recv.c 	struct sk_buff *skb;
skb                73 net/rds/tcp_recv.c 	skb_queue_walk(&tinc->ti_skb_list, skb) {
skb                75 net/rds/tcp_recv.c 		for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
skb                77 net/rds/tcp_recv.c 			to_copy = min(to_copy, skb->len - skb_off);
skb                79 net/rds/tcp_recv.c 			if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
skb               108 net/rds/tcp_recv.c 	struct sk_buff *skb;
skb               123 net/rds/tcp_recv.c 	skb_queue_walk(&tinc->ti_skb_list, skb) {
skb               125 net/rds/tcp_recv.c 		while (skb_off < skb->len) {
skb               127 net/rds/tcp_recv.c 					skb->len - skb_off);
skb               132 net/rds/tcp_recv.c 			ret = skb_copy_bits(skb, skb_off,
skb               154 net/rds/tcp_recv.c static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
skb               164 net/rds/tcp_recv.c 	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
skb               196 net/rds/tcp_recv.c 				 skb);
skb               197 net/rds/tcp_recv.c 			skb_copy_bits(skb, offset,
skb               218 net/rds/tcp_recv.c 			clone = pskb_extract(skb, offset, to_copy, arg->gfp);
skb               228 net/rds/tcp_recv.c 				 skb, skb->data, skb->len, offset, to_copy,
skb               256 net/rds/tcp_recv.c 		 len, left, skb->len,
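
The net/rds/tcp_recv.c entries above walk a message that arrived as a list of skbs, flattening it with skb_queue_walk() plus skb_copy_bits(). A hedged sketch of that walk-and-copy shape (example_copy_out() is hypothetical; the caller is assumed to guarantee the buffer size and that the list is stable meanwhile):

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Copy up to buflen bytes out of a list of skbs into one flat buffer. */
static size_t example_copy_out(struct sk_buff_head *list, void *buf,
                               size_t buflen)
{
        struct sk_buff *skb;
        char *dst = buf;
        size_t copied = 0;

        skb_queue_walk(list, skb) {
                size_t to_copy = min_t(size_t, skb->len, buflen - copied);

                if (skb_copy_bits(skb, 0, dst + copied, to_copy))
                        break;                  /* malformed skb, stop */
                copied += to_copy;
                if (copied == buflen)
                        break;
        }
        return copied;
}
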
skb               311 net/rose/af_rose.c 	struct sk_buff *skb;
skb               320 net/rose/af_rose.c 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
skb               321 net/rose/af_rose.c 		if (skb->sk != sk) {	/* A pending connection */
skb               323 net/rose/af_rose.c 			sock_set_flag(skb->sk, SOCK_DEAD);
skb               324 net/rose/af_rose.c 			rose_start_heartbeat(skb->sk);
skb               325 net/rose/af_rose.c 			rose_sk(skb->sk)->state = ROSE_STATE_0;
skb               328 net/rose/af_rose.c 		kfree_skb(skb);
skb               855 net/rose/af_rose.c 	struct sk_buff *skb;
skb               882 net/rose/af_rose.c 		skb = skb_dequeue(&sk->sk_receive_queue);
skb               883 net/rose/af_rose.c 		if (skb)
skb               903 net/rose/af_rose.c 	newsk = skb->sk;
skb               907 net/rose/af_rose.c 	skb->sk = NULL;
skb               908 net/rose/af_rose.c 	kfree_skb(skb);
skb               947 net/rose/af_rose.c int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
skb               955 net/rose/af_rose.c 	skb->sk = NULL;		/* Initially we don't know who it's for */
skb               962 net/rose/af_rose.c 	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
skb               963 net/rose/af_rose.c 				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
skb               980 net/rose/af_rose.c 	skb->sk     = make;
skb              1018 net/rose/af_rose.c 	skb_queue_head(&sk->sk_receive_queue, skb);
skb              1035 net/rose/af_rose.c 	struct sk_buff *skb;
skb              1090 net/rose/af_rose.c 	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
skb              1093 net/rose/af_rose.c 	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
skb              1099 net/rose/af_rose.c 	skb_reset_transport_header(skb);
skb              1100 net/rose/af_rose.c 	skb_put(skb, len);
skb              1102 net/rose/af_rose.c 	err = memcpy_from_msg(skb_transport_header(skb), msg, len);
skb              1104 net/rose/af_rose.c 		kfree_skb(skb);
skb              1113 net/rose/af_rose.c 		qbit = skb->data[0];
skb              1114 net/rose/af_rose.c 		skb_pull(skb, 1);
skb              1120 net/rose/af_rose.c 	asmptr = skb_push(skb, ROSE_MIN_LEN);
skb              1131 net/rose/af_rose.c 		kfree_skb(skb);
skb              1137 net/rose/af_rose.c 	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
skb              1144 net/rose/af_rose.c 		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
skb              1145 net/rose/af_rose.c 		skb_pull(skb, ROSE_MIN_LEN);
skb              1147 net/rose/af_rose.c 		frontlen = skb_headroom(skb);
skb              1149 net/rose/af_rose.c 		while (skb->len > 0) {
skb              1151 net/rose/af_rose.c 				kfree_skb(skb);
skb              1161 net/rose/af_rose.c 			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
skb              1164 net/rose/af_rose.c 			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
skb              1165 net/rose/af_rose.c 			skb_pull(skb, lg);
skb              1171 net/rose/af_rose.c 			if (skb->len > 0)
skb              1177 net/rose/af_rose.c 		skb->free = 1;
skb              1178 net/rose/af_rose.c 		kfree_skb(skb);
skb              1180 net/rose/af_rose.c 		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
skb              1183 net/rose/af_rose.c 	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
skb              1199 net/rose/af_rose.c 	struct sk_buff *skb;
skb              1210 net/rose/af_rose.c 	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
skb              1213 net/rose/af_rose.c 	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
skb              1215 net/rose/af_rose.c 	skb_pull(skb, ROSE_MIN_LEN);
skb              1218 net/rose/af_rose.c 		asmptr  = skb_push(skb, 1);
skb              1222 net/rose/af_rose.c 	skb_reset_transport_header(skb);
skb              1223 net/rose/af_rose.c 	copied     = skb->len;
skb              1230 net/rose/af_rose.c 	skb_copy_datagram_msg(skb, 0, msg, copied);
skb              1248 net/rose/af_rose.c 	skb_free_datagram(sk, skb);
skb              1271 net/rose/af_rose.c 		struct sk_buff *skb;
skb              1274 net/rose/af_rose.c 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
skb              1275 net/rose/af_rose.c 			amount = skb->len;
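
The rose_sendmsg() entries above contain a classic software segmentation loop: the protocol header is saved and stripped, then fixed-size chunks are peeled off the large skb into fresh skbs that each get the header pushed back on before queueing. A sketch under stated assumptions; EX_HDR_LEN, EX_PACLEN and example_fragment() are hypothetical, and the M-bit/flag handling ROSE does per fragment is omitted.

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/sock.h>

#define EX_HDR_LEN 3            /* hypothetical per-packet header */
#define EX_PACLEN  128          /* hypothetical max payload per fragment */

/* Split one oversized skb into EX_PACLEN-sized fragments. */
static int example_fragment(struct sock *sk, struct sk_buff *skb)
{
        unsigned char header[EX_HDR_LEN];
        struct sk_buff *skbn;
        int frontlen, lg, err;

        skb_copy_from_linear_data(skb, header, EX_HDR_LEN);
        skb_pull(skb, EX_HDR_LEN);
        frontlen = skb_headroom(skb);

        while (skb->len > 0) {
                skbn = sock_alloc_send_skb(sk, frontlen + EX_PACLEN, 0, &err);
                if (!skbn) {
                        kfree_skb(skb);
                        return err;
                }

                skb_reserve(skbn, frontlen);
                lg = min_t(int, EX_PACLEN, skb->len);

                /* Move one chunk, then restore the header in front of it. */
                skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
                skb_pull(skb, lg);
                memcpy(skb_push(skbn, EX_HDR_LEN), header, EX_HDR_LEN);

                skb_queue_tail(&sk->sk_write_queue, skbn);
        }
        kfree_skb(skb);         /* all payload has been copied out */
        return 0;
}
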
skb                35 net/rose/rose_dev.c static int rose_header(struct sk_buff *skb, struct net_device *dev,
skb                39 net/rose/rose_dev.c 	unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2);
skb                97 net/rose/rose_dev.c static netdev_tx_t rose_xmit(struct sk_buff *skb, struct net_device *dev)
skb               100 net/rose/rose_dev.c 	unsigned int len = skb->len;
skb               107 net/rose/rose_dev.c 	if (!rose_route_frame(skb, NULL)) {
skb               108 net/rose/rose_dev.c 		dev_kfree_skb(skb);
skb                36 net/rose/rose_in.c static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
skb                57 net/rose/rose_in.c 		rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
skb                73 net/rose/rose_in.c static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
skb                80 net/rose/rose_in.c 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
skb               101 net/rose/rose_in.c static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
skb               121 net/rose/rose_in.c 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
skb               164 net/rose/rose_in.c 			if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
skb               165 net/rose/rose_in.c 			    __sock_queue_rcv_skb(sk, skb) == 0) {
skb               212 net/rose/rose_in.c static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
skb               234 net/rose/rose_in.c 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
skb               250 net/rose/rose_in.c static int rose_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
skb               254 net/rose/rose_in.c 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
skb               262 net/rose/rose_in.c int rose_process_rx_frame(struct sock *sk, struct sk_buff *skb)
skb               270 net/rose/rose_in.c 	frametype = rose_decode(skb, &ns, &nr, &q, &d, &m);
skb               274 net/rose/rose_in.c 		queued = rose_state1_machine(sk, skb, frametype);
skb               277 net/rose/rose_in.c 		queued = rose_state2_machine(sk, skb, frametype);
skb               280 net/rose/rose_in.c 		queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
skb               283 net/rose/rose_in.c 		queued = rose_state4_machine(sk, skb, frametype);
skb               286 net/rose/rose_in.c 		queued = rose_state5_machine(sk, skb, frametype);
skb                95 net/rose/rose_link.c static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
skb               106 net/rose/rose_link.c 	neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
skb               139 net/rose/rose_link.c void rose_link_rx_restart(struct sk_buff *skb, struct rose_neigh *neigh, unsigned short frametype)
skb               147 net/rose/rose_link.c 		neigh->dce_mode  = (skb->data[3] == ROSE_DTE_ORIGINATED);
skb               157 net/rose/rose_link.c 		pr_warn("ROSE: received diagnostic #%d - %3ph\n", skb->data[3],
skb               158 net/rose/rose_link.c 			skb->data + 4);
skb               178 net/rose/rose_link.c 	struct sk_buff *skb;
skb               184 net/rose/rose_link.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
skb               187 net/rose/rose_link.c 	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN);
skb               189 net/rose/rose_link.c 	dptr = skb_put(skb, ROSE_MIN_LEN + 3);
skb               198 net/rose/rose_link.c 	if (!rose_send_frame(skb, neigh))
skb               199 net/rose/rose_link.c 		kfree_skb(skb);
skb               207 net/rose/rose_link.c 	struct sk_buff *skb;
skb               213 net/rose/rose_link.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
skb               216 net/rose/rose_link.c 	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN);
skb               218 net/rose/rose_link.c 	dptr = skb_put(skb, ROSE_MIN_LEN + 1);
skb               225 net/rose/rose_link.c 	if (!rose_send_frame(skb, neigh))
skb               226 net/rose/rose_link.c 		kfree_skb(skb);
skb               235 net/rose/rose_link.c 	struct sk_buff *skb;
skb               241 net/rose/rose_link.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
skb               244 net/rose/rose_link.c 	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN);
skb               246 net/rose/rose_link.c 	dptr = skb_put(skb, ROSE_MIN_LEN + 3);
skb               255 net/rose/rose_link.c 	if (!rose_send_frame(skb, neigh))
skb               256 net/rose/rose_link.c 		kfree_skb(skb);
skb               259 net/rose/rose_link.c void rose_transmit_link(struct sk_buff *skb, struct rose_neigh *neigh)
skb               264 net/rose/rose_link.c 		rose_loopback_queue(skb, neigh);
skb               271 net/rose/rose_link.c 	dptr = skb_push(skb, 1);
skb               275 net/rose/rose_link.c 		if (!rose_send_frame(skb, neigh))
skb               276 net/rose/rose_link.c 			kfree_skb(skb);
skb               278 net/rose/rose_link.c 		skb_queue_tail(&neigh->queue, skb);
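
The rose_link.c entries above all follow the same ownership convention: the transmit helper consumes the skb only on success, so every caller frees it on failure ("if (!rose_send_frame(skb, neigh)) kfree_skb(skb);"). A tiny sketch of that contract; both functions are hypothetical.

#include <linux/skbuff.h>

/* Hypothetical lower-layer transmit: returns true only when it has
 * taken ownership of the skb. */
static bool example_send_frame(struct sk_buff *skb)
{
        return false;           /* pretend the link is down */
}

/* The caller frees the skb only when the helper did not consume it. */
static void example_xmit_or_free(struct sk_buff *skb)
{
        if (!example_send_frame(skb))
                kfree_skb(skb);
}
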
skb                34 net/rose/rose_loopback.c int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
skb                39 net/rose/rose_loopback.c 		skbn = skb_clone(skb, GFP_ATOMIC);
skb                42 net/rose/rose_loopback.c 		consume_skb(skb);
skb                48 net/rose/rose_loopback.c 		kfree_skb(skb);
skb                61 net/rose/rose_loopback.c 	struct sk_buff *skb;
skb                70 net/rose/rose_loopback.c 		skb = skb_dequeue(&loopback_queue);
skb                71 net/rose/rose_loopback.c 		if (!skb)
skb                73 net/rose/rose_loopback.c 		if (skb->len < ROSE_MIN_LEN) {
skb                74 net/rose/rose_loopback.c 			kfree_skb(skb);
skb                77 net/rose/rose_loopback.c 		lci_i     = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
skb                78 net/rose/rose_loopback.c 		frametype = skb->data[2];
skb                80 net/rose/rose_loopback.c 		    (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
skb                81 net/rose/rose_loopback.c 		     skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
skb                83 net/rose/rose_loopback.c 			kfree_skb(skb);
skb                86 net/rose/rose_loopback.c 		dest      = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
skb                89 net/rose/rose_loopback.c 		skb_reset_transport_header(skb);
skb                93 net/rose/rose_loopback.c 			if (rose_process_rx_frame(sk, skb) == 0)
skb                94 net/rose/rose_loopback.c 				kfree_skb(skb);
skb               100 net/rose/rose_loopback.c 				if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
skb               101 net/rose/rose_loopback.c 					kfree_skb(skb);
skb               103 net/rose/rose_loopback.c 				kfree_skb(skb);
skb               106 net/rose/rose_loopback.c 			kfree_skb(skb);
skb               115 net/rose/rose_loopback.c 	struct sk_buff *skb;
skb               119 net/rose/rose_loopback.c 	while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
skb               120 net/rose/rose_loopback.c 		skb->sk = NULL;
skb               121 net/rose/rose_loopback.c 		kfree_skb(skb);
skb                30 net/rose/rose_out.c static void rose_send_iframe(struct sock *sk, struct sk_buff *skb)
skb                34 net/rose/rose_out.c 	if (skb == NULL)
skb                37 net/rose/rose_out.c 	skb->data[2] |= (rose->vr << 5) & 0xE0;
skb                38 net/rose/rose_out.c 	skb->data[2] |= (rose->vs << 1) & 0x0E;
skb                42 net/rose/rose_out.c 	rose_transmit_link(skb, rose->neighbour);
skb                48 net/rose/rose_out.c 	struct sk_buff *skb, *skbn;
skb                73 net/rose/rose_out.c 	skb  = skb_dequeue(&sk->sk_write_queue);
skb                76 net/rose/rose_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb                77 net/rose/rose_out.c 			skb_queue_head(&sk->sk_write_queue, skb);
skb                93 net/rose/rose_out.c 		skb_queue_tail(&rose->ack_queue, skb);
skb                96 net/rose/rose_out.c 		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
skb               852 net/rose/rose_route.c int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
skb               866 net/rose/rose_route.c 	if (skb->len < ROSE_MIN_LEN)
skb               870 net/rose/rose_route.c 		return rose_loopback_queue(skb, NULL);
skb               872 net/rose/rose_route.c 	frametype = skb->data[2];
skb               873 net/rose/rose_route.c 	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
skb               875 net/rose/rose_route.c 	    (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
skb               876 net/rose/rose_route.c 	     skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
skb               879 net/rose/rose_route.c 	src_addr  = (rose_address *)(skb->data + ROSE_CALL_REQ_SRC_ADDR_OFF);
skb               880 net/rose/rose_route.c 	dest_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
skb               909 net/rose/rose_route.c 		rose_link_rx_restart(skb, rose_neigh, frametype);
skb               937 net/rose/rose_route.c 			skb_reset_transport_header(skb);
skb               938 net/rose/rose_route.c 			res = rose_process_rx_frame(sk, skb);
skb               948 net/rose/rose_route.c 			res = rose_rx_call_request(skb, dev, rose_neigh, lci);
skb               970 net/rose/rose_route.c 				skb->data[0] &= 0xF0;
skb               971 net/rose/rose_route.c 				skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
skb               972 net/rose/rose_route.c 				skb->data[1]  = (rose_route->lci2 >> 0) & 0xFF;
skb               973 net/rose/rose_route.c 				rose_transmit_link(skb, rose_route->neigh2);
skb               991 net/rose/rose_route.c 				skb->data[0] &= 0xF0;
skb               992 net/rose/rose_route.c 				skb->data[0] |= (rose_route->lci1 >> 8) & 0x0F;
skb               993 net/rose/rose_route.c 				skb->data[1]  = (rose_route->lci1 >> 0) & 0xFF;
skb               994 net/rose/rose_route.c 				rose_transmit_link(skb, rose_route->neigh1);
skb              1020 net/rose/rose_route.c 	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
skb              1021 net/rose/rose_route.c 				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
skb              1073 net/rose/rose_route.c 	skb->data[0] &= 0xF0;
skb              1074 net/rose/rose_route.c 	skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
skb              1075 net/rose/rose_route.c 	skb->data[1]  = (rose_route->lci2 >> 0) & 0xFF;
skb              1077 net/rose/rose_route.c 	rose_transmit_link(skb, rose_route->neigh2);
skb                45 net/rose/rose_subr.c 	struct sk_buff *skb;
skb                53 net/rose/rose_subr.c 			skb = skb_dequeue(&rose->ack_queue);
skb                54 net/rose/rose_subr.c 			kfree_skb(skb);
skb                62 net/rose/rose_subr.c 	struct sk_buff *skb, *skb_prev = NULL;
skb                69 net/rose/rose_subr.c 	while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) {
skb                71 net/rose/rose_subr.c 			skb_queue_head(&sk->sk_write_queue, skb);
skb                73 net/rose/rose_subr.c 			skb_append(skb_prev, skb, &sk->sk_write_queue);
skb                74 net/rose/rose_subr.c 		skb_prev = skb;
skb               102 net/rose/rose_subr.c 	struct sk_buff *skb;
skb               124 net/rose/rose_subr.c 	skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
skb               125 net/rose/rose_subr.c 	if (!skb)
skb               131 net/rose/rose_subr.c 	skb_reserve(skb, reserve);
skb               133 net/rose/rose_subr.c 	dptr = skb_put(skb, len);
skb               149 net/rose/rose_subr.c 		skb_put(skb, faclen);
skb               194 net/rose/rose_subr.c 		kfree_skb(skb);
skb               198 net/rose/rose_subr.c 	rose_transmit_link(skb, rose->neighbour);
skb               201 net/rose/rose_subr.c int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
skb               205 net/rose/rose_subr.c 	frame = skb->data;
skb               891 net/rxrpc/ar-internal.h 					struct sk_buff *skb,
skb               896 net/rxrpc/ar-internal.h 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               902 net/rxrpc/ar-internal.h #define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
skb               903 net/rxrpc/ar-internal.h 	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
skb               246 net/rxrpc/call_accept.c static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
skb               248 net/rxrpc/call_accept.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               249 net/rxrpc/call_accept.c 	ktime_t now = skb->tstamp;
skb               268 net/rxrpc/call_accept.c 						    struct sk_buff *skb)
skb               297 net/rxrpc/call_accept.c 			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
skb               315 net/rxrpc/call_accept.c 		rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
skb               351 net/rxrpc/call_accept.c 					   struct sk_buff *skb)
skb               353 net/rxrpc/call_accept.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               367 net/rxrpc/call_accept.c 		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb               368 net/rxrpc/call_accept.c 		skb->priority = RX_INVALID_OPERATION;
skb               377 net/rxrpc/call_accept.c 	conn = rxrpc_find_connection_rcu(local, skb, &peer);
skb               379 net/rxrpc/call_accept.c 	if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
skb               382 net/rxrpc/call_accept.c 	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
skb               385 net/rxrpc/call_accept.c 		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
skb               393 net/rxrpc/call_accept.c 	rxrpc_incoming_call(rx, call, skb);
skb               434 net/rxrpc/call_accept.c 	rxrpc_send_ping(call, skb);
skb               451 net/rxrpc/call_accept.c 	_leave(" = NULL [%u]", skb->mark);
skb               159 net/rxrpc/call_event.c 	struct sk_buff *skb;
skb               195 net/rxrpc/call_event.c 		skb = call->rxtx_buffer[ix];
skb               196 net/rxrpc/call_event.c 		rxrpc_see_skb(skb, rxrpc_skb_seen);
skb               199 net/rxrpc/call_event.c 			if (ktime_after(skb->tstamp, max_age)) {
skb               200 net/rxrpc/call_event.c 				if (ktime_before(skb->tstamp, oldest))
skb               201 net/rxrpc/call_event.c 					oldest = skb->tstamp;
skb               212 net/rxrpc/call_event.c 				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
skb               251 net/rxrpc/call_event.c 		skb = call->rxtx_buffer[ix];
skb               252 net/rxrpc/call_event.c 		rxrpc_get_skb(skb, rxrpc_skb_got);
skb               255 net/rxrpc/call_event.c 		if (rxrpc_send_data_packet(call, skb, true) < 0) {
skb               256 net/rxrpc/call_event.c 			rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               263 net/rxrpc/call_event.c 		rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               331 net/rxrpc/call_object.c 			 struct sk_buff *skb)
skb               334 net/rxrpc/call_object.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               346 net/rxrpc/call_object.c 	call->cong_tstamp	= skb->tstamp;
skb                23 net/rxrpc/conn_event.c 				       struct sk_buff *skb,
skb                26 net/rxrpc/conn_event.c 	struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
skb                52 net/rxrpc/conn_event.c 	if (skb && call_id != sp->hdr.callNumber)
skb                92 net/rxrpc/conn_event.c 		pkt.ack.maxSkew		= htons(skb ? skb->priority : 0);
skb                95 net/rxrpc/conn_event.c 		pkt.ack.serial		= htonl(skb ? sp->hdr.serial : 0);
skb                96 net/rxrpc/conn_event.c 		pkt.ack.reason		= skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
skb               284 net/rxrpc/conn_event.c 			       struct sk_buff *skb,
skb               287 net/rxrpc/conn_event.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               302 net/rxrpc/conn_event.c 		rxrpc_conn_retransmit_call(conn, skb,
skb               311 net/rxrpc/conn_event.c 		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
skb               327 net/rxrpc/conn_event.c 		return conn->security->respond_to_challenge(conn, skb,
skb               331 net/rxrpc/conn_event.c 		ret = conn->security->verify_response(conn, skb, _abort_code);
skb               443 net/rxrpc/conn_event.c 	struct sk_buff *skb;
skb               456 net/rxrpc/conn_event.c 	while ((skb = skb_dequeue(&conn->rx_queue))) {
skb               457 net/rxrpc/conn_event.c 		rxrpc_see_skb(skb, rxrpc_skb_seen);
skb               458 net/rxrpc/conn_event.c 		ret = rxrpc_process_event(conn, skb, &abort_code);
skb               469 net/rxrpc/conn_event.c 			rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               477 net/rxrpc/conn_event.c 	skb_queue_head(&conn->rx_queue, skb);
skb               483 net/rxrpc/conn_event.c 	rxrpc_free_skb(skb, rxrpc_skb_freed);
skb                74 net/rxrpc/conn_object.c 						   struct sk_buff *skb,
skb                79 net/rxrpc/conn_object.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb                85 net/rxrpc/conn_object.c 	if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
skb               109 net/rxrpc/conn_object.c 		conn = rxrpc_find_service_conn_rcu(peer, skb);
skb                22 net/rxrpc/conn_service.c 						     struct sk_buff *skb)
skb                26 net/rxrpc/conn_service.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               153 net/rxrpc/conn_service.c 				   struct sk_buff *skb)
skb               155 net/rxrpc/conn_service.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb                39 net/rxrpc/input.c 					struct sk_buff *skb,
skb                59 net/rxrpc/input.c 			call->cong_tstamp = skb->tstamp;
skb                83 net/rxrpc/input.c 			call->cong_tstamp = skb->tstamp;
skb                96 net/rxrpc/input.c 		if (ktime_before(skb->tstamp,
skb               101 net/rxrpc/input.c 		call->cong_tstamp = skb->tstamp;
skb               159 net/rxrpc/input.c 	call->cong_tstamp = skb->tstamp;
skb               201 net/rxrpc/input.c 	struct sk_buff *skb, *list = NULL;
skb               218 net/rxrpc/input.c 		skb = call->rxtx_buffer[ix];
skb               220 net/rxrpc/input.c 		rxrpc_see_skb(skb, rxrpc_skb_rotated);
skb               223 net/rxrpc/input.c 		skb->next = list;
skb               224 net/rxrpc/input.c 		list = skb;
skb               242 net/rxrpc/input.c 		skb = list;
skb               243 net/rxrpc/input.c 		list = skb->next;
skb               244 net/rxrpc/input.c 		skb_mark_not_on_list(skb);
skb               245 net/rxrpc/input.c 		rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               345 net/rxrpc/input.c static bool rxrpc_validate_data(struct sk_buff *skb)
skb               347 net/rxrpc/input.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               349 net/rxrpc/input.c 	unsigned int len = skb->len;
skb               365 net/rxrpc/input.c 		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
skb               412 net/rxrpc/input.c static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
skb               414 net/rxrpc/input.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               423 net/rxrpc/input.c 	       call->rx_hard_ack, call->rx_top, skb->len, seq0);
skb               430 net/rxrpc/input.c 		rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               547 net/rxrpc/input.c 			rxrpc_get_skb(skb, rxrpc_skb_got);
skb               550 net/rxrpc/input.c 		call->rxtx_buffer[ix] = skb;
skb               567 net/rxrpc/input.c 			skb = NULL;
skb               607 net/rxrpc/input.c 	rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               620 net/rxrpc/input.c 	struct sk_buff *skb;
skb               625 net/rxrpc/input.c 		skb = call->rxtx_buffer[ix];
skb               626 net/rxrpc/input.c 		if (!skb)
skb               629 net/rxrpc/input.c 		sent_at = skb->tstamp;
skb               631 net/rxrpc/input.c 		sp = rxrpc_skb(skb);
skb               712 net/rxrpc/input.c static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
skb               715 net/rxrpc/input.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               839 net/rxrpc/input.c static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
skb               842 net/rxrpc/input.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               855 net/rxrpc/input.c 	if (skb_copy_bits(skb, offset, &buf.ack, sizeof(buf.ack)) < 0) {
skb               874 net/rxrpc/input.c 		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
skb               877 net/rxrpc/input.c 		rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
skb               901 net/rxrpc/input.c 	if (skb->len >= ioffset + sizeof(buf.info) &&
skb               902 net/rxrpc/input.c 	    skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
skb               914 net/rxrpc/input.c 	call->acks_latest_ts = skb->tstamp;
skb               921 net/rxrpc/input.c 		rxrpc_input_ackinfo(call, skb, &buf.info);
skb               957 net/rxrpc/input.c 		if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
skb               973 net/rxrpc/input.c 	rxrpc_congestion_management(call, skb, &summary, acked_serial);
skb               981 net/rxrpc/input.c static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
skb               984 net/rxrpc/input.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               999 net/rxrpc/input.c static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
skb              1001 net/rxrpc/input.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb              1007 net/rxrpc/input.c 	if (skb->len >= 4 &&
skb              1008 net/rxrpc/input.c 	    skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
skb              1025 net/rxrpc/input.c 				    struct sk_buff *skb)
skb              1027 net/rxrpc/input.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb              1030 net/rxrpc/input.c 	_enter("%p,%p", call, skb);
skb              1044 net/rxrpc/input.c 		rxrpc_input_data(call, skb);
skb              1048 net/rxrpc/input.c 		rxrpc_input_ack(call, skb);
skb              1061 net/rxrpc/input.c 		rxrpc_input_abort(call, skb);
skb              1065 net/rxrpc/input.c 		rxrpc_input_ackall(call, skb);
skb              1072 net/rxrpc/input.c 	rxrpc_free_skb(skb, rxrpc_skb_freed);
skb              1114 net/rxrpc/input.c 				      struct sk_buff *skb)
skb              1116 net/rxrpc/input.c 	_enter("%p,%p", conn, skb);
skb              1118 net/rxrpc/input.c 	skb_queue_tail(&conn->rx_queue, skb);
skb              1127 net/rxrpc/input.c 				       struct sk_buff *skb)
skb              1129 net/rxrpc/input.c 	_enter("%p,%p", local, skb);
skb              1132 net/rxrpc/input.c 		skb_queue_tail(&local->event_queue, skb);
skb              1135 net/rxrpc/input.c 		rxrpc_free_skb(skb, rxrpc_skb_freed);
skb              1142 net/rxrpc/input.c static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
skb              1147 net/rxrpc/input.c 		skb_queue_tail(&local->reject_queue, skb);
skb              1150 net/rxrpc/input.c 		rxrpc_free_skb(skb, rxrpc_skb_freed);
skb              1158 net/rxrpc/input.c int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
skb              1163 net/rxrpc/input.c 	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
skb              1194 net/rxrpc/input.c int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
skb              1208 net/rxrpc/input.c 		kfree_skb(skb);
skb              1211 net/rxrpc/input.c 	if (skb->tstamp == 0)
skb              1212 net/rxrpc/input.c 		skb->tstamp = ktime_get_real();
skb              1214 net/rxrpc/input.c 	rxrpc_new_skb(skb, rxrpc_skb_received);
skb              1216 net/rxrpc/input.c 	skb_pull(skb, sizeof(struct udphdr));
skb              1221 net/rxrpc/input.c 	sp = rxrpc_skb(skb);
skb              1224 net/rxrpc/input.c 	if (rxrpc_extract_header(sp, skb) < 0)
skb              1231 net/rxrpc/input.c 			rxrpc_free_skb(skb, rxrpc_skb_lost);
skb              1236 net/rxrpc/input.c 	if (skb->tstamp == 0)
skb              1237 net/rxrpc/input.c 		skb->tstamp = ktime_get_real();
skb              1244 net/rxrpc/input.c 		rxrpc_post_packet_to_local(local, skb);
skb              1263 net/rxrpc/input.c 		if (!rxrpc_validate_data(skb))
skb              1270 net/rxrpc/input.c 			struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
skb              1272 net/rxrpc/input.c 				rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
skb              1276 net/rxrpc/input.c 			if (nskb != skb) {
skb              1277 net/rxrpc/input.c 				rxrpc_eaten_skb(skb, rxrpc_skb_received);
skb              1278 net/rxrpc/input.c 				skb = nskb;
skb              1279 net/rxrpc/input.c 				rxrpc_new_skb(skb, rxrpc_skb_unshared);
skb              1280 net/rxrpc/input.c 				sp = rxrpc_skb(skb);
skb              1323 net/rxrpc/input.c 	conn = rxrpc_find_connection_rcu(local, skb, &peer);
skb              1344 net/rxrpc/input.c 			rxrpc_post_packet_to_conn(conn, skb);
skb              1379 net/rxrpc/input.c 			rxrpc_post_packet_to_conn(conn, skb);
skb              1409 net/rxrpc/input.c 		call = rxrpc_new_incoming_call(local, rx, skb);
skb              1417 net/rxrpc/input.c 	rxrpc_input_call_packet(call, skb);
skb              1421 net/rxrpc/input.c 	rxrpc_free_skb(skb, rxrpc_skb_freed);
skb              1429 net/rxrpc/input.c 	skb->priority = RXKADINCONSISTENCY;
skb              1435 net/rxrpc/input.c 	skb->priority = RX_INVALID_OPERATION;
skb              1447 net/rxrpc/input.c 	skb->priority = RX_PROTOCOL_ERROR;
skb              1449 net/rxrpc/input.c 	skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb              1451 net/rxrpc/input.c 	trace_rxrpc_rx_done(skb->mark, skb->priority);
skb              1452 net/rxrpc/input.c 	rxrpc_reject_packet(local, skb);
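
The rxrpc_extract_header() and rxrpc_input_packet() entries above show how a wire header is lifted out of a possibly non-linear skb: skb_copy_bits() copies the fixed-size header into a stack struct, the big-endian fields are byte-swapped, and the header is then pulled so skb->data points at the payload. A sketch of that pattern; struct example_wire_hdr and example_extract_header() are hypothetical, not the rxrpc wire format.

#include <linux/errno.h>
#include <linux/skbuff.h>

struct example_wire_hdr {       /* hypothetical big-endian wire header */
        __be32  epoch;
        __be32  cid;
        __be32  serial;
};

/* Copy the header out, convert to host order, then pull past it. */
static int example_extract_header(struct sk_buff *skb, u32 *serial)
{
        struct example_wire_hdr whdr;

        if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
                return -EBADMSG;

        *serial = ntohl(whdr.serial);
        if (!pskb_pull(skb, sizeof(whdr)))
                return -EBADMSG;
        return 0;
}
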
skb                22 net/rxrpc/insecure.c 			      struct sk_buff *skb,
skb                29 net/rxrpc/insecure.c static int none_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
skb                40 net/rxrpc/insecure.c static void none_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
skb                46 net/rxrpc/insecure.c 				     struct sk_buff *skb,
skb                49 net/rxrpc/insecure.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb                57 net/rxrpc/insecure.c 				struct sk_buff *skb,
skb                60 net/rxrpc/insecure.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb                26 net/rxrpc/local_event.c 				       struct sk_buff *skb)
skb                29 net/rxrpc/local_event.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb                38 net/rxrpc/local_event.c 	if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
skb                84 net/rxrpc/local_event.c 	struct sk_buff *skb;
skb                89 net/rxrpc/local_event.c 	skb = skb_dequeue(&local->event_queue);
skb                90 net/rxrpc/local_event.c 	if (skb) {
skb                91 net/rxrpc/local_event.c 		struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb                93 net/rxrpc/local_event.c 		rxrpc_see_skb(skb, rxrpc_skb_seen);
skb                98 net/rxrpc/local_event.c 			if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
skb               103 net/rxrpc/local_event.c 				rxrpc_send_version_request(local, &sp->hdr, skb);
skb               111 net/rxrpc/local_event.c 		rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               314 net/rxrpc/output.c int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
skb               319 net/rxrpc/output.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               326 net/rxrpc/output.c 	_enter(",{%d}", skb->len);
skb               349 net/rxrpc/output.c 	iov[1].iov_base = skb->head;
skb               350 net/rxrpc/output.c 	iov[1].iov_len = skb->len;
skb               399 net/rxrpc/output.c 	skb->tstamp = ktime_get_real();
skb               424 net/rxrpc/output.c 			call->peer->rtt_last_req = skb->tstamp;
skb               471 net/rxrpc/output.c 	skb->tstamp = ktime_get_real();
skb               514 net/rxrpc/output.c 	struct sk_buff *skb;
skb               535 net/rxrpc/output.c 	while ((skb = skb_dequeue(&local->reject_queue))) {
skb               536 net/rxrpc/output.c 		rxrpc_see_skb(skb, rxrpc_skb_seen);
skb               537 net/rxrpc/output.c 		sp = rxrpc_skb(skb);
skb               539 net/rxrpc/output.c 		switch (skb->mark) {
skb               547 net/rxrpc/output.c 			code = htonl(skb->priority);
skb               552 net/rxrpc/output.c 			rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               556 net/rxrpc/output.c 		if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
skb               577 net/rxrpc/output.c 		rxrpc_free_skb(skb, rxrpc_skb_freed);
skb                29 net/rxrpc/peer_event.c 						     const struct sk_buff *skb,
skb                32 net/rxrpc/peer_event.c 	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
skb                53 net/rxrpc/peer_event.c 			       skb_network_header(skb) + serr->addr_offset,
skb                59 net/rxrpc/peer_event.c 			       skb_network_header(skb) + serr->addr_offset + 12,
skb                63 net/rxrpc/peer_event.c 			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
skb                76 net/rxrpc/peer_event.c 			       skb_network_header(skb) + serr->addr_offset,
skb                85 net/rxrpc/peer_event.c 			       skb_network_header(skb) + serr->addr_offset,
skb                90 net/rxrpc/peer_event.c 			       &ipv6_hdr(skb)->saddr,
skb               152 net/rxrpc/peer_event.c 	struct sk_buff *skb;
skb               167 net/rxrpc/peer_event.c 	skb = sock_dequeue_err_skb(sk);
skb               168 net/rxrpc/peer_event.c 	if (!skb) {
skb               173 net/rxrpc/peer_event.c 	rxrpc_new_skb(skb, rxrpc_skb_received);
skb               174 net/rxrpc/peer_event.c 	serr = SKB_EXT_ERR(skb);
skb               175 net/rxrpc/peer_event.c 	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
skb               178 net/rxrpc/peer_event.c 		rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               182 net/rxrpc/peer_event.c 	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
skb               187 net/rxrpc/peer_event.c 		rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               199 net/rxrpc/peer_event.c 		rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               207 net/rxrpc/peer_event.c 	rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               177 net/rxrpc/recvmsg.c 	struct sk_buff *skb;
skb               192 net/rxrpc/recvmsg.c 	skb = call->rxtx_buffer[ix];
skb               193 net/rxrpc/recvmsg.c 	rxrpc_see_skb(skb, rxrpc_skb_rotated);
skb               194 net/rxrpc/recvmsg.c 	sp = rxrpc_skb(skb);
skb               208 net/rxrpc/recvmsg.c 	rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               232 net/rxrpc/recvmsg.c static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
skb               236 net/rxrpc/recvmsg.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               248 net/rxrpc/recvmsg.c 		if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
skb               254 net/rxrpc/recvmsg.c 	return call->security->verify_packet(call, skb, offset, len,
skb               268 net/rxrpc/recvmsg.c static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
skb               273 net/rxrpc/recvmsg.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               283 net/rxrpc/recvmsg.c 	len = skb->len - offset;
skb               290 net/rxrpc/recvmsg.c 		ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
skb               299 net/rxrpc/recvmsg.c 	call->security->locate_data(call, skb, _offset, _len);
skb               313 net/rxrpc/recvmsg.c 	struct sk_buff *skb;
skb               343 net/rxrpc/recvmsg.c 		skb = call->rxtx_buffer[ix];
skb               344 net/rxrpc/recvmsg.c 		if (!skb) {
skb               350 net/rxrpc/recvmsg.c 		rxrpc_see_skb(skb, rxrpc_skb_seen);
skb               351 net/rxrpc/recvmsg.c 		sp = rxrpc_skb(skb);
skb               361 net/rxrpc/recvmsg.c 			sock_recv_timestamp(msg, sock->sk, skb);
skb               364 net/rxrpc/recvmsg.c 			ret2 = rxrpc_locate_data(call, skb,
skb               385 net/rxrpc/recvmsg.c 			ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
skb               743 net/rxrpc/recvmsg.c 	struct sk_buff *skb;
skb               761 net/rxrpc/recvmsg.c 	skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
skb               762 net/rxrpc/recvmsg.c 	if (!skb)
skb               765 net/rxrpc/recvmsg.c 	*_ts = skb_get_ktime(skb);
skb               179 net/rxrpc/rxkad.c 				    struct sk_buff *skb,
skb               184 net/rxrpc/rxkad.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               216 net/rxrpc/rxkad.c 				       struct sk_buff *skb,
skb               230 net/rxrpc/rxkad.c 	sp = rxrpc_skb(skb);
skb               252 net/rxrpc/rxkad.c 	if (skb_shinfo(skb)->nr_frags > 16)
skb               259 net/rxrpc/rxkad.c 	err = skb_to_sgvec(skb, sg, 0, len);
skb               277 net/rxrpc/rxkad.c 			       struct sk_buff *skb,
skb               288 net/rxrpc/rxkad.c 	sp = rxrpc_skb(skb);
skb               332 net/rxrpc/rxkad.c 		ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr,
skb               336 net/rxrpc/rxkad.c 		ret = rxkad_secure_packet_encrypt(call, skb, data_size,
skb               351 net/rxrpc/rxkad.c static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
skb               367 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_hdr", "V1H",
skb               376 net/rxrpc/rxkad.c 	ret = skb_to_sgvec(skb, sg, offset, 8);
skb               390 net/rxrpc/rxkad.c 	if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) {
skb               391 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_len", "XV1",
skb               405 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_check", "V1C",
skb               411 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_datalen", "V1L",
skb               428 net/rxrpc/rxkad.c static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
skb               442 net/rxrpc/rxkad.c 	_enter(",{%d}", skb->len);
skb               445 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_hdr", "V2H",
skb               454 net/rxrpc/rxkad.c 	nsg = skb_shinfo(skb)->nr_frags;
skb               464 net/rxrpc/rxkad.c 	ret = skb_to_sgvec(skb, sg, offset, len);
skb               484 net/rxrpc/rxkad.c 	if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) {
skb               485 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_len", "XV2",
skb               499 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_check", "V2C",
skb               505 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_datalen", "V2L",
skb               527 net/rxrpc/rxkad.c static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
skb               570 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_csum", "VCK",
skb               579 net/rxrpc/rxkad.c 		return rxkad_verify_packet_1(call, skb, offset, len, seq, req);
skb               581 net/rxrpc/rxkad.c 		return rxkad_verify_packet_2(call, skb, offset, len, seq, req);
skb               595 net/rxrpc/rxkad.c static void rxkad_locate_data_1(struct rxrpc_call *call, struct sk_buff *skb,
skb               600 net/rxrpc/rxkad.c 	if (skb_copy_bits(skb, *_offset, &sechdr, sizeof(sechdr)) < 0)
skb               609 net/rxrpc/rxkad.c static void rxkad_locate_data_2(struct rxrpc_call *call, struct sk_buff *skb,
skb               614 net/rxrpc/rxkad.c 	if (skb_copy_bits(skb, *_offset, &sechdr, sizeof(sechdr)) < 0)
skb               623 net/rxrpc/rxkad.c static void rxkad_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
skb               628 net/rxrpc/rxkad.c 		rxkad_locate_data_1(call, skb, _offset, _len);
skb               631 net/rxrpc/rxkad.c 		rxkad_locate_data_2(call, skb, _offset, _len);
skb               809 net/rxrpc/rxkad.c 				      struct sk_buff *skb,
skb               815 net/rxrpc/rxkad.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               834 net/rxrpc/rxkad.c 	if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
skb               895 net/rxrpc/rxkad.c 				struct sk_buff *skb,
skb               902 net/rxrpc/rxkad.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb              1087 net/rxrpc/rxkad.c 				 struct sk_buff *skb,
skb              1091 net/rxrpc/rxkad.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb              1109 net/rxrpc/rxkad.c 	if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
skb              1112 net/rxrpc/rxkad.c 	if (!pskb_pull(skb, sizeof(*response)))
skb              1144 net/rxrpc/rxkad.c 	if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
skb              1148 net/rxrpc/rxkad.c 	ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key,
skb               109 net/rxrpc/security.c 				   struct sk_buff *skb)
skb               112 net/rxrpc/security.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               125 net/rxrpc/security.c 		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb               126 net/rxrpc/security.c 		skb->priority = RX_INVALID_OPERATION;
skb               137 net/rxrpc/security.c 		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb               138 net/rxrpc/security.c 		skb->priority = RX_INVALID_OPERATION;
skb               149 net/rxrpc/security.c 		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb               150 net/rxrpc/security.c 		skb->priority = sec->no_key_abort;
skb               195 net/rxrpc/sendmsg.c 			      struct sk_buff *skb, bool last,
skb               198 net/rxrpc/sendmsg.c 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
skb               204 net/rxrpc/sendmsg.c 	_net("queue skb %p [%d]", skb, seq);
skb               214 net/rxrpc/sendmsg.c 	skb->tstamp = ktime_get_real();
skb               217 net/rxrpc/sendmsg.c 	rxrpc_get_skb(skb, rxrpc_skb_got);
skb               220 net/rxrpc/sendmsg.c 	call->rxtx_buffer[ix] = skb;
skb               258 net/rxrpc/sendmsg.c 	ret = rxrpc_send_data_packet(call, skb, false);
skb               282 net/rxrpc/sendmsg.c 	rxrpc_free_skb(skb, rxrpc_skb_freed);
skb               298 net/rxrpc/sendmsg.c 	struct sk_buff *skb;
skb               321 net/rxrpc/sendmsg.c 	skb = call->tx_pending;
skb               323 net/rxrpc/sendmsg.c 	rxrpc_see_skb(skb, rxrpc_skb_seen);
skb               331 net/rxrpc/sendmsg.c 		if (!skb) {
skb               363 net/rxrpc/sendmsg.c 			skb = sock_alloc_send_skb(
skb               365 net/rxrpc/sendmsg.c 			if (!skb)
skb               368 net/rxrpc/sendmsg.c 			sp = rxrpc_skb(skb);
skb               370 net/rxrpc/sendmsg.c 			rxrpc_new_skb(skb, rxrpc_skb_new);
skb               372 net/rxrpc/sendmsg.c 			_debug("ALLOC SEND %p", skb);
skb               374 net/rxrpc/sendmsg.c 			ASSERTCMP(skb->mark, ==, 0);
skb               377 net/rxrpc/sendmsg.c 			skb_reserve(skb, call->conn->security_size);
skb               378 net/rxrpc/sendmsg.c 			skb->len += call->conn->security_size;
skb               381 net/rxrpc/sendmsg.c 			if (sp->remain > skb_tailroom(skb))
skb               382 net/rxrpc/sendmsg.c 				sp->remain = skb_tailroom(skb);
skb               385 net/rxrpc/sendmsg.c 			       skb_headroom(skb),
skb               386 net/rxrpc/sendmsg.c 			       skb_tailroom(skb),
skb               387 net/rxrpc/sendmsg.c 			       skb_headlen(skb),
skb               390 net/rxrpc/sendmsg.c 			skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               394 net/rxrpc/sendmsg.c 		sp = rxrpc_skb(skb);
skb               398 net/rxrpc/sendmsg.c 			int copy = skb_tailroom(skb);
skb               406 net/rxrpc/sendmsg.c 			ret = skb_add_data(skb, &msg->msg_iter, copy);
skb               411 net/rxrpc/sendmsg.c 			skb->mark += copy;
skb               431 net/rxrpc/sendmsg.c 				pad = conn->security_size + skb->mark;
skb               436 net/rxrpc/sendmsg.c 					skb_put_zero(skb, pad);
skb               452 net/rxrpc/sendmsg.c 				call, skb, skb->mark, skb->head);
skb               456 net/rxrpc/sendmsg.c 			ret = rxrpc_queue_packet(rx, call, skb,
skb               460 net/rxrpc/sendmsg.c 			skb = NULL;
skb               467 net/rxrpc/sendmsg.c 	call->tx_pending = skb;
skb               472 net/rxrpc/sendmsg.c 	rxrpc_free_skb(skb, rxrpc_skb_freed);
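
Before queuing, the sendmsg.c lines above pad each DATA packet to the
connection's cipher alignment: pad = conn->security_size + skb->mark, then
subtract from size_align and mask. The same wrap-and-mask arithmetic as a
standalone sketch; pad_for() and the values are illustrative:

    #include <stdio.h>

    /* Bytes of zero padding needed to round hdr+data up to 'align',
     * where 'align' is a power of two; relies on unsigned wraparound
     * exactly as the kernel expression does. */
    static unsigned int pad_for(unsigned int hdr, unsigned int data,
                                unsigned int align)
    {
            unsigned int pad = align - (hdr + data);

            return pad & (align - 1);         /* 0 when already aligned */
    }

    int main(void)
    {
            printf("%u\n", pad_for(4, 13, 8)); /* 17 bytes -> pad 7 */
            return 0;
    }
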
skb                17 net/rxrpc/skbuff.c #define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER)
skb                18 net/rxrpc/skbuff.c #define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
skb                23 net/rxrpc/skbuff.c void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
skb                26 net/rxrpc/skbuff.c 	int n = atomic_inc_return(select_skb_count(skb));
skb                27 net/rxrpc/skbuff.c 	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
skb                28 net/rxrpc/skbuff.c 			rxrpc_skb(skb)->rx_flags, here);
skb                34 net/rxrpc/skbuff.c void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
skb                37 net/rxrpc/skbuff.c 	if (skb) {
skb                38 net/rxrpc/skbuff.c 		int n = atomic_read(select_skb_count(skb));
skb                39 net/rxrpc/skbuff.c 		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
skb                40 net/rxrpc/skbuff.c 				rxrpc_skb(skb)->rx_flags, here);
skb                47 net/rxrpc/skbuff.c void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
skb                50 net/rxrpc/skbuff.c 	int n = atomic_inc_return(select_skb_count(skb));
skb                51 net/rxrpc/skbuff.c 	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
skb                52 net/rxrpc/skbuff.c 			rxrpc_skb(skb)->rx_flags, here);
skb                53 net/rxrpc/skbuff.c 	skb_get(skb);
skb                59 net/rxrpc/skbuff.c void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
skb                63 net/rxrpc/skbuff.c 	trace_rxrpc_skb(skb, op, 0, n, 0, here);
skb                69 net/rxrpc/skbuff.c void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
skb                72 net/rxrpc/skbuff.c 	if (skb) {
skb                74 net/rxrpc/skbuff.c 		CHECK_SLAB_OKAY(&skb->users);
skb                75 net/rxrpc/skbuff.c 		n = atomic_dec_return(select_skb_count(skb));
skb                76 net/rxrpc/skbuff.c 		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
skb                77 net/rxrpc/skbuff.c 				rxrpc_skb(skb)->rx_flags, here);
skb                78 net/rxrpc/skbuff.c 		kfree_skb(skb);
skb                88 net/rxrpc/skbuff.c 	struct sk_buff *skb;
skb                89 net/rxrpc/skbuff.c 	while ((skb = skb_dequeue((list))) != NULL) {
skb                90 net/rxrpc/skbuff.c 		int n = atomic_dec_return(select_skb_count(skb));
skb                91 net/rxrpc/skbuff.c 		trace_rxrpc_skb(skb, rxrpc_skb_purged,
skb                92 net/rxrpc/skbuff.c 				refcount_read(&skb->users), n,
skb                93 net/rxrpc/skbuff.c 				rxrpc_skb(skb)->rx_flags, here);
skb                94 net/rxrpc/skbuff.c 		kfree_skb(skb);
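
net/rxrpc/skbuff.c (above) wraps every skb reference transition so that a
tracepoint fires together with a global tx/rx outstanding-buffer counter,
which is what makes leaked-skb hunting tractable. A userspace model of
that counted-and-logged reference pattern, all names hypothetical:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int n_bufs;                 /* like rxrpc_n_rx_skbs */

    struct buf { atomic_int users; };

    static void buf_get(struct buf *b, const char *why)
    {
            int n = atomic_fetch_add(&b->users, 1) + 1;

            fprintf(stderr, "get %s users=%d pool=%d\n",
                    why, n, atomic_load(&n_bufs));
    }

    static void buf_put(struct buf *b, const char *why)
    {
            int n = atomic_fetch_sub(&b->users, 1) - 1;

            fprintf(stderr, "put %s users=%d\n", why, n);
            if (n == 0)
                    atomic_fetch_sub(&n_bufs, 1);  /* last reference gone */
    }

    int main(void)
    {
            struct buf b = { .users = 1 };

            atomic_fetch_add(&n_bufs, 1);     /* like rxrpc_new_skb() */
            buf_get(&b, "queued");
            buf_put(&b, "dequeued");
            buf_put(&b, "freed");
            return 0;
    }
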
skb                16 net/rxrpc/utils.c int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
skb                20 net/rxrpc/utils.c 	switch (ntohs(skb->protocol)) {
skb                25 net/rxrpc/utils.c 		srx->transport.sin.sin_port = udp_hdr(skb)->source;
skb                26 net/rxrpc/utils.c 		srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
skb                34 net/rxrpc/utils.c 		srx->transport.sin6.sin6_port = udp_hdr(skb)->source;
skb                35 net/rxrpc/utils.c 		srx->transport.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
skb                41 net/rxrpc/utils.c 				    ntohs(skb->protocol));
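
rxrpc_extract_addr_from_skb() (above) switches on skb->protocol and copies
the already-big-endian source address and port straight from the IP/UDP
headers into a sockaddr. A function-only IPv4 sketch of the same
extraction from a raw buffer; extract_src() is hypothetical:

    #include <arpa/inet.h>
    #include <netinet/ip.h>
    #include <netinet/udp.h>
    #include <stddef.h>
    #include <string.h>

    static int extract_src(const unsigned char *pkt, size_t len,
                           struct sockaddr_in *sin)
    {
            const struct iphdr *iph = (const struct iphdr *)pkt;
            const struct udphdr *uh;

            if (len < sizeof(*iph) || iph->protocol != IPPROTO_UDP ||
                len < (size_t)iph->ihl * 4 + sizeof(*uh))
                    return -1;
            uh = (const struct udphdr *)(pkt + iph->ihl * 4);

            memset(sin, 0, sizeof(*sin));
            sin->sin_family = AF_INET;
            sin->sin_port = uh->source;       /* already in wire order */
            sin->sin_addr.s_addr = iph->saddr;
            return 0;
    }
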
skb               214 net/sched/act_api.c static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
skb               240 net/sched/act_api.c 		nest = nla_nest_start_noflag(skb, n_i);
skb               245 net/sched/act_api.c 		err = tcf_action_dump_1(skb, p, 0, 0);
skb               248 net/sched/act_api.c 			nlmsg_trim(skb, nest);
skb               251 net/sched/act_api.c 		nla_nest_end(skb, nest);
skb               269 net/sched/act_api.c 	nla_nest_cancel(skb, nest);
skb               287 net/sched/act_api.c static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
skb               298 net/sched/act_api.c 	nest = nla_nest_start_noflag(skb, 0);
skb               301 net/sched/act_api.c 	if (nla_put_string(skb, TCA_KIND, ops->kind))
skb               317 net/sched/act_api.c 	if (nla_put_u32(skb, TCA_FCNT, n_i))
skb               319 net/sched/act_api.c 	nla_nest_end(skb, nest);
skb               323 net/sched/act_api.c 	nla_nest_cancel(skb, nest);
skb               327 net/sched/act_api.c int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
skb               335 net/sched/act_api.c 		return tcf_del_walker(idrinfo, skb, ops);
skb               337 net/sched/act_api.c 		return tcf_dump_walker(idrinfo, skb, cb);
skb               647 net/sched/act_api.c int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
skb               655 net/sched/act_api.c 	if (skb_skip_tc_classify(skb))
skb               667 net/sched/act_api.c 		ret = a->ops->act(skb, a, res);
skb               748 net/sched/act_api.c tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
skb               750 net/sched/act_api.c 	return a->ops->dump(skb, a, bind, ref);
skb               754 net/sched/act_api.c tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
skb               757 net/sched/act_api.c 	unsigned char *b = skb_tail_pointer(skb);
skb               761 net/sched/act_api.c 	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
skb               763 net/sched/act_api.c 	if (tcf_action_copy_stats(skb, a, 0))
skb               769 net/sched/act_api.c 		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
skb               776 net/sched/act_api.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               779 net/sched/act_api.c 	err = tcf_action_dump_old(skb, a, bind, ref);
skb               781 net/sched/act_api.c 		nla_nest_end(skb, nest);
skb               786 net/sched/act_api.c 	nlmsg_trim(skb, b);
skb               791 net/sched/act_api.c int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
skb               800 net/sched/act_api.c 		nest = nla_nest_start_noflag(skb, i + 1);
skb               803 net/sched/act_api.c 		err = tcf_action_dump_1(skb, a, bind, ref);
skb               806 net/sched/act_api.c 		nla_nest_end(skb, nest);
skb               814 net/sched/act_api.c 	nla_nest_cancel(skb, nest);
skb               992 net/sched/act_api.c int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
skb              1006 net/sched/act_api.c 			err = gnet_stats_start_copy_compat(skb, 0,
skb              1014 net/sched/act_api.c 		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
skb              1038 net/sched/act_api.c static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
skb              1044 net/sched/act_api.c 	unsigned char *b = skb_tail_pointer(skb);
skb              1047 net/sched/act_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
skb              1055 net/sched/act_api.c 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
skb              1059 net/sched/act_api.c 	if (tcf_action_dump(skb, actions, bind, ref) < 0)
skb              1062 net/sched/act_api.c 	nla_nest_end(skb, nest);
skb              1064 net/sched/act_api.c 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
skb              1065 net/sched/act_api.c 	return skb->len;
skb              1068 net/sched/act_api.c 	nlmsg_trim(skb, b);
skb              1077 net/sched/act_api.c 	struct sk_buff *skb;
skb              1079 net/sched/act_api.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              1080 net/sched/act_api.c 	if (!skb)
skb              1082 net/sched/act_api.c 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
skb              1085 net/sched/act_api.c 		kfree_skb(skb);
skb              1089 net/sched/act_api.c 	return rtnl_unicast(skb, net, portid);
skb              1140 net/sched/act_api.c 	struct sk_buff *skb;
skb              1151 net/sched/act_api.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              1152 net/sched/act_api.c 	if (!skb)
skb              1155 net/sched/act_api.c 	b = skb_tail_pointer(skb);
skb              1170 net/sched/act_api.c 	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
skb              1181 net/sched/act_api.c 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
skb              1187 net/sched/act_api.c 	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
skb              1189 net/sched/act_api.c 		nla_nest_cancel(skb, nest);
skb              1193 net/sched/act_api.c 	nla_nest_end(skb, nest);
skb              1195 net/sched/act_api.c 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
skb              1198 net/sched/act_api.c 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
skb              1210 net/sched/act_api.c 	kfree_skb(skb);
skb              1248 net/sched/act_api.c 	struct sk_buff *skb;
skb              1250 net/sched/act_api.c 	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
skb              1252 net/sched/act_api.c 	if (!skb)
skb              1255 net/sched/act_api.c 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
skb              1258 net/sched/act_api.c 		kfree_skb(skb);
skb              1266 net/sched/act_api.c 		kfree_skb(skb);
skb              1270 net/sched/act_api.c 	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
skb              1329 net/sched/act_api.c 	struct sk_buff *skb;
skb              1332 net/sched/act_api.c 	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
skb              1334 net/sched/act_api.c 	if (!skb)
skb              1337 net/sched/act_api.c 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
skb              1340 net/sched/act_api.c 		kfree_skb(skb);
skb              1344 net/sched/act_api.c 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
skb              1382 net/sched/act_api.c static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
skb              1385 net/sched/act_api.c 	struct net *net = sock_net(skb->sk);
skb              1387 net/sched/act_api.c 	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
skb              1391 net/sched/act_api.c 	    !netlink_capable(skb, CAP_NET_ADMIN))
skb              1455 net/sched/act_api.c static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
skb              1457 net/sched/act_api.c 	struct net *net = sock_net(skb->sk);
skb              1459 net/sched/act_api.c 	unsigned char *b = skb_tail_pointer(skb);
skb              1497 net/sched/act_api.c 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              1510 net/sched/act_api.c 	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
skb              1514 net/sched/act_api.c 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
skb              1518 net/sched/act_api.c 	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
skb              1523 net/sched/act_api.c 		nla_nest_end(skb, nest);
skb              1524 net/sched/act_api.c 		ret = skb->len;
skb              1529 net/sched/act_api.c 		nlmsg_trim(skb, b);
skb              1531 net/sched/act_api.c 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
skb              1532 net/sched/act_api.c 	if (NETLINK_CB(cb->skb).portid && ret)
skb              1535 net/sched/act_api.c 	return skb->len;
skb              1539 net/sched/act_api.c 	nlmsg_trim(skb, b);
skb              1540 net/sched/act_api.c 	return skb->len;
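
The act_api.c lines above repeat one netlink dump idiom: remember
skb_tail_pointer() before writing, nest attributes with
nla_nest_start_noflag()/nla_nest_end(), and on any failure nlmsg_trim()
back to the mark so the message is left exactly as it was found. A
kernel-style sketch of the idiom (not a standalone program; demo_fill()
and its error value are illustrative, the nla_* helpers are the real
ones):

    static int demo_fill(struct sk_buff *skb, u32 index)
    {
            unsigned char *b = skb_tail_pointer(skb);  /* rollback mark */
            struct nlattr *nest;

            nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
            if (!nest)
                    goto out_trim;
            if (nla_put_u32(skb, TCA_ACT_INDEX, index))
                    goto out_trim;
            nla_nest_end(skb, nest);
            return skb->len;

    out_trim:
            nlmsg_trim(skb, b);   /* drop partially written attributes */
            return -EMSGSIZE;
    }
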
skb                34 net/sched/act_bpf.c static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
skb                37 net/sched/act_bpf.c 	bool at_ingress = skb_at_tc_ingress(skb);
skb                43 net/sched/act_bpf.c 	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
skb                48 net/sched/act_bpf.c 		__skb_push(skb, skb->mac_len);
skb                49 net/sched/act_bpf.c 		bpf_compute_data_pointers(skb);
skb                50 net/sched/act_bpf.c 		filter_res = BPF_PROG_RUN(filter, skb);
skb                51 net/sched/act_bpf.c 		__skb_pull(skb, skb->mac_len);
skb                53 net/sched/act_bpf.c 		bpf_compute_data_pointers(skb);
skb                54 net/sched/act_bpf.c 		filter_res = BPF_PROG_RUN(filter, skb);
skb                96 net/sched/act_bpf.c 				 struct sk_buff *skb)
skb               100 net/sched/act_bpf.c 	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
skb               103 net/sched/act_bpf.c 	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
skb               114 net/sched/act_bpf.c 				  struct sk_buff *skb)
skb               119 net/sched/act_bpf.c 	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
skb               122 net/sched/act_bpf.c 	if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
skb               125 net/sched/act_bpf.c 	nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
skb               134 net/sched/act_bpf.c static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
skb               137 net/sched/act_bpf.c 	unsigned char *tp = skb_tail_pointer(skb);
skb               149 net/sched/act_bpf.c 	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
skb               153 net/sched/act_bpf.c 		ret = tcf_bpf_dump_ebpf_info(prog, skb);
skb               155 net/sched/act_bpf.c 		ret = tcf_bpf_dump_bpf_info(prog, skb);
skb               160 net/sched/act_bpf.c 	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
skb               165 net/sched/act_bpf.c 	return skb->len;
skb               169 net/sched/act_bpf.c 	nlmsg_trim(skb, tp);
skb               391 net/sched/act_bpf.c static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
skb               398 net/sched/act_bpf.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
skb                31 net/sched/act_connmark.c static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
skb                44 net/sched/act_connmark.c 	bstats_update(&ca->tcf_bstats, skb);
skb                46 net/sched/act_connmark.c 	if (skb->protocol == htons(ETH_P_IP)) {
skb                47 net/sched/act_connmark.c 		if (skb->len < sizeof(struct iphdr))
skb                51 net/sched/act_connmark.c 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb                52 net/sched/act_connmark.c 		if (skb->len < sizeof(struct ipv6hdr))
skb                60 net/sched/act_connmark.c 	c = nf_ct_get(skb, &ctinfo);
skb                62 net/sched/act_connmark.c 		skb->mark = c->mark;
skb                68 net/sched/act_connmark.c 	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
skb                82 net/sched/act_connmark.c 	skb->mark = c->mark;
skb               169 net/sched/act_connmark.c static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
skb               172 net/sched/act_connmark.c 	unsigned char *b = skb_tail_pointer(skb);
skb               184 net/sched/act_connmark.c 	if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
skb               188 net/sched/act_connmark.c 	if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t,
skb               193 net/sched/act_connmark.c 	return skb->len;
skb               197 net/sched/act_connmark.c 	nlmsg_trim(skb, b);
skb               201 net/sched/act_connmark.c static int tcf_connmark_walker(struct net *net, struct sk_buff *skb,
skb               208 net/sched/act_connmark.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
skb               135 net/sched/act_csum.c static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
skb               139 net/sched/act_csum.c 	int ntkoff = skb_network_offset(skb);
skb               142 net/sched/act_csum.c 	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
skb               143 net/sched/act_csum.c 	    skb_try_make_writable(skb, hl + ntkoff))
skb               146 net/sched/act_csum.c 		return (void *)(skb_network_header(skb) + ihl);
skb               149 net/sched/act_csum.c static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
skb               154 net/sched/act_csum.c 	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
skb               159 net/sched/act_csum.c 	skb->csum = csum_partial(icmph, ipl - ihl, 0);
skb               160 net/sched/act_csum.c 	icmph->checksum = csum_fold(skb->csum);
skb               162 net/sched/act_csum.c 	skb->ip_summed = CHECKSUM_NONE;
skb               167 net/sched/act_csum.c static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
skb               172 net/sched/act_csum.c 	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
skb               177 net/sched/act_csum.c 	skb->csum = csum_partial(igmph, ipl - ihl, 0);
skb               178 net/sched/act_csum.c 	igmph->csum = csum_fold(skb->csum);
skb               180 net/sched/act_csum.c 	skb->ip_summed = CHECKSUM_NONE;
skb               185 net/sched/act_csum.c static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
skb               191 net/sched/act_csum.c 	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
skb               195 net/sched/act_csum.c 	ip6h = ipv6_hdr(skb);
skb               197 net/sched/act_csum.c 	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
skb               200 net/sched/act_csum.c 					      skb->csum);
skb               202 net/sched/act_csum.c 	skb->ip_summed = CHECKSUM_NONE;
skb               207 net/sched/act_csum.c static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
skb               213 net/sched/act_csum.c 	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
skb               216 net/sched/act_csum.c 	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
skb               220 net/sched/act_csum.c 	iph = ip_hdr(skb);
skb               222 net/sched/act_csum.c 	skb->csum = csum_partial(tcph, ipl - ihl, 0);
skb               224 net/sched/act_csum.c 				   iph->saddr, iph->daddr, skb->csum);
skb               226 net/sched/act_csum.c 	skb->ip_summed = CHECKSUM_NONE;
skb               231 net/sched/act_csum.c static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
skb               237 net/sched/act_csum.c 	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
skb               240 net/sched/act_csum.c 	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
skb               244 net/sched/act_csum.c 	ip6h = ipv6_hdr(skb);
skb               246 net/sched/act_csum.c 	skb->csum = csum_partial(tcph, ipl - ihl, 0);
skb               249 net/sched/act_csum.c 				      skb->csum);
skb               251 net/sched/act_csum.c 	skb->ip_summed = CHECKSUM_NONE;
skb               256 net/sched/act_csum.c static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
skb               263 net/sched/act_csum.c 	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
skb               273 net/sched/act_csum.c 	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
skb               277 net/sched/act_csum.c 	iph = ip_hdr(skb);
skb               286 net/sched/act_csum.c 				skb->csum = csum_partial(udph, ipl - ihl, 0);
skb               288 net/sched/act_csum.c 				skb->csum = csum_partial(udph, ul, 0);
skb               295 net/sched/act_csum.c 			skb->csum = csum_partial(udph, ul, 0);
skb               300 net/sched/act_csum.c 						skb->csum);
skb               306 net/sched/act_csum.c 	skb->ip_summed = CHECKSUM_NONE;
skb               312 net/sched/act_csum.c static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
skb               319 net/sched/act_csum.c 	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
skb               329 net/sched/act_csum.c 	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
skb               333 net/sched/act_csum.c 	ip6h = ipv6_hdr(skb);
skb               340 net/sched/act_csum.c 			skb->csum = csum_partial(udph, ipl - ihl, 0);
skb               343 net/sched/act_csum.c 			skb->csum = csum_partial(udph, ul, 0);
skb               351 net/sched/act_csum.c 		skb->csum = csum_partial(udph, ul, 0);
skb               356 net/sched/act_csum.c 				      skb->csum);
skb               361 net/sched/act_csum.c 	skb->ip_summed = CHECKSUM_NONE;
skb               367 net/sched/act_csum.c static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
skb               372 net/sched/act_csum.c 	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
skb               375 net/sched/act_csum.c 	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
skb               379 net/sched/act_csum.c 	sctph->checksum = sctp_compute_cksum(skb,
skb               380 net/sched/act_csum.c 					     skb_network_offset(skb) + ihl);
skb               381 net/sched/act_csum.c 	skb->ip_summed = CHECKSUM_NONE;
skb               382 net/sched/act_csum.c 	skb->csum_not_inet = 0;
skb               387 net/sched/act_csum.c static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
skb               392 net/sched/act_csum.c 	ntkoff = skb_network_offset(skb);
skb               394 net/sched/act_csum.c 	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
skb               397 net/sched/act_csum.c 	iph = ip_hdr(skb);
skb               402 net/sched/act_csum.c 			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
skb               408 net/sched/act_csum.c 			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
skb               414 net/sched/act_csum.c 			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
skb               420 net/sched/act_csum.c 			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
skb               426 net/sched/act_csum.c 			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
skb               432 net/sched/act_csum.c 		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
skb               438 net/sched/act_csum.c 		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
skb               441 net/sched/act_csum.c 		ip_send_check(ip_hdr(skb));
skb               486 net/sched/act_csum.c static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
skb               495 net/sched/act_csum.c 	ntkoff = skb_network_offset(skb);
skb               499 net/sched/act_csum.c 	if (!pskb_may_pull(skb, hl + ntkoff))
skb               502 net/sched/act_csum.c 	ip6h = ipv6_hdr(skb);
skb               514 net/sched/act_csum.c 			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
skb               516 net/sched/act_csum.c 			ip6xh = (void *)(skb_network_header(skb) + hl);
skb               518 net/sched/act_csum.c 			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
skb               520 net/sched/act_csum.c 			ip6xh = (void *)(skb_network_header(skb) + hl);
skb               529 net/sched/act_csum.c 				if (!tcf_csum_ipv6_icmp(skb,
skb               535 net/sched/act_csum.c 				if (!tcf_csum_ipv6_tcp(skb,
skb               541 net/sched/act_csum.c 				if (!tcf_csum_ipv6_udp(skb, hl,
skb               547 net/sched/act_csum.c 				if (!tcf_csum_ipv6_udp(skb, hl,
skb               553 net/sched/act_csum.c 			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
skb               559 net/sched/act_csum.c 	} while (pskb_may_pull(skb, hl + 1 + ntkoff));
skb               569 net/sched/act_csum.c static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
skb               583 net/sched/act_csum.c 	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
skb               590 net/sched/act_csum.c 	protocol = tc_skb_protocol(skb);
skb               594 net/sched/act_csum.c 		if (!tcf_csum_ipv4(skb, update_flags))
skb               598 net/sched/act_csum.c 		if (!tcf_csum_ipv6(skb, update_flags))
skb               603 net/sched/act_csum.c 		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
skb               604 net/sched/act_csum.c 			protocol = skb->protocol;
skb               607 net/sched/act_csum.c 			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;
skb               610 net/sched/act_csum.c 			skb_pull(skb, VLAN_HLEN);
skb               611 net/sched/act_csum.c 			skb_reset_network_header(skb);
skb               620 net/sched/act_csum.c 		skb_push(skb, VLAN_HLEN);
skb               621 net/sched/act_csum.c 		skb_reset_network_header(skb);
skb               632 net/sched/act_csum.c static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
skb               635 net/sched/act_csum.c 	unsigned char *b = skb_tail_pointer(skb);
skb               651 net/sched/act_csum.c 	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
skb               655 net/sched/act_csum.c 	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
skb               659 net/sched/act_csum.c 	return skb->len;
skb               663 net/sched/act_csum.c 	nlmsg_trim(skb, b);
skb               677 net/sched/act_csum.c static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
skb               684 net/sched/act_csum.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
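
Behind the csum_partial()/csum_fold() calls that act_csum.c (above) makes
for ICMP, IGMP, TCP and UDP sits plain RFC 1071 arithmetic: a 16-bit
one's-complement sum with end-around carry, inverted at the end. A
self-contained sketch (csum16() is illustrative, not the kernel helper):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t csum16(const void *data, size_t len)
    {
            const uint8_t *p = data;
            uint32_t sum = 0;

            while (len > 1) {                 /* sum 16-bit words */
                    sum += (uint32_t)p[0] << 8 | p[1];
                    p += 2;
                    len -= 2;
            }
            if (len)                          /* odd trailing byte */
                    sum += (uint32_t)p[0] << 8;
            while (sum >> 16)                 /* fold carries back in */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            const uint8_t hdr[] = { 0x45, 0x00, 0x00, 0x1c };

            printf("0x%04x\n", csum16(hdr, sizeof(hdr)));  /* 0xbae3 */
            return 0;
    }
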
skb                43 net/sched/act_ct.c static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
skb                49 net/sched/act_ct.c 	ct = nf_ct_get(skb, &ctinfo);
skb                63 net/sched/act_ct.c 		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
skb                77 net/sched/act_ct.c static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
skb                84 net/sched/act_ct.c 		len = ntohs(ip_hdr(skb)->tot_len);
skb                88 net/sched/act_ct.c 			+ ntohs(ipv6_hdr(skb)->payload_len);
skb                91 net/sched/act_ct.c 		len = skb->len;
skb                94 net/sched/act_ct.c 	err = pskb_trim_rcsum(skb, len);
skb                99 net/sched/act_ct.c static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
skb               103 net/sched/act_ct.c 	switch (skb->protocol) {
skb               117 net/sched/act_ct.c static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
skb               121 net/sched/act_ct.c 	len =  skb_network_offset(skb) + sizeof(struct iphdr);
skb               122 net/sched/act_ct.c 	if (unlikely(skb->len < len))
skb               124 net/sched/act_ct.c 	if (unlikely(!pskb_may_pull(skb, len)))
skb               127 net/sched/act_ct.c 	*frag = ip_is_fragment(ip_hdr(skb));
skb               131 net/sched/act_ct.c static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
skb               137 net/sched/act_ct.c 	len =  skb_network_offset(skb) + sizeof(struct ipv6hdr);
skb               138 net/sched/act_ct.c 	if (unlikely(skb->len < len))
skb               140 net/sched/act_ct.c 	if (unlikely(!pskb_may_pull(skb, len)))
skb               143 net/sched/act_ct.c 	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
skb               151 net/sched/act_ct.c static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
skb               160 net/sched/act_ct.c 	ct = nf_ct_get(skb, &ctinfo);
skb               165 net/sched/act_ct.c 		err = tcf_ct_ipv4_is_fragment(skb, &frag);
skb               167 net/sched/act_ct.c 		err = tcf_ct_ipv6_is_fragment(skb, &frag);
skb               171 net/sched/act_ct.c 	skb_get(skb);
skb               176 net/sched/act_ct.c 		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
skb               178 net/sched/act_ct.c 		err = ip_defrag(net, skb, user);
skb               186 net/sched/act_ct.c 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
skb               187 net/sched/act_ct.c 		err = nf_ct_frag6_gather(net, skb, user);
skb               196 net/sched/act_ct.c 	skb_clear_hash(skb);
skb               197 net/sched/act_ct.c 	skb->ignore_df = 1;
skb               201 net/sched/act_ct.c 	kfree_skb(skb);
skb               220 net/sched/act_ct.c static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
skb               236 net/sched/act_ct.c 		if (skb->protocol == htons(ETH_P_IP) &&
skb               237 net/sched/act_ct.c 		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
skb               238 net/sched/act_ct.c 			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
skb               243 net/sched/act_ct.c 			   skb->protocol == htons(ETH_P_IPV6)) {
skb               245 net/sched/act_ct.c 			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
skb               246 net/sched/act_ct.c 			int hdrlen = ipv6_skip_exthdr(skb,
skb               251 net/sched/act_ct.c 				if (!nf_nat_icmpv6_reply_translation(skb, ct,
skb               287 net/sched/act_ct.c 	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
skb               324 net/sched/act_ct.c static int tcf_ct_act_nat(struct sk_buff *skb,
skb               363 net/sched/act_ct.c 	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
skb               371 net/sched/act_ct.c 		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
skb               379 net/sched/act_ct.c static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
skb               382 net/sched/act_ct.c 	struct net *net = dev_net(skb->dev);
skb               402 net/sched/act_ct.c 		ct = nf_ct_get(skb, &ctinfo);
skb               405 net/sched/act_ct.c 			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
skb               411 net/sched/act_ct.c 	family = tcf_ct_skb_nf_family(skb);
skb               418 net/sched/act_ct.c 	nh_ofs = skb_network_offset(skb);
skb               419 net/sched/act_ct.c 	skb_pull_rcsum(skb, nh_ofs);
skb               420 net/sched/act_ct.c 	err = tcf_ct_handle_fragments(net, skb, family, p->zone);
skb               428 net/sched/act_ct.c 	err = tcf_ct_skb_network_trim(skb, family);
skb               437 net/sched/act_ct.c 	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
skb               441 net/sched/act_ct.c 			ct = nf_ct_get(skb, &ctinfo);
skb               442 net/sched/act_ct.c 			if (skb_nfct(skb))
skb               443 net/sched/act_ct.c 				nf_conntrack_put(skb_nfct(skb));
skb               445 net/sched/act_ct.c 			nf_ct_set(skb, tmpl, IP_CT_NEW);
skb               451 net/sched/act_ct.c 		err = nf_conntrack_in(skb, &state);
skb               456 net/sched/act_ct.c 	ct = nf_ct_get(skb, &ctinfo);
skb               461 net/sched/act_ct.c 	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
skb               472 net/sched/act_ct.c 		nf_conntrack_confirm(skb);
skb               476 net/sched/act_ct.c 	skb_push_rcsum(skb, nh_ofs);
skb               479 net/sched/act_ct.c 	bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
skb               766 net/sched/act_ct.c static int tcf_ct_dump_key_val(struct sk_buff *skb,
skb               776 net/sched/act_ct.c 	err = nla_put(skb, val_type, len, val);
skb               781 net/sched/act_ct.c 		err = nla_put(skb, mask_type, len, mask);
skb               789 net/sched/act_ct.c static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
skb               801 net/sched/act_ct.c 			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
skb               804 net/sched/act_ct.c 			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
skb               808 net/sched/act_ct.c 			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
skb               811 net/sched/act_ct.c 			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
skb               818 net/sched/act_ct.c 		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
skb               821 net/sched/act_ct.c 		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
skb               829 net/sched/act_ct.c static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
skb               832 net/sched/act_ct.c 	unsigned char *b = skb_tail_pointer(skb);
skb               848 net/sched/act_ct.c 	if (tcf_ct_dump_key_val(skb,
skb               858 net/sched/act_ct.c 	    tcf_ct_dump_key_val(skb,
skb               865 net/sched/act_ct.c 	    tcf_ct_dump_key_val(skb,
skb               872 net/sched/act_ct.c 	    tcf_ct_dump_key_val(skb,
skb               878 net/sched/act_ct.c 	if (tcf_ct_dump_nat(skb, p))
skb               882 net/sched/act_ct.c 	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
skb               886 net/sched/act_ct.c 	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
skb               890 net/sched/act_ct.c 	return skb->len;
skb               893 net/sched/act_ct.c 	nlmsg_trim(skb, b);
skb               897 net/sched/act_ct.c static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
skb               904 net/sched/act_ct.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
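
tcf_ct_skb_network_trim() (above) cuts the skb down to the length the L3
header claims before conntrack sees it, so link-layer trailing padding
never reaches the tuple code. The same computation on a plain capture
buffer; l3_trimmed_len() is hypothetical:

    #include <arpa/inet.h>
    #include <netinet/ip.h>
    #include <stddef.h>

    static size_t l3_trimmed_len(const unsigned char *pkt, size_t caplen)
    {
            const struct iphdr *iph = (const struct iphdr *)pkt;
            size_t want;

            if (caplen < sizeof(*iph))
                    return caplen;            /* header not trustworthy */
            want = ntohs(iph->tot_len);
            return want < caplen ? want : caplen;  /* trim, never grow */
    }
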
skb                32 net/sched/act_ctinfo.c 				struct sk_buff *skb, int wlen, int proto)
skb                41 net/sched/act_ctinfo.c 		dscp = ipv4_get_dsfield(ip_hdr(skb)) & ~INET_ECN_MASK;
skb                43 net/sched/act_ctinfo.c 			if (likely(!skb_try_make_writable(skb, wlen))) {
skb                44 net/sched/act_ctinfo.c 				ipv4_change_dsfield(ip_hdr(skb),
skb                54 net/sched/act_ctinfo.c 		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & ~INET_ECN_MASK;
skb                56 net/sched/act_ctinfo.c 			if (likely(!skb_try_make_writable(skb, wlen))) {
skb                57 net/sched/act_ctinfo.c 				ipv6_change_dsfield(ipv6_hdr(skb),
skb                73 net/sched/act_ctinfo.c 				  struct sk_buff *skb)
skb                76 net/sched/act_ctinfo.c 	skb->mark = ct->mark & cp->cpmarkmask;
skb                79 net/sched/act_ctinfo.c static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
skb                95 net/sched/act_ctinfo.c 	bstats_update(&ca->tcf_bstats, skb);
skb                98 net/sched/act_ctinfo.c 	wlen = skb_network_offset(skb);
skb                99 net/sched/act_ctinfo.c 	if (tc_skb_protocol(skb) == htons(ETH_P_IP)) {
skb               101 net/sched/act_ctinfo.c 		if (!pskb_may_pull(skb, wlen))
skb               105 net/sched/act_ctinfo.c 	} else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) {
skb               107 net/sched/act_ctinfo.c 		if (!pskb_may_pull(skb, wlen))
skb               115 net/sched/act_ctinfo.c 	ct = nf_ct_get(skb, &ctinfo);
skb               117 net/sched/act_ctinfo.c 		if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
skb               132 net/sched/act_ctinfo.c 			tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto);
skb               135 net/sched/act_ctinfo.c 		tcf_ctinfo_cpmark_set(ct, ca, cp, skb);
skb               282 net/sched/act_ctinfo.c static int tcf_ctinfo_dump(struct sk_buff *skb, struct tc_action *a,
skb               291 net/sched/act_ctinfo.c 	unsigned char *b = skb_tail_pointer(skb);
skb               300 net/sched/act_ctinfo.c 	if (nla_put_64bit(skb, TCA_CTINFO_TM, sizeof(t), &t, TCA_CTINFO_PAD))
skb               304 net/sched/act_ctinfo.c 	if (nla_put(skb, TCA_CTINFO_ACT, sizeof(opt), &opt))
skb               307 net/sched/act_ctinfo.c 	if (nla_put_u16(skb, TCA_CTINFO_ZONE, cp->zone))
skb               311 net/sched/act_ctinfo.c 		if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_MASK,
skb               314 net/sched/act_ctinfo.c 		if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_STATEMASK,
skb               320 net/sched/act_ctinfo.c 		if (nla_put_u32(skb, TCA_CTINFO_PARMS_CPMARK_MASK,
skb               325 net/sched/act_ctinfo.c 	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_SET,
skb               329 net/sched/act_ctinfo.c 	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_ERROR,
skb               333 net/sched/act_ctinfo.c 	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_CPMARK_SET,
skb               338 net/sched/act_ctinfo.c 	return skb->len;
skb               342 net/sched/act_ctinfo.c 	nlmsg_trim(skb, b);
skb               346 net/sched/act_ctinfo.c static int tcf_ctinfo_walker(struct net *net, struct sk_buff *skb,
skb               353 net/sched/act_ctinfo.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
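
tcf_ctinfo_dscp_set() (above) masks with ~INET_ECN_MASK so that restoring
a DSCP stored in conntrack never clobbers the two ECN bits congestion
control owns. The masking in isolation; set_dscp() is illustrative:

    #include <stdio.h>

    #define INET_ECN_MASK 0x03        /* low two bits of the TOS byte */

    /* Install a new DSCP while preserving the ECN field. */
    static unsigned char set_dscp(unsigned char tos, unsigned char dscp)
    {
            return (dscp & ~INET_ECN_MASK) | (tos & INET_ECN_MASK);
    }

    int main(void)
    {
            /* TOS 0xb9 has ECN bits 01; write DSCP byte 0xa0 -> 0xa1 */
            printf("0x%02x\n", set_dscp(0xb9, 0xa0));
            return 0;
    }
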
skb               150 net/sched/act_gact.c static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
skb               164 net/sched/act_gact.c 	bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
skb               192 net/sched/act_gact.c static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
skb               195 net/sched/act_gact.c 	unsigned char *b = skb_tail_pointer(skb);
skb               206 net/sched/act_gact.c 	if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
skb               216 net/sched/act_gact.c 		if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt))
skb               221 net/sched/act_gact.c 	if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD))
skb               225 net/sched/act_gact.c 	return skb->len;
skb               229 net/sched/act_gact.c 	nlmsg_trim(skb, b);
skb               233 net/sched/act_gact.c static int tcf_gact_walker(struct net *net, struct sk_buff *skb,
skb               240 net/sched/act_gact.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
skb                61 net/sched/act_ife.c int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
skb                64 net/sched/act_ife.c 		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
skb                66 net/sched/act_ife.c 		return nla_put(skb, mi->metaid, 0, NULL);
skb               105 net/sched/act_ife.c int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
skb               108 net/sched/act_ife.c 		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
skb               110 net/sched/act_ife.c 		return nla_put(skb, mi->metaid, 0, NULL);
skb               374 net/sched/act_ife.c static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
skb               378 net/sched/act_ife.c 	unsigned char *b = skb_tail_pointer(skb);
skb               385 net/sched/act_ife.c 	nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
skb               390 net/sched/act_ife.c 		if (!e->ops->get(skb, e))
skb               397 net/sched/act_ife.c 	nla_nest_end(skb, nest);
skb               402 net/sched/act_ife.c 	nlmsg_trim(skb, b);
skb               618 net/sched/act_ife.c static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
skb               621 net/sched/act_ife.c 	unsigned char *b = skb_tail_pointer(skb);
skb               637 net/sched/act_ife.c 	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
skb               641 net/sched/act_ife.c 	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
skb               645 net/sched/act_ife.c 		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
skb               650 net/sched/act_ife.c 		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
skb               654 net/sched/act_ife.c 	if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
skb               657 net/sched/act_ife.c 	if (dump_metalist(skb, ife)) {
skb               663 net/sched/act_ife.c 	return skb->len;
skb               667 net/sched/act_ife.c 	nlmsg_trim(skb, b);
skb               671 net/sched/act_ife.c static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
skb               681 net/sched/act_ife.c 				return e->ops->decode(skb, mdata, mlen);
skb               689 net/sched/act_ife.c static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
skb               698 net/sched/act_ife.c 	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
skb               701 net/sched/act_ife.c 	if (skb_at_tc_ingress(skb))
skb               702 net/sched/act_ife.c 		skb_push(skb, skb->dev->hard_header_len);
skb               704 net/sched/act_ife.c 	tlv_data = ife_decode(skb, &metalen);
skb               723 net/sched/act_ife.c 		if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
skb               738 net/sched/act_ife.c 	skb->protocol = eth_type_trans(skb, skb->dev);
skb               739 net/sched/act_ife.c 	skb_reset_network_header(skb);
skb               747 net/sched/act_ife.c static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
skb               754 net/sched/act_ife.c 			run_sz = e->ops->check_presence(skb, e);
skb               762 net/sched/act_ife.c static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
skb               773 net/sched/act_ife.c 	u16 metalen = ife_get_sz(skb, ife);
skb               774 net/sched/act_ife.c 	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
skb               776 net/sched/act_ife.c 	int new_len = skb->len + hdrm;
skb               781 net/sched/act_ife.c 	if (!skb_at_tc_ingress(skb)) {
skb               782 net/sched/act_ife.c 		if (new_len > skb->dev->mtu)
skb               786 net/sched/act_ife.c 	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
skb               803 net/sched/act_ife.c 	if (skb_at_tc_ingress(skb))
skb               804 net/sched/act_ife.c 		skb_push(skb, skb->dev->hard_header_len);
skb               806 net/sched/act_ife.c 	ife_meta = ife_encode(skb, metalen);
skb               816 net/sched/act_ife.c 			err = e->ops->encode(skb, (void *)(ife_meta + skboff),
skb               828 net/sched/act_ife.c 	oethh = (struct ethhdr *)skb->data;
skb               836 net/sched/act_ife.c 	if (skb_at_tc_ingress(skb))
skb               837 net/sched/act_ife.c 		skb_pull(skb, skb->dev->hard_header_len);
skb               842 net/sched/act_ife.c static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
skb               851 net/sched/act_ife.c 		ret = tcf_ife_encode(skb, a, res, p);
skb               855 net/sched/act_ife.c 	return tcf_ife_decode(skb, a, res);
skb               858 net/sched/act_ife.c static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
skb               865 net/sched/act_ife.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
skb               223 net/sched/act_ipt.c static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
skb               230 net/sched/act_ipt.c 		.net	= dev_net(skb->dev),
skb               231 net/sched/act_ipt.c 		.in	= skb->dev,
skb               236 net/sched/act_ipt.c 	if (skb_unclone(skb, GFP_ATOMIC))
skb               242 net/sched/act_ipt.c 	bstats_update(&ipt->tcf_bstats, skb);
skb               251 net/sched/act_ipt.c 	ret = par.target->target(skb, &par);
skb               275 net/sched/act_ipt.c static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
skb               278 net/sched/act_ipt.c 	unsigned char *b = skb_tail_pointer(skb);
skb               298 net/sched/act_ipt.c 	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
skb               299 net/sched/act_ipt.c 	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
skb               300 net/sched/act_ipt.c 	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
skb               301 net/sched/act_ipt.c 	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
skb               302 net/sched/act_ipt.c 	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
skb               306 net/sched/act_ipt.c 	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
skb               311 net/sched/act_ipt.c 	return skb->len;
skb               315 net/sched/act_ipt.c 	nlmsg_trim(skb, b);
skb               320 net/sched/act_ipt.c static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
skb               327 net/sched/act_ipt.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
skb               369 net/sched/act_ipt.c static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
skb               376 net/sched/act_ipt.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
skb                21 net/sched/act_meta_mark.c static int skbmark_encode(struct sk_buff *skb, void *skbdata,
skb                24 net/sched/act_meta_mark.c 	u32 ifemark = skb->mark;
skb                29 net/sched/act_meta_mark.c static int skbmark_decode(struct sk_buff *skb, void *data, u16 len)
skb                33 net/sched/act_meta_mark.c 	skb->mark = ntohl(ifemark);
skb                37 net/sched/act_meta_mark.c static int skbmark_check(struct sk_buff *skb, struct tcf_meta_info *e)
skb                39 net/sched/act_meta_mark.c 	return ife_check_meta_u32(skb->mark, e);
skb                21 net/sched/act_meta_skbprio.c static int skbprio_check(struct sk_buff *skb, struct tcf_meta_info *e)
skb                23 net/sched/act_meta_skbprio.c 	return ife_check_meta_u32(skb->priority, e);
skb                26 net/sched/act_meta_skbprio.c static int skbprio_encode(struct sk_buff *skb, void *skbdata,
skb                29 net/sched/act_meta_skbprio.c 	u32 ifeprio = skb->priority; /* avoid having to cast skb->priority*/
skb                34 net/sched/act_meta_skbprio.c static int skbprio_decode(struct sk_buff *skb, void *data, u16 len)
skb                38 net/sched/act_meta_skbprio.c 	skb->priority = ntohl(ifeprio);
skb                21 net/sched/act_meta_skbtcindex.c static int skbtcindex_encode(struct sk_buff *skb, void *skbdata,
skb                24 net/sched/act_meta_skbtcindex.c 	u32 ifetc_index = skb->tc_index;
skb                29 net/sched/act_meta_skbtcindex.c static int skbtcindex_decode(struct sk_buff *skb, void *data, u16 len)
skb                33 net/sched/act_meta_skbtcindex.c 	skb->tc_index = ntohs(ifetc_index);
skb                37 net/sched/act_meta_skbtcindex.c static int skbtcindex_check(struct sk_buff *skb, struct tcf_meta_info *e)
skb                39 net/sched/act_meta_skbtcindex.c 	return ife_check_meta_u16(skb->tc_index, e);
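
The three act_meta_*.c modules above are IFE metadata ops: each one
serialises a single skb field as a network-byte-order TLV on encode and
restores it on decode. A simplified standalone round trip for a u32 mark
(real IFE framing adds alignment padding; the type id is illustrative):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct tlv { uint16_t type; uint16_t len; };  /* both big-endian */

    static size_t put_u32(uint8_t *buf, uint16_t type, uint32_t val)
    {
            struct tlv t = { htons(type),
                             htons(sizeof(t) + sizeof(uint32_t)) };
            uint32_t v = htonl(val);

            memcpy(buf, &t, sizeof(t));
            memcpy(buf + sizeof(t), &v, sizeof(v));
            return sizeof(t) + sizeof(v);
    }

    int main(void)
    {
            uint8_t buf[8];
            uint32_t v;

            put_u32(buf, 1 /* illustrative meta id */, 0xdeadbeef);
            memcpy(&v, buf + 4, sizeof(v));   /* decode side */
            printf("mark 0x%x\n", ntohl(v));  /* 0xdeadbeef */
            return 0;
    }
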
skb               210 net/sched/act_mirred.c static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
skb               214 net/sched/act_mirred.c 	struct sk_buff *skb2 = skb;
skb               230 net/sched/act_mirred.c 				     netdev_name(skb->dev));
skb               236 net/sched/act_mirred.c 	bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
skb               258 net/sched/act_mirred.c 	use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
skb               261 net/sched/act_mirred.c 		skb2 = skb_clone(skb, GFP_ATOMIC);
skb               269 net/sched/act_mirred.c 	at_nh = skb->data == skb_network_header(skb);
skb               271 net/sched/act_mirred.c 		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
skb               272 net/sched/act_mirred.c 			  skb_network_header(skb) - skb_mac_header(skb);
skb               282 net/sched/act_mirred.c 	skb2->skb_iif = skb->dev->ifindex;
skb               293 net/sched/act_mirred.c 			skb_tc_reinsert(skb, res);
skb               328 net/sched/act_mirred.c static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
skb               331 net/sched/act_mirred.c 	unsigned char *b = skb_tail_pointer(skb);
skb               348 net/sched/act_mirred.c 	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
skb               352 net/sched/act_mirred.c 	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
skb               356 net/sched/act_mirred.c 	return skb->len;
skb               360 net/sched/act_mirred.c 	nlmsg_trim(skb, b);
skb               364 net/sched/act_mirred.c static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
skb               371 net/sched/act_mirred.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
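
tcf_mirred_act() (above) clones only when it has to: a mirror must leave
the original skb in flight, while an ingress redirect can reinsert the
original and skip the copy. A kernel-style sketch of that decision (not
standalone; demo_mirred_prepare(), target and want_reinsert are
hypothetical):

    static struct sk_buff *demo_mirred_prepare(struct sk_buff *skb,
                                               struct net_device *target,
                                               bool want_reinsert)
    {
            struct sk_buff *skb2 = skb;

            if (!want_reinsert) {             /* mirror, or egress redirect */
                    skb2 = skb_clone(skb, GFP_ATOMIC);
                    if (!skb2)
                            return NULL;
            }
            skb2->skb_iif = skb->dev->ifindex;  /* record ingress port */
            skb2->dev = target;
            return skb2;
    }
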
skb                53 net/sched/act_mpls.c static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
skb                62 net/sched/act_mpls.c 	bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
skb                67 net/sched/act_mpls.c 	if (skb_at_tc_ingress(skb)) {
skb                68 net/sched/act_mpls.c 		skb_push_rcsum(skb, skb->mac_len);
skb                69 net/sched/act_mpls.c 		mac_len = skb->mac_len;
skb                71 net/sched/act_mpls.c 		mac_len = skb_network_header(skb) - skb_mac_header(skb);
skb                80 net/sched/act_mpls.c 		if (skb_mpls_pop(skb, p->tcfm_proto, mac_len,
skb                81 net/sched/act_mpls.c 				 skb->dev && skb->dev->type == ARPHRD_ETHER))
skb                85 net/sched/act_mpls.c 		new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
skb                86 net/sched/act_mpls.c 		if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len,
skb                87 net/sched/act_mpls.c 				  skb->dev && skb->dev->type == ARPHRD_ETHER))
skb                91 net/sched/act_mpls.c 		new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false);
skb                92 net/sched/act_mpls.c 		if (skb_mpls_update_lse(skb, new_lse))
skb                96 net/sched/act_mpls.c 		if (skb_mpls_dec_ttl(skb))
skb               101 net/sched/act_mpls.c 	if (skb_at_tc_ingress(skb))
skb               102 net/sched/act_mpls.c 		skb_pull_rcsum(skb, skb->mac_len);
skb               297 net/sched/act_mpls.c static int tcf_mpls_dump(struct sk_buff *skb, struct tc_action *a,
skb               300 net/sched/act_mpls.c 	unsigned char *b = skb_tail_pointer(skb);
skb               315 net/sched/act_mpls.c 	if (nla_put(skb, TCA_MPLS_PARMS, sizeof(opt), &opt))
skb               319 net/sched/act_mpls.c 	    nla_put_u32(skb, TCA_MPLS_LABEL, p->tcfm_label))
skb               323 net/sched/act_mpls.c 	    nla_put_u8(skb, TCA_MPLS_TC, p->tcfm_tc))
skb               326 net/sched/act_mpls.c 	if (p->tcfm_ttl && nla_put_u8(skb, TCA_MPLS_TTL, p->tcfm_ttl))
skb               330 net/sched/act_mpls.c 	    nla_put_u8(skb, TCA_MPLS_BOS, p->tcfm_bos))
skb               333 net/sched/act_mpls.c 	if (nla_put_be16(skb, TCA_MPLS_PROTO, p->tcfm_proto))
skb               338 net/sched/act_mpls.c 	if (nla_put_64bit(skb, TCA_MPLS_TM, sizeof(t), &t, TCA_MPLS_PAD))
skb               343 net/sched/act_mpls.c 	return skb->len;
skb               347 net/sched/act_mpls.c 	nlmsg_trim(skb, b);
skb               351 net/sched/act_mpls.c static int tcf_mpls_walker(struct net *net, struct sk_buff *skb,
skb               358 net/sched/act_mpls.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
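
The MPLS action above ultimately pushes or rewrites one 32-bit label
stack entry; assembling it, as tcf_mpls_get_lse() does before
skb_mpls_push()/skb_mpls_update_lse(), is plain bit packing. An
illustrative standalone version:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mpls_lse(uint32_t label, uint8_t tc, int bos,
                             uint8_t ttl)
    {
            return htonl((label & 0xfffff) << 12 |  /* 20-bit label */
                         (uint32_t)(tc & 7) << 9 |  /* 3-bit TC */
                         (uint32_t)(!!bos) << 8 |   /* bottom of stack */
                         ttl);                      /* 8-bit TTL */
    }

    int main(void)
    {
            /* label 16, TC 0, bottom of stack, TTL 64 -> 0x00010140 */
            printf("0x%08x\n", ntohl(mpls_lse(16, 0, 1, 64)));
            return 0;
    }
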
skb               105 net/sched/act_nat.c static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
skb               128 net/sched/act_nat.c 	bstats_update(&p->tcf_bstats, skb);
skb               135 net/sched/act_nat.c 	noff = skb_network_offset(skb);
skb               136 net/sched/act_nat.c 	if (!pskb_may_pull(skb, sizeof(*iph) + noff))
skb               139 net/sched/act_nat.c 	iph = ip_hdr(skb);
skb               147 net/sched/act_nat.c 		if (skb_try_make_writable(skb, sizeof(*iph) + noff))
skb               154 net/sched/act_nat.c 		iph = ip_hdr(skb);
skb               174 net/sched/act_nat.c 		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
skb               175 net/sched/act_nat.c 		    skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
skb               178 net/sched/act_nat.c 		tcph = (void *)(skb_network_header(skb) + ihl);
skb               179 net/sched/act_nat.c 		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
skb               187 net/sched/act_nat.c 		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
skb               188 net/sched/act_nat.c 		    skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
skb               191 net/sched/act_nat.c 		udph = (void *)(skb_network_header(skb) + ihl);
skb               192 net/sched/act_nat.c 		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
skb               193 net/sched/act_nat.c 			inet_proto_csum_replace4(&udph->check, skb, addr,
skb               204 net/sched/act_nat.c 		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
skb               207 net/sched/act_nat.c 		icmph = (void *)(skb_network_header(skb) + ihl);
skb               214 net/sched/act_nat.c 		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
skb               218 net/sched/act_nat.c 		icmph = (void *)(skb_network_header(skb) + ihl);
skb               228 net/sched/act_nat.c 		if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
skb               232 net/sched/act_nat.c 		icmph = (void *)(skb_network_header(skb) + ihl);
skb               244 net/sched/act_nat.c 		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
skb               262 net/sched/act_nat.c static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
skb               265 net/sched/act_nat.c 	unsigned char *b = skb_tail_pointer(skb);
skb               281 net/sched/act_nat.c 	if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
skb               285 net/sched/act_nat.c 	if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
skb               289 net/sched/act_nat.c 	return skb->len;
skb               293 net/sched/act_nat.c 	nlmsg_trim(skb, b);
skb               297 net/sched/act_nat.c static int tcf_nat_walker(struct net *net, struct sk_buff *skb,
skb               304 net/sched/act_nat.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
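
act_nat never recomputes a transport checksum from scratch: after rewriting an IPv4 address it patches the TCP/UDP/ICMP checksum incrementally through inet_proto_csum_replace4(), and the UDP branch above additionally skips a zero checksum (which means "none") unless the skb is CHECKSUM_PARTIAL. Below is a userspace sketch of the incremental update (RFC 1624, HC' = ~(~HC + ~m + m')); the helper names are hypothetical and byte-order handling is simplified.

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)sum;
}

/* Patch a 16-bit one's-complement checksum after a 32-bit field
 * changed from 'from' to 'to' (RFC 1624 equation 3). */
static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~(from >> 16) + (uint16_t)~(from & 0xFFFF);
	sum += (to >> 16) + (to & 0xFFFF);
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	/* e.g. rewriting 10.0.0.1 to 192.168.1.1 */
	printf("new check = 0x%04x\n",
	       csum_replace4(0x1C46, 0x0A000001, 0xC0A80101));
	return 0;
}
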
skb               105 net/sched/act_pedit.c static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
skb               108 net/sched/act_pedit.c 	struct nlattr *keys_start = nla_nest_start_noflag(skb,
skb               116 net/sched/act_pedit.c 		key_start = nla_nest_start_noflag(skb, TCA_PEDIT_KEY_EX);
skb               120 net/sched/act_pedit.c 		if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
skb               121 net/sched/act_pedit.c 		    nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
skb               124 net/sched/act_pedit.c 		nla_nest_end(skb, key_start);
skb               129 net/sched/act_pedit.c 	nla_nest_end(skb, keys_start);
skb               133 net/sched/act_pedit.c 	nla_nest_cancel(skb, keys_start);
skb               264 net/sched/act_pedit.c static bool offset_valid(struct sk_buff *skb, int offset)
skb               266 net/sched/act_pedit.c 	if (offset > 0 && offset > skb->len)
skb               269 net/sched/act_pedit.c 	if  (offset < 0 && -offset > skb_headroom(skb))
skb               275 net/sched/act_pedit.c static int pedit_skb_hdr_offset(struct sk_buff *skb,
skb               282 net/sched/act_pedit.c 		if (skb_mac_header_was_set(skb)) {
skb               283 net/sched/act_pedit.c 			*hoffset = skb_mac_offset(skb);
skb               290 net/sched/act_pedit.c 		*hoffset = skb_network_offset(skb);
skb               295 net/sched/act_pedit.c 		if (skb_transport_header_was_set(skb)) {
skb               296 net/sched/act_pedit.c 			*hoffset = skb_transport_offset(skb);
skb               308 net/sched/act_pedit.c static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
skb               314 net/sched/act_pedit.c 	if (skb_unclone(skb, GFP_ATOMIC))
skb               342 net/sched/act_pedit.c 			rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
skb               352 net/sched/act_pedit.c 				if (!offset_valid(skb, hoffset + tkey->at)) {
skb               357 net/sched/act_pedit.c 				d = skb_header_pointer(skb, hoffset + tkey->at,
skb               369 net/sched/act_pedit.c 			if (!offset_valid(skb, hoffset + offset)) {
skb               375 net/sched/act_pedit.c 			ptr = skb_header_pointer(skb, hoffset + offset,
skb               395 net/sched/act_pedit.c 				skb_store_bits(skb, hoffset + offset, ptr, 4);
skb               406 net/sched/act_pedit.c 	bstats_update(&p->tcf_bstats, skb);
skb               411 net/sched/act_pedit.c static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
skb               414 net/sched/act_pedit.c 	unsigned char *b = skb_tail_pointer(skb);
skb               438 net/sched/act_pedit.c 		if (tcf_pedit_key_ex_dump(skb,
skb               443 net/sched/act_pedit.c 		if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
skb               446 net/sched/act_pedit.c 		if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
skb               451 net/sched/act_pedit.c 	if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD))
skb               456 net/sched/act_pedit.c 	return skb->len;
skb               460 net/sched/act_pedit.c 	nlmsg_trim(skb, b);
skb               465 net/sched/act_pedit.c static int tcf_pedit_walker(struct net *net, struct sk_buff *skb,
skb               472 net/sched/act_pedit.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
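
The pedit entries show the defensive pattern for arbitrary packet edits: resolve a base header offset (pedit_skb_hdr_offset), validate the final offset against both the packet length and the headroom (offset_valid covers positive and negative offsets), fetch the bytes via skb_header_pointer(), and apply a masked read-modify-write. A simplified sketch follows; the pkt struct stands in for struct sk_buff, only forward offsets are handled, and the word is edited in host byte order.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pkt { uint8_t *head; uint8_t *data; size_t len; };

static bool offset_valid(const struct pkt *p, int offset)
{
	if (offset > 0 && (size_t)offset > p->len)
		return false;			/* beyond the packet tail */
	if (offset < 0 && -offset > p->data - p->head)
		return false;			/* deeper than the headroom */
	return true;
}

/* Masked 32-bit write, mirroring the (old & mask) ^ val form that
 * tcf_pedit_act() applies: bits cleared in 'mask' get replaced. */
static int pedit_set32(struct pkt *p, int offset, uint32_t mask, uint32_t val)
{
	uint32_t word;

	if (offset < 0 || !offset_valid(p, offset + 4))
		return -1;			/* simplified: forward only */
	memcpy(&word, p->data + offset, 4);
	word = (word & mask) ^ val;
	memcpy(p->data + offset, &word, 4);
	return 0;
}

int main(void)
{
	uint8_t buf[64] = { 0 };
	struct pkt p = { .head = buf, .data = buf + 16, .len = 32 };

	pedit_set32(&p, 0, 0xFFFFFF00u, 0x0000002Au);
	printf("first byte = 0x%02x\n", p.data[0]); /* 0x2a, little-endian */
	return 0;
}
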
skb                28 net/sched/act_police.c static int tcf_police_walker(struct net *net, struct sk_buff *skb,
skb                35 net/sched/act_police.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
skb               218 net/sched/act_police.c static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
skb               227 net/sched/act_police.c 	bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);
skb               240 net/sched/act_police.c 	if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
skb               254 net/sched/act_police.c 						    qdisc_pkt_len(skb));
skb               259 net/sched/act_police.c 		toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
skb               304 net/sched/act_police.c static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
skb               307 net/sched/act_police.c 	unsigned char *b = skb_tail_pointer(skb);
skb               326 net/sched/act_police.c 		    nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
skb               334 net/sched/act_police.c 		    nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
skb               339 net/sched/act_police.c 	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
skb               342 net/sched/act_police.c 	    nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
skb               345 net/sched/act_police.c 	    nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
skb               352 net/sched/act_police.c 	if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
skb               356 net/sched/act_police.c 	return skb->len;
skb               360 net/sched/act_police.c 	nlmsg_trim(skb, b);
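
tcf_police_act() charges each packet against a nanosecond token bucket: tokens accrue with idle time up to the configured burst, psched_l2t_ns() converts the packet length to transmission time at the contracted rate, and the packet conforms while the bucket stays non-negative (a second, analogous bucket handles the optional peak rate, and the TCA_POLICE_RATE64/PEAKRATE64 attributes above carry rates too large for the legacy 32-bit fields). A single-bucket userspace sketch with hypothetical names:

#include <stdint.h>
#include <stdio.h>

struct bucket {
	int64_t toks;		/* ns of credit, capped at burst_ns */
	int64_t burst_ns;
	uint64_t rate_Bps;	/* contracted rate, bytes per second */
	uint64_t t_last;	/* ns timestamp of the previous packet */
};

/* Transmission time of 'len' bytes at the contracted rate, in ns
 * (the role psched_l2t_ns() plays above). */
static int64_t l2t_ns(const struct bucket *b, uint32_t len)
{
	return (int64_t)(len * 1000000000ull / b->rate_Bps);
}

static int police(struct bucket *b, uint64_t now, uint32_t len)
{
	int64_t toks = b->toks + (int64_t)(now - b->t_last);

	if (toks > b->burst_ns)
		toks = b->burst_ns;
	toks -= l2t_ns(b, len);			/* charge this packet */
	if (toks < 0)
		return -1;			/* exceed: drop or reclassify */
	b->toks = toks;
	b->t_last = now;
	return 0;				/* conform */
}

int main(void)
{
	/* 10 Mbit/s (1.25 MB/s) with a 2 ms burst allowance */
	struct bucket b = { .toks = 2000000, .burst_ns = 2000000,
			    .rate_Bps = 1250000, .t_last = 0 };

	printf("1500B at t=1ms: %s\n",
	       police(&b, 1000000, 1500) ? "exceed" : "conform");
	return 0;
}
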
skb               158 net/sched/act_sample.c static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
skb               169 net/sched/act_sample.c 	bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb);
skb               176 net/sched/act_sample.c 		if (!skb_at_tc_ingress(skb)) {
skb               177 net/sched/act_sample.c 			iif = skb->skb_iif;
skb               178 net/sched/act_sample.c 			oif = skb->dev->ifindex;
skb               180 net/sched/act_sample.c 			iif = skb->dev->ifindex;
skb               185 net/sched/act_sample.c 		if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
skb               186 net/sched/act_sample.c 			skb_push(skb, skb->mac_len);
skb               188 net/sched/act_sample.c 		size = s->truncate ? s->trunc_size : skb->len;
skb               189 net/sched/act_sample.c 		psample_sample_packet(psample_group, skb, size, iif, oif,
skb               192 net/sched/act_sample.c 		if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
skb               193 net/sched/act_sample.c 			skb_pull(skb, skb->mac_len);
skb               199 net/sched/act_sample.c static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
skb               202 net/sched/act_sample.c 	unsigned char *b = skb_tail_pointer(skb);
skb               213 net/sched/act_sample.c 	if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt))
skb               217 net/sched/act_sample.c 	if (nla_put_64bit(skb, TCA_SAMPLE_TM, sizeof(t), &t, TCA_SAMPLE_PAD))
skb               220 net/sched/act_sample.c 	if (nla_put_u32(skb, TCA_SAMPLE_RATE, s->rate))
skb               224 net/sched/act_sample.c 		if (nla_put_u32(skb, TCA_SAMPLE_TRUNC_SIZE, s->trunc_size))
skb               227 net/sched/act_sample.c 	if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num))
skb               231 net/sched/act_sample.c 	return skb->len;
skb               235 net/sched/act_sample.c 	nlmsg_trim(skb, b);
skb               239 net/sched/act_sample.c static int tcf_sample_walker(struct net *net, struct sk_buff *skb,
skb               246 net/sched/act_sample.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
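
tcf_sample_act() mirrors roughly one packet in 'rate' to a psample group, truncating the copy to trunc_size when requested; the ingress lines above also show the MAC header being pushed back first (and pulled afterwards) so the sampled bytes begin at the Ethernet header. Below is a sketch of the selection and truncation decision; the kernel draws randomness from its own PRNG, rand() merely stands in, and the copy is additionally capped to the packet length here for safety.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct sampler { uint32_t rate; int truncate; uint32_t trunc_size; };

/* Bytes to hand to the sampling channel, or 0 to skip this packet. */
static uint32_t sample_len(const struct sampler *s, uint32_t pkt_len)
{
	if (s->rate == 0 || (uint32_t)rand() % s->rate != 0)
		return 0;			/* not selected this time */
	if (s->truncate && s->trunc_size < pkt_len)
		return s->trunc_size;		/* truncated copy */
	return pkt_len;
}

int main(void)
{
	struct sampler s = { .rate = 100, .truncate = 1, .trunc_size = 128 };
	unsigned int hits = 0;

	srand(1);
	for (int i = 0; i < 100000; i++)
		if (sample_len(&s, 1500))
			hits++;
	printf("sampled %u of 100000 packets\n", hits); /* expect ~1000 */
	return 0;
}
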
skb                25 net/sched/act_simple.c static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a,
skb                32 net/sched/act_simple.c 	bstats_update(&d->tcf_bstats, skb);
skb               170 net/sched/act_simple.c static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
skb               173 net/sched/act_simple.c 	unsigned char *b = skb_tail_pointer(skb);
skb               184 net/sched/act_simple.c 	if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
skb               185 net/sched/act_simple.c 	    nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
skb               189 net/sched/act_simple.c 	if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD))
skb               193 net/sched/act_simple.c 	return skb->len;
skb               197 net/sched/act_simple.c 	nlmsg_trim(skb, b);
skb               201 net/sched/act_simple.c static int tcf_simp_walker(struct net *net, struct sk_buff *skb,
skb               208 net/sched/act_simple.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
skb                26 net/sched/act_skbedit.c static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
skb                34 net/sched/act_skbedit.c 	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
skb                40 net/sched/act_skbedit.c 		skb->priority = params->priority;
skb                42 net/sched/act_skbedit.c 		int wlen = skb_network_offset(skb);
skb                44 net/sched/act_skbedit.c 		switch (tc_skb_protocol(skb)) {
skb                47 net/sched/act_skbedit.c 			if (!pskb_may_pull(skb, wlen))
skb                49 net/sched/act_skbedit.c 			skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
skb                54 net/sched/act_skbedit.c 			if (!pskb_may_pull(skb, wlen))
skb                56 net/sched/act_skbedit.c 			skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
skb                61 net/sched/act_skbedit.c 	    skb->dev->real_num_tx_queues > params->queue_mapping)
skb                62 net/sched/act_skbedit.c 		skb_set_queue_mapping(skb, params->queue_mapping);
skb                64 net/sched/act_skbedit.c 		skb->mark &= ~params->mask;
skb                65 net/sched/act_skbedit.c 		skb->mark |= params->mark & params->mask;
skb                68 net/sched/act_skbedit.c 		skb->pkt_type = params->ptype;
skb               228 net/sched/act_skbedit.c static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
skb               231 net/sched/act_skbedit.c 	unsigned char *b = skb_tail_pointer(skb);
skb               247 net/sched/act_skbedit.c 	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
skb               250 net/sched/act_skbedit.c 	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
skb               253 net/sched/act_skbedit.c 	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
skb               256 net/sched/act_skbedit.c 	    nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
skb               259 net/sched/act_skbedit.c 	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
skb               262 net/sched/act_skbedit.c 	    nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
skb               267 net/sched/act_skbedit.c 	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
skb               271 net/sched/act_skbedit.c 	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
skb               275 net/sched/act_skbedit.c 	return skb->len;
skb               279 net/sched/act_skbedit.c 	nlmsg_trim(skb, b);
skb               293 net/sched/act_skbedit.c static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
skb               300 net/sched/act_skbedit.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
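
Most skbedit branches copy a configured value straight into skb fields (priority, queue mapping, mark, pkt_type), but the inherit-DSCP path above derives the priority from the packet itself: ipv4_get_dsfield()/ipv6_get_dsfield() return the whole traffic-class octet, and the >> 2 discards the two ECN bits, leaving the 6-bit DSCP. The extraction in isolation:

#include <stdint.h>
#include <stdio.h>

/* TOS / traffic-class octet: DSCP in bits 7..2, ECN in bits 1..0
 * (RFC 2474, RFC 3168). */
static uint8_t dscp_from_tos(uint8_t tos)
{
	return tos >> 2;
}

int main(void)
{
	uint8_t tos = 0xB8;			/* EF with ECN bits clear */

	printf("dscp = %u\n", dscp_from_tos(tos));	/* prints 46 */
	return 0;
}
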
skb                24 net/sched/act_skbmod.c static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
skb                34 net/sched/act_skbmod.c 	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
skb                40 net/sched/act_skbmod.c 	err = skb_ensure_writable(skb, MAX_EDIT_LEN);
skb                51 net/sched/act_skbmod.c 		ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
skb                53 net/sched/act_skbmod.c 		ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
skb                55 net/sched/act_skbmod.c 		eth_hdr(skb)->h_proto = p->eth_type;
skb                60 net/sched/act_skbmod.c 		ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest);
skb                61 net/sched/act_skbmod.c 		ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source);
skb                62 net/sched/act_skbmod.c 		ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr);
skb               214 net/sched/act_skbmod.c static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
skb               218 net/sched/act_skbmod.c 	unsigned char *b = skb_tail_pointer(skb);
skb               232 net/sched/act_skbmod.c 	if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
skb               235 net/sched/act_skbmod.c 	    nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst))
skb               238 net/sched/act_skbmod.c 	    nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src))
skb               241 net/sched/act_skbmod.c 	    nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type)))
skb               245 net/sched/act_skbmod.c 	if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
skb               249 net/sched/act_skbmod.c 	return skb->len;
skb               252 net/sched/act_skbmod.c 	nlmsg_trim(skb, b);
skb               256 net/sched/act_skbmod.c static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
skb               263 net/sched/act_skbmod.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
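
The skbmod SWAPMAC branch (lines 60-62 of act_skbmod.c above) exchanges the source and destination MACs through a scratch buffer, after skb_ensure_writable() has made the first MAX_EDIT_LEN bytes private and linear. The same swap on a bare Ethernet header; the struct here is a local stand-in, not the one from <linux/if_ether.h>:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct eth_hdr { uint8_t h_dest[6]; uint8_t h_source[6]; uint16_t h_proto; };

static void swap_mac(struct eth_hdr *eth)
{
	uint8_t tmp[6];				/* mirrors tmpaddr[] above */

	memcpy(tmp, eth->h_dest, 6);
	memcpy(eth->h_dest, eth->h_source, 6);
	memcpy(eth->h_source, tmp, 6);
}

int main(void)
{
	struct eth_hdr eth = {
		.h_dest   = { 0x02, 0, 0, 0, 0, 0x01 },
		.h_source = { 0x02, 0, 0, 0, 0, 0x02 },
	};

	swap_mac(&eth);
	printf("dest now ends in %02x\n", eth.h_dest[5]);	/* 02 */
	return 0;
}
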
skb                24 net/sched/act_tunnel_key.c static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
skb                34 net/sched/act_tunnel_key.c 	bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
skb                39 net/sched/act_tunnel_key.c 		skb_dst_drop(skb);
skb                42 net/sched/act_tunnel_key.c 		skb_dst_drop(skb);
skb                43 net/sched/act_tunnel_key.c 		skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
skb               425 net/sched/act_tunnel_key.c static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
skb               432 net/sched/act_tunnel_key.c 	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
skb               439 net/sched/act_tunnel_key.c 		if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
skb               441 net/sched/act_tunnel_key.c 		    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
skb               443 net/sched/act_tunnel_key.c 		    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
skb               445 net/sched/act_tunnel_key.c 			nla_nest_cancel(skb, start);
skb               453 net/sched/act_tunnel_key.c 	nla_nest_end(skb, start);
skb               457 net/sched/act_tunnel_key.c static int tunnel_key_opts_dump(struct sk_buff *skb,
skb               466 net/sched/act_tunnel_key.c 	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
skb               471 net/sched/act_tunnel_key.c 		err = tunnel_key_geneve_opts_dump(skb, info);
skb               476 net/sched/act_tunnel_key.c 		nla_nest_cancel(skb, start);
skb               480 net/sched/act_tunnel_key.c 	nla_nest_end(skb, start);
skb               484 net/sched/act_tunnel_key.c static int tunnel_key_dump_addresses(struct sk_buff *skb,
skb               493 net/sched/act_tunnel_key.c 		if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
skb               494 net/sched/act_tunnel_key.c 		    !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
skb               502 net/sched/act_tunnel_key.c 		if (!nla_put_in6_addr(skb,
skb               504 net/sched/act_tunnel_key.c 		    !nla_put_in6_addr(skb,
skb               512 net/sched/act_tunnel_key.c static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
skb               515 net/sched/act_tunnel_key.c 	unsigned char *b = skb_tail_pointer(skb);
skb               531 net/sched/act_tunnel_key.c 	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
skb               541 net/sched/act_tunnel_key.c 		     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
skb               542 net/sched/act_tunnel_key.c 		    tunnel_key_dump_addresses(skb,
skb               545 net/sched/act_tunnel_key.c 		      nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
skb               547 net/sched/act_tunnel_key.c 		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
skb               549 net/sched/act_tunnel_key.c 		    tunnel_key_opts_dump(skb, info))
skb               552 net/sched/act_tunnel_key.c 		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
skb               555 net/sched/act_tunnel_key.c 		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
skb               560 net/sched/act_tunnel_key.c 	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
skb               565 net/sched/act_tunnel_key.c 	return skb->len;
skb               569 net/sched/act_tunnel_key.c 	nlmsg_trim(skb, b);
skb               573 net/sched/act_tunnel_key.c static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
skb               580 net/sched/act_tunnel_key.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
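
tunnel_key_geneve_opts_dump() above demonstrates nested netlink attributes: nla_nest_start_noflag() opens a container whose length is not yet known, the inner attributes are appended, and nla_nest_end() back-patches the container length (nla_nest_cancel() instead rolls the whole nest back on failure). The same back-patching trick in a tiny TLV writer; the helpers are hypothetical models, bounds checks are elided, and netlink's 4-byte alignment padding is omitted:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msgbuf { uint8_t data[256]; size_t len; };

/* Reserve a container header; its length is patched by nest_end(). */
static size_t nest_start(struct msgbuf *m, uint16_t type)
{
	size_t at = m->len;

	memcpy(m->data + at, &type, 2);
	m->len += 4;
	return at;
}

static void nest_end(struct msgbuf *m, size_t at)
{
	uint16_t len = (uint16_t)(m->len - at);

	memcpy(m->data + at + 2, &len, 2);	/* like nla_nest_end() */
}

static void nest_cancel(struct msgbuf *m, size_t at)
{
	m->len = at;				/* like nla_nest_cancel() */
}

static void put_u8(struct msgbuf *m, uint16_t type, uint8_t v)
{
	uint16_t l = 1;

	memcpy(m->data + m->len, &type, 2);
	memcpy(m->data + m->len + 2, &l, 2);
	m->data[m->len + 4] = v;
	m->len += 5;
}

int main(void)
{
	struct msgbuf m = { .len = 0 };
	size_t opts = nest_start(&m, 1 /* ENC_OPTS_GENEVE */);

	put_u8(&m, 2 /* OPT_GENEVE_TYPE */, 0x80);
	nest_end(&m, opts);

	size_t bad = nest_start(&m, 3);		/* failure path ... */
	nest_cancel(&m, bad);			/* ... rolled back */
	printf("message is %zu bytes\n", m.len);
	return 0;
}
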
skb                22 net/sched/act_vlan.c static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
skb                32 net/sched/act_vlan.c 	bstats_cpu_update(this_cpu_ptr(v->common.cpu_bstats), skb);
skb                37 net/sched/act_vlan.c 	if (skb_at_tc_ingress(skb))
skb                38 net/sched/act_vlan.c 		skb_push_rcsum(skb, skb->mac_len);
skb                46 net/sched/act_vlan.c 		err = skb_vlan_pop(skb);
skb                51 net/sched/act_vlan.c 		err = skb_vlan_push(skb, p->tcfv_push_proto, p->tcfv_push_vid |
skb                58 net/sched/act_vlan.c 		if (!skb_vlan_tagged(skb))
skb                61 net/sched/act_vlan.c 		if (skb_vlan_tag_present(skb)) {
skb                62 net/sched/act_vlan.c 			tci = skb_vlan_tag_get(skb);
skb                63 net/sched/act_vlan.c 			__vlan_hwaccel_clear_tag(skb);
skb                66 net/sched/act_vlan.c 			err = __skb_vlan_pop(skb, &tci);
skb                78 net/sched/act_vlan.c 		__vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
skb                85 net/sched/act_vlan.c 	if (skb_at_tc_ingress(skb))
skb                86 net/sched/act_vlan.c 		skb_pull_rcsum(skb, skb->mac_len);
skb               252 net/sched/act_vlan.c static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
skb               255 net/sched/act_vlan.c 	unsigned char *b = skb_tail_pointer(skb);
skb               269 net/sched/act_vlan.c 	if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
skb               274 net/sched/act_vlan.c 	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
skb               275 net/sched/act_vlan.c 	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
skb               277 net/sched/act_vlan.c 	     (nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
skb               282 net/sched/act_vlan.c 	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
skb               286 net/sched/act_vlan.c 	return skb->len;
skb               290 net/sched/act_vlan.c 	nlmsg_trim(skb, b);
skb               294 net/sched/act_vlan.c static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
skb               301 net/sched/act_vlan.c 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
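
The vlan "modify" branch above pops the existing tag wherever it lives (from hwaccel metadata via skb_vlan_tag_get()/__vlan_hwaccel_clear_tag(), or from the payload via __skb_vlan_pop()), then re-inserts the rewritten TCI as hwaccel metadata with __vlan_hwaccel_put_tag(). The TCI itself, as built by the push path's tcfv_push_vid | (prio << shift) expression, is the 802.1Q packing of priority, drop-eligible bit and VLAN ID:

#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI: 3-bit PCP (priority), 1-bit DEI, 12-bit VID. */
static uint16_t tci_pack(uint8_t prio, int dei, uint16_t vid)
{
	return (uint16_t)((prio & 0x7) << 13 |
			  (dei ? 1 : 0) << 12 |
			  (vid & 0x0FFF));
}

int main(void)
{
	uint16_t tci = tci_pack(5, 0, 100);

	printf("tci=0x%04x vid=%u prio=%u\n",
	       tci, tci & 0x0FFF, tci >> 13);
	return 0;
}
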
skb              1562 net/sched/cls_api.c int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb              1574 net/sched/cls_api.c 		__be16 protocol = tc_skb_protocol(skb);
skb              1581 net/sched/cls_api.c 		err = tp->classify(skb, tp, res);
skb              1593 net/sched/cls_api.c 				ext = skb_ext_add(skb, TC_SKB_EXT);
skb              1781 net/sched/cls_api.c static int tcf_fill_node(struct net *net, struct sk_buff *skb,
skb              1789 net/sched/cls_api.c 	unsigned char *b = skb_tail_pointer(skb);
skb              1791 net/sched/cls_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
skb              1806 net/sched/cls_api.c 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
skb              1808 net/sched/cls_api.c 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
skb              1814 net/sched/cls_api.c 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
skb              1817 net/sched/cls_api.c 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
skb              1818 net/sched/cls_api.c 	return skb->len;
skb              1822 net/sched/cls_api.c 	nlmsg_trim(skb, b);
skb              1832 net/sched/cls_api.c 	struct sk_buff *skb;
skb              1836 net/sched/cls_api.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              1837 net/sched/cls_api.c 	if (!skb)
skb              1840 net/sched/cls_api.c 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
skb              1843 net/sched/cls_api.c 		kfree_skb(skb);
skb              1848 net/sched/cls_api.c 		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
skb              1850 net/sched/cls_api.c 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
skb              1864 net/sched/cls_api.c 	struct sk_buff *skb;
skb              1868 net/sched/cls_api.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              1869 net/sched/cls_api.c 	if (!skb)
skb              1872 net/sched/cls_api.c 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
skb              1876 net/sched/cls_api.c 		kfree_skb(skb);
skb              1882 net/sched/cls_api.c 		kfree_skb(skb);
skb              1887 net/sched/cls_api.c 		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
skb              1889 net/sched/cls_api.c 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
skb              1919 net/sched/cls_api.c static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
skb              1922 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
skb              1942 net/sched/cls_api.c 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
skb              2104 net/sched/cls_api.c 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
skb              2108 net/sched/cls_api.c 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
skb              2146 net/sched/cls_api.c static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
skb              2149 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
skb              2167 net/sched/cls_api.c 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
skb              2239 net/sched/cls_api.c 		tfilter_notify_chain(net, skb, block, q, parent, n,
skb              2263 net/sched/cls_api.c 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
skb              2278 net/sched/cls_api.c 		err = tfilter_del_notify(net, skb, n, tp, block,
skb              2306 net/sched/cls_api.c static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
skb              2309 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
skb              2407 net/sched/cls_api.c 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
skb              2430 net/sched/cls_api.c 	struct sk_buff *skb;
skb              2440 net/sched/cls_api.c 	struct net *net = sock_net(a->skb->sk);
skb              2442 net/sched/cls_api.c 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
skb              2443 net/sched/cls_api.c 			     n, NETLINK_CB(a->cb->skb).portid,
skb              2449 net/sched/cls_api.c 			   struct sk_buff *skb, struct netlink_callback *cb,
skb              2452 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
skb              2476 net/sched/cls_api.c 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
skb              2477 net/sched/cls_api.c 					  NETLINK_CB(cb->skb).portid,
skb              2486 net/sched/cls_api.c 		arg.skb = skb;
skb              2509 net/sched/cls_api.c static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
skb              2512 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
skb              2523 net/sched/cls_api.c 		return skb->len;
skb              2548 net/sched/cls_api.c 			return skb->len;
skb              2586 net/sched/cls_api.c 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
skb              2600 net/sched/cls_api.c 	if (skb->len == 0 && err)
skb              2602 net/sched/cls_api.c 	return skb->len;
skb              2607 net/sched/cls_api.c 			      struct net *net, struct sk_buff *skb,
skb              2611 net/sched/cls_api.c 	unsigned char *b = skb_tail_pointer(skb);
skb              2620 net/sched/cls_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
skb              2636 net/sched/cls_api.c 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
skb              2640 net/sched/cls_api.c 		if (nla_put_string(skb, TCA_KIND, ops->kind))
skb              2642 net/sched/cls_api.c 		if (ops->tmplt_dump(skb, net, priv) < 0)
skb              2646 net/sched/cls_api.c 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
skb              2647 net/sched/cls_api.c 	return skb->len;
skb              2651 net/sched/cls_api.c 	nlmsg_trim(skb, b);
skb              2661 net/sched/cls_api.c 	struct sk_buff *skb;
skb              2664 net/sched/cls_api.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              2665 net/sched/cls_api.c 	if (!skb)
skb              2669 net/sched/cls_api.c 			       chain->index, net, skb, block, portid,
skb              2671 net/sched/cls_api.c 		kfree_skb(skb);
skb              2676 net/sched/cls_api.c 		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
skb              2678 net/sched/cls_api.c 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
skb              2693 net/sched/cls_api.c 	struct sk_buff *skb;
skb              2695 net/sched/cls_api.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              2696 net/sched/cls_api.c 	if (!skb)
skb              2699 net/sched/cls_api.c 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
skb              2701 net/sched/cls_api.c 		kfree_skb(skb);
skb              2706 net/sched/cls_api.c 		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
skb              2708 net/sched/cls_api.c 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
skb              2759 net/sched/cls_api.c static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
skb              2762 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
skb              2774 net/sched/cls_api.c 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
skb              2858 net/sched/cls_api.c 		tfilter_notify_chain(net, skb, block, q, parent, n,
skb              2868 net/sched/cls_api.c 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
skb              2894 net/sched/cls_api.c static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
skb              2896 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
skb              2908 net/sched/cls_api.c 		return skb->len;
skb              2933 net/sched/cls_api.c 			return skb->len;
skb              2976 net/sched/cls_api.c 					 chain->index, net, skb, block,
skb              2977 net/sched/cls_api.c 					 NETLINK_CB(cb->skb).portid,
skb              2992 net/sched/cls_api.c 	if (skb->len == 0 && err)
skb              2994 net/sched/cls_api.c 	return skb->len;
skb              3074 net/sched/cls_api.c int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
skb              3086 net/sched/cls_api.c 			nest = nla_nest_start_noflag(skb, exts->action);
skb              3090 net/sched/cls_api.c 			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
skb              3092 net/sched/cls_api.c 			nla_nest_end(skb, nest);
skb              3095 net/sched/cls_api.c 			nest = nla_nest_start_noflag(skb, exts->police);
skb              3098 net/sched/cls_api.c 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
skb              3100 net/sched/cls_api.c 			nla_nest_end(skb, nest);
skb              3106 net/sched/cls_api.c 	nla_nest_cancel(skb, nest);
skb              3115 net/sched/cls_api.c int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
skb              3119 net/sched/cls_api.c 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
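
tcf_classify() at the top of the cls_api.c entries is the dispatcher behind all of the classifiers that follow: it walks the filter chain, skips protocols that don't match, calls each tp->classify(), and treats a non-negative return as the verdict. TC_ACT_RECLASSIFY restarts the walk, bounded by a small loop limit so a misconfigured chain cannot spin forever. A stripped-down sketch of that control flow; the protocol check and skb plumbing are omitted, and the exact cap value is an assumption.

#include <stdio.h>

#define TC_ACT_UNSPEC		(-1)
#define TC_ACT_RECLASSIFY	1
#define MAX_RECLASSIFY_LOOP	4	/* the kernel caps at a small constant */

struct tcf_proto {
	struct tcf_proto *next;
	int (*classify)(int pkt, const struct tcf_proto *tp);
};

static int classify_chain(int pkt, const struct tcf_proto *head)
{
	int limit = 0;

reclassify:
	for (const struct tcf_proto *tp = head; tp; tp = tp->next) {
		int err = tp->classify(pkt, tp);

		if (err == TC_ACT_RECLASSIFY) {
			if (++limit >= MAX_RECLASSIFY_LOOP)
				return TC_ACT_UNSPEC;	/* suspected loop */
			goto reclassify;
		}
		if (err >= 0)
			return err;		/* this filter decided */
	}
	return TC_ACT_UNSPEC;			/* no filter matched */
}

static int match_even(int pkt, const struct tcf_proto *tp)
{
	(void)tp;
	return pkt % 2 == 0 ? 7 : TC_ACT_UNSPEC;	/* "classid" 7 */
}

int main(void)
{
	struct tcf_proto tp = { .next = NULL, .classify = match_even };

	printf("verdict for pkt 4: %d\n", classify_chain(4, &tp));
	return 0;
}
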
skb                39 net/sched/cls_basic.c static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb                48 net/sched/cls_basic.c 		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
skb                52 net/sched/cls_basic.c 		r = tcf_exts_exec(skb, &f->exts, res);
skb               280 net/sched/cls_basic.c 		      struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
skb               288 net/sched/cls_basic.c 		return skb->len;
skb               292 net/sched/cls_basic.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               297 net/sched/cls_basic.c 	    nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
skb               307 net/sched/cls_basic.c 	if (nla_put_64bit(skb, TCA_BASIC_PCNT,
skb               312 net/sched/cls_basic.c 	if (tcf_exts_dump(skb, &f->exts) < 0 ||
skb               313 net/sched/cls_basic.c 	    tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
skb               316 net/sched/cls_basic.c 	nla_nest_end(skb, nest);
skb               318 net/sched/cls_basic.c 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
skb               321 net/sched/cls_basic.c 	return skb->len;
skb               324 net/sched/cls_basic.c 	nla_nest_cancel(skb, nest);
skb                80 net/sched/cls_bpf.c static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb                84 net/sched/cls_bpf.c 	bool at_ingress = skb_at_tc_ingress(skb);
skb                93 net/sched/cls_bpf.c 		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;
skb                99 net/sched/cls_bpf.c 			__skb_push(skb, skb->mac_len);
skb               100 net/sched/cls_bpf.c 			bpf_compute_data_pointers(skb);
skb               101 net/sched/cls_bpf.c 			filter_res = BPF_PROG_RUN(prog->filter, skb);
skb               102 net/sched/cls_bpf.c 			__skb_pull(skb, skb->mac_len);
skb               104 net/sched/cls_bpf.c 			bpf_compute_data_pointers(skb);
skb               105 net/sched/cls_bpf.c 			filter_res = BPF_PROG_RUN(prog->filter, skb);
skb               111 net/sched/cls_bpf.c 				       qdisc_skb_cb(skb)->tc_classid;
skb               128 net/sched/cls_bpf.c 		ret = tcf_exts_exec(skb, &prog->exts, res);
skb               543 net/sched/cls_bpf.c 				 struct sk_buff *skb)
skb               547 net/sched/cls_bpf.c 	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
skb               550 net/sched/cls_bpf.c 	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
skb               561 net/sched/cls_bpf.c 				  struct sk_buff *skb)
skb               566 net/sched/cls_bpf.c 	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
skb               569 net/sched/cls_bpf.c 	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
skb               572 net/sched/cls_bpf.c 	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
skb               582 net/sched/cls_bpf.c 			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
skb               590 net/sched/cls_bpf.c 		return skb->len;
skb               596 net/sched/cls_bpf.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               601 net/sched/cls_bpf.c 	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
skb               605 net/sched/cls_bpf.c 		ret = cls_bpf_dump_ebpf_info(prog, skb);
skb               607 net/sched/cls_bpf.c 		ret = cls_bpf_dump_bpf_info(prog, skb);
skb               611 net/sched/cls_bpf.c 	if (tcf_exts_dump(skb, &prog->exts) < 0)
skb               616 net/sched/cls_bpf.c 	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
skb               619 net/sched/cls_bpf.c 	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
skb               622 net/sched/cls_bpf.c 	nla_nest_end(skb, nest);
skb               624 net/sched/cls_bpf.c 	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
skb               627 net/sched/cls_bpf.c 	return skb->len;
skb               630 net/sched/cls_bpf.c 	nla_nest_cancel(skb, nest);
skb                25 net/sched/cls_cgroup.c static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb                29 net/sched/cls_cgroup.c 	u32 classid = task_get_classid(skb);
skb                35 net/sched/cls_cgroup.c 	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
skb                41 net/sched/cls_cgroup.c 	return tcf_exts_exec(skb, &head->exts, res);
skb               171 net/sched/cls_cgroup.c 			   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
skb               178 net/sched/cls_cgroup.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               182 net/sched/cls_cgroup.c 	if (tcf_exts_dump(skb, &head->exts) < 0 ||
skb               183 net/sched/cls_cgroup.c 	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
skb               186 net/sched/cls_cgroup.c 	nla_nest_end(skb, nest);
skb               188 net/sched/cls_cgroup.c 	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
skb               191 net/sched/cls_cgroup.c 	return skb->len;
skb               194 net/sched/cls_cgroup.c 	nla_nest_cancel(skb, nest);
skb                66 net/sched/cls_flow.c static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
skb                73 net/sched/cls_flow.c 	return addr_fold(skb->sk);
skb                76 net/sched/cls_flow.c static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
skb                83 net/sched/cls_flow.c 	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
skb                86 net/sched/cls_flow.c static u32 flow_get_proto(const struct sk_buff *skb,
skb                92 net/sched/cls_flow.c static u32 flow_get_proto_src(const struct sk_buff *skb,
skb                98 net/sched/cls_flow.c 	return addr_fold(skb->sk);
skb               101 net/sched/cls_flow.c static u32 flow_get_proto_dst(const struct sk_buff *skb,
skb               107 net/sched/cls_flow.c 	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
skb               110 net/sched/cls_flow.c static u32 flow_get_iif(const struct sk_buff *skb)
skb               112 net/sched/cls_flow.c 	return skb->skb_iif;
skb               115 net/sched/cls_flow.c static u32 flow_get_priority(const struct sk_buff *skb)
skb               117 net/sched/cls_flow.c 	return skb->priority;
skb               120 net/sched/cls_flow.c static u32 flow_get_mark(const struct sk_buff *skb)
skb               122 net/sched/cls_flow.c 	return skb->mark;
skb               125 net/sched/cls_flow.c static u32 flow_get_nfct(const struct sk_buff *skb)
skb               128 net/sched/cls_flow.c 	return addr_fold(skb_nfct(skb));
skb               135 net/sched/cls_flow.c #define CTTUPLE(skb, member)						\
skb               138 net/sched/cls_flow.c 	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
skb               144 net/sched/cls_flow.c #define CTTUPLE(skb, member)						\
skb               151 net/sched/cls_flow.c static u32 flow_get_nfct_src(const struct sk_buff *skb,
skb               154 net/sched/cls_flow.c 	switch (tc_skb_protocol(skb)) {
skb               156 net/sched/cls_flow.c 		return ntohl(CTTUPLE(skb, src.u3.ip));
skb               158 net/sched/cls_flow.c 		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
skb               161 net/sched/cls_flow.c 	return flow_get_src(skb, flow);
skb               164 net/sched/cls_flow.c static u32 flow_get_nfct_dst(const struct sk_buff *skb,
skb               167 net/sched/cls_flow.c 	switch (tc_skb_protocol(skb)) {
skb               169 net/sched/cls_flow.c 		return ntohl(CTTUPLE(skb, dst.u3.ip));
skb               171 net/sched/cls_flow.c 		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
skb               174 net/sched/cls_flow.c 	return flow_get_dst(skb, flow);
skb               177 net/sched/cls_flow.c static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
skb               180 net/sched/cls_flow.c 	return ntohs(CTTUPLE(skb, src.u.all));
skb               182 net/sched/cls_flow.c 	return flow_get_proto_src(skb, flow);
skb               185 net/sched/cls_flow.c static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
skb               188 net/sched/cls_flow.c 	return ntohs(CTTUPLE(skb, dst.u.all));
skb               190 net/sched/cls_flow.c 	return flow_get_proto_dst(skb, flow);
skb               193 net/sched/cls_flow.c static u32 flow_get_rtclassid(const struct sk_buff *skb)
skb               196 net/sched/cls_flow.c 	if (skb_dst(skb))
skb               197 net/sched/cls_flow.c 		return skb_dst(skb)->tclassid;
skb               202 net/sched/cls_flow.c static u32 flow_get_skuid(const struct sk_buff *skb)
skb               204 net/sched/cls_flow.c 	struct sock *sk = skb_to_full_sk(skb);
skb               214 net/sched/cls_flow.c static u32 flow_get_skgid(const struct sk_buff *skb)
skb               216 net/sched/cls_flow.c 	struct sock *sk = skb_to_full_sk(skb);
skb               226 net/sched/cls_flow.c static u32 flow_get_vlan_tag(const struct sk_buff *skb)
skb               230 net/sched/cls_flow.c 	if (vlan_get_tag(skb, &tag) < 0)
skb               235 net/sched/cls_flow.c static u32 flow_get_rxhash(struct sk_buff *skb)
skb               237 net/sched/cls_flow.c 	return skb_get_hash(skb);
skb               240 net/sched/cls_flow.c static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
skb               244 net/sched/cls_flow.c 		return flow_get_src(skb, flow);
skb               246 net/sched/cls_flow.c 		return flow_get_dst(skb, flow);
skb               248 net/sched/cls_flow.c 		return flow_get_proto(skb, flow);
skb               250 net/sched/cls_flow.c 		return flow_get_proto_src(skb, flow);
skb               252 net/sched/cls_flow.c 		return flow_get_proto_dst(skb, flow);
skb               254 net/sched/cls_flow.c 		return flow_get_iif(skb);
skb               256 net/sched/cls_flow.c 		return flow_get_priority(skb);
skb               258 net/sched/cls_flow.c 		return flow_get_mark(skb);
skb               260 net/sched/cls_flow.c 		return flow_get_nfct(skb);
skb               262 net/sched/cls_flow.c 		return flow_get_nfct_src(skb, flow);
skb               264 net/sched/cls_flow.c 		return flow_get_nfct_dst(skb, flow);
skb               266 net/sched/cls_flow.c 		return flow_get_nfct_proto_src(skb, flow);
skb               268 net/sched/cls_flow.c 		return flow_get_nfct_proto_dst(skb, flow);
skb               270 net/sched/cls_flow.c 		return flow_get_rtclassid(skb);
skb               272 net/sched/cls_flow.c 		return flow_get_skuid(skb);
skb               274 net/sched/cls_flow.c 		return flow_get_skgid(skb);
skb               276 net/sched/cls_flow.c 		return flow_get_vlan_tag(skb);
skb               278 net/sched/cls_flow.c 		return flow_get_rxhash(skb);
skb               295 net/sched/cls_flow.c static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb               309 net/sched/cls_flow.c 		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
skb               314 net/sched/cls_flow.c 			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
skb               319 net/sched/cls_flow.c 			keys[n] = flow_key_get(skb, key, &flow_keys);
skb               336 net/sched/cls_flow.c 		r = tcf_exts_exec(skb, &f->exts, res);
skb               619 net/sched/cls_flow.c 		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
skb               625 net/sched/cls_flow.c 		return skb->len;
skb               629 net/sched/cls_flow.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               633 net/sched/cls_flow.c 	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
skb               634 net/sched/cls_flow.c 	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
skb               638 net/sched/cls_flow.c 		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
skb               639 net/sched/cls_flow.c 		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
skb               643 net/sched/cls_flow.c 	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
skb               646 net/sched/cls_flow.c 	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
skb               650 net/sched/cls_flow.c 	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
skb               653 net/sched/cls_flow.c 	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
skb               657 net/sched/cls_flow.c 	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
skb               660 net/sched/cls_flow.c 	if (tcf_exts_dump(skb, &f->exts) < 0)
skb               664 net/sched/cls_flow.c 	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
skb               667 net/sched/cls_flow.c 	nla_nest_end(skb, nest);
skb               669 net/sched/cls_flow.c 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
skb               672 net/sched/cls_flow.c 	return skb->len;
skb               675 net/sched/cls_flow.c 	nla_nest_cancel(skb, nest);
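
flow_key_get() above is a pure dispatch table from key id to extractor; what the dump entries (TCA_FLOW_MASK/XOR/RSHIFT/ADDEND/DIVISOR/BASECLASS) hint at is the arithmetic pipeline the classifier then pushes the extracted key through to form a class id. A sketch of the map-mode transform as those attributes suggest it; hash mode (which jhashes all selected keys together) is omitted, and the exact operation order here is an assumption.

#include <stdint.h>
#include <stdio.h>

struct flow_filter {
	uint32_t mask, xor_val, rshift, addend, divisor, baseclass;
};

static uint32_t flow_map(const struct flow_filter *f, uint32_t key)
{
	uint32_t classid = (key & f->mask) ^ f->xor_val;

	classid = (classid >> f->rshift) + f->addend;
	if (f->divisor)
		classid %= f->divisor;
	return f->baseclass + classid;
}

int main(void)
{
	/* spread flows over 16 classes starting at baseclass 1 */
	struct flow_filter f = { .mask = 0xFFFFFFFFu, .xor_val = 0,
				 .rshift = 0, .addend = 0,
				 .divisor = 16, .baseclass = 1 };

	printf("class = %u\n", flow_map(&f, 0x12345678));	/* 9 */
	return 0;
}
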
skb               296 net/sched/cls_flower.c static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb               309 net/sched/cls_flower.c 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
skb               313 net/sched/cls_flower.c 		skb_key.basic.n_proto = skb->protocol;
skb               314 net/sched/cls_flower.c 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
skb               315 net/sched/cls_flower.c 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
skb               318 net/sched/cls_flower.c 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
skb               325 net/sched/cls_flower.c 			return tcf_exts_exec(skb, &f->exts, res);
skb              1963 net/sched/cls_flower.c static int fl_dump_key_val(struct sk_buff *skb,
skb              1971 net/sched/cls_flower.c 	err = nla_put(skb, val_type, len, val);
skb              1975 net/sched/cls_flower.c 		err = nla_put(skb, mask_type, len, mask);
skb              1982 net/sched/cls_flower.c static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
skb              1985 net/sched/cls_flower.c 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
skb              1989 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
skb              1993 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
skb              1997 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
skb              2006 net/sched/cls_flower.c static int fl_dump_key_mpls(struct sk_buff *skb,
skb              2015 net/sched/cls_flower.c 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
skb              2021 net/sched/cls_flower.c 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
skb              2027 net/sched/cls_flower.c 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
skb              2033 net/sched/cls_flower.c 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
skb              2041 net/sched/cls_flower.c static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
skb              2050 net/sched/cls_flower.c 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
skb              2051 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
skb              2057 net/sched/cls_flower.c static int fl_dump_key_vlan(struct sk_buff *skb,
skb              2067 net/sched/cls_flower.c 		err = nla_put_u16(skb, vlan_id_key,
skb              2073 net/sched/cls_flower.c 		err = nla_put_u8(skb, vlan_prio_key,
skb              2092 net/sched/cls_flower.c static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
skb              2113 net/sched/cls_flower.c 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
skb              2117 net/sched/cls_flower.c 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
skb              2120 net/sched/cls_flower.c static int fl_dump_key_geneve_opt(struct sk_buff *skb,
skb              2127 net/sched/cls_flower.c 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
skb              2134 net/sched/cls_flower.c 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
skb              2137 net/sched/cls_flower.c 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
skb              2140 net/sched/cls_flower.c 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
skb              2146 net/sched/cls_flower.c 	nla_nest_end(skb, nest);
skb              2150 net/sched/cls_flower.c 	nla_nest_cancel(skb, nest);
skb              2154 net/sched/cls_flower.c static int fl_dump_key_ct(struct sk_buff *skb,
skb              2159 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
skb              2165 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
skb              2171 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
skb              2177 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
skb              2188 net/sched/cls_flower.c static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
skb              2197 net/sched/cls_flower.c 	nest = nla_nest_start_noflag(skb, enc_opt_type);
skb              2203 net/sched/cls_flower.c 		err = fl_dump_key_geneve_opt(skb, enc_opts);
skb              2210 net/sched/cls_flower.c 	nla_nest_end(skb, nest);
skb              2214 net/sched/cls_flower.c 	nla_nest_cancel(skb, nest);
skb              2218 net/sched/cls_flower.c static int fl_dump_key_enc_opt(struct sk_buff *skb,
skb              2224 net/sched/cls_flower.c 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
skb              2228 net/sched/cls_flower.c 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
skb              2231 net/sched/cls_flower.c static int fl_dump_key(struct sk_buff *skb, struct net *net,
skb              2238 net/sched/cls_flower.c 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
skb              2242 net/sched/cls_flower.c 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
skb              2245 net/sched/cls_flower.c 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
skb              2248 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
skb              2253 net/sched/cls_flower.c 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
skb              2256 net/sched/cls_flower.c 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
skb              2260 net/sched/cls_flower.c 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
skb              2264 net/sched/cls_flower.c 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
skb              2270 net/sched/cls_flower.c 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
skb              2274 net/sched/cls_flower.c 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
skb              2282 net/sched/cls_flower.c 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
skb              2285 net/sched/cls_flower.c 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
skb              2289 net/sched/cls_flower.c 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
skb              2292 net/sched/cls_flower.c 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
skb              2297 net/sched/cls_flower.c 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
skb              2300 net/sched/cls_flower.c 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
skb              2306 net/sched/cls_flower.c 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
skb              2309 net/sched/cls_flower.c 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
skb              2312 net/sched/cls_flower.c 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
skb              2317 net/sched/cls_flower.c 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
skb              2320 net/sched/cls_flower.c 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
skb              2325 net/sched/cls_flower.c 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
skb              2328 net/sched/cls_flower.c 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
skb              2334 net/sched/cls_flower.c 		 (fl_dump_key_val(skb, &key->icmp.type,
skb              2338 net/sched/cls_flower.c 		  fl_dump_key_val(skb, &key->icmp.code,
skb              2345 net/sched/cls_flower.c 		 (fl_dump_key_val(skb, &key->icmp.type,
skb              2349 net/sched/cls_flower.c 		  fl_dump_key_val(skb, &key->icmp.code,
skb              2356 net/sched/cls_flower.c 		 (fl_dump_key_val(skb, &key->arp.sip,
skb              2360 net/sched/cls_flower.c 		  fl_dump_key_val(skb, &key->arp.tip,
skb              2364 net/sched/cls_flower.c 		  fl_dump_key_val(skb, &key->arp.op,
skb              2368 net/sched/cls_flower.c 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
skb              2371 net/sched/cls_flower.c 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
skb              2379 net/sched/cls_flower.c 	     fl_dump_key_port_range(skb, key, mask))
skb              2383 net/sched/cls_flower.c 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
skb              2387 net/sched/cls_flower.c 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
skb              2393 net/sched/cls_flower.c 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
skb              2397 net/sched/cls_flower.c 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
skb              2404 net/sched/cls_flower.c 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
skb              2407 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->enc_tp.src,
skb              2412 net/sched/cls_flower.c 	    fl_dump_key_val(skb, &key->enc_tp.dst,
skb              2417 net/sched/cls_flower.c 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
skb              2418 net/sched/cls_flower.c 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
skb              2421 net/sched/cls_flower.c 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
skb              2424 net/sched/cls_flower.c 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
skb              2434 net/sched/cls_flower.c 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
skb              2442 net/sched/cls_flower.c 		return skb->len;
skb              2446 net/sched/cls_flower.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb              2453 net/sched/cls_flower.c 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
skb              2460 net/sched/cls_flower.c 	if (fl_dump_key(skb, net, key, mask))
skb              2463 net/sched/cls_flower.c 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
skb              2471 net/sched/cls_flower.c 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
skb              2474 net/sched/cls_flower.c 	if (tcf_exts_dump(skb, &f->exts))
skb              2477 net/sched/cls_flower.c 	nla_nest_end(skb, nest);
skb              2479 net/sched/cls_flower.c 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
skb              2482 net/sched/cls_flower.c 	return skb->len;
skb              2487 net/sched/cls_flower.c 	nla_nest_cancel(skb, nest);
skb              2491 net/sched/cls_flower.c static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
skb              2497 net/sched/cls_flower.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb              2504 net/sched/cls_flower.c 	if (fl_dump_key(skb, net, key, mask))
skb              2507 net/sched/cls_flower.c 	nla_nest_end(skb, nest);
skb              2509 net/sched/cls_flower.c 	return skb->len;
skb              2512 net/sched/cls_flower.c 	nla_nest_cancel(skb, nest);
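
Nearly every fl_dump_key_val() call above comes in a value/mask pair, which is the essence of flower: a rule stores a pre-masked key plus a mask, and a packet matches when its extracted key, restricted to that mask, equals the rule's key (the kernel accelerates this with one rhashtable per distinct mask). The comparison those attribute pairs encode, in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Flower-style masked compare: the packet key, restricted to the
 * rule's mask, must equal the rule's (pre-masked) key. */
static bool masked_match(const uint8_t *pkt, const uint8_t *key,
			 const uint8_t *mask, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if ((pkt[i] & mask[i]) != key[i])
			return false;
	return true;
}

int main(void)
{
	uint8_t pkt[4]  = { 10, 1, 2, 3 };	/* e.g. an IPv4 address */
	uint8_t mask[4] = { 255, 255, 0, 0 };	/* /16 */
	uint8_t key[4]  = { 10, 1, 0, 0 };	/* 10.1.0.0/16, pre-masked */

	printf("%s\n", masked_match(pkt, key, mask, 4) ?
	       "match" : "no match");
	return 0;
}
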
skb                50 net/sched/cls_fw.c static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb                56 net/sched/cls_fw.c 	u32 id = skb->mark;
skb                65 net/sched/cls_fw.c 				if (!tcf_match_indev(skb, f->ifindex))
skb                67 net/sched/cls_fw.c 				r = tcf_exts_exec(skb, &f->exts, res);
skb               376 net/sched/cls_fw.c 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
skb               383 net/sched/cls_fw.c 		return skb->len;
skb               388 net/sched/cls_fw.c 		return skb->len;
skb               390 net/sched/cls_fw.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               395 net/sched/cls_fw.c 	    nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
skb               400 net/sched/cls_fw.c 		if (dev && nla_put_string(skb, TCA_FW_INDEV, dev->name))
skb               404 net/sched/cls_fw.c 	    nla_put_u32(skb, TCA_FW_MASK, head->mask))
skb               407 net/sched/cls_fw.c 	if (tcf_exts_dump(skb, &f->exts) < 0)
skb               410 net/sched/cls_fw.c 	nla_nest_end(skb, nest);
skb               412 net/sched/cls_fw.c 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
skb               415 net/sched/cls_fw.c 	return skb->len;
skb               418 net/sched/cls_fw.c 	nla_nest_cancel(skb, nest);
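
cls_fw keys purely on skb->mark: the mark is masked with the table-wide mask, hashed into a fixed-size bucket array, and the bucket's chain is scanned for an exact id match before tcf_match_indev() and the extensions run. A sketch of that lookup; the hash function below is an illustrative byte-folding spread, not necessarily the kernel's exact one.

#include <stdint.h>
#include <stdio.h>

#define HTSIZE 256

struct fw_filter { uint32_t id; uint32_t classid; struct fw_filter *next; };

/* Fold the masked fwmark down to a bucket index. */
static unsigned int fw_hash(uint32_t id)
{
	id ^= id >> 16;
	id ^= id >> 8;
	return id & (HTSIZE - 1);
}

static const struct fw_filter *
fw_lookup(struct fw_filter *ht[HTSIZE], uint32_t mark, uint32_t mask)
{
	uint32_t id = mark & mask;

	for (const struct fw_filter *f = ht[fw_hash(id)]; f; f = f->next)
		if (f->id == id)
			return f;
	return NULL;
}

int main(void)
{
	static struct fw_filter *ht[HTSIZE];
	struct fw_filter f = { .id = 0x10, .classid = 42, .next = NULL };

	ht[fw_hash(f.id)] = &f;
	printf("classid = %u\n", fw_lookup(ht, 0x10, 0xFF)->classid);
	return 0;
}
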
skb                27 net/sched/cls_matchall.c static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb                40 net/sched/cls_matchall.c 	return tcf_exts_exec(skb, &head->exts, res);
skb               345 net/sched/cls_matchall.c 		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
skb               353 net/sched/cls_matchall.c 		return skb->len;
skb               360 net/sched/cls_matchall.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               365 net/sched/cls_matchall.c 	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
skb               368 net/sched/cls_matchall.c 	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
skb               377 net/sched/cls_matchall.c 	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
skb               382 net/sched/cls_matchall.c 	if (tcf_exts_dump(skb, &head->exts))
skb               385 net/sched/cls_matchall.c 	nla_nest_end(skb, nest);
skb               387 net/sched/cls_matchall.c 	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
skb               390 net/sched/cls_matchall.c 	return skb->len;
skb               393 net/sched/cls_matchall.c 	nla_nest_cancel(skb, nest);
skb               113 net/sched/cls_route.c 		int r = tcf_exts_exec(skb, &f->exts, res);	\
skb               124 net/sched/cls_route.c static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb               134 net/sched/cls_route.c 	dst = skb_dst(skb);
skb               140 net/sched/cls_route.c 	iif = inet_iif(skb);
skb               596 net/sched/cls_route.c 		       struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
skb               603 net/sched/cls_route.c 		return skb->len;
skb               607 net/sched/cls_route.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               613 net/sched/cls_route.c 		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
skb               618 net/sched/cls_route.c 		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
skb               622 net/sched/cls_route.c 		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
skb               626 net/sched/cls_route.c 	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
skb               629 net/sched/cls_route.c 	if (tcf_exts_dump(skb, &f->exts) < 0)
skb               632 net/sched/cls_route.c 	nla_nest_end(skb, nest);
skb               634 net/sched/cls_route.c 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
skb               637 net/sched/cls_route.c 	return skb->len;
skb               640 net/sched/cls_route.c 	nla_nest_cancel(skb, nest);
skb               120 net/sched/cls_rsvp.h 	int r = tcf_exts_exec(skb, &f->exts, res);	\
skb               127 net/sched/cls_rsvp.h static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb               141 net/sched/cls_rsvp.h 	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
skb               143 net/sched/cls_rsvp.h 	nhptr = ipv6_hdr(skb);
skb               147 net/sched/cls_rsvp.h 	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
skb               149 net/sched/cls_rsvp.h 	nhptr = ip_hdr(skb);
skb               691 net/sched/cls_rsvp.h 		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
skb               699 net/sched/cls_rsvp.h 		return skb->len;
skb               704 net/sched/cls_rsvp.h 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               708 net/sched/cls_rsvp.h 	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
skb               716 net/sched/cls_rsvp.h 	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
skb               719 net/sched/cls_rsvp.h 	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
skb               722 net/sched/cls_rsvp.h 	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
skb               725 net/sched/cls_rsvp.h 	if (tcf_exts_dump(skb, &f->exts) < 0)
skb               728 net/sched/cls_rsvp.h 	nla_nest_end(skb, nest);
skb               730 net/sched/cls_rsvp.h 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
skb               732 net/sched/cls_rsvp.h 	return skb->len;
skb               735 net/sched/cls_rsvp.h 	nla_nest_cancel(skb, nest);
skb               102 net/sched/cls_tcindex.c static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb               107 net/sched/cls_tcindex.c 	int key = (skb->tc_index & p->mask) >> p->shift;
skb               110 net/sched/cls_tcindex.c 		 skb, tp, res, p);
skb               126 net/sched/cls_tcindex.c 	return tcf_exts_exec(skb, &f->exts, res);
skb               631 net/sched/cls_tcindex.c 			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
skb               638 net/sched/cls_tcindex.c 		 tp, fh, skb, t, p, r);
skb               641 net/sched/cls_tcindex.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               647 net/sched/cls_tcindex.c 		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
skb               648 net/sched/cls_tcindex.c 		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
skb               649 net/sched/cls_tcindex.c 		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
skb               650 net/sched/cls_tcindex.c 		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
skb               652 net/sched/cls_tcindex.c 		nla_nest_end(skb, nest);
skb               674 net/sched/cls_tcindex.c 		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
skb               677 net/sched/cls_tcindex.c 		if (tcf_exts_dump(skb, &r->exts) < 0)
skb               679 net/sched/cls_tcindex.c 		nla_nest_end(skb, nest);
skb               681 net/sched/cls_tcindex.c 		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
skb               685 net/sched/cls_tcindex.c 	return skb->len;
skb               688 net/sched/cls_tcindex.c 	nla_nest_cancel(skb, nest);
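
The tcindex lines above show the whole classification key being derived as (skb->tc_index & p->mask) >> p->shift, with a fall_through mode when no entry exists. The same arithmetic in a toy, self-contained form (table layout and names are illustrative, not the kernel's perfect-hash structure):

#include <stdio.h>
#include <stdint.h>

struct tcindex_params {
	uint16_t mask;
	int shift;
	int fall_through;
};

static int tcindex_lookup(const struct tcindex_params *p,
			  const int *table, int table_len, uint16_t tc_index)
{
	int key = (tc_index & p->mask) >> p->shift;

	if (key < table_len && table[key])
		return table[key];
	/* with fall_through, a missing entry maps the key to itself */
	return p->fall_through ? key : -1;
}

int main(void)
{
	struct tcindex_params p = { .mask = 0xf0, .shift = 4, .fall_through = 1 };
	int table[16] = { [2] = 1002 };

	printf("%d\n", tcindex_lookup(&p, table, 16, 0x25)); /* key 2 -> 1002 */
	printf("%d\n", tcindex_lookup(&p, table, 16, 0x35)); /* key 3 falls through -> 3 */
	return 0;
}
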
skb               103 net/sched/cls_u32.c static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb               112 net/sched/cls_u32.c 	unsigned int off = skb_network_offset(skb);
skb               140 net/sched/cls_u32.c 		if ((skb->mark & n->mask) != n->val) {
skb               152 net/sched/cls_u32.c 			if (skb_headroom(skb) + toff > INT_MAX)
skb               155 net/sched/cls_u32.c 			data = skb_header_pointer(skb, toff, 4, &hdata);
skb               174 net/sched/cls_u32.c 				if (!tcf_match_indev(skb, n->ifindex)) {
skb               181 net/sched/cls_u32.c 				r = tcf_exts_exec(skb, &n->exts, res);
skb               205 net/sched/cls_u32.c 			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
skb               220 net/sched/cls_u32.c 				data = skb_header_pointer(skb,
skb               235 net/sched/cls_u32.c 		if (off < skb->len)
skb              1272 net/sched/cls_u32.c 		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
skb              1279 net/sched/cls_u32.c 		return skb->len;
skb              1283 net/sched/cls_u32.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb              1291 net/sched/cls_u32.c 		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
skb              1299 net/sched/cls_u32.c 		if (nla_put(skb, TCA_U32_SEL,
skb              1307 net/sched/cls_u32.c 			if (nla_put_u32(skb, TCA_U32_HASH, htid))
skb              1311 net/sched/cls_u32.c 		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
skb              1316 net/sched/cls_u32.c 		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
skb              1319 net/sched/cls_u32.c 		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
skb              1335 net/sched/cls_u32.c 			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
skb              1340 net/sched/cls_u32.c 		if (tcf_exts_dump(skb, &n->exts) < 0)
skb              1346 net/sched/cls_u32.c 			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
skb              1366 net/sched/cls_u32.c 		if (nla_put_64bit(skb, TCA_U32_PCNT,
skb              1377 net/sched/cls_u32.c 	nla_nest_end(skb, nest);
skb              1380 net/sched/cls_u32.c 		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
skb              1382 net/sched/cls_u32.c 	return skb->len;
skb              1385 net/sched/cls_u32.c 	nla_nest_cancel(skb, nest);
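
The u32_classify() fragments above compare masked 32-bit words at configured offsets, using skb_header_pointer() to read safely regardless of alignment or fragmentation. A rough user-space analogue of one sel/key pass, where memcpy() plays the role of skb_header_pointer() and all names are stand-ins:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct u32_key {
	uint32_t val;	/* expected value (network byte order in the kernel) */
	uint32_t mask;
	int off;
};

static int u32_match(const unsigned char *pkt, size_t len,
		     const struct u32_key *keys, int nkeys)
{
	int i;

	for (i = 0; i < nkeys; i++) {
		uint32_t data;

		if (keys[i].off + 4 > (int)len)
			return 0;		/* offset past packet end */
		memcpy(&data, pkt + keys[i].off, 4);
		if ((data & keys[i].mask) != keys[i].val)
			return 0;
	}
	return 1;				/* all keys matched */
}

int main(void)
{
	unsigned char pkt[8] = { 0x45, 0x00, 0x00, 0x54, 0xde, 0xad, 0xbe, 0xef };
	struct u32_key k = { 0, 0xffffffff, 0 };

	memcpy(&k.val, pkt, 4);	/* match the first word exactly */
	printf("match: %d\n", u32_match(pkt, sizeof(pkt), &k, 1));
	return 0;
}
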
skb                44 net/sched/em_canid.c static canid_t em_canid_get_id(struct sk_buff *skb)
skb                47 net/sched/em_canid.c 	struct can_frame *cf = (struct can_frame *)skb->data;
skb                92 net/sched/em_canid.c static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
skb               101 net/sched/em_canid.c 	can_id = em_canid_get_id(skb);
skb               189 net/sched/em_canid.c static int em_canid_dump(struct sk_buff *skb, struct tcf_ematch *m)
skb               197 net/sched/em_canid.c 	if (nla_put_nohdr(skb, sizeof(struct can_filter) * cm->rules_count,
skb                21 net/sched/em_cmp.c static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
skb                25 net/sched/em_cmp.c 	unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer) + cmp->off;
skb                28 net/sched/em_cmp.c 	if (!tcf_valid_offset(skb, ptr, cmp->align))
skb                50 net/sched/em_ipset.c static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
skb                62 net/sched/em_ipset.c 	switch (tc_skb_protocol(skb)) {
skb                65 net/sched/em_ipset.c 		if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
skb                67 net/sched/em_ipset.c 		acpar.thoff = ip_hdrlen(skb);
skb                71 net/sched/em_ipset.c 		if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
skb                86 net/sched/em_ipset.c 	network_offset = skb_network_offset(skb);
skb                87 net/sched/em_ipset.c 	skb_pull(skb, network_offset);
skb                89 net/sched/em_ipset.c 	dev = skb->dev;
skb                93 net/sched/em_ipset.c 	if (skb->skb_iif)
skb                94 net/sched/em_ipset.c 		indev = dev_get_by_index_rcu(em->net, skb->skb_iif);
skb               100 net/sched/em_ipset.c 	ret = ip_set_test(set->index, skb, &acpar, &opt);
skb               104 net/sched/em_ipset.c 	skb_push(skb, network_offset);
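
em_ipset_match() above pulls the skb to its network header before calling ip_set_test() and pushes the same amount back afterwards, so the packet leaves the ematch exactly as it arrived. The same save/restore idea on a toy buffer view (hypothetical types, 14-byte Ethernet header assumed):

#include <stdio.h>

struct view {
	const unsigned char *data;
	unsigned int len;
};

static void pull(struct view *v, unsigned int n) { v->data += n; v->len -= n; }
static void push(struct view *v, unsigned int n) { v->data -= n; v->len += n; }

int main(void)
{
	unsigned char frame[] = "EEEEEEEEEEEEEEIP-HEADER...";
	struct view v = { frame, sizeof(frame) - 1 };
	unsigned int network_offset = 14;	/* Ethernet header length */

	pull(&v, network_offset);		/* like skb_pull() before the test */
	printf("at network header: %.9s\n", v.data);
	push(&v, network_offset);		/* like skb_push() afterwards */
	printf("restored: %.4s...\n", v.data);
	return 0;
}
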
skb               205 net/sched/em_ipt.c static int em_ipt_match(struct sk_buff *skb, struct tcf_ematch *em,
skb               215 net/sched/em_ipt.c 	switch (tc_skb_protocol(skb)) {
skb               217 net/sched/em_ipt.c 		if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
skb               223 net/sched/em_ipt.c 		if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
skb               234 net/sched/em_ipt.c 	if (skb->skb_iif)
skb               235 net/sched/em_ipt.c 		indev = dev_get_by_index_rcu(em->net, skb->skb_iif);
skb               238 net/sched/em_ipt.c 			   indev ?: skb->dev, skb->dev, NULL, em->net, NULL);
skb               244 net/sched/em_ipt.c 	ret = im->match->match(skb, &acpar);
skb               250 net/sched/em_ipt.c static int em_ipt_dump(struct sk_buff *skb, struct tcf_ematch *em)
skb               254 net/sched/em_ipt.c 	if (nla_put_string(skb, TCA_EM_IPT_MATCH_NAME, im->match->name) < 0)
skb               256 net/sched/em_ipt.c 	if (nla_put_u32(skb, TCA_EM_IPT_HOOK, im->hook) < 0)
skb               258 net/sched/em_ipt.c 	if (nla_put_u8(skb, TCA_EM_IPT_MATCH_REVISION, im->match->revision) < 0)
skb               260 net/sched/em_ipt.c 	if (nla_put_u8(skb, TCA_EM_IPT_NFPROTO, im->nfproto) < 0)
skb               262 net/sched/em_ipt.c 	if (nla_put(skb, TCA_EM_IPT_MATCH_DATA,
skb                99 net/sched/em_meta.c #define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
skb               160 net/sched/em_meta.c 	*err = int_dev(skb->dev, dst);
skb               165 net/sched/em_meta.c 	*err = var_dev(skb->dev, dst);
skb               176 net/sched/em_meta.c 	if (skb_vlan_tag_present(skb))
skb               177 net/sched/em_meta.c 		dst->value = skb_vlan_tag_get(skb);
skb               178 net/sched/em_meta.c 	else if (!__vlan_get_tag(skb, &tag))
skb               192 net/sched/em_meta.c 	dst->value = skb->priority;
skb               198 net/sched/em_meta.c 	dst->value = tc_skb_protocol(skb);
skb               203 net/sched/em_meta.c 	dst->value = skb->pkt_type;
skb               208 net/sched/em_meta.c 	dst->value = skb->len;
skb               213 net/sched/em_meta.c 	dst->value = skb->data_len;
skb               218 net/sched/em_meta.c 	dst->value = skb->mac_len;
skb               223 net/sched/em_meta.c 	dst->value = skb_get_hash(skb);
skb               232 net/sched/em_meta.c 	dst->value = skb->mark;
skb               241 net/sched/em_meta.c 	dst->value = skb->tc_index;
skb               250 net/sched/em_meta.c 	if (unlikely(skb_dst(skb) == NULL))
skb               254 net/sched/em_meta.c 		dst->value = skb_dst(skb)->tclassid;
skb               262 net/sched/em_meta.c 	if (unlikely(skb_rtable(skb) == NULL))
skb               265 net/sched/em_meta.c 		dst->value = inet_iif(skb);
skb               272 net/sched/em_meta.c #define skip_nonlocal(skb) \
skb               273 net/sched/em_meta.c 	(unlikely(skb->sk == NULL))
skb               277 net/sched/em_meta.c 	if (skip_nonlocal(skb)) {
skb               281 net/sched/em_meta.c 	dst->value = skb->sk->sk_family;
skb               286 net/sched/em_meta.c 	if (skip_nonlocal(skb)) {
skb               290 net/sched/em_meta.c 	dst->value = skb->sk->sk_state;
skb               295 net/sched/em_meta.c 	if (skip_nonlocal(skb)) {
skb               299 net/sched/em_meta.c 	dst->value = skb->sk->sk_reuse;
skb               304 net/sched/em_meta.c 	if (skip_nonlocal(skb)) {
skb               309 net/sched/em_meta.c 	dst->value = skb->sk->sk_bound_dev_if;
skb               314 net/sched/em_meta.c 	if (skip_nonlocal(skb)) {
skb               319 net/sched/em_meta.c 	if (skb->sk->sk_bound_dev_if == 0) {
skb               326 net/sched/em_meta.c 		dev = dev_get_by_index_rcu(sock_net(skb->sk),
skb               327 net/sched/em_meta.c 					   skb->sk->sk_bound_dev_if);
skb               335 net/sched/em_meta.c 	if (skip_nonlocal(skb)) {
skb               339 net/sched/em_meta.c 	dst->value = refcount_read(&skb->sk->sk_refcnt);
skb               344 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               355 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               366 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               377 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               388 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               399 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               410 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               421 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               432 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               443 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               454 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               465 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               476 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               487 net/sched/em_meta.c 	if (skip_nonlocal(skb)) {
skb               491 net/sched/em_meta.c 	dst->value = skb->sk->sk_hash;
skb               496 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               507 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               518 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               529 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               540 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               551 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               562 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               573 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               584 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               595 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
skb               717 net/sched/em_meta.c static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
skb               720 net/sched/em_meta.c 	    nla_put(skb, tlv, v->len, (void *) v->val))
skb               769 net/sched/em_meta.c static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
skb               772 net/sched/em_meta.c 		if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
skb               775 net/sched/em_meta.c 		if (nla_put_u32(skb, tlv, v->val))
skb               822 net/sched/em_meta.c static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
skb               833 net/sched/em_meta.c 	meta_ops(v)->get(skb, info, v, dst, &err);
skb               843 net/sched/em_meta.c static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
skb               850 net/sched/em_meta.c 	if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
skb               851 net/sched/em_meta.c 	    meta_get(skb, info, &meta->rvalue, &r_value) < 0)
skb               962 net/sched/em_meta.c static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
skb               972 net/sched/em_meta.c 	if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
skb               976 net/sched/em_meta.c 	if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
skb               977 net/sched/em_meta.c 	    ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
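
The em_meta collectors above are macro-generated functions (META_COLLECTOR) that each copy one piece of packet or socket metadata into a destination value, dispatched through meta_ops(v)->get(). A plausible user-space analogue of that dispatch, a table of getter callbacks over a stand-in packet struct:

#include <stdio.h>
#include <stdint.h>

struct fake_pkt {
	uint32_t mark;
	uint32_t priority;
	uint32_t len;
};

typedef void (*meta_get_t)(const struct fake_pkt *pkt, uint64_t *dst);

static void meta_mark(const struct fake_pkt *p, uint64_t *dst)     { *dst = p->mark; }
static void meta_priority(const struct fake_pkt *p, uint64_t *dst) { *dst = p->priority; }
static void meta_pktlen(const struct fake_pkt *p, uint64_t *dst)   { *dst = p->len; }

enum { META_MARK, META_PRIORITY, META_PKTLEN, META_MAX };

static const meta_get_t meta_ops[META_MAX] = {
	[META_MARK]     = meta_mark,
	[META_PRIORITY] = meta_priority,
	[META_PKTLEN]   = meta_pktlen,
};

int main(void)
{
	struct fake_pkt pkt = { .mark = 7, .priority = 3, .len = 1500 };
	uint64_t v;

	meta_ops[META_PKTLEN](&pkt, &v);	/* fetch one metadatum by id */
	printf("pktlen = %llu\n", (unsigned long long)v);
	return 0;
}
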
skb                39 net/sched/em_nbyte.c static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
skb                43 net/sched/em_nbyte.c 	unsigned char *ptr = tcf_get_base_ptr(skb, nbyte->hdr.layer);
skb                47 net/sched/em_nbyte.c 	if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
skb                28 net/sched/em_text.c static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
skb                34 net/sched/em_text.c 	from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
skb                37 net/sched/em_text.c 	to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
skb                40 net/sched/em_text.c 	return skb_find_text(skb, from, to, tm->config) != UINT_MAX;
skb               104 net/sched/em_text.c static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
skb               117 net/sched/em_text.c 	if (nla_put_nohdr(skb, sizeof(conf), &conf) < 0)
skb               119 net/sched/em_text.c 	if (nla_append(skb, conf.pattern_len,
skb                17 net/sched/em_u32.c static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
skb                21 net/sched/em_u32.c 	const unsigned char *ptr = skb_network_header(skb);
skb                31 net/sched/em_u32.c 	if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
skb               436 net/sched/ematch.c int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
skb               443 net/sched/ematch.c 	top_start = nla_nest_start_noflag(skb, tlv);
skb               447 net/sched/ematch.c 	if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr))
skb               450 net/sched/ematch.c 	list_start = nla_nest_start_noflag(skb, TCA_EMATCH_TREE_LIST);
skb               454 net/sched/ematch.c 	tail = skb_tail_pointer(skb);
skb               464 net/sched/ematch.c 		if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr))
skb               468 net/sched/ematch.c 			if (em->ops->dump(skb, em) < 0)
skb               472 net/sched/ematch.c 			nla_put_nohdr(skb, sizeof(u), &u);
skb               474 net/sched/ematch.c 			nla_put_nohdr(skb, em->datalen, (void *) em->data);
skb               476 net/sched/ematch.c 		tail = skb_tail_pointer(skb);
skb               480 net/sched/ematch.c 	nla_nest_end(skb, list_start);
skb               481 net/sched/ematch.c 	nla_nest_end(skb, top_start);
skb               490 net/sched/ematch.c static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
skb               493 net/sched/ematch.c 	int r = em->ops->match(skb, em, info);
skb               499 net/sched/ematch.c int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree,
skb               519 net/sched/ematch.c 		res = tcf_em_match(skb, cur_match, info);
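
__tcf_em_tree_match() above walks a list of ematches whose results are chained by AND/OR relations with short-circuiting, plus an invert flag per term. A simplified flat left-to-right evaluator of that scheme (the real tree additionally supports nested sub-trees via container matches):

#include <stdio.h>

enum rel { REL_END, REL_AND, REL_OR };

struct ematch {
	int value;	/* stand-in for em->ops->match(skb, em, info) */
	int invert;	/* like TCF_EM_INVERT */
	enum rel rel;	/* relation to the *next* term */
};

static int em_tree_match(const struct ematch *em)
{
	int res;

	for (;;) {
		res = em->value;
		if (em->invert)
			res = !res;
		if (em->rel == REL_END)
			return res;
		if (em->rel == REL_AND && !res)
			return 0;	/* short-circuit AND */
		if (em->rel == REL_OR && res)
			return 1;	/* short-circuit OR */
		em++;
	}
}

int main(void)
{
	/* (1 AND (NOT 1)) OR 1  ->  1 */
	struct ematch tree[] = {
		{ 1, 0, REL_AND },
		{ 1, 1, REL_OR },
		{ 1, 0, REL_END },
	};
	printf("%d\n", em_tree_match(tree));
	return 0;
}
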
skb               538 net/sched/sch_api.c static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
skb               542 net/sched/sch_api.c 	nest = nla_nest_start_noflag(skb, TCA_STAB);
skb               545 net/sched/sch_api.c 	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
skb               547 net/sched/sch_api.c 	nla_nest_end(skb, nest);
skb               549 net/sched/sch_api.c 	return skb->len;
skb               555 net/sched/sch_api.c void __qdisc_calculate_pkt_len(struct sk_buff *skb,
skb               560 net/sched/sch_api.c 	pkt_len = skb->len + stab->szopts.overhead;
skb               580 net/sched/sch_api.c 	qdisc_skb_cb(skb)->pkt_len = pkt_len;
skb               867 net/sched/sch_api.c static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
skb               874 net/sched/sch_api.c 	unsigned char *b = skb_tail_pointer(skb);
skb               881 net/sched/sch_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
skb               892 net/sched/sch_api.c 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
skb               897 net/sched/sch_api.c 		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
skb               903 net/sched/sch_api.c 		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
skb               906 net/sched/sch_api.c 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
skb               908 net/sched/sch_api.c 	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
skb               913 net/sched/sch_api.c 	if (stab && qdisc_dump_stab(skb, stab) < 0)
skb               916 net/sched/sch_api.c 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
skb               937 net/sched/sch_api.c 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
skb               938 net/sched/sch_api.c 	return skb->len;
skb               942 net/sched/sch_api.c 	nlmsg_trim(skb, b);
skb               960 net/sched/sch_api.c 	struct sk_buff *skb;
skb               963 net/sched/sch_api.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb               964 net/sched/sch_api.c 	if (!skb)
skb               968 net/sched/sch_api.c 		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
skb               973 net/sched/sch_api.c 		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
skb               978 net/sched/sch_api.c 	if (skb->len)
skb               979 net/sched/sch_api.c 		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
skb               983 net/sched/sch_api.c 	kfree_skb(skb);
skb               987 net/sched/sch_api.c static void notify_and_destroy(struct net *net, struct sk_buff *skb,
skb               992 net/sched/sch_api.c 		qdisc_notify(net, skb, n, clid, old, new);
skb              1021 net/sched/sch_api.c 		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
skb              1067 net/sched/sch_api.c 			notify_and_destroy(net, skb, n, classid,
skb              1076 net/sched/sch_api.c 			notify_and_destroy(net, skb, n, classid, old, new);
skb              1103 net/sched/sch_api.c 		notify_and_destroy(net, skb, n, classid, old, new);
skb              1407 net/sched/sch_api.c static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
skb              1410 net/sched/sch_api.c 	struct net *net = sock_net(skb->sk);
skb              1420 net/sched/sch_api.c 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
skb              1479 net/sched/sch_api.c 		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
skb              1483 net/sched/sch_api.c 		qdisc_notify(net, skb, n, clid, NULL, q);
skb              1492 net/sched/sch_api.c static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
skb              1495 net/sched/sch_api.c 	struct net *net = sock_net(skb->sk);
skb              1503 net/sched/sch_api.c 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
skb              1625 net/sched/sch_api.c 		qdisc_notify(net, skb, n, clid, NULL, q);
skb              1663 net/sched/sch_api.c 	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
skb              1673 net/sched/sch_api.c static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
skb              1690 net/sched/sch_api.c 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
skb              1712 net/sched/sch_api.c 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
skb              1727 net/sched/sch_api.c static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
skb              1729 net/sched/sch_api.c 	struct net *net = sock_net(skb->sk);
skb              1757 net/sched/sch_api.c 		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
skb              1763 net/sched/sch_api.c 		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
skb              1776 net/sched/sch_api.c 	return skb->len;
skb              1785 net/sched/sch_api.c static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
skb              1791 net/sched/sch_api.c 	unsigned char *b = skb_tail_pointer(skb);
skb              1796 net/sched/sch_api.c 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
skb              1807 net/sched/sch_api.c 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
skb              1809 net/sched/sch_api.c 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
skb              1812 net/sched/sch_api.c 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
skb              1822 net/sched/sch_api.c 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
skb              1823 net/sched/sch_api.c 	return skb->len;
skb              1827 net/sched/sch_api.c 	nlmsg_trim(skb, b);
skb              1835 net/sched/sch_api.c 	struct sk_buff *skb;
skb              1839 net/sched/sch_api.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              1840 net/sched/sch_api.c 	if (!skb)
skb              1843 net/sched/sch_api.c 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
skb              1844 net/sched/sch_api.c 		kfree_skb(skb);
skb              1848 net/sched/sch_api.c 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
skb              1861 net/sched/sch_api.c 	struct sk_buff *skb;
skb              1867 net/sched/sch_api.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
skb              1868 net/sched/sch_api.c 	if (!skb)
skb              1871 net/sched/sch_api.c 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
skb              1873 net/sched/sch_api.c 		kfree_skb(skb);
skb              1879 net/sched/sch_api.c 		kfree_skb(skb);
skb              1883 net/sched/sch_api.c 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
skb              1975 net/sched/sch_api.c static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
skb              1978 net/sched/sch_api.c 	struct net *net = sock_net(skb->sk);
skb              1992 net/sched/sch_api.c 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
skb              2080 net/sched/sch_api.c 			err = tclass_del_notify(net, cops, skb, n, q, cl);
skb              2085 net/sched/sch_api.c 			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
skb              2103 net/sched/sch_api.c 		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
skb              2114 net/sched/sch_api.c 	struct sk_buff		*skb;
skb              2123 net/sched/sch_api.c 	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
skb              2128 net/sched/sch_api.c static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
skb              2144 net/sched/sch_api.c 	arg.skb = skb;
skb              2157 net/sched/sch_api.c static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
skb              2167 net/sched/sch_api.c 	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
skb              2176 net/sched/sch_api.c 		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
skb              2181 net/sched/sch_api.c 		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
skb              2188 net/sched/sch_api.c static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
skb              2191 net/sched/sch_api.c 	struct net *net = sock_net(skb->sk);
skb              2205 net/sched/sch_api.c 	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
skb              2210 net/sched/sch_api.c 	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
skb              2218 net/sched/sch_api.c 	return skb->len;
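
tc_fill_qdisc() and tc_fill_tclass() above follow one rollback discipline: remember the current message tail (skb_tail_pointer()), try to append attributes, and on any failure trim back to the remembered tail (nlmsg_trim()/nla_nest_cancel()) so the partial record vanishes. A toy user-space version of that pattern over a flat byte buffer, all names hypothetical:

#include <stdio.h>
#include <string.h>

struct msg {
	unsigned char buf[64];
	size_t len;
};

static int msg_put(struct msg *m, const void *data, size_t n)
{
	if (m->len + n > sizeof(m->buf))
		return -1;		/* no room: caller must roll back */
	memcpy(m->buf + m->len, data, n);
	m->len += n;
	return 0;
}

static int fill_record(struct msg *m)
{
	size_t mark = m->len;	/* like b = skb_tail_pointer(skb) */

	if (msg_put(m, "kind", 4) < 0 ||
	    msg_put(m, "options-and-stats-that-may-not-fit", 34) < 0) {
		m->len = mark;	/* like nlmsg_trim(skb, b) */
		return -1;
	}
	return (int)m->len;
}

int main(void)
{
	struct msg m = { .len = 40 };	/* message already mostly full */

	printf("fill: %d, len after: %zu\n", fill_record(&m), m.len);
	return 0;
}
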
skb                51 net/sched/sch_atm.c 					   struct sk_buff *skb); /* chaining */
skb               172 net/sched/sch_atm.c static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
skb               176 net/sched/sch_atm.c 	pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
skb               177 net/sched/sch_atm.c 	VCC2FLOW(vcc)->old_pop(vcc, skb);
skb               377 net/sched/sch_atm.c static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               386 net/sched/sch_atm.c 	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
skb               389 net/sched/sch_atm.c 	if (TC_H_MAJ(skb->priority) != sch->handle ||
skb               390 net/sched/sch_atm.c 	    !(flow = (struct atm_flow_data *)atm_tc_find(sch, skb->priority))) {
skb               396 net/sched/sch_atm.c 				result = tcf_classify(skb, fl, &res, true);
skb               413 net/sched/sch_atm.c 			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
skb               420 net/sched/sch_atm.c 			__qdisc_drop(skb, to_free);
skb               423 net/sched/sch_atm.c 			__qdisc_drop(skb, to_free);
skb               429 net/sched/sch_atm.c 				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
skb               435 net/sched/sch_atm.c 	ret = qdisc_enqueue(skb, flow->q, to_free);
skb               474 net/sched/sch_atm.c 	struct sk_buff *skb;
skb               484 net/sched/sch_atm.c 		while ((skb = flow->q->ops->peek(flow->q))) {
skb               485 net/sched/sch_atm.c 			if (!atm_may_send(flow->vcc, skb->truesize))
skb               488 net/sched/sch_atm.c 			skb = qdisc_dequeue_peeked(flow->q);
skb               489 net/sched/sch_atm.c 			if (unlikely(!skb))
skb               492 net/sched/sch_atm.c 			qdisc_bstats_update(sch, skb);
skb               493 net/sched/sch_atm.c 			bstats_update(&flow->bstats, skb);
skb               496 net/sched/sch_atm.c 			skb_pull(skb, skb_network_offset(skb));
skb               497 net/sched/sch_atm.c 			if (skb_headroom(skb) < flow->hdr_len) {
skb               500 net/sched/sch_atm.c 				new = skb_realloc_headroom(skb, flow->hdr_len);
skb               501 net/sched/sch_atm.c 				dev_kfree_skb(skb);
skb               504 net/sched/sch_atm.c 				skb = new;
skb               507 net/sched/sch_atm.c 				 skb_network_header(skb), skb->data);
skb               508 net/sched/sch_atm.c 			ATM_SKB(skb)->vcc = flow->vcc;
skb               509 net/sched/sch_atm.c 			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
skb               511 net/sched/sch_atm.c 			refcount_add(skb->truesize,
skb               514 net/sched/sch_atm.c 			flow->vcc->send(flow->vcc, skb);
skb               522 net/sched/sch_atm.c 	struct sk_buff *skb;
skb               526 net/sched/sch_atm.c 	skb = qdisc_dequeue_peeked(p->link.q);
skb               527 net/sched/sch_atm.c 	if (skb)
skb               529 net/sched/sch_atm.c 	return skb;
skb               601 net/sched/sch_atm.c 			     struct sk_buff *skb, struct tcmsg *tcm)
skb               608 net/sched/sch_atm.c 		sch, p, flow, skb, tcm);
skb               614 net/sched/sch_atm.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               618 net/sched/sch_atm.c 	if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
skb               629 net/sched/sch_atm.c 		if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
skb               632 net/sched/sch_atm.c 		if (nla_put_u32(skb, TCA_ATM_STATE, state))
skb               636 net/sched/sch_atm.c 		if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
skb               639 net/sched/sch_atm.c 		if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
skb               642 net/sched/sch_atm.c 	return nla_nest_end(skb, nest);
skb               645 net/sched/sch_atm.c 	nla_nest_cancel(skb, nest);
skb               662 net/sched/sch_atm.c static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
skb                16 net/sched/sch_blackhole.c static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb                19 net/sched/sch_blackhole.c 	qdisc_drop(skb, sch, to_free);
skb               283 net/sched/sch_cake.c static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
skb               285 net/sched/sch_cake.c 	qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
skb               286 net/sched/sch_cake.c 	return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
skb               289 net/sched/sch_cake.c static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
skb               291 net/sched/sch_cake.c 	return get_cobalt_cb(skb)->enqueue_time;
skb               294 net/sched/sch_cake.c static void cobalt_set_enqueue_time(struct sk_buff *skb,
skb               297 net/sched/sch_cake.c 	get_cobalt_cb(skb)->enqueue_time = now;
skb               507 net/sched/sch_cake.c 			       struct sk_buff *skb,
skb               529 net/sched/sch_cake.c 	sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
skb               553 net/sched/sch_cake.c 		drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
skb               589 net/sched/sch_cake.c 				 const struct sk_buff *skb)
skb               593 net/sched/sch_cake.c 	bool rev = !skb->_nfct;
skb               595 net/sched/sch_cake.c 	if (tc_skb_protocol(skb) != htons(ETH_P_IP))
skb               598 net/sched/sch_cake.c 	if (!nf_ct_get_tuple_skb(&tuple, skb))
skb               625 net/sched/sch_cake.c static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
skb               640 net/sched/sch_cake.c 	skb_flow_dissect_flow_keys(skb, &keys,
skb               644 net/sched/sch_cake.c 		cake_update_flowkeys(&keys, skb);
skb               818 net/sched/sch_cake.c 	struct sk_buff *skb = flow->head;
skb               820 net/sched/sch_cake.c 	if (skb) {
skb               821 net/sched/sch_cake.c 		flow->head = skb->next;
skb               822 net/sched/sch_cake.c 		skb_mark_not_on_list(skb);
skb               825 net/sched/sch_cake.c 	return skb;
skb               830 net/sched/sch_cake.c static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
skb               833 net/sched/sch_cake.c 		flow->head = skb;
skb               835 net/sched/sch_cake.c 		flow->tail->next = skb;
skb               836 net/sched/sch_cake.c 	flow->tail = skb;
skb               837 net/sched/sch_cake.c 	skb->next = NULL;
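
dequeue_head() and flow_queue_add() above are the classic head/tail singly-linked FIFO sch_cake keeps per flow. The same structure in standalone C, with a stand-in packet struct instead of sk_buff:

#include <stdio.h>

struct pkt {
	struct pkt *next;
	int id;
};

struct flow {
	struct pkt *head;
	struct pkt *tail;
};

static void flow_queue_add(struct flow *f, struct pkt *p)
{
	if (!f->head)
		f->head = p;
	else
		f->tail->next = p;
	f->tail = p;
	p->next = NULL;
}

static struct pkt *dequeue_head(struct flow *f)
{
	struct pkt *p = f->head;

	if (p)
		f->head = p->next;	/* tail goes stale when empty; add fixes it */
	return p;
}

int main(void)
{
	struct flow f = { NULL, NULL };
	struct pkt a = { NULL, 1 }, b = { NULL, 2 };
	struct pkt *p;

	flow_queue_add(&f, &a);
	flow_queue_add(&f, &b);
	while ((p = dequeue_head(&f)))
		printf("pkt %d\n", p->id);
	return 0;
}
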
skb               840 net/sched/sch_cake.c static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
skb               843 net/sched/sch_cake.c 	unsigned int offset = skb_network_offset(skb);
skb               846 net/sched/sch_cake.c 	iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
skb               852 net/sched/sch_cake.c 		return skb_header_pointer(skb, offset + iph->ihl * 4,
skb               859 net/sched/sch_cake.c 		return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
skb               865 net/sched/sch_cake.c static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
skb               868 net/sched/sch_cake.c 	unsigned int offset = skb_network_offset(skb);
skb               875 net/sched/sch_cake.c 	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
skb               888 net/sched/sch_cake.c 			ipv6h = skb_header_pointer(skb, offset,
skb               909 net/sched/sch_cake.c 	tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
skb               913 net/sched/sch_cake.c 	return skb_header_pointer(skb, offset,
skb              1122 net/sched/sch_cake.c 	const struct sk_buff *skb;
skb              1132 net/sched/sch_cake.c 	skb = flow->tail;
skb              1133 net/sched/sch_cake.c 	tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
skb              1134 net/sched/sch_cake.c 	iph = cake_get_iphdr(skb, &_iph);
skb              1153 net/sched/sch_cake.c 	     skb_check && skb_check != skb;
skb              1249 net/sched/sch_cake.c 	if (elig_ack && aggressive && elig_ack->next == skb &&
skb              1309 net/sched/sch_cake.c static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
skb              1311 net/sched/sch_cake.c 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
skb              1313 net/sched/sch_cake.c 	u32 off = skb_network_offset(skb);
skb              1314 net/sched/sch_cake.c 	u32 len = qdisc_pkt_len(skb);
skb              1323 net/sched/sch_cake.c 	hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
skb              1331 net/sched/sch_cake.c 		th = skb_header_pointer(skb, skb_transport_offset(skb),
skb              1338 net/sched/sch_cake.c 		if (skb_header_pointer(skb, skb_transport_offset(skb),
skb              1344 net/sched/sch_cake.c 		segs = DIV_ROUND_UP(skb->len - hdr_len,
skb              1350 net/sched/sch_cake.c 	last_len = skb->len - shinfo->gso_size * (segs - 1);
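
The two cake_overhead() lines above compute how many segments a GSO super-packet will produce and the length of the final, possibly short, segment. The same arithmetic with DIV_ROUND_UP expanded, on example numbers chosen here for illustration:

#include <stdio.h>

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned int skb_len = 4300, hdr_len = 66, gso_size = 1448;
	unsigned int segs = div_round_up(skb_len - hdr_len, gso_size);
	unsigned int last_len = skb_len - gso_size * (segs - 1);

	/* 4234 payload bytes -> 3 segments; the last carries the remainder */
	printf("segs=%u last_len=%u\n", segs, last_len);
	return 0;
}
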
skb              1430 net/sched/sch_cake.c 			       struct sk_buff *skb,
skb              1433 net/sched/sch_cake.c 	u32 len = get_cobalt_cb(skb)->adjusted_len;
skb              1469 net/sched/sch_cake.c 	struct sk_buff *skb;
skb              1486 net/sched/sch_cake.c 	skb = dequeue_head(flow);
skb              1487 net/sched/sch_cake.c 	if (unlikely(!skb)) {
skb              1496 net/sched/sch_cake.c 	len = qdisc_pkt_len(skb);
skb              1497 net/sched/sch_cake.c 	q->buffer_used      -= skb->truesize;
skb              1508 net/sched/sch_cake.c 		cake_advance_shaper(q, b, skb, now, true);
skb              1510 net/sched/sch_cake.c 	__qdisc_drop(skb, to_free);
skb              1518 net/sched/sch_cake.c static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
skb              1520 net/sched/sch_cake.c 	int wlen = skb_network_offset(skb);
skb              1523 net/sched/sch_cake.c 	switch (tc_skb_protocol(skb)) {
skb              1526 net/sched/sch_cake.c 		if (!pskb_may_pull(skb, wlen) ||
skb              1527 net/sched/sch_cake.c 		    skb_try_make_writable(skb, wlen))
skb              1530 net/sched/sch_cake.c 		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
skb              1532 net/sched/sch_cake.c 			ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
skb              1537 net/sched/sch_cake.c 		if (!pskb_may_pull(skb, wlen) ||
skb              1538 net/sched/sch_cake.c 		    skb_try_make_writable(skb, wlen))
skb              1541 net/sched/sch_cake.c 		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
skb              1543 net/sched/sch_cake.c 			ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
skb              1556 net/sched/sch_cake.c 					     struct sk_buff *skb)
skb              1565 net/sched/sch_cake.c 	dscp = cake_handle_diffserv(skb,
skb              1567 net/sched/sch_cake.c 	mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
skb              1575 net/sched/sch_cake.c 	else if (TC_H_MAJ(skb->priority) == sch->handle &&
skb              1576 net/sched/sch_cake.c 		 TC_H_MIN(skb->priority) > 0 &&
skb              1577 net/sched/sch_cake.c 		 TC_H_MIN(skb->priority) <= q->tin_cnt)
skb              1578 net/sched/sch_cake.c 		tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
skb              1591 net/sched/sch_cake.c 			 struct sk_buff *skb, int flow_mode, int *qerr)
skb              1604 net/sched/sch_cake.c 	result = tcf_classify(skb, filter, &res, false);
skb              1624 net/sched/sch_cake.c 	*t = cake_select_tin(sch, skb);
skb              1625 net/sched/sch_cake.c 	return cake_hash(*t, skb, flow_mode, flow, host) + 1;
skb              1630 net/sched/sch_cake.c static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb              1634 net/sched/sch_cake.c 	int len = qdisc_pkt_len(skb);
skb              1643 net/sched/sch_cake.c 	idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
skb              1647 net/sched/sch_cake.c 		__qdisc_drop(skb, to_free);
skb              1677 net/sched/sch_cake.c 	if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
skb              1679 net/sched/sch_cake.c 		netdev_features_t features = netif_skb_features(skb);
skb              1682 net/sched/sch_cake.c 		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
skb              1684 net/sched/sch_cake.c 			return qdisc_drop(skb, sch, to_free);
skb              1711 net/sched/sch_cake.c 		consume_skb(skb);
skb              1714 net/sched/sch_cake.c 		cobalt_set_enqueue_time(skb, now);
skb              1715 net/sched/sch_cake.c 		get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
skb              1716 net/sched/sch_cake.c 		flow_queue_add(flow, skb);
skb              1726 net/sched/sch_cake.c 			q->buffer_used += skb->truesize - ack->truesize;
skb              1734 net/sched/sch_cake.c 			q->buffer_used      += skb->truesize;
skb              1853 net/sched/sch_cake.c 	struct sk_buff *skb = NULL;
skb              1857 net/sched/sch_cake.c 		skb = dequeue_head(flow);
skb              1858 net/sched/sch_cake.c 		len = qdisc_pkt_len(skb);
skb              1862 net/sched/sch_cake.c 		q->buffer_used		 -= skb->truesize;
skb              1868 net/sched/sch_cake.c 	return skb;
skb              1875 net/sched/sch_cake.c 	struct sk_buff *skb;
skb              1879 net/sched/sch_cake.c 		while (!!(skb = cake_dequeue_one(sch)))
skb              1880 net/sched/sch_cake.c 			kfree_skb(skb);
skb              1892 net/sched/sch_cake.c 	struct sk_buff *skb;
skb              2045 net/sched/sch_cake.c 		skb = cake_dequeue_one(sch);
skb              2046 net/sched/sch_cake.c 		if (!skb) {
skb              2098 net/sched/sch_cake.c 		if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
skb              2107 net/sched/sch_cake.c 			len = cake_advance_shaper(q, b, skb,
skb              2114 net/sched/sch_cake.c 		qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
skb              2116 net/sched/sch_cake.c 		kfree_skb(skb);
skb              2122 net/sched/sch_cake.c 	qdisc_bstats_update(sch, skb);
skb              2125 net/sched/sch_cake.c 	delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
skb              2132 net/sched/sch_cake.c 	len = cake_advance_shaper(q, b, skb, now, false);
skb              2160 net/sched/sch_cake.c 	return skb;
skb              2735 net/sched/sch_cake.c static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
skb              2740 net/sched/sch_cake.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb              2744 net/sched/sch_cake.c 	if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
skb              2748 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
skb              2752 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
skb              2755 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
skb              2758 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
skb              2761 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
skb              2765 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_INGRESS,
skb              2769 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
skb              2772 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_NAT,
skb              2776 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
skb              2779 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_WASH,
skb              2783 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
skb              2787 net/sched/sch_cake.c 		if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
skb              2790 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
skb              2793 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
skb              2796 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
skb              2800 net/sched/sch_cake.c 	if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
skb              2803 net/sched/sch_cake.c 	return nla_nest_end(skb, opts);
skb              2811 net/sched/sch_cake.c 	struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
skb              2820 net/sched/sch_cake.c 		if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
skb              2824 net/sched/sch_cake.c 		if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
skb              2841 net/sched/sch_cake.c 	tstats = nla_nest_start_noflag(d->skb, TCA_CAKE_STATS_TIN_STATS);
skb              2846 net/sched/sch_cake.c 		if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
skb              2850 net/sched/sch_cake.c 		if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
skb              2858 net/sched/sch_cake.c 		ts = nla_nest_start_noflag(d->skb, i + 1);
skb              2894 net/sched/sch_cake.c 		nla_nest_end(d->skb, ts);
skb              2900 net/sched/sch_cake.c 	nla_nest_end(d->skb, tstats);
skb              2901 net/sched/sch_cake.c 	return nla_nest_end(d->skb, stats);
skb              2904 net/sched/sch_cake.c 	nla_nest_cancel(d->skb, stats);
skb              2939 net/sched/sch_cake.c 			   struct sk_buff *skb, struct tcmsg *tcm)
skb              2957 net/sched/sch_cake.c 		const struct sk_buff *skb;
skb              2963 net/sched/sch_cake.c 			skb = flow->head;
skb              2964 net/sched/sch_cake.c 			while (skb) {
skb              2966 net/sched/sch_cake.c 				skb = skb->next;
skb              2978 net/sched/sch_cake.c 		stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
skb              2983 net/sched/sch_cake.c 		if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
skb              2987 net/sched/sch_cake.c 		if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
skb              3008 net/sched/sch_cake.c 		if (nla_nest_end(d->skb, stats) < 0)
skb              3015 net/sched/sch_cake.c 	nla_nest_cancel(d->skb, stats);
skb               179 net/sched/sch_cbq.c cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
skb               205 net/sched/sch_cbq.c cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
skb               211 net/sched/sch_cbq.c 	u32 prio = skb->priority;
skb               231 net/sched/sch_cbq.c 		result = tcf_classify(skb, fl, &res, true);
skb               257 net/sched/sch_cbq.c 			return cbq_reclassify(skb, cl);
skb               359 net/sched/sch_cbq.c cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               364 net/sched/sch_cbq.c 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
skb               372 net/sched/sch_cbq.c 		__qdisc_drop(skb, to_free);
skb               376 net/sched/sch_cbq.c 	ret = qdisc_enqueue(skb, cl->q, to_free);
skb               683 net/sched/sch_cbq.c 	struct sk_buff *skb;
skb               709 net/sched/sch_cbq.c 			skb = cl->q->dequeue(cl->q);
skb               715 net/sched/sch_cbq.c 			if (skb == NULL)
skb               718 net/sched/sch_cbq.c 			cl->deficit -= qdisc_pkt_len(skb);
skb               726 net/sched/sch_cbq.c 				borrow->xstats.borrows += qdisc_pkt_len(skb);
skb               727 net/sched/sch_cbq.c 				cl->xstats.borrows += qdisc_pkt_len(skb);
skb               730 net/sched/sch_cbq.c 			q->tx_len = qdisc_pkt_len(skb);
skb               737 net/sched/sch_cbq.c 			return skb;
skb               785 net/sched/sch_cbq.c 	struct sk_buff *skb;
skb               792 net/sched/sch_cbq.c 		skb = cbq_dequeue_prio(sch, prio);
skb               793 net/sched/sch_cbq.c 		if (skb)
skb               794 net/sched/sch_cbq.c 			return skb;
skb               802 net/sched/sch_cbq.c 	struct sk_buff *skb;
skb               816 net/sched/sch_cbq.c 		skb = cbq_dequeue_1(sch);
skb               817 net/sched/sch_cbq.c 		if (skb) {
skb               818 net/sched/sch_cbq.c 			qdisc_bstats_update(sch, skb);
skb               820 net/sched/sch_cbq.c 			return skb;
skb              1232 net/sched/sch_cbq.c static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
skb              1234 net/sched/sch_cbq.c 	unsigned char *b = skb_tail_pointer(skb);
skb              1236 net/sched/sch_cbq.c 	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
skb              1238 net/sched/sch_cbq.c 	return skb->len;
skb              1241 net/sched/sch_cbq.c 	nlmsg_trim(skb, b);
skb              1245 net/sched/sch_cbq.c static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
skb              1247 net/sched/sch_cbq.c 	unsigned char *b = skb_tail_pointer(skb);
skb              1262 net/sched/sch_cbq.c 	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
skb              1264 net/sched/sch_cbq.c 	return skb->len;
skb              1267 net/sched/sch_cbq.c 	nlmsg_trim(skb, b);
skb              1271 net/sched/sch_cbq.c static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
skb              1273 net/sched/sch_cbq.c 	unsigned char *b = skb_tail_pointer(skb);
skb              1282 net/sched/sch_cbq.c 	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
skb              1284 net/sched/sch_cbq.c 	return skb->len;
skb              1287 net/sched/sch_cbq.c 	nlmsg_trim(skb, b);
skb              1291 net/sched/sch_cbq.c static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
skb              1293 net/sched/sch_cbq.c 	unsigned char *b = skb_tail_pointer(skb);
skb              1300 net/sched/sch_cbq.c 		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
skb              1303 net/sched/sch_cbq.c 	return skb->len;
skb              1306 net/sched/sch_cbq.c 	nlmsg_trim(skb, b);
skb              1310 net/sched/sch_cbq.c static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
skb              1312 net/sched/sch_cbq.c 	if (cbq_dump_lss(skb, cl) < 0 ||
skb              1313 net/sched/sch_cbq.c 	    cbq_dump_rate(skb, cl) < 0 ||
skb              1314 net/sched/sch_cbq.c 	    cbq_dump_wrr(skb, cl) < 0 ||
skb              1315 net/sched/sch_cbq.c 	    cbq_dump_fopt(skb, cl) < 0)
skb              1320 net/sched/sch_cbq.c static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
skb              1325 net/sched/sch_cbq.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb              1328 net/sched/sch_cbq.c 	if (cbq_dump_attr(skb, &q->link) < 0)
skb              1330 net/sched/sch_cbq.c 	return nla_nest_end(skb, nest);
skb              1333 net/sched/sch_cbq.c 	nla_nest_cancel(skb, nest);
skb              1348 net/sched/sch_cbq.c 	       struct sk_buff *skb, struct tcmsg *tcm)
skb              1360 net/sched/sch_cbq.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb              1363 net/sched/sch_cbq.c 	if (cbq_dump_attr(skb, cl) < 0)
skb              1365 net/sched/sch_cbq.c 	return nla_nest_end(skb, nest);
skb              1368 net/sched/sch_cbq.c 	nla_nest_cancel(skb, nest);
skb                80 net/sched/sch_cbs.c 	int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch,
skb                87 net/sched/sch_cbs.c static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb                91 net/sched/sch_cbs.c 	unsigned int len = qdisc_pkt_len(skb);
skb                94 net/sched/sch_cbs.c 	err = child->ops->enqueue(skb, child, to_free);
skb               104 net/sched/sch_cbs.c static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch,
skb               110 net/sched/sch_cbs.c 	return cbs_child_enqueue(skb, sch, qdisc, to_free);
skb               113 net/sched/sch_cbs.c static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,
skb               127 net/sched/sch_cbs.c 	return cbs_child_enqueue(skb, sch, qdisc, to_free);
skb               130 net/sched/sch_cbs.c static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               135 net/sched/sch_cbs.c 	return q->enqueue(skb, sch, to_free);
skb               162 net/sched/sch_cbs.c 	struct sk_buff *skb;
skb               164 net/sched/sch_cbs.c 	skb = child->ops->dequeue(child);
skb               165 net/sched/sch_cbs.c 	if (!skb)
skb               168 net/sched/sch_cbs.c 	qdisc_qstats_backlog_dec(sch, skb);
skb               169 net/sched/sch_cbs.c 	qdisc_bstats_update(sch, skb);
skb               172 net/sched/sch_cbs.c 	return skb;
skb               180 net/sched/sch_cbs.c 	struct sk_buff *skb;
skb               206 net/sched/sch_cbs.c 	skb = cbs_child_dequeue(sch, qdisc);
skb               207 net/sched/sch_cbs.c 	if (!skb)
skb               210 net/sched/sch_cbs.c 	len = qdisc_pkt_len(skb);
skb               227 net/sched/sch_cbs.c 	return skb;
skb               451 net/sched/sch_cbs.c static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               457 net/sched/sch_cbs.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               467 net/sched/sch_cbs.c 	if (nla_put(skb, TCA_CBS_PARMS, sizeof(opt), &opt))
skb               470 net/sched/sch_cbs.c 	return nla_nest_end(skb, nest);
skb               473 net/sched/sch_cbs.c 	nla_nest_cancel(skb, nest);
skb               478 net/sched/sch_cbs.c 			  struct sk_buff *skb, struct tcmsg *tcm)
skb               118 net/sched/sch_choke.c 	struct sk_buff *skb = q->tab[idx];
skb               127 net/sched/sch_choke.c 	qdisc_qstats_backlog_dec(sch, skb);
skb               128 net/sched/sch_choke.c 	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
skb               129 net/sched/sch_choke.c 	qdisc_drop(skb, sch, to_free);
skb               139 net/sched/sch_choke.c static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
skb               141 net/sched/sch_choke.c 	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
skb               142 net/sched/sch_choke.c 	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
skb               145 net/sched/sch_choke.c static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
skb               147 net/sched/sch_choke.c 	choke_skb_cb(skb)->classid = classid;
skb               189 net/sched/sch_choke.c 	struct sk_buff *skb;
skb               194 net/sched/sch_choke.c 		skb = q->tab[*pidx];
skb               195 net/sched/sch_choke.c 		if (skb)
skb               196 net/sched/sch_choke.c 			return skb;
skb               219 net/sched/sch_choke.c static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               225 net/sched/sch_choke.c 	choke_skb_cb(skb)->keys_valid = 0;
skb               238 net/sched/sch_choke.c 		if (choke_match_random(q, skb, &idx)) {
skb               250 net/sched/sch_choke.c 			    !INET_ECN_set_ce(skb)) {
skb               262 net/sched/sch_choke.c 				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
skb               275 net/sched/sch_choke.c 		q->tab[q->tail] = skb;
skb               278 net/sched/sch_choke.c 		qdisc_qstats_backlog_inc(sch, skb);
skb               283 net/sched/sch_choke.c 	return qdisc_drop(skb, sch, to_free);
skb               286 net/sched/sch_choke.c 	qdisc_drop(skb, sch, to_free);
skb               293 net/sched/sch_choke.c 	struct sk_buff *skb;
skb               301 net/sched/sch_choke.c 	skb = q->tab[q->head];
skb               305 net/sched/sch_choke.c 	qdisc_qstats_backlog_dec(sch, skb);
skb               306 net/sched/sch_choke.c 	qdisc_bstats_update(sch, skb);
skb               308 net/sched/sch_choke.c 	return skb;
skb               316 net/sched/sch_choke.c 		struct sk_buff *skb = q->tab[q->head];
skb               319 net/sched/sch_choke.c 		if (!skb)
skb               321 net/sched/sch_choke.c 		rtnl_qdisc_drop(skb, sch);
skb               392 net/sched/sch_choke.c 				struct sk_buff *skb = q->tab[q->head];
skb               395 net/sched/sch_choke.c 				if (!skb)
skb               398 net/sched/sch_choke.c 					ntab[tail++] = skb;
skb               401 net/sched/sch_choke.c 				dropped += qdisc_pkt_len(skb);
skb               402 net/sched/sch_choke.c 				qdisc_qstats_backlog_dec(sch, skb);
skb               404 net/sched/sch_choke.c 				rtnl_qdisc_drop(skb, sch);
skb               439 net/sched/sch_choke.c static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               453 net/sched/sch_choke.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               457 net/sched/sch_choke.c 	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
skb               458 net/sched/sch_choke.c 	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
skb               460 net/sched/sch_choke.c 	return nla_nest_end(skb, opts);
skb               463 net/sched/sch_choke.c 	nla_nest_cancel(skb, opts);
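
The CHOKe logic visible above: on enqueue, draw a random packet already in the queue and, if it belongs to the same flow as the arriving packet, drop both. A toy version over an array of flow ids (0 marks an empty slot); real CHOKe compares full dissected flow keys and re-draws around holes, as in choke_peek_random():

#include <stdio.h>
#include <stdlib.h>

#define QLEN 8

static int choke_match_random(const int *tab, int flow, unsigned int *idx)
{
	*idx = rand() % QLEN;
	return tab[*idx] != 0 && tab[*idx] == flow;
}

int main(void)
{
	int tab[QLEN] = { 3, 3, 5, 3, 0, 5, 3, 5 };
	unsigned int idx;
	int arriving = 3;

	srand(1);
	if (choke_match_random(tab, arriving, &idx)) {
		tab[idx] = 0;	/* drop the queued packet too */
		printf("dropped slot %u and the arriving packet\n", idx);
	} else {
		printf("no match; arriving packet would be enqueued\n");
	}
	return 0;
}
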
skb                72 net/sched/sch_codel.c 	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
skb                74 net/sched/sch_codel.c 	if (skb) {
skb                75 net/sched/sch_codel.c 		sch->qstats.backlog -= qdisc_pkt_len(skb);
skb                76 net/sched/sch_codel.c 		prefetch(&skb->end); /* we'll need skb_shinfo() */
skb                78 net/sched/sch_codel.c 	return skb;
skb                81 net/sched/sch_codel.c static void drop_func(struct sk_buff *skb, void *ctx)
skb                85 net/sched/sch_codel.c 	kfree_skb(skb);
skb                92 net/sched/sch_codel.c 	struct sk_buff *skb;
skb                94 net/sched/sch_codel.c 	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
skb               106 net/sched/sch_codel.c 	if (skb)
skb               107 net/sched/sch_codel.c 		qdisc_bstats_update(sch, skb);
skb               108 net/sched/sch_codel.c 	return skb;
skb               111 net/sched/sch_codel.c static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               117 net/sched/sch_codel.c 		codel_set_enqueue_time(skb);
skb               118 net/sched/sch_codel.c 		return qdisc_enqueue_tail(skb, sch);
skb               122 net/sched/sch_codel.c 	return qdisc_drop(skb, sch, to_free);
skb               177 net/sched/sch_codel.c 		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
skb               179 net/sched/sch_codel.c 		dropped += qdisc_pkt_len(skb);
skb               180 net/sched/sch_codel.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               181 net/sched/sch_codel.c 		rtnl_qdisc_drop(skb, sch);
skb               216 net/sched/sch_codel.c static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               221 net/sched/sch_codel.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               225 net/sched/sch_codel.c 	if (nla_put_u32(skb, TCA_CODEL_TARGET,
skb               227 net/sched/sch_codel.c 	    nla_put_u32(skb, TCA_CODEL_LIMIT,
skb               229 net/sched/sch_codel.c 	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
skb               231 net/sched/sch_codel.c 	    nla_put_u32(skb, TCA_CODEL_ECN,
skb               235 net/sched/sch_codel.c 	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
skb               238 net/sched/sch_codel.c 	return nla_nest_end(skb, opts);
skb               241 net/sched/sch_codel.c 	nla_nest_cancel(skb, opts);
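
Note on the sch_codel.c hits above: packets are stamped on entry (codel_set_enqueue_time) and judged on exit by how long they waited, with codel_dequeue() invoking the drop_func callback shown above. A minimal form of that sojourn-time test; the real control law additionally requires the queue to stay above target for a whole interval and spaces successive drops, which this sketch omits:

	#include <stdint.h>

	struct stamped_pkt { uint64_t enqueue_ns; };

	/* return 1 when the packet's sojourn time exceeds the target */
	static int sojourn_over_target(const struct stamped_pkt *p,
				       uint64_t now_ns, uint64_t target_ns)
	{
		return now_ns - p->enqueue_ns > target_ns;
	}
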
skb               236 net/sched/sch_drr.c 			  struct sk_buff *skb, struct tcmsg *tcm)
skb               245 net/sched/sch_drr.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               248 net/sched/sch_drr.c 	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
skb               250 net/sched/sch_drr.c 	return nla_nest_end(skb, nest);
skb               253 net/sched/sch_drr.c 	nla_nest_cancel(skb, nest);
skb               302 net/sched/sch_drr.c static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
skb               311 net/sched/sch_drr.c 	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
skb               312 net/sched/sch_drr.c 		cl = drr_find_class(sch, skb->priority);
skb               319 net/sched/sch_drr.c 	result = tcf_classify(skb, fl, &res, false);
skb               340 net/sched/sch_drr.c static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               343 net/sched/sch_drr.c 	unsigned int len = qdisc_pkt_len(skb);
skb               349 net/sched/sch_drr.c 	cl = drr_classify(skb, sch, &err);
skb               353 net/sched/sch_drr.c 		__qdisc_drop(skb, to_free);
skb               358 net/sched/sch_drr.c 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
skb               381 net/sched/sch_drr.c 	struct sk_buff *skb;
skb               388 net/sched/sch_drr.c 		skb = cl->qdisc->ops->peek(cl->qdisc);
skb               389 net/sched/sch_drr.c 		if (skb == NULL) {
skb               394 net/sched/sch_drr.c 		len = qdisc_pkt_len(skb);
skb               397 net/sched/sch_drr.c 			skb = qdisc_dequeue_peeked(cl->qdisc);
skb               398 net/sched/sch_drr.c 			if (unlikely(skb == NULL))
skb               403 net/sched/sch_drr.c 			bstats_update(&cl->bstats, skb);
skb               404 net/sched/sch_drr.c 			qdisc_bstats_update(sch, skb);
skb               405 net/sched/sch_drr.c 			qdisc_qstats_backlog_dec(sch, skb);
skb               407 net/sched/sch_drr.c 			return skb;
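
Note on the sch_drr.c hits above: drr_dequeue() peeks the active class's head packet and either spends deficit and transmits, or (the len > cl->deficit branch) adds one quantum and rotates the class to the tail of the active list. Sketch of that decision with illustrative types:

	struct drr_pkt { unsigned int len; };

	struct drr_class_sketch {
		unsigned int quantum;
		int deficit;
	};

	/* Returns 1 when cl may send p now; 0 when it must move to the
	 * tail of the active list and wait a round, having earned one
	 * quantum of credit. */
	static int drr_try_send(struct drr_class_sketch *cl,
				const struct drr_pkt *p)
	{
		if (p->len > (unsigned int)cl->deficit) {
			cl->deficit += cl->quantum;
			return 0;
		}
		cl->deficit -= p->len;
		return 1;
	}
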
skb               201 net/sched/sch_dsmark.c static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               204 net/sched/sch_dsmark.c 	unsigned int len = qdisc_pkt_len(skb);
skb               208 net/sched/sch_dsmark.c 	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
skb               211 net/sched/sch_dsmark.c 		int wlen = skb_network_offset(skb);
skb               213 net/sched/sch_dsmark.c 		switch (tc_skb_protocol(skb)) {
skb               216 net/sched/sch_dsmark.c 			if (!pskb_may_pull(skb, wlen) ||
skb               217 net/sched/sch_dsmark.c 			    skb_try_make_writable(skb, wlen))
skb               220 net/sched/sch_dsmark.c 			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
skb               226 net/sched/sch_dsmark.c 			if (!pskb_may_pull(skb, wlen) ||
skb               227 net/sched/sch_dsmark.c 			    skb_try_make_writable(skb, wlen))
skb               230 net/sched/sch_dsmark.c 			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
skb               234 net/sched/sch_dsmark.c 			skb->tc_index = 0;
skb               239 net/sched/sch_dsmark.c 	if (TC_H_MAJ(skb->priority) == sch->handle)
skb               240 net/sched/sch_dsmark.c 		skb->tc_index = TC_H_MIN(skb->priority);
skb               244 net/sched/sch_dsmark.c 		int result = tcf_classify(skb, fl, &res, false);
skb               253 net/sched/sch_dsmark.c 			__qdisc_drop(skb, to_free);
skb               260 net/sched/sch_dsmark.c 			skb->tc_index = TC_H_MIN(res.classid);
skb               265 net/sched/sch_dsmark.c 				skb->tc_index = p->default_index;
skb               270 net/sched/sch_dsmark.c 	err = qdisc_enqueue(skb, p->q, to_free);
skb               283 net/sched/sch_dsmark.c 	qdisc_drop(skb, sch, to_free);
skb               290 net/sched/sch_dsmark.c 	struct sk_buff *skb;
skb               295 net/sched/sch_dsmark.c 	skb = qdisc_dequeue_peeked(p->q);
skb               296 net/sched/sch_dsmark.c 	if (skb == NULL)
skb               299 net/sched/sch_dsmark.c 	qdisc_bstats_update(sch, skb);
skb               300 net/sched/sch_dsmark.c 	qdisc_qstats_backlog_dec(sch, skb);
skb               303 net/sched/sch_dsmark.c 	index = skb->tc_index & (p->indices - 1);
skb               304 net/sched/sch_dsmark.c 	pr_debug("index %d->%d\n", skb->tc_index, index);
skb               306 net/sched/sch_dsmark.c 	switch (tc_skb_protocol(skb)) {
skb               308 net/sched/sch_dsmark.c 		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
skb               312 net/sched/sch_dsmark.c 		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
skb               323 net/sched/sch_dsmark.c 				__func__, ntohs(tc_skb_protocol(skb)));
skb               327 net/sched/sch_dsmark.c 	return skb;
skb               427 net/sched/sch_dsmark.c 			     struct sk_buff *skb, struct tcmsg *tcm)
skb               440 net/sched/sch_dsmark.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               443 net/sched/sch_dsmark.c 	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
skb               444 net/sched/sch_dsmark.c 	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
skb               447 net/sched/sch_dsmark.c 	return nla_nest_end(skb, opts);
skb               450 net/sched/sch_dsmark.c 	nla_nest_cancel(skb, opts);
skb               454 net/sched/sch_dsmark.c static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               459 net/sched/sch_dsmark.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               462 net/sched/sch_dsmark.c 	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
skb               466 net/sched/sch_dsmark.c 	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
skb               470 net/sched/sch_dsmark.c 	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
skb               473 net/sched/sch_dsmark.c 	return nla_nest_end(skb, opts);
skb               476 net/sched/sch_dsmark.c 	nla_nest_cancel(skb, opts);
skb               123 net/sched/sch_etf.c 	struct sk_buff *skb = etf_peek_timesortedlist(sch);
skb               126 net/sched/sch_etf.c 	if (!skb) {
skb               131 net/sched/sch_etf.c 	next = ktime_sub_ns(skb->tstamp, q->delta);
skb               135 net/sched/sch_etf.c static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
skb               139 net/sched/sch_etf.c 	ktime_t txtime = skb->tstamp;
skb               140 net/sched/sch_etf.c 	struct sock *sk = skb->sk;
skb               145 net/sched/sch_etf.c 	clone = skb_clone(skb, GFP_ATOMIC);
skb               177 net/sched/sch_etf.c 		struct sk_buff *skb;
skb               180 net/sched/sch_etf.c 		skb = rb_to_skb(parent);
skb               181 net/sched/sch_etf.c 		if (ktime_compare(txtime, skb->tstamp) >= 0) {
skb               200 net/sched/sch_etf.c static void timesortedlist_drop(struct Qdisc *sch, struct sk_buff *skb,
skb               207 net/sched/sch_etf.c 	skb_rbtree_walk_from_safe(skb, tmp) {
skb               208 net/sched/sch_etf.c 		if (ktime_after(skb->tstamp, now))
skb               211 net/sched/sch_etf.c 		rb_erase_cached(&skb->rbnode, &q->head);
skb               216 net/sched/sch_etf.c 		skb->next = NULL;
skb               217 net/sched/sch_etf.c 		skb->prev = NULL;
skb               218 net/sched/sch_etf.c 		skb->dev = qdisc_dev(sch);
skb               220 net/sched/sch_etf.c 		report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
skb               222 net/sched/sch_etf.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               223 net/sched/sch_etf.c 		qdisc_drop(skb, sch, &to_free);
skb               231 net/sched/sch_etf.c static void timesortedlist_remove(struct Qdisc *sch, struct sk_buff *skb)
skb               235 net/sched/sch_etf.c 	rb_erase_cached(&skb->rbnode, &q->head);
skb               240 net/sched/sch_etf.c 	skb->next = NULL;
skb               241 net/sched/sch_etf.c 	skb->prev = NULL;
skb               242 net/sched/sch_etf.c 	skb->dev = qdisc_dev(sch);
skb               244 net/sched/sch_etf.c 	qdisc_qstats_backlog_dec(sch, skb);
skb               246 net/sched/sch_etf.c 	qdisc_bstats_update(sch, skb);
skb               248 net/sched/sch_etf.c 	q->last = skb->tstamp;
skb               256 net/sched/sch_etf.c 	struct sk_buff *skb;
skb               259 net/sched/sch_etf.c 	skb = etf_peek_timesortedlist(sch);
skb               260 net/sched/sch_etf.c 	if (!skb)
skb               266 net/sched/sch_etf.c 	if (ktime_before(skb->tstamp, now)) {
skb               267 net/sched/sch_etf.c 		timesortedlist_drop(sch, skb, now);
skb               268 net/sched/sch_etf.c 		skb = NULL;
skb               276 net/sched/sch_etf.c 		timesortedlist_remove(sch, skb);
skb               277 net/sched/sch_etf.c 		skb->tstamp = now;
skb               281 net/sched/sch_etf.c 	next = ktime_sub_ns(skb->tstamp, q->delta);
skb               285 net/sched/sch_etf.c 		timesortedlist_remove(sch, skb);
skb               287 net/sched/sch_etf.c 		skb = NULL;
skb               293 net/sched/sch_etf.c 	return skb;
skb               426 net/sched/sch_etf.c 		struct sk_buff *skb = rb_to_skb(p);
skb               430 net/sched/sch_etf.c 		rb_erase_cached(&skb->rbnode, &q->head);
skb               431 net/sched/sch_etf.c 		rtnl_kfree_skbs(skb, skb);
skb               466 net/sched/sch_etf.c static int etf_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               472 net/sched/sch_etf.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               487 net/sched/sch_etf.c 	if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt))
skb               490 net/sched/sch_etf.c 	return nla_nest_end(skb, nest);
skb               493 net/sched/sch_etf.c 	nla_nest_cancel(skb, nest);
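
Note on the sch_etf.c hits above: etf_dequeue_timesortedlist() drops packets whose tx timestamp has already passed and releases one only when now has reached tstamp minus the configured delta; otherwise it re-arms the watchdog. A sketch of that three-way decision for the non-deadline-mode path (names illustrative):

	#include <stdint.h>

	enum etf_verdict { ETF_DROP, ETF_SEND, ETF_WAIT };

	static enum etf_verdict etf_decide(int64_t txtime_ns,
					   int64_t now_ns,
					   int64_t delta_ns)
	{
		if (txtime_ns < now_ns)
			return ETF_DROP;	/* tx time already missed */
		if (txtime_ns - delta_ns <= now_ns)
			return ETF_SEND;	/* inside the delta window */
		return ETF_WAIT;		/* re-arm the qdisc watchdog */
	}
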
skb                18 net/sched/sch_fifo.c static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb                21 net/sched/sch_fifo.c 	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
skb                22 net/sched/sch_fifo.c 		return qdisc_enqueue_tail(skb, sch);
skb                24 net/sched/sch_fifo.c 	return qdisc_drop(skb, sch, to_free);
skb                27 net/sched/sch_fifo.c static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb                31 net/sched/sch_fifo.c 		return qdisc_enqueue_tail(skb, sch);
skb                33 net/sched/sch_fifo.c 	return qdisc_drop(skb, sch, to_free);
skb                36 net/sched/sch_fifo.c static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb                42 net/sched/sch_fifo.c 		return qdisc_enqueue_tail(skb, sch);
skb                48 net/sched/sch_fifo.c 	qdisc_enqueue_tail(skb, sch);
skb                88 net/sched/sch_fifo.c static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
skb                92 net/sched/sch_fifo.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
skb                94 net/sched/sch_fifo.c 	return skb->len;
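
Note on the sch_fifo.c hits above: the three enqueue variants differ only in the admission test — bfifo limits queued bytes, pfifo limits queued packets, and pfifo_tail_enqueue makes room for the new packet by dropping from the queue before enqueueing it at the tail. Sketch of the two limit checks, with simplified bookkeeping rather than the kernel API:

	struct fifo_sketch {
		unsigned int backlog;	/* queued bytes   */
		unsigned int qlen;	/* queued packets */
		unsigned int limit;	/* bytes (bfifo) or packets (pfifo) */
	};

	static int bfifo_admit(const struct fifo_sketch *q, unsigned int pkt_len)
	{
		return q->backlog + pkt_len <= q->limit;	/* byte limit */
	}

	static int pfifo_admit(const struct fifo_sketch *q)
	{
		return q->qlen < q->limit;			/* packet limit */
	}
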
skb                57 net/sched/sch_fq.c static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
skb                59 net/sched/sch_fq.c 	qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
skb                60 net/sched/sch_fq.c 	return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
skb               241 net/sched/sch_fq.c static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
skb               244 net/sched/sch_fq.c 	struct sock *sk = skb->sk;
skb               249 net/sched/sch_fq.c 	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
skb               262 net/sched/sch_fq.c 		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
skb               268 net/sched/sch_fq.c 		skb_orphan(skb);
skb               270 net/sched/sch_fq.c 		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
skb               300 net/sched/sch_fq.c 			if (unlikely(skb->sk == sk &&
skb               328 net/sched/sch_fq.c 	if (skb->sk == sk) {
skb               346 net/sched/sch_fq.c 	struct sk_buff *skb = skb_rb_first(&flow->t_root);
skb               349 net/sched/sch_fq.c 	if (!skb)
skb               353 net/sched/sch_fq.c 		return skb;
skb               355 net/sched/sch_fq.c 	if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
skb               356 net/sched/sch_fq.c 		return skb;
skb               361 net/sched/sch_fq.c 			  struct sk_buff *skb)
skb               363 net/sched/sch_fq.c 	if (skb == flow->head) {
skb               364 net/sched/sch_fq.c 		flow->head = skb->next;
skb               366 net/sched/sch_fq.c 		rb_erase(&skb->rbnode, &flow->t_root);
skb               367 net/sched/sch_fq.c 		skb->dev = qdisc_dev(sch);
skb               374 net/sched/sch_fq.c 	struct sk_buff *skb = fq_peek(flow);
skb               376 net/sched/sch_fq.c 	if (skb) {
skb               377 net/sched/sch_fq.c 		fq_erase_head(sch, flow, skb);
skb               378 net/sched/sch_fq.c 		skb_mark_not_on_list(skb);
skb               380 net/sched/sch_fq.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               383 net/sched/sch_fq.c 	return skb;
skb               386 net/sched/sch_fq.c static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
skb               391 net/sched/sch_fq.c 	fq_skb_cb(skb)->time_to_send = skb->tstamp ?: ktime_get_ns();
skb               395 net/sched/sch_fq.c 	    fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
skb               397 net/sched/sch_fq.c 			flow->head = skb;
skb               399 net/sched/sch_fq.c 			flow->tail->next = skb;
skb               400 net/sched/sch_fq.c 		flow->tail = skb;
skb               401 net/sched/sch_fq.c 		skb->next = NULL;
skb               411 net/sched/sch_fq.c 		if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
skb               416 net/sched/sch_fq.c 	rb_link_node(&skb->rbnode, parent, p);
skb               417 net/sched/sch_fq.c 	rb_insert_color(&skb->rbnode, &flow->t_root);
skb               420 net/sched/sch_fq.c static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               427 net/sched/sch_fq.c 		return qdisc_drop(skb, sch, to_free);
skb               429 net/sched/sch_fq.c 	f = fq_classify(skb, q);
skb               432 net/sched/sch_fq.c 		return qdisc_drop(skb, sch, to_free);
skb               436 net/sched/sch_fq.c 	qdisc_qstats_backlog_inc(sch, skb);
skb               445 net/sched/sch_fq.c 	flow_queue_add(f, skb);
skb               486 net/sched/sch_fq.c 	struct sk_buff *skb;
skb               495 net/sched/sch_fq.c 	skb = fq_dequeue_head(sch, &q->internal);
skb               496 net/sched/sch_fq.c 	if (skb)
skb               521 net/sched/sch_fq.c 	skb = fq_peek(f);
skb               522 net/sched/sch_fq.c 	if (skb) {
skb               523 net/sched/sch_fq.c 		u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
skb               534 net/sched/sch_fq.c 			INET_ECN_set_ce(skb);
skb               539 net/sched/sch_fq.c 	skb = fq_dequeue_head(sch, f);
skb               540 net/sched/sch_fq.c 	if (!skb) {
skb               551 net/sched/sch_fq.c 	prefetch(&skb->end);
skb               552 net/sched/sch_fq.c 	plen = qdisc_pkt_len(skb);
skb               564 net/sched/sch_fq.c 	if (!skb->tstamp) {
skb               565 net/sched/sch_fq.c 		if (skb->sk)
skb               566 net/sched/sch_fq.c 			rate = min(skb->sk->sk_pacing_rate, rate);
skb               598 net/sched/sch_fq.c 	qdisc_bstats_update(sch, skb);
skb               599 net/sched/sch_fq.c 	return skb;
skb               607 net/sched/sch_fq.c 		struct sk_buff *skb = rb_to_skb(p);
skb               610 net/sched/sch_fq.c 		rb_erase(&skb->rbnode, &flow->t_root);
skb               611 net/sched/sch_fq.c 		rtnl_kfree_skbs(skb, skb);
skb               843 net/sched/sch_fq.c 		struct sk_buff *skb = fq_dequeue(sch);
skb               845 net/sched/sch_fq.c 		if (!skb)
skb               847 net/sched/sch_fq.c 		drop_len += qdisc_pkt_len(skb);
skb               848 net/sched/sch_fq.c 		rtnl_kfree_skbs(skb, skb);
skb               901 net/sched/sch_fq.c static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               907 net/sched/sch_fq.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               915 net/sched/sch_fq.c 	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
skb               916 net/sched/sch_fq.c 	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
skb               917 net/sched/sch_fq.c 	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
skb               918 net/sched/sch_fq.c 	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
skb               919 net/sched/sch_fq.c 	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
skb               920 net/sched/sch_fq.c 	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
skb               922 net/sched/sch_fq.c 	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
skb               924 net/sched/sch_fq.c 	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
skb               925 net/sched/sch_fq.c 	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
skb               927 net/sched/sch_fq.c 	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
skb               928 net/sched/sch_fq.c 	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
skb               931 net/sched/sch_fq.c 	return nla_nest_end(skb, opts);
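
Note on the sch_fq.c hits above: flow_queue_add() appends to a plain head/tail list when the packet's time_to_send is not earlier than the current tail's, and falls back to a per-flow rbtree (t_root) for out-of-order timestamps; fq_peek() then returns whichever of the list head and tree minimum is due first. Sketch of that peek decision with simplified types (the rbtree itself is elided):

	#include <stdint.h>
	#include <stddef.h>

	struct fq_pkt { uint64_t time_to_send; };

	/* list_head: oldest packet on the in-order list (or NULL);
	 * tree_min:  earliest packet in the time-ordered tree (or NULL). */
	static const struct fq_pkt *
	fq_peek_sketch(const struct fq_pkt *list_head,
		       const struct fq_pkt *tree_min)
	{
		if (!list_head || !tree_min)
			return list_head ? list_head : tree_min;
		return tree_min->time_to_send < list_head->time_to_send ?
		       tree_min : list_head;
	}
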
skb                72 net/sched/sch_fq_codel.c 				  struct sk_buff *skb)
skb                74 net/sched/sch_fq_codel.c 	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
skb                77 net/sched/sch_fq_codel.c static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
skb                85 net/sched/sch_fq_codel.c 	if (TC_H_MAJ(skb->priority) == sch->handle &&
skb                86 net/sched/sch_fq_codel.c 	    TC_H_MIN(skb->priority) > 0 &&
skb                87 net/sched/sch_fq_codel.c 	    TC_H_MIN(skb->priority) <= q->flows_cnt)
skb                88 net/sched/sch_fq_codel.c 		return TC_H_MIN(skb->priority);
skb                92 net/sched/sch_fq_codel.c 		return fq_codel_hash(q, skb) + 1;
skb                95 net/sched/sch_fq_codel.c 	result = tcf_classify(skb, filter, &res, false);
skb               119 net/sched/sch_fq_codel.c 	struct sk_buff *skb = flow->head;
skb               121 net/sched/sch_fq_codel.c 	flow->head = skb->next;
skb               122 net/sched/sch_fq_codel.c 	skb_mark_not_on_list(skb);
skb               123 net/sched/sch_fq_codel.c 	return skb;
skb               128 net/sched/sch_fq_codel.c 				  struct sk_buff *skb)
skb               131 net/sched/sch_fq_codel.c 		flow->head = skb;
skb               133 net/sched/sch_fq_codel.c 		flow->tail->next = skb;
skb               134 net/sched/sch_fq_codel.c 	flow->tail = skb;
skb               135 net/sched/sch_fq_codel.c 	skb->next = NULL;
skb               142 net/sched/sch_fq_codel.c 	struct sk_buff *skb;
skb               169 net/sched/sch_fq_codel.c 		skb = dequeue_head(flow);
skb               170 net/sched/sch_fq_codel.c 		len += qdisc_pkt_len(skb);
skb               171 net/sched/sch_fq_codel.c 		mem += get_codel_cb(skb)->mem_usage;
skb               172 net/sched/sch_fq_codel.c 		__qdisc_drop(skb, to_free);
skb               185 net/sched/sch_fq_codel.c static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               195 net/sched/sch_fq_codel.c 	idx = fq_codel_classify(skb, sch, &ret);
skb               199 net/sched/sch_fq_codel.c 		__qdisc_drop(skb, to_free);
skb               204 net/sched/sch_fq_codel.c 	codel_set_enqueue_time(skb);
skb               206 net/sched/sch_fq_codel.c 	flow_queue_add(flow, skb);
skb               207 net/sched/sch_fq_codel.c 	q->backlogs[idx] += qdisc_pkt_len(skb);
skb               208 net/sched/sch_fq_codel.c 	qdisc_qstats_backlog_inc(sch, skb);
skb               215 net/sched/sch_fq_codel.c 	get_codel_cb(skb)->mem_usage = skb->truesize;
skb               216 net/sched/sch_fq_codel.c 	q->memory_usage += get_codel_cb(skb)->mem_usage;
skb               225 net/sched/sch_fq_codel.c 	pkt_len = qdisc_pkt_len(skb);
skb               261 net/sched/sch_fq_codel.c 	struct sk_buff *skb = NULL;
skb               265 net/sched/sch_fq_codel.c 		skb = dequeue_head(flow);
skb               266 net/sched/sch_fq_codel.c 		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
skb               267 net/sched/sch_fq_codel.c 		q->memory_usage -= get_codel_cb(skb)->mem_usage;
skb               269 net/sched/sch_fq_codel.c 		sch->qstats.backlog -= qdisc_pkt_len(skb);
skb               271 net/sched/sch_fq_codel.c 	return skb;
skb               274 net/sched/sch_fq_codel.c static void drop_func(struct sk_buff *skb, void *ctx)
skb               278 net/sched/sch_fq_codel.c 	kfree_skb(skb);
skb               285 net/sched/sch_fq_codel.c 	struct sk_buff *skb;
skb               304 net/sched/sch_fq_codel.c 	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
skb               308 net/sched/sch_fq_codel.c 	if (!skb) {
skb               316 net/sched/sch_fq_codel.c 	qdisc_bstats_update(sch, skb);
skb               317 net/sched/sch_fq_codel.c 	flow->deficit -= qdisc_pkt_len(skb);
skb               327 net/sched/sch_fq_codel.c 	return skb;
skb               427 net/sched/sch_fq_codel.c 		struct sk_buff *skb = fq_codel_dequeue(sch);
skb               429 net/sched/sch_fq_codel.c 		q->cstats.drop_len += qdisc_pkt_len(skb);
skb               430 net/sched/sch_fq_codel.c 		rtnl_kfree_skbs(skb, skb);
skb               513 net/sched/sch_fq_codel.c static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               518 net/sched/sch_fq_codel.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               522 net/sched/sch_fq_codel.c 	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
skb               524 net/sched/sch_fq_codel.c 	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
skb               526 net/sched/sch_fq_codel.c 	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
skb               528 net/sched/sch_fq_codel.c 	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
skb               530 net/sched/sch_fq_codel.c 	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
skb               532 net/sched/sch_fq_codel.c 	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
skb               534 net/sched/sch_fq_codel.c 	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
skb               536 net/sched/sch_fq_codel.c 	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
skb               541 net/sched/sch_fq_codel.c 	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
skb               545 net/sched/sch_fq_codel.c 	return nla_nest_end(skb, opts);
skb               609 net/sched/sch_fq_codel.c 			  struct sk_buff *skb, struct tcmsg *tcm)
skb               625 net/sched/sch_fq_codel.c 		const struct sk_buff *skb;
skb               645 net/sched/sch_fq_codel.c 			skb = flow->head;
skb               646 net/sched/sch_fq_codel.c 			while (skb) {
skb               648 net/sched/sch_fq_codel.c 				skb = skb->next;
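
Note on the sch_fq_codel.c hits above: fq_codel_hash() maps skb_get_hash() onto flows_cnt buckets with reciprocal_scale(), a multiply-and-shift that avoids a modulo. A standalone version of that mapping (same arithmetic, illustrative name):

	#include <stdint.h>

	static uint32_t bucket_of(uint32_t hash, uint32_t flows_cnt)
	{
		/* uniform scaling of a 32-bit hash into [0, flows_cnt) */
		return (uint32_t)(((uint64_t)hash * flows_cnt) >> 32);
	}
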
skb                55 net/sched/sch_generic.c 	struct sk_buff *skb;
skb                62 net/sched/sch_generic.c 	skb = skb_peek(&q->skb_bad_txq);
skb                63 net/sched/sch_generic.c 	if (skb) {
skb                65 net/sched/sch_generic.c 		txq = skb_get_tx_queue(txq->dev, skb);
skb                67 net/sched/sch_generic.c 			skb = __skb_dequeue(&q->skb_bad_txq);
skb                69 net/sched/sch_generic.c 				qdisc_qstats_cpu_backlog_dec(q, skb);
skb                72 net/sched/sch_generic.c 				qdisc_qstats_backlog_dec(q, skb);
skb                76 net/sched/sch_generic.c 			skb = SKB_XOFF_MAGIC;
skb                83 net/sched/sch_generic.c 	return skb;
skb                88 net/sched/sch_generic.c 	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
skb                90 net/sched/sch_generic.c 	if (unlikely(skb))
skb                91 net/sched/sch_generic.c 		skb = __skb_dequeue_bad_txq(q);
skb                93 net/sched/sch_generic.c 	return skb;
skb                97 net/sched/sch_generic.c 					     struct sk_buff *skb)
skb               106 net/sched/sch_generic.c 	__skb_queue_tail(&q->skb_bad_txq, skb);
skb               109 net/sched/sch_generic.c 		qdisc_qstats_cpu_backlog_inc(q, skb);
skb               112 net/sched/sch_generic.c 		qdisc_qstats_backlog_inc(q, skb);
skb               120 net/sched/sch_generic.c static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
skb               129 net/sched/sch_generic.c 	while (skb) {
skb               130 net/sched/sch_generic.c 		struct sk_buff *next = skb->next;
skb               132 net/sched/sch_generic.c 		__skb_queue_tail(&q->gso_skb, skb);
skb               137 net/sched/sch_generic.c 			qdisc_qstats_cpu_backlog_inc(q, skb);
skb               141 net/sched/sch_generic.c 			qdisc_qstats_backlog_inc(q, skb);
skb               145 net/sched/sch_generic.c 		skb = next;
skb               153 net/sched/sch_generic.c 				 struct sk_buff *skb,
skb               157 net/sched/sch_generic.c 	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
skb               166 net/sched/sch_generic.c 		skb->next = nskb;
skb               167 net/sched/sch_generic.c 		skb = nskb;
skb               170 net/sched/sch_generic.c 	skb_mark_not_on_list(skb);
skb               177 net/sched/sch_generic.c 				      struct sk_buff *skb,
skb               180 net/sched/sch_generic.c 	int mapping = skb_get_queue_mapping(skb);
skb               192 net/sched/sch_generic.c 		skb->next = nskb;
skb               193 net/sched/sch_generic.c 		skb = nskb;
skb               196 net/sched/sch_generic.c 	skb_mark_not_on_list(skb);
skb               206 net/sched/sch_generic.c 	struct sk_buff *skb = NULL;
skb               217 net/sched/sch_generic.c 		skb = skb_peek(&q->gso_skb);
skb               222 net/sched/sch_generic.c 		if (!skb) {
skb               230 net/sched/sch_generic.c 		if (xfrm_offload(skb))
skb               233 net/sched/sch_generic.c 		txq = skb_get_tx_queue(txq->dev, skb);
skb               235 net/sched/sch_generic.c 			skb = __skb_dequeue(&q->gso_skb);
skb               237 net/sched/sch_generic.c 				qdisc_qstats_cpu_backlog_dec(q, skb);
skb               240 net/sched/sch_generic.c 				qdisc_qstats_backlog_dec(q, skb);
skb               244 net/sched/sch_generic.c 			skb = NULL;
skb               255 net/sched/sch_generic.c 		return skb;
skb               257 net/sched/sch_generic.c 	skb = qdisc_dequeue_skb_bad_txq(q);
skb               258 net/sched/sch_generic.c 	if (unlikely(skb)) {
skb               259 net/sched/sch_generic.c 		if (skb == SKB_XOFF_MAGIC)
skb               263 net/sched/sch_generic.c 	skb = q->dequeue(q);
skb               264 net/sched/sch_generic.c 	if (skb) {
skb               267 net/sched/sch_generic.c 			try_bulk_dequeue_skb(q, skb, txq, packets);
skb               269 net/sched/sch_generic.c 			try_bulk_dequeue_skb_slow(q, skb, packets);
skb               272 net/sched/sch_generic.c 	trace_qdisc_dequeue(q, txq, *packets, skb);
skb               273 net/sched/sch_generic.c 	return skb;
skb               285 net/sched/sch_generic.c bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
skb               298 net/sched/sch_generic.c 		skb = validate_xmit_skb_list(skb, dev, &again);
skb               305 net/sched/sch_generic.c 		dev_requeue_skb(skb, q);
skb               310 net/sched/sch_generic.c 	if (likely(skb)) {
skb               313 net/sched/sch_generic.c 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
skb               331 net/sched/sch_generic.c 		dev_requeue_skb(skb, q);
skb               362 net/sched/sch_generic.c 	struct sk_buff *skb;
skb               366 net/sched/sch_generic.c 	skb = dequeue_skb(q, &validate, packets);
skb               367 net/sched/sch_generic.c 	if (unlikely(!skb))
skb               374 net/sched/sch_generic.c 	txq = skb_get_tx_queue(dev, skb);
skb               376 net/sched/sch_generic.c 	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
skb               527 net/sched/sch_generic.c static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
skb               530 net/sched/sch_generic.c 	__qdisc_drop(skb, to_free);
skb               621 net/sched/sch_generic.c static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
skb               624 net/sched/sch_generic.c 	int band = prio2band[skb->priority & TC_PRIO_MAX];
skb               627 net/sched/sch_generic.c 	unsigned int pkt_len = qdisc_pkt_len(skb);
skb               630 net/sched/sch_generic.c 	err = skb_array_produce(q, skb);
skb               634 net/sched/sch_generic.c 			return qdisc_drop_cpu(skb, qdisc, to_free);
skb               636 net/sched/sch_generic.c 			return qdisc_drop(skb, qdisc, to_free);
skb               646 net/sched/sch_generic.c 	struct sk_buff *skb = NULL;
skb               649 net/sched/sch_generic.c 	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
skb               655 net/sched/sch_generic.c 		skb = __skb_array_consume(q);
skb               657 net/sched/sch_generic.c 	if (likely(skb)) {
skb               658 net/sched/sch_generic.c 		qdisc_update_stats_at_dequeue(qdisc, skb);
skb               663 net/sched/sch_generic.c 	return skb;
skb               669 net/sched/sch_generic.c 	struct sk_buff *skb = NULL;
skb               672 net/sched/sch_generic.c 	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
skb               675 net/sched/sch_generic.c 		skb = __skb_array_peek(q);
skb               678 net/sched/sch_generic.c 	return skb;
skb               688 net/sched/sch_generic.c 		struct sk_buff *skb;
skb               696 net/sched/sch_generic.c 		while ((skb = __skb_array_consume(q)) != NULL)
skb               697 net/sched/sch_generic.c 			kfree_skb(skb);
skb               711 net/sched/sch_generic.c static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
skb               716 net/sched/sch_generic.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
skb               718 net/sched/sch_generic.c 	return skb->len;
skb               912 net/sched/sch_generic.c 	struct sk_buff *skb, *tmp;
skb               917 net/sched/sch_generic.c 	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
skb               918 net/sched/sch_generic.c 		__skb_unlink(skb, &qdisc->gso_skb);
skb               919 net/sched/sch_generic.c 		kfree_skb_list(skb);
skb               922 net/sched/sch_generic.c 	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
skb               923 net/sched/sch_generic.c 		__skb_unlink(skb, &qdisc->skb_bad_txq);
skb               924 net/sched/sch_generic.c 		kfree_skb_list(skb);
skb               952 net/sched/sch_generic.c 	struct sk_buff *skb, *tmp;
skb               968 net/sched/sch_generic.c 	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
skb               969 net/sched/sch_generic.c 		__skb_unlink(skb, &qdisc->gso_skb);
skb               970 net/sched/sch_generic.c 		kfree_skb_list(skb);
skb               973 net/sched/sch_generic.c 	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
skb               974 net/sched/sch_generic.c 		__skb_unlink(skb, &qdisc->skb_bad_txq);
skb               975 net/sched/sch_generic.c 		kfree_skb_list(skb);
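
Note on the sch_generic.c hits above: pfifo_fast_dequeue() scans its bands in order and serves the first non-empty one, giving strict priority among the three bands; the band is chosen at enqueue from prio2band[skb->priority & TC_PRIO_MAX]. A strict-priority scan sketch with toy per-band rings (the kernel uses lockless skb_array rings instead):

	#include <stddef.h>

	#define BANDS 3
	#define SLOTS 8		/* toy power-of-two ring size */

	struct band_ring {
		void *slot[SLOTS];
		unsigned int head, tail;
	};

	static void *band_consume(struct band_ring *q)
	{
		if (q->head == q->tail)
			return NULL;
		return q->slot[q->head++ & (SLOTS - 1)];
	}

	/* serve band 0 before 1 before 2, as in the
	 * "for (band = 0; band < PFIFO_FAST_BANDS && !skb; ...)" loop */
	static void *dequeue_strict_prio(struct band_ring bands[BANDS])
	{
		void *pkt = NULL;
		int band;

		for (band = 0; band < BANDS && !pkt; band++)
			pkt = band_consume(&bands[band]);
		return pkt;
	}
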
skb               122 net/sched/sch_gred.c static inline u16 tc_index_to_dp(struct sk_buff *skb)
skb               124 net/sched/sch_gred.c 	return skb->tc_index & GRED_VQ_MASK;
skb               164 net/sched/sch_gred.c static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               170 net/sched/sch_gred.c 	u16 dp = tc_index_to_dp(skb);
skb               181 net/sched/sch_gred.c 			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
skb               183 net/sched/sch_gred.c 				return qdisc_enqueue_tail(skb, sch);
skb               190 net/sched/sch_gred.c 		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
skb               206 net/sched/sch_gred.c 	q->bytesin += qdisc_pkt_len(skb);
skb               227 net/sched/sch_gred.c 		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
skb               238 net/sched/sch_gred.c 		    !INET_ECN_set_ce(skb)) {
skb               246 net/sched/sch_gred.c 	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
skb               247 net/sched/sch_gred.c 		q->backlog += qdisc_pkt_len(skb);
skb               248 net/sched/sch_gred.c 		return qdisc_enqueue_tail(skb, sch);
skb               253 net/sched/sch_gred.c 	return qdisc_drop(skb, sch, to_free);
skb               256 net/sched/sch_gred.c 	qdisc_drop(skb, sch, to_free);
skb               262 net/sched/sch_gred.c 	struct sk_buff *skb;
skb               265 net/sched/sch_gred.c 	skb = qdisc_dequeue_head(sch);
skb               267 net/sched/sch_gred.c 	if (skb) {
skb               269 net/sched/sch_gred.c 		u16 dp = tc_index_to_dp(skb);
skb               273 net/sched/sch_gred.c 					     tc_index_to_dp(skb));
skb               275 net/sched/sch_gred.c 			q->backlog -= qdisc_pkt_len(skb);
skb               286 net/sched/sch_gred.c 		return skb;
skb               757 net/sched/sch_gred.c static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               773 net/sched/sch_gred.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               776 net/sched/sch_gred.c 	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
skb               784 net/sched/sch_gred.c 	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
skb               787 net/sched/sch_gred.c 	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
skb               791 net/sched/sch_gred.c 	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
skb               835 net/sched/sch_gred.c 		if (nla_append(skb, sizeof(opt), &opt) < 0)
skb               839 net/sched/sch_gred.c 	nla_nest_end(skb, parms);
skb               842 net/sched/sch_gred.c 	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
skb               853 net/sched/sch_gred.c 		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
skb               857 net/sched/sch_gred.c 		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
skb               860 net/sched/sch_gred.c 		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
skb               864 net/sched/sch_gred.c 		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
skb               867 net/sched/sch_gred.c 		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
skb               869 net/sched/sch_gred.c 		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
skb               872 net/sched/sch_gred.c 		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
skb               875 net/sched/sch_gred.c 		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
skb               878 net/sched/sch_gred.c 		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
skb               881 net/sched/sch_gred.c 		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
skb               884 net/sched/sch_gred.c 		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
skb               886 net/sched/sch_gred.c 		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
skb               889 net/sched/sch_gred.c 		nla_nest_end(skb, vq);
skb               891 net/sched/sch_gred.c 	nla_nest_end(skb, vqs);
skb               893 net/sched/sch_gred.c 	return nla_nest_end(skb, opts);
skb               896 net/sched/sch_gred.c 	nla_nest_cancel(skb, opts);
skb               834 net/sched/sch_hfsc.c 	struct sk_buff *skb;
skb               837 net/sched/sch_hfsc.c 	skb = sch->ops->peek(sch);
skb               838 net/sched/sch_hfsc.c 	if (unlikely(skb == NULL)) {
skb               842 net/sched/sch_hfsc.c 	len = qdisc_pkt_len(skb);
skb              1116 net/sched/sch_hfsc.c hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
skb              1124 net/sched/sch_hfsc.c 	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
skb              1125 net/sched/sch_hfsc.c 	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
skb              1132 net/sched/sch_hfsc.c 	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
skb              1254 net/sched/sch_hfsc.c hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
skb              1261 net/sched/sch_hfsc.c 	if (nla_put(skb, attr, sizeof(tsc), &tsc))
skb              1264 net/sched/sch_hfsc.c 	return skb->len;
skb              1271 net/sched/sch_hfsc.c hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
skb              1274 net/sched/sch_hfsc.c 	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
skb              1278 net/sched/sch_hfsc.c 	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
skb              1282 net/sched/sch_hfsc.c 	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
skb              1285 net/sched/sch_hfsc.c 	return skb->len;
skb              1292 net/sched/sch_hfsc.c hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
skb              1304 net/sched/sch_hfsc.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb              1307 net/sched/sch_hfsc.c 	if (hfsc_dump_curves(skb, cl) < 0)
skb              1309 net/sched/sch_hfsc.c 	return nla_nest_end(skb, nest);
skb              1312 net/sched/sch_hfsc.c 	nla_nest_cancel(skb, nest);
skb              1515 net/sched/sch_hfsc.c hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
skb              1518 net/sched/sch_hfsc.c 	unsigned char *b = skb_tail_pointer(skb);
skb              1522 net/sched/sch_hfsc.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
skb              1524 net/sched/sch_hfsc.c 	return skb->len;
skb              1527 net/sched/sch_hfsc.c 	nlmsg_trim(skb, b);
skb              1532 net/sched/sch_hfsc.c hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
skb              1534 net/sched/sch_hfsc.c 	unsigned int len = qdisc_pkt_len(skb);
skb              1539 net/sched/sch_hfsc.c 	cl = hfsc_classify(skb, sch, &err);
skb              1543 net/sched/sch_hfsc.c 		__qdisc_drop(skb, to_free);
skb              1548 net/sched/sch_hfsc.c 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
skb              1583 net/sched/sch_hfsc.c 	struct sk_buff *skb;
skb              1614 net/sched/sch_hfsc.c 	skb = qdisc_dequeue_peeked(cl->qdisc);
skb              1615 net/sched/sch_hfsc.c 	if (skb == NULL) {
skb              1620 net/sched/sch_hfsc.c 	bstats_update(&cl->bstats, skb);
skb              1621 net/sched/sch_hfsc.c 	update_vf(cl, qdisc_pkt_len(skb), cur_time);
skb              1623 net/sched/sch_hfsc.c 		cl->cl_cumul += qdisc_pkt_len(skb);
skb              1639 net/sched/sch_hfsc.c 	qdisc_bstats_update(sch, skb);
skb              1640 net/sched/sch_hfsc.c 	qdisc_qstats_backlog_dec(sch, skb);
skb              1643 net/sched/sch_hfsc.c 	return skb;
skb               247 net/sched/sch_hhf.c static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
skb               267 net/sched/sch_hhf.c 	hash = skb_get_hash_perturb(skb, &q->perturbation);
skb               289 net/sched/sch_hhf.c 	pkt_len = qdisc_pkt_len(skb);
skb               331 net/sched/sch_hhf.c 	struct sk_buff *skb = bucket->head;
skb               333 net/sched/sch_hhf.c 	bucket->head = skb->next;
skb               334 net/sched/sch_hhf.c 	skb_mark_not_on_list(skb);
skb               335 net/sched/sch_hhf.c 	return skb;
skb               339 net/sched/sch_hhf.c static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
skb               342 net/sched/sch_hhf.c 		bucket->head = skb;
skb               344 net/sched/sch_hhf.c 		bucket->tail->next = skb;
skb               345 net/sched/sch_hhf.c 	bucket->tail = skb;
skb               346 net/sched/sch_hhf.c 	skb->next = NULL;
skb               360 net/sched/sch_hhf.c 		struct sk_buff *skb = dequeue_head(bucket);
skb               363 net/sched/sch_hhf.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               364 net/sched/sch_hhf.c 		qdisc_drop(skb, sch, to_free);
skb               371 net/sched/sch_hhf.c static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               379 net/sched/sch_hhf.c 	idx = hhf_classify(skb, sch);
skb               382 net/sched/sch_hhf.c 	bucket_add(bucket, skb);
skb               383 net/sched/sch_hhf.c 	qdisc_qstats_backlog_inc(sch, skb);
skb               421 net/sched/sch_hhf.c 	struct sk_buff *skb = NULL;
skb               444 net/sched/sch_hhf.c 		skb = dequeue_head(bucket);
skb               446 net/sched/sch_hhf.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               449 net/sched/sch_hhf.c 	if (!skb) {
skb               457 net/sched/sch_hhf.c 	qdisc_bstats_update(sch, skb);
skb               458 net/sched/sch_hhf.c 	bucket->deficit -= qdisc_pkt_len(skb);
skb               460 net/sched/sch_hhf.c 	return skb;
skb               465 net/sched/sch_hhf.c 	struct sk_buff *skb;
skb               467 net/sched/sch_hhf.c 	while ((skb = hhf_dequeue(sch)) != NULL)
skb               468 net/sched/sch_hhf.c 		rtnl_kfree_skbs(skb, skb);
skb               566 net/sched/sch_hhf.c 		struct sk_buff *skb = hhf_dequeue(sch);
skb               568 net/sched/sch_hhf.c 		rtnl_kfree_skbs(skb, skb);
skb               654 net/sched/sch_hhf.c static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               659 net/sched/sch_hhf.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               663 net/sched/sch_hhf.c 	if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
skb               664 net/sched/sch_hhf.c 	    nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
skb               665 net/sched/sch_hhf.c 	    nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
skb               666 net/sched/sch_hhf.c 	    nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
skb               668 net/sched/sch_hhf.c 	    nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
skb               669 net/sched/sch_hhf.c 	    nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
skb               671 net/sched/sch_hhf.c 	    nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
skb               674 net/sched/sch_hhf.c 	return nla_nest_end(skb, opts);
skb               209 net/sched/sch_htb.c static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
skb               222 net/sched/sch_htb.c 	if (skb->priority == sch->handle)
skb               224 net/sched/sch_htb.c 	cl = htb_find(skb->priority, sch);
skb               235 net/sched/sch_htb.c 	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
skb               579 net/sched/sch_htb.c static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               583 net/sched/sch_htb.c 	unsigned int len = qdisc_pkt_len(skb);
skb               585 net/sched/sch_htb.c 	struct htb_class *cl = htb_classify(skb, sch, &ret);
skb               590 net/sched/sch_htb.c 			__qdisc_enqueue_tail(skb, &q->direct_queue);
skb               593 net/sched/sch_htb.c 			return qdisc_drop(skb, sch, to_free);
skb               599 net/sched/sch_htb.c 		__qdisc_drop(skb, to_free);
skb               602 net/sched/sch_htb.c 	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
skb               656 net/sched/sch_htb.c 			     int level, struct sk_buff *skb)
skb               658 net/sched/sch_htb.c 	int bytes = qdisc_pkt_len(skb);
skb               687 net/sched/sch_htb.c 			bstats_update(&cl->bstats, skb);
skb               826 net/sched/sch_htb.c 	struct sk_buff *skb = NULL;
skb               860 net/sched/sch_htb.c 		skb = cl->leaf.q->dequeue(cl->leaf.q);
skb               861 net/sched/sch_htb.c 		if (likely(skb != NULL))
skb               871 net/sched/sch_htb.c 	if (likely(skb != NULL)) {
skb               872 net/sched/sch_htb.c 		bstats_update(&cl->bstats, skb);
skb               873 net/sched/sch_htb.c 		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
skb               884 net/sched/sch_htb.c 		htb_charge_class(q, cl, level, skb);
skb               886 net/sched/sch_htb.c 	return skb;
skb               891 net/sched/sch_htb.c 	struct sk_buff *skb;
skb               898 net/sched/sch_htb.c 	skb = __qdisc_dequeue_head(&q->direct_queue);
skb               899 net/sched/sch_htb.c 	if (skb != NULL) {
skb               901 net/sched/sch_htb.c 		qdisc_bstats_update(sch, skb);
skb               902 net/sched/sch_htb.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               904 net/sched/sch_htb.c 		return skb;
skb               934 net/sched/sch_htb.c 			skb = htb_dequeue_tree(q, prio, level);
skb               935 net/sched/sch_htb.c 			if (likely(skb != NULL))
skb               944 net/sched/sch_htb.c 	return skb;
skb              1043 net/sched/sch_htb.c static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
skb              1060 net/sched/sch_htb.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb              1063 net/sched/sch_htb.c 	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
skb              1064 net/sched/sch_htb.c 	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
skb              1067 net/sched/sch_htb.c 	return nla_nest_end(skb, nest);
skb              1070 net/sched/sch_htb.c 	nla_nest_cancel(skb, nest);
skb              1075 net/sched/sch_htb.c 			  struct sk_buff *skb, struct tcmsg *tcm)
skb              1089 net/sched/sch_htb.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb              1102 net/sched/sch_htb.c 	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
skb              1105 net/sched/sch_htb.c 	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
skb              1109 net/sched/sch_htb.c 	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
skb              1113 net/sched/sch_htb.c 	return nla_nest_end(skb, nest);
skb              1116 net/sched/sch_htb.c 	nla_nest_cancel(skb, nest);
skb               101 net/sched/sch_ingress.c static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               105 net/sched/sch_ingress.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               109 net/sched/sch_ingress.c 	return nla_nest_end(skb, nest);
skb               112 net/sched/sch_ingress.c 	nla_nest_cancel(skb, nest);
skb               128 net/sched/sch_mq.c static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               233 net/sched/sch_mq.c 			 struct sk_buff *skb, struct tcmsg *tcm)
skb               344 net/sched/sch_mqprio.c 		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
skb               350 net/sched/sch_mqprio.c 		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
skb               355 net/sched/sch_mqprio.c 			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
skb               360 net/sched/sch_mqprio.c 		nla_nest_end(skb, nest);
skb               364 net/sched/sch_mqprio.c 		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
skb               369 net/sched/sch_mqprio.c 			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
skb               374 net/sched/sch_mqprio.c 		nla_nest_end(skb, nest);
skb               379 net/sched/sch_mqprio.c 	nla_nest_cancel(skb, nest);
skb               383 net/sched/sch_mqprio.c static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               387 net/sched/sch_mqprio.c 	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
skb               437 net/sched/sch_mqprio.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
skb               441 net/sched/sch_mqprio.c 	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
skb               445 net/sched/sch_mqprio.c 	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
skb               450 net/sched/sch_mqprio.c 	    (dump_rates(priv, &opt, skb) != 0))
skb               453 net/sched/sch_mqprio.c 	return nla_nest_end(skb, nla);
skb               455 net/sched/sch_mqprio.c 	nlmsg_trim(skb, nla);
skb               489 net/sched/sch_mqprio.c 			 struct sk_buff *skb, struct tcmsg *tcm)
skb                30 net/sched/sch_multiq.c multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
skb                39 net/sched/sch_multiq.c 	err = tcf_classify(skb, fl, &res, false);
skb                51 net/sched/sch_multiq.c 	band = skb_get_queue_mapping(skb);
skb                60 net/sched/sch_multiq.c multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb                66 net/sched/sch_multiq.c 	qdisc = multiq_classify(skb, sch, &ret);
skb                72 net/sched/sch_multiq.c 		__qdisc_drop(skb, to_free);
skb                77 net/sched/sch_multiq.c 	ret = qdisc_enqueue(skb, qdisc, to_free);
skb                91 net/sched/sch_multiq.c 	struct sk_buff *skb;
skb               106 net/sched/sch_multiq.c 			skb = qdisc->dequeue(qdisc);
skb               107 net/sched/sch_multiq.c 			if (skb) {
skb               108 net/sched/sch_multiq.c 				qdisc_bstats_update(sch, skb);
skb               110 net/sched/sch_multiq.c 				return skb;
skb               123 net/sched/sch_multiq.c 	struct sk_buff *skb;
skb               138 net/sched/sch_multiq.c 			skb = qdisc->ops->peek(qdisc);
skb               139 net/sched/sch_multiq.c 			if (skb)
skb               140 net/sched/sch_multiq.c 				return skb;
skb               262 net/sched/sch_multiq.c static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               265 net/sched/sch_multiq.c 	unsigned char *b = skb_tail_pointer(skb);
skb               271 net/sched/sch_multiq.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
skb               274 net/sched/sch_multiq.c 	return skb->len;
skb               277 net/sched/sch_multiq.c 	nlmsg_trim(skb, b);
skb               325 net/sched/sch_multiq.c 			     struct sk_buff *skb, struct tcmsg *tcm)
skb               161 net/sched/sch_netem.c static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
skb               164 net/sched/sch_netem.c 	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
skb               165 net/sched/sch_netem.c 	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
skb               366 net/sched/sch_netem.c 		struct sk_buff *skb = rb_to_skb(p);
skb               369 net/sched/sch_netem.c 		rb_erase(&skb->rbnode, &q->t_root);
skb               370 net/sched/sch_netem.c 		rtnl_kfree_skbs(skb, skb);
skb               393 net/sched/sch_netem.c 			struct sk_buff *skb;
skb               396 net/sched/sch_netem.c 			skb = rb_to_skb(parent);
skb               397 net/sched/sch_netem.c 			if (tnext >= netem_skb_cb(skb)->time_to_send)
skb               412 net/sched/sch_netem.c static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
skb               416 net/sched/sch_netem.c 	netdev_features_t features = netif_skb_features(skb);
skb               418 net/sched/sch_netem.c 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
skb               421 net/sched/sch_netem.c 		qdisc_drop(skb, sch, to_free);
skb               424 net/sched/sch_netem.c 	consume_skb(skb);
skb               434 net/sched/sch_netem.c static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               442 net/sched/sch_netem.c 	unsigned int prev_len = qdisc_pkt_len(skb);
skb               448 net/sched/sch_netem.c 	skb->prev = NULL;
skb               456 net/sched/sch_netem.c 		if (q->ecn && INET_ECN_set_ce(skb))
skb               463 net/sched/sch_netem.c 		__qdisc_drop(skb, to_free);
skb               471 net/sched/sch_netem.c 		skb_orphan_partial(skb);
skb               478 net/sched/sch_netem.c 	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
skb               495 net/sched/sch_netem.c 		if (skb_is_gso(skb)) {
skb               496 net/sched/sch_netem.c 			skb = netem_segment(skb, sch, to_free);
skb               497 net/sched/sch_netem.c 			if (!skb)
skb               499 net/sched/sch_netem.c 			segs = skb->next;
skb               500 net/sched/sch_netem.c 			skb_mark_not_on_list(skb);
skb               501 net/sched/sch_netem.c 			qdisc_skb_cb(skb)->pkt_len = skb->len;
skb               504 net/sched/sch_netem.c 		skb = skb_unshare(skb, GFP_ATOMIC);
skb               505 net/sched/sch_netem.c 		if (unlikely(!skb)) {
skb               509 net/sched/sch_netem.c 		if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb               510 net/sched/sch_netem.c 		    skb_checksum_help(skb)) {
skb               511 net/sched/sch_netem.c 			qdisc_drop(skb, sch, to_free);
skb               512 net/sched/sch_netem.c 			skb = NULL;
skb               516 net/sched/sch_netem.c 		skb->data[prandom_u32() % skb_headlen(skb)] ^=
skb               522 net/sched/sch_netem.c 		skb->next = segs;
skb               523 net/sched/sch_netem.c 		qdisc_drop_all(skb, sch, to_free);
skb               527 net/sched/sch_netem.c 	qdisc_qstats_backlog_inc(sch, skb);
skb               529 net/sched/sch_netem.c 	cb = netem_skb_cb(skb);
skb               576 net/sched/sch_netem.c 			delay += packet_time_ns(qdisc_pkt_len(skb), q);
skb               581 net/sched/sch_netem.c 		tfifo_enqueue(skb, sch);
skb               590 net/sched/sch_netem.c 		__qdisc_enqueue_head(skb, &sch->q);
skb               599 net/sched/sch_netem.c 		len = skb ? skb->len : 0;
skb               600 net/sched/sch_netem.c 		nb = skb ? 1 : 0;
skb               619 net/sched/sch_netem.c 	} else if (!skb) {
skb               650 net/sched/sch_netem.c 	struct sk_buff *skb = skb_rb_first(&q->t_root);
skb               653 net/sched/sch_netem.c 	if (!skb)
skb               656 net/sched/sch_netem.c 		return skb;
skb               658 net/sched/sch_netem.c 	t1 = netem_skb_cb(skb)->time_to_send;
skb               661 net/sched/sch_netem.c 		return skb;
skb               665 net/sched/sch_netem.c static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
skb               667 net/sched/sch_netem.c 	if (skb == q->t_head) {
skb               668 net/sched/sch_netem.c 		q->t_head = skb->next;
skb               672 net/sched/sch_netem.c 		rb_erase(&skb->rbnode, &q->t_root);
skb               679 net/sched/sch_netem.c 	struct sk_buff *skb;
skb               682 net/sched/sch_netem.c 	skb = __qdisc_dequeue_head(&sch->q);
skb               683 net/sched/sch_netem.c 	if (skb) {
skb               684 net/sched/sch_netem.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               686 net/sched/sch_netem.c 		qdisc_bstats_update(sch, skb);
skb               687 net/sched/sch_netem.c 		return skb;
skb               689 net/sched/sch_netem.c 	skb = netem_peek(q);
skb               690 net/sched/sch_netem.c 	if (skb) {
skb               695 net/sched/sch_netem.c 		time_to_send = netem_skb_cb(skb)->time_to_send;
skb               700 net/sched/sch_netem.c 			netem_erase_head(q, skb);
skb               702 net/sched/sch_netem.c 			qdisc_qstats_backlog_dec(sch, skb);
skb               703 net/sched/sch_netem.c 			skb->next = NULL;
skb               704 net/sched/sch_netem.c 			skb->prev = NULL;
skb               708 net/sched/sch_netem.c 			skb->dev = qdisc_dev(sch);
skb               712 net/sched/sch_netem.c 				q->slot.bytes_left -= qdisc_pkt_len(skb);
skb               719 net/sched/sch_netem.c 				unsigned int pkt_len = qdisc_pkt_len(skb);
skb               723 net/sched/sch_netem.c 				err = qdisc_enqueue(skb, q->qdisc, &to_free);
skb               737 net/sched/sch_netem.c 			skb = q->qdisc->ops->dequeue(q->qdisc);
skb               738 net/sched/sch_netem.c 			if (skb)
skb               748 net/sched/sch_netem.c 		skb = q->qdisc->ops->dequeue(q->qdisc);
skb               749 net/sched/sch_netem.c 		if (skb)
skb              1082 net/sched/sch_netem.c 			   struct sk_buff *skb)
skb              1086 net/sched/sch_netem.c 	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
skb              1093 net/sched/sch_netem.c 		nla_nest_cancel(skb, nest);
skb              1105 net/sched/sch_netem.c 		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
skb              1117 net/sched/sch_netem.c 		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
skb              1123 net/sched/sch_netem.c 	nla_nest_end(skb, nest);
skb              1127 net/sched/sch_netem.c 	nla_nest_cancel(skb, nest);
skb              1131 net/sched/sch_netem.c static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
skb              1134 net/sched/sch_netem.c 	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
skb              1150 net/sched/sch_netem.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
skb              1153 net/sched/sch_netem.c 	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
skb              1156 net/sched/sch_netem.c 	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
skb              1162 net/sched/sch_netem.c 	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
skb              1167 net/sched/sch_netem.c 	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
skb              1172 net/sched/sch_netem.c 	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
skb              1176 net/sched/sch_netem.c 		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
skb              1186 net/sched/sch_netem.c 	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
skb              1189 net/sched/sch_netem.c 	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
skb              1192 net/sched/sch_netem.c 	if (dump_loss_model(q, skb) != 0)
skb              1202 net/sched/sch_netem.c 		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
skb              1206 net/sched/sch_netem.c 	return nla_nest_end(skb, nla);
skb              1209 net/sched/sch_netem.c 	nlmsg_trim(skb, nla);
skb              1214 net/sched/sch_netem.c 			  struct sk_buff *skb, struct tcmsg *tcm)
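The sch_netem.c lines above trace one mechanism: tfifo_enqueue() stamps each packet's netem_skb_cb with a time_to_send, and the dequeue path (netem_peek(), netem_erase_head()) releases the earliest-stamped packet only once its deadline has passed, falling back to the inner qdisc otherwise. A minimal userspace sketch of that time-ordered release, assuming a sorted singly linked list where the kernel keeps an rbtree plus a t_head/t_tail fast path; struct pkt and both helpers are hypothetical stand-ins, not the kernel code:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for netem's tfifo: packets sorted by time_to_send. */
struct pkt {
	long long time_to_send;	/* release deadline, in ns */
	int len;
	struct pkt *next;
};

static struct pkt *tfifo_head;

/* Insert in ascending time_to_send order (the kernel uses an rbtree
 * plus a linear t_head/t_tail fast path for in-order arrivals). */
static void tfifo_enqueue(struct pkt *p)
{
	struct pkt **pp = &tfifo_head;

	while (*pp && (*pp)->time_to_send <= p->time_to_send)
		pp = &(*pp)->next;
	p->next = *pp;
	*pp = p;
}

/* Release the head only once its deadline has passed, mirroring the
 * peek/erase-head pattern of netem_dequeue(). Returns NULL while the
 * earliest packet is still in the future. */
static struct pkt *tfifo_dequeue(long long now)
{
	struct pkt *p = tfifo_head;

	if (!p || p->time_to_send > now)
		return NULL;
	tfifo_head = p->next;
	p->next = NULL;
	return p;
}

int main(void)
{
	struct pkt a = { .time_to_send = 200, .len = 100 };
	struct pkt b = { .time_to_send = 100, .len = 60 };

	tfifo_enqueue(&a);
	tfifo_enqueue(&b);	/* sorts ahead of a despite arriving later */

	for (long long now = 0; now <= 300; now += 100) {
		struct pkt *p = tfifo_dequeue(now);

		printf("t=%lld -> %s\n", now, p ? "release" : "hold");
	}
	return 0;
}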
skb               151 net/sched/sch_pie.c static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               162 net/sched/sch_pie.c 	if (!drop_early(sch, skb->len)) {
skb               165 net/sched/sch_pie.c 		   INET_ECN_set_ce(skb)) {
skb               179 net/sched/sch_pie.c 		return qdisc_enqueue_tail(skb, sch);
skb               186 net/sched/sch_pie.c 	return qdisc_drop(skb, sch, to_free);
skb               253 net/sched/sch_pie.c 		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
skb               255 net/sched/sch_pie.c 		dropped += qdisc_pkt_len(skb);
skb               256 net/sched/sch_pie.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               257 net/sched/sch_pie.c 		rtnl_qdisc_drop(skb, sch);
skb               265 net/sched/sch_pie.c static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
skb               289 net/sched/sch_pie.c 		q->vars.dq_count += skb->len;
skb               481 net/sched/sch_pie.c static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               486 net/sched/sch_pie.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               491 net/sched/sch_pie.c 	if (nla_put_u32(skb, TCA_PIE_TARGET,
skb               494 net/sched/sch_pie.c 	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
skb               495 net/sched/sch_pie.c 	    nla_put_u32(skb, TCA_PIE_TUPDATE,
skb               497 net/sched/sch_pie.c 	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
skb               498 net/sched/sch_pie.c 	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
skb               499 net/sched/sch_pie.c 	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
skb               500 net/sched/sch_pie.c 	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
skb               503 net/sched/sch_pie.c 	return nla_nest_end(skb, opts);
skb               506 net/sched/sch_pie.c 	nla_nest_cancel(skb, opts);
skb               532 net/sched/sch_pie.c 	struct sk_buff *skb = qdisc_dequeue_head(sch);
skb               534 net/sched/sch_pie.c 	if (!skb)
skb               537 net/sched/sch_pie.c 	pie_process_dequeue(sch, skb);
skb               538 net/sched/sch_pie.c 	return skb;
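The sch_pie.c entries split the same way the algorithm does: pie_qdisc_enqueue() consults drop_early() and either tail-queues, ECN-marks, or drops, while pie_process_dequeue() feeds the departure-rate estimator. A toy sketch of the enqueue-side coin flip, assuming the drop probability has already been computed by the periodic controller; struct pie_vars and drop_early() here are hypothetical reductions, not the kernel's versions:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical PIE-like state: prob is normally updated by a periodic
 * controller from the measured queueing delay; here it is fixed. */
struct pie_vars {
	double prob;		/* drop probability in [0,1] */
};

/* The shape of drop_early(): flip a coin against the current
 * probability before enqueueing (the kernel also exempts small
 * queues and can ECN-mark instead of dropping). */
static bool drop_early(const struct pie_vars *v)
{
	return rand() / (RAND_MAX + 1.0) < v->prob;
}

int main(void)
{
	struct pie_vars v = { .prob = 0.25 };
	int drops = 0, n = 100000;

	srand(1);
	for (int i = 0; i < n; i++)
		drops += drop_early(&v);
	printf("dropped %d of %d (~25%% expected)\n", drops, n);
	return 0;
}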
skb                87 net/sched/sch_plug.c static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb                92 net/sched/sch_plug.c 	if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
skb                95 net/sched/sch_plug.c 		return qdisc_enqueue_tail(skb, sch);
skb                98 net/sched/sch_plug.c 	return qdisc_drop(skb, sch, to_free);
skb                31 net/sched/sch_prio.c prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
skb                34 net/sched/sch_prio.c 	u32 band = skb->priority;
skb                40 net/sched/sch_prio.c 	if (TC_H_MAJ(skb->priority) != sch->handle) {
skb                42 net/sched/sch_prio.c 		err = tcf_classify(skb, fl, &res, false);
skb                69 net/sched/sch_prio.c prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
skb                71 net/sched/sch_prio.c 	unsigned int len = qdisc_pkt_len(skb);
skb                75 net/sched/sch_prio.c 	qdisc = prio_classify(skb, sch, &ret);
skb                81 net/sched/sch_prio.c 		__qdisc_drop(skb, to_free);
skb                86 net/sched/sch_prio.c 	ret = qdisc_enqueue(skb, qdisc, to_free);
skb               104 net/sched/sch_prio.c 		struct sk_buff *skb = qdisc->ops->peek(qdisc);
skb               105 net/sched/sch_prio.c 		if (skb)
skb               106 net/sched/sch_prio.c 			return skb;
skb               118 net/sched/sch_prio.c 		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
skb               119 net/sched/sch_prio.c 		if (skb) {
skb               120 net/sched/sch_prio.c 			qdisc_bstats_update(sch, skb);
skb               121 net/sched/sch_prio.c 			qdisc_qstats_backlog_dec(sch, skb);
skb               123 net/sched/sch_prio.c 			return skb;
skb               264 net/sched/sch_prio.c static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               267 net/sched/sch_prio.c 	unsigned char *b = skb_tail_pointer(skb);
skb               278 net/sched/sch_prio.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
skb               281 net/sched/sch_prio.c 	return skb->len;
skb               284 net/sched/sch_prio.c 	nlmsg_trim(skb, b);
skb               347 net/sched/sch_prio.c static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
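sch_prio.c is strict priority: prio_classify() maps skb->priority (or a tc filter result) to a band, and prio_dequeue() serves the lowest-numbered non-empty band first. A self-contained sketch of that scan, with hypothetical fixed-size ring buffers standing in for the per-band child qdiscs:

#include <stdio.h>

#define BANDS 3
#define QLEN  16

/* Hypothetical per-band FIFOs in place of the child qdiscs that
 * sch_prio delegates to. head/tail grow monotonically; occupancy
 * is tail - head. */
static int queue[BANDS][QLEN];
static int head[BANDS], tail[BANDS];

static int enqueue(int band, int pkt)
{
	if (tail[band] - head[band] == QLEN)
		return -1;			/* band full: drop */
	queue[band][tail[band]++ % QLEN] = pkt;
	return 0;
}

/* prio_dequeue() in miniature: always serve the lowest-numbered
 * (highest-priority) non-empty band first. */
static int dequeue(void)
{
	for (int band = 0; band < BANDS; band++)
		if (head[band] != tail[band])
			return queue[band][head[band]++ % QLEN];
	return -1;				/* all bands empty */
}

int main(void)
{
	enqueue(2, 100);	/* low priority arrives first */
	enqueue(0, 200);	/* high priority arrives later */
	printf("%d\n", dequeue());	/* 200: band 0 wins */
	printf("%d\n", dequeue());	/* 100 */
	return 0;
}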
skb               610 net/sched/sch_qfq.c 			  struct sk_buff *skb, struct tcmsg *tcm)
skb               619 net/sched/sch_qfq.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               622 net/sched/sch_qfq.c 	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
skb               623 net/sched/sch_qfq.c 	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
skb               625 net/sched/sch_qfq.c 	return nla_nest_end(skb, nest);
skb               628 net/sched/sch_qfq.c 	nla_nest_cancel(skb, nest);
skb               676 net/sched/sch_qfq.c static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
skb               685 net/sched/sch_qfq.c 	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
skb               686 net/sched/sch_qfq.c 		pr_debug("qfq_classify: found %d\n", skb->priority);
skb               687 net/sched/sch_qfq.c 		cl = qfq_find_class(sch, skb->priority);
skb               694 net/sched/sch_qfq.c 	result = tcf_classify(skb, fl, &res, false);
skb               993 net/sched/sch_qfq.c 	struct sk_buff *skb;
skb               996 net/sched/sch_qfq.c 	skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
skb               997 net/sched/sch_qfq.c 	if (skb == NULL)
skb              1000 net/sched/sch_qfq.c 		*len = qdisc_pkt_len(skb);
skb              1002 net/sched/sch_qfq.c 	return skb;
skb              1082 net/sched/sch_qfq.c 	struct sk_buff *skb = NULL;
skb              1090 net/sched/sch_qfq.c 		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
skb              1127 net/sched/sch_qfq.c 		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
skb              1129 net/sched/sch_qfq.c 	if (!skb)
skb              1132 net/sched/sch_qfq.c 	qdisc_qstats_backlog_dec(sch, skb);
skb              1134 net/sched/sch_qfq.c 	qdisc_bstats_update(sch, skb);
skb              1151 net/sched/sch_qfq.c 	return skb;
skb              1197 net/sched/sch_qfq.c static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb              1200 net/sched/sch_qfq.c 	unsigned int len = qdisc_pkt_len(skb), gso_segs;
skb              1207 net/sched/sch_qfq.c 	cl = qfq_classify(skb, sch, &err);
skb              1211 net/sched/sch_qfq.c 		__qdisc_drop(skb, to_free);
skb              1222 net/sched/sch_qfq.c 			return qdisc_drop(skb, sch, to_free);
skb              1226 net/sched/sch_qfq.c 	gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
skb              1228 net/sched/sch_qfq.c 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
skb              1246 net/sched/sch_qfq.c 		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
skb                57 net/sched/sch_red.c static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb                77 net/sched/sch_red.c 		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
skb                88 net/sched/sch_red.c 		    !INET_ECN_set_ce(skb)) {
skb                97 net/sched/sch_red.c 	ret = qdisc_enqueue(skb, child, to_free);
skb                99 net/sched/sch_red.c 		qdisc_qstats_backlog_inc(sch, skb);
skb               108 net/sched/sch_red.c 	qdisc_drop(skb, sch, to_free);
skb               114 net/sched/sch_red.c 	struct sk_buff *skb;
skb               118 net/sched/sch_red.c 	skb = child->dequeue(child);
skb               119 net/sched/sch_red.c 	if (skb) {
skb               120 net/sched/sch_red.c 		qdisc_bstats_update(sch, skb);
skb               121 net/sched/sch_red.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               127 net/sched/sch_red.c 	return skb;
skb               299 net/sched/sch_red.c static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               318 net/sched/sch_red.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               321 net/sched/sch_red.c 	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
skb               322 net/sched/sch_red.c 	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
skb               324 net/sched/sch_red.c 	return nla_nest_end(skb, opts);
skb               327 net/sched/sch_red.c 	nla_nest_cancel(skb, opts);
skb               358 net/sched/sch_red.c 			  struct sk_buff *skb, struct tcmsg *tcm)
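The sch_red.c lines show RED's three-way outcome: pass, ECN-mark via INET_ECN_set_ce(), or drop, chosen from the averaged queue length against the min/max thresholds. A sketch of that decision ladder under assumed parameters; struct red and red_decide() are illustrative names, and the EWMA that produces qavg is omitted:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum red_action { RED_PASS, RED_MARK, RED_DROP };

/* Hypothetical RED state: qavg is the EWMA of queue length the kernel
 * keeps in red_parms; use_ecn selects marking over dropping. */
struct red {
	double qavg, min_th, max_th, max_p;
	bool use_ecn;
};

/* The decision ladder behind red_enqueue(): pass below min_th, force
 * mark/drop above max_th, probabilistic in between. */
static enum red_action red_decide(const struct red *r)
{
	double p;

	if (r->qavg < r->min_th)
		return RED_PASS;
	if (r->qavg >= r->max_th)
		return r->use_ecn ? RED_MARK : RED_DROP;

	/* Linear ramp from 0 to max_p across [min_th, max_th). */
	p = r->max_p * (r->qavg - r->min_th) / (r->max_th - r->min_th);
	if (rand() / (RAND_MAX + 1.0) >= p)
		return RED_PASS;
	return r->use_ecn ? RED_MARK : RED_DROP;
}

int main(void)
{
	struct red r = { .qavg = 40, .min_th = 20, .max_th = 60,
			 .max_p = 0.1, .use_ecn = true };

	srand(1);
	for (int i = 0; i < 5; i++)
		printf("%d\n", red_decide(&r));	/* mostly 0, occasionally 1 */
	return 0;
}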
skb                92 net/sched/sch_sfb.c static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
skb                94 net/sched/sch_sfb.c 	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
skb                95 net/sched/sch_sfb.c 	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
skb               102 net/sched/sch_sfb.c static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
skb               104 net/sched/sch_sfb.c 	return sfb_skb_cb(skb)->hashes[slot];
skb               138 net/sched/sch_sfb.c static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
skb               142 net/sched/sch_sfb.c 	sfbhash = sfb_hash(skb, 0);
skb               146 net/sched/sch_sfb.c 	sfbhash = sfb_hash(skb, 1);
skb               167 net/sched/sch_sfb.c static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
skb               171 net/sched/sch_sfb.c 	sfbhash = sfb_hash(skb, 0);
skb               175 net/sched/sch_sfb.c 	sfbhash = sfb_hash(skb, 1);
skb               234 net/sched/sch_sfb.c static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
skb               254 net/sched/sch_sfb.c static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
skb               260 net/sched/sch_sfb.c 	result = tcf_classify(skb, fl, &res, false);
skb               279 net/sched/sch_sfb.c static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               316 net/sched/sch_sfb.c 		if (!sfb_classify(skb, fl, &ret, &salt))
skb               320 net/sched/sch_sfb.c 		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
skb               326 net/sched/sch_sfb.c 	sfb_skb_cb(skb)->hashes[slot] = sfbhash;
skb               344 net/sched/sch_sfb.c 	sfb_skb_cb(skb)->hashes[slot] = 0;
skb               355 net/sched/sch_sfb.c 			sfbhash = skb_get_hash_perturb(skb,
skb               359 net/sched/sch_sfb.c 			sfb_skb_cb(skb)->hashes[slot] = sfbhash;
skb               372 net/sched/sch_sfb.c 		if (sfb_rate_limit(skb, q)) {
skb               393 net/sched/sch_sfb.c 		if (INET_ECN_set_ce(skb)) {
skb               402 net/sched/sch_sfb.c 	ret = qdisc_enqueue(skb, child, to_free);
skb               404 net/sched/sch_sfb.c 		qdisc_qstats_backlog_inc(sch, skb);
skb               406 net/sched/sch_sfb.c 		increment_qlen(skb, q);
skb               414 net/sched/sch_sfb.c 	qdisc_drop(skb, sch, to_free);
skb               419 net/sched/sch_sfb.c 	kfree_skb(skb);
skb               427 net/sched/sch_sfb.c 	struct sk_buff *skb;
skb               429 net/sched/sch_sfb.c 	skb = child->dequeue(q->qdisc);
skb               431 net/sched/sch_sfb.c 	if (skb) {
skb               432 net/sched/sch_sfb.c 		qdisc_bstats_update(sch, skb);
skb               433 net/sched/sch_sfb.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               435 net/sched/sch_sfb.c 		decrement_qlen(skb, q);
skb               438 net/sched/sch_sfb.c 	return skb;
skb               565 net/sched/sch_sfb.c static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               582 net/sched/sch_sfb.c 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               585 net/sched/sch_sfb.c 	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
skb               587 net/sched/sch_sfb.c 	return nla_nest_end(skb, opts);
skb               590 net/sched/sch_sfb.c 	nla_nest_cancel(skb, opts);
skb               612 net/sched/sch_sfb.c 			  struct sk_buff *skb, struct tcmsg *tcm)
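sch_sfb.c hashes every packet twice (sfb_skb_cb()->hashes[0..1]) and has increment_qlen()/decrement_qlen() maintain per-bin counters at enqueue and dequeue; judging a flow by the minimum over its bins is what lets SFB spot unresponsive flows without per-flow state, Bloom-filter style. A hypothetical miniature with two levels of counters:

#include <stdint.h>
#include <stdio.h>

#define LEVELS 2	/* sch_sfb keeps two hashes over several levels */
#define BINS   16

/* Hypothetical per-bin queue-length counters, as increment_qlen() /
 * decrement_qlen() maintain for SFB's bins. */
static unsigned int qlen[LEVELS][BINS];

static void bins_inc(const uint32_t hash[LEVELS])
{
	for (int l = 0; l < LEVELS; l++)
		qlen[l][hash[l] % BINS]++;
}

static void bins_dec(const uint32_t hash[LEVELS])
{
	for (int l = 0; l < LEVELS; l++)
		if (qlen[l][hash[l] % BINS])
			qlen[l][hash[l] % BINS]--;
}

/* A flow only looks "big" if every bin it maps to is loaded, so a
 * collision in one level does not misclassify a small flow. */
static unsigned int bins_min(const uint32_t hash[LEVELS])
{
	unsigned int m = ~0u;

	for (int l = 0; l < LEVELS; l++)
		if (qlen[l][hash[l] % BINS] < m)
			m = qlen[l][hash[l] % BINS];
	return m;
}

int main(void)
{
	uint32_t flow_a[LEVELS] = { 3, 7 };
	uint32_t flow_b[LEVELS] = { 3, 9 };	/* collides with a in level 0 */

	bins_inc(flow_a);
	bins_inc(flow_a);
	bins_inc(flow_b);
	printf("a min=%u b min=%u\n", bins_min(flow_a), bins_min(flow_b));
	bins_dec(flow_a);
	return 0;
}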
skb               158 net/sched/sch_sfq.c 			     const struct sk_buff *skb)
skb               160 net/sched/sch_sfq.c 	return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
skb               163 net/sched/sch_sfq.c static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
skb               171 net/sched/sch_sfq.c 	if (TC_H_MAJ(skb->priority) == sch->handle &&
skb               172 net/sched/sch_sfq.c 	    TC_H_MIN(skb->priority) > 0 &&
skb               173 net/sched/sch_sfq.c 	    TC_H_MIN(skb->priority) <= q->divisor)
skb               174 net/sched/sch_sfq.c 		return TC_H_MIN(skb->priority);
skb               178 net/sched/sch_sfq.c 		return sfq_hash(q, skb) + 1;
skb               181 net/sched/sch_sfq.c 	result = tcf_classify(skb, fl, &res, false);
skb               259 net/sched/sch_sfq.c 	struct sk_buff *skb = slot->skblist_prev;
skb               261 net/sched/sch_sfq.c 	slot->skblist_prev = skb->prev;
skb               262 net/sched/sch_sfq.c 	skb->prev->next = (struct sk_buff *)slot;
skb               263 net/sched/sch_sfq.c 	skb->next = skb->prev = NULL;
skb               264 net/sched/sch_sfq.c 	return skb;
skb               270 net/sched/sch_sfq.c 	struct sk_buff *skb = slot->skblist_next;
skb               272 net/sched/sch_sfq.c 	slot->skblist_next = skb->next;
skb               273 net/sched/sch_sfq.c 	skb->next->prev = (struct sk_buff *)slot;
skb               274 net/sched/sch_sfq.c 	skb->next = skb->prev = NULL;
skb               275 net/sched/sch_sfq.c 	return skb;
skb               285 net/sched/sch_sfq.c static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
skb               287 net/sched/sch_sfq.c 	skb->prev = slot->skblist_prev;
skb               288 net/sched/sch_sfq.c 	skb->next = (struct sk_buff *)slot;
skb               289 net/sched/sch_sfq.c 	slot->skblist_prev->next = skb;
skb               290 net/sched/sch_sfq.c 	slot->skblist_prev = skb;
skb               297 net/sched/sch_sfq.c 	struct sk_buff *skb;
skb               306 net/sched/sch_sfq.c 		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
skb               307 net/sched/sch_sfq.c 		len = qdisc_pkt_len(skb);
skb               311 net/sched/sch_sfq.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               312 net/sched/sch_sfq.c 		qdisc_drop(skb, sch, to_free);
skb               346 net/sched/sch_sfq.c sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
skb               356 net/sched/sch_sfq.c 	hash = sfq_classify(skb, sch, &ret);
skb               360 net/sched/sch_sfq.c 		__qdisc_drop(skb, to_free);
skb               370 net/sched/sch_sfq.c 			return qdisc_drop(skb, sch, to_free);
skb               397 net/sched/sch_sfq.c 				if (INET_ECN_set_ce(skb)) {
skb               414 net/sched/sch_sfq.c 				if (INET_ECN_set_ce(skb)) {
skb               427 net/sched/sch_sfq.c 			return qdisc_drop(skb, sch, to_free);
skb               431 net/sched/sch_sfq.c 		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
skb               436 net/sched/sch_sfq.c 		slot_queue_add(slot, skb);
skb               442 net/sched/sch_sfq.c 	qdisc_qstats_backlog_inc(sch, skb);
skb               443 net/sched/sch_sfq.c 	slot->backlog += qdisc_pkt_len(skb);
skb               444 net/sched/sch_sfq.c 	slot_queue_add(slot, skb);
skb               470 net/sched/sch_sfq.c 		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
skb               483 net/sched/sch_sfq.c 	struct sk_buff *skb;
skb               499 net/sched/sch_sfq.c 	skb = slot_dequeue_head(slot);
skb               501 net/sched/sch_sfq.c 	qdisc_bstats_update(sch, skb);
skb               503 net/sched/sch_sfq.c 	qdisc_qstats_backlog_dec(sch, skb);
skb               504 net/sched/sch_sfq.c 	slot->backlog -= qdisc_pkt_len(skb);
skb               511 net/sched/sch_sfq.c 			return skb;
skb               515 net/sched/sch_sfq.c 		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
skb               517 net/sched/sch_sfq.c 	return skb;
skb               523 net/sched/sch_sfq.c 	struct sk_buff *skb;
skb               525 net/sched/sch_sfq.c 	while ((skb = sfq_dequeue(sch)) != NULL)
skb               526 net/sched/sch_sfq.c 		rtnl_kfree_skbs(skb, skb);
skb               538 net/sched/sch_sfq.c 	struct sk_buff *skb;
skb               552 net/sched/sch_sfq.c 			skb = slot_dequeue_head(slot);
skb               554 net/sched/sch_sfq.c 			__skb_queue_tail(&list, skb);
skb               562 net/sched/sch_sfq.c 	while ((skb = __skb_dequeue(&list)) != NULL) {
skb               563 net/sched/sch_sfq.c 		unsigned int hash = sfq_hash(q, skb);
skb               571 net/sched/sch_sfq.c 				qdisc_qstats_backlog_dec(sch, skb);
skb               572 net/sched/sch_sfq.c 				drop_len += qdisc_pkt_len(skb);
skb               573 net/sched/sch_sfq.c 				kfree_skb(skb);
skb               583 net/sched/sch_sfq.c 		slot_queue_add(slot, skb);
skb               588 net/sched/sch_sfq.c 		slot->backlog += qdisc_pkt_len(skb);
skb               788 net/sched/sch_sfq.c static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               791 net/sched/sch_sfq.c 	unsigned char *b = skb_tail_pointer(skb);
skb               815 net/sched/sch_sfq.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
skb               818 net/sched/sch_sfq.c 	return skb->len;
skb               821 net/sched/sch_sfq.c 	nlmsg_trim(skb, b);
skb               856 net/sched/sch_sfq.c 			  struct sk_buff *skb, struct tcmsg *tcm)
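A detail worth noting in the sch_sfq.c lines: slot_queue_add(), slot_dequeue_head() and slot_dequeue_tail() splice skbs through the slot itself, casting the slot to a struct sk_buff so it acts as the sentinel of a circular doubly linked list. The same trick in plain C, with a hypothetical struct node playing both roles:

#include <stdio.h>

/* Hypothetical flow slot: the slot doubles as the list sentinel,
 * as sch_sfq does by casting the slot to an skb. */
struct node {
	struct node *next, *prev;
	int len;
};

static void slot_init(struct node *slot)
{
	slot->next = slot->prev = slot;	/* empty ring points at itself */
}

/* slot_queue_add(): link at the tail, just before the sentinel. */
static void slot_add_tail(struct node *slot, struct node *n)
{
	n->prev = slot->prev;
	n->next = slot;
	slot->prev->next = n;
	slot->prev = n;
}

/* slot_dequeue_head(): unlink the node right after the sentinel. */
static struct node *slot_del_head(struct node *slot)
{
	struct node *n = slot->next;

	if (n == slot)
		return NULL;		/* empty slot */
	slot->next = n->next;
	n->next->prev = slot;
	n->next = n->prev = NULL;
	return n;
}

int main(void)
{
	struct node slot, p1 = { .len = 100 }, p2 = { .len = 60 };

	slot_init(&slot);
	slot_add_tail(&slot, &p1);
	slot_add_tail(&slot, &p2);
	printf("%d\n", slot_del_head(&slot)->len);	/* 100 */
	printf("%d\n", slot_del_head(&slot)->len);	/* 60 */
	return 0;
}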
skb                68 net/sched/sch_skbprio.c static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb                79 net/sched/sch_skbprio.c 	prio = min(skb->priority, max_priority);
skb                83 net/sched/sch_skbprio.c 		__skb_queue_tail(qdisc, skb);
skb                84 net/sched/sch_skbprio.c 		qdisc_qstats_backlog_inc(sch, skb);
skb                85 net/sched/sch_skbprio.c 		q->qstats[prio].backlog += qdisc_pkt_len(skb);
skb               103 net/sched/sch_skbprio.c 		return qdisc_drop(skb, sch, to_free);
skb               106 net/sched/sch_skbprio.c 	__skb_queue_tail(qdisc, skb);
skb               107 net/sched/sch_skbprio.c 	qdisc_qstats_backlog_inc(sch, skb);
skb               108 net/sched/sch_skbprio.c 	q->qstats[prio].backlog += qdisc_pkt_len(skb);
skb               143 net/sched/sch_skbprio.c 	struct sk_buff *skb = __skb_dequeue(hpq);
skb               145 net/sched/sch_skbprio.c 	if (unlikely(!skb))
skb               149 net/sched/sch_skbprio.c 	qdisc_qstats_backlog_dec(sch, skb);
skb               150 net/sched/sch_skbprio.c 	qdisc_bstats_update(sch, skb);
skb               152 net/sched/sch_skbprio.c 	q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);
skb               164 net/sched/sch_skbprio.c 	return skb;
skb               199 net/sched/sch_skbprio.c static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               205 net/sched/sch_skbprio.c 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
skb               208 net/sched/sch_skbprio.c 	return skb->len;
skb               247 net/sched/sch_skbprio.c 			     struct sk_buff *skb, struct tcmsg *tcm)
skb               184 net/sched/sch_taprio.c static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
skb               202 net/sched/sch_taprio.c 	tc = netdev_get_prio_tc_map(dev, skb->priority);
skb               203 net/sched/sch_taprio.c 	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));
skb               261 net/sched/sch_taprio.c static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
skb               272 net/sched/sch_taprio.c 	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
skb               293 net/sched/sch_taprio.c static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
skb               295 net/sched/sch_taprio.c 	unsigned int offset = skb_network_offset(skb);
skb               300 net/sched/sch_taprio.c 	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
skb               312 net/sched/sch_taprio.c 			ipv6h = skb_header_pointer(skb, offset,
skb               324 net/sched/sch_taprio.c 	return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
skb               342 net/sched/sch_taprio.c static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
skb               355 net/sched/sch_taprio.c 	tcp_tstamp = get_tcp_tstamp(q, skb);
skb               370 net/sched/sch_taprio.c 	len = qdisc_pkt_len(skb);
skb               376 net/sched/sch_taprio.c 		entry = find_entry_to_transmit(skb, sch, sched, admin,
skb               413 net/sched/sch_taprio.c static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               420 net/sched/sch_taprio.c 	queue = skb_get_queue_mapping(skb);
skb               424 net/sched/sch_taprio.c 		return qdisc_drop(skb, sch, to_free);
skb               426 net/sched/sch_taprio.c 	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
skb               427 net/sched/sch_taprio.c 		if (!is_valid_interval(skb, sch))
skb               428 net/sched/sch_taprio.c 			return qdisc_drop(skb, sch, to_free);
skb               430 net/sched/sch_taprio.c 		skb->tstamp = get_packet_txtime(skb, sch);
skb               431 net/sched/sch_taprio.c 		if (!skb->tstamp)
skb               432 net/sched/sch_taprio.c 			return qdisc_drop(skb, sch, to_free);
skb               435 net/sched/sch_taprio.c 	qdisc_qstats_backlog_inc(sch, skb);
skb               438 net/sched/sch_taprio.c 	return qdisc_enqueue(skb, child, to_free);
skb               446 net/sched/sch_taprio.c 	struct sk_buff *skb;
skb               466 net/sched/sch_taprio.c 		skb = child->ops->peek(child);
skb               467 net/sched/sch_taprio.c 		if (!skb)
skb               471 net/sched/sch_taprio.c 			return skb;
skb               473 net/sched/sch_taprio.c 		prio = skb->priority;
skb               479 net/sched/sch_taprio.c 		return skb;
skb               489 net/sched/sch_taprio.c 	struct sk_buff *skb;
skb               498 net/sched/sch_taprio.c 		skb = child->ops->peek(child);
skb               499 net/sched/sch_taprio.c 		if (!skb)
skb               502 net/sched/sch_taprio.c 		return skb;
skb               526 net/sched/sch_taprio.c 	struct sk_buff *skb = NULL;
skb               554 net/sched/sch_taprio.c 			skb = child->ops->dequeue(child);
skb               555 net/sched/sch_taprio.c 			if (!skb)
skb               560 net/sched/sch_taprio.c 		skb = child->ops->peek(child);
skb               561 net/sched/sch_taprio.c 		if (!skb)
skb               564 net/sched/sch_taprio.c 		prio = skb->priority;
skb               568 net/sched/sch_taprio.c 			skb = NULL;
skb               572 net/sched/sch_taprio.c 		len = qdisc_pkt_len(skb);
skb               581 net/sched/sch_taprio.c 			skb = NULL;
skb               588 net/sched/sch_taprio.c 			skb = NULL;
skb               592 net/sched/sch_taprio.c 		skb = child->ops->dequeue(child);
skb               593 net/sched/sch_taprio.c 		if (unlikely(!skb))
skb               597 net/sched/sch_taprio.c 		qdisc_bstats_update(sch, skb);
skb               598 net/sched/sch_taprio.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               607 net/sched/sch_taprio.c 	return skb;
skb               614 net/sched/sch_taprio.c 	struct sk_buff *skb;
skb               623 net/sched/sch_taprio.c 		skb = child->ops->dequeue(child);
skb               624 net/sched/sch_taprio.c 		if (unlikely(!skb))
skb               627 net/sched/sch_taprio.c 		qdisc_bstats_update(sch, skb);
skb               628 net/sched/sch_taprio.c 		qdisc_qstats_backlog_dec(sch, skb);
skb               631 net/sched/sch_taprio.c 		return skb;
skb              1777 net/sched/sch_taprio.c static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
skb              1798 net/sched/sch_taprio.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb              1802 net/sched/sch_taprio.c 	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
skb              1806 net/sched/sch_taprio.c 	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
skb              1809 net/sched/sch_taprio.c 	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
skb              1813 net/sched/sch_taprio.c 	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
skb              1816 net/sched/sch_taprio.c 	if (oper && dump_schedule(skb, oper))
skb              1822 net/sched/sch_taprio.c 	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
skb              1826 net/sched/sch_taprio.c 	if (dump_schedule(skb, admin))
skb              1829 net/sched/sch_taprio.c 	nla_nest_end(skb, sched_nest);
skb              1834 net/sched/sch_taprio.c 	return nla_nest_end(skb, nest);
skb              1837 net/sched/sch_taprio.c 	nla_nest_cancel(skb, sched_nest);
skb              1840 net/sched/sch_taprio.c 	nla_nest_cancel(skb, nest);
skb              1867 net/sched/sch_taprio.c 			     struct sk_buff *skb, struct tcmsg *tcm)
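sch_taprio.c gates transmission on a repeating schedule: find_entry_to_transmit() locates the schedule entry covering a packet's transmit time and checks the entry's gate mask against the packet's traffic class. A reduced sketch of that interval walk, assuming a flat array of (interval, gate_mask) entries; gate_open() is a hypothetical name:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical cut-down schedule entry: a repeating cycle of
 * (interval, open-gate bitmask) pairs, like taprio's sched_entry list. */
struct entry {
	uint64_t interval;	/* ns this entry stays active */
	uint32_t gate_mask;	/* bit per traffic class allowed to send */
};

/* Roughly the question find_entry_to_transmit() answers: at absolute
 * time t (ns), may traffic class tc transmit? */
static bool gate_open(const struct entry *e, int n, uint64_t cycle,
		      uint64_t t, int tc)
{
	uint64_t off = t % cycle;	/* position within the cycle */

	for (int i = 0; i < n; i++) {
		if (off < e[i].interval)
			return (e[i].gate_mask >> tc) & 1;
		off -= e[i].interval;
	}
	return false;
}

int main(void)
{
	/* 1 ms cycle: first 300 us only TC0, remaining 700 us only TC1. */
	struct entry sched[] = {
		{ 300000, 1u << 0 },
		{ 700000, 1u << 1 },
	};

	printf("%d\n", gate_open(sched, 2, 1000000, 150000, 0));  /* 1 */
	printf("%d\n", gate_open(sched, 2, 1000000, 500000, 0));  /* 0 */
	return 0;
}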
skb               143 net/sched/sch_tbf.c static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
skb               148 net/sched/sch_tbf.c 	netdev_features_t features = netif_skb_features(skb);
skb               149 net/sched/sch_tbf.c 	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
skb               152 net/sched/sch_tbf.c 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
skb               155 net/sched/sch_tbf.c 		return qdisc_drop(skb, sch, to_free);
skb               175 net/sched/sch_tbf.c 	consume_skb(skb);
skb               179 net/sched/sch_tbf.c static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb               183 net/sched/sch_tbf.c 	unsigned int len = qdisc_pkt_len(skb);
skb               186 net/sched/sch_tbf.c 	if (qdisc_pkt_len(skb) > q->max_size) {
skb               187 net/sched/sch_tbf.c 		if (skb_is_gso(skb) &&
skb               188 net/sched/sch_tbf.c 		    skb_gso_validate_mac_len(skb, q->max_size))
skb               189 net/sched/sch_tbf.c 			return tbf_segment(skb, sch, to_free);
skb               190 net/sched/sch_tbf.c 		return qdisc_drop(skb, sch, to_free);
skb               192 net/sched/sch_tbf.c 	ret = qdisc_enqueue(skb, q->qdisc, to_free);
skb               212 net/sched/sch_tbf.c 	struct sk_buff *skb;
skb               214 net/sched/sch_tbf.c 	skb = q->qdisc->ops->peek(q->qdisc);
skb               216 net/sched/sch_tbf.c 	if (skb) {
skb               220 net/sched/sch_tbf.c 		unsigned int len = qdisc_pkt_len(skb);
skb               237 net/sched/sch_tbf.c 			skb = qdisc_dequeue_peeked(q->qdisc);
skb               238 net/sched/sch_tbf.c 			if (unlikely(!skb))
skb               244 net/sched/sch_tbf.c 			qdisc_qstats_backlog_dec(sch, skb);
skb               246 net/sched/sch_tbf.c 			qdisc_bstats_update(sch, skb);
skb               247 net/sched/sch_tbf.c 			return skb;
skb               440 net/sched/sch_tbf.c static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
skb               447 net/sched/sch_tbf.c 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
skb               459 net/sched/sch_tbf.c 	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
skb               462 net/sched/sch_tbf.c 	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
skb               467 net/sched/sch_tbf.c 	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
skb               471 net/sched/sch_tbf.c 	return nla_nest_end(skb, nest);
skb               474 net/sched/sch_tbf.c 	nla_nest_cancel(skb, nest);
skb               479 net/sched/sch_tbf.c 			  struct sk_buff *skb, struct tcmsg *tcm)
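sch_tbf.c is the classic token bucket: tbf_dequeue() peeks the head packet, asks whether enough tokens have accrued for its length, and otherwise arms a watchdog; oversized GSO packets are first split by tbf_segment(). The bucket arithmetic in a self-contained sketch, with hypothetical field names and seconds in place of the kernel's psched time units:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical token bucket: tokens accrue at rate bytes/sec up to
 * burst; a packet may leave only if enough tokens are available,
 * which is the check tbf_dequeue() makes before releasing the
 * peeked skb. */
struct tbf {
	double rate;		/* bytes per second */
	double burst;		/* bucket depth in bytes */
	double tokens;		/* current fill */
	double t_last;		/* last update, seconds */
};

static bool tbf_admit(struct tbf *b, double now, unsigned int len)
{
	/* Refill for elapsed time, capped at the bucket depth. */
	b->tokens += (now - b->t_last) * b->rate;
	if (b->tokens > b->burst)
		b->tokens = b->burst;
	b->t_last = now;

	if (b->tokens < len)
		return false;	/* the kernel would schedule a watchdog */
	b->tokens -= len;
	return true;
}

int main(void)
{
	struct tbf b = { .rate = 1000.0, .burst = 1500.0, .tokens = 1500.0 };

	printf("%d\n", tbf_admit(&b, 0.0, 1000));  /* 1: burst covers it */
	printf("%d\n", tbf_admit(&b, 0.0, 1000));  /* 0: must wait */
	printf("%d\n", tbf_admit(&b, 1.0, 1000));  /* 1: refilled */
	return 0;
}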
skb                76 net/sched/sch_teql.c teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
skb                82 net/sched/sch_teql.c 		__skb_queue_tail(&q->q, skb);
skb                86 net/sched/sch_teql.c 	return qdisc_drop(skb, sch, to_free);
skb                94 net/sched/sch_teql.c 	struct sk_buff *skb;
skb                97 net/sched/sch_teql.c 	skb = __skb_dequeue(&dat->q);
skb               101 net/sched/sch_teql.c 	if (skb == NULL) {
skb               108 net/sched/sch_teql.c 		qdisc_bstats_update(sch, skb);
skb               111 net/sched/sch_teql.c 	return skb;
skb               216 net/sched/sch_teql.c __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
skb               223 net/sched/sch_teql.c 	n = dst_neigh_lookup_skb(dst, skb);
skb               242 net/sched/sch_teql.c 		err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
skb               243 net/sched/sch_teql.c 				      haddr, NULL, skb->len);
skb               254 net/sched/sch_teql.c static inline int teql_resolve(struct sk_buff *skb,
skb               259 net/sched/sch_teql.c 	struct dst_entry *dst = skb_dst(skb);
skb               269 net/sched/sch_teql.c 	res = __teql_resolve(skb, skb_res, dev, txq, dst);
skb               275 net/sched/sch_teql.c static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
skb               281 net/sched/sch_teql.c 	int subq = skb_get_queue_mapping(skb);
skb               306 net/sched/sch_teql.c 		switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
skb               309 net/sched/sch_teql.c 				unsigned int length = qdisc_pkt_len(skb);
skb               312 net/sched/sch_teql.c 				    netdev_start_xmit(skb, slave, slave_txq, false) ==
skb               333 net/sched/sch_teql.c 		__skb_pull(skb, skb_network_offset(skb));
skb               337 net/sched/sch_teql.c 		skb_res = skb;
skb               349 net/sched/sch_teql.c 	dev_kfree_skb(skb);
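sch_teql.c's master device (teql_master_xmit()) walks its slave list, resolving and transmitting on the first slave whose queue is free, and resumes from where it stopped for the next packet. A loose sketch of that rotation using a busy bitmask; pick_slave() and the mask are invented for illustration:

#include <stdio.h>

#define SLAVES 3

/* Hypothetical miniature of the teql master's round-robin over its
 * slave devices, remembering where the last walk stopped. */
static int next_slave;

static int pick_slave(int busy_mask)
{
	for (int i = 0; i < SLAVES; i++) {
		int s = (next_slave + i) % SLAVES;

		if (!(busy_mask & (1 << s))) {
			next_slave = (s + 1) % SLAVES;	/* resume after s */
			return s;
		}
	}
	return -1;	/* every slave busy: requeue */
}

int main(void)
{
	printf("%d\n", pick_slave(0));		/* 0 */
	printf("%d\n", pick_slave(0));		/* 1 */
	printf("%d\n", pick_slave(1 << 2));	/* 2 busy, wraps to 0 */
	return 0;
}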
skb              1015 net/sctp/associola.c 				chunk->auth_chunk = skb_clone(chunk->skb,
skb               707 net/sctp/auth.c 			      struct sk_buff *skb, struct sctp_auth_chunk *auth,
skb               736 net/sctp/auth.c 	end = skb_tail_pointer(skb);
skb               278 net/sctp/chunk.c 		__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr -
skb               279 net/sctp/chunk.c 				       chunk->skb->data);
skb                71 net/sctp/diag.c static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
skb                83 net/sctp/diag.c 	attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
skb                97 net/sctp/diag.c static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
skb               105 net/sctp/diag.c 	attr = nla_reserve(skb, INET_DIAG_PEERS,
skb               124 net/sctp/diag.c 			       struct sk_buff *skb,
skb               139 net/sctp/diag.c 	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
skb               156 net/sctp/diag.c 	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
skb               181 net/sctp/diag.c 		if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
skb               188 net/sctp/diag.c 		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
skb               202 net/sctp/diag.c 	if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
skb               206 net/sctp/diag.c 		if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
skb               209 net/sctp/diag.c 	if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
skb               212 net/sctp/diag.c 	nlmsg_end(skb, nlh);
skb               216 net/sctp/diag.c 	nlmsg_cancel(skb, nlh);
skb               222 net/sctp/diag.c 	struct sk_buff *skb;
skb               253 net/sctp/diag.c 	struct sk_buff *in_skb = commp->skb;
skb               300 net/sctp/diag.c 	struct sk_buff *skb = commp->skb;
skb               319 net/sctp/diag.c 		    inet_sctp_diag_fill(sk, NULL, skb, r,
skb               320 net/sctp/diag.c 					sk_user_ns(NETLINK_CB(cb->skb).sk),
skb               321 net/sctp/diag.c 					NETLINK_CB(cb->skb).portid,
skb               330 net/sctp/diag.c 		if (inet_sctp_diag_fill(sk, assoc, skb, r,
skb               331 net/sctp/diag.c 					sk_user_ns(NETLINK_CB(cb->skb).sk),
skb               332 net/sctp/diag.c 					NETLINK_CB(cb->skb).portid,
skb               372 net/sctp/diag.c 	struct sk_buff *skb = commp->skb;
skb               375 net/sctp/diag.c 	struct net *net = sock_net(skb->sk);
skb               400 net/sctp/diag.c 	if (inet_sctp_diag_fill(sk, NULL, skb, r,
skb               401 net/sctp/diag.c 				sk_user_ns(NETLINK_CB(cb->skb).sk),
skb               402 net/sctp/diag.c 				NETLINK_CB(cb->skb).portid,
skb               438 net/sctp/diag.c 		.skb = in_skb,
skb               468 net/sctp/diag.c static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
skb               472 net/sctp/diag.c 	struct net *net = sock_net(skb->sk);
skb               474 net/sctp/diag.c 		.skb = skb,
skb               477 net/sctp/diag.c 		.net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
skb               356 net/sctp/endpointola.c 				chunk->auth_chunk = skb_clone(chunk->skb,
skb                50 net/sctp/input.c 				      struct sk_buff *skb,
skb                55 net/sctp/input.c 					struct net *net, struct sk_buff *skb,
skb                64 net/sctp/input.c static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
skb                68 net/sctp/input.c static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
skb                70 net/sctp/input.c 	struct sctphdr *sh = sctp_hdr(skb);
skb                72 net/sctp/input.c 	__le32 val = sctp_compute_cksum(skb, 0);
skb                85 net/sctp/input.c int sctp_rcv(struct sk_buff *skb)
skb                97 net/sctp/input.c 	struct net *net = dev_net(skb->dev);
skb                98 net/sctp/input.c 	bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb);
skb               100 net/sctp/input.c 	if (skb->pkt_type != PACKET_HOST)
skb               108 net/sctp/input.c 	if (skb->len < sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) +
skb               109 net/sctp/input.c 		       skb_transport_offset(skb))
skb               116 net/sctp/input.c 	if ((!is_gso && skb_linearize(skb)) ||
skb               117 net/sctp/input.c 	    !pskb_may_pull(skb, sizeof(struct sctphdr)))
skb               121 net/sctp/input.c 	__skb_pull(skb, skb_transport_offset(skb));
skb               123 net/sctp/input.c 	skb->csum_valid = 0; /* Previous value not applicable */
skb               124 net/sctp/input.c 	if (skb_csum_unnecessary(skb))
skb               125 net/sctp/input.c 		__skb_decr_checksum_unnecessary(skb);
skb               128 net/sctp/input.c 		 sctp_rcv_checksum(net, skb) < 0)
skb               130 net/sctp/input.c 	skb->csum_valid = 1;
skb               132 net/sctp/input.c 	__skb_pull(skb, sizeof(struct sctphdr));
skb               134 net/sctp/input.c 	family = ipver2af(ip_hdr(skb)->version);
skb               138 net/sctp/input.c 	SCTP_INPUT_CB(skb)->af = af;
skb               141 net/sctp/input.c 	af->from_skb(&src, skb, 1);
skb               142 net/sctp/input.c 	af->from_skb(&dest, skb, 0);
skb               155 net/sctp/input.c 	if (!af->addr_valid(&src, NULL, skb) ||
skb               156 net/sctp/input.c 	    !af->addr_valid(&dest, NULL, skb))
skb               159 net/sctp/input.c 	asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
skb               162 net/sctp/input.c 		ep = __sctp_rcv_lookup_endpoint(net, skb, &dest, &src);
skb               172 net/sctp/input.c 	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
skb               196 net/sctp/input.c 		if (sctp_rcv_ootb(skb)) {
skb               202 net/sctp/input.c 	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
skb               204 net/sctp/input.c 	nf_reset_ct(skb);
skb               206 net/sctp/input.c 	if (sk_filter(sk, skb))
skb               210 net/sctp/input.c 	chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC);
skb               213 net/sctp/input.c 	SCTP_INPUT_CB(skb)->chunk = chunk;
skb               219 net/sctp/input.c 	chunk->sctp_hdr = sctp_hdr(skb);
skb               247 net/sctp/input.c 		if (sctp_add_backlog(sk, skb)) {
skb               250 net/sctp/input.c 			skb = NULL; /* sctp_chunk_free already freed the skb */
skb               271 net/sctp/input.c 	kfree_skb(skb);
skb               289 net/sctp/input.c int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
skb               291 net/sctp/input.c 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
skb               325 net/sctp/input.c 			if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
skb               340 net/sctp/input.c 			if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
skb               360 net/sctp/input.c static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
skb               362 net/sctp/input.c 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
skb               367 net/sctp/input.c 	ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
skb               420 net/sctp/input.c 			struct sk_buff *skb)
skb               428 net/sctp/input.c 		dst->ops->redirect(dst, sk, skb);
skb               471 net/sctp/input.c struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
skb               493 net/sctp/input.c 	af->from_skb(&saddr, skb, 1);
skb               494 net/sctp/input.c 	af->from_skb(&daddr, skb, 0);
skb               519 net/sctp/input.c 		chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
skb               571 net/sctp/input.c int sctp_v4_err(struct sk_buff *skb, __u32 info)
skb               573 net/sctp/input.c 	const struct iphdr *iph = (const struct iphdr *)skb->data;
skb               575 net/sctp/input.c 	const int type = icmp_hdr(skb)->type;
skb               576 net/sctp/input.c 	const int code = icmp_hdr(skb)->code;
skb               583 net/sctp/input.c 	struct net *net = dev_net(skb->dev);
skb               586 net/sctp/input.c 	saveip = skb->network_header;
skb               587 net/sctp/input.c 	savesctp = skb->transport_header;
skb               588 net/sctp/input.c 	skb_reset_network_header(skb);
skb               589 net/sctp/input.c 	skb_set_transport_header(skb, ihlen);
skb               590 net/sctp/input.c 	sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
skb               592 net/sctp/input.c 	skb->network_header = saveip;
skb               593 net/sctp/input.c 	skb->transport_header = savesctp;
skb               634 net/sctp/input.c 		sctp_icmp_redirect(sk, transport, skb);
skb               665 net/sctp/input.c static int sctp_rcv_ootb(struct sk_buff *skb)
skb               673 net/sctp/input.c 		if (offset + sizeof(_ch) > skb->len)
skb               676 net/sctp/input.c 		ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
skb               683 net/sctp/input.c 		if (ch_end > skb->len)
skb               705 net/sctp/input.c 		if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
skb               709 net/sctp/input.c 	} while (ch_end < skb->len);
skb               828 net/sctp/input.c 					struct net *net, struct sk_buff *skb,
skb               856 net/sctp/input.c 		sk = reuseport_select_sock(sk, phash, skb,
skb              1097 net/sctp/input.c 	struct sk_buff *skb,
skb              1103 net/sctp/input.c 	struct sctphdr *sh = sctp_hdr(skb);
skb              1124 net/sctp/input.c 	init = (struct sctp_init_chunk *)skb->data;
skb              1193 net/sctp/input.c 				      struct sk_buff *skb,
skb              1206 net/sctp/input.c 	ch = (struct sctp_chunkhdr *)skb->data;
skb              1213 net/sctp/input.c 		if (ch_end > skb_tail_pointer(skb))
skb              1237 net/sctp/input.c 						sctp_hdr(skb)->source,
skb              1248 net/sctp/input.c 	} while (ch_end < skb_tail_pointer(skb));
skb              1260 net/sctp/input.c 				      struct sk_buff *skb,
skb              1271 net/sctp/input.c 	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
skb              1274 net/sctp/input.c 	ch = (struct sctp_chunkhdr *)skb->data;
skb              1281 net/sctp/input.c 	if (SCTP_PAD4(ntohs(ch->length)) > skb->len)
skb              1286 net/sctp/input.c 		return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
skb              1288 net/sctp/input.c 	return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
skb              1293 net/sctp/input.c 				      struct sk_buff *skb,
skb              1308 net/sctp/input.c 	asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
skb               124 net/sctp/inqueue.c 			if (chunk->head_skb == chunk->skb) {
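Much of net/sctp/input.c above is defensive parsing: sctp_rcv_ootb() and __sctp_rcv_walk_lookup() step through the chunk headers of one packet, advancing by each chunk's declared length rounded up with SCTP_PAD4() and bailing out on truncation. A standalone sketch of that walk over a byte buffer; walk_chunks() is a hypothetical reduction:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define PAD4(x) (((x) + 3) & ~3u)	/* SCTP_PAD4 */

/* On-the-wire SCTP chunk header: type, flags, length (network order). */
struct chunkhdr {
	uint8_t type;
	uint8_t flags;
	uint16_t length;
};

/* Walk chunks the way sctp_rcv_ootb() does: advance by the padded
 * declared length, stopping on zero-length or truncated chunks. */
static void walk_chunks(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct chunkhdr) <= len) {
		struct chunkhdr ch;
		size_t chlen;

		memcpy(&ch, buf + off, sizeof(ch));
		chlen = ntohs(ch.length);
		if (chlen < sizeof(ch) || off + chlen > len)
			break;		/* malformed or truncated */
		printf("chunk type %d, %zu bytes\n", ch.type, chlen);
		off += PAD4(chlen);
	}
}

int main(void)
{
	uint8_t pkt[12] = { 0 };
	struct chunkhdr ch = { .type = 1, .length = htons(6) };

	memcpy(pkt, &ch, sizeof(ch));		/* 6-byte chunk, padded to 8 */
	ch.type = 0;
	ch.length = htons(4);
	memcpy(pkt + 8, &ch, sizeof(ch));	/* header-only second chunk */
	walk_chunks(pkt, sizeof(pkt));
	return 0;
}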
skb               125 net/sctp/inqueue.c 				chunk->skb = skb_shinfo(chunk->skb)->frag_list;
skb               128 net/sctp/inqueue.c 			if (chunk->skb->next) {
skb               129 net/sctp/inqueue.c 				chunk->skb = chunk->skb->next;
skb               134 net/sctp/inqueue.c 				chunk->skb = chunk->head_skb;
skb               141 net/sctp/inqueue.c 			skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
skb               158 net/sctp/inqueue.c 		if (skb_is_gso(chunk->skb) && skb_is_gso_sctp(chunk->skb)) {
skb               162 net/sctp/inqueue.c 			if (skb_shinfo(chunk->skb)->frag_list)
skb               163 net/sctp/inqueue.c 				chunk->head_skb = chunk->skb;
skb               166 net/sctp/inqueue.c 			if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
skb               167 net/sctp/inqueue.c 				chunk->skb = skb_shinfo(chunk->skb)->frag_list;
skb               169 net/sctp/inqueue.c 			if (WARN_ON(!chunk->skb)) {
skb               170 net/sctp/inqueue.c 				__SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
skb               177 net/sctp/inqueue.c 			sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);
skb               183 net/sctp/inqueue.c 		ch = (struct sctp_chunkhdr *)chunk->skb->data;
skb               192 net/sctp/inqueue.c 				*cb = SCTP_INPUT_CB(chunk->skb),
skb               202 net/sctp/inqueue.c 	skb_pull(chunk->skb, sizeof(*ch));
skb               205 net/sctp/inqueue.c 	if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
skb               208 net/sctp/inqueue.c 	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
skb               211 net/sctp/inqueue.c 		chunk->chunk_end = skb_tail_pointer(chunk->skb);
skb               221 net/sctp/inqueue.c 		 ntohs(chunk->chunk_hdr->length), chunk->skb->len);
skb               125 net/sctp/ipv6.c static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               135 net/sctp/ipv6.c 	struct net *net = dev_net(skb->dev);
skb               137 net/sctp/ipv6.c 	idev = in6_dev_get(skb->dev);
skb               140 net/sctp/ipv6.c 	saveip	 = skb->network_header;
skb               141 net/sctp/ipv6.c 	savesctp = skb->transport_header;
skb               142 net/sctp/ipv6.c 	skb_reset_network_header(skb);
skb               143 net/sctp/ipv6.c 	skb_set_transport_header(skb, offset);
skb               144 net/sctp/ipv6.c 	sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
skb               146 net/sctp/ipv6.c 	skb->network_header   = saveip;
skb               147 net/sctp/ipv6.c 	skb->transport_header = savesctp;
skb               170 net/sctp/ipv6.c 		sctp_icmp_redirect(sk, transport, skb);
skb               194 net/sctp/ipv6.c static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
skb               196 net/sctp/ipv6.c 	struct sock *sk = skb->sk;
skb               202 net/sctp/ipv6.c 	pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
skb               203 net/sctp/ipv6.c 		 skb->len, &fl6->saddr, &fl6->daddr);
skb               212 net/sctp/ipv6.c 		skb->ignore_df = 1;
skb               217 net/sctp/ipv6.c 	res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
skb               477 net/sctp/ipv6.c static void sctp_v6_from_skb(union sctp_addr *addr, struct sk_buff *skb,
skb               481 net/sctp/ipv6.c 	struct sctphdr *sh = sctp_hdr(skb);
skb               486 net/sctp/ipv6.c 	addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif;
skb               490 net/sctp/ipv6.c 		sa->sin6_addr = ipv6_hdr(skb)->saddr;
skb               493 net/sctp/ipv6.c 		sa->sin6_addr = ipv6_hdr(skb)->daddr;
skb               659 net/sctp/ipv6.c 			      const struct sk_buff *skb)
skb               671 net/sctp/ipv6.c 		return sctp_get_af_specific(AF_INET)->addr_valid(addr, sp, skb);
skb               782 net/sctp/ipv6.c static int sctp_v6_skb_iif(const struct sk_buff *skb)
skb               784 net/sctp/ipv6.c 	return IP6CB(skb)->iif;
skb               788 net/sctp/ipv6.c static int sctp_v6_is_ce(const struct sk_buff *skb)
skb               790 net/sctp/ipv6.c 	return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20);
skb               838 net/sctp/ipv6.c static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
skb               848 net/sctp/ipv6.c 	sh = sctp_hdr(skb);
skb               850 net/sctp/ipv6.c 	if (ip_hdr(skb)->version == 4) {
skb               853 net/sctp/ipv6.c 		addr->v4.sin_addr.s_addr = ip_hdr(skb)->saddr;
skb               858 net/sctp/ipv6.c 		addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
skb               860 net/sctp/ipv6.c 			addr->v6.sin6_scope_id = sctp_v6_skb_iif(skb);
skb               865 net/sctp/ipv6.c 	*addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr);
skb              1055 net/sctp/ipv6.c static int sctp6_rcv(struct sk_buff *skb)
skb              1057 net/sctp/ipv6.c 	return sctp_rcv(skb) ? -1 : 0;
skb                26 net/sctp/offload.c static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
skb                28 net/sctp/offload.c 	skb->ip_summed = CHECKSUM_NONE;
skb                29 net/sctp/offload.c 	skb->csum_not_inet = 0;
skb                30 net/sctp/offload.c 	gso_reset_checksum(skb, ~0);
skb                31 net/sctp/offload.c 	return sctp_compute_cksum(skb, skb_transport_offset(skb));
skb                34 net/sctp/offload.c static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
skb                40 net/sctp/offload.c 	if (!skb_is_gso_sctp(skb))
skb                43 net/sctp/offload.c 	sh = sctp_hdr(skb);
skb                44 net/sctp/offload.c 	if (!pskb_may_pull(skb, sizeof(*sh)))
skb                47 net/sctp/offload.c 	__skb_pull(skb, sizeof(*sh));
skb                49 net/sctp/offload.c 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
skb                51 net/sctp/offload.c 		struct skb_shared_info *pinfo = skb_shinfo(skb);
skb                55 net/sctp/offload.c 		if (skb->len != skb->data_len) {
skb                60 net/sctp/offload.c 		skb_walk_frags(skb, frag_iter)
skb                67 net/sctp/offload.c 	segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
skb                73 net/sctp/offload.c 		for (skb = segs; skb; skb = skb->next) {
skb                74 net/sctp/offload.c 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb                75 net/sctp/offload.c 				sh = sctp_hdr(skb);
skb                76 net/sctp/offload.c 				sh->checksum = sctp_gso_make_checksum(skb);
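The net/sctp/offload.c entries revolve around the packet checksum: sctp_gso_make_checksum() resets the field and recomputes via sctp_compute_cksum(), which is CRC32c. A reference bitwise CRC32c for illustration only; the kernel uses the crypto API or hardware offload, never this loop, and the printed value is the standard check result for "123456789":

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC32c (Castagnoli, reflected polynomial 0x82F63B78),
 * the checksum SCTP carries in its common header. */
static uint32_t crc32c(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xFFFFFFFFu;

	for (size_t i = 0; i < len; i++) {
		crc ^= buf[i];
		for (int b = 0; b < 8; b++)
			crc = (crc >> 1) ^ (crc & 1 ? 0x82F63B78u : 0);
	}
	return ~crc;
}

int main(void)
{
	/* Known check value: CRC32c("123456789") == 0xE3069283. */
	printf("%08X\n", crc32c((const uint8_t *)"123456789", 9));
	return 0;
}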
skb               185 net/sctp/output.c 		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
skb               194 net/sctp/output.c 				chunk->skb->sk->sk_err = -error;
skb               390 net/sctp/output.c static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
skb               393 net/sctp/output.c 		skb_shinfo(head)->frag_list = skb;
skb               395 net/sctp/output.c 		SCTP_OUTPUT_CB(head)->last->next = skb;
skb               396 net/sctp/output.c 	SCTP_OUTPUT_CB(head)->last = skb;
skb               398 net/sctp/output.c 	head->truesize += skb->truesize;
skb               399 net/sctp/output.c 	head->data_len += skb->len;
skb               400 net/sctp/output.c 	head->len += skb->len;
skb               401 net/sctp/output.c 	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
skb               403 net/sctp/output.c 	__skb_header_release(skb);
skb               431 net/sctp/output.c 			int padded = SCTP_PAD4(chunk->skb->len);
skb               462 net/sctp/output.c 			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
skb               464 net/sctp/output.c 				skb_put_zero(chunk->skb, padding);
skb               470 net/sctp/output.c 			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);
skb               477 net/sctp/output.c 				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
skb               480 net/sctp/output.c 			pkt_size -= SCTP_PAD4(chunk->skb->len);
skb               560 net/sctp/output.c 	sk = chunk->skb->sk;
skb               718 net/sctp/output.c 	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
skb               806 net/sctp/output.c 			maxsize -= SCTP_PAD4(packet->auth->skb->len);
skb                65 net/sctp/outqueue.c 	q->out_qlen += ch->skb->len;
skb                86 net/sctp/outqueue.c 	q->out_qlen += ch->skb->len;
skb               373 net/sctp/outqueue.c 		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
skb               407 net/sctp/outqueue.c 		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
skb              1087 net/sctp/outqueue.c 			 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
skb              1088 net/sctp/outqueue.c 			 refcount_read(&chunk->skb->users) : -1);
skb               214 net/sctp/protocol.c static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
skb               218 net/sctp/protocol.c 	struct sctphdr *sh = sctp_hdr(skb);
skb               225 net/sctp/protocol.c 		sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
skb               228 net/sctp/protocol.c 		sa->sin_addr.s_addr = ip_hdr(skb)->daddr;
skb               328 net/sctp/protocol.c 			      const struct sk_buff *skb)
skb               339 net/sctp/protocol.c 	if (skb && skb_rtable(skb)->rt_flags & RTCF_BROADCAST)
skb               555 net/sctp/protocol.c static int sctp_v4_skb_iif(const struct sk_buff *skb)
skb               557 net/sctp/protocol.c 	return inet_iif(skb);
skb               561 net/sctp/protocol.c static int sctp_v4_is_ce(const struct sk_buff *skb)
skb               563 net/sctp/protocol.c 	return INET_ECN_is_ce(ip_hdr(skb)->tos);
skb               910 net/sctp/protocol.c static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
skb               913 net/sctp/protocol.c 		struct sctphdr *sh = sctp_hdr(skb);
skb               918 net/sctp/protocol.c 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
skb               973 net/sctp/protocol.c static inline int sctp_v4_xmit(struct sk_buff *skb,
skb               976 net/sctp/protocol.c 	struct inet_sock *inet = inet_sk(skb->sk);
skb               979 net/sctp/protocol.c 	pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb,
skb               980 net/sctp/protocol.c 		 skb->len, &transport->fl.u.ip4.saddr,
skb               991 net/sctp/protocol.c 	return __ip_queue_xmit(&inet->sk, skb, &transport->fl, dscp);
skb                71 net/sctp/sm_make_chunk.c static void sctp_control_release_owner(struct sk_buff *skb)
skb                73 net/sctp/sm_make_chunk.c 	struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
skb               100 net/sctp/sm_make_chunk.c 	struct sk_buff *skb = chunk->skb;
skb               113 net/sctp/sm_make_chunk.c 	skb->sk = asoc ? asoc->base.sk : NULL;
skb               114 net/sctp/sm_make_chunk.c 	skb_shinfo(skb)->destructor_arg = chunk;
skb               115 net/sctp/sm_make_chunk.c 	skb->destructor = sctp_control_release_owner;
skb               121 net/sctp/sm_make_chunk.c 	struct sk_buff *skb = chunk->skb;
skb               123 net/sctp/sm_make_chunk.c 	return SCTP_INPUT_CB(skb)->af->skb_iif(skb);
skb               154 net/sctp/sm_make_chunk.c 	if (skb_tailroom(chunk->skb) < len)
skb              1062 net/sctp/sm_make_chunk.c 	target = skb_put(chunk->skb, len);
skb              1071 net/sctp/sm_make_chunk.c 	chunk->chunk_end = skb_tail_pointer(chunk->skb);
skb              1305 net/sctp/sm_make_chunk.c 	skb_put_zero(retval->skb, hmac_desc->hmac_len);
skb              1310 net/sctp/sm_make_chunk.c 	retval->chunk_end = skb_tail_pointer(retval->skb);
skb              1330 net/sctp/sm_make_chunk.c struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
skb              1341 net/sctp/sm_make_chunk.c 		pr_debug("%s: chunkifying skb:%p w/o an sk\n", __func__, skb);
skb              1344 net/sctp/sm_make_chunk.c 	retval->skb		= skb;
skb              1389 net/sctp/sm_make_chunk.c 	struct sk_buff *skb;
skb              1398 net/sctp/sm_make_chunk.c 	skb = alloc_skb(chunklen, gfp);
skb              1399 net/sctp/sm_make_chunk.c 	if (!skb)
skb              1403 net/sctp/sm_make_chunk.c 	chunk_hdr = (struct sctp_chunkhdr *)skb_put(skb, sizeof(*chunk_hdr));
skb              1409 net/sctp/sm_make_chunk.c 	retval = sctp_chunkify(skb, asoc, sk, gfp);
skb              1411 net/sctp/sm_make_chunk.c 		kfree_skb(skb);
skb              1458 net/sctp/sm_make_chunk.c 	consume_skb(chunk->skb);
skb              1497 net/sctp/sm_make_chunk.c 	skb_put_zero(chunk->skb, padlen);
skb              1498 net/sctp/sm_make_chunk.c 	target = skb_put_data(chunk->skb, data, len);
skb              1502 net/sctp/sm_make_chunk.c 	chunk->chunk_end = skb_tail_pointer(chunk->skb);
skb              1517 net/sctp/sm_make_chunk.c 	target = skb_put(chunk->skb, len);
skb              1526 net/sctp/sm_make_chunk.c 	chunk->chunk_end = skb_tail_pointer(chunk->skb);
skb              1589 net/sctp/sm_make_chunk.c 	struct sk_buff *skb;
skb              1597 net/sctp/sm_make_chunk.c 	skb = chunk->skb;
skb              1599 net/sctp/sm_make_chunk.c 	SCTP_INPUT_CB(skb)->af->from_skb(&asoc->c.peer_addr, skb, 1);
skb              1707 net/sctp/sm_make_chunk.c 	struct sk_buff *skb = chunk->skb;
skb              1794 net/sctp/sm_make_chunk.c 		kt = skb_get_ktime(skb);
skb              3055 net/sctp/sm_make_chunk.c 	if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb))
skb              3242 net/sctp/sm_make_chunk.c 	hdr = (struct sctp_addiphdr *)asconf->skb->data;
skb              3247 net/sctp/sm_make_chunk.c 	addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
skb              3393 net/sctp/sm_make_chunk.c 	asconf_ack_param = (struct sctp_addip_param *)(asconf_ack->skb->data +
skb              3432 net/sctp/sm_make_chunk.c 	int asconf_len = asconf->skb->len;
skb              3442 net/sctp/sm_make_chunk.c 	addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
skb              3457 net/sctp/sm_make_chunk.c 	if (asconf_ack->skb->len == sizeof(struct sctp_addiphdr))
skb               777 net/sctp/sm_sideeffect.c 	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
skb               957 net/sctp/sm_sideeffect.c 	while (chunk->chunk_end > chunk->skb->data) {
skb               958 net/sctp/sm_sideeffect.c 		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
skb               318 net/sctp/sm_statefuns.c 					chunk->skb))
skb               396 net/sctp/sm_statefuns.c 	chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
skb               399 net/sctp/sm_statefuns.c 	chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));
skb               529 net/sctp/sm_statefuns.c 	chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
skb               583 net/sctp/sm_statefuns.c 	chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));
skb               641 net/sctp/sm_statefuns.c 	auth.skb = chunk->auth_chunk;
skb               729 net/sctp/sm_statefuns.c 		(struct sctp_signed_cookie *)chunk->skb->data;
skb               730 net/sctp/sm_statefuns.c 	if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
skb               927 net/sctp/sm_statefuns.c 	security_inet_conn_established(ep->base.sk, chunk->skb);
skb              1139 net/sctp/sm_statefuns.c 	chunk->subh.hb_hdr = (struct sctp_heartbeathdr *)chunk->skb->data;
skb              1147 net/sctp/sm_statefuns.c 	if (!pskb_pull(chunk->skb, paylen))
skb              1211 net/sctp/sm_statefuns.c 	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
skb              1468 net/sctp/sm_statefuns.c 					chunk->skb))
skb              1497 net/sctp/sm_statefuns.c 	chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
skb              1500 net/sctp/sm_statefuns.c 	chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));
skb              2135 net/sctp/sm_statefuns.c 	chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
skb              2136 net/sctp/sm_statefuns.c 	if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
skb              2174 net/sctp/sm_statefuns.c 					chunk->skb)) {
skb              2450 net/sctp/sm_statefuns.c 	err = (struct sctp_errhdr *)(chunk->skb->data);
skb              2606 net/sctp/sm_statefuns.c 		error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
skb              2653 net/sctp/sm_statefuns.c 		error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
skb              2775 net/sctp/sm_statefuns.c 	sdh = (struct sctp_shutdownhdr *)chunk->skb->data;
skb              2776 net/sctp/sm_statefuns.c 	skb_pull(chunk->skb, sizeof(*sdh));
skb              2862 net/sctp/sm_statefuns.c 	sdh = (struct sctp_shutdownhdr *)chunk->skb->data;
skb              2979 net/sctp/sm_statefuns.c 	cwr = (struct sctp_cwrhdr *)chunk->skb->data;
skb              2980 net/sctp/sm_statefuns.c 	skb_pull(chunk->skb, sizeof(*cwr));
skb              3033 net/sctp/sm_statefuns.c 	ecne = (struct sctp_ecnehdr *)chunk->skb->data;
skb              3034 net/sctp/sm_statefuns.c 	skb_pull(chunk->skb, sizeof(*ecne));
skb              3391 net/sctp/sm_statefuns.c 	abort->skb->sk = ep->base.sk;
skb              3540 net/sctp/sm_statefuns.c 	struct sk_buff *skb = chunk->skb;
skb              3558 net/sctp/sm_statefuns.c 		if (ch_end > skb_tail_pointer(skb))
skb              3594 net/sctp/sm_statefuns.c 	} while (ch_end < skb_tail_pointer(skb));
skb              3655 net/sctp/sm_statefuns.c 	shut->skb->sk = ep->base.sk;
skb              3748 net/sctp/sm_statefuns.c 	hdr = (struct sctp_addiphdr *)chunk->skb->data;
skb              3892 net/sctp/sm_statefuns.c 	addip_hdr = (struct sctp_addiphdr *)asconf_ack->skb->data;
skb              4074 net/sctp/sm_statefuns.c 	fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
skb              4078 net/sctp/sm_statefuns.c 	skb_pull(chunk->skb, len);
skb              4141 net/sctp/sm_statefuns.c 	fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
skb              4145 net/sctp/sm_statefuns.c 	skb_pull(chunk->skb, len);
skb              4214 net/sctp/sm_statefuns.c 	auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
skb              4216 net/sctp/sm_statefuns.c 	skb_pull(chunk->skb, sizeof(*auth_hdr));
skb              4251 net/sctp/sm_statefuns.c 	skb_pull(chunk->skb, sig_len);
skb              4259 net/sctp/sm_statefuns.c 	sctp_auth_calculate_hmac(asoc, chunk->skb,
skb              4303 net/sctp/sm_statefuns.c 	auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
skb              4616 net/sctp/sm_statefuns.c 		abort->skb->sk = ep->base.sk;
skb              6154 net/sctp/sm_statefuns.c 	sack = (struct sctp_sackhdr *) chunk->skb->data;
skb              6160 net/sctp/sm_statefuns.c 	if (len > chunk->skb->len)
skb              6163 net/sctp/sm_statefuns.c 	skb_pull(chunk->skb, len);
skb              6203 net/sctp/sm_statefuns.c 		abort->skb->sk = ep->base.sk;
skb              6313 net/sctp/sm_statefuns.c 			err_chunk->skb->sk = ep->base.sk;
skb              6339 net/sctp/sm_statefuns.c 	data_hdr = (struct sctp_datahdr *)chunk->skb->data;
skb              6341 net/sctp/sm_statefuns.c 	skb_pull(chunk->skb, sctp_datahdr_len(&asoc->stream));
skb              6359 net/sctp/sm_statefuns.c 		struct sctp_af *af = SCTP_INPUT_CB(chunk->skb)->af;
skb              6362 net/sctp/sm_statefuns.c 		if (af->is_ce(sctp_gso_headskb(chunk->skb))) {
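The sm_statefuns.c entries repeat one parsing idiom: cast skb->data to the expected header, then advance with skb_pull() (or pskb_pull()/pskb_may_pull() when the bytes may not be linear). A hedged sketch of that peel-off step; demo_errhdr and demo_pull_hdr() are illustrative names only:

#include <linux/errno.h>
#include <linux/skbuff.h>

struct demo_errhdr {			/* hypothetical header layout */
	__be16 cause;
	__be16 length;
};

static int demo_pull_hdr(struct sk_buff *skb, struct demo_errhdr *out)
{
	struct demo_errhdr *hdr;

	/* Ensure sizeof(*hdr) bytes exist and are linear at skb->data;
	 * pskb_may_pull() pulls paged data into the head if needed.
	 */
	if (!pskb_may_pull(skb, sizeof(*hdr)))
		return -EINVAL;

	hdr  = (struct demo_errhdr *)skb->data;
	*out = *hdr;

	skb_pull(skb, sizeof(*hdr));	/* advance past the header */
	return 0;
}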
skb                72 net/sctp/socket.c static void sctp_wfree(struct sk_buff *skb);
skb               133 net/sctp/socket.c 	skb_set_owner_w(chunk->skb, sk);
skb               135 net/sctp/socket.c 	chunk->skb->destructor = sctp_wfree;
skb               137 net/sctp/socket.c 	skb_shinfo(chunk->skb)->destructor_arg = chunk;
skb               140 net/sctp/socket.c 	asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
skb               141 net/sctp/socket.c 	sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
skb               142 net/sctp/socket.c 	sk_mem_charge(sk, chunk->skb->truesize);
skb               147 net/sctp/socket.c 	skb_orphan(chunk->skb);
skb               156 net/sctp/socket.c 		if ((clear && asoc->base.sk == c->skb->sk) ||	\
skb               157 net/sctp/socket.c 		    (!clear && asoc->base.sk != c->skb->sk))	\
skb               194 net/sctp/socket.c 	struct sk_buff *skb, *tmp;
skb               196 net/sctp/socket.c 	sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
skb               197 net/sctp/socket.c 		cb(skb, sk);
skb               199 net/sctp/socket.c 	sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
skb               200 net/sctp/socket.c 		cb(skb, sk);
skb               202 net/sctp/socket.c 	sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
skb               203 net/sctp/socket.c 		cb(skb, sk);
skb              2049 net/sctp/socket.c static int sctp_skb_pull(struct sk_buff *skb, int len)
skb              2052 net/sctp/socket.c 	int skb_len = skb_headlen(skb);
skb              2056 net/sctp/socket.c 		__skb_pull(skb, len);
skb              2060 net/sctp/socket.c 	__skb_pull(skb, skb_len);
skb              2062 net/sctp/socket.c 	skb_walk_frags(skb, list) {
skb              2064 net/sctp/socket.c 		skb->len -= (len-rlen);
skb              2065 net/sctp/socket.c 		skb->data_len -= (len-rlen);
skb              2096 net/sctp/socket.c 	struct sk_buff *skb, *head_skb;
skb              2113 net/sctp/socket.c 	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
skb              2114 net/sctp/socket.c 	if (!skb)
skb              2120 net/sctp/socket.c 	skb_len = skb->len;
skb              2126 net/sctp/socket.c 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
skb              2128 net/sctp/socket.c 	event = sctp_skb2event(skb);
skb              2136 net/sctp/socket.c 		head_skb = skb;
skb              2165 net/sctp/socket.c 		sctp_skb_pull(skb, copied);
skb              2166 net/sctp/socket.c 		skb_queue_head(&sk->sk_receive_queue, skb);
skb              2186 net/sctp/socket.c 		kfree_skb(skb);
skb              4959 net/sctp/socket.c 		struct sk_buff *skb;
skb              4962 net/sctp/socket.c 		skb = skb_peek(&sk->sk_receive_queue);
skb              4963 net/sctp/socket.c 		if (skb != NULL) {
skb              4968 net/sctp/socket.c 			amount = skb->len;
skb              8851 net/sctp/socket.c 	struct sk_buff *skb;
skb              8868 net/sctp/socket.c 			skb = skb_peek(&sk->sk_receive_queue);
skb              8869 net/sctp/socket.c 			if (skb)
skb              8870 net/sctp/socket.c 				refcount_inc(&skb->users);
skb              8872 net/sctp/socket.c 			skb = __skb_dequeue(&sk->sk_receive_queue);
skb              8875 net/sctp/socket.c 		if (skb)
skb              8876 net/sctp/socket.c 			return skb;
skb              8981 net/sctp/socket.c static void sctp_wfree(struct sk_buff *skb)
skb              8983 net/sctp/socket.c 	struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
skb              8987 net/sctp/socket.c 	sk_mem_uncharge(sk, skb->truesize);
skb              8988 net/sctp/socket.c 	sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
skb              8989 net/sctp/socket.c 	asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
skb              9013 net/sctp/socket.c 	sock_wfree(skb);
skb              9024 net/sctp/socket.c void sctp_sock_rfree(struct sk_buff *skb)
skb              9026 net/sctp/socket.c 	struct sock *sk = skb->sk;
skb              9027 net/sctp/socket.c 	struct sctp_ulpevent *event = sctp_skb2event(skb);
skb              9270 net/sctp/socket.c static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
skb              9274 net/sctp/socket.c 	if (!skb->data_len)
skb              9278 net/sctp/socket.c 	skb_walk_frags(skb, frag)
skb              9282 net/sctp/socket.c 	sctp_skb_set_owner_r(skb, sk);
skb              9365 net/sctp/socket.c 	struct sk_buff *skb, *tmp;
skb              9415 net/sctp/socket.c 	sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
skb              9416 net/sctp/socket.c 		event = sctp_skb2event(skb);
skb              9418 net/sctp/socket.c 			__skb_unlink(skb, &oldsk->sk_receive_queue);
skb              9419 net/sctp/socket.c 			__skb_queue_tail(&newsk->sk_receive_queue, skb);
skb              9420 net/sctp/socket.c 			sctp_skb_set_owner_r_frag(skb, newsk);
skb              9444 net/sctp/socket.c 		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
skb              9445 net/sctp/socket.c 			event = sctp_skb2event(skb);
skb              9447 net/sctp/socket.c 				__skb_unlink(skb, &oldsp->pd_lobby);
skb              9448 net/sctp/socket.c 				__skb_queue_tail(queue, skb);
skb              9449 net/sctp/socket.c 				sctp_skb_set_owner_r_frag(skb, newsk);
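The socket.c block above pairs a custom write-space destructor with manual buffer accounting: skb_set_owner_w() for ownership, the destructor swapped afterwards, a back-pointer parked in skb_shinfo()->destructor_arg, and symmetric sk_wmem_queued/sk_mem_charge() updates. A compressed sketch of that pairing, assuming a hypothetical demo_unit context (the real code hangs a struct sctp_chunk there):

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>

struct demo_unit {			/* hypothetical per-skb send context */
	struct sock *sk;
};

static void demo_wfree(struct sk_buff *skb)
{
	struct demo_unit *u = skb_shinfo(skb)->destructor_arg;
	struct sock *sk = u->sk;

	/* Undo exactly what demo_charge() added. */
	sk_mem_uncharge(sk, skb->truesize);
	sk->sk_wmem_queued -= skb->truesize;
	kfree(u);
	sock_wfree(skb);		/* drops the sk_wmem_alloc charge */
}

static void demo_charge(struct sock *sk, struct sk_buff *skb,
			struct demo_unit *u)
{
	u->sk = sk;
	skb_set_owner_w(skb, sk);	/* sets skb->sk + default destructor */
	skb->destructor = demo_wfree;	/* then swap in our destructor */
	skb_shinfo(skb)->destructor_arg = u;

	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}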
skb               477 net/sctp/stream_interleave.c 	struct sk_buff *skb;
skb               479 net/sctp/stream_interleave.c 	skb = __skb_peek(skb_list);
skb               480 net/sctp/stream_interleave.c 	event = sctp_skb2event(skb);
skb               488 net/sctp/stream_interleave.c 		sk_mark_napi_id(sk, skb);
skb               499 net/sctp/stream_interleave.c 		__skb_queue_tail(&sk->sk_receive_queue, skb);
skb               254 net/sctp/stream_sched.c 	q->out_qlen -= ch->skb->len;
skb                52 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb                54 net/sctp/ulpevent.c 	skb = alloc_skb(size, gfp);
skb                55 net/sctp/ulpevent.c 	if (!skb)
skb                58 net/sctp/ulpevent.c 	event = sctp_skb2event(skb);
skb                59 net/sctp/ulpevent.c 	sctp_ulpevent_init(event, msg_flags, skb->truesize);
skb                80 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb                86 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb                89 net/sctp/ulpevent.c 	sctp_skb_set_owner_r(skb, asoc->base.sk);
skb               121 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               130 net/sctp/ulpevent.c 		skb = skb_copy_expand(chunk->skb,
skb               133 net/sctp/ulpevent.c 		if (!skb)
skb               137 net/sctp/ulpevent.c 		event = sctp_skb2event(skb);
skb               138 net/sctp/ulpevent.c 		sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
skb               141 net/sctp/ulpevent.c 		sac = skb_push(skb, sizeof(struct sctp_assoc_change));
skb               144 net/sctp/ulpevent.c 		skb_trim(skb, sizeof(struct sctp_assoc_change) +
skb               153 net/sctp/ulpevent.c 		skb = sctp_event2skb(event);
skb               154 net/sctp/ulpevent.c 		sac = skb_put(skb, sizeof(struct sctp_assoc_change));
skb               189 net/sctp/ulpevent.c 	sac->sac_length = skb->len;
skb               248 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               255 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb               256 net/sctp/ulpevent.c 	spc = skb_put(skb, sizeof(struct sctp_paddr_change));
skb               362 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               366 net/sctp/ulpevent.c 	ch = (struct sctp_errhdr *)(chunk->skb->data);
skb               371 net/sctp/ulpevent.c 	skb_pull(chunk->skb, sizeof(*ch));
skb               376 net/sctp/ulpevent.c 	skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
skb               379 net/sctp/ulpevent.c 	skb_pull(chunk->skb, elen);
skb               380 net/sctp/ulpevent.c 	if (!skb)
skb               384 net/sctp/ulpevent.c 	event = sctp_skb2event(skb);
skb               385 net/sctp/ulpevent.c 	sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
skb               387 net/sctp/ulpevent.c 	sre = skb_push(skb, sizeof(*sre));
skb               390 net/sctp/ulpevent.c 	skb_trim(skb, sizeof(*sre) + elen);
skb               396 net/sctp/ulpevent.c 	sre->sre_length = skb->len;
skb               417 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               423 net/sctp/ulpevent.c 	skb = skb_copy_expand(chunk->skb,
skb               427 net/sctp/ulpevent.c 	if (!skb)
skb               431 net/sctp/ulpevent.c 	skb_pull(skb, sctp_datachk_len(&asoc->stream));
skb               435 net/sctp/ulpevent.c 	event = sctp_skb2event(skb);
skb               436 net/sctp/ulpevent.c 	sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
skb               438 net/sctp/ulpevent.c 	ssf = skb_push(skb, sizeof(struct sctp_send_failed));
skb               471 net/sctp/ulpevent.c 	skb_trim(skb, ssf->ssf_length);
skb               525 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               532 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb               533 net/sctp/ulpevent.c 	sse = skb_put(skb, sizeof(struct sctp_shutdown_event));
skb               587 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               594 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb               595 net/sctp/ulpevent.c 	sai = skb_put(skb, sizeof(struct sctp_adaptation_event));
skb               622 net/sctp/ulpevent.c 	struct sk_buff *skb = chunk->skb;
skb               639 net/sctp/ulpevent.c 	if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
skb               643 net/sctp/ulpevent.c 	skb = skb_clone(chunk->skb, gfp);
skb               644 net/sctp/ulpevent.c 	if (!skb)
skb               670 net/sctp/ulpevent.c 	skb_trim(skb, chunk->chunk_end - padding - skb->data);
skb               673 net/sctp/ulpevent.c 	event = sctp_skb2event(skb);
skb               679 net/sctp/ulpevent.c 	sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));
skb               700 net/sctp/ulpevent.c 	kfree_skb(skb);
skb               720 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               727 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb               728 net/sctp/ulpevent.c 	pd = skb_put(skb, sizeof(struct sctp_pdapi_event));
skb               773 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               780 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb               781 net/sctp/ulpevent.c 	ak = skb_put(skb, sizeof(struct sctp_authkey_event));
skb               811 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               818 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb               819 net/sctp/ulpevent.c 	sdry = skb_put(skb, sizeof(struct sctp_sender_dry_event));
skb               836 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               844 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb               845 net/sctp/ulpevent.c 	sreset = skb_put(skb, length);
skb               865 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               872 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb               873 net/sctp/ulpevent.c 	areset = skb_put(skb, sizeof(struct sctp_assoc_reset_event));
skb               892 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               899 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb               900 net/sctp/ulpevent.c 	schange = skb_put(skb, sizeof(struct sctp_stream_change_event));
skb               919 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb               921 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb               922 net/sctp/ulpevent.c 	notification = (union sctp_notification *) skb->data;
skb               984 net/sctp/ulpevent.c 					 const struct sk_buff *skb)
skb               994 net/sctp/ulpevent.c 	nxtinfo.nxt_length = skb->len;
skb              1005 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb              1008 net/sctp/ulpevent.c 	skb = sctp_skb_recv_datagram(sk, MSG_PEEK, 1, &err);
skb              1009 net/sctp/ulpevent.c 	if (skb != NULL) {
skb              1010 net/sctp/ulpevent.c 		__sctp_ulpevent_read_nxtinfo(sctp_skb2event(skb),
skb              1011 net/sctp/ulpevent.c 					     msghdr, skb);
skb              1013 net/sctp/ulpevent.c 		kfree_skb(skb);
skb              1023 net/sctp/ulpevent.c 	struct sk_buff *skb, *frag;
skb              1025 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb              1028 net/sctp/ulpevent.c 	sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
skb              1030 net/sctp/ulpevent.c 	if (!skb->data_len)
skb              1039 net/sctp/ulpevent.c 	skb_walk_frags(skb, frag)
skb              1048 net/sctp/ulpevent.c 	struct sk_buff *skb, *frag;
skb              1058 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb              1059 net/sctp/ulpevent.c 	len = skb->len;
skb              1061 net/sctp/ulpevent.c 	if (!skb->data_len)
skb              1065 net/sctp/ulpevent.c 	skb_walk_frags(skb, frag) {
skb              1081 net/sctp/ulpevent.c 	struct sk_buff *skb, *frag;
skb              1083 net/sctp/ulpevent.c 	skb = sctp_event2skb(event);
skb              1085 net/sctp/ulpevent.c 	if (!skb->data_len)
skb              1089 net/sctp/ulpevent.c 	skb_walk_frags(skb, frag) {
skb              1119 net/sctp/ulpevent.c 	struct sk_buff *skb;
skb              1122 net/sctp/ulpevent.c 	while ((skb = skb_dequeue(list)) != NULL) {
skb              1123 net/sctp/ulpevent.c 		struct sctp_ulpevent *event = sctp_skb2event(skb);
skb              1126 net/sctp/ulpevent.c 			data_unread += skb->len;
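Everything in ulpevent.c leans on a two-way mapping between an skb and the event descriptor stored in its control block (sctp_skb2event()/sctp_event2skb()). A small sketch of that skb->cb embedding with the usual compile-time size guard; demo_event and both helpers are hypothetical:

#include <linux/build_bug.h>
#include <linux/skbuff.h>

struct demo_event {			/* hypothetical per-skb metadata */
	u32 flags;
	u32 tsn;
};

/* skb->cb is 48 bytes of scratch owned by whichever layer currently
 * holds the skb, so both directions of the mapping are O(1).
 */
static inline struct demo_event *demo_skb2event(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct demo_event) > sizeof(skb->cb));
	return (struct demo_event *)skb->cb;
}

static inline struct sk_buff *demo_event2skb(struct demo_event *ev)
{
	return container_of((void *)ev, struct sk_buff, cb);
}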
skb                59 net/sctp/ulpqueue.c 	struct sk_buff *skb;
skb                62 net/sctp/ulpqueue.c 	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
skb                63 net/sctp/ulpqueue.c 		event = sctp_skb2event(skb);
skb                67 net/sctp/ulpqueue.c 	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
skb                68 net/sctp/ulpqueue.c 		event = sctp_skb2event(skb);
skb                72 net/sctp/ulpqueue.c 	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
skb                73 net/sctp/ulpqueue.c 		event = sctp_skb2event(skb);
skb               148 net/sctp/ulpqueue.c 			struct sk_buff *skb, *tmp;
skb               151 net/sctp/ulpqueue.c 			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
skb               152 net/sctp/ulpqueue.c 				event = sctp_skb2event(skb);
skb               154 net/sctp/ulpqueue.c 					__skb_unlink(skb, &sp->pd_lobby);
skb               156 net/sctp/ulpqueue.c 							 skb);
skb               188 net/sctp/ulpqueue.c 	struct sk_buff *skb;
skb               191 net/sctp/ulpqueue.c 	skb = __skb_peek(skb_list);
skb               192 net/sctp/ulpqueue.c 	event = sctp_skb2event(skb);
skb               203 net/sctp/ulpqueue.c 		sk_mark_napi_id(sk, skb);
skb               970 net/sctp/ulpqueue.c 	struct sk_buff *skb, *flist, *last;
skb               976 net/sctp/ulpqueue.c 	while ((skb = skb_peek_tail(list)) != NULL) {
skb               977 net/sctp/ulpqueue.c 		event = sctp_skb2event(skb);
skb               988 net/sctp/ulpqueue.c 		freed += skb_headlen(skb);
skb               989 net/sctp/ulpqueue.c 		flist = skb_shinfo(skb)->frag_list;
skb              1000 net/sctp/ulpqueue.c 		__skb_unlink(skb, list);
skb              1033 net/sctp/ulpqueue.c 	struct sk_buff *skb;
skb              1047 net/sctp/ulpqueue.c 	skb = skb_peek(&asoc->ulpq.reasm);
skb              1048 net/sctp/ulpqueue.c 	if (skb != NULL) {
skb              1049 net/sctp/ulpqueue.c 		ctsn = sctp_skb2event(skb)->tsn;
skb              1092 net/sctp/ulpqueue.c 	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
skb                63 net/smc/smc_diag.c static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
skb                67 net/smc/smc_diag.c 	if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
skb                75 net/smc/smc_diag.c static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
skb                86 net/smc/smc_diag.c 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb               100 net/smc/smc_diag.c 	user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
skb               101 net/smc/smc_diag.c 	if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
skb               106 net/smc/smc_diag.c 	if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0)
skb               145 net/smc/smc_diag.c 		if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
skb               166 net/smc/smc_diag.c 		if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
skb               181 net/smc/smc_diag.c 		if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
skb               185 net/smc/smc_diag.c 	nlmsg_end(skb, nlh);
skb               189 net/smc/smc_diag.c 	nlmsg_cancel(skb, nlh);
skb               193 net/smc/smc_diag.c static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
skb               196 net/smc/smc_diag.c 	struct net *net = sock_net(skb->sk);
skb               210 net/smc/smc_diag.c 		rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
skb               220 net/smc/smc_diag.c static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               224 net/smc/smc_diag.c 	rc = smc_diag_dump_proto(&smc_proto, skb, cb);
skb               226 net/smc/smc_diag.c 		rc = smc_diag_dump_proto(&smc_proto6, skb, cb);
skb               230 net/smc/smc_diag.c static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
skb               232 net/smc/smc_diag.c 	struct net *net = sock_net(skb->sk);
skb               241 net/smc/smc_diag.c 			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
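smc_diag.c has the canonical netlink dump shape: nlmsg_put() opens a message at the requester's portid/seq, nla_put*() calls fill attributes, then nlmsg_end() commits or nlmsg_cancel() rolls the tail back when the skb runs out of room. A minimal sketch of that shape; DEMO_ATTR_STATE and the payload are made up:

#include <linux/errno.h>
#include <linux/sock_diag.h>
#include <net/netlink.h>

enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_STATE };	/* hypothetical attributes */

static int demo_diag_fill(struct sk_buff *skb, struct netlink_callback *cb,
			  u8 state)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			SOCK_DIAG_BY_FAMILY, 0, NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_u8(skb, DEMO_ATTR_STATE, state))
		goto errout;

	nlmsg_end(skb, nlh);		/* fix up the length, commit */
	return 0;

errout:
	nlmsg_cancel(skb, nlh);		/* trim the partial message */
	return -EMSGSIZE;
}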
skb               415 net/smc/smc_pnet.c static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
skb               435 net/smc/smc_pnet.c static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info)
skb               451 net/smc/smc_pnet.c static int smc_pnet_dumpinfo(struct sk_buff *skb,
skb               457 net/smc/smc_pnet.c 	hdr = genlmsg_put(skb, portid, seq, &smc_pnet_nl_family,
skb               461 net/smc/smc_pnet.c 	if (smc_pnet_set_nla(skb, pnetelem) < 0) {
skb               462 net/smc/smc_pnet.c 		genlmsg_cancel(skb, hdr);
skb               465 net/smc/smc_pnet.c 	genlmsg_end(skb, hdr);
skb               469 net/smc/smc_pnet.c static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
skb               496 net/smc/smc_pnet.c 		if (smc_pnet_dumpinfo(skb, portid, seq, NLM_F_MULTI,
skb               525 net/smc/smc_pnet.c 				if (smc_pnet_dumpinfo(skb, portid, seq,
skb               548 net/smc/smc_pnet.c 			if (smc_pnet_dumpinfo(skb, portid, seq, NLM_F_MULTI,
skb               560 net/smc/smc_pnet.c static int smc_pnet_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               562 net/smc/smc_pnet.c 	struct net *net = sock_net(skb->sk);
skb               565 net/smc/smc_pnet.c 	idx = _smc_pnet_dump(net, skb, NETLINK_CB(cb->skb).portid,
skb               569 net/smc/smc_pnet.c 	return skb->len;
skb               573 net/smc/smc_pnet.c static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info)
skb               601 net/smc/smc_pnet.c static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info)
skb               708 net/socket.c   static bool skb_is_err_queue(const struct sk_buff *skb)
skb               715 net/socket.c   	return skb->pkt_type == PACKET_OUTGOING;
skb               726 net/socket.c   static bool skb_is_swtx_tstamp(const struct sk_buff *skb, int false_tstamp)
skb               728 net/socket.c   	return skb->tstamp && !false_tstamp && skb_is_err_queue(skb);
skb               731 net/socket.c   static void put_ts_pktinfo(struct msghdr *msg, struct sk_buff *skb)
skb               736 net/socket.c   	if (!skb_mac_header_was_set(skb))
skb               742 net/socket.c   	orig_dev = dev_get_by_napi_id(skb_napi_id(skb));
skb               747 net/socket.c   	ts_pktinfo.pkt_length = skb->len - skb_mac_offset(skb);
skb               756 net/socket.c   	struct sk_buff *skb)
skb               764 net/socket.c   		skb_hwtstamps(skb);
skb               768 net/socket.c   	if (need_software_tstamp && skb->tstamp == 0) {
skb               769 net/socket.c   		__net_timestamp(skb);
skb               778 net/socket.c   				skb_get_new_timestamp(skb, &tv);
skb               784 net/socket.c   				skb_get_timestamp(skb, &tv);
skb               792 net/socket.c   				skb_get_new_timestampns(skb, &ts);
skb               798 net/socket.c   				skb_get_timestampns(skb, &ts);
skb               807 net/socket.c   	    ktime_to_timespec64_cond(skb->tstamp, tss.ts + 0))
skb               811 net/socket.c   	    !skb_is_swtx_tstamp(skb, false_tstamp) &&
skb               815 net/socket.c   		    !skb_is_err_queue(skb))
skb               816 net/socket.c   			put_ts_pktinfo(msg, skb);
skb               824 net/socket.c   		if (skb_is_err_queue(skb) && skb->len &&
skb               825 net/socket.c   		    SKB_EXT_ERR(skb)->opt_stats)
skb               827 net/socket.c   				 skb->len, skb->data);
skb               833 net/socket.c   	struct sk_buff *skb)
skb               839 net/socket.c   	if (!skb->wifi_acked_valid)
skb               842 net/socket.c   	ack = skb->wifi_acked;
skb               849 net/socket.c   				   struct sk_buff *skb)
skb               851 net/socket.c   	if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && SOCK_SKB_CB(skb)->dropcount)
skb               853 net/socket.c   			sizeof(__u32), &SOCK_SKB_CB(skb)->dropcount);
skb               857 net/socket.c   	struct sk_buff *skb)
skb               859 net/socket.c   	sock_recv_timestamp(msg, sk, skb);
skb               860 net/socket.c   	sock_recv_drops(msg, sk, skb);
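The net/socket.c lines turn skb receive timestamps into control messages. A reduced sketch of that conversion, assuming a socket with software receive timestamps enabled; the SO_TIMESTAMP_OLD/__kernel_old_timeval names follow this kernel generation's post-y2038 naming:

#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/sock.h>

static void demo_recv_timestamp(struct msghdr *msg, struct sock *sk,
				struct sk_buff *skb)
{
	struct __kernel_old_timeval tv;

	/* Fall back to a software stamp if no driver set skb->tstamp. */
	if (skb->tstamp == 0)
		__net_timestamp(skb);

	skb_get_timestamp(skb, &tv);	/* ktime -> timeval conversion */
	put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, sizeof(tv), &tv);
}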
skb                38 net/strparser/strparser.c static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
skb                40 net/strparser/strparser.c 	return (struct _strp_msg *)((void *)skb->cb +
skb               103 net/strparser/strparser.c 	struct sk_buff *head, *skb;
skb               160 net/strparser/strparser.c 				skb = alloc_skb_for_msg(head);
skb               161 net/strparser/strparser.c 				if (!skb) {
skb               168 net/strparser/strparser.c 				strp->skb_head = skb;
skb               169 net/strparser/strparser.c 				head = skb;
skb               179 net/strparser/strparser.c 		skb = skb_clone(orig_skb, GFP_ATOMIC);
skb               180 net/strparser/strparser.c 		if (!skb) {
skb               190 net/strparser/strparser.c 			head = skb;
skb               201 net/strparser/strparser.c 			if (skb_has_frag_list(skb)) {
skb               202 net/strparser/strparser.c 				err = skb_unclone(skb, GFP_ATOMIC);
skb               211 net/strparser/strparser.c 			*strp->skb_nextp = skb;
skb               212 net/strparser/strparser.c 			strp->skb_nextp = &skb->next;
skb               213 net/strparser/strparser.c 			head->data_len += skb->len;
skb               214 net/strparser/strparser.c 			head->len += skb->len;
skb               215 net/strparser/strparser.c 			head->truesize += skb->truesize;
skb               249 net/strparser/strparser.c 					  skb->len - stm->strp.offset) {
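strparser.c glues TCP segments into one logical message by hanging clones off a head skb's frag_list and adjusting head->len/data_len/truesize by hand, with strp->skb_nextp as a tail cursor. A sketch of that chaining step under the same cursor convention; demo_chain() is illustrative, and the head must already be unshared, as the skb_unclone() call above guarantees:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int demo_chain(struct sk_buff *head, struct sk_buff ***nextp,
		      struct sk_buff *src)
{
	struct sk_buff *clone = skb_clone(src, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;

	**nextp = clone;		/* append at the cursor */
	*nextp  = &clone->next;		/* cursor now points past it */

	/* Manual accounting: these bytes are not in the linear area. */
	head->data_len += clone->len;
	head->len      += clone->len;
	head->truesize += clone->truesize;
	return 0;
}

A caller seeds the cursor with struct sk_buff **p = &skb_shinfo(head)->frag_list; and passes &p; that cursor is the role strp->skb_nextp plays in the lines above.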
skb                35 net/sunrpc/socklib.c 	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
skb                58 net/sunrpc/socklib.c 	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
skb               154 net/sunrpc/socklib.c int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
skb               158 net/sunrpc/socklib.c 	desc.skb = skb;
skb               160 net/sunrpc/socklib.c 	desc.count = skb->len - desc.offset;
skb               162 net/sunrpc/socklib.c 	if (skb_csum_unnecessary(skb))
skb               165 net/sunrpc/socklib.c 	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
skb               168 net/sunrpc/socklib.c 	if (desc.offset != skb->len) {
skb               170 net/sunrpc/socklib.c 		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
skb               177 net/sunrpc/socklib.c 	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
skb               178 net/sunrpc/socklib.c 	    !skb->csum_complete_sw)
skb               179 net/sunrpc/socklib.c 		netdev_rx_csum_fault(skb->dev, skb);
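socklib.c folds a checksum while copying: csum_partial() covers the already-consumed header bytes, skb_copy_and_csum_bits() checksums the copied range, skb_checksum() covers any tail, and csum_fold() must come out zero. A condensed sketch of that verify-while-copy flow; demo_copy_and_verify() is illustrative, with the offsets passed to csum_block_add() keeping odd-byte alignment correct:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

static int demo_copy_and_verify(struct sk_buff *skb, int off, u8 *to, int len)
{
	__wsum csum;

	/* Bytes [0, off) are checksummed in place, seeded by skb->csum. */
	csum = csum_partial(skb->data, off, skb->csum);

	/* Bytes [off, off + len) are checksummed while being copied. */
	csum = csum_block_add(csum,
			      skb_copy_and_csum_bits(skb, off, to, len, 0),
			      off);

	/* Any tail is checksummed without copying. */
	if (off + len != skb->len)
		csum = csum_block_add(csum,
				      skb_checksum(skb, off + len,
						   skb->len - off - len, 0),
				      off + len);

	return csum_fold(csum) ? -EINVAL : 0;
}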
skb               115 net/sunrpc/svcsock.c 	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
skb               117 net/sunrpc/svcsock.c 	if (skb) {
skb               122 net/sunrpc/svcsock.c 		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
skb               123 net/sunrpc/svcsock.c 		skb_free_datagram_locked(svsk->sk_sk, skb);
skb               129 net/sunrpc/svcsock.c 	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
skb               131 net/sunrpc/svcsock.c 	if (skb) {
skb               134 net/sunrpc/svcsock.c 		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
skb               135 net/sunrpc/svcsock.c 		consume_skb(skb);
skb               500 net/sunrpc/svcsock.c 	struct sk_buff	*skb;
skb               527 net/sunrpc/svcsock.c 	skb = NULL;
skb               531 net/sunrpc/svcsock.c 		skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err);
skb               533 net/sunrpc/svcsock.c 	if (skb == NULL) {
skb               543 net/sunrpc/svcsock.c 	if (skb->tstamp == 0) {
skb               544 net/sunrpc/svcsock.c 		skb->tstamp = ktime_get_real();
skb               548 net/sunrpc/svcsock.c 	sock_write_timestamp(svsk->sk_sk, skb->tstamp);
skb               551 net/sunrpc/svcsock.c 	len  = skb->len;
skb               563 net/sunrpc/svcsock.c 	if (skb_is_nonlinear(skb)) {
skb               566 net/sunrpc/svcsock.c 		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
skb               572 net/sunrpc/svcsock.c 		consume_skb(skb);
skb               575 net/sunrpc/svcsock.c 		rqstp->rq_arg.head[0].iov_base = skb->data;
skb               577 net/sunrpc/svcsock.c 		if (skb_checksum_complete(skb))
skb               579 net/sunrpc/svcsock.c 		rqstp->rq_xprt_ctxt = skb;
skb               599 net/sunrpc/svcsock.c 	kfree_skb(skb);
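svcsock.c also shows the release discipline for received datagrams: consume_skb() on the normal path, kfree_skb() when dropping on error, a distinction that only matters for drop tracing. A stripped-down sketch of the receive loop; demo_udp_drain() is illustrative and assumes the skb_recv_udp() signature of this kernel generation:

#include <linux/skbuff.h>
#include <net/udp.h>

static int demo_udp_drain(struct sock *sk)
{
	struct sk_buff *skb;
	int err, handled = 0;

	/* Non-blocking dequeue, one datagram per iteration. */
	while ((skb = skb_recv_udp(sk, 0, 1, &err)) != NULL) {
		if (skb_checksum_complete(skb)) {
			kfree_skb(skb);		/* drop: bad checksum */
			continue;
		}
		/* ... process skb->data / skb->len here ... */
		handled++;
		consume_skb(skb);		/* normal release */
	}
	return handled;
}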
skb              1370 net/sunrpc/xprtsock.c 		struct sk_buff *skb)
skb              1378 net/sunrpc/xprtsock.c 	repsize = skb->len;
skb              1385 net/sunrpc/xprtsock.c 	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
skb              1403 net/sunrpc/xprtsock.c 	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
skb              1424 net/sunrpc/xprtsock.c 	struct sk_buff *skb;
skb              1433 net/sunrpc/xprtsock.c 		skb = skb_recv_udp(sk, 0, 1, &err);
skb              1434 net/sunrpc/xprtsock.c 		if (skb == NULL)
skb              1436 net/sunrpc/xprtsock.c 		xs_udp_data_read_skb(&transport->xprt, sk, skb);
skb              1437 net/sunrpc/xprtsock.c 		consume_skb(skb);
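xprtsock.c peeks at the RPC XID with skb_header_pointer(), which returns a direct pointer when the bytes are linear and otherwise copies them into the caller's stack buffer. A short sketch of that safe peek; demo_peek_xid() is illustrative:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int demo_peek_xid(struct sk_buff *skb, u32 *xid)
{
	__be32 _xid;
	__be32 *xp;

	/* Never assume the first 4 bytes sit in the linear area. */
	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
	if (!xp)
		return -EBADMSG;	/* datagram shorter than the XID */

	*xid = be32_to_cpu(*xp);
	return 0;
}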
skb               174 net/tipc/bcast.c 	struct sk_buff *skb, *_skb;
skb               193 net/tipc/bcast.c 		skb_queue_walk(xmitq, skb) {
skb               194 net/tipc/bcast.c 			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
skb               310 net/tipc/bcast.c static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
skb               324 net/tipc/bcast.c 	hdr = buf_msg(skb);
skb               378 net/tipc/bcast.c 	struct sk_buff *skb;
skb               393 net/tipc/bcast.c 		skb = skb_peek(pkts);
skb               394 net/tipc/bcast.c 		hdr = buf_msg(skb);
skb               401 net/tipc/bcast.c 			rc = tipc_mcast_send_sync(net, skb, method, dests);
skb               429 net/tipc/bcast.c int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
skb               431 net/tipc/bcast.c 	struct tipc_msg *hdr = buf_msg(skb);
skb               439 net/tipc/bcast.c 		kfree_skb(skb);
skb               445 net/tipc/bcast.c 		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
skb               447 net/tipc/bcast.c 		rc = tipc_link_rcv(l, skb, NULL);
skb               774 net/tipc/bcast.c 	struct sk_buff *skb, *_skb, *tmp;
skb               779 net/tipc/bcast.c 	skb = skb_peek(inputq);
skb               780 net/tipc/bcast.c 	if (!skb)
skb               783 net/tipc/bcast.c 	hdr = buf_msg(skb);
skb               809 net/tipc/bcast.c 		__skb_queue_tail(defq, skb);
skb               818 net/tipc/bcast.c 		__skb_queue_tail(defq, skb);
skb               825 net/tipc/bcast.c 		__skb_queue_tail(defq, skb);
skb               835 net/tipc/bcast.c 		kfree_skb(skb);
skb                92 net/tipc/bcast.h int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
skb                69 net/tipc/bearer.c static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
skb               243 net/tipc/bearer.c 	struct sk_buff *skb;
skb               319 net/tipc/bearer.c 	res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
skb               327 net/tipc/bearer.c 	if (skb)
skb               328 net/tipc/bearer.c 		tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
skb               446 net/tipc/bearer.c int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
skb               456 net/tipc/bearer.c 	delta = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb));
skb               457 net/tipc/bearer.c 	if ((delta > 0) && pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) {
skb               458 net/tipc/bearer.c 		kfree_skb(skb);
skb               461 net/tipc/bearer.c 	skb_reset_network_header(skb);
skb               462 net/tipc/bearer.c 	skb->dev = dev;
skb               463 net/tipc/bearer.c 	skb->protocol = htons(ETH_P_TIPC);
skb               464 net/tipc/bearer.c 	dev_hard_header(skb, dev, ETH_P_TIPC, dest->value,
skb               465 net/tipc/bearer.c 			dev->dev_addr, skb->len);
skb               466 net/tipc/bearer.c 	dev_queue_xmit(skb);
skb               499 net/tipc/bearer.c 			  struct sk_buff *skb,
skb               502 net/tipc/bearer.c 	struct tipc_msg *hdr = buf_msg(skb);
skb               508 net/tipc/bearer.c 		b->media->send_msg(net, skb, b, dest);
skb               510 net/tipc/bearer.c 		kfree_skb(skb);
skb               521 net/tipc/bearer.c 	struct sk_buff *skb, *tmp;
skb               530 net/tipc/bearer.c 	skb_queue_walk_safe(xmitq, skb, tmp) {
skb               532 net/tipc/bearer.c 		if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb))))
skb               533 net/tipc/bearer.c 			b->media->send_msg(net, skb, b, dst);
skb               535 net/tipc/bearer.c 			kfree_skb(skb);
skb               548 net/tipc/bearer.c 	struct sk_buff *skb, *tmp;
skb               555 net/tipc/bearer.c 	skb_queue_walk_safe(xmitq, skb, tmp) {
skb               556 net/tipc/bearer.c 		hdr = buf_msg(skb);
skb               560 net/tipc/bearer.c 		b->media->send_msg(net, skb, b, &b->bcast_addr);
skb               576 net/tipc/bearer.c static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
skb               585 net/tipc/bearer.c 		   (skb->pkt_type <= PACKET_MULTICAST))) {
skb               586 net/tipc/bearer.c 		skb_mark_not_on_list(skb);
skb               587 net/tipc/bearer.c 		tipc_rcv(dev_net(b->pt.dev), skb, b);
skb               592 net/tipc/bearer.c 	kfree_skb(skb);
skb               685 net/tipc/bearer.c 	struct sk_buff *skb, *_skb;
skb               689 net/tipc/bearer.c 		skb = pskb_copy(_skb, GFP_ATOMIC);
skb               690 net/tipc/bearer.c 		if (!skb)
skb               693 net/tipc/bearer.c 		exp = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb));
skb               694 net/tipc/bearer.c 		if (exp > 0 && pskb_expand_head(skb, exp, 0, GFP_ATOMIC)) {
skb               695 net/tipc/bearer.c 			kfree_skb(skb);
skb               699 net/tipc/bearer.c 		skb_reset_network_header(skb);
skb               700 net/tipc/bearer.c 		dev_hard_header(skb, dev, ETH_P_TIPC, dev->dev_addr,
skb               701 net/tipc/bearer.c 				dev->dev_addr, skb->len);
skb               702 net/tipc/bearer.c 		skb->dev = dev;
skb               703 net/tipc/bearer.c 		skb->pkt_type = PACKET_HOST;
skb               704 net/tipc/bearer.c 		skb->ip_summed = CHECKSUM_UNNECESSARY;
skb               705 net/tipc/bearer.c 		skb->protocol = eth_type_trans(skb, dev);
skb               706 net/tipc/bearer.c 		netif_rx_ni(skb);
skb               710 net/tipc/bearer.c static int tipc_loopback_rcv_pkt(struct sk_buff *skb, struct net_device *dev,
skb               713 net/tipc/bearer.c 	consume_skb(skb);
skb               749 net/tipc/bearer.c 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
skb               754 net/tipc/bearer.c 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_BEARER);
skb               758 net/tipc/bearer.c 	if (nla_put_string(msg->skb, TIPC_NLA_BEARER_NAME, bearer->name))
skb               761 net/tipc/bearer.c 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_BEARER_PROP);
skb               764 net/tipc/bearer.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, bearer->priority))
skb               766 net/tipc/bearer.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, bearer->tolerance))
skb               768 net/tipc/bearer.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bearer->window))
skb               771 net/tipc/bearer.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_MTU, bearer->mtu))
skb               774 net/tipc/bearer.c 	nla_nest_end(msg->skb, prop);
skb               783 net/tipc/bearer.c 	nla_nest_end(msg->skb, attrs);
skb               784 net/tipc/bearer.c 	genlmsg_end(msg->skb, hdr);
skb               789 net/tipc/bearer.c 	nla_nest_cancel(msg->skb, prop);
skb               791 net/tipc/bearer.c 	nla_nest_cancel(msg->skb, attrs);
skb               793 net/tipc/bearer.c 	genlmsg_cancel(msg->skb, hdr);
skb               798 net/tipc/bearer.c int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               804 net/tipc/bearer.c 	struct net *net = sock_net(skb->sk);
skb               810 net/tipc/bearer.c 	msg.skb = skb;
skb               811 net/tipc/bearer.c 	msg.portid = NETLINK_CB(cb->skb).portid;
skb               827 net/tipc/bearer.c 	return skb->len;
skb               830 net/tipc/bearer.c int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
skb               857 net/tipc/bearer.c 	msg.skb = rep;
skb               881 net/tipc/bearer.c int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
skb               887 net/tipc/bearer.c 	struct net *net = sock_net(skb->sk);
skb               912 net/tipc/bearer.c int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
skb               917 net/tipc/bearer.c 	err = __tipc_nl_bearer_disable(skb, info);
skb               923 net/tipc/bearer.c int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
skb               928 net/tipc/bearer.c 	struct net *net = sock_net(skb->sk);
skb               966 net/tipc/bearer.c int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
skb               971 net/tipc/bearer.c 	err = __tipc_nl_bearer_enable(skb, info);
skb               977 net/tipc/bearer.c int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
skb               983 net/tipc/bearer.c 	struct net *net = sock_net(skb->sk);
skb              1020 net/tipc/bearer.c int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
skb              1024 net/tipc/bearer.c 	struct net *net = sock_net(skb->sk);
skb              1077 net/tipc/bearer.c int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
skb              1082 net/tipc/bearer.c 	err = __tipc_nl_bearer_set(skb, info);
skb              1095 net/tipc/bearer.c 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
skb              1100 net/tipc/bearer.c 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MEDIA);
skb              1104 net/tipc/bearer.c 	if (nla_put_string(msg->skb, TIPC_NLA_MEDIA_NAME, media->name))
skb              1107 net/tipc/bearer.c 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_MEDIA_PROP);
skb              1110 net/tipc/bearer.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, media->priority))
skb              1112 net/tipc/bearer.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, media->tolerance))
skb              1114 net/tipc/bearer.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, media->window))
skb              1117 net/tipc/bearer.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_MTU, media->mtu))
skb              1120 net/tipc/bearer.c 	nla_nest_end(msg->skb, prop);
skb              1121 net/tipc/bearer.c 	nla_nest_end(msg->skb, attrs);
skb              1122 net/tipc/bearer.c 	genlmsg_end(msg->skb, hdr);
skb              1127 net/tipc/bearer.c 	nla_nest_cancel(msg->skb, prop);
skb              1129 net/tipc/bearer.c 	nla_nest_cancel(msg->skb, attrs);
skb              1131 net/tipc/bearer.c 	genlmsg_cancel(msg->skb, hdr);
skb              1136 net/tipc/bearer.c int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              1145 net/tipc/bearer.c 	msg.skb = skb;
skb              1146 net/tipc/bearer.c 	msg.portid = NETLINK_CB(cb->skb).portid;
skb              1159 net/tipc/bearer.c 	return skb->len;
skb              1162 net/tipc/bearer.c int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
skb              1188 net/tipc/bearer.c 	msg.skb = rep;
skb              1212 net/tipc/bearer.c int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
skb              1263 net/tipc/bearer.c int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
skb              1268 net/tipc/bearer.c 	err = __tipc_nl_media_set(skb, info);
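bearer.c's L2 send path guarantees device headroom before handing the skb down: grow the head with pskb_expand_head() when dev->hard_header_len exceeds skb_headroom(), then dev_hard_header() and dev_queue_xmit(). A hedged sketch of that transmit prep; ETH_P_802_EX1 (an experimental ethertype) stands in for ETH_P_TIPC:

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_l2_send(struct net_device *dev, struct sk_buff *skb,
			const u8 *dest)
{
	int delta = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb));

	/* Reallocate the head only when headroom is actually short. */
	if (delta > 0 && pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_802_EX1);
	dev_hard_header(skb, dev, ETH_P_802_EX1, dest, dev->dev_addr,
			skb->len);
	return dev_queue_xmit(skb);
}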
skb               179 net/tipc/bearer.h void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b);
skb               193 net/tipc/bearer.h int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
skb               194 net/tipc/bearer.h int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
skb               195 net/tipc/bearer.h int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
skb               196 net/tipc/bearer.h int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
skb               197 net/tipc/bearer.h int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
skb               198 net/tipc/bearer.h int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info);
skb               199 net/tipc/bearer.h int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
skb               200 net/tipc/bearer.h int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
skb               201 net/tipc/bearer.h int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info);
skb               203 net/tipc/bearer.h int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
skb               204 net/tipc/bearer.h int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info);
skb               205 net/tipc/bearer.h int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
skb               206 net/tipc/bearer.h int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
skb               228 net/tipc/bearer.h 			  struct sk_buff *skb,
skb                49 net/tipc/diag.c static int __tipc_add_sock_diag(struct sk_buff *skb,
skb                57 net/tipc/diag.c 	nlh = nlmsg_put_answer(skb, cb, SOCK_DIAG_BY_FAMILY, 0,
skb                62 net/tipc/diag.c 	err = tipc_sk_fill_sock_diag(skb, cb, tsk, req->tidiag_states,
skb                67 net/tipc/diag.c 	nlmsg_end(skb, nlh);
skb                71 net/tipc/diag.c static int tipc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb                73 net/tipc/diag.c 	return tipc_nl_sk_walk(skb, cb, __tipc_add_sock_diag);
skb                76 net/tipc/diag.c static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
skb                80 net/tipc/diag.c 	struct net *net = sock_net(skb->sk);
skb                91 net/tipc/diag.c 		netlink_dump_start(net->diag_nlsk, skb, h, &c);
skb                69 net/tipc/discover.c 	struct sk_buff *skb;
skb                80 net/tipc/discover.c static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
skb                87 net/tipc/discover.c 	hdr = buf_msg(skb);
skb               106 net/tipc/discover.c 	struct sk_buff *skb;
skb               108 net/tipc/discover.c 	skb = tipc_buf_acquire(MAX_H_SIZE + NODE_ID_LEN, GFP_ATOMIC);
skb               109 net/tipc/discover.c 	if (!skb)
skb               111 net/tipc/discover.c 	hdr = buf_msg(skb);
skb               112 net/tipc/discover.c 	tipc_disc_init_msg(net, skb, mtyp, b);
skb               115 net/tipc/discover.c 	tipc_bearer_xmit_skb(net, b->identity, skb, maddr);
skb               162 net/tipc/discover.c 		msg_set_prevnode(buf_msg(d->skb), sugg_addr);
skb               170 net/tipc/discover.c 		msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
skb               171 net/tipc/discover.c 		msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
skb               191 net/tipc/discover.c void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
skb               195 net/tipc/discover.c 	struct tipc_msg *hdr = buf_msg(skb);
skb               211 net/tipc/discover.c 	skb_linearize(skb);
skb               212 net/tipc/discover.c 	hdr = buf_msg(skb);
skb               220 net/tipc/discover.c 	kfree_skb(skb);
skb               292 net/tipc/discover.c 	struct sk_buff *skb = NULL;
skb               321 net/tipc/discover.c 		msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
skb               322 net/tipc/discover.c 		msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
skb               327 net/tipc/discover.c 	skb = skb_clone(d->skb, GFP_ATOMIC);
skb               331 net/tipc/discover.c 	if (skb)
skb               332 net/tipc/discover.c 		tipc_bearer_xmit_skb(net, bearer_id, skb, &maddr);
skb               345 net/tipc/discover.c 		     struct tipc_media_addr *dest, struct sk_buff **skb)
skb               353 net/tipc/discover.c 	d->skb = tipc_buf_acquire(MAX_H_SIZE + NODE_ID_LEN, GFP_ATOMIC);
skb               354 net/tipc/discover.c 	if (!d->skb) {
skb               358 net/tipc/discover.c 	tipc_disc_init_msg(net, d->skb, DSC_REQ_MSG, b);
skb               363 net/tipc/discover.c 		msg_set_type(buf_msg(d->skb), DSC_TRIAL_MSG);
skb               375 net/tipc/discover.c 	*skb = skb_clone(d->skb, GFP_ATOMIC);
skb               386 net/tipc/discover.c 	kfree_skb(d->skb);
skb               400 net/tipc/discover.c 	struct sk_buff *skb;
skb               403 net/tipc/discover.c 	tipc_disc_init_msg(net, d->skb, DSC_REQ_MSG, b);
skb               411 net/tipc/discover.c 	skb = skb_clone(d->skb, GFP_ATOMIC);
skb               413 net/tipc/discover.c 	if (skb)
skb               414 net/tipc/discover.c 		tipc_bearer_xmit_skb(net, b->identity, skb, &maddr);
skb                43 net/tipc/discover.h 		     struct tipc_media_addr *dest, struct sk_buff **skb);
skb               465 net/tipc/group.c static void tipc_group_sort_msg(struct sk_buff *skb, struct sk_buff_head *defq)
skb               467 net/tipc/group.c 	struct tipc_msg *_hdr, *hdr = buf_msg(skb);
skb               478 net/tipc/group.c 			__skb_queue_before(defq, _skb, skb);
skb               484 net/tipc/group.c 	__skb_queue_tail(defq, skb);
skb               492 net/tipc/group.c 	struct sk_buff *skb = __skb_dequeue(inputq);
skb               500 net/tipc/group.c 	if (!skb)
skb               503 net/tipc/group.c 	hdr = buf_msg(skb);
skb               517 net/tipc/group.c 	TIPC_SKB_CB(skb)->orig_member = m->instance;
skb               519 net/tipc/group.c 	tipc_group_sort_msg(skb, defq);
skb               521 net/tipc/group.c 	while ((skb = skb_peek(defq))) {
skb               522 net/tipc/group.c 		hdr = buf_msg(skb);
skb               559 net/tipc/group.c 			__skb_queue_tail(inputq, skb);
skb               561 net/tipc/group.c 			kfree_skb(skb);
skb               578 net/tipc/group.c 	kfree_skb(skb);
skb               667 net/tipc/group.c 	struct sk_buff *skb;
skb               680 net/tipc/group.c 	skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_GRP_MEMBER_EVT,
skb               683 net/tipc/group.c 	if (!skb)
skb               686 net/tipc/group.c 	hdr = buf_msg(skb);
skb               692 net/tipc/group.c 	TIPC_SKB_CB(skb)->orig_member = m->instance;
skb               693 net/tipc/group.c 	__skb_queue_tail(inputq, skb);
skb               700 net/tipc/group.c 	struct sk_buff *skb;
skb               703 net/tipc/group.c 	skb = tipc_msg_create(GROUP_PROTOCOL, mtyp, INT_H_SIZE, 0,
skb               706 net/tipc/group.c 	if (!skb)
skb               714 net/tipc/group.c 	hdr = buf_msg(skb);
skb               731 net/tipc/group.c 	__skb_queue_tail(xmitq, skb);
skb               919 net/tipc/group.c int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
skb               921 net/tipc/group.c 	struct nlattr *group = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_GROUP);
skb               926 net/tipc/group.c 	if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
skb               928 net/tipc/group.c 	    nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
skb               930 net/tipc/group.c 	    nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT,
skb               935 net/tipc/group.c 		if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_NODE_SCOPE))
skb               939 net/tipc/group.c 		if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE))
skb               943 net/tipc/group.c 		if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_OPEN))
skb               946 net/tipc/group.c 	nla_nest_end(skb, group);
skb               950 net/tipc/group.c 	nla_nest_cancel(skb, group);
skb                75 net/tipc/group.h int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb);
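group.c's tipc_group_sort_msg() keeps its defer queue ordered by walking it and inserting with __skb_queue_before(), falling back to the tail. A small sketch of that sorted insertion; demo_seqno() is a hypothetical accessor for a sequence number kept in skb->cb, not TIPC's buf_seqno():

#include <linux/skbuff.h>

static u16 demo_seqno(struct sk_buff *skb)
{
	return *(u16 *)skb->cb;		/* hypothetical cb layout */
}

/* Insert @skb so @list stays ordered by ascending seqno. The caller
 * holds whatever lock guards @list; the __ variants do no locking.
 */
static void demo_queue_sorted(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *pos;

	skb_queue_walk(list, pos) {
		if (demo_seqno(pos) > demo_seqno(skb)) {
			__skb_queue_before(list, pos, skb);
			return;
		}
	}
	__skb_queue_tail(list, skb);
}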
skb               235 net/tipc/link.c static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
skb               380 net/tipc/link.c 	struct sk_buff *skb = skb_peek(&l->deferdq);
skb               385 net/tipc/link.c 	if (skb)
skb               386 net/tipc/link.c 		gap = buf_seqno(skb) - l->rcv_nxt;
skb               716 net/tipc/link.c 	struct sk_buff *skb;
skb               724 net/tipc/link.c 	skb = skb_peek(&l->transmq);
skb               725 net/tipc/link.c 	if (!skb)
skb               727 net/tipc/link.c 	msg = buf_msg(skb);
skb               831 net/tipc/link.c 	struct sk_buff *skb;
skb               834 net/tipc/link.c 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
skb               836 net/tipc/link.c 	if (!skb)
skb               838 net/tipc/link.c 	msg_set_dest_droppable(buf_msg(skb), true);
skb               839 net/tipc/link.c 	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
skb               840 net/tipc/link.c 	skb_queue_tail(&l->wakeupq, skb);
skb               856 net/tipc/link.c 	struct sk_buff *skb, *tmp;
skb               866 net/tipc/link.c 	skb_queue_walk_safe(wakeupq, skb, tmp) {
skb               867 net/tipc/link.c 		imp = TIPC_SKB_CB(skb)->chain_imp;
skb               871 net/tipc/link.c 		__skb_unlink(skb, wakeupq);
skb               872 net/tipc/link.c 		__skb_queue_tail(&tmpq, skb);
skb               951 net/tipc/link.c 	struct sk_buff *skb, *_skb, **tskb;
skb               979 net/tipc/link.c 		skb = skb_peek(list);
skb               980 net/tipc/link.c 		hdr = buf_msg(skb);
skb               986 net/tipc/link.c 			_skb = skb_clone(skb, GFP_ATOMIC);
skb               992 net/tipc/link.c 			__skb_queue_tail(transmq, skb);
skb               995 net/tipc/link.c 				TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
skb               997 net/tipc/link.c 			TIPC_SKB_CB(skb)->ackers = l->ackers;
skb              1028 net/tipc/link.c 	struct sk_buff *skb, *_skb;
skb              1036 net/tipc/link.c 		skb = skb_peek(&l->backlogq);
skb              1037 net/tipc/link.c 		if (!skb)
skb              1039 net/tipc/link.c 		_skb = skb_clone(skb, GFP_ATOMIC);
skb              1043 net/tipc/link.c 		hdr = buf_msg(skb);
skb              1046 net/tipc/link.c 		if (unlikely(skb == l->backlog[imp].target_bskb))
skb              1048 net/tipc/link.c 		__skb_queue_tail(&l->transmq, skb);
skb              1051 net/tipc/link.c 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
skb              1054 net/tipc/link.c 		TIPC_SKB_CB(skb)->ackers = l->ackers;
skb              1077 net/tipc/link.c 	struct sk_buff *skb = skb_peek(&l->transmq);
skb              1080 net/tipc/link.c 	if (!skb)
skb              1083 net/tipc/link.c 	if (!TIPC_SKB_CB(skb)->retr_cnt)
skb              1086 net/tipc/link.c 	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
skb              1090 net/tipc/link.c 	hdr = buf_msg(skb);
skb              1101 net/tipc/link.c 		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
skb              1102 net/tipc/link.c 		TIPC_SKB_CB(skb)->retr_cnt);
skb              1128 net/tipc/link.c 	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
skb              1134 net/tipc/link.c 	if (!skb)
skb              1144 net/tipc/link.c 	skb_queue_walk(&l->transmq, skb) {
skb              1145 net/tipc/link.c 		hdr = buf_msg(skb);
skb              1151 net/tipc/link.c 		if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
skb              1153 net/tipc/link.c 		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
skb              1154 net/tipc/link.c 		_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
skb              1165 net/tipc/link.c 		if (!TIPC_SKB_CB(skb)->retr_cnt++)
skb              1166 net/tipc/link.c 			TIPC_SKB_CB(skb)->retr_stamp = jiffies;
skb              1176 net/tipc/link.c static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
skb              1180 net/tipc/link.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              1188 net/tipc/link.c 			skb_queue_tail(mc_inputq, skb);
skb              1193 net/tipc/link.c 		skb_queue_tail(inputq, skb);
skb              1196 net/tipc/link.c 		skb_queue_tail(mc_inputq, skb);
skb              1200 net/tipc/link.c 		skb_queue_tail(l->namedq, skb);
skb              1209 net/tipc/link.c 		kfree_skb(skb);
skb              1218 net/tipc/link.c static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
skb              1222 net/tipc/link.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              1232 net/tipc/link.c 		while (tipc_msg_extract(skb, &iskb, &pos))
skb              1238 net/tipc/link.c 		if (tipc_buf_append(reasm_skb, &skb)) {
skb              1240 net/tipc/link.c 			tipc_data_input(l, skb, inputq);
skb              1252 net/tipc/link.c 	kfree_skb(skb);
skb              1263 net/tipc/link.c static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
skb              1269 net/tipc/link.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              1276 net/tipc/link.c 		kfree_skb(skb);
skb              1282 net/tipc/link.c 		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
skb              1287 net/tipc/link.c 		kfree_skb(skb);
skb              1297 net/tipc/link.c 		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
skb              1304 net/tipc/link.c 		iskb = skb;
skb              1331 net/tipc/link.c 	struct sk_buff *skb, *tmp;
skb              1333 net/tipc/link.c 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
skb              1334 net/tipc/link.c 		if (more(buf_seqno(skb), acked))
skb              1336 net/tipc/link.c 		__skb_unlink(skb, &l->transmq);
skb              1337 net/tipc/link.c 		kfree_skb(skb);
skb              1351 net/tipc/link.c 	struct sk_buff *skb = skb_peek(&l->deferdq);
skb              1356 net/tipc/link.c 	if (!skb)
skb              1359 net/tipc/link.c 	expect = buf_seqno(skb);
skb              1360 net/tipc/link.c 	skb_queue_walk(&l->deferdq, skb) {
skb              1361 net/tipc/link.c 		seqno = buf_seqno(skb);
skb              1404 net/tipc/link.c 	struct sk_buff *skb, *_skb, *tmp;
skb              1412 net/tipc/link.c 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
skb              1413 net/tipc/link.c 		seqno = buf_seqno(skb);
skb              1418 net/tipc/link.c 			__skb_unlink(skb, &l->transmq);
skb              1419 net/tipc/link.c 			kfree_skb(skb);
skb              1427 net/tipc/link.c 			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
skb              1429 net/tipc/link.c 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
skb              1430 net/tipc/link.c 			_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
skb              1442 net/tipc/link.c 			if (!TIPC_SKB_CB(skb)->retr_cnt++)
skb              1443 net/tipc/link.c 				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
skb              1491 net/tipc/link.c 	struct sk_buff *skb;
skb              1499 net/tipc/link.c 	skb = skb_peek_tail(xmitq);
skb              1500 net/tipc/link.c 	if (skb && (l->state == LINK_RESET))
skb              1501 net/tipc/link.c 		msg_set_peer_stopping(buf_msg(skb), 1);
skb              1533 net/tipc/link.c int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
skb              1537 net/tipc/link.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              1543 net/tipc/link.c 		return tipc_link_proto_rcv(l, skb, xmitq);
skb              1549 net/tipc/link.c 		hdr = buf_msg(skb);
skb              1575 net/tipc/link.c 			__tipc_skb_queue_sorted(defq, seqno, skb);
skb              1585 net/tipc/link.c 			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
skb              1586 net/tipc/link.c 		else if (!tipc_data_input(l, skb, l->inputq))
skb              1587 net/tipc/link.c 			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
skb              1592 net/tipc/link.c 	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
skb              1596 net/tipc/link.c 	kfree_skb(skb);
skb              1606 net/tipc/link.c 	struct sk_buff *skb;
skb              1625 net/tipc/link.c 	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
skb              1628 net/tipc/link.c 	if (!skb)
skb              1631 net/tipc/link.c 	hdr = buf_msg(skb);
skb              1658 net/tipc/link.c 		skb_trim(skb, INT_H_SIZE + glen + dlen);
skb              1670 net/tipc/link.c 		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
skb              1676 net/tipc/link.c 	skb->priority = TC_PRIO_CONTROL;
skb              1677 net/tipc/link.c 	__skb_queue_tail(xmitq, skb);
skb              1678 net/tipc/link.c 	trace_tipc_proto_build(skb, false, l->name);
skb              1687 net/tipc/link.c 	struct sk_buff *skb;
skb              1691 net/tipc/link.c 	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
skb              1694 net/tipc/link.c 	if (!skb) {
skb              1699 net/tipc/link.c 	hdr = buf_msg(skb);
skb              1707 net/tipc/link.c 	__skb_queue_tail(&tnlq, skb);
skb              1718 net/tipc/link.c 	struct sk_buff *skb, *tnlskb;
skb              1735 net/tipc/link.c 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
skb              1738 net/tipc/link.c 	if (!skb) {
skb              1742 net/tipc/link.c 	__skb_queue_tail(&tnlq, skb);
skb              1783 net/tipc/link.c 	skb_queue_walk(queue, skb) {
skb              1784 net/tipc/link.c 		hdr = buf_msg(skb);
skb              1800 net/tipc/link.c 				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
skb              1837 net/tipc/link.c 		skb_queue_walk(&tnlq, skb) {
skb              1838 net/tipc/link.c 			hdr = buf_msg(skb);
skb              1939 net/tipc/link.c static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
skb              1942 net/tipc/link.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              1959 net/tipc/link.c 	trace_tipc_proto_rcv(skb, false, l->name);
skb              1966 net/tipc/link.c 	skb_linearize(skb);
skb              1967 net/tipc/link.c 	hdr = buf_msg(skb);
skb              1971 net/tipc/link.c 		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
skb              2083 net/tipc/link.c 	kfree_skb(skb);
skb              2093 net/tipc/link.c 	struct sk_buff *skb;
skb              2099 net/tipc/link.c 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
skb              2101 net/tipc/link.c 	if (!skb)
skb              2103 net/tipc/link.c 	hdr = buf_msg(skb);
skb              2111 net/tipc/link.c 	__skb_queue_tail(xmitq, skb);
skb              2228 net/tipc/link.c 	struct sk_buff *skb, *tmp;
skb              2239 net/tipc/link.c 	skb_queue_walk(&snd_l->transmq, skb) {
skb              2240 net/tipc/link.c 		if (more(buf_seqno(skb), l->acked))
skb              2245 net/tipc/link.c 	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
skb              2246 net/tipc/link.c 		if (more(buf_seqno(skb), acked))
skb              2248 net/tipc/link.c 		if (!--TIPC_SKB_CB(skb)->ackers) {
skb              2249 net/tipc/link.c 			__skb_unlink(skb, &snd_l->transmq);
skb              2250 net/tipc/link.c 			kfree_skb(skb);
skb              2263 net/tipc/link.c int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
skb              2266 net/tipc/link.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              2275 net/tipc/link.c 	kfree_skb(skb);
skb              2368 net/tipc/link.c static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
skb              2416 net/tipc/link.c 	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
skb              2421 net/tipc/link.c 		if (nla_put_u32(skb, map[i].key, map[i].val))
skb              2424 net/tipc/link.c 	nla_nest_end(skb, stats);
skb              2428 net/tipc/link.c 	nla_nest_cancel(skb, stats);
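
__tipc_nl_add_stats() above follows the standard netlink nesting idiom: open a nest, emit attributes, then either close the nest or cancel it so a partially built reply never leaks into the message. A minimal sketch with hypothetical attribute type values:

	#include <net/netlink.h>

	static int demo_fill_stats(struct sk_buff *skb, u32 rx, u32 tx)
	{
		struct nlattr *stats;

		stats = nla_nest_start_noflag(skb, 1 /* stand-in type */);
		if (!stats)
			return -EMSGSIZE;

		if (nla_put_u32(skb, 2, rx) ||	/* fails only when out of room */
		    nla_put_u32(skb, 3, tx))
			goto msg_full;

		nla_nest_end(skb, stats);	/* patches the nest length */
		return 0;

	msg_full:
		nla_nest_cancel(skb, stats);	/* trims the partial nest */
		return -EMSGSIZE;
	}
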
skb              2443 net/tipc/link.c 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
skb              2448 net/tipc/link.c 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
skb              2452 net/tipc/link.c 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
skb              2454 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
skb              2456 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
skb              2458 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
skb              2460 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
skb              2464 net/tipc/link.c 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
skb              2467 net/tipc/link.c 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
skb              2470 net/tipc/link.c 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
skb              2473 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
skb              2475 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
skb              2477 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
skb              2480 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
skb              2482 net/tipc/link.c 	nla_nest_end(msg->skb, prop);
skb              2484 net/tipc/link.c 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
skb              2488 net/tipc/link.c 	nla_nest_end(msg->skb, attrs);
skb              2489 net/tipc/link.c 	genlmsg_end(msg->skb, hdr);
skb              2494 net/tipc/link.c 	nla_nest_cancel(msg->skb, prop);
skb              2496 net/tipc/link.c 	nla_nest_cancel(msg->skb, attrs);
skb              2498 net/tipc/link.c 	genlmsg_cancel(msg->skb, hdr);
skb              2503 net/tipc/link.c static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
skb              2537 net/tipc/link.c 	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
skb              2542 net/tipc/link.c 		if (nla_put_u32(skb, map[i].key, map[i].val))
skb              2545 net/tipc/link.c 	nla_nest_end(skb, nest);
skb              2549 net/tipc/link.c 	nla_nest_cancel(skb, nest);
skb              2570 net/tipc/link.c 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
skb              2577 net/tipc/link.c 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
skb              2582 net/tipc/link.c 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
skb              2585 net/tipc/link.c 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
skb              2587 net/tipc/link.c 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
skb              2589 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
skb              2591 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
skb              2594 net/tipc/link.c 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
skb              2597 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
skb              2599 net/tipc/link.c 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
skb              2602 net/tipc/link.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
skb              2605 net/tipc/link.c 	nla_nest_end(msg->skb, prop);
skb              2607 net/tipc/link.c 	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
skb              2612 net/tipc/link.c 	nla_nest_end(msg->skb, attrs);
skb              2613 net/tipc/link.c 	genlmsg_end(msg->skb, hdr);
skb              2618 net/tipc/link.c 	nla_nest_cancel(msg->skb, prop);
skb              2620 net/tipc/link.c 	nla_nest_cancel(msg->skb, attrs);
skb              2623 net/tipc/link.c 	genlmsg_cancel(msg->skb, hdr);
skb               132 net/tipc/link.h int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
skb               151 net/tipc/link.h int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
skb               709 net/tipc/monitor.c 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
skb               714 net/tipc/monitor.c 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON_PEER);
skb               718 net/tipc/monitor.c 	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_ADDR, peer->addr))
skb               720 net/tipc/monitor.c 	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_APPLIED, peer->applied))
skb               724 net/tipc/monitor.c 		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_UP))
skb               727 net/tipc/monitor.c 		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_LOCAL))
skb               730 net/tipc/monitor.c 		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_HEAD))
skb               734 net/tipc/monitor.c 		if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_DOMGEN, dom->gen))
skb               736 net/tipc/monitor.c 		if (nla_put_u64_64bit(msg->skb, TIPC_NLA_MON_PEER_UPMAP,
skb               739 net/tipc/monitor.c 		if (nla_put(msg->skb, TIPC_NLA_MON_PEER_MEMBERS,
skb               744 net/tipc/monitor.c 	nla_nest_end(msg->skb, attrs);
skb               745 net/tipc/monitor.c 	genlmsg_end(msg->skb, hdr);
skb               749 net/tipc/monitor.c 	nla_nest_cancel(msg->skb, attrs);
skb               751 net/tipc/monitor.c 	genlmsg_cancel(msg->skb, hdr);
skb               798 net/tipc/monitor.c 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
skb               803 net/tipc/monitor.c 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
skb               808 net/tipc/monitor.c 	if (nla_put_u32(msg->skb, TIPC_NLA_MON_REF, bearer_id))
skb               811 net/tipc/monitor.c 		if (nla_put_flag(msg->skb, TIPC_NLA_MON_ACTIVE))
skb               813 net/tipc/monitor.c 	if (nla_put_string(msg->skb, TIPC_NLA_MON_BEARER_NAME, bearer_name))
skb               815 net/tipc/monitor.c 	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEERCNT, mon->peer_cnt))
skb               817 net/tipc/monitor.c 	if (nla_put_u32(msg->skb, TIPC_NLA_MON_LISTGEN, mon->list_gen))
skb               821 net/tipc/monitor.c 	nla_nest_end(msg->skb, attrs);
skb               822 net/tipc/monitor.c 	genlmsg_end(msg->skb, hdr);
skb               828 net/tipc/monitor.c 	nla_nest_cancel(msg->skb, attrs);
skb               830 net/tipc/monitor.c 	genlmsg_cancel(msg->skb, hdr);
skb                63 net/tipc/msg.c 	struct sk_buff *skb;
skb                66 net/tipc/msg.c 	skb = alloc_skb_fclone(buf_size, gfp);
skb                67 net/tipc/msg.c 	if (skb) {
skb                68 net/tipc/msg.c 		skb_reserve(skb, BUF_HEADROOM);
skb                69 net/tipc/msg.c 		skb_put(skb, size);
skb                70 net/tipc/msg.c 		skb->next = NULL;
skb                72 net/tipc/msg.c 	return skb;
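
tipc_buf_acquire() above (net/tipc/msg.c:63-72) is a headroom-reserving allocator: alloc_skb_fclone() sizes the buffer for headroom plus payload, skb_reserve() moves data/tail past the headroom, and skb_put() opens the payload. A sketch with an illustrative stand-in for TIPC's BUF_HEADROOM constant:

	#include <linux/skbuff.h>

	#define DEMO_HEADROOM	80	/* stand-in for BUF_HEADROOM */

	static struct sk_buff *demo_buf_acquire(u32 size, gfp_t gfp)
	{
		struct sk_buff *skb = alloc_skb_fclone(DEMO_HEADROOM + size, gfp);

		if (skb) {
			skb_reserve(skb, DEMO_HEADROOM); /* room for pushed headers */
			skb_put(skb, size);		 /* open the payload area */
			skb->next = NULL;
		}
		return skb;
	}
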
skb               206 net/tipc/msg.c 	struct sk_buff *skb = *_skb;
skb               211 net/tipc/msg.c 	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
skb               212 net/tipc/msg.c 		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
skb               213 net/tipc/msg.c 		if (!skb)
skb               216 net/tipc/msg.c 		*_skb = skb;
skb               219 net/tipc/msg.c 	if (unlikely(TIPC_SKB_CB(skb)->validated))
skb               221 net/tipc/msg.c 	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
skb               224 net/tipc/msg.c 	hsz = msg_hdr_sz(buf_msg(skb));
skb               227 net/tipc/msg.c 	if (unlikely(!pskb_may_pull(skb, hsz)))
skb               230 net/tipc/msg.c 	hdr = buf_msg(skb);
skb               239 net/tipc/msg.c 	if (unlikely(skb->len < msz))
skb               242 net/tipc/msg.c 	TIPC_SKB_CB(skb)->validated = true;
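
The validation excerpt above (net/tipc/msg.c:206-242) pulls the header into the linear area before trusting any field in it. A condensed sketch of that two-step check, assuming TIPC's accessors msg_hdr_sz()/msg_size() and the MIN_H_SIZE constant; note buf_msg() must be re-read after each pskb_may_pull(), which may reallocate the head:

	static bool demo_msg_validate(struct sk_buff *skb)
	{
		u32 hsz, msz;

		if (!pskb_may_pull(skb, MIN_H_SIZE))	/* fixed header linear? */
			return false;

		hsz = msg_hdr_sz(buf_msg(skb));		/* safe to read now */
		if (hsz < MIN_H_SIZE || hsz > skb->len)
			return false;
		if (!pskb_may_pull(skb, hsz))		/* full header linear? */
			return false;

		msz = msg_size(buf_msg(skb));		/* declared total length */
		return msz >= hsz && skb->len >= msz;
	}
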
skb               257 net/tipc/msg.c int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
skb               266 net/tipc/msg.c 	if (skb_linearize(skb))
skb               269 net/tipc/msg.c 	data = (u8 *)skb->data;
skb               270 net/tipc/msg.c 	dsz = msg_size(buf_msg(skb));
skb               325 net/tipc/msg.c 	struct sk_buff *skb;
skb               336 net/tipc/msg.c 		skb = tipc_buf_acquire(msz, GFP_KERNEL);
skb               339 net/tipc/msg.c 		if (unlikely(!skb)) {
skb               349 net/tipc/msg.c 		skb_orphan(skb);
skb               350 net/tipc/msg.c 		__skb_queue_tail(list, skb);
skb               351 net/tipc/msg.c 		skb_copy_to_linear_data(skb, mhdr, mhsz);
skb               352 net/tipc/msg.c 		pktpos = skb->data + mhsz;
skb               367 net/tipc/msg.c 	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
skb               368 net/tipc/msg.c 	if (!skb)
skb               370 net/tipc/msg.c 	skb_orphan(skb);
skb               371 net/tipc/msg.c 	__skb_queue_tail(list, skb);
skb               372 net/tipc/msg.c 	pktpos = skb->data;
skb               373 net/tipc/msg.c 	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
skb               376 net/tipc/msg.c 	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
skb               398 net/tipc/msg.c 		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
skb               399 net/tipc/msg.c 		if (!skb) {
skb               403 net/tipc/msg.c 		skb_orphan(skb);
skb               404 net/tipc/msg.c 		__skb_queue_tail(list, skb);
skb               408 net/tipc/msg.c 		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
skb               409 net/tipc/msg.c 		pktpos = skb->data + INT_H_SIZE;
skb               413 net/tipc/msg.c 	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
skb               429 net/tipc/msg.c bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
skb               439 net/tipc/msg.c 	if (!skb)
skb               441 net/tipc/msg.c 	bmsg = buf_msg(skb);
skb               452 net/tipc/msg.c 	if (unlikely(skb_tailroom(skb) < (pad + msz)))
skb               460 net/tipc/msg.c 	skb_put(skb, pad + msz);
skb               461 net/tipc/msg.c 	skb_copy_to_linear_data_offset(skb, start, msg, msz);
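
tipc_msg_bundle() above appends a message into an existing bundle buffer only when the tailroom can hold the padded copy. A simplified sketch of that step (padding keeps each inner message 4-byte aligned; the offset handling here is illustrative, the real code derives it from the bundle header's size field):

	static bool demo_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
				    u32 offset)
	{
		u32 msz = msg_size(msg);
		u32 pad = ALIGN(offset, 4) - offset;	/* align inner message */

		if (skb_tailroom(bskb) < pad + msz)
			return false;		/* caller sends bundle as is */

		skb_put(bskb, pad + msz);	/* extend the linear area */
		skb_copy_to_linear_data_offset(bskb, offset + pad, msg, msz);
		return true;
	}
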
skb               476 net/tipc/msg.c bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
skb               482 net/tipc/msg.c 	if (unlikely(skb_linearize(skb)))
skb               485 net/tipc/msg.c 	hdr = buf_msg(skb);
skb               506 net/tipc/msg.c 	kfree_skb(skb);
skb               521 net/tipc/msg.c bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
skb               551 net/tipc/msg.c 	*skb = _skb;
skb               563 net/tipc/msg.c bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
skb               565 net/tipc/msg.c 	struct sk_buff *_skb = *skb;
skb               589 net/tipc/msg.c 	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
skb               590 net/tipc/msg.c 	if (!*skb)
skb               592 net/tipc/msg.c 	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
skb               593 net/tipc/msg.c 	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
skb               596 net/tipc/msg.c 	hdr = buf_msg(*skb);
skb               611 net/tipc/msg.c 	*skb = NULL;
skb               617 net/tipc/msg.c 	struct sk_buff *skb, *_skb;
skb               619 net/tipc/msg.c 	skb_queue_walk(msg, skb) {
skb               620 net/tipc/msg.c 		_skb = skb_clone(skb, GFP_ATOMIC);
skb               638 net/tipc/msg.c bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
skb               640 net/tipc/msg.c 	struct tipc_msg *msg = buf_msg(skb);
skb               651 net/tipc/msg.c 	if (skb_linearize(skb))
skb               653 net/tipc/msg.c 	msg = buf_msg(skb);
skb               668 net/tipc/msg.c 	if (!skb_cloned(skb))
skb               678 net/tipc/msg.c 	struct sk_buff *skb, *tmp = NULL;
skb               683 net/tipc/msg.c 	while ((skb = __skb_dequeue(list))) {
skb               684 net/tipc/msg.c 		skb->next = NULL;
skb               685 net/tipc/msg.c 		if (tipc_buf_append(&tmp, &skb)) {
skb               686 net/tipc/msg.c 			__skb_queue_tail(list, skb);
skb               703 net/tipc/msg.c 	struct sk_buff *skb, *_skb;
skb               710 net/tipc/msg.c 		skb = skb_peek(list);
skb               711 net/tipc/msg.c 		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
skb               712 net/tipc/msg.c 		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
skb               720 net/tipc/msg.c 	skb_queue_walk(list, skb) {
skb               721 net/tipc/msg.c 		frag = skb_clone(skb, GFP_ATOMIC);
skb               741 net/tipc/msg.c 	struct sk_buff *skb, *_skb;
skb               743 net/tipc/msg.c 	skb_queue_walk(msg, skb) {
skb               744 net/tipc/msg.c 		_skb = pskb_copy(skb, GFP_ATOMIC);
skb               761 net/tipc/msg.c 			     struct sk_buff *skb)
skb               766 net/tipc/msg.c 		__skb_queue_head(list, skb);
skb               771 net/tipc/msg.c 		__skb_queue_tail(list, skb);
skb               780 net/tipc/msg.c 		__skb_queue_before(list, _skb, skb);
skb               783 net/tipc/msg.c 	kfree_skb(skb);
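
__tipc_skb_queue_sorted() above keeps the deferred queue ordered by sequence number. A sketch of the same insert logic, assuming TIPC's buf_seqno() and the u16 wraparound comparators less()/more(): fast-path append when the new buffer belongs at the tail, otherwise walk to the first higher seqno and insert before it; an exact duplicate is dropped.

	static void demo_queue_sorted(struct sk_buff_head *list, u16 seqno,
				      struct sk_buff *skb)
	{
		struct sk_buff *_skb;

		if (skb_queue_empty(list) ||
		    less(buf_seqno(skb_peek_tail(list)), seqno)) {
			__skb_queue_tail(list, skb);
			return;
		}
		skb_queue_walk(list, _skb) {
			if (more(buf_seqno(_skb), seqno)) {
				__skb_queue_before(list, _skb, skb);
				return;
			}
			if (buf_seqno(_skb) == seqno)
				break;			/* duplicate seqno */
		}
		kfree_skb(skb);				/* drop the duplicate */
	}
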
skb               786 net/tipc/msg.c void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
skb               789 net/tipc/msg.c 	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
skb               790 net/tipc/msg.c 		__skb_queue_tail(xmitq, skb);
skb               153 net/tipc/msg.h static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
skb               155 net/tipc/msg.h 	return (struct tipc_msg *)skb->data;
skb              1051 net/tipc/msg.h bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
skb              1052 net/tipc/msg.h void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
skb              1060 net/tipc/msg.h bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu);
skb              1061 net/tipc/msg.h bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
skb              1063 net/tipc/msg.h bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
skb              1064 net/tipc/msg.h int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
skb              1068 net/tipc/msg.h bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
skb              1074 net/tipc/msg.h 			     struct sk_buff *skb);
skb              1077 net/tipc/msg.h static inline u16 buf_seqno(struct sk_buff *skb)
skb              1079 net/tipc/msg.h 	return msg_seqno(buf_msg(skb));
skb              1082 net/tipc/msg.h static inline int buf_roundup_len(struct sk_buff *skb)
skb              1084 net/tipc/msg.h 	return (skb->len / 1024 + 1) * 1024;
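
buf_roundup_len() above rounds skb->len up into the next 1 KiB bucket: integer division truncates, so (len / 1024 + 1) * 1024 maps 100 bytes to 1024 and 1500 bytes to 2048. msg.c (line 211 above) divides truesize by this to spot buffers whose allocation dwarfs their payload; a worked check:

	static inline bool demo_wasteful(struct sk_buff *skb)
	{
		/* e.g. truesize 8192 carrying 100 bytes: 8192/1024 >= 4 */
		return skb->truesize / buf_roundup_len(skb) >= 4;
	}
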
skb              1094 net/tipc/msg.h 	struct sk_buff *skb;
skb              1097 net/tipc/msg.h 	skb = skb_peek(list);
skb              1098 net/tipc/msg.h 	if (skb)
skb              1099 net/tipc/msg.h 		skb_get(skb);
skb              1101 net/tipc/msg.h 	return skb;
skb              1114 net/tipc/msg.h 	struct sk_buff *skb;
skb              1119 net/tipc/msg.h 	skb_queue_walk(list, skb) {
skb              1120 net/tipc/msg.h 		dport = msg_destport(buf_msg(skb));
skb              1121 net/tipc/msg.h 		if (!filter || skb_queue_is_last(list, skb))
skb              1139 net/tipc/msg.h 	struct sk_buff *_skb, *tmp, *skb = NULL;
skb              1145 net/tipc/msg.h 			skb = _skb;
skb              1150 net/tipc/msg.h 	return skb;
skb              1194 net/tipc/msg.h 	struct sk_buff *skb = skb_peek(list);
skb              1196 net/tipc/msg.h 	if (skb && less_eq(buf_seqno(skb), seqno)) {
skb              1197 net/tipc/msg.h 		__skb_unlink(skb, list);
skb              1198 net/tipc/msg.h 		return skb;
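
__tipc_skb_dequeue() above hands back the head of a queue only when its sequence number is due. A usage sketch in the spirit of tipc_link_rcv()'s do/while loop at net/tipc/link.c:1592 above (the "processing" here is a stand-in):

	static void demo_drain_deferred(struct sk_buff_head *defq, u16 *rcv_nxt)
	{
		struct sk_buff *skb;

		while ((skb = __tipc_skb_dequeue(defq, *rcv_nxt))) {
			(*rcv_nxt)++;	/* in-order delivery advances next */
			kfree_skb(skb);	/* stand-in for real processing */
		}
	}
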
skb                91 net/tipc/name_distr.c 	struct sk_buff *skb;
skb               100 net/tipc/name_distr.c 	skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
skb               101 net/tipc/name_distr.c 	if (!skb) {
skb               106 net/tipc/name_distr.c 	item = (struct distr_item *)msg_data(buf_msg(skb));
skb               108 net/tipc/name_distr.c 	return skb;
skb               147 net/tipc/name_distr.c 	struct sk_buff *skb = NULL;
skb               155 net/tipc/name_distr.c 		if (!skb) {
skb               156 net/tipc/name_distr.c 			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
skb               158 net/tipc/name_distr.c 			if (!skb) {
skb               162 net/tipc/name_distr.c 			msg_set_bc_ack_invalid(buf_msg(skb), true);
skb               163 net/tipc/name_distr.c 			item = (struct distr_item *)msg_data(buf_msg(skb));
skb               173 net/tipc/name_distr.c 			__skb_queue_tail(list, skb);
skb               174 net/tipc/name_distr.c 			skb = NULL;
skb               178 net/tipc/name_distr.c 	if (skb) {
skb               179 net/tipc/name_distr.c 		msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem));
skb               180 net/tipc/name_distr.c 		skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
skb               181 net/tipc/name_distr.c 		__skb_queue_tail(list, skb);
skb               308 net/tipc/name_distr.c 	struct sk_buff *skb;
skb               312 net/tipc/name_distr.c 	for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) {
skb               313 net/tipc/name_distr.c 		skb_linearize(skb);
skb               314 net/tipc/name_distr.c 		msg = buf_msg(skb);
skb               323 net/tipc/name_distr.c 		kfree_skb(skb);
skb               617 net/tipc/name_table.c 	struct sk_buff *skb = NULL;
skb               630 net/tipc/name_table.c 		skb = tipc_named_publish(net, p);
skb               635 net/tipc/name_table.c 	if (skb)
skb               636 net/tipc/name_table.c 		tipc_node_broadcast(net, skb);
skb               649 net/tipc/name_table.c 	struct sk_buff *skb = NULL;
skb               657 net/tipc/name_table.c 		skb = tipc_named_withdraw(net, p);
skb               666 net/tipc/name_table.c 	if (skb) {
skb               667 net/tipc/name_table.c 		tipc_node_broadcast(net, skb);
skb               826 net/tipc/name_table.c 		hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
skb               832 net/tipc/name_table.c 		attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NAME_TABLE);
skb               836 net/tipc/name_table.c 		b = nla_nest_start_noflag(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
skb               840 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, service->type))
skb               842 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sr->lower))
skb               844 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sr->upper))
skb               846 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
skb               848 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
skb               850 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->port))
skb               852 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
skb               855 net/tipc/name_table.c 		nla_nest_end(msg->skb, b);
skb               856 net/tipc/name_table.c 		nla_nest_end(msg->skb, attrs);
skb               857 net/tipc/name_table.c 		genlmsg_end(msg->skb, hdr);
skb               864 net/tipc/name_table.c 	nla_nest_cancel(msg->skb, b);
skb               866 net/tipc/name_table.c 	nla_nest_cancel(msg->skb, attrs);
skb               868 net/tipc/name_table.c 	genlmsg_cancel(msg->skb, hdr);
skb               942 net/tipc/name_table.c int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               944 net/tipc/name_table.c 	struct net *net = sock_net(skb->sk);
skb               955 net/tipc/name_table.c 	msg.skb = skb;
skb               956 net/tipc/name_table.c 	msg.portid = NETLINK_CB(cb->skb).portid;
skb               980 net/tipc/name_table.c 	return skb->len;
skb               107 net/tipc/name_table.h int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
skb               187 net/tipc/net.c 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
skb               192 net/tipc/net.c 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NET);
skb               196 net/tipc/net.c 	if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
skb               198 net/tipc/net.c 	if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID, *w0, 0))
skb               200 net/tipc/net.c 	if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID_W1, *w1, 0))
skb               202 net/tipc/net.c 	nla_nest_end(msg->skb, attrs);
skb               203 net/tipc/net.c 	genlmsg_end(msg->skb, hdr);
skb               208 net/tipc/net.c 	nla_nest_cancel(msg->skb, attrs);
skb               210 net/tipc/net.c 	genlmsg_cancel(msg->skb, hdr);
skb               215 net/tipc/net.c int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               217 net/tipc/net.c 	struct net *net = sock_net(skb->sk);
skb               225 net/tipc/net.c 	msg.skb = skb;
skb               226 net/tipc/net.c 	msg.portid = NETLINK_CB(cb->skb).portid;
skb               237 net/tipc/net.c 	return skb->len;
skb               240 net/tipc/net.c int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
skb               243 net/tipc/net.c 	struct net *net = sock_net(skb->sk);
skb               295 net/tipc/net.c int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
skb               300 net/tipc/net.c 	err = __tipc_nl_net_set(skb, info);
skb                47 net/tipc/net.h int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
skb                48 net/tipc/net.h int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
skb                49 net/tipc/net.h int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
skb                44 net/tipc/netlink.h 	struct sk_buff *skb;
skb                72 net/tipc/netlink_compat.c 	int (*doit)(struct sk_buff *skb, struct genl_info *info);
skb                74 net/tipc/netlink_compat.c 			 struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
skb                77 net/tipc/netlink_compat.c static int tipc_skb_tailroom(struct sk_buff *skb)
skb                82 net/tipc/netlink_compat.c 	tailroom = skb_tailroom(skb);
skb                83 net/tipc/netlink_compat.c 	limit = TIPC_SKB_MAX - skb->len;
skb                96 net/tipc/netlink_compat.c static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
skb                98 net/tipc/netlink_compat.c 	struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
skb               100 net/tipc/netlink_compat.c 	if (tipc_skb_tailroom(skb) < TLV_SPACE(len))
skb               103 net/tipc/netlink_compat.c 	skb_put(skb, TLV_SPACE(len));
skb               112 net/tipc/netlink_compat.c static void tipc_tlv_init(struct sk_buff *skb, u16 type)
skb               114 net/tipc/netlink_compat.c 	struct tlv_desc *tlv = (struct tlv_desc *)skb->data;
skb               118 net/tipc/netlink_compat.c 	skb_put(skb, sizeof(struct tlv_desc));
skb               121 net/tipc/netlink_compat.c static int tipc_tlv_sprintf(struct sk_buff *skb, const char *fmt, ...)
skb               130 net/tipc/netlink_compat.c 	rem = tipc_skb_tailroom(skb);
skb               132 net/tipc/netlink_compat.c 	tlv = (struct tlv_desc *)skb->data;
skb               141 net/tipc/netlink_compat.c 	skb_put(skb, n);
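
The compat helpers above bound every legacy reply at TIPC_SKB_MAX bytes, so usable tailroom is the lesser of the skb's real tailroom and what the cap still allows. A sketch of the TLV append built on that, using struct tlv_desc and the TLV_SPACE()/TLV_LENGTH()/TLV_DATA() macros from the TIPC config uapi header:

	static int demo_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
	{
		struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);

		if (skb_tailroom(skb) < TLV_SPACE(len))
			return -EMSGSIZE;

		skb_put(skb, TLV_SPACE(len));	/* claim the space first */
		tlv->tlv_type = htons(type);
		tlv->tlv_len = htons(TLV_LENGTH(len));
		if (len)
			memcpy(TLV_DATA(tlv), data, len);
		return 0;
	}
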
skb               192 net/tipc/netlink_compat.c 	cb.skb = arg;
skb               395 net/tipc/netlink_compat.c 					struct sk_buff *skb,
skb               405 net/tipc/netlink_compat.c 	bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER);
skb               418 net/tipc/netlink_compat.c 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
skb               421 net/tipc/netlink_compat.c 	if (nla_put_u32(skb, TIPC_NLA_BEARER_DOMAIN, ntohl(b->disc_domain)))
skb               425 net/tipc/netlink_compat.c 		prop = nla_nest_start_noflag(skb, TIPC_NLA_BEARER_PROP);
skb               428 net/tipc/netlink_compat.c 		if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(b->priority)))
skb               430 net/tipc/netlink_compat.c 		nla_nest_end(skb, prop);
skb               432 net/tipc/netlink_compat.c 	nla_nest_end(skb, bearer);
skb               438 net/tipc/netlink_compat.c 					 struct sk_buff *skb,
skb               447 net/tipc/netlink_compat.c 	bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER);
skb               459 net/tipc/netlink_compat.c 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
skb               462 net/tipc/netlink_compat.c 	nla_nest_end(skb, bearer);
skb               675 net/tipc/netlink_compat.c static int __tipc_add_link_prop(struct sk_buff *skb,
skb               681 net/tipc/netlink_compat.c 		return nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value));
skb               683 net/tipc/netlink_compat.c 		return nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value));
skb               685 net/tipc/netlink_compat.c 		return nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value));
skb               691 net/tipc/netlink_compat.c static int tipc_nl_compat_media_set(struct sk_buff *skb,
skb               700 net/tipc/netlink_compat.c 	media = nla_nest_start_noflag(skb, TIPC_NLA_MEDIA);
skb               704 net/tipc/netlink_compat.c 	if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
skb               707 net/tipc/netlink_compat.c 	prop = nla_nest_start_noflag(skb, TIPC_NLA_MEDIA_PROP);
skb               711 net/tipc/netlink_compat.c 	__tipc_add_link_prop(skb, msg, lc);
skb               712 net/tipc/netlink_compat.c 	nla_nest_end(skb, prop);
skb               713 net/tipc/netlink_compat.c 	nla_nest_end(skb, media);
skb               718 net/tipc/netlink_compat.c static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
skb               727 net/tipc/netlink_compat.c 	bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER);
skb               731 net/tipc/netlink_compat.c 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
skb               734 net/tipc/netlink_compat.c 	prop = nla_nest_start_noflag(skb, TIPC_NLA_BEARER_PROP);
skb               738 net/tipc/netlink_compat.c 	__tipc_add_link_prop(skb, msg, lc);
skb               739 net/tipc/netlink_compat.c 	nla_nest_end(skb, prop);
skb               740 net/tipc/netlink_compat.c 	nla_nest_end(skb, bearer);
skb               745 net/tipc/netlink_compat.c static int __tipc_nl_compat_link_set(struct sk_buff *skb,
skb               754 net/tipc/netlink_compat.c 	link = nla_nest_start_noflag(skb, TIPC_NLA_LINK);
skb               758 net/tipc/netlink_compat.c 	if (nla_put_string(skb, TIPC_NLA_LINK_NAME, lc->name))
skb               761 net/tipc/netlink_compat.c 	prop = nla_nest_start_noflag(skb, TIPC_NLA_LINK_PROP);
skb               765 net/tipc/netlink_compat.c 	__tipc_add_link_prop(skb, msg, lc);
skb               766 net/tipc/netlink_compat.c 	nla_nest_end(skb, prop);
skb               767 net/tipc/netlink_compat.c 	nla_nest_end(skb, link);
skb               773 net/tipc/netlink_compat.c 				   struct sk_buff *skb,
skb               795 net/tipc/netlink_compat.c 		return tipc_nl_compat_media_set(skb, msg);
skb               801 net/tipc/netlink_compat.c 		return tipc_nl_compat_bearer_set(skb, msg);
skb               804 net/tipc/netlink_compat.c 	return __tipc_nl_compat_link_set(skb, msg);
skb               808 net/tipc/netlink_compat.c 					   struct sk_buff *skb,
skb               817 net/tipc/netlink_compat.c 	link = nla_nest_start_noflag(skb, TIPC_NLA_LINK);
skb               829 net/tipc/netlink_compat.c 	if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
skb               832 net/tipc/netlink_compat.c 	nla_nest_end(skb, link);
skb              1105 net/tipc/netlink_compat.c 				  struct sk_buff *skb,
skb              1113 net/tipc/netlink_compat.c 	net = nla_nest_start_noflag(skb, TIPC_NLA_NET);
skb              1118 net/tipc/netlink_compat.c 		if (nla_put_u32(skb, TIPC_NLA_NET_ADDR, val))
skb              1121 net/tipc/netlink_compat.c 		if (nla_put_u32(skb, TIPC_NLA_NET_ID, val))
skb              1124 net/tipc/netlink_compat.c 	nla_nest_end(skb, net);
skb              1261 net/tipc/netlink_compat.c static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
skb              1272 net/tipc/netlink_compat.c 	req_nlh = (struct nlmsghdr *)skb->data;
skb              1276 net/tipc/netlink_compat.c 	msg.dst_sk = skb->sk;
skb              1278 net/tipc/netlink_compat.c 	if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) {
skb              1305 net/tipc/netlink_compat.c 	genlmsg_unicast(msg.net, msg.rep, NETLINK_CB(skb).portid);
skb              1326 net/tipc/node.c 	struct sk_buff *skb;
skb              1348 net/tipc/node.c 		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
skb              1352 net/tipc/node.c 		if (likely(skb))
skb              1353 net/tipc/node.c 			skb_queue_tail(inputq, skb);
skb              1399 net/tipc/node.c 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
skb              1404 net/tipc/node.c 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
skb              1408 net/tipc/node.c 	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
skb              1411 net/tipc/node.c 		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
skb              1414 net/tipc/node.c 	nla_nest_end(msg->skb, attrs);
skb              1415 net/tipc/node.c 	genlmsg_end(msg->skb, hdr);
skb              1420 net/tipc/node.c 	nla_nest_cancel(msg->skb, attrs);
skb              1422 net/tipc/node.c 	genlmsg_cancel(msg->skb, hdr);
skb              1490 net/tipc/node.c int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
skb              1496 net/tipc/node.c 	__skb_queue_tail(&head, skb);
skb              1506 net/tipc/node.c 	struct sk_buff *skb;
skb              1509 net/tipc/node.c 	while ((skb = __skb_dequeue(xmitq))) {
skb              1510 net/tipc/node.c 		selector = msg_origport(buf_msg(skb));
skb              1511 net/tipc/node.c 		dnode = msg_destnode(buf_msg(skb));
skb              1512 net/tipc/node.c 		tipc_node_xmit_skb(net, skb, dnode, selector);
skb              1517 net/tipc/node.c void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
skb              1530 net/tipc/node.c 		txskb = pskb_copy(skb, GFP_ATOMIC);
skb              1538 net/tipc/node.c 	kfree_skb(skb);
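
tipc_node_broadcast() above implements broadcast as replicated unicast: each destination gets its own pskb_copy() (shared data pages, private header that per-link code may rewrite) and the original is freed at the end. A sketch with an illustrative destination array in place of the node list walk:

	static void demo_broadcast(struct net *net, struct sk_buff *skb,
				   u32 *dests, int cnt)
	{
		struct sk_buff *txskb;
		int i;

		for (i = 0; i < cnt; i++) {
			txskb = pskb_copy(skb, GFP_ATOMIC);
			if (!txskb)
				continue;	/* best effort per peer */
			tipc_node_xmit_skb(net, txskb, dests[i], 0);
		}
		kfree_skb(skb);			/* original no longer needed */
	}
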
skb              1590 net/tipc/node.c static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
skb              1596 net/tipc/node.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              1609 net/tipc/node.c 		kfree_skb(skb);
skb              1615 net/tipc/node.c 	rc = tipc_bcast_rcv(net, be->link, skb);
skb              1647 net/tipc/node.c static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
skb              1650 net/tipc/node.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              1662 net/tipc/node.c 		trace_tipc_skb_dump(skb, false, "skb for node state check");
skb              1683 net/tipc/node.c 		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
skb              1800 net/tipc/node.c void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
skb              1814 net/tipc/node.c 	TIPC_SKB_CB(skb)->validated = false;
skb              1815 net/tipc/node.c 	if (unlikely(!tipc_msg_validate(&skb)))
skb              1817 net/tipc/node.c 	hdr = buf_msg(skb);
skb              1824 net/tipc/node.c 			return tipc_disc_rcv(net, skb, b);
skb              1826 net/tipc/node.c 			return tipc_node_bc_rcv(net, skb, bearer_id);
skb              1850 net/tipc/node.c 			rc = tipc_link_rcv(le->link, skb, &xmitq);
skb              1851 net/tipc/node.c 			skb = NULL;
skb              1858 net/tipc/node.c 	if (unlikely(skb)) {
skb              1859 net/tipc/node.c 		if (unlikely(skb_linearize(skb)))
skb              1862 net/tipc/node.c 		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
skb              1864 net/tipc/node.c 				rc = tipc_link_rcv(le->link, skb, &xmitq);
skb              1865 net/tipc/node.c 				skb = NULL;
skb              1891 net/tipc/node.c 	kfree_skb(skb);
skb              1924 net/tipc/node.c int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
skb              1926 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
skb              1978 net/tipc/node.c int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              1981 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
skb              1991 net/tipc/node.c 	msg.skb = skb;
skb              1992 net/tipc/node.c 	msg.portid = NETLINK_CB(cb->skb).portid;
skb              2037 net/tipc/node.c 	return skb->len;
skb              2078 net/tipc/node.c int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
skb              2088 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
skb              2157 net/tipc/node.c int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
skb              2182 net/tipc/node.c 	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
skb              2183 net/tipc/node.c 	if (!msg.skb)
skb              2215 net/tipc/node.c 	return genlmsg_reply(msg.skb, info);
skb              2218 net/tipc/node.c 	nlmsg_free(msg.skb);
skb              2222 net/tipc/node.c int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
skb              2230 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
skb              2296 net/tipc/node.c int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
skb              2298 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
skb              2310 net/tipc/node.c 	msg.skb = skb;
skb              2311 net/tipc/node.c 	msg.portid = NETLINK_CB(cb->skb).portid;
skb              2364 net/tipc/node.c 	return skb->len;
skb              2367 net/tipc/node.c int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
skb              2370 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
skb              2401 net/tipc/node.c 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
skb              2406 net/tipc/node.c 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
skb              2412 net/tipc/node.c 	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
skb              2415 net/tipc/node.c 	nla_nest_end(msg->skb, attrs);
skb              2416 net/tipc/node.c 	genlmsg_end(msg->skb, hdr);
skb              2421 net/tipc/node.c 	nla_nest_cancel(msg->skb, attrs);
skb              2423 net/tipc/node.c 	genlmsg_cancel(msg->skb, hdr);
skb              2428 net/tipc/node.c int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
skb              2430 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
skb              2434 net/tipc/node.c 	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
skb              2435 net/tipc/node.c 	if (!msg.skb)
skb              2442 net/tipc/node.c 		nlmsg_free(msg.skb);
skb              2446 net/tipc/node.c 	return genlmsg_reply(msg.skb, info);
skb              2449 net/tipc/node.c int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
skb              2451 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
skb              2460 net/tipc/node.c 	msg.skb = skb;
skb              2461 net/tipc/node.c 	msg.portid = NETLINK_CB(cb->skb).portid;
skb              2473 net/tipc/node.c 	return skb->len;
skb              2476 net/tipc/node.c int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
skb              2479 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
skb              2516 net/tipc/node.c 	msg.skb = skb;
skb              2517 net/tipc/node.c 	msg.portid = NETLINK_CB(cb->skb).portid;
skb              2530 net/tipc/node.c 	return skb->len;
skb                88 net/tipc/node.h int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
skb                92 net/tipc/node.h void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
skb                98 net/tipc/node.h int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
skb                99 net/tipc/node.h int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb);
skb               100 net/tipc/node.h int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info);
skb               101 net/tipc/node.h int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info);
skb               102 net/tipc/node.h int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info);
skb               103 net/tipc/node.h int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info);
skb               105 net/tipc/node.h int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info);
skb               106 net/tipc/node.h int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
skb               107 net/tipc/node.h int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
skb               108 net/tipc/node.h int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
skb               122 net/tipc/socket.c static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
skb               243 net/tipc/socket.c static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
skb               249 net/tipc/socket.c 	if (!tipc_msg_reverse(onode, &skb, err))
skb               252 net/tipc/socket.c 	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
skb               253 net/tipc/socket.c 	dnode = msg_destnode(buf_msg(skb));
skb               254 net/tipc/socket.c 	selector = msg_origport(buf_msg(skb));
skb               255 net/tipc/socket.c 	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
skb               265 net/tipc/socket.c 	struct sk_buff *skb;
skb               267 net/tipc/socket.c 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
skb               268 net/tipc/socket.c 		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
skb               509 net/tipc/socket.c 	struct sk_buff *skb;
skb               521 net/tipc/socket.c 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
skb               522 net/tipc/socket.c 		if (TIPC_SKB_CB(skb)->bytes_read) {
skb               523 net/tipc/socket.c 			kfree_skb(skb);
skb               531 net/tipc/socket.c 		tipc_sk_respond(sk, skb, error);
skb               538 net/tipc/socket.c 		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
skb               542 net/tipc/socket.c 		if (skb)
skb               543 net/tipc/socket.c 			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
skb              1136 net/tipc/socket.c 	struct sk_buff *skb, *_skb;
skb              1147 net/tipc/socket.c 	skb = tipc_skb_peek(arrvq, &inputq->lock);
skb              1148 net/tipc/socket.c 	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
skb              1149 net/tipc/socket.c 		hdr = buf_msg(skb);
skb              1152 net/tipc/socket.c 		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
skb              1158 net/tipc/socket.c 			if (skb_peek(arrvq) == skb) {
skb              1160 net/tipc/socket.c 				__skb_queue_tail(inputq, skb);
skb              1162 net/tipc/socket.c 			kfree_skb(skb);
skb              1190 net/tipc/socket.c 			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
skb              1200 net/tipc/socket.c 		if (skb_peek(arrvq) == skb) {
skb              1206 net/tipc/socket.c 		kfree_skb(skb);
skb              1216 net/tipc/socket.c static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
skb              1220 net/tipc/socket.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              1228 net/tipc/socket.c 		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
skb              1245 net/tipc/socket.c 		__skb_queue_tail(inputq, skb);
skb              1253 net/tipc/socket.c 		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
skb              1254 net/tipc/socket.c 			__skb_queue_tail(xmitq, skb);
skb              1267 net/tipc/socket.c 	kfree_skb(skb);
skb              1553 net/tipc/socket.c static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
skb              1556 net/tipc/socket.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              1577 net/tipc/socket.c 	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
skb              1592 net/tipc/socket.c static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
skb              1604 net/tipc/socket.c 	msg = buf_msg(skb);
skb              1615 net/tipc/socket.c 			if (skb_linearize(skb))
skb              1617 net/tipc/socket.c 			msg = buf_msg(skb);
skb              1662 net/tipc/socket.c 	struct sk_buff *skb = NULL;
skb              1669 net/tipc/socket.c 	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
skb              1672 net/tipc/socket.c 	if (!skb)
skb              1674 net/tipc/socket.c 	msg = buf_msg(skb);
skb              1683 net/tipc/socket.c 	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
skb              1747 net/tipc/socket.c 	struct sk_buff *skb;
skb              1767 net/tipc/socket.c 		skb = skb_peek(&sk->sk_receive_queue);
skb              1768 net/tipc/socket.c 		hdr = buf_msg(skb);
skb              1779 net/tipc/socket.c 	tipc_sk_set_orig_addr(m, skb);
skb              1780 net/tipc/socket.c 	rc = tipc_sk_anc_data_recv(m, skb, tsk);
skb              1783 net/tipc/socket.c 	hdr = buf_msg(skb);
skb              1790 net/tipc/socket.c 		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
skb              1851 net/tipc/socket.c 	struct sk_buff *skb;
skb              1877 net/tipc/socket.c 		skb = skb_peek(&sk->sk_receive_queue);
skb              1878 net/tipc/socket.c 		skb_cb = TIPC_SKB_CB(skb);
skb              1879 net/tipc/socket.c 		hdr = buf_msg(skb);
skb              1892 net/tipc/socket.c 			tipc_sk_set_orig_addr(m, skb);
skb              1893 net/tipc/socket.c 			rc = tipc_sk_anc_data_recv(m, skb, tsk);
skb              1896 net/tipc/socket.c 			hdr = buf_msg(skb);
skb              1903 net/tipc/socket.c 			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
skb              1983 net/tipc/socket.c 	struct sk_buff *skb = __skb_dequeue(inputq);
skb              1985 net/tipc/socket.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              1991 net/tipc/socket.c 		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
skb              2014 net/tipc/socket.c 	kfree_skb(skb);
skb              2023 net/tipc/socket.c static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
skb              2027 net/tipc/socket.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              2122 net/tipc/socket.c static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
skb              2125 net/tipc/socket.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              2150 net/tipc/socket.c static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
skb              2156 net/tipc/socket.c 	struct tipc_msg *hdr = buf_msg(skb);
skb              2162 net/tipc/socket.c 	trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
skb              2163 net/tipc/socket.c 	TIPC_SKB_CB(skb)->bytes_read = 0;
skb              2165 net/tipc/socket.c 	__skb_queue_tail(&inputq, skb);
skb              2177 net/tipc/socket.c 	while ((skb = __skb_dequeue(&inputq))) {
skb              2178 net/tipc/socket.c 		hdr = buf_msg(skb);
skb              2179 net/tipc/socket.c 		limit = rcvbuf_limit(sk, skb);
skb              2180 net/tipc/socket.c 		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
skb              2184 net/tipc/socket.c 		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
skb              2185 net/tipc/socket.c 			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
skb              2192 net/tipc/socket.c 			if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
skb              2193 net/tipc/socket.c 				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
skb              2195 net/tipc/socket.c 				__skb_queue_tail(xmitq, skb);
skb              2200 net/tipc/socket.c 		__skb_queue_tail(&sk->sk_receive_queue, skb);
skb              2201 net/tipc/socket.c 		skb_set_owner_r(skb, sk);
skb              2202 net/tipc/socket.c 		trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
skb              2215 net/tipc/socket.c static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
skb              2223 net/tipc/socket.c 	tipc_sk_filter_rcv(sk, skb, &xmitq);
skb              2245 net/tipc/socket.c 	struct sk_buff *skb;
skb              2254 net/tipc/socket.c 		skb = tipc_skb_dequeue(inputq, dport);
skb              2255 net/tipc/socket.c 		if (unlikely(!skb))
skb              2260 net/tipc/socket.c 			tipc_sk_filter_rcv(sk, skb, xmitq);
skb              2268 net/tipc/socket.c 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
skb              2269 net/tipc/socket.c 		if (likely(!sk_add_backlog(sk, skb, lim))) {
skb              2270 net/tipc/socket.c 			trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
skb              2275 net/tipc/socket.c 		trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
skb              2279 net/tipc/socket.c 		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
skb              2280 net/tipc/socket.c 			trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
skb              2282 net/tipc/socket.c 			__skb_queue_tail(xmitq, skb);
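
The enqueue excerpt above defers work to the socket backlog while a user context owns the socket, bounded by the per-message receive limit; past the bound the message is reversed and bounced to its sender. A condensed sketch, assuming TIPC's tipc_msg_reverse()/tipc_own_addr() and the TIPC_ERR_OVERLOAD code from the uapi header:

	static void demo_enqueue(struct sock *sk, struct sk_buff *skb,
				 unsigned int limit, struct sk_buff_head *xmitq)
	{
		if (!sk_add_backlog(sk, skb, limit))
			return;			/* deferred successfully */

		/* Overload: reverse the message back toward its sender. */
		if (tipc_msg_reverse(tipc_own_addr(sock_net(sk)), &skb,
				     TIPC_ERR_OVERLOAD))
			__skb_queue_tail(xmitq, skb);
	}
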
skb              2301 net/tipc/socket.c 	struct sk_buff *skb;
skb              2320 net/tipc/socket.c 		skb = tipc_skb_dequeue(inputq, dport);
skb              2321 net/tipc/socket.c 		if (!skb)
skb              2326 net/tipc/socket.c 		if (tipc_msg_lookup_dest(net, skb, &err))
skb              2330 net/tipc/socket.c 		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
skb              2333 net/tipc/socket.c 		trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
skb              2335 net/tipc/socket.c 		dnode = msg_destnode(buf_msg(skb));
skb              2336 net/tipc/socket.c 		tipc_node_xmit_skb(net, skb, dnode, dport);
skb              2644 net/tipc/socket.c 	struct sk_buff *skb;
skb              2654 net/tipc/socket.c 	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
skb              2656 net/tipc/socket.c 	if (skb)
skb              2657 net/tipc/socket.c 		__skb_queue_tail(list, skb);
skb              3273 net/tipc/socket.c static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
skb              3282 net/tipc/socket.c 	nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
skb              3286 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
skb              3288 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
skb              3292 net/tipc/socket.c 		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
skb              3294 net/tipc/socket.c 		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
skb              3296 net/tipc/socket.c 		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
skb              3299 net/tipc/socket.c 	nla_nest_end(skb, nest);
skb              3304 net/tipc/socket.c 	nla_nest_cancel(skb, nest);
skb              3309 net/tipc/socket.c static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
skb              3312 net/tipc/socket.c 	struct net *net = sock_net(skb->sk);
skb              3315 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
skb              3316 net/tipc/socket.c 	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
skb              3320 net/tipc/socket.c 		if (__tipc_nl_add_sk_con(skb, tsk))
skb              3323 net/tipc/socket.c 		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
skb              3330 net/tipc/socket.c static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
skb              3336 net/tipc/socket.c 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              3341 net/tipc/socket.c 	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
skb              3345 net/tipc/socket.c 	if (__tipc_nl_add_sk_info(skb, tsk))
skb              3348 net/tipc/socket.c 	nla_nest_end(skb, attrs);
skb              3349 net/tipc/socket.c 	genlmsg_end(skb, hdr);
skb              3354 net/tipc/socket.c 	nla_nest_cancel(skb, attrs);
skb              3356 net/tipc/socket.c 	genlmsg_cancel(skb, hdr);
skb              3361 net/tipc/socket.c int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
skb              3362 net/tipc/socket.c 		    int (*skb_handler)(struct sk_buff *skb,
skb              3384 net/tipc/socket.c 		err = skb_handler(skb, cb, tsk);
skb              3396 net/tipc/socket.c 	return skb->len;
skb              3402 net/tipc/socket.c 	return __tipc_dump_start(cb, sock_net(cb->skb->sk));
skb              3434 net/tipc/socket.c int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
skb              3446 net/tipc/socket.c 	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
skb              3450 net/tipc/socket.c 	if (__tipc_nl_add_sk_info(skb, tsk))
skb              3453 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
skb              3454 net/tipc/socket.c 	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
skb              3455 net/tipc/socket.c 	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
skb              3456 net/tipc/socket.c 	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
skb              3457 net/tipc/socket.c 			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
skb              3459 net/tipc/socket.c 	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
skb              3464 net/tipc/socket.c 	stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
skb              3468 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
skb              3470 net/tipc/socket.c 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
skb              3472 net/tipc/socket.c 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
skb              3477 net/tipc/socket.c 	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
skb              3481 net/tipc/socket.c 	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
skb              3484 net/tipc/socket.c 	nla_nest_end(skb, stat);
skb              3487 net/tipc/socket.c 		if (tipc_group_fill_sock_diag(tsk->group, skb))
skb              3490 net/tipc/socket.c 	nla_nest_end(skb, attrs);
skb              3495 net/tipc/socket.c 	nla_nest_cancel(skb, stat);
skb              3497 net/tipc/socket.c 	nla_nest_cancel(skb, attrs);
skb              3503 net/tipc/socket.c int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              3505 net/tipc/socket.c 	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
skb              3509 net/tipc/socket.c static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
skb              3516 net/tipc/socket.c 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
skb              3521 net/tipc/socket.c 	attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
skb              3525 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
skb              3527 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
skb              3529 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
skb              3531 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
skb              3534 net/tipc/socket.c 	nla_nest_end(skb, attrs);
skb              3535 net/tipc/socket.c 	genlmsg_end(skb, hdr);
skb              3540 net/tipc/socket.c 	nla_nest_cancel(skb, attrs);
skb              3542 net/tipc/socket.c 	genlmsg_cancel(skb, hdr);
skb              3548 net/tipc/socket.c static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
skb              3577 net/tipc/socket.c 		err = __tipc_nl_add_sk_publ(skb, cb, p);
skb              3588 net/tipc/socket.c int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb              3594 net/tipc/socket.c 	struct net *net = sock_net(skb->sk);
skb              3628 net/tipc/socket.c 	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
skb              3638 net/tipc/socket.c 	return skb->len;
skb              3715 net/tipc/socket.c bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
skb              3718 net/tipc/socket.c 	unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
skb              3733 net/tipc/socket.c bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
skb              3735 net/tipc/socket.c 	unsigned int lim = rcvbuf_limit(sk, skb);
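
Both helpers above encode the same admission test used in tipc_sk_filter_rcv() (net/tipc/socket.c:2184 above): a buffer is over limit when the memory already charged to the socket plus this skb's truesize reaches the message-dependent limit. As a one-liner:

	static bool demo_overlimit(struct sock *sk, struct sk_buff *skb,
				   unsigned int lim)
	{
		return sk_rmem_alloc_get(sk) + skb->truesize >= lim;
	}
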
skb                62 net/tipc/socket.h int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
skb                63 net/tipc/socket.h int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
skb                64 net/tipc/socket.h int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
skb                67 net/tipc/socket.h int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
skb                68 net/tipc/socket.h 		    int (*skb_handler)(struct sk_buff *skb,
skb                75 net/tipc/socket.h bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb);
skb                76 net/tipc/socket.h bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb);
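
The net/tipc/socket.c entries above follow the standard netlink nesting discipline: open a nest, emit attributes, close the nest on success, and cancel it on any failure so the message stays well-formed. A minimal sketch of that pattern, with hypothetical attribute IDs (not TIPC's):

#include <net/netlink.h>

/* Hypothetical attribute IDs, for illustration only. */
enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_STATS, DEMO_ATTR_RCVQ, DEMO_ATTR_SENDQ };

static int demo_fill_stats(struct sk_buff *skb, u32 rcvq, u32 sendq)
{
	struct nlattr *stat;

	/* Open the nest; NULL means the message ran out of room. */
	stat = nla_nest_start_noflag(skb, DEMO_ATTR_STATS);
	if (!stat)
		return -EMSGSIZE;

	if (nla_put_u32(skb, DEMO_ATTR_RCVQ, rcvq) ||
	    nla_put_u32(skb, DEMO_ATTR_SENDQ, sendq))
		goto cancel;

	nla_nest_end(skb, stat);	/* patch in the final nest length */
	return 0;

cancel:
	nla_nest_cancel(skb, stat);	/* trim the partial nest back out */
	return -EMSGSIZE;
}
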
skb               603 net/tipc/topsrv.c 	struct sk_buff *skb;
skb               605 net/tipc/topsrv.c 	skb = tipc_msg_create(TOP_SRV, 0, INT_H_SIZE, sizeof(*evt),
skb               607 net/tipc/topsrv.c 	if (!skb)
skb               609 net/tipc/topsrv.c 	msg_set_dest_droppable(buf_msg(skb), true);
skb               610 net/tipc/topsrv.c 	memcpy(msg_data(buf_msg(skb)), evt, sizeof(*evt));
skb               612 net/tipc/topsrv.c 	__skb_queue_tail(&evtq, skb);
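
The topsrv.c entries build each subscription event into a fresh skb and append it to a local queue before flushing. A generic sketch of the same alloc-fill-queue step (sizes and payload are made up; the caller is assumed to hold whatever lock protects the queue):

#include <linux/skbuff.h>

/* Sketch: copy an event payload into a new skb and queue it. */
static int demo_queue_event(struct sk_buff_head *evtq,
			    const void *evt, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb_put_data(skb, evt, len);	/* reserve len bytes and copy */
	__skb_queue_tail(evtq, skb);	/* lockless variant: caller locks */
	return 0;
}
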
skb                53 net/tipc/trace.c int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf)
skb                60 net/tipc/trace.c 	if (!skb) {
skb                65 net/tipc/trace.c 	hdr = buf_msg(skb);
skb                66 net/tipc/trace.c 	skbcb = TIPC_SKB_CB(skb);
skb               133 net/tipc/trace.c 		       (skb->dev) ? skb->dev->name : "n/a");
skb               134 net/tipc/trace.c 	i += scnprintf(buf + i, sz - i, " %u", skb->len);
skb               135 net/tipc/trace.c 	i += scnprintf(buf + i, sz - i, " %u", skb->data_len);
skb               136 net/tipc/trace.c 	i += scnprintf(buf + i, sz - i, " %u", skb->hdr_len);
skb               137 net/tipc/trace.c 	i += scnprintf(buf + i, sz - i, " %u", skb->truesize);
skb               138 net/tipc/trace.c 	i += scnprintf(buf + i, sz - i, " %u", skb_cloned(skb));
skb               139 net/tipc/trace.c 	i += scnprintf(buf + i, sz - i, " %p", skb->sk);
skb               140 net/tipc/trace.c 	i += scnprintf(buf + i, sz - i, " %u", skb_shinfo(skb)->nr_frags);
skb               142 net/tipc/trace.c 		       ktime_to_ms(skb_get_ktime(skb)));
skb               144 net/tipc/trace.c 		       ktime_to_ms(skb_hwtstamps(skb)->hwtstamp));
skb               171 net/tipc/trace.c 	struct sk_buff *hskb, *tskb, *skb, *tmp;
skb               195 net/tipc/trace.c 		skb_queue_walk_safe(list, skb, tmp) {
skb               202 net/tipc/trace.c 			i += tipc_skb_dump(skb, false, buf + i);
skb               129 net/tipc/trace.h int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf);
skb               138 net/tipc/trace.h 	TP_PROTO(struct sk_buff *skb, bool more, const char *header),
skb               140 net/tipc/trace.h 	TP_ARGS(skb, more, header),
skb               149 net/tipc/trace.h 		tipc_skb_dump(skb, more, __get_str(buf));
skb               157 net/tipc/trace.h 	TP_PROTO(struct sk_buff *skb, bool more, const char *header), \
skb               158 net/tipc/trace.h 	TP_ARGS(skb, more, header))
skb               190 net/tipc/trace.h 	TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues,
skb               193 net/tipc/trace.h 	TP_ARGS(sk, skb, dqueues, header),
skb               199 net/tipc/trace.h 		__dynamic_array(char, skb_buf, (skb) ? SKB_LMIN : 1)
skb               206 net/tipc/trace.h 		if (skb)
skb               207 net/tipc/trace.h 			tipc_skb_dump(skb, false, __get_str(skb_buf));
skb               218 net/tipc/trace.h 	TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
skb               220 net/tipc/trace.h 	TP_ARGS(sk, skb, dqueues, header), \
skb               237 net/tipc/trace.h 	TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
skb               239 net/tipc/trace.h 	TP_ARGS(sk, skb, dqueues, header), \
skb               241 net/tipc/trace.h DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit1, tipc_sk_overlimit1(sk, skb));
skb               242 net/tipc/trace.h DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit2, tipc_sk_overlimit2(sk, skb));
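
tipc_skb_dump() in trace.c accumulates into a fixed buffer with the i += scnprintf(buf + i, sz - i, ...) idiom, which cannot overrun because scnprintf() returns the number of characters actually stored. A minimal sketch (the buffer size and the fields printed are illustrative):

#include <linux/kernel.h>
#include <linux/skbuff.h>

#define DEMO_DUMP_SZ 128	/* hypothetical buffer size */

static int demo_skb_dump(struct sk_buff *skb, char *buf)
{
	const int sz = DEMO_DUMP_SZ;
	int i = 0;

	/* Each call appends at buf + i; the space sz - i shrinks to match. */
	i += scnprintf(buf + i, sz - i, "len %u", skb->len);
	i += scnprintf(buf + i, sz - i, " truesize %u", skb->truesize);
	i += scnprintf(buf + i, sz - i, " cloned %u\n", skb_cloned(skb));
	return i;	/* bytes written, excluding the NUL */
}
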
skb               160 net/tipc/udp_media.c static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
skb               176 net/tipc/udp_media.c 				.flowi4_mark = skb->mark,
skb               188 net/tipc/udp_media.c 		udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
skb               210 net/tipc/udp_media.c 		err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
skb               220 net/tipc/udp_media.c 	kfree_skb(skb);
skb               224 net/tipc/udp_media.c static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
skb               234 net/tipc/udp_media.c 	if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
skb               235 net/tipc/udp_media.c 		err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
skb               240 net/tipc/udp_media.c 	skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
skb               248 net/tipc/udp_media.c 		return tipc_udp_xmit(net, skb, ub, src, dst,
skb               255 net/tipc/udp_media.c 		_skb = pskb_copy(skb, GFP_ATOMIC);
skb               268 net/tipc/udp_media.c 	kfree_skb(skb);
skb               324 net/tipc/udp_media.c static int tipc_udp_rcast_disc(struct tipc_bearer *b, struct sk_buff *skb)
skb               333 net/tipc/udp_media.c 	src.port = udp_hdr(skb)->source;
skb               335 net/tipc/udp_media.c 	if (ip_hdr(skb)->version == 4) {
skb               336 net/tipc/udp_media.c 		struct iphdr *iphdr = ip_hdr(skb);
skb               343 net/tipc/udp_media.c 	} else if (ip_hdr(skb)->version == 6) {
skb               344 net/tipc/udp_media.c 		struct ipv6hdr *iphdr = ipv6_hdr(skb);
skb               362 net/tipc/udp_media.c static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
skb               374 net/tipc/udp_media.c 	skb_pull(skb, sizeof(struct udphdr));
skb               375 net/tipc/udp_media.c 	hdr = buf_msg(skb);
skb               382 net/tipc/udp_media.c 		tipc_rcv(sock_net(sk), skb, b);
skb               387 net/tipc/udp_media.c 		err = tipc_udp_rcast_disc(b, skb);
skb               393 net/tipc/udp_media.c 	kfree_skb(skb);
skb               416 net/tipc/udp_media.c static int __tipc_nl_add_udp_addr(struct sk_buff *skb,
skb               426 net/tipc/udp_media.c 		if (nla_put(skb, nla_t, sizeof(ip4), &ip4))
skb               437 net/tipc/udp_media.c 		if (nla_put(skb, nla_t, sizeof(ip6), &ip6))
skb               445 net/tipc/udp_media.c int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
skb               449 net/tipc/udp_media.c 	u32 portid = NETLINK_CB(cb->skb).portid;
skb               458 net/tipc/udp_media.c 		struct net *net = sock_net(skb->sk);
skb               489 net/tipc/udp_media.c 		struct net *net = sock_net(skb->sk);
skb               511 net/tipc/udp_media.c 		hdr = genlmsg_put(skb, portid, cb->nlh->nlmsg_seq,
skb               517 net/tipc/udp_media.c 		err = __tipc_nl_add_udp_addr(skb, &rcast->addr,
skb               520 net/tipc/udp_media.c 			genlmsg_cancel(skb, hdr);
skb               523 net/tipc/udp_media.c 		genlmsg_end(skb, hdr);
skb               532 net/tipc/udp_media.c 	return skb->len;
skb               546 net/tipc/udp_media.c 	nest = nla_nest_start_noflag(msg->skb, TIPC_NLA_BEARER_UDP_OPTS);
skb               550 net/tipc/udp_media.c 	if (__tipc_nl_add_udp_addr(msg->skb, src, TIPC_NLA_UDP_LOCAL))
skb               554 net/tipc/udp_media.c 	if (__tipc_nl_add_udp_addr(msg->skb, dst, TIPC_NLA_UDP_REMOTE))
skb               558 net/tipc/udp_media.c 		if (nla_put_flag(msg->skb, TIPC_NLA_UDP_MULTI_REMOTEIP))
skb               562 net/tipc/udp_media.c 	nla_nest_end(msg->skb, nest);
skb               565 net/tipc/udp_media.c 	nla_nest_cancel(msg->skb, nest);
skb                46 net/tipc/udp_media.h int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb);
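
tipc_udp_send_msg() guards against insufficient headroom before handing the skb to the UDP tunnel layer: test skb_headroom() and reallocate the head with pskb_expand_head() only when needed. A sketch of that guard (DEMO_MIN_HEADROOM is a stand-in for UDP_MIN_HEADROOM):

#include <linux/skbuff.h>

#define DEMO_MIN_HEADROOM 48	/* hypothetical tunnel header budget */

static int demo_ensure_headroom(struct sk_buff *skb)
{
	if (skb_headroom(skb) >= DEMO_MIN_HEADROOM)
		return 0;	/* enough room to push tunnel headers */

	/* Grow the head; this copies data, so it can fail under pressure. */
	return pskb_expand_head(skb, DEMO_MIN_HEADROOM, 0, GFP_ATOMIC);
}
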
skb               209 net/tls/tls_device.c 	struct sk_buff *skb;
skb               213 net/tls/tls_device.c 	skb = tcp_write_queue_tail(sk);
skb               214 net/tls/tls_device.c 	if (skb)
skb               215 net/tls/tls_device.c 		TCP_SKB_CB(skb)->eor = 1;
skb               726 net/tls/tls_device.c 					   struct sock *sk, struct sk_buff *skb)
skb               753 net/tls/tls_device.c 	rxm = strp_msg(skb);
skb               770 net/tls/tls_device.c static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
skb               772 net/tls/tls_device.c 	struct strp_msg *rxm = strp_msg(skb);
skb               784 net/tls/tls_device.c 	nsg = skb_cow_data(skb, 0, &unused);
skb               794 net/tls/tls_device.c 	err = skb_copy_bits(skb, offset, buf,
skb               800 net/tls/tls_device.c 	err = decrypt_skb(sk, skb, sg);
skb               808 net/tls/tls_device.c 	if (skb_pagelen(skb) > offset) {
skb               809 net/tls/tls_device.c 		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
skb               811 net/tls/tls_device.c 		if (skb->decrypted) {
skb               812 net/tls/tls_device.c 			err = skb_store_bits(skb, offset, buf, copy);
skb               821 net/tls/tls_device.c 	pos = skb_pagelen(skb);
skb               822 net/tls/tls_device.c 	skb_walk_frags(skb, skb_iter) {
skb               855 net/tls/tls_device.c int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
skb               859 net/tls/tls_device.c 	int is_decrypted = skb->decrypted;
skb               864 net/tls/tls_device.c 	skb_walk_frags(skb, skb_iter) {
skb               880 net/tls/tls_device.c 		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
skb               885 net/tls/tls_device.c 	return tls_device_reencrypt(sk, skb);
skb               914 net/tls/tls_device.c 	struct sk_buff *skb;
skb              1008 net/tls/tls_device.c 	skb = tcp_write_queue_tail(sk);
skb              1009 net/tls/tls_device.c 	if (skb)
skb              1010 net/tls/tls_device.c 		TCP_SKB_CB(skb)->eor = 1;
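
tls_device_reencrypt() copies the record out of the skb with skb_copy_bits(), decrypts the copy, and writes plaintext back only over regions the NIC had already decrypted, via skb_store_bits(). The reusable piece is the gather/scatter round trip; a sketch assuming the transform happens in the flat buffer:

#include <linux/skbuff.h>
#include <linux/slab.h>

/* Sketch: pull len bytes at offset into a linear buffer and back. */
static int demo_copy_roundtrip(struct sk_buff *skb, int offset, int len)
{
	void *buf = kmalloc(len, GFP_ATOMIC);
	int err;

	if (!buf)
		return -ENOMEM;

	err = skb_copy_bits(skb, offset, buf, len);	/* gather */
	if (err)
		goto out;
	/* ... transform buf in place (e.g. decrypt) ... */
	err = skb_store_bits(skb, offset, buf, len);	/* scatter back */
out:
	kfree(buf);
	return err;
}
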
skb               167 net/tls/tls_device_fallback.c static void update_chksum(struct sk_buff *skb, int headln)
skb               169 net/tls/tls_device_fallback.c 	struct tcphdr *th = tcp_hdr(skb);
skb               170 net/tls/tls_device_fallback.c 	int datalen = skb->len - headln;
skb               177 net/tls/tls_device_fallback.c 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
skb               180 net/tls/tls_device_fallback.c 	skb->ip_summed = CHECKSUM_PARTIAL;
skb               181 net/tls/tls_device_fallback.c 	skb->csum_start = skb_transport_header(skb) - skb->head;
skb               182 net/tls/tls_device_fallback.c 	skb->csum_offset = offsetof(struct tcphdr, check);
skb               184 net/tls/tls_device_fallback.c 	if (skb->sk->sk_family == AF_INET6) {
skb               185 net/tls/tls_device_fallback.c 		ipv6h = ipv6_hdr(skb);
skb               189 net/tls/tls_device_fallback.c 		iph = ip_hdr(skb);
skb               195 net/tls/tls_device_fallback.c static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
skb               197 net/tls/tls_device_fallback.c 	struct sock *sk = skb->sk;
skb               200 net/tls/tls_device_fallback.c 	skb_copy_header(nskb, skb);
skb               202 net/tls/tls_device_fallback.c 	skb_put(nskb, skb->len);
skb               203 net/tls/tls_device_fallback.c 	memcpy(nskb->data, skb->data, headln);
skb               205 net/tls/tls_device_fallback.c 	nskb->destructor = skb->destructor;
skb               207 net/tls/tls_device_fallback.c 	skb->destructor = NULL;
skb               208 net/tls/tls_device_fallback.c 	skb->sk = NULL;
skb               216 net/tls/tls_device_fallback.c 	delta = nskb->truesize - skb->truesize;
skb               229 net/tls/tls_device_fallback.c 		      struct sk_buff *skb,
skb               235 net/tls/tls_device_fallback.c 	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               236 net/tls/tls_device_fallback.c 	int payload_len = skb->len - tcp_payload_offset;
skb               237 net/tls/tls_device_fallback.c 	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
skb               286 net/tls/tls_device_fallback.c 	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
skb               310 net/tls/tls_device_fallback.c 				   struct sk_buff *skb,
skb               313 net/tls/tls_device_fallback.c 	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               315 net/tls/tls_device_fallback.c 	int payload_len = skb->len - tcp_payload_offset;
skb               341 net/tls/tls_device_fallback.c 	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
skb               345 net/tls/tls_device_fallback.c 	skb_reserve(nskb, skb_headroom(skb));
skb               354 net/tls/tls_device_fallback.c 	complete_skb(nskb, skb, tcp_payload_offset);
skb               372 net/tls/tls_device_fallback.c static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
skb               374 net/tls/tls_device_fallback.c 	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
skb               377 net/tls/tls_device_fallback.c 	int payload_len = skb->len - tcp_payload_offset;
skb               392 net/tls/tls_device_fallback.c 		return skb;
skb               401 net/tls/tls_device_fallback.c 	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
skb               404 net/tls/tls_device_fallback.c 			nskb = skb_get(skb);
skb               408 net/tls/tls_device_fallback.c 	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);
skb               416 net/tls/tls_device_fallback.c 		consume_skb(skb);
skb               418 net/tls/tls_device_fallback.c 		kfree_skb(skb);
skb               424 net/tls/tls_device_fallback.c 				      struct sk_buff *skb)
skb               427 net/tls/tls_device_fallback.c 		return skb;
skb               429 net/tls/tls_device_fallback.c 	return tls_sw_fallback(sk, skb);
skb               433 net/tls/tls_device_fallback.c struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
skb               435 net/tls/tls_device_fallback.c 	return tls_sw_fallback(skb->sk, skb);
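
update_chksum() in the fallback path re-arms partial checksumming after the payload has been rewritten: point csum_start at the transport header, csum_offset at tcphdr->check, and seed check with the pseudo-header sum. A sketch of the IPv4 leg (the IPv6 leg swaps in csum_ipv6_magic()):

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

/* Sketch: restore CHECKSUM_PARTIAL on a rewritten IPv4 TCP skb. */
static void demo_rearm_csum(struct sk_buff *skb, int headln)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct iphdr *iph = ip_hdr(skb);
	int datalen = skb->len - headln;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	/* Pseudo-header checksum; the device finishes the rest. */
	th->check = ~tcp_v4_check(datalen, iph->saddr, iph->daddr, 0);
}
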
skb               816 net/tls/tls_main.c static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
skb               823 net/tls/tls_main.c 	start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
skb               835 net/tls/tls_main.c 		err = nla_put_u16(skb, TLS_INFO_VERSION, version);
skb               841 net/tls/tls_main.c 		err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
skb               845 net/tls/tls_main.c 	err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
skb               849 net/tls/tls_main.c 	err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
skb               854 net/tls/tls_main.c 	nla_nest_end(skb, start);
skb               859 net/tls/tls_main.c 	nla_nest_cancel(skb, start);
skb                45 net/tls/tls_sw.c static int __skb_nsg(struct sk_buff *skb, int offset, int len,
skb                48 net/tls/tls_sw.c         int start = skb_headlen(skb);
skb                66 net/tls/tls_sw.c         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb                71 net/tls/tls_sw.c                 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb                85 net/tls/tls_sw.c         if (unlikely(skb_has_frag_list(skb))) {
skb                86 net/tls/tls_sw.c                 skb_walk_frags(skb, frag_iter) {
skb               116 net/tls/tls_sw.c static int skb_nsg(struct sk_buff *skb, int offset, int len)
skb               118 net/tls/tls_sw.c         return __skb_nsg(skb, offset, len, 0);
skb               122 net/tls/tls_sw.c 			  struct tls_prot_info *prot, struct sk_buff *skb)
skb               124 net/tls/tls_sw.c 	struct strp_msg *rxm = strp_msg(skb);
skb               136 net/tls/tls_sw.c 			err = skb_copy_bits(skb,
skb               160 net/tls/tls_sw.c 	struct sk_buff *skb;
skb               164 net/tls/tls_sw.c 	skb = (struct sk_buff *)req->data;
skb               165 net/tls/tls_sw.c 	tls_ctx = tls_get_ctx(skb->sk);
skb               172 net/tls/tls_sw.c 		tls_err_abort(skb->sk, err);
skb               174 net/tls/tls_sw.c 		struct strp_msg *rxm = strp_msg(skb);
skb               177 net/tls/tls_sw.c 		pad = padding_length(ctx, prot, skb);
skb               180 net/tls/tls_sw.c 			tls_err_abort(skb->sk, pad);
skb               191 net/tls/tls_sw.c 	skb->sk = NULL;
skb               215 net/tls/tls_sw.c 			     struct sk_buff *skb,
skb               241 net/tls/tls_sw.c 		skb->sk = sk;
skb               244 net/tls/tls_sw.c 					  tls_decrypt_done, skb);
skb              1285 net/tls/tls_sw.c 	struct sk_buff *skb;
skb              1288 net/tls/tls_sw.c 	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
skb              1308 net/tls/tls_sw.c 			      ctx->recv_pkt != skb ||
skb              1321 net/tls/tls_sw.c 	return skb;
skb              1390 net/tls/tls_sw.c static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
skb              1398 net/tls/tls_sw.c 	struct strp_msg *rxm = strp_msg(skb);
skb              1414 net/tls/tls_sw.c 		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
skb              1419 net/tls/tls_sw.c 		n_sgin = skb_cow_data(skb, 0, &unused);
skb              1457 net/tls/tls_sw.c 	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
skb              1481 net/tls/tls_sw.c 	err = skb_to_sgvec(skb, &sgin[1],
skb              1514 net/tls/tls_sw.c 	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
skb              1527 net/tls/tls_sw.c static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
skb              1534 net/tls/tls_sw.c 	struct strp_msg *rxm = strp_msg(skb);
skb              1539 net/tls/tls_sw.c 			err = tls_device_decrypted(sk, skb);
skb              1546 net/tls/tls_sw.c 			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
skb              1559 net/tls/tls_sw.c 		pad = padding_length(ctx, prot, skb);
skb              1576 net/tls/tls_sw.c int decrypt_skb(struct sock *sk, struct sk_buff *skb,
skb              1582 net/tls/tls_sw.c 	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
skb              1585 net/tls/tls_sw.c static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
skb              1591 net/tls/tls_sw.c 	if (skb) {
skb              1592 net/tls/tls_sw.c 		struct strp_msg *rxm = strp_msg(skb);
skb              1599 net/tls/tls_sw.c 		consume_skb(skb);
skb              1623 net/tls/tls_sw.c 	struct sk_buff *skb = skb_peek(&ctx->rx_list);
skb              1630 net/tls/tls_sw.c 	if (!ctrl && skb) {
skb              1631 net/tls/tls_sw.c 		tlm = tls_msg(skb);
skb              1635 net/tls/tls_sw.c 	while (skip && skb) {
skb              1636 net/tls/tls_sw.c 		struct strp_msg *rxm = strp_msg(skb);
skb              1637 net/tls/tls_sw.c 		tlm = tls_msg(skb);
skb              1647 net/tls/tls_sw.c 		skb = skb_peek_next(skb, &ctx->rx_list);
skb              1650 net/tls/tls_sw.c 	while (len && skb) {
skb              1652 net/tls/tls_sw.c 		struct strp_msg *rxm = strp_msg(skb);
skb              1655 net/tls/tls_sw.c 		tlm = tls_msg(skb);
skb              1677 net/tls/tls_sw.c 			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
skb              1704 net/tls/tls_sw.c 		next_skb = skb_peek_next(skb, &ctx->rx_list);
skb              1707 net/tls/tls_sw.c 			skb_unlink(skb, &ctx->rx_list);
skb              1708 net/tls/tls_sw.c 			consume_skb(skb);
skb              1711 net/tls/tls_sw.c 		skb = next_skb;
skb              1733 net/tls/tls_sw.c 	struct sk_buff *skb;
skb              1776 net/tls/tls_sw.c 		skb = tls_wait_data(sk, psock, flags, timeo, &err);
skb              1777 net/tls/tls_sw.c 		if (!skb) {
skb              1790 net/tls/tls_sw.c 			tlm = tls_msg(skb);
skb              1797 net/tls/tls_sw.c 		rxm = strp_msg(skb);
skb              1812 net/tls/tls_sw.c 		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
skb              1864 net/tls/tls_sw.c 			err = skb_copy_datagram_msg(skb, rxm->offset,
skb              1884 net/tls/tls_sw.c 			skb_queue_tail(&ctx->rx_list, skb);
skb              1885 net/tls/tls_sw.c 			skb = NULL;
skb              1888 net/tls/tls_sw.c 		if (tls_sw_advance_skb(sk, skb, chunk)) {
skb              1957 net/tls/tls_sw.c 	struct sk_buff *skb;
skb              1968 net/tls/tls_sw.c 	skb = tls_wait_data(sk, NULL, flags, timeo, &err);
skb              1969 net/tls/tls_sw.c 	if (!skb)
skb              1973 net/tls/tls_sw.c 		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
skb              1987 net/tls/tls_sw.c 	rxm = strp_msg(skb);
skb              1990 net/tls/tls_sw.c 	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
skb              1995 net/tls/tls_sw.c 		tls_sw_advance_skb(sk, skb, copied);
skb              2019 net/tls/tls_sw.c static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
skb              2025 net/tls/tls_sw.c 	struct strp_msg *rxm = strp_msg(skb);
skb              2031 net/tls/tls_sw.c 	if (rxm->offset + prot->prepend_size > skb->len)
skb              2041 net/tls/tls_sw.c 	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
skb              2072 net/tls/tls_sw.c 				     TCP_SKB_CB(skb)->seq + rxm->offset);
skb              2081 net/tls/tls_sw.c static void tls_queue(struct strparser *strp, struct sk_buff *skb)
skb              2088 net/tls/tls_sw.c 	ctx->recv_pkt = skb;
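
decrypt_internal() in tls_sw.c first counts how many scatterlist entries a record needs (skb_nsg()/skb_cow_data()), allocates the array, then maps the skb payload with skb_to_sgvec() so the crypto layer can read it in place. A minimal sketch of the map step, assuming the caller already sized the array:

#include <linux/scatterlist.h>
#include <linux/skbuff.h>

/* Sketch: map `len` payload bytes at `offset` into sg entries. */
static int demo_map_payload(struct sk_buff *skb, int offset, int len,
			    struct scatterlist *sg, int nsg)
{
	int used;

	sg_init_table(sg, nsg);
	used = skb_to_sgvec(skb, sg, offset, len);
	if (used < 0)
		return used;	/* ran out of entries or bad range */
	return used;		/* number of sg entries actually filled */
}
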
skb               139 net/unix/af_unix.c static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
skb               141 net/unix/af_unix.c 	UNIXCB(skb).secid = scm->secid;
skb               144 net/unix/af_unix.c static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
skb               146 net/unix/af_unix.c 	scm->secid = UNIXCB(skb).secid;
skb               149 net/unix/af_unix.c static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
skb               151 net/unix/af_unix.c 	return (scm->secid == UNIXCB(skb).secid);
skb               154 net/unix/af_unix.c static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
skb               157 net/unix/af_unix.c static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
skb               160 net/unix/af_unix.c static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
skb               520 net/unix/af_unix.c 	struct sk_buff *skb;
skb               559 net/unix/af_unix.c 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
skb               561 net/unix/af_unix.c 			unix_release_sock(skb->sk, 1);
skb               563 net/unix/af_unix.c 		UNIXCB(skb).consumed = skb->len;
skb               564 net/unix/af_unix.c 		kfree_skb(skb);
skb              1221 net/unix/af_unix.c 	struct sk_buff *skb = NULL;
skb              1251 net/unix/af_unix.c 	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
skb              1252 net/unix/af_unix.c 	if (skb == NULL)
skb              1381 net/unix/af_unix.c 	__skb_queue_tail(&other->sk_receive_queue, skb);
skb              1393 net/unix/af_unix.c 	kfree_skb(skb);
skb              1403 net/unix/af_unix.c 	struct sock *ska = socka->sk, *skb = sockb->sk;
skb              1407 net/unix/af_unix.c 	sock_hold(skb);
skb              1408 net/unix/af_unix.c 	unix_peer(ska) = skb;
skb              1409 net/unix/af_unix.c 	unix_peer(skb) = ska;
skb              1411 net/unix/af_unix.c 	init_peercred(skb);
skb              1415 net/unix/af_unix.c 		skb->sk_state = TCP_ESTABLISHED;
skb              1436 net/unix/af_unix.c 	struct sk_buff *skb;
skb              1451 net/unix/af_unix.c 	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
skb              1452 net/unix/af_unix.c 	if (!skb) {
skb              1459 net/unix/af_unix.c 	tsk = skb->sk;
skb              1460 net/unix/af_unix.c 	skb_free_datagram(sk, skb);
skb              1508 net/unix/af_unix.c static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
skb              1512 net/unix/af_unix.c 	UNIXCB(skb).pid  = get_pid(scm->pid);
skb              1513 net/unix/af_unix.c 	UNIXCB(skb).uid = scm->creds.uid;
skb              1514 net/unix/af_unix.c 	UNIXCB(skb).gid = scm->creds.gid;
skb              1515 net/unix/af_unix.c 	UNIXCB(skb).fp = NULL;
skb              1516 net/unix/af_unix.c 	unix_get_secdata(scm, skb);
skb              1518 net/unix/af_unix.c 		err = unix_attach_fds(scm, skb);
skb              1520 net/unix/af_unix.c 	skb->destructor = unix_destruct_scm;
skb              1537 net/unix/af_unix.c static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
skb              1540 net/unix/af_unix.c 	if (UNIXCB(skb).pid)
skb              1543 net/unix/af_unix.c 		UNIXCB(skb).pid  = get_pid(task_tgid(current));
skb              1544 net/unix/af_unix.c 		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
skb              1566 net/unix/af_unix.c static bool unix_skb_scm_eq(struct sk_buff *skb,
skb              1569 net/unix/af_unix.c 	const struct unix_skb_parms *u = &UNIXCB(skb);
skb              1574 net/unix/af_unix.c 	       unix_secdata_eq(scm, skb);
skb              1592 net/unix/af_unix.c 	struct sk_buff *skb;
skb              1637 net/unix/af_unix.c 	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
skb              1640 net/unix/af_unix.c 	if (skb == NULL)
skb              1643 net/unix/af_unix.c 	err = unix_scm_to_skb(&scm, skb, true);
skb              1647 net/unix/af_unix.c 	skb_put(skb, len - data_len);
skb              1648 net/unix/af_unix.c 	skb->data_len = data_len;
skb              1649 net/unix/af_unix.c 	skb->len = len;
skb              1650 net/unix/af_unix.c 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
skb              1668 net/unix/af_unix.c 	if (sk_filter(other, skb) < 0) {
skb              1760 net/unix/af_unix.c 		__net_timestamp(skb);
skb              1761 net/unix/af_unix.c 	maybe_add_creds(skb, sock, other);
skb              1762 net/unix/af_unix.c 	skb_queue_tail(&other->sk_receive_queue, skb);
skb              1774 net/unix/af_unix.c 	kfree_skb(skb);
skb              1793 net/unix/af_unix.c 	struct sk_buff *skb;
skb              1834 net/unix/af_unix.c 		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
skb              1837 net/unix/af_unix.c 		if (!skb)
skb              1841 net/unix/af_unix.c 		err = unix_scm_to_skb(&scm, skb, !fds_sent);
skb              1843 net/unix/af_unix.c 			kfree_skb(skb);
skb              1848 net/unix/af_unix.c 		skb_put(skb, size - data_len);
skb              1849 net/unix/af_unix.c 		skb->data_len = data_len;
skb              1850 net/unix/af_unix.c 		skb->len = size;
skb              1851 net/unix/af_unix.c 		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
skb              1853 net/unix/af_unix.c 			kfree_skb(skb);
skb              1863 net/unix/af_unix.c 		maybe_add_creds(skb, sock, other);
skb              1864 net/unix/af_unix.c 		skb_queue_tail(&other->sk_receive_queue, skb);
skb              1876 net/unix/af_unix.c 	kfree_skb(skb);
skb              1894 net/unix/af_unix.c 	struct sk_buff *skb, *newskb = NULL, *tail = NULL;
skb              1944 net/unix/af_unix.c 	skb = skb_peek_tail(&other->sk_receive_queue);
skb              1945 net/unix/af_unix.c 	if (tail && tail == skb) {
skb              1946 net/unix/af_unix.c 		skb = newskb;
skb              1947 net/unix/af_unix.c 	} else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
skb              1949 net/unix/af_unix.c 			skb = newskb;
skb              1951 net/unix/af_unix.c 			tail = skb;
skb              1963 net/unix/af_unix.c 	if (skb_append_pagefrags(skb, page, offset, size)) {
skb              1964 net/unix/af_unix.c 		tail = skb;
skb              1968 net/unix/af_unix.c 	skb->len += size;
skb              1969 net/unix/af_unix.c 	skb->data_len += size;
skb              1970 net/unix/af_unix.c 	skb->truesize += size;
skb              1974 net/unix/af_unix.c 		err = unix_scm_to_skb(&scm, skb, false);
skb              2048 net/unix/af_unix.c 	struct sk_buff *skb, *last;
skb              2063 net/unix/af_unix.c 		skb = __skb_try_recv_datagram(sk, flags, NULL, &skip, &err,
skb              2065 net/unix/af_unix.c 		if (skb)
skb              2075 net/unix/af_unix.c 	if (!skb) { /* implies iolock unlocked */
skb              2091 net/unix/af_unix.c 		unix_copy_addr(msg, skb->sk);
skb              2093 net/unix/af_unix.c 	if (size > skb->len - skip)
skb              2094 net/unix/af_unix.c 		size = skb->len - skip;
skb              2095 net/unix/af_unix.c 	else if (size < skb->len - skip)
skb              2098 net/unix/af_unix.c 	err = skb_copy_datagram_msg(skb, skip, msg, size);
skb              2103 net/unix/af_unix.c 		__sock_recv_timestamp(msg, sk, skb);
skb              2107 net/unix/af_unix.c 	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
skb              2108 net/unix/af_unix.c 	unix_set_secdata(&scm, skb);
skb              2111 net/unix/af_unix.c 		if (UNIXCB(skb).fp)
skb              2112 net/unix/af_unix.c 			unix_detach_fds(&scm, skb);
skb              2114 net/unix/af_unix.c 		sk_peek_offset_bwd(sk, skb->len);
skb              2131 net/unix/af_unix.c 		if (UNIXCB(skb).fp)
skb              2132 net/unix/af_unix.c 			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
skb              2134 net/unix/af_unix.c 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
skb              2139 net/unix/af_unix.c 	skb_free_datagram(sk, skb);
skb              2188 net/unix/af_unix.c static unsigned int unix_skb_len(const struct sk_buff *skb)
skb              2190 net/unix/af_unix.c 	return skb->len - UNIXCB(skb).consumed;
skb              2247 net/unix/af_unix.c 		struct sk_buff *skb, *last;
skb              2255 net/unix/af_unix.c 		last = skb = skb_peek(&sk->sk_receive_queue);
skb              2258 net/unix/af_unix.c 		if (skb == NULL) {
skb              2296 net/unix/af_unix.c 		while (skip >= unix_skb_len(skb)) {
skb              2297 net/unix/af_unix.c 			skip -= unix_skb_len(skb);
skb              2298 net/unix/af_unix.c 			last = skb;
skb              2299 net/unix/af_unix.c 			last_len = skb->len;
skb              2300 net/unix/af_unix.c 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
skb              2301 net/unix/af_unix.c 			if (!skb)
skb              2309 net/unix/af_unix.c 			if (!unix_skb_scm_eq(skb, &scm))
skb              2313 net/unix/af_unix.c 			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
skb              2314 net/unix/af_unix.c 			unix_set_secdata(&scm, skb);
skb              2322 net/unix/af_unix.c 			unix_copy_addr(state->msg, skb->sk);
skb              2326 net/unix/af_unix.c 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
skb              2327 net/unix/af_unix.c 		skb_get(skb);
skb              2328 net/unix/af_unix.c 		chunk = state->recv_actor(skb, skip, chunk, state);
skb              2329 net/unix/af_unix.c 		drop_skb = !unix_skb_len(skb);
skb              2331 net/unix/af_unix.c 		consume_skb(skb);
skb              2354 net/unix/af_unix.c 			UNIXCB(skb).consumed += chunk;
skb              2358 net/unix/af_unix.c 			if (UNIXCB(skb).fp)
skb              2359 net/unix/af_unix.c 				unix_detach_fds(&scm, skb);
skb              2361 net/unix/af_unix.c 			if (unix_skb_len(skb))
skb              2364 net/unix/af_unix.c 			skb_unlink(skb, &sk->sk_receive_queue);
skb              2365 net/unix/af_unix.c 			consume_skb(skb);
skb              2372 net/unix/af_unix.c 			if (UNIXCB(skb).fp)
skb              2373 net/unix/af_unix.c 				scm.fp = scm_fp_dup(UNIXCB(skb).fp);
skb              2377 net/unix/af_unix.c 			if (UNIXCB(skb).fp)
skb              2381 net/unix/af_unix.c 			last = skb;
skb              2382 net/unix/af_unix.c 			last_len = skb->len;
skb              2384 net/unix/af_unix.c 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
skb              2385 net/unix/af_unix.c 			if (skb)
skb              2401 net/unix/af_unix.c static int unix_stream_read_actor(struct sk_buff *skb,
skb              2407 net/unix/af_unix.c 	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
skb              2426 net/unix/af_unix.c static int unix_stream_splice_actor(struct sk_buff *skb,
skb              2430 net/unix/af_unix.c 	return skb_splice_bits(skb, state->socket->sk,
skb              2431 net/unix/af_unix.c 			       UNIXCB(skb).consumed + skip,
skb              2505 net/unix/af_unix.c 	struct sk_buff *skb;
skb              2514 net/unix/af_unix.c 		skb_queue_walk(&sk->sk_receive_queue, skb)
skb              2515 net/unix/af_unix.c 			amount += unix_skb_len(skb);
skb              2517 net/unix/af_unix.c 		skb = skb_peek(&sk->sk_receive_queue);
skb              2518 net/unix/af_unix.c 		if (skb)
skb              2519 net/unix/af_unix.c 			amount = skb->len;
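
The unix_dgram_recvmsg() entries show the canonical datagram receive shape: dequeue with skb_recv_datagram(), copy with skb_copy_datagram_msg(), flag MSG_TRUNC when the buffer was too small, and release with skb_free_datagram(). A condensed sketch of that flow (peeking, credentials, and fd passing trimmed):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch: minimal datagram receive using the common skb helpers. */
static int demo_dgram_recvmsg(struct sock *sk, struct msghdr *msg,
			      size_t size, int flags)
{
	struct sk_buff *skb;
	int err, copied;

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	copied = min_t(int, size, skb->len);
	if (copied < skb->len)
		msg->msg_flags |= MSG_TRUNC;	/* datagram was cut short */

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	skb_free_datagram(sk, skb);
	return err ?: copied;
}
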
skb                62 net/unix/diag.c 	struct sk_buff *skb;
skb                77 net/unix/diag.c 		skb_queue_walk(&sk->sk_receive_queue, skb) {
skb                80 net/unix/diag.c 			req = skb->sk;
skb               122 net/unix/diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
skb               128 net/unix/diag.c 	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
skb               142 net/unix/diag.c 	    sk_diag_dump_name(sk, skb))
skb               146 net/unix/diag.c 	    sk_diag_dump_vfs(sk, skb))
skb               150 net/unix/diag.c 	    sk_diag_dump_peer(sk, skb))
skb               154 net/unix/diag.c 	    sk_diag_dump_icons(sk, skb))
skb               158 net/unix/diag.c 	    sk_diag_show_rqlen(sk, skb))
skb               162 net/unix/diag.c 	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
skb               165 net/unix/diag.c 	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
skb               169 net/unix/diag.c 	    sk_diag_dump_uid(sk, skb))
skb               172 net/unix/diag.c 	nlmsg_end(skb, nlh);
skb               176 net/unix/diag.c 	nlmsg_cancel(skb, nlh);
skb               180 net/unix/diag.c static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
skb               192 net/unix/diag.c 	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
skb               195 net/unix/diag.c static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb               199 net/unix/diag.c 	struct net *net = sock_net(skb->sk);
skb               220 net/unix/diag.c 			if (sk_diag_dump(sk, skb, req,
skb               221 net/unix/diag.c 					 NETLINK_CB(cb->skb).portid,
skb               234 net/unix/diag.c 	return skb->len;
skb               309 net/unix/diag.c static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
skb               312 net/unix/diag.c 	struct net *net = sock_net(skb->sk);
skb               321 net/unix/diag.c 		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
skb               323 net/unix/diag.c 		return unix_diag_get_exact(skb, h, nlmsg_data(h));
skb                94 net/unix/garbage.c 	struct sk_buff *skb;
skb                98 net/unix/garbage.c 	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
skb               100 net/unix/garbage.c 		if (UNIXCB(skb).fp) {
skb               103 net/unix/garbage.c 			int nfd = UNIXCB(skb).fp->count;
skb               104 net/unix/garbage.c 			struct file **fp = UNIXCB(skb).fp->fp;
skb               125 net/unix/garbage.c 				__skb_unlink(skb, &x->sk_receive_queue);
skb               126 net/unix/garbage.c 				__skb_queue_tail(hitlist, skb);
skb               139 net/unix/garbage.c 		struct sk_buff *skb;
skb               148 net/unix/garbage.c 		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
skb               149 net/unix/garbage.c 			u = unix_sk(skb->sk);
skb               103 net/unix/scm.c int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
skb               115 net/unix/scm.c 	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
skb               116 net/unix/scm.c 	if (!UNIXCB(skb).fp)
skb               125 net/unix/scm.c void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
skb               129 net/unix/scm.c 	scm->fp = UNIXCB(skb).fp;
skb               130 net/unix/scm.c 	UNIXCB(skb).fp = NULL;
skb               137 net/unix/scm.c void unix_destruct_scm(struct sk_buff *skb)
skb               142 net/unix/scm.c 	scm.pid  = UNIXCB(skb).pid;
skb               143 net/unix/scm.c 	if (UNIXCB(skb).fp)
skb               144 net/unix/scm.c 		unix_detach_fds(&scm, skb);
skb               149 net/unix/scm.c 	sock_wfree(skb);
skb                 7 net/unix/scm.h int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb);
skb                 8 net/unix/scm.h void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb);
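
unix_destruct_scm() shows why the passed-fd references ride in the skb control block: the destructor runs exactly once, when the last reference to the skb is dropped, so detaching the files there can neither leak nor double-free. A generic sketch of installing per-skb cleanup through skb->destructor (demo_cb and demo_destruct are hypothetical):

#include <linux/skbuff.h>
#include <linux/slab.h>

/* Sketch: stash private state in skb->cb and release it on free. */
struct demo_cb {
	void *state;
};
#define DEMO_CB(skb) ((struct demo_cb *)&((skb)->cb[0]))

static void demo_destruct(struct sk_buff *skb)
{
	/* Runs once, from the final kfree_skb()/consume_skb(). Real
	 * users that charged the skb to a socket must also call
	 * sock_wfree() here, as unix_destruct_scm() does.
	 */
	kfree(DEMO_CB(skb)->state);
}

static void demo_attach(struct sk_buff *skb, void *state)
{
	DEMO_CB(skb)->state = state;
	skb->destructor = demo_destruct;
}
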
skb               115 net/vmw_vsock/af_vsock.c static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
skb               644 net/vmw_vsock/af_vsock.c 		struct sk_buff *skb;
skb               665 net/vmw_vsock/af_vsock.c 		while ((skb = skb_dequeue(&sk->sk_receive_queue)))
skb               666 net/vmw_vsock/af_vsock.c 			kfree_skb(skb);
skb               694 net/vmw_vsock/af_vsock.c static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb               698 net/vmw_vsock/af_vsock.c 	err = sock_queue_rcv_skb(sk, skb);
skb               700 net/vmw_vsock/af_vsock.c 		kfree_skb(skb);
skb                60 net/vmw_vsock/af_vsock_tap.c static int __vsock_deliver_tap_skb(struct sk_buff *skb,
skb                64 net/vmw_vsock/af_vsock_tap.c 	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
skb                80 net/vmw_vsock/af_vsock_tap.c static void __vsock_deliver_tap(struct sk_buff *skb)
skb                86 net/vmw_vsock/af_vsock_tap.c 		ret = __vsock_deliver_tap_skb(skb, tmp->dev);
skb                94 net/vmw_vsock/af_vsock_tap.c 	struct sk_buff *skb;
skb               101 net/vmw_vsock/af_vsock_tap.c 	skb = build_skb(opaque);
skb               102 net/vmw_vsock/af_vsock_tap.c 	if (skb) {
skb               103 net/vmw_vsock/af_vsock_tap.c 		__vsock_deliver_tap(skb);
skb               104 net/vmw_vsock/af_vsock_tap.c 		consume_skb(skb);
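
__vsock_deliver_tap_skb() clones the skb for each registered tap device so monitoring never consumes the original. A sketch of that clone-and-inject step; the device lookup is assumed done by the caller, and the delivery call here is a generic stand-in rather than vsock's exact path:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: hand a copy of skb to a capture device, keep the original. */
static int demo_deliver_tap(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
	int ret;

	if (!nskb)
		return -ENOMEM;

	dev_hold(dev);		/* keep the tap device alive for delivery */
	nskb->dev = dev;
	ret = netif_rx(nskb);	/* the clone is consumed here */
	dev_put(dev);
	return ret;
}
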
skb                14 net/vmw_vsock/diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
skb                21 net/vmw_vsock/diag.c 	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
skb                48 net/vmw_vsock/diag.c static int vsock_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
skb                59 net/vmw_vsock/diag.c 	net = sock_net(skb->sk);
skb                85 net/vmw_vsock/diag.c 				if (sk_diag_fill(sk, skb,
skb                86 net/vmw_vsock/diag.c 						 NETLINK_CB(cb->skb).portid,
skb               119 net/vmw_vsock/diag.c 			if (sk_diag_fill(sk, skb,
skb               120 net/vmw_vsock/diag.c 					 NETLINK_CB(cb->skb).portid,
skb               138 net/vmw_vsock/diag.c 	return skb->len;
skb               141 net/vmw_vsock/diag.c static int vsock_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
skb               144 net/vmw_vsock/diag.c 	struct net *net = sock_net(skb->sk);
skb               153 net/vmw_vsock/diag.c 		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
skb                99 net/vmw_vsock/virtio_transport_common.c 	struct sk_buff *skb;
skb               110 net/vmw_vsock/virtio_transport_common.c 	skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
skb               112 net/vmw_vsock/virtio_transport_common.c 	if (!skb)
skb               115 net/vmw_vsock/virtio_transport_common.c 	hdr = skb_put(skb, sizeof(*hdr));
skb               148 net/vmw_vsock/virtio_transport_common.c 	skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
skb               151 net/vmw_vsock/virtio_transport_common.c 		skb_put_data(skb, payload_buf, payload_len);
skb               154 net/vmw_vsock/virtio_transport_common.c 	return skb;
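
The virtio transport entries size the tap skb for header plus payload up front, then fill it with skb_put()/skb_put_data() so the consumer sees one linear buffer. The general build shape, sketched with a made-up header type:

#include <linux/skbuff.h>

struct demo_hdr {		/* hypothetical on-wire header */
	__le32 len;
};

static struct sk_buff *demo_build_skb(const void *payload, u32 len)
{
	struct demo_hdr *hdr;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*hdr) + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, sizeof(*hdr));	/* reserve and return ptr */
	hdr->len = cpu_to_le32(len);
	skb_put_data(skb, payload, len);	/* append the payload */
	return skb;
}
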
skb               622 net/vmw_vsock/vmci_transport.c 	struct sk_buff *skb;
skb               643 net/vmw_vsock/vmci_transport.c 	skb = alloc_skb(size, GFP_ATOMIC);
skb               644 net/vmw_vsock/vmci_transport.c 	if (!skb)
skb               649 net/vmw_vsock/vmci_transport.c 	skb_put(skb, size);
skb               650 net/vmw_vsock/vmci_transport.c 	memcpy(skb->data, dg, size);
skb               651 net/vmw_vsock/vmci_transport.c 	sk_receive_skb(sk, skb, 0);
skb              1753 net/vmw_vsock/vmci_transport.c 	struct sk_buff *skb;
skb              1762 net/vmw_vsock/vmci_transport.c 	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
skb              1763 net/vmw_vsock/vmci_transport.c 	if (!skb)
skb              1766 net/vmw_vsock/vmci_transport.c 	dg = (struct vmci_datagram *)skb->data;
skb              1773 net/vmw_vsock/vmci_transport.c 	if (payload_len != skb->len - sizeof(*dg)) {
skb              1784 net/vmw_vsock/vmci_transport.c 	err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len);
skb              1797 net/vmw_vsock/vmci_transport.c 	skb_free_datagram(&vsk->sk, skb);
skb               114 net/wimax/op-msg.c 	struct sk_buff *skb;
skb               120 net/wimax/op-msg.c 	skb = genlmsg_new(msg_size, gfp_flags);
skb               121 net/wimax/op-msg.c 	if (skb == NULL)
skb               123 net/wimax/op-msg.c 	genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family,
skb               129 net/wimax/op-msg.c 	result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX,
skb               136 net/wimax/op-msg.c 		result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME,
skb               143 net/wimax/op-msg.c 	result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
skb               149 net/wimax/op-msg.c 	genlmsg_end(skb, genl_msg);
skb               150 net/wimax/op-msg.c 	return skb;
skb               155 net/wimax/op-msg.c 	nlmsg_free(skb);
skb               258 net/wimax/op-msg.c int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
skb               261 net/wimax/op-msg.c 	void *msg = skb->data;
skb               262 net/wimax/op-msg.c 	size_t size = skb->len;
skb               267 net/wimax/op-msg.c 	genlmsg_multicast(&wimax_gnl_family, skb, 0, 0, GFP_KERNEL);
skb               298 net/wimax/op-msg.c 	struct sk_buff *skb;
skb               300 net/wimax/op-msg.c 	skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
skb               301 net/wimax/op-msg.c 	if (IS_ERR(skb))
skb               302 net/wimax/op-msg.c 		result = PTR_ERR(skb);
skb               304 net/wimax/op-msg.c 		result = wimax_msg_send(wimax_dev, skb);
skb               317 net/wimax/op-msg.c int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
skb               328 net/wimax/op-msg.c 	d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
skb               389 net/wimax/op-msg.c 	d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
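
wimax_msg_alloc() is a textbook generic-netlink message builder: genlmsg_new() to size the skb, genlmsg_put() for the family header, nla_put*() for attributes, genlmsg_end() to finalize, nlmsg_free() on any failure. A sketch with a hypothetical family and attribute set:

#include <linux/err.h>
#include <net/genetlink.h>

/* Hypothetical family/attrs; real ones come from the subsystem. */
extern struct genl_family demo_family;
enum { DEMO_A_UNSPEC, DEMO_A_IFIDX, DEMO_A_DATA };
#define DEMO_CMD_EVENT 1

static struct sk_buff *demo_msg_alloc(u32 ifidx, const void *data,
				      size_t size)
{
	struct sk_buff *skb = genlmsg_new(nla_total_size(size) +
					  nla_total_size(sizeof(u32)),
					  GFP_KERNEL);
	void *hdr;

	if (!skb)
		return ERR_PTR(-ENOMEM);
	hdr = genlmsg_put(skb, 0, 0, &demo_family, 0, DEMO_CMD_EVENT);
	if (!hdr)
		goto err;
	if (nla_put_u32(skb, DEMO_A_IFIDX, ifidx) ||
	    nla_put(skb, DEMO_A_DATA, size, data))
		goto err;
	genlmsg_end(skb, hdr);
	return skb;
err:
	nlmsg_free(skb);
	return ERR_PTR(-EMSGSIZE);
}
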
skb                87 net/wimax/op-reset.c int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
skb                92 net/wimax/op-reset.c 	d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
skb               106 net/wimax/op-reset.c 	d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
skb               398 net/wimax/op-rfkill.c int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info)
skb               405 net/wimax/op-rfkill.c 	d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
skb               429 net/wimax/op-rfkill.c 	d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
skb                31 net/wimax/op-state-get.c int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
skb                36 net/wimax/op-state-get.c 	d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
skb                50 net/wimax/op-state-get.c 	d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
skb                79 net/wimax/wimax-internal.h int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info);
skb                80 net/wimax/wimax-internal.h int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info);
skb                81 net/wimax/wimax-internal.h int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info);
skb                82 net/wimax/wimax-internal.h int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info);
skb               153 net/wireless/lib80211_crypt_ccmp.c static int lib80211_ccmp_hdr(struct sk_buff *skb, int hdr_len,
skb               160 net/wireless/lib80211_crypt_ccmp.c 	if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len)
skb               166 net/wireless/lib80211_crypt_ccmp.c 	pos = skb_push(skb, CCMP_HDR_LEN);
skb               190 net/wireless/lib80211_crypt_ccmp.c static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               201 net/wireless/lib80211_crypt_ccmp.c 	if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len)
skb               204 net/wireless/lib80211_crypt_ccmp.c 	data_len = skb->len - hdr_len;
skb               205 net/wireless/lib80211_crypt_ccmp.c 	len = lib80211_ccmp_hdr(skb, hdr_len, NULL, 0, priv);
skb               213 net/wireless/lib80211_crypt_ccmp.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               216 net/wireless/lib80211_crypt_ccmp.c 	skb_put(skb, CCMP_MIC_LEN);
skb               220 net/wireless/lib80211_crypt_ccmp.c 	sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN,
skb               254 net/wireless/lib80211_crypt_ccmp.c static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               265 net/wireless/lib80211_crypt_ccmp.c 	size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
skb               267 net/wireless/lib80211_crypt_ccmp.c 	if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
skb               272 net/wireless/lib80211_crypt_ccmp.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               273 net/wireless/lib80211_crypt_ccmp.c 	pos = skb->data + hdr_len;
skb               340 net/wireless/lib80211_crypt_ccmp.c 	memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
skb               341 net/wireless/lib80211_crypt_ccmp.c 	skb_pull(skb, CCMP_HDR_LEN);
skb               342 net/wireless/lib80211_crypt_ccmp.c 	skb_trim(skb, skb->len - CCMP_MIC_LEN);
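
The lib80211 CCMP entries show both directions of in-place header surgery: encrypt pushes the 8-byte CCMP header with skb_push() and appends the MIC with skb_put(); decrypt slides the 802.11 header forward with memmove(), then drops the security header and MIC with skb_pull()/skb_trim(). A generic sketch of the decrypt-side strip (sizes are illustrative, not CCMP's constants):

#include <linux/skbuff.h>
#include <linux/string.h>

#define DEMO_SEC_HDR_LEN 8	/* security header after the MAC hdr */
#define DEMO_MIC_LEN	 8	/* integrity tag at the tail */

/* Sketch: strip a per-packet security header and trailing MIC. */
static int demo_strip_sec(struct sk_buff *skb, int hdr_len)
{
	if (skb->len < hdr_len + DEMO_SEC_HDR_LEN + DEMO_MIC_LEN)
		return -EINVAL;

	/* Slide the MAC header over the security header... */
	memmove(skb->data + DEMO_SEC_HDR_LEN, skb->data, hdr_len);
	skb_pull(skb, DEMO_SEC_HDR_LEN);	/* ...and drop the gap */
	skb_trim(skb, skb->len - DEMO_MIC_LEN);	/* drop the MIC */
	return 0;
}
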
skb               283 net/wireless/lib80211_crypt_tkip.c static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
skb               290 net/wireless/lib80211_crypt_tkip.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               292 net/wireless/lib80211_crypt_tkip.c 	if (skb_headroom(skb) < TKIP_HDR_LEN || skb->len < hdr_len)
skb               305 net/wireless/lib80211_crypt_tkip.c 	pos = skb_push(skb, TKIP_HDR_LEN);
skb               327 net/wireless/lib80211_crypt_tkip.c static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               335 net/wireless/lib80211_crypt_tkip.c 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
skb               341 net/wireless/lib80211_crypt_tkip.c 	if (skb_tailroom(skb) < 4 || skb->len < hdr_len)
skb               344 net/wireless/lib80211_crypt_tkip.c 	len = skb->len - hdr_len;
skb               345 net/wireless/lib80211_crypt_tkip.c 	pos = skb->data + hdr_len;
skb               347 net/wireless/lib80211_crypt_tkip.c 	if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0)
skb               351 net/wireless/lib80211_crypt_tkip.c 	icv = skb_put(skb, 4);
skb               376 net/wireless/lib80211_crypt_tkip.c static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               388 net/wireless/lib80211_crypt_tkip.c 	hdr = (struct ieee80211_hdr *)skb->data;
skb               396 net/wireless/lib80211_crypt_tkip.c 	if (skb->len < hdr_len + TKIP_HDR_LEN + 4)
skb               399 net/wireless/lib80211_crypt_tkip.c 	pos = skb->data + hdr_len;
skb               437 net/wireless/lib80211_crypt_tkip.c 	plen = skb->len - hdr_len - 12;
skb               467 net/wireless/lib80211_crypt_tkip.c 	memmove(skb->data + TKIP_HDR_LEN, skb->data, hdr_len);
skb               468 net/wireless/lib80211_crypt_tkip.c 	skb_pull(skb, TKIP_HDR_LEN);
skb               469 net/wireless/lib80211_crypt_tkip.c 	skb_trim(skb, skb->len - 4);
skb               506 net/wireless/lib80211_crypt_tkip.c static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
skb               510 net/wireless/lib80211_crypt_tkip.c 	hdr11 = (struct ieee80211_hdr *)skb->data;
skb               541 net/wireless/lib80211_crypt_tkip.c static int lib80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
skb               547 net/wireless/lib80211_crypt_tkip.c 	if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
skb               550 net/wireless/lib80211_crypt_tkip.c 		       skb_tailroom(skb), hdr_len, skb->len);
skb               554 net/wireless/lib80211_crypt_tkip.c 	michael_mic_hdr(skb, tkey->tx_hdr);
skb               555 net/wireless/lib80211_crypt_tkip.c 	pos = skb_put(skb, 8);
skb               557 net/wireless/lib80211_crypt_tkip.c 			skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
skb               584 net/wireless/lib80211_crypt_tkip.c static int lib80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
skb               593 net/wireless/lib80211_crypt_tkip.c 	michael_mic_hdr(skb, tkey->rx_hdr);
skb               595 net/wireless/lib80211_crypt_tkip.c 			skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
skb               597 net/wireless/lib80211_crypt_tkip.c 	if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
skb               599 net/wireless/lib80211_crypt_tkip.c 		hdr = (struct ieee80211_hdr *)skb->data;
skb               602 net/wireless/lib80211_crypt_tkip.c 		       skb->dev ? skb->dev->name : "N/A", hdr->addr2,
skb               604 net/wireless/lib80211_crypt_tkip.c 		if (skb->dev)
skb               605 net/wireless/lib80211_crypt_tkip.c 			lib80211_michael_mic_failure(skb->dev, hdr, keyidx);
skb               615 net/wireless/lib80211_crypt_tkip.c 	skb_trim(skb, skb->len - 8);
skb                63 net/wireless/lib80211_crypt_wep.c static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len,
skb                70 net/wireless/lib80211_crypt_wep.c 	if (skb_headroom(skb) < 4 || skb->len < hdr_len)
skb                73 net/wireless/lib80211_crypt_wep.c 	pos = skb_push(skb, 4);
skb               105 net/wireless/lib80211_crypt_wep.c static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               113 net/wireless/lib80211_crypt_wep.c 	if (skb_tailroom(skb) < 4)
skb               117 net/wireless/lib80211_crypt_wep.c 	if (lib80211_wep_build_iv(skb, hdr_len, NULL, 0, priv))
skb               121 net/wireless/lib80211_crypt_wep.c 	skb_copy_from_linear_data_offset(skb, hdr_len, key, 3);
skb               126 net/wireless/lib80211_crypt_wep.c 	len = skb->len - hdr_len - 4;
skb               127 net/wireless/lib80211_crypt_wep.c 	pos = skb->data + hdr_len + 4;
skb               132 net/wireless/lib80211_crypt_wep.c 	icv = skb_put(skb, 4);
skb               151 net/wireless/lib80211_crypt_wep.c static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb               158 net/wireless/lib80211_crypt_wep.c 	if (skb->len < hdr_len + 8)
skb               161 net/wireless/lib80211_crypt_wep.c 	pos = skb->data + hdr_len;
skb               175 net/wireless/lib80211_crypt_wep.c 	plen = skb->len - hdr_len - 8;
skb               191 net/wireless/lib80211_crypt_wep.c 	memmove(skb->data + 4, skb->data, hdr_len);
skb               192 net/wireless/lib80211_crypt_wep.c 	skb_pull(skb, 4);
skb               193 net/wireless/lib80211_crypt_wep.c 	skb_trim(skb, skb->len - 4);
skb               823 net/wireless/nl80211.c 		*wdev = __cfg80211_wdev_from_attrs(sock_net(cb->skb->sk),
skb               857 net/wireless/nl80211.c void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
skb               861 net/wireless/nl80211.c 	return genlmsg_put(skb, portid, seq, &nl80211_fam, flags, cmd);
skb              2462 net/wireless/nl80211.c static int nl80211_dump_wiphy_parse(struct sk_buff *skb,
skb              2492 net/wireless/nl80211.c 		netdev = __dev_get_by_index(sock_net(skb->sk), ifidx);
skb              2510 net/wireless/nl80211.c static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
skb              2524 net/wireless/nl80211.c 		ret = nl80211_dump_wiphy_parse(skb, cb, state);
skb              2534 net/wireless/nl80211.c 		if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
skb              2544 net/wireless/nl80211.c 						 skb,
skb              2545 net/wireless/nl80211.c 						 NETLINK_CB(cb->skb).portid,
skb              2563 net/wireless/nl80211.c 				    !skb->len && !state->split &&
skb              2580 net/wireless/nl80211.c 	return skb->len;
skb              2589 net/wireless/nl80211.c static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
skb              2830 net/wireless/nl80211.c static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
skb              2838 net/wireless/nl80211.c static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info)
skb              2861 net/wireless/nl80211.c static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
skb              3287 net/wireless/nl80211.c static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb)
skb              3304 net/wireless/nl80211.c 		ret = nl80211_dump_wiphy_parse(skb, cb, &state);
skb              3323 net/wireless/nl80211.c 		if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
skb              3340 net/wireless/nl80211.c 			if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid,
skb              3355 net/wireless/nl80211.c 	ret = skb->len;
skb              3362 net/wireless/nl80211.c static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
skb              3501 net/wireless/nl80211.c static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
skb              3571 net/wireless/nl80211.c static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
skb              3669 net/wireless/nl80211.c static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
skb              3690 net/wireless/nl80211.c static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
skb              3754 net/wireless/nl80211.c static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
skb              3828 net/wireless/nl80211.c static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
skb              3917 net/wireless/nl80211.c static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
skb              3966 net/wireless/nl80211.c static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
skb              4086 net/wireless/nl80211.c static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info)
skb              4632 net/wireless/nl80211.c static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
skb              4829 net/wireless/nl80211.c static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
skb              4858 net/wireless/nl80211.c static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
skb              5277 net/wireless/nl80211.c static int nl80211_dump_station(struct sk_buff *skb,
skb              5311 net/wireless/nl80211.c 		if (nl80211_send_station(skb, NL80211_CMD_NEW_STATION,
skb              5312 net/wireless/nl80211.c 				NETLINK_CB(cb->skb).portid,
skb              5323 net/wireless/nl80211.c 	err = skb->len;
skb              5330 net/wireless/nl80211.c static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
skb              5697 net/wireless/nl80211.c static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
skb              5827 net/wireless/nl80211.c static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
skb              6055 net/wireless/nl80211.c static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
skb              6159 net/wireless/nl80211.c static int nl80211_dump_mpath(struct sk_buff *skb,
skb              6193 net/wireless/nl80211.c 		if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid,
skb              6204 net/wireless/nl80211.c 	err = skb->len;
skb              6210 net/wireless/nl80211.c static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
skb              6250 net/wireless/nl80211.c static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info)
skb              6275 net/wireless/nl80211.c static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
skb              6300 net/wireless/nl80211.c static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
skb              6318 net/wireless/nl80211.c static int nl80211_get_mpp(struct sk_buff *skb, struct genl_info *info)
skb              6358 net/wireless/nl80211.c static int nl80211_dump_mpp(struct sk_buff *skb,
skb              6392 net/wireless/nl80211.c 		if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid,
skb              6403 net/wireless/nl80211.c 	err = skb->len;
skb              6409 net/wireless/nl80211.c static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
skb              6484 net/wireless/nl80211.c static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
skb              6529 net/wireless/nl80211.c static int nl80211_reload_regdb(struct sk_buff *skb, struct genl_info *info)
skb              6534 net/wireless/nl80211.c static int nl80211_get_mesh_config(struct sk_buff *skb,
skb              6903 net/wireless/nl80211.c static int nl80211_update_mesh_config(struct sk_buff *skb,
skb              6996 net/wireless/nl80211.c static int nl80211_get_reg_do(struct sk_buff *skb, struct genl_info *info)
skb              7068 net/wireless/nl80211.c 	void *hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags,
skb              7100 net/wireless/nl80211.c static int nl80211_get_reg_dump(struct sk_buff *skb,
skb              7110 net/wireless/nl80211.c 		err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq,
skb              7127 net/wireless/nl80211.c 		err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq,
skb              7136 net/wireless/nl80211.c 	err = skb->len;
skb              7193 net/wireless/nl80211.c static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
skb              7501 net/wireless/nl80211.c static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
skb              7751 net/wireless/nl80211.c static int nl80211_abort_scan(struct sk_buff *skb, struct genl_info *info)
skb              8243 net/wireless/nl80211.c static int nl80211_start_sched_scan(struct sk_buff *skb,
skb              8298 net/wireless/nl80211.c static int nl80211_stop_sched_scan(struct sk_buff *skb,
skb              8324 net/wireless/nl80211.c static int nl80211_start_radar_detection(struct sk_buff *skb,
skb              8381 net/wireless/nl80211.c static int nl80211_notify_radar_detection(struct sk_buff *skb,
skb              8435 net/wireless/nl80211.c static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
skb              8619 net/wireless/nl80211.c 	hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags,
skb              8743 net/wireless/nl80211.c static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
skb              8775 net/wireless/nl80211.c 		if (nl80211_send_bss(skb, cb,
skb              8789 net/wireless/nl80211.c 	return skb->len;
skb              8866 net/wireless/nl80211.c static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
skb              8912 net/wireless/nl80211.c 		if (nl80211_send_survey(skb,
skb              8913 net/wireless/nl80211.c 				NETLINK_CB(cb->skb).portid,
skb              8922 net/wireless/nl80211.c 	res = skb->len;
skb              8936 net/wireless/nl80211.c static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
skb              9182 net/wireless/nl80211.c static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
skb              9305 net/wireless/nl80211.c static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
skb              9353 net/wireless/nl80211.c static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
skb              9429 net/wireless/nl80211.c static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
skb              9585 net/wireless/nl80211.c static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info)
skb              9599 net/wireless/nl80211.c static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
skb              9637 net/wireless/nl80211.c 	struct sk_buff *skb;
skb              9641 net/wireless/nl80211.c 	skb = nlmsg_new(approxlen + 100, gfp);
skb              9642 net/wireless/nl80211.c 	if (!skb)
skb              9645 net/wireless/nl80211.c 	hdr = nl80211hdr_put(skb, portid, seq, 0, cmd);
skb              9647 net/wireless/nl80211.c 		kfree_skb(skb);
skb              9651 net/wireless/nl80211.c 	if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
skb              9655 net/wireless/nl80211.c 		if (nla_put_u32(skb, NL80211_ATTR_VENDOR_ID,
skb              9658 net/wireless/nl80211.c 		if (nla_put_u32(skb, NL80211_ATTR_VENDOR_SUBCMD,
skb              9664 net/wireless/nl80211.c 		if (nla_put_u64_64bit(skb, NL80211_ATTR_WDEV,
skb              9668 net/wireless/nl80211.c 		    nla_put_u32(skb, NL80211_ATTR_IFINDEX,
skb              9673 net/wireless/nl80211.c 	data = nla_nest_start_noflag(skb, attr);
skb              9677 net/wireless/nl80211.c 	((void **)skb->cb)[0] = rdev;
skb              9678 net/wireless/nl80211.c 	((void **)skb->cb)[1] = hdr;
skb              9679 net/wireless/nl80211.c 	((void **)skb->cb)[2] = data;
skb              9681 net/wireless/nl80211.c 	return skb;
skb              9684 net/wireless/nl80211.c 	kfree_skb(skb);
skb              9721 net/wireless/nl80211.c void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
skb              9723 net/wireless/nl80211.c 	struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
skb              9724 net/wireless/nl80211.c 	void *hdr = ((void **)skb->cb)[1];
skb              9725 net/wireless/nl80211.c 	struct nlmsghdr *nlhdr = nlmsg_hdr(skb);
skb              9726 net/wireless/nl80211.c 	struct nlattr *data = ((void **)skb->cb)[2];
skb              9730 net/wireless/nl80211.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb              9732 net/wireless/nl80211.c 	nla_nest_end(skb, data);
skb              9733 net/wireless/nl80211.c 	genlmsg_end(skb, hdr);
skb              9736 net/wireless/nl80211.c 		genlmsg_unicast(wiphy_net(&rdev->wiphy), skb,
skb              9743 net/wireless/nl80211.c 					skb, 0, mcgrp, gfp);
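
The allocator around nl80211.c:9637 above stashes three pointers (rdev, the genetlink header, the still-open vendor-data nest) in skb->cb, and __cfg80211_send_event_skb() pulls them back out to close the nest and send the message. Drivers reach this pair through cfg80211_vendor_event_alloc() and cfg80211_vendor_event(); a hedged usage sketch, with the event index and attribute id as driver-defined placeholders:

    #include <net/cfg80211.h>

    /* Sketch: emit a vendor-specific event carrying one u32. */
    static void my_report_vendor_event(struct wiphy *wiphy,
                                       struct wireless_dev *wdev, u32 value)
    {
            struct sk_buff *msg;

            msg = cfg80211_vendor_event_alloc(wiphy, wdev, 100,
                                              0 /* wiphy->vendor_events index */,
                                              GFP_KERNEL);
            if (!msg)
                    return;

            /* attributes land inside the NL80211_ATTR_VENDOR_DATA nest
             * that the allocator left open via msg->cb */
            if (nla_put_u32(msg, 1 /* driver-defined attr */, value)) {
                    kfree_skb(msg);
                    return;
            }

            cfg80211_vendor_event(msg, GFP_KERNEL); /* closes nest, multicasts */
    }
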
skb              9749 net/wireless/nl80211.c static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
skb              9780 net/wireless/nl80211.c static int nl80211_testmode_dump(struct sk_buff *skb,
skb              9819 net/wireless/nl80211.c 		rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
skb              9841 net/wireless/nl80211.c 		void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid,
skb              9849 net/wireless/nl80211.c 		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
skb              9850 net/wireless/nl80211.c 			genlmsg_cancel(skb, hdr);
skb              9854 net/wireless/nl80211.c 		tmdata = nla_nest_start_noflag(skb, NL80211_ATTR_TESTDATA);
skb              9856 net/wireless/nl80211.c 			genlmsg_cancel(skb, hdr);
skb              9859 net/wireless/nl80211.c 		err = rdev_testmode_dump(rdev, skb, cb, data, data_len);
skb              9860 net/wireless/nl80211.c 		nla_nest_end(skb, tmdata);
skb              9863 net/wireless/nl80211.c 			genlmsg_cancel(skb, hdr);
skb              9866 net/wireless/nl80211.c 			genlmsg_cancel(skb, hdr);
skb              9870 net/wireless/nl80211.c 		genlmsg_end(skb, hdr);
skb              9873 net/wireless/nl80211.c 	err = skb->len;
skb              9883 net/wireless/nl80211.c static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
skb              10117 net/wireless/nl80211.c static int nl80211_update_connect_params(struct sk_buff *skb,
skb              10198 net/wireless/nl80211.c static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
skb              10227 net/wireless/nl80211.c static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info)
skb              10258 net/wireless/nl80211.c static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info)
skb              10316 net/wireless/nl80211.c static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
skb              10331 net/wireless/nl80211.c static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
skb              10368 net/wireless/nl80211.c static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info)
skb              10389 net/wireless/nl80211.c static int nl80211_remain_on_channel(struct sk_buff *skb,
skb              10468 net/wireless/nl80211.c static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
skb              10486 net/wireless/nl80211.c static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
skb              10504 net/wireless/nl80211.c static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
skb              10540 net/wireless/nl80211.c static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
skb              10675 net/wireless/nl80211.c static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *info)
skb              10706 net/wireless/nl80211.c static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
skb              10736 net/wireless/nl80211.c static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
skb              10942 net/wireless/nl80211.c static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
skb              10985 net/wireless/nl80211.c static int nl80211_join_ocb(struct sk_buff *skb, struct genl_info *info)
skb              10999 net/wireless/nl80211.c static int nl80211_leave_ocb(struct sk_buff *skb, struct genl_info *info)
skb              11007 net/wireless/nl80211.c static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
skb              11127 net/wireless/nl80211.c static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
skb              11303 net/wireless/nl80211.c static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
skb              11554 net/wireless/nl80211.c static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
skb              11816 net/wireless/nl80211.c static int nl80211_get_coalesce(struct sk_buff *skb, struct genl_info *info)
skb              11956 net/wireless/nl80211.c static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
skb              12024 net/wireless/nl80211.c static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
skb              12073 net/wireless/nl80211.c static int nl80211_register_unexpected_frame(struct sk_buff *skb,
skb              12090 net/wireless/nl80211.c static int nl80211_probe_client(struct sk_buff *skb,
skb              12144 net/wireless/nl80211.c static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
skb              12178 net/wireless/nl80211.c static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
skb              12206 net/wireless/nl80211.c static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
skb              12222 net/wireless/nl80211.c static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info)
skb              12266 net/wireless/nl80211.c static int nl80211_stop_nan(struct sk_buff *skb, struct genl_info *info)
skb              12330 net/wireless/nl80211.c static int nl80211_nan_add_func(struct sk_buff *skb,
skb              12569 net/wireless/nl80211.c static int nl80211_nan_del_func(struct sk_buff *skb,
skb              12592 net/wireless/nl80211.c static int nl80211_nan_change_config(struct sk_buff *skb,
skb              12774 net/wireless/nl80211.c static int nl80211_get_protocol_features(struct sk_buff *skb,
skb              12801 net/wireless/nl80211.c static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
skb              12822 net/wireless/nl80211.c static int nl80211_crit_protocol_start(struct sk_buff *skb,
skb              12864 net/wireless/nl80211.c static int nl80211_crit_protocol_stop(struct sk_buff *skb,
skb              12903 net/wireless/nl80211.c static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
skb              12978 net/wireless/nl80211.c static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
skb              13031 net/wireless/nl80211.c 	*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf);
skb              13035 net/wireless/nl80211.c 	*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
skb              13093 net/wireless/nl80211.c static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
skb              13106 net/wireless/nl80211.c 	err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
skb              13136 net/wireless/nl80211.c 		void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid,
skb              13142 net/wireless/nl80211.c 		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
skb              13143 net/wireless/nl80211.c 		    (wdev && nla_put_u64_64bit(skb, NL80211_ATTR_WDEV,
skb              13146 net/wireless/nl80211.c 			genlmsg_cancel(skb, hdr);
skb              13150 net/wireless/nl80211.c 		vendor_data = nla_nest_start_noflag(skb,
skb              13153 net/wireless/nl80211.c 			genlmsg_cancel(skb, hdr);
skb              13157 net/wireless/nl80211.c 		err = vcmd->dumpit(&rdev->wiphy, wdev, skb, data, data_len,
skb              13159 net/wireless/nl80211.c 		nla_nest_end(skb, vendor_data);
skb              13162 net/wireless/nl80211.c 			genlmsg_cancel(skb, hdr);
skb              13165 net/wireless/nl80211.c 			genlmsg_cancel(skb, hdr);
skb              13169 net/wireless/nl80211.c 		genlmsg_end(skb, hdr);
skb              13172 net/wireless/nl80211.c 	err = skb->len;
skb              13195 net/wireless/nl80211.c int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
skb              13197 net/wireless/nl80211.c 	struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
skb              13198 net/wireless/nl80211.c 	void *hdr = ((void **)skb->cb)[1];
skb              13199 net/wireless/nl80211.c 	struct nlattr *data = ((void **)skb->cb)[2];
skb              13202 net/wireless/nl80211.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb              13205 net/wireless/nl80211.c 		kfree_skb(skb);
skb              13209 net/wireless/nl80211.c 	nla_nest_end(skb, data);
skb              13210 net/wireless/nl80211.c 	genlmsg_end(skb, hdr);
skb              13211 net/wireless/nl80211.c 	return genlmsg_reply(skb, rdev->cur_cmd_info);
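
cfg80211_vendor_cmd_reply() above is the doit-side counterpart of the same skb->cb scheme; the matching allocator, cfg80211_vendor_cmd_alloc_reply_skb(), prepares the reply skb the same way the event allocator does. A hedged sketch of a vendor command handler using the pair, with the attribute id as a driver-defined placeholder:

    #include <net/cfg80211.h>

    static int my_vendor_doit(struct wiphy *wiphy, struct wireless_dev *wdev,
                              const void *data, int data_len)
    {
            struct sk_buff *reply;

            reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(u32));
            if (!reply)
                    return -ENOMEM;

            if (nla_put_u32(reply, 1 /* driver-defined attr */, 42)) {
                    kfree_skb(reply);
                    return -ENOBUFS;
            }

            /* ends the nest opened by the allocator and consumes the skb */
            return cfg80211_vendor_cmd_reply(reply);
    }
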
skb              13226 net/wireless/nl80211.c static int nl80211_set_qos_map(struct sk_buff *skb,
skb              13277 net/wireless/nl80211.c static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info)
skb              13335 net/wireless/nl80211.c static int nl80211_del_tx_ts(struct sk_buff *skb, struct genl_info *info)
skb              13357 net/wireless/nl80211.c static int nl80211_tdls_channel_switch(struct sk_buff *skb,
skb              13417 net/wireless/nl80211.c static int nl80211_tdls_cancel_channel_switch(struct sk_buff *skb,
skb              13450 net/wireless/nl80211.c static int nl80211_set_multicast_to_unicast(struct sk_buff *skb,
skb              13472 net/wireless/nl80211.c static int nl80211_set_pmk(struct sk_buff *skb, struct genl_info *info)
skb              13529 net/wireless/nl80211.c static int nl80211_del_pmk(struct sk_buff *skb, struct genl_info *info)
skb              13556 net/wireless/nl80211.c static int nl80211_external_auth(struct sk_buff *skb, struct genl_info *info)
skb              13599 net/wireless/nl80211.c static int nl80211_tx_control_port(struct sk_buff *skb, struct genl_info *info)
skb              13661 net/wireless/nl80211.c static int nl80211_get_ftm_responder_stats(struct sk_buff *skb,
skb              13732 net/wireless/nl80211.c static int nl80211_update_owe_info(struct sk_buff *skb, struct genl_info *info)
skb              13757 net/wireless/nl80211.c static int nl80211_probe_mesh_link(struct sk_buff *skb, struct genl_info *info)
skb              13812 net/wireless/nl80211.c static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
skb              13874 net/wireless/nl80211.c static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
skb              13897 net/wireless/nl80211.c 		struct nlmsghdr *nlh = nlmsg_hdr(skb);
skb              15905 net/wireless/nl80211.c 				     struct sk_buff *skb,
skb              15910 net/wireless/nl80211.c 	struct ethhdr *ehdr = eth_hdr(skb);
skb              15912 net/wireless/nl80211.c 	u16 proto = be16_to_cpu(skb->protocol);
skb              15922 net/wireless/nl80211.c 	msg = nlmsg_new(100 + skb->len, gfp);
skb              15942 net/wireless/nl80211.c 	frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len);
skb              15946 net/wireless/nl80211.c 	skb_copy_bits(skb, 0, nla_data(frame), skb->len);
skb              15957 net/wireless/nl80211.c 			      struct sk_buff *skb, bool unencrypted)
skb              15961 net/wireless/nl80211.c 	trace_cfg80211_rx_control_port(dev, skb, unencrypted);
skb              15962 net/wireless/nl80211.c 	ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC);
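
cfg80211_rx_control_port() above copies the frame into a netlink message for the connection owner (the skb_copy_bits() at nl80211.c:15946) and leaves the original skb to the caller; it returns true when the frame was delivered to userspace. A hedged sketch of how a driver's RX path might use it, modeled on the way mac80211 consumes the result:

    #include <net/cfg80211.h>

    static void my_rx_eapol(struct net_device *dev, struct sk_buff *skb,
                            bool unencrypted)
    {
            /* eth_type_trans() sets the mac header and skb->protocol,
             * both of which the nl80211 path reads */
            skb->protocol = eth_type_trans(skb, dev);

            if (cfg80211_rx_control_port(dev, skb, unencrypted)) {
                    dev_kfree_skb(skb);     /* a copy went to userspace */
                    return;
            }

            netif_rx(skb);                  /* nobody registered: normal path */
    }
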
skb                16 net/wireless/nl80211.h void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
skb               128 net/wireless/nl80211.h int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info);
skb               129 net/wireless/nl80211.h int nl80211_pmsr_dump_results(struct sk_buff *skb, struct netlink_callback *cb);
skb               209 net/wireless/pmsr.c int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
skb               627 net/wireless/rdev-ops.h 				     struct sk_buff *skb,
skb               633 net/wireless/rdev-ops.h 	ret = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, data, len);
skb              2837 net/wireless/trace.h 	TP_PROTO(struct net_device *netdev, struct sk_buff *skb,
skb              2839 net/wireless/trace.h 	TP_ARGS(netdev, skb, unencrypted),
skb              2849 net/wireless/trace.h 		__entry->len = skb->len;
skb              2850 net/wireless/trace.h 		MAC_ASSIGN(from, eth_hdr(skb)->h_source);
skb              2851 net/wireless/trace.h 		__entry->proto = be16_to_cpu(skb->protocol);
skb               414 net/wireless/util.c unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
skb               417 net/wireless/util.c 			(const struct ieee80211_hdr *)skb->data;
skb               420 net/wireless/util.c 	if (unlikely(skb->len < 10))
skb               423 net/wireless/util.c 	if (unlikely(hdrlen > skb->len))
skb               450 net/wireless/util.c int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
skb               454 net/wireless/util.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
skb               467 net/wireless/util.c 	if (skb->len < hdrlen + 8)
skb               483 net/wireless/util.c 		skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
skb               505 net/wireless/util.c 				skb_copy_bits(skb, hdrlen +
skb               523 net/wireless/util.c 				skb_copy_bits(skb, hdrlen +
skb               537 net/wireless/util.c 	skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
skb               548 net/wireless/util.c 		tmp.h_proto = htons(skb->len - hdrlen);
skb               550 net/wireless/util.c 	pskb_pull(skb, hdrlen);
skb               553 net/wireless/util.c 		ehdr = skb_push(skb, sizeof(struct ethhdr));
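
ieee80211_data_to_8023_exthdr() above converts a received 802.11 data frame to Ethernet in place: it recovers the ethertype from the SNAP header (or synthesizes an 802.3 length field), pskb_pull()s the 802.11 header, and skb_push()es a struct ethhdr. Drivers usually call it through the ieee80211_data_to_8023() wrapper; a hedged RX-path sketch (STA iftype and minimal error handling are illustrative):

    #include <net/cfg80211.h>

    static void my_rx_data_frame(struct net_device *dev, struct sk_buff *skb)
    {
            /* rewrites the header in place; fails on short/malformed frames */
            if (ieee80211_data_to_8023(skb, dev->dev_addr,
                                       NL80211_IFTYPE_STATION)) {
                    dev_kfree_skb(skb);
                    return;
            }

            skb->dev = dev;
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
    }
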
skb               561 net/wireless/util.c __frame_add_frag(struct sk_buff *skb, struct page *page,
skb               564 net/wireless/util.c 	struct skb_shared_info *sh = skb_shinfo(skb);
skb               569 net/wireless/util.c 	skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size);
skb               573 net/wireless/util.c __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame,
skb               576 net/wireless/util.c 	struct skb_shared_info *sh = skb_shinfo(skb);
skb               581 net/wireless/util.c 	int head_size = skb->len - skb->data_len;
skb               584 net/wireless/util.c 	frag_page = virt_to_head_page(skb->head);
skb               585 net/wireless/util.c 	frag_ptr = skb->data;
skb               615 net/wireless/util.c __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
skb               621 net/wireless/util.c 	if (skb->len - offset < len)
skb               641 net/wireless/util.c 	skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
skb               648 net/wireless/util.c 	__ieee80211_amsdu_copy_frag(skb, frame, offset, len);
skb               653 net/wireless/util.c void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
skb               664 net/wireless/util.c 	bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
skb               673 net/wireless/util.c 		skb_copy_bits(skb, offset, &eth, sizeof(eth));
skb               679 net/wireless/util.c 		remaining = skb->len - offset;
skb               695 net/wireless/util.c 		if (!skb_is_nonlinear(skb) && !reuse_frag && last) {
skb               696 net/wireless/util.c 			skb_pull(skb, offset);
skb               697 net/wireless/util.c 			frame = skb;
skb               700 net/wireless/util.c 			frame = __ieee80211_amsdu_copy(skb, hlen, offset, len,
skb               709 net/wireless/util.c 		frame->dev = skb->dev;
skb               710 net/wireless/util.c 		frame->priority = skb->priority;
skb               726 net/wireless/util.c 		dev_kfree_skb(skb);
skb               732 net/wireless/util.c 	dev_kfree_skb(skb);
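
ieee80211_amsdu_to_8023s() above splits an A-MSDU into one skb per subframe, cloning page fragments rather than copying payload when skb->head_frag allows it (the reuse_frag test), and reusing the original skb for the last subframe in the linear case. A hedged usage sketch (the iftype and the NULL DA/SA filters are illustrative):

    #include <net/cfg80211.h>

    static void my_rx_amsdu(struct net_device *dev, struct sk_buff *skb)
    {
            struct sk_buff_head frames;
            struct sk_buff *frame;

            __skb_queue_head_init(&frames);
            /* consumes skb: it is reused for the last subframe or freed */
            ieee80211_amsdu_to_8023s(skb, &frames, dev->dev_addr,
                                     NL80211_IFTYPE_STATION, 0, NULL, NULL);

            while ((frame = __skb_dequeue(&frames)) != NULL) {
                    frame->protocol = eth_type_trans(frame, dev);
                    netif_rx(frame);
            }
    }
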
skb               737 net/wireless/util.c unsigned int cfg80211_classify8021d(struct sk_buff *skb,
skb               749 net/wireless/util.c 	if (skb->priority >= 256 && skb->priority <= 263) {
skb               750 net/wireless/util.c 		ret = skb->priority - 256;
skb               754 net/wireless/util.c 	if (skb_vlan_tag_present(skb)) {
skb               755 net/wireless/util.c 		vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK)
skb               763 net/wireless/util.c 	switch (skb->protocol) {
skb               765 net/wireless/util.c 		dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc;
skb               768 net/wireless/util.c 		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc;
skb               774 net/wireless/util.c 		mpls = skb_header_pointer(skb, sizeof(struct ethhdr),
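
cfg80211_classify8021d() above picks the 802.1d user priority in descending precedence: an skb->priority already in the 256..263 window, then the VLAN PCP bits, then the IP DS field (with an MPLS TC fallback). Without a QoS map the DSCP case reduces to the top three bits of the DS field; a condensed sketch of just that branch:

    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include <linux/ipv6.h>
    #include <net/dsfield.h>

    static unsigned int my_up_from_dscp(const struct sk_buff *skb)
    {
            u8 dscp;

            switch (skb->protocol) {
            case htons(ETH_P_IP):
                    dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc;
                    break;
            case htons(ETH_P_IPV6):
                    dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc;
                    break;
            default:
                    return 0;       /* best effort */
            }
            return dscp >> 5;       /* default DSCP -> UP mapping */
    }
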
skb              1995 net/wireless/util.c 	struct sk_buff *skb;
skb              2000 net/wireless/util.c 	skb = dev_alloc_skb(sizeof(*msg));
skb              2001 net/wireless/util.c 	if (!skb)
skb              2003 net/wireless/util.c 	msg = skb_put(skb, sizeof(*msg));
skb              2019 net/wireless/util.c 	skb->dev = dev;
skb              2020 net/wireless/util.c 	skb->protocol = eth_type_trans(skb, dev);
skb              2021 net/wireless/util.c 	memset(skb->cb, 0, sizeof(skb->cb));
skb              2022 net/wireless/util.c 	netif_rx_ni(skb);
skb               347 net/wireless/wext-core.c 	struct sk_buff *skb;
skb               352 net/wireless/wext-core.c 		while ((skb = skb_dequeue(&net->wext_nlevents)))
skb               353 net/wireless/wext-core.c 			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
skb               419 net/wireless/wext-core.c 					      struct sk_buff *skb)
skb               424 net/wireless/wext-core.c 	nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0);
skb               436 net/wireless/wext-core.c 	if (nla_put_string(skb, IFLA_IFNAME, dev->name))
skb               441 net/wireless/wext-core.c 	nlmsg_cancel(skb, nlh);
skb               464 net/wireless/wext-core.c 	struct sk_buff *skb;
skb               555 net/wireless/wext-core.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb               556 net/wireless/wext-core.c 	if (!skb)
skb               560 net/wireless/wext-core.c 	nlh = rtnetlink_ifinfo_prep(dev, skb);
skb               562 net/wireless/wext-core.c 		kfree_skb(skb);
skb               567 net/wireless/wext-core.c 	nla = nla_reserve(skb, IFLA_WIRELESS, event_len);
skb               569 net/wireless/wext-core.c 		kfree_skb(skb);
skb               582 net/wireless/wext-core.c 	nlmsg_end(skb, nlh);
skb               589 net/wireless/wext-core.c 		kfree_skb(skb);
skb               596 net/wireless/wext-core.c 		kfree_skb(skb);
skb               604 net/wireless/wext-core.c 		kfree_skb(skb);
skb               629 net/wireless/wext-core.c 	skb_shinfo(skb)->frag_list = compskb;
skb               631 net/wireless/wext-core.c 	skb_queue_tail(&dev_net(dev)->wext_nlevents, skb);
skb                83 net/x25/af_x25.c int x25_parse_address_block(struct sk_buff *skb,
skb                91 net/x25/af_x25.c 	if (!pskb_may_pull(skb, 1)) {
skb                97 net/x25/af_x25.c 	len = *skb->data;
skb               100 net/x25/af_x25.c 	if (!pskb_may_pull(skb, needed)) {
skb               107 net/x25/af_x25.c 	return x25_addr_ntoa(skb->data, called_addr, calling_addr);
skb               275 net/x25/af_x25.c 					struct sk_buff *skb)
skb               294 net/x25/af_x25.c 				skb->len >= x25_sk(s)->cudmatchlength) {
skb               296 net/x25/af_x25.c 					skb->data,
skb               386 net/x25/af_x25.c 	struct sk_buff *skb;
skb               394 net/x25/af_x25.c 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
skb               395 net/x25/af_x25.c 		if (skb->sk != sk) {		/* A pending connection */
skb               399 net/x25/af_x25.c 			skb->sk->sk_state = TCP_LISTEN;
skb               400 net/x25/af_x25.c 			sock_set_flag(skb->sk, SOCK_DEAD);
skb               401 net/x25/af_x25.c 			x25_start_heartbeat(skb->sk);
skb               402 net/x25/af_x25.c 			x25_sk(skb->sk)->state = X25_STATE_0;
skb               405 net/x25/af_x25.c 		kfree_skb(skb);
skb               870 net/x25/af_x25.c 	struct sk_buff *skb;
skb               888 net/x25/af_x25.c 	skb = skb_dequeue(&sk->sk_receive_queue);
skb               890 net/x25/af_x25.c 	if (!skb->sk)
skb               892 net/x25/af_x25.c 	newsk		 = skb->sk;
skb               896 net/x25/af_x25.c 	skb->sk = NULL;
skb               897 net/x25/af_x25.c 	kfree_skb(skb);
skb               931 net/x25/af_x25.c int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
skb               945 net/x25/af_x25.c 	skb_pull(skb, X25_STD_MIN_LEN);
skb               953 net/x25/af_x25.c 	addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
skb               956 net/x25/af_x25.c 	skb_pull(skb, addr_len);
skb               965 net/x25/af_x25.c 	if (!pskb_may_pull(skb, 1))
skb               967 net/x25/af_x25.c 	len = skb->data[0] + 1;
skb               968 net/x25/af_x25.c 	if (!pskb_may_pull(skb, len))
skb               970 net/x25/af_x25.c 	skb_pull(skb,len);
skb               975 net/x25/af_x25.c 	if (skb->len > X25_MAX_CUD_LEN)
skb               982 net/x25/af_x25.c 	if (!pskb_may_pull(skb, skb->len))
skb               988 net/x25/af_x25.c 	sk = x25_find_listener(&source_addr,skb);
skb               989 net/x25/af_x25.c 	skb_push(skb,len);
skb              1000 net/x25/af_x25.c 		skb_push(skb, addr_len + X25_STD_MIN_LEN);
skb              1002 net/x25/af_x25.c 				x25_forward_call(&dest_addr, nb, skb, lci) > 0)
skb              1005 net/x25/af_x25.c 			kfree_skb(skb);
skb              1017 net/x25/af_x25.c 	len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities);
skb              1038 net/x25/af_x25.c 	skb_pull(skb, len);
skb              1040 net/x25/af_x25.c 	skb->sk     = make;
skb              1066 net/x25/af_x25.c 	skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
skb              1067 net/x25/af_x25.c 	makex25->calluserdata.cudlength = skb->len;
skb              1073 net/x25/af_x25.c 	skb_queue_head(&sk->sk_receive_queue, skb);
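
x25_rx_call_request() above is a textbook defensive parse: every access to skb->data is preceded by pskb_may_pull() so reads never run past the linear head, and skb_pull()/skb_push() walk the cursor over the variable-length address and facilities blocks. The guard idiom in isolation, applied to a hypothetical length-prefixed block:

    #include <linux/skbuff.h>

    /* Validate with pskb_may_pull() before touching skb->data. */
    static int my_parse_len_prefixed(struct sk_buff *skb, u8 *out, int max)
    {
            int len;

            if (!pskb_may_pull(skb, 1))             /* length octet present? */
                    return -EINVAL;
            len = skb->data[0];
            if (len > max || !pskb_may_pull(skb, 1 + len))
                    return -EINVAL;                 /* body fully linear? */

            skb_copy_from_linear_data_offset(skb, 1, out, len);
            skb_pull(skb, 1 + len);                 /* advance past the block */
            return len;
    }
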
skb              1097 net/x25/af_x25.c 	struct sk_buff *skb;
skb              1167 net/x25/af_x25.c 	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
skb              1169 net/x25/af_x25.c 	if (!skb)
skb              1171 net/x25/af_x25.c 	X25_SKB_CB(skb)->flags = msg->msg_flags;
skb              1173 net/x25/af_x25.c 	skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN);
skb              1180 net/x25/af_x25.c 	skb_reset_transport_header(skb);
skb              1181 net/x25/af_x25.c 	skb_put(skb, len);
skb              1183 net/x25/af_x25.c 	rc = memcpy_from_msg(skb_transport_header(skb), msg, len);
skb              1192 net/x25/af_x25.c 		if (!pskb_may_pull(skb, 1))
skb              1195 net/x25/af_x25.c 		qbit = skb->data[0];
skb              1196 net/x25/af_x25.c 		skb_pull(skb, 1);
skb              1206 net/x25/af_x25.c 			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
skb              1211 net/x25/af_x25.c 			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
skb              1219 net/x25/af_x25.c 			asmptr    = skb_push(skb, X25_EXT_MIN_LEN);
skb              1226 net/x25/af_x25.c 			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
skb              1233 net/x25/af_x25.c 			skb->data[0] |= X25_Q_BIT;
skb              1244 net/x25/af_x25.c 		skb_queue_tail(&x25->interrupt_out_queue, skb);
skb              1246 net/x25/af_x25.c 		rc = x25_output(sk, skb);
skb              1249 net/x25/af_x25.c 			kfree_skb(skb);
skb              1260 net/x25/af_x25.c 	kfree_skb(skb);
skb              1273 net/x25/af_x25.c 	struct sk_buff *skb;
skb              1298 net/x25/af_x25.c 		skb = skb_dequeue(&x25->interrupt_in_queue);
skb              1300 net/x25/af_x25.c 		if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
skb              1303 net/x25/af_x25.c 		skb_pull(skb, X25_STD_MIN_LEN);
skb              1309 net/x25/af_x25.c 			asmptr  = skb_push(skb, 1);
skb              1317 net/x25/af_x25.c 		skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
skb              1320 net/x25/af_x25.c 		if (!skb)
skb              1323 net/x25/af_x25.c 		if (!pskb_may_pull(skb, header_len))
skb              1326 net/x25/af_x25.c 		qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
skb              1328 net/x25/af_x25.c 		skb_pull(skb, header_len);
skb              1331 net/x25/af_x25.c 			asmptr  = skb_push(skb, 1);
skb              1336 net/x25/af_x25.c 	skb_reset_transport_header(skb);
skb              1337 net/x25/af_x25.c 	copied = skb->len;
skb              1347 net/x25/af_x25.c 	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
skb              1360 net/x25/af_x25.c 	skb_free_datagram(sk, skb);
skb              1386 net/x25/af_x25.c 		struct sk_buff *skb;
skb              1393 net/x25/af_x25.c 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
skb              1394 net/x25/af_x25.c 			amount = skb->len;
skb                26 net/x25/x25_dev.c static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
skb                32 net/x25/x25_dev.c 	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
skb                35 net/x25/x25_dev.c 	frametype = skb->data[2];
skb                36 net/x25/x25_dev.c 	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
skb                43 net/x25/x25_dev.c 		x25_link_control(skb, nb, frametype);
skb                53 net/x25/x25_dev.c 		skb_reset_transport_header(skb);
skb                56 net/x25/x25_dev.c 			queued = x25_process_rx_frame(sk, skb);
skb                58 net/x25/x25_dev.c 			queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
skb                69 net/x25/x25_dev.c 		return x25_rx_call_request(skb, nb, lci);
skb                76 net/x25/x25_dev.c 	if (x25_forward_data(lci, nb, skb)) {
skb                80 net/x25/x25_dev.c 		kfree_skb(skb);
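
x25_receive_data() above (lines 53-58) shows the owned-socket pattern: if no process context holds the socket lock, the frame is processed immediately in softirq context; otherwise it is parked with sk_add_backlog() and replayed by release_sock() through x25_backlog_rcv(). The idiom in isolation, with my_process() as a hypothetical stand-in for the protocol state machine:

    #include <net/sock.h>

    static int my_rx(struct sock *sk, struct sk_buff *skb)
    {
            int queued;

            bh_lock_sock(sk);
            if (!sock_owned_by_user(sk)) {
                    queued = my_process(sk, skb);
            } else {
                    /* deferred: release_sock() replays via sk_backlog_rcv */
                    queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
            }
            bh_unlock_sock(sk);
            return queued;
    }
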
skb                94 net/x25/x25_dev.c int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
skb               103 net/x25/x25_dev.c 	nskb = skb_copy(skb, GFP_ATOMIC);
skb               106 net/x25/x25_dev.c 	kfree_skb(skb);
skb               107 net/x25/x25_dev.c 	skb = nskb;
skb               118 net/x25/x25_dev.c 	if (!pskb_may_pull(skb, 1)) {
skb               123 net/x25/x25_dev.c 	switch (skb->data[0]) {
skb               126 net/x25/x25_dev.c 		skb_pull(skb, 1);
skb               127 net/x25/x25_dev.c 		if (x25_receive_data(skb, nb)) {
skb               143 net/x25/x25_dev.c 	kfree_skb(skb);
skb               150 net/x25/x25_dev.c 	struct sk_buff *skb;
skb               155 net/x25/x25_dev.c 		if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
skb               159 net/x25/x25_dev.c 		ptr  = skb_put(skb, 1);
skb               171 net/x25/x25_dev.c 	skb->protocol = htons(ETH_P_X25);
skb               172 net/x25/x25_dev.c 	skb->dev      = nb->dev;
skb               174 net/x25/x25_dev.c 	dev_queue_xmit(skb);
skb               179 net/x25/x25_dev.c 	struct sk_buff *skb;
skb               189 net/x25/x25_dev.c 	skb = alloc_skb(1, GFP_ATOMIC);
skb               190 net/x25/x25_dev.c 	if (!skb) {
skb               195 net/x25/x25_dev.c 	ptr  = skb_put(skb, 1);
skb               198 net/x25/x25_dev.c 	skb->protocol = htons(ETH_P_X25);
skb               199 net/x25/x25_dev.c 	skb->dev      = nb->dev;
skb               200 net/x25/x25_dev.c 	dev_queue_xmit(skb);
skb               203 net/x25/x25_dev.c void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
skb               207 net/x25/x25_dev.c 	skb_reset_network_header(skb);
skb               211 net/x25/x25_dev.c 		dptr  = skb_push(skb, 1);
skb               217 net/x25/x25_dev.c 		kfree_skb(skb);
skb               221 net/x25/x25_dev.c 		kfree_skb(skb);
skb               225 net/x25/x25_dev.c 	skb->protocol = htons(ETH_P_X25);
skb               226 net/x25/x25_dev.c 	skb->dev      = nb->dev;
skb               228 net/x25/x25_dev.c 	dev_queue_xmit(skb);
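
The x25_dev.c excerpts show the canonical transmit-side construction: alloc_skb() sized for payload plus lower-layer headroom, skb_reserve()/skb_put()/skb_push() to lay out the data, then skb->protocol and skb->dev stamped before dev_queue_xmit(). A hedged sketch of the same shape (the headroom constant is illustrative):

    #include <linux/if_ether.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int my_send_ctrl(struct net_device *dev, u8 opcode, int headroom)
    {
            struct sk_buff *skb;

            skb = alloc_skb(headroom + 1, GFP_ATOMIC);
            if (!skb)
                    return -ENOMEM;

            skb_reserve(skb, headroom);         /* room for L2 headers */
            *(u8 *)skb_put(skb, 1) = opcode;    /* one-byte payload */

            skb->protocol = htons(ETH_P_X25);
            skb->dev = dev;
            dev_queue_xmit(skb);                /* consumes the skb */
            return 0;
    }
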
skb                41 net/x25/x25_facilities.c int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
skb                60 net/x25/x25_facilities.c 	if (!pskb_may_pull(skb, 1))
skb                63 net/x25/x25_facilities.c 	len = skb->data[0];
skb                65 net/x25/x25_facilities.c 	if (!pskb_may_pull(skb, 1 + len))
skb                68 net/x25/x25_facilities.c 	p = skb->data + 1;
skb               182 net/x25/x25_facilities.c 	return p - skb->data;
skb               265 net/x25/x25_facilities.c int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
skb               277 net/x25/x25_facilities.c 	len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
skb                18 net/x25/x25_forward.c 			struct sk_buff *skb, int lci)
skb                74 net/x25/x25_forward.c 	if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
skb                92 net/x25/x25_forward.c int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
skb               119 net/x25/x25_forward.c 	if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
skb                32 net/x25/x25_in.c static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
skb                34 net/x25/x25_in.c 	struct sk_buff *skbo, *skbn = skb;
skb                38 net/x25/x25_in.c 		x25->fraglen += skb->len;
skb                39 net/x25/x25_in.c 		skb_queue_tail(&x25->fragment_queue, skb);
skb                40 net/x25/x25_in.c 		skb_set_owner_r(skb, sk);
skb                45 net/x25/x25_in.c 		int len = x25->fraglen + skb->len;
skb                48 net/x25/x25_in.c 			kfree_skb(skb);
skb                52 net/x25/x25_in.c 		skb_queue_tail(&x25->fragment_queue, skb);
skb                87 net/x25/x25_in.c static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
skb               107 net/x25/x25_in.c 		if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
skb               109 net/x25/x25_in.c 		skb_pull(skb, X25_STD_MIN_LEN);
skb               111 net/x25/x25_in.c 		len = x25_parse_address_block(skb, &source_addr,
skb               114 net/x25/x25_in.c 			skb_pull(skb, len);
skb               118 net/x25/x25_in.c 		len = x25_parse_facilities(skb, &x25->facilities,
skb               122 net/x25/x25_in.c 			skb_pull(skb, len);
skb               128 net/x25/x25_in.c 		if (skb->len > 0) {
skb               129 net/x25/x25_in.c 			if (skb->len > X25_MAX_CUD_LEN)
skb               132 net/x25/x25_in.c 			skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
skb               133 net/x25/x25_in.c 				skb->len);
skb               134 net/x25/x25_in.c 			x25->calluserdata.cudlength = skb->len;
skb               150 net/x25/x25_in.c 		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
skb               154 net/x25/x25_in.c 		x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
skb               175 net/x25/x25_in.c static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
skb               180 net/x25/x25_in.c 			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
skb               184 net/x25/x25_in.c 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
skb               208 net/x25/x25_in.c static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
skb               230 net/x25/x25_in.c 			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
skb               234 net/x25/x25_in.c 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
skb               275 net/x25/x25_in.c 				if (x25_queue_rx_frame(sk, skb, m) == 0) {
skb               315 net/x25/x25_in.c 				queued = !sock_queue_rcv_skb(sk, skb);
skb               317 net/x25/x25_in.c 				skb_set_owner_r(skb, sk);
skb               318 net/x25/x25_in.c 				skb_queue_tail(&x25->interrupt_in_queue, skb);
skb               344 net/x25/x25_in.c static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
skb               365 net/x25/x25_in.c 			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
skb               369 net/x25/x25_in.c 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
skb               386 net/x25/x25_in.c int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
skb               394 net/x25/x25_in.c 	frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);
skb               398 net/x25/x25_in.c 		queued = x25_state1_machine(sk, skb, frametype);
skb               401 net/x25/x25_in.c 		queued = x25_state2_machine(sk, skb, frametype);
skb               404 net/x25/x25_in.c 		queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
skb               407 net/x25/x25_in.c 		queued = x25_state4_machine(sk, skb, frametype);
skb               416 net/x25/x25_in.c int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb)
skb               418 net/x25/x25_in.c 	int queued = x25_process_rx_frame(sk, skb);
skb               421 net/x25/x25_in.c 		kfree_skb(skb);
skb                69 net/x25/x25_link.c void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
skb                90 net/x25/x25_link.c 		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
skb                94 net/x25/x25_link.c 		       skb->data[3], skb->data[4],
skb                95 net/x25/x25_link.c 		       skb->data[5], skb->data[6]);
skb               116 net/x25/x25_link.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
skb               118 net/x25/x25_link.c 	if (!skb)
skb               121 net/x25/x25_link.c 	skb_reserve(skb, X25_MAX_L2_LEN);
skb               123 net/x25/x25_link.c 	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
skb               131 net/x25/x25_link.c 	skb->sk = NULL;
skb               133 net/x25/x25_link.c 	x25_send_frame(skb, nb);
skb               143 net/x25/x25_link.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
skb               145 net/x25/x25_link.c 	if (!skb)
skb               148 net/x25/x25_link.c 	skb_reserve(skb, X25_MAX_L2_LEN);
skb               150 net/x25/x25_link.c 	dptr = skb_put(skb, X25_STD_MIN_LEN);
skb               156 net/x25/x25_link.c 	skb->sk = NULL;
skb               158 net/x25/x25_link.c 	x25_send_frame(skb, nb);
skb               170 net/x25/x25_link.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
skb               172 net/x25/x25_link.c 	if (!skb)
skb               175 net/x25/x25_link.c 	skb_reserve(skb, X25_MAX_L2_LEN);
skb               177 net/x25/x25_link.c 	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
skb               187 net/x25/x25_link.c 	skb->sk = NULL;
skb               189 net/x25/x25_link.c 	x25_send_frame(skb, nb);
skb               192 net/x25/x25_link.c void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
skb               196 net/x25/x25_link.c 		skb_queue_tail(&nb->queue, skb);
skb               202 net/x25/x25_link.c 		skb_queue_tail(&nb->queue, skb);
skb               205 net/x25/x25_link.c 		x25_send_frame(skb, nb);
skb                47 net/x25/x25_out.c int x25_output(struct sock *sk, struct sk_buff *skb)
skb                52 net/x25/x25_out.c 	int sent=0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT;
skb                58 net/x25/x25_out.c 	if (skb->len - header_len > max_len) {
skb                60 net/x25/x25_out.c 		skb_copy_from_linear_data(skb, header, header_len);
skb                61 net/x25/x25_out.c 		skb_pull(skb, header_len);
skb                63 net/x25/x25_out.c 		frontlen = skb_headroom(skb);
skb                65 net/x25/x25_out.c 		while (skb->len > 0) {
skb                72 net/x25/x25_out.c 					kfree_skb(skb);
skb                83 net/x25/x25_out.c 			len = max_len > skb->len ? skb->len : max_len;
skb                86 net/x25/x25_out.c 			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
skb                87 net/x25/x25_out.c 			skb_pull(skb, len);
skb                93 net/x25/x25_out.c 			if (skb->len > 0) {
skb               104 net/x25/x25_out.c 		kfree_skb(skb);
skb               106 net/x25/x25_out.c 		skb_queue_tail(&sk->sk_write_queue, skb);
skb               107 net/x25/x25_out.c 		sent = skb->len - header_len;
skb               116 net/x25/x25_out.c static void x25_send_iframe(struct sock *sk, struct sk_buff *skb)
skb               120 net/x25/x25_out.c 	if (!skb)
skb               124 net/x25/x25_out.c 		skb->data[2]  = (x25->vs << 1) & 0xFE;
skb               125 net/x25/x25_out.c 		skb->data[3] &= X25_EXT_M_BIT;
skb               126 net/x25/x25_out.c 		skb->data[3] |= (x25->vr << 1) & 0xFE;
skb               128 net/x25/x25_out.c 		skb->data[2] &= X25_STD_M_BIT;
skb               129 net/x25/x25_out.c 		skb->data[2] |= (x25->vs << 1) & 0x0E;
skb               130 net/x25/x25_out.c 		skb->data[2] |= (x25->vr << 5) & 0xE0;
skb               133 net/x25/x25_out.c 	x25_transmit_link(skb, x25->neighbour);
skb               138 net/x25/x25_out.c 	struct sk_buff *skb, *skbn;
skb               152 net/x25/x25_out.c 		skb = skb_dequeue(&x25->interrupt_out_queue);
skb               153 net/x25/x25_out.c 		x25_transmit_link(skb, x25->neighbour);
skb               177 net/x25/x25_out.c 	skb = skb_dequeue(&sk->sk_write_queue);
skb               180 net/x25/x25_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb               181 net/x25/x25_out.c 			skb_queue_head(&sk->sk_write_queue, skb);
skb               197 net/x25/x25_out.c 		skb_queue_tail(&x25->ack_queue, skb);
skb               200 net/x25/x25_out.c 		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
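
x25_kick() above (x25_out.c:180-197) transmits a clone and parks the original on the ack_queue until the peer's N(R) covers it; x25_frames_acked() then frees it, and x25_requeue_frames() splices unacknowledged frames back for retransmission. Since skb_clone() shares the data buffer, retaining the copy is cheap. The idiom in isolation, with my_xmit() as a hypothetical transmit hook:

    #include <linux/skbuff.h>

    static int my_send_retained(struct sk_buff_head *ack_queue,
                                struct sk_buff *skb)
    {
            struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

            if (!clone)
                    return -ENOMEM;                 /* retry later */

            my_xmit(clone);                         /* clone consumed by TX */
            skb_queue_tail(ack_queue, skb);         /* kept until ACKed */
            return 0;
    }
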
skb                53 net/x25/x25_subr.c 	struct sk_buff *skb;
skb                62 net/x25/x25_subr.c 			skb = skb_dequeue(&x25->ack_queue);
skb                63 net/x25/x25_subr.c 			kfree_skb(skb);
skb                70 net/x25/x25_subr.c 	struct sk_buff *skb, *skb_prev = NULL;
skb                77 net/x25/x25_subr.c 	while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) {
skb                79 net/x25/x25_subr.c 			skb_queue_head(&sk->sk_write_queue, skb);
skb                81 net/x25/x25_subr.c 			skb_append(skb_prev, skb, &sk->sk_write_queue);
skb                82 net/x25/x25_subr.c 		skb_prev = skb;
skb               112 net/x25/x25_subr.c 	struct sk_buff *skb;
skb               152 net/x25/x25_subr.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
skb               158 net/x25/x25_subr.c 	skb_reserve(skb, X25_MAX_L2_LEN);
skb               163 net/x25/x25_subr.c 	dptr = skb_put(skb, 2);
skb               182 net/x25/x25_subr.c 			dptr    = skb_put(skb, 1);
skb               186 net/x25/x25_subr.c 			skb_put_data(skb, addresses, len);
skb               191 net/x25/x25_subr.c 			skb_put_data(skb, facilities, len);
skb               192 net/x25/x25_subr.c 			skb_put_data(skb, x25->calluserdata.cuddata,
skb               198 net/x25/x25_subr.c 			dptr    = skb_put(skb, 2);
skb               205 net/x25/x25_subr.c 			skb_put_data(skb, facilities, len);
skb               211 net/x25/x25_subr.c 				skb_put_data(skb,
skb               219 net/x25/x25_subr.c 			dptr    = skb_put(skb, 3);
skb               226 net/x25/x25_subr.c 			dptr    = skb_put(skb, 3);
skb               236 net/x25/x25_subr.c 				dptr     = skb_put(skb, 2);
skb               240 net/x25/x25_subr.c 				dptr     = skb_put(skb, 1);
skb               249 net/x25/x25_subr.c 			dptr  = skb_put(skb, 1);
skb               254 net/x25/x25_subr.c 	x25_transmit_link(skb, x25->neighbour);
skb               260 net/x25/x25_subr.c int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
skb               266 net/x25/x25_subr.c 	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
skb               268 net/x25/x25_subr.c 	frame = skb->data;
skb               293 net/x25/x25_subr.c 			if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
skb               295 net/x25/x25_subr.c 			frame = skb->data;
skb               311 net/x25/x25_subr.c 			if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
skb               313 net/x25/x25_subr.c 			frame = skb->data;
skb               326 net/xdp/xsk.c  static void xsk_destruct_skb(struct sk_buff *skb)
skb               328 net/xdp/xsk.c  	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
skb               329 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(skb->sk);
skb               336 net/xdp/xsk.c  	sock_wfree(skb);
skb               345 net/xdp/xsk.c  	struct sk_buff *skb;
skb               364 net/xdp/xsk.c  		skb = sock_alloc_send_skb(sk, len, 1, &err);
skb               365 net/xdp/xsk.c  		if (unlikely(!skb)) {
skb               370 net/xdp/xsk.c  		skb_put(skb, len);
skb               373 net/xdp/xsk.c  		err = skb_store_bits(skb, 0, buffer, len);
skb               375 net/xdp/xsk.c  			kfree_skb(skb);
skb               379 net/xdp/xsk.c  		skb->dev = xs->dev;
skb               380 net/xdp/xsk.c  		skb->priority = sk->sk_priority;
skb               381 net/xdp/xsk.c  		skb->mark = sk->sk_mark;
skb               382 net/xdp/xsk.c  		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
skb               383 net/xdp/xsk.c  		skb->destructor = xsk_destruct_skb;
skb               385 net/xdp/xsk.c  		err = dev_direct_xmit(skb, xs->queue_id);
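
xsk_generic_xmit() above hides the descriptor's umem address in skb_shinfo(skb)->destructor_arg and points skb->destructor at xsk_destruct_skb(), so the completion-ring entry is produced only when the driver finally frees the skb. The same destructor-cookie pattern suits any deferred-completion scheme; a hedged sketch with my_complete() as a placeholder:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static void my_tx_destructor(struct sk_buff *skb)
    {
            u64 cookie = (u64)(long)skb_shinfo(skb)->destructor_arg;

            my_complete(skb->sk, cookie);   /* e.g. post to a completion ring */
            sock_wfree(skb);                /* keep socket TX accounting intact */
    }

    static void my_arm_tx_completion(struct sk_buff *skb, u64 cookie)
    {
            /* replaces the sock_wfree destructor installed by
             * sock_alloc_send_skb(); my_tx_destructor still calls it */
            skb_shinfo(skb)->destructor_arg = (void *)(long)cookie;
            skb->destructor = my_tx_destructor;
    }
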
skb               147 net/xdp/xsk_diag.c 				  sk_user_ns(NETLINK_CB(cb->skb).sk),
skb               148 net/xdp/xsk_diag.c 				  NETLINK_CB(cb->skb).portid,
skb                22 net/xfrm/xfrm_device.c static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
skb                25 net/xfrm/xfrm_device.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb                27 net/xfrm/xfrm_device.c 	skb_reset_mac_len(skb);
skb                29 net/xfrm/xfrm_device.c 		skb->transport_header -= x->props.header_len;
skb                31 net/xfrm/xfrm_device.c 	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
skb                34 net/xfrm/xfrm_device.c static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
skb                38 net/xfrm/xfrm_device.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb                41 net/xfrm/xfrm_device.c 		skb->transport_header = skb->network_header + hsize;
skb                43 net/xfrm/xfrm_device.c 	skb_reset_mac_len(skb);
skb                44 net/xfrm/xfrm_device.c 	pskb_pull(skb, skb->mac_len + x->props.header_len);
skb                48 net/xfrm/xfrm_device.c static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
skb                53 net/xfrm/xfrm_device.c 			return __xfrm_mode_tunnel_prep(x, skb,
skb                56 net/xfrm/xfrm_device.c 			return __xfrm_mode_tunnel_prep(x, skb,
skb                61 net/xfrm/xfrm_device.c 			return __xfrm_transport_prep(x, skb,
skb                64 net/xfrm/xfrm_device.c 			return __xfrm_transport_prep(x, skb,
skb                74 net/xfrm/xfrm_device.c struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
skb                82 net/xfrm/xfrm_device.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb                86 net/xfrm/xfrm_device.c 		return skb;
skb                91 net/xfrm/xfrm_device.c 	sp = skb_sec_path(skb);
skb                94 net/xfrm/xfrm_device.c 		return skb;
skb               103 net/xfrm/xfrm_device.c 		return skb;
skb               106 net/xfrm/xfrm_device.c 	if (skb_is_gso(skb)) {
skb               107 net/xfrm/xfrm_device.c 		struct net_device *dev = skb->dev;
skb               116 net/xfrm/xfrm_device.c 			segs = skb_gso_segment(skb, esp_features);
skb               118 net/xfrm/xfrm_device.c 				kfree_skb(skb);
skb               122 net/xfrm/xfrm_device.c 				consume_skb(skb);
skb               123 net/xfrm/xfrm_device.c 				skb = segs;
skb               128 net/xfrm/xfrm_device.c 	if (!skb->next) {
skb               129 net/xfrm/xfrm_device.c 		esp_features |= skb->dev->gso_partial_features;
skb               130 net/xfrm/xfrm_device.c 		xfrm_outer_mode_prep(x, skb);
skb               134 net/xfrm/xfrm_device.c 		err = x->type_offload->xmit(x, skb, esp_features);
skb               140 net/xfrm/xfrm_device.c 			kfree_skb(skb);
skb               144 net/xfrm/xfrm_device.c 		skb_push(skb, skb->data - skb_mac_header(skb));
skb               146 net/xfrm/xfrm_device.c 		return skb;
skb               149 net/xfrm/xfrm_device.c 	skb2 = skb;
skb               154 net/xfrm/xfrm_device.c 		esp_features |= skb->dev->gso_partial_features;
skb               171 net/xfrm/xfrm_device.c 			if (skb == skb2)
skb               172 net/xfrm/xfrm_device.c 				skb = nskb;
skb               174 net/xfrm/xfrm_device.c 			if (!skb)
skb               186 net/xfrm/xfrm_device.c 	return skb;
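
When the NIC cannot take the offload, validate_xmit_xfrm() above falls back to software GSO: skb_gso_segment() produces a ->next-linked list, the original skb is consumed, and each segment is then offloaded or ESP-transformed individually. The segment-and-replace step in isolation (the NULL check is defensive; for a GSO skb segmentation normally yields a list or an ERR_PTR):

    #include <linux/err.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static struct sk_buff *my_segment(struct sk_buff *skb,
                                      netdev_features_t features)
    {
            struct sk_buff *segs;

            segs = skb_gso_segment(skb, features);
            if (IS_ERR(segs)) {
                    kfree_skb(skb);         /* segmentation failed: drop */
                    return NULL;
            }
            if (!segs)                      /* defensive: nothing to do */
                    return skb;

            consume_skb(skb);               /* original replaced by the list */
            return segs;                    /* linked via segs->next */
    }
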
skb               261 net/xfrm/xfrm_device.c bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
skb               264 net/xfrm/xfrm_device.c 	struct dst_entry *dst = skb_dst(skb);
skb               274 net/xfrm/xfrm_device.c 		if (skb->len <= mtu)
skb               277 net/xfrm/xfrm_device.c 		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
skb               285 net/xfrm/xfrm_device.c 		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);
skb               291 net/xfrm/xfrm_device.c void xfrm_dev_resume(struct sk_buff *skb)
skb               293 net/xfrm/xfrm_device.c 	struct net_device *dev = skb->dev;
skb               300 net/xfrm/xfrm_device.c 	txq = netdev_core_pick_tx(dev, skb, NULL);
skb               304 net/xfrm/xfrm_device.c 		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
skb               310 net/xfrm/xfrm_device.c 		skb_queue_tail(&sd->xfrm_backlog, skb);
skb               322 net/xfrm/xfrm_device.c 	struct sk_buff *skb;
skb               334 net/xfrm/xfrm_device.c 		skb = __skb_dequeue(&list);
skb               335 net/xfrm/xfrm_device.c 		xfrm_dev_resume(skb);
skb                 9 net/xfrm/xfrm_inout.h static inline void xfrm6_beet_make_header(struct sk_buff *skb)
skb                11 net/xfrm/xfrm_inout.h 	struct ipv6hdr *iph = ipv6_hdr(skb);
skb                15 net/xfrm/xfrm_inout.h 	memcpy(iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
skb                17 net/xfrm/xfrm_inout.h 	iph->nexthdr = XFRM_MODE_SKB_CB(skb)->protocol;
skb                19 net/xfrm/xfrm_inout.h 	ipv6_change_dsfield(iph, 0, XFRM_MODE_SKB_CB(skb)->tos);
skb                20 net/xfrm/xfrm_inout.h 	iph->hop_limit = XFRM_MODE_SKB_CB(skb)->ttl;
skb                23 net/xfrm/xfrm_inout.h static inline void xfrm4_beet_make_header(struct sk_buff *skb)
skb                25 net/xfrm/xfrm_inout.h 	struct iphdr *iph = ip_hdr(skb);
skb                30 net/xfrm/xfrm_inout.h 	iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol;
skb                31 net/xfrm/xfrm_inout.h 	iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
skb                33 net/xfrm/xfrm_inout.h 	iph->id = XFRM_MODE_SKB_CB(skb)->id;
skb                34 net/xfrm/xfrm_inout.h 	iph->frag_off = XFRM_MODE_SKB_CB(skb)->frag_off;
skb                35 net/xfrm/xfrm_inout.h 	iph->ttl = XFRM_MODE_SKB_CB(skb)->ttl;
skb                38 net/xfrm/xfrm_input.c 	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                99 net/xfrm/xfrm_input.c static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
skb               108 net/xfrm/xfrm_input.c 	ret = afinfo->callback(skb, protocol, err);
skb               114 net/xfrm/xfrm_input.c struct sec_path *secpath_set(struct sk_buff *skb)
skb               116 net/xfrm/xfrm_input.c 	struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
skb               118 net/xfrm/xfrm_input.c 	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
skb               136 net/xfrm/xfrm_input.c int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
skb               153 net/xfrm/xfrm_input.c 		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
skb               155 net/xfrm/xfrm_input.c 		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
skb               162 net/xfrm/xfrm_input.c 	if (!pskb_may_pull(skb, hlen))
skb               165 net/xfrm/xfrm_input.c 	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
skb               166 net/xfrm/xfrm_input.c 	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
skb               171 net/xfrm/xfrm_input.c static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
skb               177 net/xfrm/xfrm_input.c 	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
skb               181 net/xfrm/xfrm_input.c 		if (!pskb_may_pull(skb, sizeof(*ph)))
skb               184 net/xfrm/xfrm_input.c 		ph = (struct ip_beet_phdr *)skb->data;
skb               191 net/xfrm/xfrm_input.c 		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;
skb               193 net/xfrm/xfrm_input.c 		if (!pskb_may_pull(skb, phlen))
skb               195 net/xfrm/xfrm_input.c 		__skb_pull(skb, phlen);
skb               198 net/xfrm/xfrm_input.c 	skb_push(skb, sizeof(*iph));
skb               199 net/xfrm/xfrm_input.c 	skb_reset_network_header(skb);
skb               200 net/xfrm/xfrm_input.c 	skb_mac_header_rebuild(skb);
skb               202 net/xfrm/xfrm_input.c 	xfrm4_beet_make_header(skb);
skb               204 net/xfrm/xfrm_input.c 	iph = ip_hdr(skb);
skb               207 net/xfrm/xfrm_input.c 	iph->tot_len = htons(skb->len);
skb               211 net/xfrm/xfrm_input.c 	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
skb               217 net/xfrm/xfrm_input.c static void ipip_ecn_decapsulate(struct sk_buff *skb)
skb               219 net/xfrm/xfrm_input.c 	struct iphdr *inner_iph = ipip_hdr(skb);
skb               221 net/xfrm/xfrm_input.c 	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
skb               225 net/xfrm/xfrm_input.c static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
skb               229 net/xfrm/xfrm_input.c 	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
skb               232 net/xfrm/xfrm_input.c 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
skb               235 net/xfrm/xfrm_input.c 	err = skb_unclone(skb, GFP_ATOMIC);
skb               240 net/xfrm/xfrm_input.c 		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
skb               242 net/xfrm/xfrm_input.c 		ipip_ecn_decapsulate(skb);
skb               244 net/xfrm/xfrm_input.c 	skb_reset_network_header(skb);
skb               245 net/xfrm/xfrm_input.c 	skb_mac_header_rebuild(skb);
skb               246 net/xfrm/xfrm_input.c 	if (skb->mac_len)
skb               247 net/xfrm/xfrm_input.c 		eth_hdr(skb)->h_proto = skb->protocol;
skb               255 net/xfrm/xfrm_input.c static void ipip6_ecn_decapsulate(struct sk_buff *skb)
skb               257 net/xfrm/xfrm_input.c 	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
skb               259 net/xfrm/xfrm_input.c 	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
skb               260 net/xfrm/xfrm_input.c 		IP6_ECN_set_ce(skb, inner_iph);
skb               263 net/xfrm/xfrm_input.c static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
skb               267 net/xfrm/xfrm_input.c 	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
skb               269 net/xfrm/xfrm_input.c 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
skb               272 net/xfrm/xfrm_input.c 	err = skb_unclone(skb, GFP_ATOMIC);
skb               277 net/xfrm/xfrm_input.c 		ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
skb               278 net/xfrm/xfrm_input.c 			       ipipv6_hdr(skb));
skb               280 net/xfrm/xfrm_input.c 		ipip6_ecn_decapsulate(skb);
skb               282 net/xfrm/xfrm_input.c 	skb_reset_network_header(skb);
skb               283 net/xfrm/xfrm_input.c 	skb_mac_header_rebuild(skb);
skb               284 net/xfrm/xfrm_input.c 	if (skb->mac_len)
skb               285 net/xfrm/xfrm_input.c 		eth_hdr(skb)->h_proto = skb->protocol;
skb               293 net/xfrm/xfrm_input.c static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
skb               299 net/xfrm/xfrm_input.c 	err = skb_cow_head(skb, size + skb->mac_len);
skb               303 net/xfrm/xfrm_input.c 	__skb_push(skb, size);
skb               304 net/xfrm/xfrm_input.c 	skb_reset_network_header(skb);
skb               305 net/xfrm/xfrm_input.c 	skb_mac_header_rebuild(skb);
skb               307 net/xfrm/xfrm_input.c 	xfrm6_beet_make_header(skb);
skb               309 net/xfrm/xfrm_input.c 	ip6h = ipv6_hdr(skb);
skb               310 net/xfrm/xfrm_input.c 	ip6h->payload_len = htons(skb->len - size);
skb               331 net/xfrm/xfrm_input.c 			     struct sk_buff *skb)
skb               336 net/xfrm/xfrm_input.c 			return xfrm4_remove_beet_encap(x, skb);
skb               338 net/xfrm/xfrm_input.c 			return xfrm6_remove_beet_encap(x, skb);
skb               342 net/xfrm/xfrm_input.c 			return xfrm4_remove_tunnel_encap(x, skb);
skb               344 net/xfrm/xfrm_input.c 			return xfrm6_remove_tunnel_encap(x, skb);
skb               352 net/xfrm/xfrm_input.c static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
skb               361 net/xfrm/xfrm_input.c 		err = afinfo->extract_input(x, skb);
skb               368 net/xfrm/xfrm_input.c 		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
skb               375 net/xfrm/xfrm_input.c 		skb->protocol = htons(ETH_P_IP);
skb               378 net/xfrm/xfrm_input.c 		skb->protocol = htons(ETH_P_IPV6);
skb               385 net/xfrm/xfrm_input.c 	return xfrm_inner_mode_encap_remove(x, inner_mode, skb);
skb               396 net/xfrm/xfrm_input.c static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
skb               398 net/xfrm/xfrm_input.c 	int ihl = skb->data - skb_transport_header(skb);
skb               400 net/xfrm/xfrm_input.c 	if (skb->transport_header != skb->network_header) {
skb               401 net/xfrm/xfrm_input.c 		memmove(skb_transport_header(skb),
skb               402 net/xfrm/xfrm_input.c 			skb_network_header(skb), ihl);
skb               403 net/xfrm/xfrm_input.c 		skb->network_header = skb->transport_header;
skb               405 net/xfrm/xfrm_input.c 	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
skb               406 net/xfrm/xfrm_input.c 	skb_reset_transport_header(skb);
skb               410 net/xfrm/xfrm_input.c static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
skb               413 net/xfrm/xfrm_input.c 	int ihl = skb->data - skb_transport_header(skb);
skb               415 net/xfrm/xfrm_input.c 	if (skb->transport_header != skb->network_header) {
skb               416 net/xfrm/xfrm_input.c 		memmove(skb_transport_header(skb),
skb               417 net/xfrm/xfrm_input.c 			skb_network_header(skb), ihl);
skb               418 net/xfrm/xfrm_input.c 		skb->network_header = skb->transport_header;
skb               420 net/xfrm/xfrm_input.c 	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
skb               422 net/xfrm/xfrm_input.c 	skb_reset_transport_header(skb);
skb               432 net/xfrm/xfrm_input.c 				 struct sk_buff *skb)
skb               437 net/xfrm/xfrm_input.c 		return xfrm_prepare_input(x, skb);
skb               440 net/xfrm/xfrm_input.c 			return xfrm4_transport_input(x, skb);
skb               442 net/xfrm/xfrm_input.c 			return xfrm6_transport_input(x, skb);
skb               455 net/xfrm/xfrm_input.c int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
skb               458 net/xfrm/xfrm_input.c 	struct net *net = dev_net(skb->dev);
skb               465 net/xfrm/xfrm_input.c 	u32 mark = skb->mark;
skb               471 net/xfrm/xfrm_input.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               475 net/xfrm/xfrm_input.c 		x = xfrm_input_state(skb);
skb               485 net/xfrm/xfrm_input.c 				dev_put(skb->dev);
skb               494 net/xfrm/xfrm_input.c 			seq = XFRM_SKB_CB(skb)->seq.input.low;
skb               500 net/xfrm/xfrm_input.c 		seq = XFRM_SPI_SKB_CB(skb)->seq;
skb               504 net/xfrm/xfrm_input.c 			family = XFRM_SPI_SKB_CB(skb)->family;
skb               513 net/xfrm/xfrm_input.c 					xfrm_audit_state_icvfail(x, skb,
skb               529 net/xfrm/xfrm_input.c 			if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
skb               538 net/xfrm/xfrm_input.c 	family = XFRM_SPI_SKB_CB(skb)->family;
skb               543 net/xfrm/xfrm_input.c 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
skb               544 net/xfrm/xfrm_input.c 			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
skb               547 net/xfrm/xfrm_input.c 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
skb               548 net/xfrm/xfrm_input.c 			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
skb               552 net/xfrm/xfrm_input.c 	sp = secpath_set(skb);
skb               559 net/xfrm/xfrm_input.c 	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
skb               560 net/xfrm/xfrm_input.c 		secpath_reset(skb);
skb               565 net/xfrm/xfrm_input.c 	daddr = (xfrm_address_t *)(skb_network_header(skb) +
skb               566 net/xfrm/xfrm_input.c 				   XFRM_SPI_SKB_CB(skb)->daddroff);
skb               568 net/xfrm/xfrm_input.c 		sp = skb_sec_path(skb);
skb               571 net/xfrm/xfrm_input.c 			secpath_reset(skb);
skb               578 net/xfrm/xfrm_input.c 			secpath_reset(skb);
skb               580 net/xfrm/xfrm_input.c 			xfrm_audit_state_notfound(skb, family, spi, seq);
skb               584 net/xfrm/xfrm_input.c 		skb->mark = xfrm_smark_get(skb->mark, x);
skb               588 net/xfrm/xfrm_input.c 		skb_dst_force(skb);
skb               589 net/xfrm/xfrm_input.c 		if (!skb_dst(skb)) {
skb               611 net/xfrm/xfrm_input.c 		if (x->repl->check(x, skb, seq)) {
skb               623 net/xfrm/xfrm_input.c 		if (xfrm_tunnel_check(skb, x, family)) {
skb               630 net/xfrm/xfrm_input.c 		XFRM_SKB_CB(skb)->seq.input.low = seq;
skb               631 net/xfrm/xfrm_input.c 		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
skb               633 net/xfrm/xfrm_input.c 		dev_hold(skb->dev);
skb               636 net/xfrm/xfrm_input.c 			nexthdr = x->type_offload->input_tail(x, skb);
skb               638 net/xfrm/xfrm_input.c 			nexthdr = x->type->input(x, skb);
skb               643 net/xfrm/xfrm_input.c 		dev_put(skb->dev);
skb               648 net/xfrm/xfrm_input.c 				xfrm_audit_state_icvfail(x, skb,
skb               659 net/xfrm/xfrm_input.c 		if (async && x->repl->recheck(x, skb, seq)) {
skb               666 net/xfrm/xfrm_input.c 		x->curlft.bytes += skb->len;
skb               671 net/xfrm/xfrm_input.c 		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;
skb               676 net/xfrm/xfrm_input.c 			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
skb               683 net/xfrm/xfrm_input.c 		if (xfrm_inner_mode_input(x, inner_mode, skb)) {
skb               700 net/xfrm/xfrm_input.c 		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
skb               708 net/xfrm/xfrm_input.c 	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
skb               712 net/xfrm/xfrm_input.c 	nf_reset_ct(skb);
skb               715 net/xfrm/xfrm_input.c 		sp = skb_sec_path(skb);
skb               718 net/xfrm/xfrm_input.c 		skb_dst_drop(skb);
skb               719 net/xfrm/xfrm_input.c 		gro_cells_receive(&gro_cells, skb);
skb               722 net/xfrm/xfrm_input.c 		xo = xfrm_offload(skb);
skb               730 net/xfrm/xfrm_input.c 			err = afinfo->transport_finish(skb, xfrm_gro || async);
skb               733 net/xfrm/xfrm_input.c 			sp = skb_sec_path(skb);
skb               736 net/xfrm/xfrm_input.c 			skb_dst_drop(skb);
skb               737 net/xfrm/xfrm_input.c 			gro_cells_receive(&gro_cells, skb);
skb               747 net/xfrm/xfrm_input.c 	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
skb               748 net/xfrm/xfrm_input.c 	kfree_skb(skb);
skb               753 net/xfrm/xfrm_input.c int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
skb               755 net/xfrm/xfrm_input.c 	return xfrm_input(skb, nexthdr, 0, -1);
skb               763 net/xfrm/xfrm_input.c 	struct sk_buff *skb;
skb               768 net/xfrm/xfrm_input.c 	while ((skb = __skb_dequeue(&queue)))
skb               769 net/xfrm/xfrm_input.c 		XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
skb               772 net/xfrm/xfrm_input.c int xfrm_trans_queue(struct sk_buff *skb,
skb               783 net/xfrm/xfrm_input.c 	XFRM_TRANS_SKB_CB(skb)->finish = finish;
skb               784 net/xfrm/xfrm_input.c 	__skb_queue_tail(&trans->queue, skb);
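
xfrm_input() above repeatedly calls xfrm_parse_spi() to locate the SPI and sequence number at protocol-dependent offsets in the transport header. A userspace re-creation of those offsets, assuming the RFC header layouts (ESP: SPI at 0, seq at 4; AH: SPI at 4, seq at 8; IPComp: a 16-bit CPI at offset 2 widened to 32 bits, as in the htonl(ntohs(...)) line at the top of this section); parse_spi_seq() itself is our name, not a kernel symbol:

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>
	#include <arpa/inet.h>

	static int parse_spi_seq(const uint8_t *th, size_t len, int proto,
				 uint32_t *spi, uint32_t *seq)
	{
		size_t off_spi, off_seq;
		uint16_t cpi;

		switch (proto) {
		case 50:			/* IPPROTO_ESP: spi, seq_no */
			off_spi = 0; off_seq = 4; break;
		case 51:			/* IPPROTO_AH: nexthdr, hdrlen,
						 * reserved, spi, seq_no */
			off_spi = 4; off_seq = 8; break;
		case 108:			/* IPPROTO_COMP: 16-bit CPI */
			if (len < 4)
				return -1;
			memcpy(&cpi, th + 2, 2);
			*spi = ntohs(cpi);	/* CPI promoted to an SPI */
			*seq = 0;
			return 0;
		default:
			return -1;
		}
		if (len < off_seq + 4)
			return -1;
		memcpy(spi, th + off_spi, 4);
		memcpy(seq, th + off_seq, 4);
		*spi = ntohl(*spi);
		*seq = ntohl(*seq);
		return 0;
	}
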
skb                73 net/xfrm/xfrm_interface.c static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
skb                80 net/xfrm/xfrm_interface.c 	if (!secpath_exists(skb) || !skb->dev)
skb                85 net/xfrm/xfrm_interface.c 		ifindex = inet6_sdif(skb);
skb                88 net/xfrm/xfrm_interface.c 		ifindex = inet_sdif(skb);
skb                92 net/xfrm/xfrm_interface.c 		ifindex = skb->dev->ifindex;
skb                94 net/xfrm/xfrm_interface.c 	xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
skb               181 net/xfrm/xfrm_interface.c static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
skb               183 net/xfrm/xfrm_interface.c 	skb->tstamp = 0;
skb               184 net/xfrm/xfrm_interface.c 	skb->pkt_type = PACKET_HOST;
skb               185 net/xfrm/xfrm_interface.c 	skb->skb_iif = 0;
skb               186 net/xfrm/xfrm_interface.c 	skb->ignore_df = 0;
skb               187 net/xfrm/xfrm_interface.c 	skb_dst_drop(skb);
skb               188 net/xfrm/xfrm_interface.c 	nf_reset_ct(skb);
skb               189 net/xfrm/xfrm_interface.c 	nf_reset_trace(skb);
skb               194 net/xfrm/xfrm_interface.c 	ipvs_reset(skb);
skb               195 net/xfrm/xfrm_interface.c 	secpath_reset(skb);
skb               196 net/xfrm/xfrm_interface.c 	skb_orphan(skb);
skb               197 net/xfrm/xfrm_interface.c 	skb->mark = 0;
skb               200 net/xfrm/xfrm_interface.c static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
skb               209 net/xfrm/xfrm_interface.c 	if (err && !secpath_exists(skb))
skb               212 net/xfrm/xfrm_interface.c 	x = xfrm_input_state(skb);
skb               219 net/xfrm/xfrm_interface.c 	skb->dev = dev;
skb               228 net/xfrm/xfrm_interface.c 	xnet = !net_eq(xi->net, dev_net(skb->dev));
skb               234 net/xfrm/xfrm_interface.c 			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
skb               236 net/xfrm/xfrm_interface.c 				XFRM_INC_STATS(dev_net(skb->dev),
skb               242 net/xfrm/xfrm_interface.c 		if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
skb               247 net/xfrm/xfrm_interface.c 	xfrmi_scrub_packet(skb, xnet);
skb               253 net/xfrm/xfrm_interface.c 	tstats->rx_bytes += skb->len;
skb               260 net/xfrm/xfrm_interface.c xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
skb               264 net/xfrm/xfrm_interface.c 	struct dst_entry *dst = skb_dst(skb);
skb               265 net/xfrm/xfrm_interface.c 	unsigned int length = skb->len;
skb               296 net/xfrm/xfrm_interface.c 	if (!skb->ignore_df && skb->len > mtu) {
skb               297 net/xfrm/xfrm_interface.c 		skb_dst_update_pmtu_no_confirm(skb, mtu);
skb               299 net/xfrm/xfrm_interface.c 		if (skb->protocol == htons(ETH_P_IPV6)) {
skb               303 net/xfrm/xfrm_interface.c 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
skb               305 net/xfrm/xfrm_interface.c 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
skb               313 net/xfrm/xfrm_interface.c 	xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
skb               314 net/xfrm/xfrm_interface.c 	skb_dst_set(skb, dst);
skb               315 net/xfrm/xfrm_interface.c 	skb->dev = tdev;
skb               317 net/xfrm/xfrm_interface.c 	err = dst_output(xi->net, skb->sk, skb);
skb               333 net/xfrm/xfrm_interface.c 	dst_link_failure(skb);
skb               339 net/xfrm/xfrm_interface.c static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
skb               343 net/xfrm/xfrm_interface.c 	struct dst_entry *dst = skb_dst(skb);
skb               349 net/xfrm/xfrm_interface.c 	switch (skb->protocol) {
skb               351 net/xfrm/xfrm_interface.c 		xfrm_decode_session(skb, &fl, AF_INET6);
skb               352 net/xfrm/xfrm_interface.c 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
skb               362 net/xfrm/xfrm_interface.c 			skb_dst_set(skb, dst);
skb               366 net/xfrm/xfrm_interface.c 		xfrm_decode_session(skb, &fl, AF_INET);
skb               367 net/xfrm/xfrm_interface.c 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
skb               378 net/xfrm/xfrm_interface.c 			skb_dst_set(skb, &rt->dst);
skb               387 net/xfrm/xfrm_interface.c 	ret = xfrmi_xmit2(skb, dev, &fl);
skb               396 net/xfrm/xfrm_interface.c 	kfree_skb(skb);
skb               400 net/xfrm/xfrm_interface.c static int xfrmi4_err(struct sk_buff *skb, u32 info)
skb               402 net/xfrm/xfrm_interface.c 	const struct iphdr *iph = (const struct iphdr *)skb->data;
skb               403 net/xfrm/xfrm_interface.c 	struct net *net = dev_net(skb->dev);
skb               414 net/xfrm/xfrm_interface.c 		esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
skb               418 net/xfrm/xfrm_interface.c 		ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
skb               422 net/xfrm/xfrm_interface.c 		ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
skb               429 net/xfrm/xfrm_interface.c 	switch (icmp_hdr(skb)->type) {
skb               431 net/xfrm/xfrm_interface.c 		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
skb               439 net/xfrm/xfrm_interface.c 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
skb               450 net/xfrm/xfrm_interface.c 	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
skb               451 net/xfrm/xfrm_interface.c 		ipv4_update_pmtu(skb, net, info, 0, protocol);
skb               453 net/xfrm/xfrm_interface.c 		ipv4_redirect(skb, net, 0, protocol);
skb               459 net/xfrm/xfrm_interface.c static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb               462 net/xfrm/xfrm_interface.c 	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
skb               463 net/xfrm/xfrm_interface.c 	struct net *net = dev_net(skb->dev);
skb               474 net/xfrm/xfrm_interface.c 		esph = (struct ip_esp_hdr *)(skb->data + offset);
skb               478 net/xfrm/xfrm_interface.c 		ah = (struct ip_auth_hdr *)(skb->data + offset);
skb               482 net/xfrm/xfrm_interface.c 		ipch = (struct ip_comp_hdr *)(skb->data + offset);
skb               493 net/xfrm/xfrm_interface.c 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
skb               505 net/xfrm/xfrm_interface.c 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
skb               508 net/xfrm/xfrm_interface.c 		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
skb               712 net/xfrm/xfrm_interface.c static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
skb               717 net/xfrm/xfrm_interface.c 	if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
skb               718 net/xfrm/xfrm_interface.c 	    nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
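
xfrmi_scrub_packet(), indexed above, strips per-device and conntrack state so a decapsulated packet re-enters the stack cleanly; when the packet crosses into another network namespace it is additionally orphaned and its mark cleared. The same idea in miniature, with an illustrative struct and function that are not kernel API (PACKET_HOST is 0 in <linux/if_packet.h>):

	struct pkt_meta {	/* illustrative subset of sk_buff metadata */
		unsigned long tstamp;
		int pkt_type, skb_iif, ignore_df;
		unsigned int mark;
	};

	static void scrub(struct pkt_meta *m, int cross_netns)
	{
		m->tstamp = 0;
		m->pkt_type = 0;	/* PACKET_HOST */
		m->skb_iif = 0;
		m->ignore_df = 0;
		if (cross_netns)	/* xnet: leaving the origin netns */
			m->mark = 0;
	}
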
skb                38 net/xfrm/xfrm_ipcomp.c static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
skb                41 net/xfrm/xfrm_ipcomp.c 	const int plen = skb->len;
skb                43 net/xfrm/xfrm_ipcomp.c 	const u8 *start = skb->data;
skb                59 net/xfrm/xfrm_ipcomp.c 	if (len > skb_tailroom(skb))
skb                60 net/xfrm/xfrm_ipcomp.c 		len = skb_tailroom(skb);
skb                62 net/xfrm/xfrm_ipcomp.c 	__skb_put(skb, len);
skb                65 net/xfrm/xfrm_ipcomp.c 	skb_copy_to_linear_data(skb, scratch, len);
skb                72 net/xfrm/xfrm_ipcomp.c 		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
skb                75 net/xfrm/xfrm_ipcomp.c 		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
skb                92 net/xfrm/xfrm_ipcomp.c 		skb->truesize += len;
skb                93 net/xfrm/xfrm_ipcomp.c 		skb->data_len += len;
skb                94 net/xfrm/xfrm_ipcomp.c 		skb->len += len;
skb                96 net/xfrm/xfrm_ipcomp.c 		skb_shinfo(skb)->nr_frags++;
skb               106 net/xfrm/xfrm_ipcomp.c int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
skb               112 net/xfrm/xfrm_ipcomp.c 	if (skb_linearize_cow(skb))
skb               115 net/xfrm/xfrm_ipcomp.c 	skb->ip_summed = CHECKSUM_NONE;
skb               118 net/xfrm/xfrm_ipcomp.c 	ipch = (void *)skb->data;
skb               121 net/xfrm/xfrm_ipcomp.c 	skb->transport_header = skb->network_header + sizeof(*ipch);
skb               122 net/xfrm/xfrm_ipcomp.c 	__skb_pull(skb, sizeof(*ipch));
skb               123 net/xfrm/xfrm_ipcomp.c 	err = ipcomp_decompress(x, skb);
skb               134 net/xfrm/xfrm_ipcomp.c static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
skb               137 net/xfrm/xfrm_ipcomp.c 	const int plen = skb->len;
skb               139 net/xfrm/xfrm_ipcomp.c 	u8 *start = skb->data;
skb               159 net/xfrm/xfrm_ipcomp.c 	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
skb               167 net/xfrm/xfrm_ipcomp.c int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
skb               173 net/xfrm/xfrm_ipcomp.c 	if (skb->len < ipcd->threshold) {
skb               178 net/xfrm/xfrm_ipcomp.c 	if (skb_linearize_cow(skb))
skb               181 net/xfrm/xfrm_ipcomp.c 	err = ipcomp_compress(x, skb);
skb               188 net/xfrm/xfrm_ipcomp.c 	ipch = ip_comp_hdr(skb);
skb               189 net/xfrm/xfrm_ipcomp.c 	ipch->nexthdr = *skb_mac_header(skb);
skb               192 net/xfrm/xfrm_ipcomp.c 	*skb_mac_header(skb) = IPPROTO_COMP;
skb               194 net/xfrm/xfrm_ipcomp.c 	skb_push(skb, -skb_network_offset(skb));
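
ipcomp_output(), indexed above, only compresses payloads at or above the state's threshold and falls back to sending the packet unchanged when deflate fails to shrink it past the 4-byte IPComp header. A hedged userspace sketch of that decision, with zlib standing in for the kernel crypto deflate transform (all names ours):

	#include <stdint.h>
	#include <stddef.h>
	#include <zlib.h>

	struct comp_hdr {	/* mirrors struct ip_comp_hdr: 4 bytes */
		uint8_t  nexthdr;
		uint8_t  flags;
		uint16_t cpi;
	};

	static int ipcomp_try_compress(const uint8_t *in, size_t plen,
				       size_t threshold, uint8_t *out,
				       size_t outcap, size_t *outlen)
	{
		uLongf dlen;

		if (plen < threshold || outcap <= sizeof(struct comp_hdr))
			return 1;	/* too small to bother */
		dlen = outcap - sizeof(struct comp_hdr);
		if (compress(out + sizeof(struct comp_hdr), &dlen,
			     in, plen) != Z_OK ||
		    dlen + sizeof(struct comp_hdr) >= plen)
			return 1;	/* did not shrink; send as-is */
		*outlen = dlen + sizeof(struct comp_hdr);
		return 0;		/* caller fills in the comp_hdr fields */
	}
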
skb                21 net/xfrm/xfrm_output.c static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
skb                22 net/xfrm/xfrm_output.c static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
skb                24 net/xfrm/xfrm_output.c static int xfrm_skb_check_space(struct sk_buff *skb)
skb                26 net/xfrm/xfrm_output.c 	struct dst_entry *dst = skb_dst(skb);
skb                28 net/xfrm/xfrm_output.c 		- skb_headroom(skb);
skb                29 net/xfrm/xfrm_output.c 	int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
skb                38 net/xfrm/xfrm_output.c 	return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
skb                45 net/xfrm/xfrm_output.c static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
skb                47 net/xfrm/xfrm_output.c 	struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));
skb                49 net/xfrm/xfrm_output.c 	skb_dst_drop(skb);
skb                58 net/xfrm/xfrm_output.c static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
skb                60 net/xfrm/xfrm_output.c 	struct iphdr *iph = ip_hdr(skb);
skb                63 net/xfrm/xfrm_output.c 	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
skb                65 net/xfrm/xfrm_output.c 	skb_set_network_header(skb, -x->props.header_len);
skb                66 net/xfrm/xfrm_output.c 	skb->mac_header = skb->network_header +
skb                68 net/xfrm/xfrm_output.c 	skb->transport_header = skb->network_header + ihl;
skb                69 net/xfrm/xfrm_output.c 	__skb_pull(skb, ihl);
skb                70 net/xfrm/xfrm_output.c 	memmove(skb_network_header(skb), iph, ihl);
skb                79 net/xfrm/xfrm_output.c static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
skb                86 net/xfrm/xfrm_output.c 	iph = ipv6_hdr(skb);
skb                87 net/xfrm/xfrm_output.c 	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
skb                89 net/xfrm/xfrm_output.c 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
skb                92 net/xfrm/xfrm_output.c 	skb_set_mac_header(skb,
skb                93 net/xfrm/xfrm_output.c 			   (prevhdr - x->props.header_len) - skb->data);
skb                94 net/xfrm/xfrm_output.c 	skb_set_network_header(skb, -x->props.header_len);
skb                95 net/xfrm/xfrm_output.c 	skb->transport_header = skb->network_header + hdr_len;
skb                96 net/xfrm/xfrm_output.c 	__skb_pull(skb, hdr_len);
skb                97 net/xfrm/xfrm_output.c 	memmove(ipv6_hdr(skb), iph, hdr_len);
skb               110 net/xfrm/xfrm_output.c static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
skb               117 net/xfrm/xfrm_output.c 	iph = ipv6_hdr(skb);
skb               119 net/xfrm/xfrm_output.c 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
skb               122 net/xfrm/xfrm_output.c 	skb_set_mac_header(skb,
skb               123 net/xfrm/xfrm_output.c 			   (prevhdr - x->props.header_len) - skb->data);
skb               124 net/xfrm/xfrm_output.c 	skb_set_network_header(skb, -x->props.header_len);
skb               125 net/xfrm/xfrm_output.c 	skb->transport_header = skb->network_header + hdr_len;
skb               126 net/xfrm/xfrm_output.c 	__skb_pull(skb, hdr_len);
skb               127 net/xfrm/xfrm_output.c 	memmove(ipv6_hdr(skb), iph, hdr_len);
skb               142 net/xfrm/xfrm_output.c static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
skb               149 net/xfrm/xfrm_output.c 	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
skb               153 net/xfrm/xfrm_output.c 	skb_set_network_header(skb, -x->props.header_len - hdrlen +
skb               154 net/xfrm/xfrm_output.c 			       (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
skb               156 net/xfrm/xfrm_output.c 		skb->network_header += IPV4_BEET_PHMAXLEN;
skb               157 net/xfrm/xfrm_output.c 	skb->mac_header = skb->network_header +
skb               159 net/xfrm/xfrm_output.c 	skb->transport_header = skb->network_header + sizeof(*top_iph);
skb               161 net/xfrm/xfrm_output.c 	xfrm4_beet_make_header(skb);
skb               163 net/xfrm/xfrm_output.c 	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);
skb               165 net/xfrm/xfrm_output.c 	top_iph = ip_hdr(skb);
skb               191 net/xfrm/xfrm_output.c static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
skb               193 net/xfrm/xfrm_output.c 	struct dst_entry *dst = skb_dst(skb);
skb               197 net/xfrm/xfrm_output.c 	skb_set_inner_network_header(skb, skb_network_offset(skb));
skb               198 net/xfrm/xfrm_output.c 	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
skb               200 net/xfrm/xfrm_output.c 	skb_set_network_header(skb, -x->props.header_len);
skb               201 net/xfrm/xfrm_output.c 	skb->mac_header = skb->network_header +
skb               203 net/xfrm/xfrm_output.c 	skb->transport_header = skb->network_header + sizeof(*top_iph);
skb               204 net/xfrm/xfrm_output.c 	top_iph = ip_hdr(skb);
skb               209 net/xfrm/xfrm_output.c 	top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);
skb               215 net/xfrm/xfrm_output.c 		top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
skb               217 net/xfrm/xfrm_output.c 					    XFRM_MODE_SKB_CB(skb)->tos);
skb               224 net/xfrm/xfrm_output.c 		0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
skb               230 net/xfrm/xfrm_output.c 	ip_select_ident(dev_net(dst->dev), skb, NULL);
skb               236 net/xfrm/xfrm_output.c static int xfrm6_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
skb               238 net/xfrm/xfrm_output.c 	struct dst_entry *dst = skb_dst(skb);
skb               242 net/xfrm/xfrm_output.c 	skb_set_inner_network_header(skb, skb_network_offset(skb));
skb               243 net/xfrm/xfrm_output.c 	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
skb               245 net/xfrm/xfrm_output.c 	skb_set_network_header(skb, -x->props.header_len);
skb               246 net/xfrm/xfrm_output.c 	skb->mac_header = skb->network_header +
skb               248 net/xfrm/xfrm_output.c 	skb->transport_header = skb->network_header + sizeof(*top_iph);
skb               249 net/xfrm/xfrm_output.c 	top_iph = ipv6_hdr(skb);
skb               253 net/xfrm/xfrm_output.c 	memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
skb               255 net/xfrm/xfrm_output.c 	top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);
skb               260 net/xfrm/xfrm_output.c 		dsfield = XFRM_MODE_SKB_CB(skb)->tos;
skb               261 net/xfrm/xfrm_output.c 	dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos);
skb               271 net/xfrm/xfrm_output.c static int xfrm6_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
skb               278 net/xfrm/xfrm_output.c 	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
skb               282 net/xfrm/xfrm_output.c 	skb_set_network_header(skb, -x->props.header_len - hdr_len);
skb               284 net/xfrm/xfrm_output.c 		skb->network_header += IPV4_BEET_PHMAXLEN;
skb               285 net/xfrm/xfrm_output.c 	skb->mac_header = skb->network_header +
skb               287 net/xfrm/xfrm_output.c 	skb->transport_header = skb->network_header + sizeof(*top_iph);
skb               288 net/xfrm/xfrm_output.c 	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);
skb               290 net/xfrm/xfrm_output.c 	xfrm6_beet_make_header(skb);
skb               292 net/xfrm/xfrm_output.c 	top_iph = ipv6_hdr(skb);
skb               322 net/xfrm/xfrm_output.c static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
skb               326 net/xfrm/xfrm_output.c 	err = xfrm_inner_extract_output(x, skb);
skb               330 net/xfrm/xfrm_output.c 	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
skb               331 net/xfrm/xfrm_output.c 	skb->protocol = htons(ETH_P_IP);
skb               335 net/xfrm/xfrm_output.c 		return xfrm4_beet_encap_add(x, skb);
skb               337 net/xfrm/xfrm_output.c 		return xfrm4_tunnel_encap_add(x, skb);
skb               344 net/xfrm/xfrm_output.c static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
skb               349 net/xfrm/xfrm_output.c 	err = xfrm_inner_extract_output(x, skb);
skb               353 net/xfrm/xfrm_output.c 	skb->ignore_df = 1;
skb               354 net/xfrm/xfrm_output.c 	skb->protocol = htons(ETH_P_IPV6);
skb               358 net/xfrm/xfrm_output.c 		return xfrm6_beet_encap_add(x, skb);
skb               360 net/xfrm/xfrm_output.c 		return xfrm6_tunnel_encap_add(x, skb);
skb               370 net/xfrm/xfrm_output.c static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
skb               376 net/xfrm/xfrm_output.c 			return xfrm4_prepare_output(x, skb);
skb               378 net/xfrm/xfrm_output.c 			return xfrm6_prepare_output(x, skb);
skb               382 net/xfrm/xfrm_output.c 			return xfrm4_transport_output(x, skb);
skb               384 net/xfrm/xfrm_output.c 			return xfrm6_transport_output(x, skb);
skb               388 net/xfrm/xfrm_output.c 			return xfrm6_ro_output(x, skb);
skb               400 net/xfrm/xfrm_output.c int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
skb               402 net/xfrm/xfrm_output.c 	return xfrm_outer_mode_output(x, skb);
skb               407 net/xfrm/xfrm_output.c static int xfrm_output_one(struct sk_buff *skb, int err)
skb               409 net/xfrm/xfrm_output.c 	struct dst_entry *dst = skb_dst(skb);
skb               417 net/xfrm/xfrm_output.c 		err = xfrm_skb_check_space(skb);
skb               423 net/xfrm/xfrm_output.c 		skb->mark = xfrm_smark_get(skb->mark, x);
skb               425 net/xfrm/xfrm_output.c 		err = xfrm_outer_mode_output(x, skb);
skb               445 net/xfrm/xfrm_output.c 		err = x->repl->overflow(x, skb);
skb               451 net/xfrm/xfrm_output.c 		x->curlft.bytes += skb->len;
skb               456 net/xfrm/xfrm_output.c 		skb_dst_force(skb);
skb               457 net/xfrm/xfrm_output.c 		if (!skb_dst(skb)) {
skb               463 net/xfrm/xfrm_output.c 		if (xfrm_offload(skb)) {
skb               464 net/xfrm/xfrm_output.c 			x->type_offload->encap(x, skb);
skb               467 net/xfrm/xfrm_output.c 			skb->encapsulation = 0;
skb               469 net/xfrm/xfrm_output.c 			err = x->type->output(x, skb);
skb               480 net/xfrm/xfrm_output.c 		dst = skb_dst_pop(skb);
skb               486 net/xfrm/xfrm_output.c 		skb_dst_set(skb, dst);
skb               495 net/xfrm/xfrm_output.c 	kfree_skb(skb);
skb               500 net/xfrm/xfrm_output.c int xfrm_output_resume(struct sk_buff *skb, int err)
skb               502 net/xfrm/xfrm_output.c 	struct net *net = xs_net(skb_dst(skb)->xfrm);
skb               504 net/xfrm/xfrm_output.c 	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
skb               505 net/xfrm/xfrm_output.c 		nf_reset_ct(skb);
skb               507 net/xfrm/xfrm_output.c 		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
skb               511 net/xfrm/xfrm_output.c 		if (!skb_dst(skb)->xfrm)
skb               512 net/xfrm/xfrm_output.c 			return dst_output(net, skb->sk, skb);
skb               514 net/xfrm/xfrm_output.c 		err = nf_hook(skb_dst(skb)->ops->family,
skb               515 net/xfrm/xfrm_output.c 			      NF_INET_POST_ROUTING, net, skb->sk, skb,
skb               516 net/xfrm/xfrm_output.c 			      NULL, skb_dst(skb)->dev, xfrm_output2);
skb               529 net/xfrm/xfrm_output.c static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               531 net/xfrm/xfrm_output.c 	return xfrm_output_resume(skb, 1);
skb               534 net/xfrm/xfrm_output.c static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
skb               538 net/xfrm/xfrm_output.c 	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
skb               539 net/xfrm/xfrm_output.c 	BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
skb               540 net/xfrm/xfrm_output.c 	segs = skb_gso_segment(skb, 0);
skb               541 net/xfrm/xfrm_output.c 	kfree_skb(skb);
skb               565 net/xfrm/xfrm_output.c int xfrm_output(struct sock *sk, struct sk_buff *skb)
skb               567 net/xfrm/xfrm_output.c 	struct net *net = dev_net(skb_dst(skb)->dev);
skb               568 net/xfrm/xfrm_output.c 	struct xfrm_state *x = skb_dst(skb)->xfrm;
skb               571 net/xfrm/xfrm_output.c 	secpath_reset(skb);
skb               573 net/xfrm/xfrm_output.c 	if (xfrm_dev_offload_ok(skb, x)) {
skb               576 net/xfrm/xfrm_output.c 		sp = secpath_set(skb);
skb               579 net/xfrm/xfrm_output.c 			kfree_skb(skb);
skb               582 net/xfrm/xfrm_output.c 		skb->encapsulation = 1;
skb               588 net/xfrm/xfrm_output.c 		if (skb_is_gso(skb)) {
skb               589 net/xfrm/xfrm_output.c 			if (skb->inner_protocol)
skb               590 net/xfrm/xfrm_output.c 				return xfrm_output_gso(net, sk, skb);
skb               592 net/xfrm/xfrm_output.c 			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
skb               599 net/xfrm/xfrm_output.c 		if (skb_is_gso(skb))
skb               600 net/xfrm/xfrm_output.c 			return xfrm_output_gso(net, sk, skb);
skb               603 net/xfrm/xfrm_output.c 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb               604 net/xfrm/xfrm_output.c 		err = skb_checksum_help(skb);
skb               607 net/xfrm/xfrm_output.c 			kfree_skb(skb);
skb               613 net/xfrm/xfrm_output.c 	return xfrm_output2(net, sk, skb);
skb               617 net/xfrm/xfrm_output.c static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
skb               625 net/xfrm/xfrm_output.c 				xfrm_af2proto(skb_dst(skb)->ops->family));
skb               635 net/xfrm/xfrm_output.c 		err = afinfo->extract_output(x, skb);
skb               641 net/xfrm/xfrm_output.c void xfrm_local_error(struct sk_buff *skb, int mtu)
skb               646 net/xfrm/xfrm_output.c 	if (skb->protocol == htons(ETH_P_IP))
skb               648 net/xfrm/xfrm_output.c 	else if (skb->protocol == htons(ETH_P_IPV6) &&
skb               649 net/xfrm/xfrm_output.c 		 skb->sk->sk_family == AF_INET6)
skb               656 net/xfrm/xfrm_output.c 		afinfo->local_error(skb, mtu);
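
xfrm_skb_check_space(), indexed at the top of the xfrm_output.c block, expands the packet head only when the output device's needed_headroom/needed_tailroom exceed what the skb already has reserved. The arithmetic in miniature (grow() is a hypothetical stand-in for pskb_expand_head(), and the struct is illustrative):

	#include <stddef.h>

	struct room { size_t headroom, tailroom; };

	static int grow(struct room *r, size_t nhead, size_t ntail)
	{
		r->headroom += nhead;	/* a real impl reallocates here */
		r->tailroom += ntail;
		return 0;
	}

	static int check_space(struct room *r, size_t need_head, size_t need_tail)
	{
		size_t nhead = need_head > r->headroom ? need_head - r->headroom : 0;
		size_t ntail = need_tail > r->tailroom ? need_tail - r->tailroom : 0;

		if (!nhead && !ntail)
			return 0;	/* enough room on both sides */
		return grow(r, nhead, ntail);
	}
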
skb              2751 net/xfrm/xfrm_policy.c 	struct sk_buff *skb;
skb              2761 net/xfrm/xfrm_policy.c 	skb = skb_peek(&pq->hold_queue);
skb              2762 net/xfrm/xfrm_policy.c 	if (!skb) {
skb              2766 net/xfrm/xfrm_policy.c 	dst = skb_dst(skb);
skb              2767 net/xfrm/xfrm_policy.c 	sk = skb->sk;
skb              2768 net/xfrm/xfrm_policy.c 	xfrm_decode_session(skb, &fl, dst->ops->family);
skb              2798 net/xfrm/xfrm_policy.c 		skb = __skb_dequeue(&list);
skb              2800 net/xfrm/xfrm_policy.c 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
skb              2801 net/xfrm/xfrm_policy.c 		dst_hold(xfrm_dst_path(skb_dst(skb)));
skb              2802 net/xfrm/xfrm_policy.c 		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
skb              2804 net/xfrm/xfrm_policy.c 			kfree_skb(skb);
skb              2808 net/xfrm/xfrm_policy.c 		nf_reset_ct(skb);
skb              2809 net/xfrm/xfrm_policy.c 		skb_dst_drop(skb);
skb              2810 net/xfrm/xfrm_policy.c 		skb_dst_set(skb, dst);
skb              2812 net/xfrm/xfrm_policy.c 		dst_output(net, skb->sk, skb);
skb              2825 net/xfrm/xfrm_policy.c static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb              2828 net/xfrm/xfrm_policy.c 	struct dst_entry *dst = skb_dst(skb);
skb              2833 net/xfrm/xfrm_policy.c 	if (unlikely(skb_fclone_busy(sk, skb))) {
skb              2834 net/xfrm/xfrm_policy.c 		kfree_skb(skb);
skb              2839 net/xfrm/xfrm_policy.c 		kfree_skb(skb);
skb              2843 net/xfrm/xfrm_policy.c 	skb_dst_force(skb);
skb              2858 net/xfrm/xfrm_policy.c 	__skb_queue_tail(&pq->hold_queue, skb);
skb              3197 net/xfrm/xfrm_policy.c xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
skb              3199 net/xfrm/xfrm_policy.c 	struct sec_path *sp = skb_sec_path(skb);
skb              3207 net/xfrm/xfrm_policy.c 	return x->type->reject(x, skb, fl);
skb              3263 net/xfrm/xfrm_policy.c decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
skb              3265 net/xfrm/xfrm_policy.c 	const struct iphdr *iph = ip_hdr(skb);
skb              3267 net/xfrm/xfrm_policy.c 	u8 *xprth = skb_network_header(skb) + ihl * 4;
skb              3271 net/xfrm/xfrm_policy.c 	if (skb_dst(skb) && skb_dst(skb)->dev)
skb              3272 net/xfrm/xfrm_policy.c 		oif = skb_dst(skb)->dev->ifindex;
skb              3275 net/xfrm/xfrm_policy.c 	fl4->flowi4_mark = skb->mark;
skb              3276 net/xfrm/xfrm_policy.c 	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
skb              3290 net/xfrm/xfrm_policy.c 			if (xprth + 4 < skb->data ||
skb              3291 net/xfrm/xfrm_policy.c 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
skb              3294 net/xfrm/xfrm_policy.c 				xprth = skb_network_header(skb) + ihl * 4;
skb              3302 net/xfrm/xfrm_policy.c 			if (xprth + 2 < skb->data ||
skb              3303 net/xfrm/xfrm_policy.c 			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
skb              3306 net/xfrm/xfrm_policy.c 				xprth = skb_network_header(skb) + ihl * 4;
skb              3314 net/xfrm/xfrm_policy.c 			if (xprth + 4 < skb->data ||
skb              3315 net/xfrm/xfrm_policy.c 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
skb              3318 net/xfrm/xfrm_policy.c 				xprth = skb_network_header(skb) + ihl * 4;
skb              3325 net/xfrm/xfrm_policy.c 			if (xprth + 8 < skb->data ||
skb              3326 net/xfrm/xfrm_policy.c 			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
skb              3329 net/xfrm/xfrm_policy.c 				xprth = skb_network_header(skb) + ihl * 4;
skb              3336 net/xfrm/xfrm_policy.c 			if (xprth + 4 < skb->data ||
skb              3337 net/xfrm/xfrm_policy.c 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
skb              3340 net/xfrm/xfrm_policy.c 				xprth = skb_network_header(skb) + ihl * 4;
skb              3347 net/xfrm/xfrm_policy.c 			if (xprth + 12 < skb->data ||
skb              3348 net/xfrm/xfrm_policy.c 			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
skb              3352 net/xfrm/xfrm_policy.c 				xprth = skb_network_header(skb) + ihl * 4;
skb              3372 net/xfrm/xfrm_policy.c decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
skb              3376 net/xfrm/xfrm_policy.c 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
skb              3379 net/xfrm/xfrm_policy.c 	const unsigned char *nh = skb_network_header(skb);
skb              3380 net/xfrm/xfrm_policy.c 	u16 nhoff = IP6CB(skb)->nhoff;
skb              3389 net/xfrm/xfrm_policy.c 	if (skb_dst(skb) && skb_dst(skb)->dev)
skb              3390 net/xfrm/xfrm_policy.c 		oif = skb_dst(skb)->dev->ifindex;
skb              3393 net/xfrm/xfrm_policy.c 	fl6->flowi6_mark = skb->mark;
skb              3394 net/xfrm/xfrm_policy.c 	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
skb              3399 net/xfrm/xfrm_policy.c 	while (nh + offset + sizeof(*exthdr) < skb->data ||
skb              3400 net/xfrm/xfrm_policy.c 	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
skb              3401 net/xfrm/xfrm_policy.c 		nh = skb_network_header(skb);
skb              3420 net/xfrm/xfrm_policy.c 			if (!onlyproto && (nh + offset + 4 < skb->data ||
skb              3421 net/xfrm/xfrm_policy.c 			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
skb              3424 net/xfrm/xfrm_policy.c 				nh = skb_network_header(skb);
skb              3432 net/xfrm/xfrm_policy.c 			if (!onlyproto && (nh + offset + 2 < skb->data ||
skb              3433 net/xfrm/xfrm_policy.c 			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
skb              3436 net/xfrm/xfrm_policy.c 				nh = skb_network_header(skb);
skb              3446 net/xfrm/xfrm_policy.c 			if (!onlyproto && (nh + offset + 3 < skb->data ||
skb              3447 net/xfrm/xfrm_policy.c 			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
skb              3450 net/xfrm/xfrm_policy.c 				nh = skb_network_header(skb);
skb              3470 net/xfrm/xfrm_policy.c int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
skb              3475 net/xfrm/xfrm_policy.c 		decode_session4(skb, fl, reverse);
skb              3479 net/xfrm/xfrm_policy.c 		decode_session6(skb, fl, reverse);
skb              3486 net/xfrm/xfrm_policy.c 	return security_xfrm_decode_session(skb, &fl->flowi_secid);
skb              3502 net/xfrm/xfrm_policy.c int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
skb              3505 net/xfrm/xfrm_policy.c 	struct net *net = dev_net(skb->dev);
skb              3523 net/xfrm/xfrm_policy.c 		xi = ifcb->decode_session(skb, family);
skb              3534 net/xfrm/xfrm_policy.c 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
skb              3539 net/xfrm/xfrm_policy.c 	nf_nat_decode_session(skb, &fl, family);
skb              3542 net/xfrm/xfrm_policy.c 	sp = skb_sec_path(skb);
skb              3575 net/xfrm/xfrm_policy.c 			xfrm_secpath_reject(xerr_idx, skb, &fl);
skb              3610 net/xfrm/xfrm_policy.c 		sp = skb_sec_path(skb);
skb              3661 net/xfrm/xfrm_policy.c 	xfrm_secpath_reject(xerr_idx, skb, &fl);
skb              3668 net/xfrm/xfrm_policy.c int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
skb              3670 net/xfrm/xfrm_policy.c 	struct net *net = dev_net(skb->dev);
skb              3675 net/xfrm/xfrm_policy.c 	if (xfrm_decode_session(skb, &fl, family) < 0) {
skb              3680 net/xfrm/xfrm_policy.c 	skb_dst_force(skb);
skb              3681 net/xfrm/xfrm_policy.c 	if (!skb_dst(skb)) {
skb              3686 net/xfrm/xfrm_policy.c 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
skb              3691 net/xfrm/xfrm_policy.c 	skb_dst_set(skb, dst);
skb              3742 net/xfrm/xfrm_policy.c static void xfrm_link_failure(struct sk_buff *skb)
skb              3884 net/xfrm/xfrm_policy.c 					   struct sk_buff *skb,
skb              3889 net/xfrm/xfrm_policy.c 	if (!skb)
skb              3891 net/xfrm/xfrm_policy.c 	return path->ops->neigh_lookup(path, skb, daddr);
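
decode_session4(), indexed above, builds a flow key from the IPv4 header and, for non-fragments, from the first transport words past ihl*4 (ports for TCP/UDP, SPI for ESP/AH, and so on). A hedged sketch of the common TCP/UDP case (struct and function names are ours; addresses and ports stay in network byte order, as the kernel's flowi does):

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>

	struct flow4 {
		uint32_t saddr, daddr;	/* network byte order */
		uint8_t  proto;
		uint16_t sport, dport;	/* network byte order */
	};

	static int decode4(const uint8_t *pkt, size_t len, struct flow4 *fl)
	{
		size_t ihl;

		if (len < 20)
			return -1;
		ihl = (pkt[0] & 0x0f) * 4;
		memcpy(&fl->saddr, pkt + 12, 4);
		memcpy(&fl->daddr, pkt + 16, 4);
		fl->proto = pkt[9];
		fl->sport = fl->dport = 0;
		if ((fl->proto == 6 || fl->proto == 17) && len >= ihl + 4 &&
		    !(pkt[6] & 0x3f) && !pkt[7]) {	/* not a fragment */
			memcpy(&fl->sport, pkt + ihl, 2);
			memcpy(&fl->dport, pkt + ihl + 2, 2);
		}
		return 0;
	}
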
skb                84 net/xfrm/xfrm_replay.c static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
skb                90 net/xfrm/xfrm_replay.c 		XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
skb                91 net/xfrm/xfrm_replay.c 		XFRM_SKB_CB(skb)->seq.output.hi = 0;
skb                94 net/xfrm/xfrm_replay.c 			xfrm_audit_state_replay_overflow(x, skb);
skb               107 net/xfrm/xfrm_replay.c 		      struct sk_buff *skb, __be32 net_seq)
skb               134 net/xfrm/xfrm_replay.c 	xfrm_audit_state_replay(x, skb, net_seq);
skb               162 net/xfrm/xfrm_replay.c static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
skb               169 net/xfrm/xfrm_replay.c 		XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
skb               170 net/xfrm/xfrm_replay.c 		XFRM_SKB_CB(skb)->seq.output.hi = 0;
skb               173 net/xfrm/xfrm_replay.c 			xfrm_audit_state_replay_overflow(x, skb);
skb               186 net/xfrm/xfrm_replay.c 				 struct sk_buff *skb, __be32 net_seq)
skb               225 net/xfrm/xfrm_replay.c 	xfrm_audit_state_replay(x, skb, net_seq);
skb               394 net/xfrm/xfrm_replay.c static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
skb               401 net/xfrm/xfrm_replay.c 		XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
skb               402 net/xfrm/xfrm_replay.c 		XFRM_SKB_CB(skb)->seq.output.hi = replay_esn->oseq_hi;
skb               405 net/xfrm/xfrm_replay.c 			XFRM_SKB_CB(skb)->seq.output.hi = ++replay_esn->oseq_hi;
skb               410 net/xfrm/xfrm_replay.c 				xfrm_audit_state_replay_overflow(x, skb);
skb               424 net/xfrm/xfrm_replay.c 				 struct sk_buff *skb, __be32 net_seq)
skb               478 net/xfrm/xfrm_replay.c 	xfrm_audit_state_replay(x, skb, net_seq);
skb               483 net/xfrm/xfrm_replay.c 				   struct sk_buff *skb, __be32 net_seq)
skb               485 net/xfrm/xfrm_replay.c 	if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi !=
skb               491 net/xfrm/xfrm_replay.c 	return xfrm_replay_check_esn(x, skb, net_seq);
skb               553 net/xfrm/xfrm_replay.c static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *skb)
skb               557 net/xfrm/xfrm_replay.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               561 net/xfrm/xfrm_replay.c 		return xfrm_replay_overflow(x, skb);
skb               564 net/xfrm/xfrm_replay.c 		if (!skb_is_gso(skb)) {
skb               565 net/xfrm/xfrm_replay.c 			XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
skb               568 net/xfrm/xfrm_replay.c 			XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
skb               570 net/xfrm/xfrm_replay.c 			oseq += skb_shinfo(skb)->gso_segs;
skb               573 net/xfrm/xfrm_replay.c 		XFRM_SKB_CB(skb)->seq.output.hi = 0;
skb               576 net/xfrm/xfrm_replay.c 			xfrm_audit_state_replay_overflow(x, skb);
skb               591 net/xfrm/xfrm_replay.c static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff *skb)
skb               594 net/xfrm/xfrm_replay.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               600 net/xfrm/xfrm_replay.c 		return xfrm_replay_overflow_bmp(x, skb);
skb               603 net/xfrm/xfrm_replay.c 		if (!skb_is_gso(skb)) {
skb               604 net/xfrm/xfrm_replay.c 			XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
skb               607 net/xfrm/xfrm_replay.c 			XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
skb               609 net/xfrm/xfrm_replay.c 			oseq += skb_shinfo(skb)->gso_segs;
skb               612 net/xfrm/xfrm_replay.c 		XFRM_SKB_CB(skb)->seq.output.hi = 0;
skb               615 net/xfrm/xfrm_replay.c 			xfrm_audit_state_replay_overflow(x, skb);
skb               630 net/xfrm/xfrm_replay.c static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff *skb)
skb               633 net/xfrm/xfrm_replay.c 	struct xfrm_offload *xo = xfrm_offload(skb);
skb               640 net/xfrm/xfrm_replay.c 		return xfrm_replay_overflow_esn(x, skb);
skb               643 net/xfrm/xfrm_replay.c 		if (!skb_is_gso(skb)) {
skb               644 net/xfrm/xfrm_replay.c 			XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
skb               645 net/xfrm/xfrm_replay.c 			XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
skb               649 net/xfrm/xfrm_replay.c 			XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
skb               650 net/xfrm/xfrm_replay.c 			XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
skb               653 net/xfrm/xfrm_replay.c 			oseq += skb_shinfo(skb)->gso_segs;
skb               657 net/xfrm/xfrm_replay.c 			XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
skb               663 net/xfrm/xfrm_replay.c 				xfrm_audit_state_replay_overflow(x, skb);
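
The overflow/check pairs indexed above all maintain the classic anti-replay sliding window: sequence numbers ahead of the window shift it forward, numbers behind it are accepted at most once, and zero is never valid. A minimal single-word version of that check (the kernel splits check from advance, supports windows larger than 32 bits, and has a separate 64-bit ESN variant):

	#include <stdint.h>

	struct replay { uint32_t top; uint32_t bmp; };	/* bit i == top - i seen */

	static int replay_check(struct replay *r, uint32_t seq)
	{
		uint32_t diff;

		if (seq == 0)
			return -1;			/* never valid */
		if (seq > r->top) {			/* advances the window */
			diff = seq - r->top;
			r->bmp = diff < 32 ? (r->bmp << diff) | 1 : 1;
			r->top = seq;
			return 0;
		}
		diff = r->top - seq;
		if (diff >= 32)
			return -1;			/* too old */
		if (r->bmp & (1u << diff))
			return -1;			/* replayed */
		r->bmp |= 1u << diff;
		return 0;
	}
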
skb              2611 net/xfrm/xfrm_state.c static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
skb              2619 net/xfrm/xfrm_state.c 		iph4 = ip_hdr(skb);
skb              2624 net/xfrm/xfrm_state.c 		iph6 = ipv6_hdr(skb);
skb              2664 net/xfrm/xfrm_state.c 				      struct sk_buff *skb)
skb              2672 net/xfrm/xfrm_state.c 	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
skb              2682 net/xfrm/xfrm_state.c 			     struct sk_buff *skb, __be32 net_seq)
skb              2690 net/xfrm/xfrm_state.c 	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
skb              2698 net/xfrm/xfrm_state.c void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
skb              2705 net/xfrm/xfrm_state.c 	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
skb              2710 net/xfrm/xfrm_state.c void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
skb              2719 net/xfrm/xfrm_state.c 	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
skb              2728 net/xfrm/xfrm_state.c 			      struct sk_buff *skb, u8 proto)
skb              2737 net/xfrm/xfrm_state.c 	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
skb              2738 net/xfrm/xfrm_state.c 	if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
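
xfrm_audit_helper_pktinfo(), indexed above, stamps audit records with the packet's addresses via the kernel's %pI4/%pI6 printk extensions. A userspace equivalent with inet_ntop (function name ours):

	#include <stdio.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	/* af is AF_INET or AF_INET6; src/dst point at raw addresses. */
	static void audit_pktinfo(int af, const void *src, const void *dst)
	{
		char s[INET6_ADDRSTRLEN], d[INET6_ADDRSTRLEN];

		inet_ntop(af, src, s, sizeof(s));
		inet_ntop(af, dst, d, sizeof(d));
		printf(" src=%s dst=%s", s, d);
	}
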
skb               671 net/xfrm/xfrm_user.c static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               674 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb               750 net/xfrm/xfrm_user.c static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
skb               753 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb               813 net/xfrm/xfrm_user.c static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
skb               819 net/xfrm/xfrm_user.c 	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
skb               834 net/xfrm/xfrm_user.c static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb)
skb               839 net/xfrm/xfrm_user.c 	attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
skb               851 net/xfrm/xfrm_user.c static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
skb               856 net/xfrm/xfrm_user.c 	nla = nla_reserve(skb, XFRMA_ALG_AUTH,
skb               869 net/xfrm/xfrm_user.c static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
skb               874 net/xfrm/xfrm_user.c 		ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v);
skb               876 net/xfrm/xfrm_user.c 			ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m);
skb               884 net/xfrm/xfrm_user.c 				    struct sk_buff *skb)
skb               891 net/xfrm/xfrm_user.c 		ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
skb               898 net/xfrm/xfrm_user.c 		ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
skb               903 net/xfrm/xfrm_user.c 		ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
skb               909 net/xfrm/xfrm_user.c 		ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
skb               914 net/xfrm/xfrm_user.c 		ret = copy_to_user_auth(x->aalg, skb);
skb               916 net/xfrm/xfrm_user.c 			ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
skb               922 net/xfrm/xfrm_user.c 		ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
skb               927 net/xfrm/xfrm_user.c 		ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
skb               932 net/xfrm/xfrm_user.c 		ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
skb               937 net/xfrm/xfrm_user.c 		ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
skb               941 net/xfrm/xfrm_user.c 	ret = xfrm_mark_put(skb, &x->mark);
skb               945 net/xfrm/xfrm_user.c 	ret = xfrm_smark_put(skb, &x->props.smark);
skb               950 net/xfrm/xfrm_user.c 		ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
skb               954 net/xfrm/xfrm_user.c 		ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
skb               959 net/xfrm/xfrm_user.c 		ret = copy_user_offload(&x->xso, skb);
skb               963 net/xfrm/xfrm_user.c 		ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id);
skb               968 net/xfrm/xfrm_user.c 		ret = copy_sec_ctx(x->security, skb);
skb               977 net/xfrm/xfrm_user.c 	struct sk_buff *skb = sp->out_skb;
skb               982 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
skb               989 net/xfrm/xfrm_user.c 	err = copy_to_user_state_extra(x, p, skb);
skb               991 net/xfrm/xfrm_user.c 		nlmsg_cancel(skb, nlh);
skb               994 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
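
copy_to_user_state_extra() and its callers above are one long run of nla_put*() calls, each appending a type-length-value attribute padded to netlink's 4-byte alignment. The TLV append in miniature (struct and macro renamed to avoid clashing with <linux/netlink.h>; the caller must size buf for the aligned result):

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>

	struct my_nlattr { uint16_t nla_len; uint16_t nla_type; };
	#define MY_NLA_ALIGN(n) (((n) + 3u) & ~3u)

	static size_t nla_append(uint8_t *buf, size_t off, uint16_t type,
				 const void *data, uint16_t len)
	{
		struct my_nlattr a = {
			.nla_len  = (uint16_t)(sizeof(a) + len),
			.nla_type = type,
		};

		memcpy(buf + off, &a, sizeof(a));
		memcpy(buf + off + sizeof(a), data, len);
		return off + MY_NLA_ALIGN(sizeof(a) + len);	/* next offset */
	}
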
skb              1001 net/xfrm/xfrm_user.c 	struct sock *sk = cb->skb->sk;
skb              1010 net/xfrm/xfrm_user.c static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
skb              1012 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              1019 net/xfrm/xfrm_user.c 	info.in_skb = cb->skb;
skb              1020 net/xfrm/xfrm_user.c 	info.out_skb = skb;
skb              1051 net/xfrm/xfrm_user.c 	return skb->len;
skb              1058 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              1061 net/xfrm/xfrm_user.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
skb              1062 net/xfrm/xfrm_user.c 	if (!skb)
skb              1066 net/xfrm/xfrm_user.c 	info.out_skb = skb;
skb              1072 net/xfrm/xfrm_user.c 		kfree_skb(skb);
skb              1076 net/xfrm/xfrm_user.c 	return skb;
skb              1082 net/xfrm/xfrm_user.c static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
skb              1088 net/xfrm/xfrm_user.c 		kfree_skb(skb);
skb              1092 net/xfrm/xfrm_user.c 	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
skb              1104 net/xfrm/xfrm_user.c static int build_spdinfo(struct sk_buff *skb, struct net *net,
skb              1116 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
skb              1141 net/xfrm/xfrm_user.c 	err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
skb              1143 net/xfrm/xfrm_user.c 		err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
skb              1145 net/xfrm/xfrm_user.c 		err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
skb              1147 net/xfrm/xfrm_user.c 		err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
skb              1149 net/xfrm/xfrm_user.c 		nlmsg_cancel(skb, nlh);
skb              1153 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              1157 net/xfrm/xfrm_user.c static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1160 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              1202 net/xfrm/xfrm_user.c static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1205 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              1208 net/xfrm/xfrm_user.c 	u32 sportid = NETLINK_CB(skb).portid;
skb              1229 net/xfrm/xfrm_user.c static int build_sadinfo(struct sk_buff *skb, struct net *net,
skb              1238 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
skb              1249 net/xfrm/xfrm_user.c 	err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
skb              1251 net/xfrm/xfrm_user.c 		err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
skb              1253 net/xfrm/xfrm_user.c 		nlmsg_cancel(skb, nlh);
skb              1257 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              1261 net/xfrm/xfrm_user.c static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1264 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              1267 net/xfrm/xfrm_user.c 	u32 sportid = NETLINK_CB(skb).portid;
skb              1281 net/xfrm/xfrm_user.c static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1284 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              1294 net/xfrm/xfrm_user.c 	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
skb              1298 net/xfrm/xfrm_user.c 		err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
skb              1305 net/xfrm/xfrm_user.c static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1308 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              1355 net/xfrm/xfrm_user.c 	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
skb              1361 net/xfrm/xfrm_user.c 	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
skb              1645 net/xfrm/xfrm_user.c static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1648 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              1690 net/xfrm/xfrm_user.c static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
skb              1715 net/xfrm/xfrm_user.c 	return nla_put(skb, XFRMA_TMPL,
skb              1719 net/xfrm/xfrm_user.c static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
skb              1722 net/xfrm/xfrm_user.c 		return copy_sec_ctx(x->security, skb);
skb              1727 net/xfrm/xfrm_user.c static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
skb              1730 net/xfrm/xfrm_user.c 		return copy_sec_ctx(xp->security, skb);
skb              1743 net/xfrm/xfrm_user.c static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
skb              1751 net/xfrm/xfrm_user.c 	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
skb              1755 net/xfrm/xfrm_user.c static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
skb              1766 net/xfrm/xfrm_user.c 	struct sk_buff *skb = sp->out_skb;
skb              1770 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
skb              1777 net/xfrm/xfrm_user.c 	err = copy_to_user_tmpl(xp, skb);
skb              1779 net/xfrm/xfrm_user.c 		err = copy_to_user_sec_ctx(xp, skb);
skb              1781 net/xfrm/xfrm_user.c 		err = copy_to_user_policy_type(xp->type, skb);
skb              1783 net/xfrm/xfrm_user.c 		err = xfrm_mark_put(skb, &xp->mark);
skb              1785 net/xfrm/xfrm_user.c 		err = xfrm_if_id_put(skb, xp->if_id);
skb              1787 net/xfrm/xfrm_user.c 		nlmsg_cancel(skb, nlh);
skb              1790 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              1797 net/xfrm/xfrm_user.c 	struct net *net = sock_net(cb->skb->sk);
skb              1813 net/xfrm/xfrm_user.c static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
skb              1815 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              1819 net/xfrm/xfrm_user.c 	info.in_skb = cb->skb;
skb              1820 net/xfrm/xfrm_user.c 	info.out_skb = skb;
skb              1826 net/xfrm/xfrm_user.c 	return skb->len;
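xfrm_dump_policy() follows the netlink dump contract: fill the skb the core hands you, stash your resume point in cb->args[], and return skb->len so the core ships this batch and calls back for more; a zero return ends the dump. A condensed sketch, with my_fill_one() as a hypothetical per-entry emitter that returns non-zero once the skb is full:

static int sketch_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	long idx = cb->args[0];			/* where the last call stopped */

	while (my_fill_one(skb, idx) == 0)	/* hypothetical emitter */
		idx++;

	cb->args[0] = idx;			/* resume point for the next call */
	return skb->len;			/* non-zero: call me again */
}
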
skb              1834 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              1837 net/xfrm/xfrm_user.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb              1838 net/xfrm/xfrm_user.c 	if (!skb)
skb              1842 net/xfrm/xfrm_user.c 	info.out_skb = skb;
skb              1848 net/xfrm/xfrm_user.c 		kfree_skb(skb);
skb              1852 net/xfrm/xfrm_user.c 	return skb;
skb              1855 net/xfrm/xfrm_user.c static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1858 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              1911 net/xfrm/xfrm_user.c 		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
skb              1916 net/xfrm/xfrm_user.c 					    NETLINK_CB(skb).portid);
skb              1936 net/xfrm/xfrm_user.c static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              1939 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              1974 net/xfrm/xfrm_user.c static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
skb              1980 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
skb              1995 net/xfrm/xfrm_user.c 		err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
skb              1999 net/xfrm/xfrm_user.c 		err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
skb              2004 net/xfrm/xfrm_user.c 	err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
skb              2010 net/xfrm/xfrm_user.c 		err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
skb              2015 net/xfrm/xfrm_user.c 		err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
skb              2020 net/xfrm/xfrm_user.c 	err = xfrm_mark_put(skb, &x->mark);
skb              2024 net/xfrm/xfrm_user.c 	err = xfrm_if_id_put(skb, x->if_id);
skb              2028 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              2032 net/xfrm/xfrm_user.c 	nlmsg_cancel(skb, nlh);
skb              2036 net/xfrm/xfrm_user.c static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2039 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              2074 net/xfrm/xfrm_user.c 	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
skb              2080 net/xfrm/xfrm_user.c static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2083 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              2131 net/xfrm/xfrm_user.c static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2134 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              2159 net/xfrm/xfrm_user.c static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2162 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              2223 net/xfrm/xfrm_user.c static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2226 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              2257 net/xfrm/xfrm_user.c static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2260 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              2362 net/xfrm/xfrm_user.c static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2371 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              2404 net/xfrm/xfrm_user.c static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2412 net/xfrm/xfrm_user.c static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
skb              2427 net/xfrm/xfrm_user.c 	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
skb              2430 net/xfrm/xfrm_user.c static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
skb              2440 net/xfrm/xfrm_user.c 	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
skb              2453 net/xfrm/xfrm_user.c static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
skb              2463 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
skb              2474 net/xfrm/xfrm_user.c 		err = copy_to_user_kmaddress(k, skb);
skb              2479 net/xfrm/xfrm_user.c 		err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap);
skb              2483 net/xfrm/xfrm_user.c 	err = copy_to_user_policy_type(type, skb);
skb              2487 net/xfrm/xfrm_user.c 		err = copy_to_user_migrate(mp, skb);
skb              2492 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              2496 net/xfrm/xfrm_user.c 	nlmsg_cancel(skb, nlh);
skb              2506 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              2509 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
skb              2511 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              2515 net/xfrm/xfrm_user.c 	err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type);
skb              2518 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
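xfrm_send_migrate() shows the notification shape shared by every xfrm_send_*() helper in this file: compute an exact message size up front, allocate with GFP_ATOMIC (these can fire outside process context), build the message, and hand it to xfrm_nlmsg_multicast(), which owns the skb from then on. A sketch with my_msgsize(), build_event() and MY_NLGRP as placeholders:

static int sketch_notify(struct net *net, const void *event)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(my_msgsize(event), GFP_ATOMIC);	/* exact-size alloc */
	if (!skb)
		return -ENOMEM;

	err = build_event(skb, event);		/* hypothetical builder */
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	return xfrm_nlmsg_multicast(net, skb, 0, MY_NLGRP);
}
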
skb              2633 net/xfrm/xfrm_user.c static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
skb              2636 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              2652 net/xfrm/xfrm_user.c 	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
skb              2667 net/xfrm/xfrm_user.c 			return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
skb              2680 net/xfrm/xfrm_user.c 	return link->doit(skb, nlh, attrs);
skb              2683 net/xfrm/xfrm_user.c static void xfrm_netlink_rcv(struct sk_buff *skb)
skb              2685 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
skb              2688 net/xfrm/xfrm_user.c 	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
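xfrm_netlink_rcv() is the kernel socket's input callback. It defers the per-message walk to netlink_rcv_skb(), which validates each nlmsghdr and invokes the handler; xfrm_user_rcv_msg() then gates everything on netlink_net_capable(skb, CAP_NET_ADMIN) and routes dump requests into netlink_dump_start(). A minimal receive-side sketch (my_doit() is hypothetical):

static int sketch_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	return my_doit(skb, nlh);	/* dispatch on nlh->nlmsg_type */
}

static void sketch_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &sketch_rcv_msg);
}
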
skb              2698 net/xfrm/xfrm_user.c static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
skb              2704 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
skb              2714 net/xfrm/xfrm_user.c 	err = xfrm_mark_put(skb, &x->mark);
skb              2718 net/xfrm/xfrm_user.c 	err = xfrm_if_id_put(skb, x->if_id);
skb              2722 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              2729 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              2731 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
skb              2732 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              2735 net/xfrm/xfrm_user.c 	if (build_expire(skb, x, c) < 0) {
skb              2736 net/xfrm/xfrm_user.c 		kfree_skb(skb);
skb              2740 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
skb              2746 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              2749 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
skb              2750 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              2753 net/xfrm/xfrm_user.c 	err = build_aevent(skb, x, c);
skb              2756 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
skb              2764 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              2767 net/xfrm/xfrm_user.c 	skb = nlmsg_new(len, GFP_ATOMIC);
skb              2768 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              2771 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
skb              2773 net/xfrm/xfrm_user.c 		kfree_skb(skb);
skb              2780 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              2782 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
skb              2835 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              2848 net/xfrm/xfrm_user.c 	skb = nlmsg_new(len, GFP_ATOMIC);
skb              2849 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              2852 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
skb              2868 net/xfrm/xfrm_user.c 		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
skb              2875 net/xfrm/xfrm_user.c 	err = copy_to_user_state_extra(x, p, skb);
skb              2879 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              2881 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
skb              2884 net/xfrm/xfrm_user.c 	kfree_skb(skb);
skb              2922 net/xfrm/xfrm_user.c static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
skb              2930 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
skb              2944 net/xfrm/xfrm_user.c 	err = copy_to_user_tmpl(xp, skb);
skb              2946 net/xfrm/xfrm_user.c 		err = copy_to_user_state_sec_ctx(x, skb);
skb              2948 net/xfrm/xfrm_user.c 		err = copy_to_user_policy_type(xp->type, skb);
skb              2950 net/xfrm/xfrm_user.c 		err = xfrm_mark_put(skb, &xp->mark);
skb              2952 net/xfrm/xfrm_user.c 		err = xfrm_if_id_put(skb, xp->if_id);
skb              2954 net/xfrm/xfrm_user.c 		nlmsg_cancel(skb, nlh);
skb              2958 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              2966 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              2969 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
skb              2970 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              2973 net/xfrm/xfrm_user.c 	err = build_acquire(skb, x, xt, xp);
skb              2976 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
skb              3048 net/xfrm/xfrm_user.c static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
skb              3056 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
skb              3062 net/xfrm/xfrm_user.c 	err = copy_to_user_tmpl(xp, skb);
skb              3064 net/xfrm/xfrm_user.c 		err = copy_to_user_sec_ctx(xp, skb);
skb              3066 net/xfrm/xfrm_user.c 		err = copy_to_user_policy_type(xp->type, skb);
skb              3068 net/xfrm/xfrm_user.c 		err = xfrm_mark_put(skb, &xp->mark);
skb              3070 net/xfrm/xfrm_user.c 		err = xfrm_if_id_put(skb, xp->if_id);
skb              3072 net/xfrm/xfrm_user.c 		nlmsg_cancel(skb, nlh);
skb              3077 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              3084 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              3087 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
skb              3088 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              3091 net/xfrm/xfrm_user.c 	err = build_polexpire(skb, xp, dir, c);
skb              3094 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
skb              3104 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              3117 net/xfrm/xfrm_user.c 	skb = nlmsg_new(len, GFP_ATOMIC);
skb              3118 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              3121 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
skb              3138 net/xfrm/xfrm_user.c 		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
skb              3147 net/xfrm/xfrm_user.c 	err = copy_to_user_tmpl(xp, skb);
skb              3149 net/xfrm/xfrm_user.c 		err = copy_to_user_policy_type(xp->type, skb);
skb              3151 net/xfrm/xfrm_user.c 		err = xfrm_mark_put(skb, &xp->mark);
skb              3153 net/xfrm/xfrm_user.c 		err = xfrm_if_id_put(skb, xp->if_id);
skb              3157 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              3159 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
skb              3162 net/xfrm/xfrm_user.c 	kfree_skb(skb);
skb              3170 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              3173 net/xfrm/xfrm_user.c 	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
skb              3174 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              3177 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
skb              3181 net/xfrm/xfrm_user.c 	err = copy_to_user_policy_type(c->data.type, skb);
skb              3185 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              3187 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
skb              3190 net/xfrm/xfrm_user.c 	kfree_skb(skb);
skb              3220 net/xfrm/xfrm_user.c static int build_report(struct sk_buff *skb, u8 proto,
skb              3226 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
skb              3235 net/xfrm/xfrm_user.c 		int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
skb              3237 net/xfrm/xfrm_user.c 			nlmsg_cancel(skb, nlh);
skb              3241 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              3248 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              3251 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
skb              3252 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              3255 net/xfrm/xfrm_user.c 	err = build_report(skb, proto, sel, addr);
skb              3258 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
skb              3266 net/xfrm/xfrm_user.c static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
skb              3272 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
skb              3288 net/xfrm/xfrm_user.c 	nlmsg_end(skb, nlh);
skb              3296 net/xfrm/xfrm_user.c 	struct sk_buff *skb;
skb              3305 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
skb              3306 net/xfrm/xfrm_user.c 	if (skb == NULL)
skb              3309 net/xfrm/xfrm_user.c 	err = build_mapping(skb, x, ipaddr, sport);
skb              3312 net/xfrm/xfrm_user.c 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
skb                56 samples/bpf/hbm_edt_kern.c int _hbm_out_cg(struct __sk_buff *skb)
skb                68 samples/bpf/hbm_edt_kern.c 	int len = skb->len;
skb                74 samples/bpf/hbm_edt_kern.c 	if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1))
skb                77 samples/bpf/hbm_edt_kern.c 	hbm_get_pkt_info(skb, &pkti);
skb               107 samples/bpf/hbm_edt_kern.c 	skb->tstamp = sendtime;
skb               128 samples/bpf/hbm_edt_kern.c 		if (bpf_skb_ecn_set_ce(skb)) {
skb                87 samples/bpf/hbm_kern.h static int get_tcp_info(struct __sk_buff *skb, struct hbm_pkt_info *pkti)
skb                92 samples/bpf/hbm_kern.h 	sk = skb->sk;
skb               113 samples/bpf/hbm_kern.h static void hbm_get_pkt_info(struct __sk_buff *skb,
skb               121 samples/bpf/hbm_kern.h 	bpf_skb_load_bytes(skb, 0, &iph, 12);
skb               137 samples/bpf/hbm_kern.h 		get_tcp_info(skb, pkti);
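hbm_get_pkt_info() reads packet headers with bpf_skb_load_bytes(), which copies bytes out of the (possibly non-linear) skb into program stack; for cgroup_skb programs the data starts at the IP header, so offset 0 is the iphdr. A hedged sketch of the same idea; the drop-UDP policy is purely illustrative:

#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int sketch_proto(struct __sk_buff *skb)
{
	struct iphdr iph;

	/* copy header bytes out of the skb; safe even if non-linear */
	if (bpf_skb_load_bytes(skb, 0, &iph, sizeof(iph)) < 0)
		return 1;			/* allow if unparseable */

	if (iph.version == 4 && iph.protocol == IPPROTO_UDP)
		return 0;			/* example policy: drop UDP */

	return 1;				/* cgroup_skb: 1 = allow */
}

char _license[] SEC("license") = "GPL";
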
skb                58 samples/bpf/hbm_out_kern.c int _hbm_out_cg(struct __sk_buff *skb)
skb                61 samples/bpf/hbm_out_kern.c 	int len = skb->len;
skb                76 samples/bpf/hbm_out_kern.c 	if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1))
skb                79 samples/bpf/hbm_out_kern.c 	hbm_get_pkt_info(skb, &pkti);
skb               145 samples/bpf/hbm_out_kern.c 		if (bpf_skb_ecn_set_ce(skb)) {
skb                67 samples/bpf/lwt_len_hist_kern.c int do_len_hist(struct __sk_buff *skb)
skb                71 samples/bpf/lwt_len_hist_kern.c 	key = log2l(skb->len);
skb                27 samples/bpf/parse_ldabs.c int handle_ingress(struct __sk_buff *skb)
skb                31 samples/bpf/parse_ldabs.c 	if (load_half(skb, offsetof(struct ethhdr, h_proto)) != ETH_P_IP)
skb                33 samples/bpf/parse_ldabs.c 	if (load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)) != IPPROTO_UDP ||
skb                34 samples/bpf/parse_ldabs.c 	    load_byte(skb, ETH_HLEN) != 0x45)
skb                36 samples/bpf/parse_ldabs.c 	if (ip_is_fragment(skb, ETH_HLEN))
skb                38 samples/bpf/parse_ldabs.c 	if (load_half(skb, troff + offsetof(struct udphdr, dest)) == DEFAULT_PKTGEN_UDP_PORT)
skb                27 samples/bpf/parse_simple.c int handle_ingress(struct __sk_buff *skb)
skb                29 samples/bpf/parse_simple.c 	void *data = (void *)(long)skb->data;
skb                33 samples/bpf/parse_simple.c 	void *data_end = (void *)(long)skb->data_end;
skb               113 samples/bpf/parse_varlen.c int handle_ingress(struct __sk_buff *skb)
skb               115 samples/bpf/parse_varlen.c 	void *data = (void *)(long)skb->data;
skb               117 samples/bpf/parse_varlen.c 	void *data_end = (void *)(long)skb->data_end;
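parse_simple.c and parse_varlen.c use the direct packet access model: skb->data and skb->data_end become pointers, and every header dereference must be preceded by an explicit bounds check or the verifier rejects the program. A minimal sketch of that check for a tc classifier; the drop-UDP action is illustrative only:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("classifier")
int sketch_parse(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;
	struct iphdr *iph;

	/* the verifier demands proof that every access stays in bounds */
	if (data + sizeof(*eth) > data_end)
		return TC_ACT_OK;

	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return TC_ACT_OK;

	iph = data + sizeof(*eth);
	if ((void *)(iph + 1) > data_end)
		return TC_ACT_OK;

	if (iph->protocol == IPPROTO_UDP)
		return TC_ACT_SHOT;		/* example policy: drop UDP */

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
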
skb                15 samples/bpf/sockex1_kern.c int bpf_prog1(struct __sk_buff *skb)
skb                17 samples/bpf/sockex1_kern.c 	int index = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
skb                20 samples/bpf/sockex1_kern.c 	if (skb->pkt_type != PACKET_OUTGOING)
skb                25 samples/bpf/sockex1_kern.c 		__sync_fetch_and_add(value, skb->len);
skb                61 samples/bpf/sockex2_kern.c static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
skb                66 samples/bpf/sockex2_kern.c 	if (unlikely(ip_is_fragment(skb, nhoff)))
skb                69 samples/bpf/sockex2_kern.c 		*ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));
skb                72 samples/bpf/sockex2_kern.c 		flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
skb                73 samples/bpf/sockex2_kern.c 		flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
skb                76 samples/bpf/sockex2_kern.c 	verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
skb                85 samples/bpf/sockex2_kern.c static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
skb                88 samples/bpf/sockex2_kern.c 	*ip_proto = load_byte(skb,
skb                90 samples/bpf/sockex2_kern.c 	flow->src = ipv6_addr_hash(skb,
skb                92 samples/bpf/sockex2_kern.c 	flow->dst = ipv6_addr_hash(skb,
skb                99 samples/bpf/sockex2_kern.c static inline bool flow_dissector(struct __sk_buff *skb,
skb               104 samples/bpf/sockex2_kern.c 	__u64 proto = load_half(skb, 12);
skb               108 samples/bpf/sockex2_kern.c 		proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
skb               114 samples/bpf/sockex2_kern.c 		proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
skb               120 samples/bpf/sockex2_kern.c 		nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
skb               122 samples/bpf/sockex2_kern.c 		nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
skb               133 samples/bpf/sockex2_kern.c 		__u64 gre_flags = load_half(skb,
skb               135 samples/bpf/sockex2_kern.c 		__u64 gre_proto = load_half(skb,
skb               151 samples/bpf/sockex2_kern.c 			proto = load_half(skb,
skb               158 samples/bpf/sockex2_kern.c 			nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
skb               160 samples/bpf/sockex2_kern.c 			nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
skb               166 samples/bpf/sockex2_kern.c 		nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
skb               169 samples/bpf/sockex2_kern.c 		nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
skb               179 samples/bpf/sockex2_kern.c 		flow->ports = load_word(skb, nhoff);
skb               200 samples/bpf/sockex2_kern.c int bpf_prog2(struct __sk_buff *skb)
skb               206 samples/bpf/sockex2_kern.c 	if (!flow_dissector(skb, &flow))
skb               213 samples/bpf/sockex2_kern.c 		__sync_fetch_and_add(&value->bytes, skb->len);
skb               215 samples/bpf/sockex2_kern.c 		struct pair val = {1, skb->len};
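bpf_prog2() aggregates per-flow byte counts with the common lookup-then-update map pattern: atomic increment if the element exists, otherwise insert an initial value. A hedged sketch of the same pattern; it uses the modern BTF-style map declaration rather than the samples' older struct bpf_map_def form, and the key layout here is illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct pair {
	__u64 packets;
	__u64 bytes;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __u32);
	__type(value, struct pair);
	__uint(max_entries, 1024);
} stats SEC(".maps");

SEC("socket")
int sketch_count(struct __sk_buff *skb)
{
	__u32 key = skb->protocol;
	struct pair *val, init = { 1, skb->len };

	val = bpf_map_lookup_elem(&stats, &key);
	if (val) {
		/* element exists: atomic in-place accumulate */
		__sync_fetch_and_add(&val->packets, 1);
		__sync_fetch_and_add(&val->bytes, skb->len);
	} else {
		bpf_map_update_elem(&stats, &key, &init, BPF_ANY);
	}
	return 0;
}

char _license[] SEC("license") = "GPL";
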
skb                39 samples/bpf/sockex3_kern.c static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
skb                44 samples/bpf/sockex3_kern.c 		bpf_tail_call(skb, &jmp_table, PARSE_VLAN);
skb                48 samples/bpf/sockex3_kern.c 		bpf_tail_call(skb, &jmp_table, PARSE_MPLS);
skb                51 samples/bpf/sockex3_kern.c 		bpf_tail_call(skb, &jmp_table, PARSE_IP);
skb                54 samples/bpf/sockex3_kern.c 		bpf_tail_call(skb, &jmp_table, PARSE_IPV6);
skb               122 samples/bpf/sockex3_kern.c static void update_stats(struct __sk_buff *skb, struct globals *g)
skb               130 samples/bpf/sockex3_kern.c 		__sync_fetch_and_add(&value->bytes, skb->len);
skb               132 samples/bpf/sockex3_kern.c 		struct pair val = {1, skb->len};
skb               138 samples/bpf/sockex3_kern.c static __always_inline void parse_ip_proto(struct __sk_buff *skb,
skb               141 samples/bpf/sockex3_kern.c 	__u32 nhoff = skb->cb[0];
skb               151 samples/bpf/sockex3_kern.c 		__u32 gre_flags = load_half(skb,
skb               153 samples/bpf/sockex3_kern.c 		__u32 gre_proto = load_half(skb,
skb               167 samples/bpf/sockex3_kern.c 		skb->cb[0] = nhoff;
skb               168 samples/bpf/sockex3_kern.c 		parse_eth_proto(skb, gre_proto);
skb               172 samples/bpf/sockex3_kern.c 		parse_eth_proto(skb, ETH_P_IP);
skb               175 samples/bpf/sockex3_kern.c 		parse_eth_proto(skb, ETH_P_IPV6);
skb               179 samples/bpf/sockex3_kern.c 		g->flow.ports = load_word(skb, nhoff);
skb               182 samples/bpf/sockex3_kern.c 		update_stats(skb, g);
skb               189 samples/bpf/sockex3_kern.c PROG(PARSE_IP)(struct __sk_buff *skb)
skb               197 samples/bpf/sockex3_kern.c 	nhoff = skb->cb[0];
skb               199 samples/bpf/sockex3_kern.c 	if (unlikely(ip_is_fragment(skb, nhoff)))
skb               202 samples/bpf/sockex3_kern.c 	ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));
skb               205 samples/bpf/sockex3_kern.c 		g->flow.src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
skb               206 samples/bpf/sockex3_kern.c 		g->flow.dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
skb               209 samples/bpf/sockex3_kern.c 	verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
skb               212 samples/bpf/sockex3_kern.c 	skb->cb[0] = nhoff;
skb               213 samples/bpf/sockex3_kern.c 	parse_ip_proto(skb, g, ip_proto);
skb               217 samples/bpf/sockex3_kern.c PROG(PARSE_IPV6)(struct __sk_buff *skb)
skb               225 samples/bpf/sockex3_kern.c 	nhoff = skb->cb[0];
skb               227 samples/bpf/sockex3_kern.c 	ip_proto = load_byte(skb,
skb               229 samples/bpf/sockex3_kern.c 	g->flow.src = ipv6_addr_hash(skb,
skb               231 samples/bpf/sockex3_kern.c 	g->flow.dst = ipv6_addr_hash(skb,
skb               235 samples/bpf/sockex3_kern.c 	skb->cb[0] = nhoff;
skb               236 samples/bpf/sockex3_kern.c 	parse_ip_proto(skb, g, ip_proto);
skb               240 samples/bpf/sockex3_kern.c PROG(PARSE_VLAN)(struct __sk_buff *skb)
skb               244 samples/bpf/sockex3_kern.c 	nhoff = skb->cb[0];
skb               246 samples/bpf/sockex3_kern.c 	proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
skb               249 samples/bpf/sockex3_kern.c 	skb->cb[0] = nhoff;
skb               251 samples/bpf/sockex3_kern.c 	parse_eth_proto(skb, proto);
skb               256 samples/bpf/sockex3_kern.c PROG(PARSE_MPLS)(struct __sk_buff *skb)
skb               260 samples/bpf/sockex3_kern.c 	nhoff = skb->cb[0];
skb               262 samples/bpf/sockex3_kern.c 	label = load_word(skb, nhoff);
skb               264 samples/bpf/sockex3_kern.c 	skb->cb[0] = nhoff;
skb               267 samples/bpf/sockex3_kern.c 		__u8 verlen = load_byte(skb, nhoff);
skb               269 samples/bpf/sockex3_kern.c 			parse_eth_proto(skb, ETH_P_IP);
skb               271 samples/bpf/sockex3_kern.c 			parse_eth_proto(skb, ETH_P_IPV6);
skb               273 samples/bpf/sockex3_kern.c 		parse_eth_proto(skb, ETH_P_MPLS_UC);
skb               280 samples/bpf/sockex3_kern.c int main_prog(struct __sk_buff *skb)
skb               283 samples/bpf/sockex3_kern.c 	__u32 proto = load_half(skb, 12);
skb               285 samples/bpf/sockex3_kern.c 	skb->cb[0] = nhoff;
skb               286 samples/bpf/sockex3_kern.c 	parse_eth_proto(skb, proto);
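sockex3 chains its parser stages with bpf_tail_call() and smuggles the current header offset between stages in skb->cb[0], since a tail call replaces the running program and does not preserve its stack. A hedged sketch of the dispatch half, with a hypothetical one-slot jump table:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

#define PARSE_IP 0	/* illustrative slot index */

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 8);
} jmp_table SEC(".maps");

SEC("socket")
int sketch_main(struct __sk_buff *skb)
{
	skb->cb[0] = ETH_HLEN;			/* offset for the next stage */
	bpf_tail_call(skb, &jmp_table, PARSE_IP);
	return 0;				/* reached only if slot is empty */
}

char _license[] SEC("license") = "GPL";
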
skb                59 samples/bpf/tc_l2_redirect_kern.c int _l2_to_iptun_ingress_forward(struct __sk_buff *skb)
skb                62 samples/bpf/tc_l2_redirect_kern.c 	void *data = (void *)(long)skb->data;
skb                64 samples/bpf/tc_l2_redirect_kern.c 	void *data_end = (void *)(long)skb->data_end;
skb               110 samples/bpf/tc_l2_redirect_kern.c int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb)
skb               113 samples/bpf/tc_l2_redirect_kern.c 	void *data = (void *)(long)skb->data;
skb               115 samples/bpf/tc_l2_redirect_kern.c 	void *data_end = (void *)(long)skb->data_end;
skb               146 samples/bpf/tc_l2_redirect_kern.c 	bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0);
skb               151 samples/bpf/tc_l2_redirect_kern.c int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb)
skb               154 samples/bpf/tc_l2_redirect_kern.c 	void *data = (void *)(long)skb->data;
skb               156 samples/bpf/tc_l2_redirect_kern.c 	void *data_end = (void *)(long)skb->data_end;
skb               201 samples/bpf/tc_l2_redirect_kern.c 	bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6);
skb               206 samples/bpf/tc_l2_redirect_kern.c int _drop_non_tun_vip(struct __sk_buff *skb)
skb               209 samples/bpf/tc_l2_redirect_kern.c 	void *data = (void *)(long)skb->data;
skb               211 samples/bpf/tc_l2_redirect_kern.c 	void *data_end = (void *)(long)skb->data_end;
skb                15 samples/bpf/tcbpf1_kern.c static inline void set_dst_mac(struct __sk_buff *skb, char *mac)
skb                17 samples/bpf/tcbpf1_kern.c 	bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1);
skb                23 samples/bpf/tcbpf1_kern.c static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
skb                25 samples/bpf/tcbpf1_kern.c 	__u8 old_tos = load_byte(skb, TOS_OFF);
skb                27 samples/bpf/tcbpf1_kern.c 	bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
skb                28 samples/bpf/tcbpf1_kern.c 	bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
skb                36 samples/bpf/tcbpf1_kern.c static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
skb                38 samples/bpf/tcbpf1_kern.c 	__u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF));
skb                40 samples/bpf/tcbpf1_kern.c 	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
skb                41 samples/bpf/tcbpf1_kern.c 	bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
skb                42 samples/bpf/tcbpf1_kern.c 	bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
skb                46 samples/bpf/tcbpf1_kern.c static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
skb                48 samples/bpf/tcbpf1_kern.c 	__u16 old_port = htons(load_half(skb, TCP_DPORT_OFF));
skb                50 samples/bpf/tcbpf1_kern.c 	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
skb                51 samples/bpf/tcbpf1_kern.c 	bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
skb                55 samples/bpf/tcbpf1_kern.c int bpf_prog1(struct __sk_buff *skb)
skb                57 samples/bpf/tcbpf1_kern.c 	__u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
skb                61 samples/bpf/tcbpf1_kern.c 		set_ip_tos(skb, 8);
skb                62 samples/bpf/tcbpf1_kern.c 		set_tcp_ip_src(skb, 0xA010101);
skb                63 samples/bpf/tcbpf1_kern.c 		set_tcp_dest_port(skb, 5001);
skb                69 samples/bpf/tcbpf1_kern.c int _redirect_xmit(struct __sk_buff *skb)
skb                71 samples/bpf/tcbpf1_kern.c 	return bpf_redirect(skb->ifindex + 1, 0);
skb                74 samples/bpf/tcbpf1_kern.c int _redirect_recv(struct __sk_buff *skb)
skb                76 samples/bpf/tcbpf1_kern.c 	return bpf_redirect(skb->ifindex + 1, 1);
skb                79 samples/bpf/tcbpf1_kern.c int _clone_redirect_xmit(struct __sk_buff *skb)
skb                81 samples/bpf/tcbpf1_kern.c 	bpf_clone_redirect(skb, skb->ifindex + 1, 0);
skb                85 samples/bpf/tcbpf1_kern.c int _clone_redirect_recv(struct __sk_buff *skb)
skb                87 samples/bpf/tcbpf1_kern.c 	bpf_clone_redirect(skb, skb->ifindex + 1, 1);
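set_tcp_ip_src() above shows the three-step checksum-correct rewrite: patch the L4 checksum first (the sample's IS_PSEUDO flag is BPF_F_PSEUDO_HDR, needed because the address feeds the TCP pseudo header), patch the IP header checksum, then store the new bytes. A hedged sketch rewriting the IPv4 destination instead, with the offsets spelled out; it assumes an untagged Ethernet + option-less IPv4 + TCP packet:

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>

#define IP_CSUM_OFF  (ETH_HLEN + offsetof(struct iphdr, check))
#define IP_DST_OFF   (ETH_HLEN + offsetof(struct iphdr, daddr))
#define TCP_CSUM_OFF (ETH_HLEN + sizeof(struct iphdr) + \
		      offsetof(struct tcphdr, check))

static __always_inline void set_ip_dst(struct __sk_buff *skb,
				       __u32 old_ip, __u32 new_ip)
{
	/* daddr is part of the TCP pseudo header, hence BPF_F_PSEUDO_HDR */
	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip,
			    BPF_F_PSEUDO_HDR | sizeof(new_ip));
	bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
	bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, sizeof(new_ip), 0);
}
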
skb                42 samples/bpf/test_cgrp2_tc_kern.c int handle_egress(struct __sk_buff *skb)
skb                44 samples/bpf/test_cgrp2_tc_kern.c 	void *data = (void *)(long)skb->data;
skb                47 samples/bpf/test_cgrp2_tc_kern.c 	void *data_end = (void *)(long)skb->data_end;
skb                61 samples/bpf/test_cgrp2_tc_kern.c 	} else if (bpf_skb_under_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
skb                37 samples/bpf/test_lwt_bpf.c int do_nop(struct __sk_buff *skb)
skb                44 samples/bpf/test_lwt_bpf.c int do_test_ctx(struct __sk_buff *skb)
skb                46 samples/bpf/test_lwt_bpf.c 	skb->cb[0] = CB_MAGIC;
skb                47 samples/bpf/test_lwt_bpf.c 	printk("len %d hash %d protocol %d\n", skb->len, skb->hash,
skb                48 samples/bpf/test_lwt_bpf.c 	       skb->protocol);
skb                49 samples/bpf/test_lwt_bpf.c 	printk("cb %d ingress_ifindex %d ifindex %d\n", skb->cb[0],
skb                50 samples/bpf/test_lwt_bpf.c 	       skb->ingress_ifindex, skb->ifindex);
skb                57 samples/bpf/test_lwt_bpf.c int do_test_cb(struct __sk_buff *skb)
skb                59 samples/bpf/test_lwt_bpf.c 	printk("cb0: %x cb1: %x cb2: %x\n", skb->cb[0], skb->cb[1],
skb                60 samples/bpf/test_lwt_bpf.c 	       skb->cb[2]);
skb                61 samples/bpf/test_lwt_bpf.c 	printk("cb3: %x cb4: %x\n", skb->cb[3], skb->cb[4]);
skb                68 samples/bpf/test_lwt_bpf.c int do_test_data(struct __sk_buff *skb)
skb                70 samples/bpf/test_lwt_bpf.c 	void *data = (void *)(long)skb->data;
skb                71 samples/bpf/test_lwt_bpf.c 	void *data_end = (void *)(long)skb->data_end;
skb                92 samples/bpf/test_lwt_bpf.c static inline int rewrite(struct __sk_buff *skb, uint32_t old_ip,
skb                98 samples/bpf/test_lwt_bpf.c 	ret = bpf_skb_load_bytes(skb, IP_PROTO_OFF, &proto, 1);
skb               120 samples/bpf/test_lwt_bpf.c 		ret = bpf_l4_csum_replace(skb, off, old_ip, new_ip,
skb               128 samples/bpf/test_lwt_bpf.c 	ret = bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
skb               135 samples/bpf/test_lwt_bpf.c 		ret = bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, sizeof(new_ip), 0);
skb               137 samples/bpf/test_lwt_bpf.c 		ret = bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
skb               149 samples/bpf/test_lwt_bpf.c int do_test_rewrite(struct __sk_buff *skb)
skb               154 samples/bpf/test_lwt_bpf.c 	ret = bpf_skb_load_bytes(skb, IP_DST_OFF, &old_ip, 4);
skb               162 samples/bpf/test_lwt_bpf.c 		return rewrite(skb, old_ip, new_ip, 1);
skb               168 samples/bpf/test_lwt_bpf.c static inline int __do_push_ll_and_redirect(struct __sk_buff *skb)
skb               174 samples/bpf/test_lwt_bpf.c 	ret = bpf_skb_change_head(skb, 14, 0);
skb               183 samples/bpf/test_lwt_bpf.c 	ret = bpf_skb_store_bytes(skb, 0, &ehdr, sizeof(ehdr), 0);
skb               193 samples/bpf/test_lwt_bpf.c int do_push_ll_and_redirect_silent(struct __sk_buff *skb)
skb               195 samples/bpf/test_lwt_bpf.c 	return __do_push_ll_and_redirect(skb);
skb               199 samples/bpf/test_lwt_bpf.c int do_push_ll_and_redirect(struct __sk_buff *skb)
skb               203 samples/bpf/test_lwt_bpf.c 	ret = __do_push_ll_and_redirect(skb);
skb               210 samples/bpf/test_lwt_bpf.c static inline void __fill_garbage(struct __sk_buff *skb)
skb               214 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 0, &f, sizeof(f), 0);
skb               215 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 8, &f, sizeof(f), 0);
skb               216 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 16, &f, sizeof(f), 0);
skb               217 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 24, &f, sizeof(f), 0);
skb               218 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 32, &f, sizeof(f), 0);
skb               219 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 40, &f, sizeof(f), 0);
skb               220 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 48, &f, sizeof(f), 0);
skb               221 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 56, &f, sizeof(f), 0);
skb               222 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 64, &f, sizeof(f), 0);
skb               223 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 72, &f, sizeof(f), 0);
skb               224 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 80, &f, sizeof(f), 0);
skb               225 samples/bpf/test_lwt_bpf.c 	bpf_skb_store_bytes(skb, 88, &f, sizeof(f), 0);
skb               229 samples/bpf/test_lwt_bpf.c int do_fill_garbage(struct __sk_buff *skb)
skb               231 samples/bpf/test_lwt_bpf.c 	__fill_garbage(skb);
skb               237 samples/bpf/test_lwt_bpf.c int do_fill_garbage_and_redirect(struct __sk_buff *skb)
skb               240 samples/bpf/test_lwt_bpf.c 	__fill_garbage(skb);
skb               247 samples/bpf/test_lwt_bpf.c int do_drop_all(struct __sk_buff *skb)
skb                28 samples/bpf/tracex1_kern.c 	struct sk_buff *skb;
skb                32 samples/bpf/tracex1_kern.c 	skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
skb                33 samples/bpf/tracex1_kern.c 	dev = _(skb->dev);
skb                34 samples/bpf/tracex1_kern.c 	len = _(skb->len);
skb                41 samples/bpf/tracex1_kern.c 		bpf_trace_printk(fmt, sizeof(fmt), skb, len);
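tracex1 attaches a kprobe and pulls the traced function's first argument out of pt_regs; because that kernel pointer is untrusted in BPF context, every dereference goes through a probe read (the sample's `_()` macro). A hedged sketch in the same style; it assumes the samples' build setup where kernel headers are available, and newer trees spell the helper bpf_probe_read_kernel():

#include <linux/skbuff.h>
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* probe-read wrapper in the style of samples/bpf */
#define _(P) ({ typeof(P) val = 0;				\
		bpf_probe_read(&val, sizeof(val), &(P));	\
		val; })

SEC("kprobe/netif_receive_skb")		/* example attach point */
int sketch_trace(struct pt_regs *ctx)
{
	/* first argument of the traced function, per calling convention */
	struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM1(ctx);
	unsigned int len = _(skb->len);
	char fmt[] = "skb %p len %u\n";

	bpf_trace_printk(fmt, sizeof(fmt), skb, len);
	return 0;
}

char _license[] SEC("license") = "GPL";
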
skb                45 samples/connector/cn_test.c 	struct sk_buff *skb;
skb                53 samples/connector/cn_test.c 	skb = alloc_skb(size, GFP_ATOMIC);
skb                54 samples/connector/cn_test.c 	if (!skb) {
skb                59 samples/connector/cn_test.c 	nlh = nlmsg_put(skb, 0, 0x123, NLMSG_DONE, size - sizeof(*nlh), 0);
skb                61 samples/connector/cn_test.c 		kfree_skb(skb);
skb               104 samples/connector/cn_test.c 	NETLINK_CB(skb).dst_group = ctl->group;
skb               106 samples/connector/cn_test.c 	netlink_unicast(nls, skb, 0, 0);
skb              1026 security/apparmor/lsm.c static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb              1030 security/apparmor/lsm.c 	if (!skb->secmark)
skb              1034 security/apparmor/lsm.c 				      skb->secmark, sk);
skb              1105 security/apparmor/lsm.c 					    struct sk_buff *skb, u32 *secid)
skb              1132 security/apparmor/lsm.c static int apparmor_inet_conn_request(struct sock *sk, struct sk_buff *skb,
skb              1137 security/apparmor/lsm.c 	if (!skb->secmark)
skb              1141 security/apparmor/lsm.c 				      skb->secmark, sk);
skb              1614 security/apparmor/lsm.c 					  struct sk_buff *skb,
skb              1620 security/apparmor/lsm.c 	if (!skb->secmark)
skb              1623 security/apparmor/lsm.c 	sk = skb_to_full_sk(skb);
skb              1629 security/apparmor/lsm.c 				    skb->secmark, sk))
skb              1637 security/apparmor/lsm.c 					    struct sk_buff *skb,
skb              1640 security/apparmor/lsm.c 	return apparmor_ip_postroute(priv, skb, state);
skb              1645 security/apparmor/lsm.c 					    struct sk_buff *skb,
skb              1648 security/apparmor/lsm.c 	return apparmor_ip_postroute(priv, skb, state);
skb                39 security/lsm_audit.c int ipv4_skb_to_auditdata(struct sk_buff *skb,
skb                45 security/lsm_audit.c 	ih = ip_hdr(skb);
skb                60 security/lsm_audit.c 		struct tcphdr *th = tcp_hdr(skb);
skb                69 security/lsm_audit.c 		struct udphdr *uh = udp_hdr(skb);
skb                78 security/lsm_audit.c 		struct dccp_hdr *dh = dccp_hdr(skb);
skb                87 security/lsm_audit.c 		struct sctphdr *sh = sctp_hdr(skb);
skb               108 security/lsm_audit.c int ipv6_skb_to_auditdata(struct sk_buff *skb,
skb               116 security/lsm_audit.c 	ip6 = ipv6_hdr(skb);
skb               124 security/lsm_audit.c 	offset = skb_network_offset(skb);
skb               127 security/lsm_audit.c 	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
skb               136 security/lsm_audit.c 		th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
skb               147 security/lsm_audit.c 		uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
skb               158 security/lsm_audit.c 		dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
skb               169 security/lsm_audit.c 		sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
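ipv6_skb_to_auditdata() leans on skb_header_pointer(), which returns a direct pointer into the skb when the requested bytes are linear and otherwise copies them into the caller-supplied buffer, so transport headers can be parsed safely regardless of skb layout. A hedged sketch of the idiom for reading a TCP destination port from an IPv4 packet:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>

static int sketch_tcp_dport(struct sk_buff *skb, __be16 *dport)
{
	struct tcphdr _tcph;
	const struct tcphdr *th;
	int offset = skb_network_offset(skb) + ip_hdr(skb)->ihl * 4;

	/* returns &_tcph (filled by copy) or a pointer into the skb */
	th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
	if (!th)
		return -EINVAL;

	*dport = th->dest;
	return 0;
}
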
skb              1904 security/security.c int security_netlink_send(struct sock *sk, struct sk_buff *skb)
skb              1906 security/security.c 	return call_int_hook(netlink_send, 0, sk, skb);
skb              2047 security/security.c int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb              2049 security/security.c 	return call_int_hook(socket_sock_rcv_skb, 0, sk, skb);
skb              2060 security/security.c int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
skb              2063 security/security.c 			     skb, secid);
skb              2102 security/security.c 			struct sk_buff *skb, struct request_sock *req)
skb              2104 security/security.c 	return call_int_hook(inet_conn_request, 0, sk, skb, req);
skb              2115 security/security.c 			struct sk_buff *skb)
skb              2117 security/security.c 	call_void_hook(inet_conn_established, sk, skb);
skb              2175 security/security.c int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb)
skb              2177 security/security.c 	return call_int_hook(sctp_assoc_request, 0, ep, skb);
skb              2305 security/security.c int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
skb              2307 security/security.c 	return call_int_hook(xfrm_decode_session, 0, skb, secid, 1);
skb              2310 security/security.c void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
skb              2312 security/security.c 	int rc = call_int_hook(xfrm_decode_session, 0, skb, &fl->flowi_secid,
skb              4175 security/selinux/hooks.c static int selinux_parse_skb_ipv4(struct sk_buff *skb,
skb              4181 security/selinux/hooks.c 	offset = skb_network_offset(skb);
skb              4182 security/selinux/hooks.c 	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
skb              4205 security/selinux/hooks.c 		th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
skb              4221 security/selinux/hooks.c 		uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
skb              4237 security/selinux/hooks.c 		dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
skb              4254 security/selinux/hooks.c 		sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
skb              4273 security/selinux/hooks.c static int selinux_parse_skb_ipv6(struct sk_buff *skb,
skb              4281 security/selinux/hooks.c 	offset = skb_network_offset(skb);
skb              4282 security/selinux/hooks.c 	ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
skb              4292 security/selinux/hooks.c 	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
skb              4303 security/selinux/hooks.c 		th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
skb              4315 security/selinux/hooks.c 		uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
skb              4327 security/selinux/hooks.c 		dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
skb              4340 security/selinux/hooks.c 		sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
skb              4359 security/selinux/hooks.c static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
skb              4367 security/selinux/hooks.c 		ret = selinux_parse_skb_ipv4(skb, ad, proto);
skb              4376 security/selinux/hooks.c 		ret = selinux_parse_skb_ipv6(skb, ad, proto);
skb              4415 security/selinux/hooks.c static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
skb              4422 security/selinux/hooks.c 	err = selinux_xfrm_skb_sid(skb, &xfrm_sid);
skb              4425 security/selinux/hooks.c 	err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
skb              4958 security/selinux/hooks.c static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
skb              4970 security/selinux/hooks.c 	ad.u.net->netif = skb->skb_iif;
skb              4972 security/selinux/hooks.c 	err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
skb              4978 security/selinux/hooks.c 				   sk_sid, skb->secmark, SECCLASS_PACKET,
skb              4984 security/selinux/hooks.c 	err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, &ad);
skb              4987 security/selinux/hooks.c 	err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad);
skb              4992 security/selinux/hooks.c static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb              5008 security/selinux/hooks.c 	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
skb              5016 security/selinux/hooks.c 		return selinux_sock_rcv_skb_compat(sk, skb, family);
skb              5025 security/selinux/hooks.c 	ad.u.net->netif = skb->skb_iif;
skb              5027 security/selinux/hooks.c 	err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
skb              5034 security/selinux/hooks.c 		err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
skb              5037 security/selinux/hooks.c 		err = selinux_inet_sys_rcv_skb(sock_net(sk), skb->skb_iif,
skb              5040 security/selinux/hooks.c 			selinux_netlbl_err(skb, family, err, 0);
skb              5047 security/selinux/hooks.c 			selinux_netlbl_err(skb, family, err, 0);
skb              5054 security/selinux/hooks.c 				   sk_sid, skb->secmark, SECCLASS_PACKET,
skb              5099 security/selinux/hooks.c static int selinux_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
skb              5105 security/selinux/hooks.c 	if (skb && skb->protocol == htons(ETH_P_IP))
skb              5107 security/selinux/hooks.c 	else if (skb && skb->protocol == htons(ETH_P_IPV6))
skb              5117 security/selinux/hooks.c 	} else if (skb)
skb              5118 security/selinux/hooks.c 		selinux_skb_peerlbl_sid(skb, family, &peer_secid);
skb              5193 security/selinux/hooks.c 				      struct sk_buff *skb)
skb              5212 security/selinux/hooks.c 		err = selinux_skb_peerlbl_sid(skb, ep->base.sk->sk_family,
skb              5258 security/selinux/hooks.c 	return selinux_netlbl_sctp_assoc_request(ep, skb);
skb              5361 security/selinux/hooks.c static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
skb              5370 security/selinux/hooks.c 	err = selinux_skb_peerlbl_sid(skb, family, &peersid);
skb              5399 security/selinux/hooks.c static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
skb              5405 security/selinux/hooks.c 	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
skb              5408 security/selinux/hooks.c 	selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
skb              5522 security/selinux/hooks.c static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
skb              5526 security/selinux/hooks.c 	unsigned int data_len = skb->len;
skb              5527 security/selinux/hooks.c 	unsigned char *data = skb->data;
skb              5582 security/selinux/hooks.c static unsigned int selinux_ip_forward(struct sk_buff *skb,
skb              5604 security/selinux/hooks.c 	if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0)
skb              5611 security/selinux/hooks.c 	if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
skb              5618 security/selinux/hooks.c 			selinux_netlbl_err(skb, family, err, 1);
skb              5625 security/selinux/hooks.c 				 peer_sid, skb->secmark,
skb              5634 security/selinux/hooks.c 		if (selinux_netlbl_skbuff_setsid(skb, family, peer_sid) != 0)
skb              5641 security/selinux/hooks.c 					 struct sk_buff *skb,
skb              5644 security/selinux/hooks.c 	return selinux_ip_forward(skb, state->in, PF_INET);
skb              5649 security/selinux/hooks.c 					 struct sk_buff *skb,
skb              5652 security/selinux/hooks.c 	return selinux_ip_forward(skb, state->in, PF_INET6);
skb              5656 security/selinux/hooks.c static unsigned int selinux_ip_output(struct sk_buff *skb,
skb              5668 security/selinux/hooks.c 	sk = skb->sk;
skb              5692 security/selinux/hooks.c 	if (selinux_netlbl_skbuff_setsid(skb, family, sid) != 0)
skb              5699 security/selinux/hooks.c 					struct sk_buff *skb,
skb              5702 security/selinux/hooks.c 	return selinux_ip_output(skb, PF_INET);
skb              5707 security/selinux/hooks.c 					struct sk_buff *skb,
skb              5710 security/selinux/hooks.c 	return selinux_ip_output(skb, PF_INET6);
skb              5714 security/selinux/hooks.c static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
skb              5718 security/selinux/hooks.c 	struct sock *sk = skb_to_full_sk(skb);
skb              5733 security/selinux/hooks.c 	if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto))
skb              5738 security/selinux/hooks.c 				 sksec->sid, skb->secmark,
skb              5742 security/selinux/hooks.c 	if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
skb              5748 security/selinux/hooks.c static unsigned int selinux_ip_postroute(struct sk_buff *skb,
skb              5767 security/selinux/hooks.c 		return selinux_ip_postroute_compat(skb, ifindex, family);
skb              5774 security/selinux/hooks.c 	sk = skb_to_full_sk(skb);
skb              5788 security/selinux/hooks.c 	if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
skb              5798 security/selinux/hooks.c 		if (skb->skb_iif) {
skb              5800 security/selinux/hooks.c 			if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
skb              5820 security/selinux/hooks.c 		if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
skb              5831 security/selinux/hooks.c 				if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
skb              5835 security/selinux/hooks.c 				if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
skb              5857 security/selinux/hooks.c 	if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
skb              5862 security/selinux/hooks.c 				 peer_sid, skb->secmark,
skb              5889 security/selinux/hooks.c 					   struct sk_buff *skb,
skb              5892 security/selinux/hooks.c 	return selinux_ip_postroute(skb, state->out, PF_INET);
skb              5897 security/selinux/hooks.c 					   struct sk_buff *skb,
skb              5900 security/selinux/hooks.c 	return selinux_ip_postroute(skb, state->out, PF_INET6);
skb              5906 security/selinux/hooks.c static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
skb              5908 security/selinux/hooks.c 	return selinux_nlmsg_perm(sk, skb);
skb                29 security/selinux/include/netlabel.h void selinux_netlbl_err(struct sk_buff *skb, u16 family, int error,
skb                35 security/selinux/include/netlabel.h int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
skb                39 security/selinux/include/netlabel.h int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
skb                43 security/selinux/include/netlabel.h 				     struct sk_buff *skb);
skb                49 security/selinux/include/netlabel.h 				struct sk_buff *skb,
skb                65 security/selinux/include/netlabel.h static inline void selinux_netlbl_err(struct sk_buff *skb,
skb                85 security/selinux/include/netlabel.h static inline int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
skb                94 security/selinux/include/netlabel.h static inline int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
skb               108 security/selinux/include/netlabel.h 						    struct sk_buff *skb)
skb               132 security/selinux/include/netlabel.h 					      struct sk_buff *skb,
skb                39 security/selinux/include/xfrm.h int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
skb                41 security/selinux/include/xfrm.h int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
skb                43 security/selinux/include/xfrm.h int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
skb                44 security/selinux/include/xfrm.h int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
skb                61 security/selinux/include/xfrm.h static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
skb                67 security/selinux/include/xfrm.h static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
skb                74 security/selinux/include/xfrm.h static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
skb                85 security/selinux/include/xfrm.h static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
skb                41 security/selinux/netlabel.c static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb,
skb                52 security/selinux/netlabel.c 		netlbl_cache_add(skb, family, secattr);
skb               141 security/selinux/netlabel.c void selinux_netlbl_err(struct sk_buff *skb, u16 family, int error, int gateway)
skb               143 security/selinux/netlabel.c 	netlbl_skbuff_err(skb, family, error, gateway);
skb               188 security/selinux/netlabel.c int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
skb               202 security/selinux/netlabel.c 	rc = netlbl_skbuff_getattr(skb, family, &secattr);
skb               204 security/selinux/netlabel.c 		rc = selinux_netlbl_sidlookup_cached(skb, family,
skb               225 security/selinux/netlabel.c int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
skb               236 security/selinux/netlabel.c 	sk = skb_to_full_sk(skb);
skb               253 security/selinux/netlabel.c 	rc = netlbl_skbuff_setattr(skb, family, secattr);
skb               272 security/selinux/netlabel.c 				     struct sk_buff *skb)
skb               293 security/selinux/netlabel.c 	if (ip_hdr(skb)->version == 4) {
skb               295 security/selinux/netlabel.c 		addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
skb               297 security/selinux/netlabel.c 	} else if (IS_ENABLED(CONFIG_IPV6) && ip_hdr(skb)->version == 6) {
skb               299 security/selinux/netlabel.c 		addr6.sin6_addr = ipv6_hdr(skb)->saddr;
skb               429 security/selinux/netlabel.c 				struct sk_buff *skb,
skb               442 security/selinux/netlabel.c 	rc = netlbl_skbuff_getattr(skb, family, &secattr);
skb               444 security/selinux/netlabel.c 		rc = selinux_netlbl_sidlookup_cached(skb, family,
skb               469 security/selinux/netlabel.c 		netlbl_skbuff_err(skb, family, rc, 0);
skb                71 security/selinux/netlink.c 	struct sk_buff *skb;
skb                76 security/selinux/netlink.c 	skb = nlmsg_new(len, GFP_USER);
skb                77 security/selinux/netlink.c 	if (!skb)
skb                80 security/selinux/netlink.c 	tmp = skb->tail;
skb                81 security/selinux/netlink.c 	nlh = nlmsg_put(skb, 0, 0, msgtype, len, 0);
skb                85 security/selinux/netlink.c 	nlh->nlmsg_len = skb->tail - tmp;
skb                86 security/selinux/netlink.c 	NETLINK_CB(skb).dst_group = SELNLGRP_AVC;
skb                87 security/selinux/netlink.c 	netlink_broadcast(selnl, skb, 0, SELNLGRP_AVC, GFP_USER);
skb                92 security/selinux/netlink.c 	kfree_skb(skb);
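The selinux/netlink.c excerpt allocates an skb with GFP_USER, writes a single message, stamps the destination group into NETLINK_CB, and pushes it out with netlink_broadcast(), which consumes the skb. A hedged sketch of that broadcast side; MY_GRP is a placeholder group and nlsk an already-created kernel netlink socket:

#include <net/netlink.h>
#include <linux/string.h>

static void sketch_notify(struct sock *nlsk, int msgtype,
			  const void *payload, int len)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(len, GFP_USER);
	if (!skb)
		return;

	nlh = nlmsg_put(skb, 0, 0, msgtype, len, 0);
	if (!nlh) {
		kfree_skb(skb);
		return;
	}
	memcpy(nlmsg_data(nlh), payload, len);

	NETLINK_CB(skb).dst_group = MY_GRP;
	netlink_broadcast(nlsk, skb, 0, MY_GRP, GFP_USER);	/* consumes skb */
}
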
skb               212 security/selinux/xfrm.c static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
skb               214 security/selinux/xfrm.c 	struct dst_entry *dst = skb_dst(skb);
skb               226 security/selinux/xfrm.c static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
skb               230 security/selinux/xfrm.c 	struct sec_path *sp = skb_sec_path(skb);
skb               261 security/selinux/xfrm.c int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
skb               263 security/selinux/xfrm.c 	if (skb == NULL) {
skb               267 security/selinux/xfrm.c 	return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
skb               270 security/selinux/xfrm.c int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
skb               274 security/selinux/xfrm.c 	rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
skb               276 security/selinux/xfrm.c 		*sid = selinux_xfrm_skb_sid_egress(skb);
skb               404 security/selinux/xfrm.c int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
skb               408 security/selinux/xfrm.c 	struct sec_path *sp = skb_sec_path(skb);
skb               438 security/selinux/xfrm.c int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
skb               455 security/selinux/xfrm.c 	dst = skb_dst(skb);
skb              3777 security/smack/smack_lsm.c static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
skb              3791 security/smack/smack_lsm.c 	offset = skb_network_offset(skb);
skb              3792 security/smack/smack_lsm.c 	ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
skb              3799 security/smack/smack_lsm.c 	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
skb              3806 security/smack/smack_lsm.c 		th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
skb              3812 security/smack/smack_lsm.c 		uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
skb              3817 security/smack/smack_lsm.c 		dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
skb              3833 security/smack/smack_lsm.c static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb              3848 security/smack/smack_lsm.c 	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
skb              3860 security/smack/smack_lsm.c 		if (skb && skb->secmark != 0) {
skb              3861 security/smack/smack_lsm.c 			skp = smack_from_secid(skb->secmark);
skb              3870 security/smack/smack_lsm.c 		rc = netlbl_skbuff_getattr(skb, family, &secattr);
skb              3884 security/smack/smack_lsm.c 		ad.a.u.net->netif = skb->skb_iif;
skb              3885 security/smack/smack_lsm.c 		ipv4_skb_to_auditdata(skb, &ad.a, NULL);
skb              3897 security/smack/smack_lsm.c 			netlbl_skbuff_err(skb, family, rc, 0);
skb              3901 security/smack/smack_lsm.c 		proto = smk_skb_to_addr_ipv6(skb, &sadd);
skb              3906 security/smack/smack_lsm.c 		if (skb && skb->secmark != 0)
skb              3907 security/smack/smack_lsm.c 			skp = smack_from_secid(skb->secmark);
skb              3914 security/smack/smack_lsm.c 		if (skb == NULL)
skb              3919 security/smack/smack_lsm.c 		ad.a.u.net->netif = skb->skb_iif;
skb              3920 security/smack/smack_lsm.c 		ipv6_skb_to_auditdata(skb, &ad.a, NULL);
skb              3930 security/smack/smack_lsm.c 			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
skb              3984 security/smack/smack_lsm.c 					 struct sk_buff *skb, u32 *secid)
skb              3994 security/smack/smack_lsm.c 	if (skb != NULL) {
skb              3995 security/smack/smack_lsm.c 		if (skb->protocol == htons(ETH_P_IP))
skb              3998 security/smack/smack_lsm.c 		else if (skb->protocol == htons(ETH_P_IPV6))
skb              4012 security/smack/smack_lsm.c 		s = skb->secmark;
skb              4022 security/smack/smack_lsm.c 		rc = netlbl_skbuff_getattr(skb, family, &secattr);
skb              4031 security/smack/smack_lsm.c 		s = skb->secmark;
skb              4073 security/smack/smack_lsm.c static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
skb              4096 security/smack/smack_lsm.c 		if (skb->protocol == htons(ETH_P_IP))
skb              4109 security/smack/smack_lsm.c 	if (skb && skb->secmark != 0) {
skb              4110 security/smack/smack_lsm.c 		skp = smack_from_secid(skb->secmark);
skb              4116 security/smack/smack_lsm.c 	rc = netlbl_skbuff_getattr(skb, family, &secattr);
skb              4130 security/smack/smack_lsm.c 	ad.a.u.net->netif = skb->skb_iif;
skb              4131 security/smack/smack_lsm.c 	ipv4_skb_to_auditdata(skb, &ad.a, NULL);
skb              4153 security/smack/smack_lsm.c 	hdr = ip_hdr(skb);
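smk_skb_to_addr_ipv6() above leans on skb_header_pointer(), which returns a pointer to the requested bytes and copies them into the caller's on-stack buffer only when they are not linear in the skb. A hedged sketch of that access pattern (example_ipv6_tcp_dport() is illustrative and assumes no IPv6 extension headers; the real helper first walks them with ipv6_skip_exthdr()):

#include <linux/errno.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Illustrative sketch: safely read the TCP destination port of an
 * IPv6 packet. _tcph is only used as backing store when the header
 * is not contiguous; either way `th` is valid to dereference.
 */
static int example_ipv6_tcp_dport(struct sk_buff *skb, __be16 *dport)
{
	struct tcphdr _tcph;
	const struct tcphdr *th;
	int offset = skb_network_offset(skb) + sizeof(struct ipv6hdr);

	th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
	if (!th)
		return -EINVAL;

	*dport = th->dest;
	return 0;
}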
skb                24 security/smack/smack_netfilter.c 					struct sk_buff *skb,
skb                27 security/smack/smack_netfilter.c 	struct sock *sk = skb_to_full_sk(skb);
skb                34 security/smack/smack_netfilter.c 		skb->secmark = skp->smk_secid;
skb                42 security/smack/smack_netfilter.c 					struct sk_buff *skb,
skb                45 security/smack/smack_netfilter.c 	struct sock *sk = skb_to_full_sk(skb);
skb                52 security/smack/smack_netfilter.c 		skb->secmark = skp->smk_secid;
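Both smack_netfilter.c hooks above share one shape: recover the full socket behind the skb with skb_to_full_sk() (request and timewait minisockets carry no security blob), then stamp skb->secmark from the socket's label. A hedged sketch of such a hook; EXAMPLE_SECID is a placeholder for a secid looked up from the socket's security blob, and skb->secmark requires CONFIG_NETWORK_SECMARK:

#include <linux/netfilter.h>
#include <net/inet_sock.h>	/* skb_to_full_sk() */

#define EXAMPLE_SECID 42	/* placeholder label */

static unsigned int example_secmark_out(void *priv, struct sk_buff *skb,
					const struct nf_hook_state *state)
{
	struct sock *sk = skb_to_full_sk(skb);

	/* Only full sockets carry a security blob worth consulting. */
	if (sk && sk->sk_security)
		skb->secmark = EXAMPLE_SECID;

	return NF_ACCEPT;
}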
skb               241 tools/testing/selftests/bpf/bpf_helpers.h unsigned long long load_byte(void *skb,
skb               243 tools/testing/selftests/bpf/bpf_helpers.h unsigned long long load_half(void *skb,
skb               245 tools/testing/selftests/bpf/bpf_helpers.h unsigned long long load_word(void *skb,
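The load_byte()/load_half()/load_word() declarations above compile to the legacy BPF_LD | BPF_ABS instructions: they read at an absolute offset from the start of the packet, convert to host byte order, and abort the program on out-of-range access, so no explicit bounds check is needed. A small socket-filter sketch under those assumptions (example_only_ipv4() is illustrative):

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include "bpf_helpers.h"

/* Illustrative socket filter: keep IPv4 frames, drop everything else.
 * A socket filter's return value is the number of bytes to keep;
 * -1 keeps the whole packet, 0 drops it.
 */
SEC("socket")
int example_only_ipv4(struct __sk_buff *skb)
{
	if (load_half(skb, offsetof(struct ethhdr, h_proto)) != ETH_P_IP)
		return 0;
	return -1;
}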
skb                 6 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	struct __sk_buff skb = {
skb                17 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 		.ctx_in = &skb,
skb                18 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 		.ctx_size_in = sizeof(skb),
skb                19 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 		.ctx_out = &skb,
skb                20 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 		.ctx_size_out = sizeof(skb),
skb                36 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	tattr.ctx_size_in = sizeof(skb);
skb                43 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	tattr.ctx_size_out = sizeof(skb);
skb                47 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	skb.len = 1;
skb                50 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	skb.len = 0;
skb                52 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	skb.tc_index = 1;
skb                55 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	skb.tc_index = 0;
skb                59 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	skb.hash = 1;
skb                62 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	skb.hash = 0;
skb                64 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	skb.sk = (struct bpf_sock *)1;
skb                67 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	skb.sk = 0;
skb                75 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
skb                78 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 		   sizeof(skb), tattr.ctx_size_out);
skb                81 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 		CHECK_ATTR(skb.cb[i] != i + 2,
skb                84 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 			   skb.cb[i], i + 2);
skb                85 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	CHECK_ATTR(skb.priority != 7,
skb                88 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 		   skb.priority, 7);
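The skb_ctx checks above drive BPF_PROG_TEST_RUN with a user-supplied __sk_buff: ctx_in seeds the writable fields (cb[] and priority round-trip, while the len/tc_index/hash/sk probes exercise the rejected fields), the program mutates them, and ctx_out carries the result back. A hedged userspace sketch of one round-trip (example_test_run_ctx() is illustrative; prog_fd is assumed to be the loaded test_skb_ctx program, which increments each cb[i] and priority):

#include <linux/bpf.h>
#include <bpf/bpf.h>

int example_test_run_ctx(int prog_fd)
{
	struct __sk_buff ctx = { .cb = {1, 2, 3, 4, 5}, .priority = 6 };
	char pkt[64] = {};	/* test_run still needs packet data */
	struct bpf_prog_test_run_attr attr = {
		.prog_fd = prog_fd,
		.repeat = 1,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
		.ctx_out = &ctx,
		.ctx_size_out = sizeof(ctx),
	};

	if (bpf_prog_test_run_xattr(&attr))
		return -1;

	/* the program bumped cb[0] 1 -> 2 and priority 6 -> 7 */
	return (ctx.cb[0] == 2 && ctx.priority == 7) ? 0 : -1;
}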
skb                91 tools/testing/selftests/bpf/progs/bpf_flow.c static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
skb                95 tools/testing/selftests/bpf/progs/bpf_flow.c 	void *data_end = (void *)(long)skb->data_end;
skb                96 tools/testing/selftests/bpf/progs/bpf_flow.c 	void *data = (void *)(long)skb->data;
skb                97 tools/testing/selftests/bpf/progs/bpf_flow.c 	__u16 thoff = skb->flow_keys->thoff;
skb               108 tools/testing/selftests/bpf/progs/bpf_flow.c 	if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size))
skb               115 tools/testing/selftests/bpf/progs/bpf_flow.c static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
skb               117 tools/testing/selftests/bpf/progs/bpf_flow.c 	struct bpf_flow_keys *keys = skb->flow_keys;
skb               121 tools/testing/selftests/bpf/progs/bpf_flow.c 		bpf_tail_call(skb, &jmp_table, IP);
skb               124 tools/testing/selftests/bpf/progs/bpf_flow.c 		bpf_tail_call(skb, &jmp_table, IPV6);
skb               128 tools/testing/selftests/bpf/progs/bpf_flow.c 		bpf_tail_call(skb, &jmp_table, MPLS);
skb               132 tools/testing/selftests/bpf/progs/bpf_flow.c 		bpf_tail_call(skb, &jmp_table, VLAN);
skb               143 tools/testing/selftests/bpf/progs/bpf_flow.c int _dissect(struct __sk_buff *skb)
skb               145 tools/testing/selftests/bpf/progs/bpf_flow.c 	struct bpf_flow_keys *keys = skb->flow_keys;
skb               147 tools/testing/selftests/bpf/progs/bpf_flow.c 	return parse_eth_proto(skb, keys->n_proto);
skb               151 tools/testing/selftests/bpf/progs/bpf_flow.c static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
skb               153 tools/testing/selftests/bpf/progs/bpf_flow.c 	struct bpf_flow_keys *keys = skb->flow_keys;
skb               154 tools/testing/selftests/bpf/progs/bpf_flow.c 	void *data_end = (void *)(long)skb->data_end;
skb               163 tools/testing/selftests/bpf/progs/bpf_flow.c 		icmp = bpf_flow_dissect_get_header(skb, sizeof(*icmp), &_icmp);
skb               172 tools/testing/selftests/bpf/progs/bpf_flow.c 		return parse_eth_proto(skb, bpf_htons(ETH_P_IP));
skb               178 tools/testing/selftests/bpf/progs/bpf_flow.c 		return parse_eth_proto(skb, bpf_htons(ETH_P_IPV6));
skb               180 tools/testing/selftests/bpf/progs/bpf_flow.c 		gre = bpf_flow_dissect_get_header(skb, sizeof(*gre), &_gre);
skb               201 tools/testing/selftests/bpf/progs/bpf_flow.c 			eth = bpf_flow_dissect_get_header(skb, sizeof(*eth),
skb               208 tools/testing/selftests/bpf/progs/bpf_flow.c 			return parse_eth_proto(skb, eth->h_proto);
skb               210 tools/testing/selftests/bpf/progs/bpf_flow.c 			return parse_eth_proto(skb, gre->proto);
skb               213 tools/testing/selftests/bpf/progs/bpf_flow.c 		tcp = bpf_flow_dissect_get_header(skb, sizeof(*tcp), &_tcp);
skb               228 tools/testing/selftests/bpf/progs/bpf_flow.c 		udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp);
skb               242 tools/testing/selftests/bpf/progs/bpf_flow.c static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
skb               244 tools/testing/selftests/bpf/progs/bpf_flow.c 	struct bpf_flow_keys *keys = skb->flow_keys;
skb               249 tools/testing/selftests/bpf/progs/bpf_flow.c 		bpf_tail_call(skb, &jmp_table, IPV6OP);
skb               252 tools/testing/selftests/bpf/progs/bpf_flow.c 		bpf_tail_call(skb, &jmp_table, IPV6FR);
skb               255 tools/testing/selftests/bpf/progs/bpf_flow.c 		return parse_ip_proto(skb, nexthdr);
skb               261 tools/testing/selftests/bpf/progs/bpf_flow.c PROG(IP)(struct __sk_buff *skb)
skb               263 tools/testing/selftests/bpf/progs/bpf_flow.c 	void *data_end = (void *)(long)skb->data_end;
skb               264 tools/testing/selftests/bpf/progs/bpf_flow.c 	struct bpf_flow_keys *keys = skb->flow_keys;
skb               265 tools/testing/selftests/bpf/progs/bpf_flow.c 	void *data = (void *)(long)skb->data;
skb               269 tools/testing/selftests/bpf/progs/bpf_flow.c 	iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
skb               307 tools/testing/selftests/bpf/progs/bpf_flow.c 	return parse_ip_proto(skb, iph->protocol);
skb               310 tools/testing/selftests/bpf/progs/bpf_flow.c PROG(IPV6)(struct __sk_buff *skb)
skb               312 tools/testing/selftests/bpf/progs/bpf_flow.c 	struct bpf_flow_keys *keys = skb->flow_keys;
skb               315 tools/testing/selftests/bpf/progs/bpf_flow.c 	ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
skb               329 tools/testing/selftests/bpf/progs/bpf_flow.c 	return parse_ipv6_proto(skb, ip6h->nexthdr);
skb               332 tools/testing/selftests/bpf/progs/bpf_flow.c PROG(IPV6OP)(struct __sk_buff *skb)
skb               334 tools/testing/selftests/bpf/progs/bpf_flow.c 	struct bpf_flow_keys *keys = skb->flow_keys;
skb               337 tools/testing/selftests/bpf/progs/bpf_flow.c 	ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
skb               347 tools/testing/selftests/bpf/progs/bpf_flow.c 	return parse_ipv6_proto(skb, ip6h->nexthdr);
skb               350 tools/testing/selftests/bpf/progs/bpf_flow.c PROG(IPV6FR)(struct __sk_buff *skb)
skb               352 tools/testing/selftests/bpf/progs/bpf_flow.c 	struct bpf_flow_keys *keys = skb->flow_keys;
skb               355 tools/testing/selftests/bpf/progs/bpf_flow.c 	fragh = bpf_flow_dissect_get_header(skb, sizeof(*fragh), &_fragh);
skb               373 tools/testing/selftests/bpf/progs/bpf_flow.c 	return parse_ipv6_proto(skb, fragh->nexthdr);
skb               376 tools/testing/selftests/bpf/progs/bpf_flow.c PROG(MPLS)(struct __sk_buff *skb)
skb               378 tools/testing/selftests/bpf/progs/bpf_flow.c 	struct bpf_flow_keys *keys = skb->flow_keys;
skb               381 tools/testing/selftests/bpf/progs/bpf_flow.c 	mpls = bpf_flow_dissect_get_header(skb, sizeof(*mpls), &_mpls);
skb               388 tools/testing/selftests/bpf/progs/bpf_flow.c PROG(VLAN)(struct __sk_buff *skb)
skb               390 tools/testing/selftests/bpf/progs/bpf_flow.c 	struct bpf_flow_keys *keys = skb->flow_keys;
skb               395 tools/testing/selftests/bpf/progs/bpf_flow.c 		vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
skb               406 tools/testing/selftests/bpf/progs/bpf_flow.c 	vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
skb               418 tools/testing/selftests/bpf/progs/bpf_flow.c 	return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
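bpf_flow.c above splits the flow dissector into one BPF program per header type and chains them through a BPF_MAP_TYPE_PROG_ARRAY with bpf_tail_call(); each stage reads its header at flow_keys->thoff, advances thoff, and either tail-calls the next stage or finishes the keys. A condensed sketch of the dispatch stage under the same layout (example_dissect(), jmp_map and the PARSE_* slots are illustrative; userspace must populate the map with the per-protocol programs):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

enum { PARSE_IP, PARSE_IPV6, PARSE_MAX };

struct bpf_map_def SEC("maps") jmp_map = {
	.type = BPF_MAP_TYPE_PROG_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),
	.max_entries = PARSE_MAX,
};

SEC("flow_dissector")
int example_dissect(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;

	if (keys->n_proto == bpf_htons(ETH_P_IP))
		bpf_tail_call(skb, &jmp_map, PARSE_IP);
	else if (keys->n_proto == bpf_htons(ETH_P_IPV6))
		bpf_tail_call(skb, &jmp_map, PARSE_IPV6);

	/* a successful tail call never returns here */
	return BPF_DROP;
}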
skb                 9 tools/testing/selftests/bpf/progs/loop4.c int combinations(volatile struct __sk_buff* skb)
skb                15 tools/testing/selftests/bpf/progs/loop4.c 		if (skb->len)
skb                10 tools/testing/selftests/bpf/progs/loop5.c int while_true(volatile struct __sk_buff* skb)
skb                15 tools/testing/selftests/bpf/progs/loop5.c 		if (skb->len)
skb                26 tools/testing/selftests/bpf/progs/netcnt_prog.c int bpf_nextcnt(struct __sk_buff *skb)
skb                38 tools/testing/selftests/bpf/progs/netcnt_prog.c 	percpu_cnt->bytes += skb->len;
skb                 8 tools/testing/selftests/bpf/progs/sockmap_parse_prog.c int bpf_prog1(struct __sk_buff *skb)
skb                10 tools/testing/selftests/bpf/progs/sockmap_parse_prog.c 	void *data_end = (void *)(long) skb->data_end;
skb                11 tools/testing/selftests/bpf/progs/sockmap_parse_prog.c 	void *data = (void *)(long) skb->data;
skb                12 tools/testing/selftests/bpf/progs/sockmap_parse_prog.c 	__u32 lport = skb->local_port;
skb                13 tools/testing/selftests/bpf/progs/sockmap_parse_prog.c 	__u32 rport = skb->remote_port;
skb                19 tools/testing/selftests/bpf/progs/sockmap_parse_prog.c 		err = bpf_skb_pull_data(skb, 10);
skb                23 tools/testing/selftests/bpf/progs/sockmap_parse_prog.c 		data_end = (void *)(long)skb->data_end;
skb                24 tools/testing/selftests/bpf/progs/sockmap_parse_prog.c 		data = (void *)(long)skb->data;
skb                35 tools/testing/selftests/bpf/progs/sockmap_parse_prog.c 	return skb->len;
skb                36 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c int bpf_prog2(struct __sk_buff *skb)
skb                38 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c 	void *data_end = (void *)(long) skb->data_end;
skb                39 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c 	void *data = (void *)(long) skb->data;
skb                40 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c 	__u32 lport = skb->local_port;
skb                41 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c 	__u32 rport = skb->remote_port;
skb                61 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c 		return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0);
skb                62 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c 	return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0);
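sockmap_parse_prog.c and sockmap_verdict_prog.c above form the usual strparser pair: the parser returns the length of the next message (here simply skb->len, i.e. one message per skb), and the verdict program picks a destination socket with bpf_sk_redirect_map(). A minimal sketch of the pair (example names; the BPF_SK_SKB_STREAM_PARSER and BPF_SK_SKB_STREAM_VERDICT attach types are chosen by the loader, not by the section names):

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") sock_map = {
	.type = BPF_MAP_TYPE_SOCKMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 2,
};

SEC("sk_skb1")
int example_parser(struct __sk_buff *skb)
{
	return skb->len;	/* one message per skb */
}

SEC("sk_skb2")
int example_verdict(struct __sk_buff *skb)
{
	/* redirect every message to the socket stored at index 0 */
	return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
}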
skb                72 tools/testing/selftests/bpf/progs/test_global_data.c int load_static_data(struct __sk_buff *skb)
skb               325 tools/testing/selftests/bpf/progs/test_l4lb.c 					  bool is_ipv6, struct __sk_buff *skb)
skb               327 tools/testing/selftests/bpf/progs/test_l4lb.c 	void *pkt_start = (void *)(long)skb->data;
skb               448 tools/testing/selftests/bpf/progs/test_l4lb.c 	bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
skb               325 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c 			  bool is_ipv6, struct __sk_buff *skb)
skb               327 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c 	void *pkt_start = (void *)(long)skb->data;
skb               448 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c 	bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
skb                16 tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c int bpf_lwt_encap_gre(struct __sk_buff *skb)
skb                39 tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c 	hdr.iph.tot_len = bpf_htons(skb->len + sizeof(struct encap_hdr));
skb                41 tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c 	hdr.greh.protocol = skb->protocol;
skb                43 tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c 	err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
skb                52 tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c int bpf_lwt_encap_gre6(struct __sk_buff *skb)
skb                63 tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c 	hdr.ip6hdr.payload_len = bpf_htons(skb->len + sizeof(struct grehdr));
skb                75 tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c 	hdr.greh.protocol = skb->protocol;
skb                77 tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c 	err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
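bpf_lwt_push_encap() above prepends a program-built outer header and, with BPF_LWT_ENCAP_IP, asks the stack to re-route the resulting packet. A compact sketch of the IPv4/GRE path under the same assumptions (example program; the 10.0.0.x addresses are placeholders, and checksum finalization is left to the rerouted output path as in the selftest):

#include <linux/bpf.h>
#include <linux/ip.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

struct gre_hdr {
	__be16 flags;
	__be16 protocol;
};

struct encap_hdr {
	struct iphdr iph;
	struct gre_hdr greh;
};

SEC("lwt_xmit")
int example_encap_gre(struct __sk_buff *skb)
{
	struct encap_hdr hdr = {};

	hdr.iph.version = 4;
	hdr.iph.ihl = sizeof(hdr.iph) >> 2;
	hdr.iph.ttl = 64;
	hdr.iph.protocol = 47;			/* IPPROTO_GRE */
	hdr.iph.saddr = bpf_htonl(0x0a000001);	/* 10.0.0.1, placeholder */
	hdr.iph.daddr = bpf_htonl(0x0a000002);	/* 10.0.0.2, placeholder */
	hdr.iph.tot_len = bpf_htons(skb->len + sizeof(hdr));
	hdr.greh.protocol = skb->protocol;	/* inner ethertype */

	if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr, sizeof(hdr)))
		return BPF_DROP;

	return BPF_LWT_REROUTE;
}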
skb                53 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
skb                60 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	data_end = (void *)(long)skb->data_end;
skb                61 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	cursor = (void *)(long)skb->data;
skb                88 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
skb                94 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 		err = bpf_lwt_seg6_adjust_srh(skb, pad_off,
skb               108 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 		err = bpf_lwt_seg6_store_bytes(skb, pad_off,
skb               118 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
skb               126 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	srh_off = (char *)srh - (char *)(long)skb->data;
skb               144 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 		err = bpf_skb_load_bytes(skb, cur_off, &tlv, sizeof(tlv));
skb               177 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
skb               180 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	uint32_t srh_off = (char *)srh - (char *)(long)skb->data;
skb               193 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = is_valid_tlv_boundary(skb, srh, &tlv_off, &pad_size, &pad_off);
skb               197 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = bpf_lwt_seg6_adjust_srh(skb, tlv_off, sizeof(*itlv) + itlv->len);
skb               201 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = bpf_lwt_seg6_store_bytes(skb, tlv_off, (void *)itlv, tlv_size);
skb               217 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	return update_tlv_pad(skb, new_pad, pad_size, pad_off);
skb               221 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
skb               224 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	uint32_t srh_off = (char *)srh - (char *)(long)skb->data;
skb               234 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = is_valid_tlv_boundary(skb, srh, &tlv_off, &pad_size, &pad_off);
skb               238 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = bpf_skb_load_bytes(skb, tlv_off, &tlv, sizeof(tlv));
skb               242 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = bpf_lwt_seg6_adjust_srh(skb, tlv_off, -(sizeof(tlv) + tlv.len));
skb               255 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	return update_tlv_pad(skb, new_pad, pad_size, pad_off);
skb               259 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c int has_egr_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh)
skb               265 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	if (bpf_skb_load_bytes(skb, tlv_offset, &tlv, sizeof(struct sr6_tlv_t)))
skb               271 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 		if (bpf_skb_load_bytes(skb, tlv_offset + 4, &egr_addr, 16))
skb               286 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c int __encap_srh(struct __sk_buff *skb)
skb               312 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = bpf_lwt_push_encap(skb, 0, (void *)srh, sizeof(srh_buf));
skb               322 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c int __add_egr_x(struct __sk_buff *skb)
skb               326 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	struct ip6_srh_t *srh = get_srh(skb);
skb               337 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = add_tlv(skb, srh, (srh->hdrlen+1) << 3,
skb               343 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = bpf_lwt_seg6_store_bytes(skb, offset,
skb               350 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X,
skb               360 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c int __pop_egr(struct __sk_buff *skb)
skb               362 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	struct ip6_srh_t *srh = get_srh(skb);
skb               376 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	if (!has_egr_tlv(skb, srh))
skb               379 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = delete_tlv(skb, srh, 8 + (srh->first_segment + 1) * 16);
skb               384 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	if (bpf_lwt_seg6_store_bytes(skb, offset, (void *)&new_flags,
skb               389 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	if (bpf_lwt_seg6_store_bytes(skb, offset, (void *)&new_tag,
skb               399 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c int __inspect_t(struct __sk_buff *skb)
skb               401 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	struct ip6_srh_t *srh = get_srh(skb);
skb               417 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c 	err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_T,
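The seg6local programs above edit an SRv6 routing header in place: bpf_lwt_seg6_adjust_srh() grows or shrinks it at a given offset, bpf_lwt_seg6_store_bytes() writes TLV bytes into the freed space, and bpf_lwt_seg6_action() finishes with a SEG6_LOCAL action. A reduced sketch of TLV insertion at an already 8-byte-aligned offset (example helper; add_tlv() above additionally validates the offset against the SRH boundaries and fixes up padding TLVs afterwards):

#include <linux/bpf.h>
#include "bpf_helpers.h"

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

struct sr6_tlv {
	__u8 type;
	__u8 len;	/* length of value[], per RFC 8754 */
	__u8 value[6];
};

static __always_inline int example_insert_tlv(struct __sk_buff *skb,
					      __u32 tlv_off)
{
	struct sr6_tlv tlv = { .type = 6, .len = 6 };	/* opaque TLV */
	int err;

	/* open an 8-byte gap inside the SRH ... */
	err = bpf_lwt_seg6_adjust_srh(skb, tlv_off, sizeof(tlv));
	if (err)
		return err;

	/* ... and fill it with the new TLV */
	return bpf_lwt_seg6_store_bytes(skb, tlv_off, &tlv, sizeof(tlv));
}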
skb                34 tools/testing/selftests/bpf/progs/test_map_lock.c int bpf_map_lock_test(struct __sk_buff *skb)
skb                24 tools/testing/selftests/bpf/progs/test_obj_id.c int test_obj_id(struct __sk_buff *skb)
skb                21 tools/testing/selftests/bpf/progs/test_pkt_access.c int process(struct __sk_buff *skb)
skb                23 tools/testing/selftests/bpf/progs/test_pkt_access.c 	void *data_end = (void *)(long)skb->data_end;
skb                24 tools/testing/selftests/bpf/progs/test_pkt_access.c 	void *data = (void *)(long)skb->data;
skb                15 tools/testing/selftests/bpf/progs/test_pkt_md_access.c 		TYPE tmp = *(volatile TYPE *)&skb->FIELD;		\
skb                16 tools/testing/selftests/bpf/progs/test_pkt_md_access.c 		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))	\
skb                23 tools/testing/selftests/bpf/progs/test_pkt_md_access.c 		TYPE tmp = *((volatile TYPE *)&skb->FIELD +		\
skb                24 tools/testing/selftests/bpf/progs/test_pkt_md_access.c 			      TEST_FIELD_OFFSET(skb->FIELD, TYPE));	\
skb                25 tools/testing/selftests/bpf/progs/test_pkt_md_access.c 		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))	\
skb                31 tools/testing/selftests/bpf/progs/test_pkt_md_access.c int process(struct __sk_buff *skb)
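Both process() functions above rely on direct packet access: cast skb->data and skb->data_end to pointers, then prove to the verifier with an explicit bounds check that every dereference stays inside the packet. The canonical check, as an illustrative tc program (example name):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

SEC("classifier")
int example_pkt_access(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	struct ethhdr *eth = data;

	/* the verifier rejects the load below without this check */
	if (data + sizeof(*eth) > data_end)
		return TC_ACT_SHOT;

	return eth->h_proto == bpf_htons(ETH_P_IP) ? TC_ACT_OK
						   : TC_ACT_SHOT;
}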
skb                53 tools/testing/selftests/bpf/progs/test_seg6_loop.c static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
skb                60 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	data_end = (void *)(long)skb->data_end;
skb                61 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	cursor = (void *)(long)skb->data;
skb                87 tools/testing/selftests/bpf/progs/test_seg6_loop.c static __always_inline int update_tlv_pad(struct __sk_buff *skb,
skb                94 tools/testing/selftests/bpf/progs/test_seg6_loop.c 		err = bpf_lwt_seg6_adjust_srh(skb, pad_off,
skb               108 tools/testing/selftests/bpf/progs/test_seg6_loop.c 		err = bpf_lwt_seg6_store_bytes(skb, pad_off,
skb               117 tools/testing/selftests/bpf/progs/test_seg6_loop.c static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb,
skb               127 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	srh_off = (char *)srh - (char *)(long)skb->data;
skb               147 tools/testing/selftests/bpf/progs/test_seg6_loop.c 		err = bpf_skb_load_bytes(skb, cur_off, &tlv, sizeof(tlv));
skb               179 tools/testing/selftests/bpf/progs/test_seg6_loop.c static __always_inline int add_tlv(struct __sk_buff *skb,
skb               183 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	uint32_t srh_off = (char *)srh - (char *)(long)skb->data;
skb               196 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	err = is_valid_tlv_boundary(skb, srh, &tlv_off, &pad_size, &pad_off);
skb               200 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	err = bpf_lwt_seg6_adjust_srh(skb, tlv_off, sizeof(*itlv) + itlv->len);
skb               204 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	err = bpf_lwt_seg6_store_bytes(skb, tlv_off, (void *)itlv, tlv_size);
skb               220 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	return update_tlv_pad(skb, new_pad, pad_size, pad_off);
skb               226 tools/testing/selftests/bpf/progs/test_seg6_loop.c int __add_egr_x(struct __sk_buff *skb)
skb               230 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	struct ip6_srh_t *srh = get_srh(skb);
skb               241 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	err = add_tlv(skb, srh, (srh->hdrlen+1) << 3,
skb               247 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	err = bpf_lwt_seg6_store_bytes(skb, offset,
skb               254 tools/testing/selftests/bpf/progs/test_seg6_loop.c 	err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X,
skb                57 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c int bpf_sk_lookup_test0(struct __sk_buff *skb)
skb                59 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	void *data_end = (void *)(long)skb->data_end;
skb                60 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	void *data = (void *)(long)skb->data;
skb                75 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
skb                82 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c int bpf_sk_lookup_test1(struct __sk_buff *skb)
skb                87 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
skb                94 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c int bpf_sk_lookup_uaf(struct __sk_buff *skb)
skb               100 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
skb               109 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c int bpf_sk_lookup_modptr(struct __sk_buff *skb)
skb               115 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
skb               124 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
skb               130 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
skb               138 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c int bpf_sk_lookup_test2(struct __sk_buff *skb)
skb               142 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
skb               147 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c int bpf_sk_lookup_test3(struct __sk_buff *skb)
skb               152 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
skb               159 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c int bpf_sk_lookup_test4(struct __sk_buff *skb)
skb               164 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
skb               169 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c void lookup_no_release(struct __sk_buff *skb)
skb               172 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
skb               176 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c int bpf_sk_lookup_test5(struct __sk_buff *skb)
skb               178 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	lookup_no_release(skb);
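The lookup tests above exercise the verifier's reference tracking: bpf_sk_lookup_tcp() returns a refcounted socket pointer, and the program is rejected unless bpf_sk_release() is reachable on every path where the lookup succeeded (which lookup_no_release() above deliberately violates). The well-formed pattern, as an illustrative tc program:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int example_sk_lookup(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};	/* zeroed: matches nothing */
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);	/* mandatory on the success path */

	return TC_ACT_OK;
}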
skb                20 tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
skb                27 tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c 	id = bpf_skb_ancestor_cgroup_id(skb, level);
skb                32 tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c int log_cgroup_id(struct __sk_buff *skb)
skb                37 tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c 	log_nth_level(skb, 0);
skb                38 tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c 	log_nth_level(skb, 1);
skb                39 tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c 	log_nth_level(skb, 2);
skb                40 tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c 	log_nth_level(skb, 3);
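log_nth_level() above walks the cgroup v2 hierarchy with bpf_skb_ancestor_cgroup_id(skb, level), where level 0 is the hierarchy root and increasing levels step toward the cgroup the skb's socket belongs to. An illustrative single-level version:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int example_log_cgroup(struct __sk_buff *skb)
{
	char fmt[] = "cgroup id at level 1: %llu\n";
	__u64 id = bpf_skb_ancestor_cgroup_id(skb, 1);

	bpf_trace_printk(fmt, sizeof(fmt), id);
	return TC_ACT_OK;
}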
skb                10 tools/testing/selftests/bpf/progs/test_skb_ctx.c int process(struct __sk_buff *skb)
skb                14 tools/testing/selftests/bpf/progs/test_skb_ctx.c 		if (skb->cb[i] != i + 1)
skb                16 tools/testing/selftests/bpf/progs/test_skb_ctx.c 		skb->cb[i]++;
skb                18 tools/testing/selftests/bpf/progs/test_skb_ctx.c 	skb->priority++;
skb               140 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c int egress_read_sock_fields(struct __sk_buff *skb)
skb               152 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	sk = skb->sk;
skb               215 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c int ingress_read_sock_fields(struct __sk_buff *skb)
skb               225 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	sk = skb->sk;
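Both programs above start from skb->sk, which the verifier types as a possibly-NULL socket-common pointer; bpf_sk_fullsock() upgrades it to a full socket (or NULL) before the struct bpf_sock fields may be read. An illustrative fragment of that two-step check (example name; unlike the lookup helpers, skb->sk is not refcounted, so no release is needed):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int example_read_sock_fields(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	char fmt[] = "AF_INET sock in state %u\n";

	if (!sk)
		return TC_ACT_OK;

	sk = bpf_sk_fullsock(sk);
	if (!sk)
		return TC_ACT_OK;

	/* fields like sk->family and sk->state are now readable */
	if (sk->family == 2 /* AF_INET */)
		bpf_trace_printk(fmt, sizeof(fmt), sk->state);

	return TC_ACT_OK;
}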
skb                49 tools/testing/selftests/bpf/progs/test_spin_lock.c int bpf_sping_lock_test(struct __sk_buff *skb)
skb                27 tools/testing/selftests/bpf/progs/test_tc_edt.c static inline int throttle_flow(struct __sk_buff *skb)
skb                31 tools/testing/selftests/bpf/progs/test_tc_edt.c 	uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC /
skb                39 tools/testing/selftests/bpf/progs/test_tc_edt.c 	tstamp = skb->tstamp;
skb                56 tools/testing/selftests/bpf/progs/test_tc_edt.c 		bpf_skb_ecn_set_ce(skb);
skb                60 tools/testing/selftests/bpf/progs/test_tc_edt.c 	skb->tstamp = next_tstamp;
skb                65 tools/testing/selftests/bpf/progs/test_tc_edt.c static inline int handle_tcp(struct __sk_buff *skb, struct tcphdr *tcp)
skb                67 tools/testing/selftests/bpf/progs/test_tc_edt.c 	void *data_end = (void *)(long)skb->data_end;
skb                74 tools/testing/selftests/bpf/progs/test_tc_edt.c 		return throttle_flow(skb);
skb                79 tools/testing/selftests/bpf/progs/test_tc_edt.c static inline int handle_ipv4(struct __sk_buff *skb)
skb                81 tools/testing/selftests/bpf/progs/test_tc_edt.c 	void *data_end = (void *)(long)skb->data_end;
skb                82 tools/testing/selftests/bpf/progs/test_tc_edt.c 	void *data = (void *)(long)skb->data;
skb                97 tools/testing/selftests/bpf/progs/test_tc_edt.c 		return handle_tcp(skb, (struct tcphdr *)(((void *)iph) + ihl));
skb               102 tools/testing/selftests/bpf/progs/test_tc_edt.c SEC("cls_test") int tc_prog(struct __sk_buff *skb)
skb               104 tools/testing/selftests/bpf/progs/test_tc_edt.c 	if (skb->protocol == bpf_htons(ETH_P_IP))
skb               105 tools/testing/selftests/bpf/progs/test_tc_edt.c 		return handle_ipv4(skb);
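throttle_flow() above implements earliest-departure-time pacing: compute how long this skb occupies the wire at the target rate, advance a per-flow next-departure timestamp by that amount, and write it into skb->tstamp so an fq qdisc holds the packet until then (marking CE on deep queues, as the bpf_skb_ecn_set_ce() call shows). A condensed single-flow sketch of the arithmetic (example program; TARGET_BYTES_PER_SEC is a placeholder rate and the one-slot array map stands in for the selftest's per-flow state):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

#define NS_PER_SEC		1000000000ULL
#define TARGET_BYTES_PER_SEC	(5 * 1000 * 1000 / 8)	/* 5 Mbit/s */

struct bpf_map_def SEC("maps") edt_map = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u64),
	.max_entries = 1,
};

SEC("classifier")
int example_edt(struct __sk_buff *skb)
{
	__u64 delay_ns = (__u64)skb->len * NS_PER_SEC / TARGET_BYTES_PER_SEC;
	__u64 now = bpf_ktime_get_ns();
	__u32 key = 0;
	__u64 *next;

	next = bpf_map_lookup_elem(&edt_map, &key);
	if (!next)
		return TC_ACT_OK;

	if (*next < now)
		*next = now;	/* flow was idle: restart the clock */

	skb->tstamp = *next;	/* earliest departure time for this skb */
	*next += delay_ns;	/* reserve this skb's wire time */

	return TC_ACT_OK;
}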
skb                72 tools/testing/selftests/bpf/progs/test_tc_tunnel.c static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
skb                97 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph6_inner,
skb               114 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
skb               125 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (bpf_skb_load_bytes(skb, ETH_HLEN + tcp_off,
skb               179 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
skb               187 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
skb               199 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
skb               207 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
skb               210 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) < 0)
skb               217 tools/testing/selftests/bpf/progs/test_tc_tunnel.c static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
skb               228 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
skb               233 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(iph_inner),
skb               286 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
skb               294 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
skb               305 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
skb               313 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_ipip_none(struct __sk_buff *skb)
skb               315 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
skb               316 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv4(skb, IPPROTO_IPIP, ETH_P_IP);
skb               322 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_gre_none(struct __sk_buff *skb)
skb               324 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
skb               325 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv4(skb, IPPROTO_GRE, ETH_P_IP);
skb               331 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_gre_mpls(struct __sk_buff *skb)
skb               333 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
skb               334 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv4(skb, IPPROTO_GRE, ETH_P_MPLS_UC);
skb               340 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_gre_eth(struct __sk_buff *skb)
skb               342 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
skb               343 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv4(skb, IPPROTO_GRE, ETH_P_TEB);
skb               349 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_udp_none(struct __sk_buff *skb)
skb               351 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
skb               352 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv4(skb, IPPROTO_UDP, ETH_P_IP);
skb               358 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_udp_mpls(struct __sk_buff *skb)
skb               360 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
skb               361 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv4(skb, IPPROTO_UDP, ETH_P_MPLS_UC);
skb               367 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_udp_eth(struct __sk_buff *skb)
skb               369 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
skb               370 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv4(skb, IPPROTO_UDP, ETH_P_TEB);
skb               376 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_sit_none(struct __sk_buff *skb)
skb               378 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
skb               379 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv4(skb, IPPROTO_IPV6, ETH_P_IP);
skb               385 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_ip6tnl_none(struct __sk_buff *skb)
skb               387 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
skb               388 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv6(skb, IPPROTO_IPV6, ETH_P_IPV6);
skb               394 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_ip6gre_none(struct __sk_buff *skb)
skb               396 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
skb               397 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv6(skb, IPPROTO_GRE, ETH_P_IPV6);
skb               403 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_ip6gre_mpls(struct __sk_buff *skb)
skb               405 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
skb               406 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv6(skb, IPPROTO_GRE, ETH_P_MPLS_UC);
skb               412 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_ip6gre_eth(struct __sk_buff *skb)
skb               414 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
skb               415 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv6(skb, IPPROTO_GRE, ETH_P_TEB);
skb               421 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_ip6udp_none(struct __sk_buff *skb)
skb               423 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
skb               424 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv6(skb, IPPROTO_UDP, ETH_P_IPV6);
skb               430 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_ip6udp_mpls(struct __sk_buff *skb)
skb               432 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
skb               433 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv6(skb, IPPROTO_UDP, ETH_P_MPLS_UC);
skb               439 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int __encap_ip6udp_eth(struct __sk_buff *skb)
skb               441 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
skb               442 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return encap_ipv6(skb, IPPROTO_UDP, ETH_P_TEB);
skb               447 tools/testing/selftests/bpf/progs/test_tc_tunnel.c static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
skb               460 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		if (bpf_skb_load_bytes(skb, off + len, &greh, sizeof(greh)) < 0)
skb               473 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		if (bpf_skb_load_bytes(skb, off + len, &udph, sizeof(udph)) < 0)
skb               488 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC,
skb               495 tools/testing/selftests/bpf/progs/test_tc_tunnel.c static int decap_ipv4(struct __sk_buff *skb)
skb               499 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_outer,
skb               506 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	return decap_internal(skb, ETH_HLEN, sizeof(iph_outer),
skb               510 tools/testing/selftests/bpf/progs/test_tc_tunnel.c static int decap_ipv6(struct __sk_buff *skb)
skb               514 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_outer,
skb               518 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	return decap_internal(skb, ETH_HLEN, sizeof(iph_outer),
skb               523 tools/testing/selftests/bpf/progs/test_tc_tunnel.c int decap_f(struct __sk_buff *skb)
skb               525 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 	switch (skb->protocol) {
skb               527 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return decap_ipv4(skb);
skb               529 tools/testing/selftests/bpf/progs/test_tc_tunnel.c 		return decap_ipv6(skb);
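The encap paths in test_tc_tunnel.c above all reduce to three steps: read the inner network header with bpf_skb_load_bytes(), open room for the outer header with bpf_skb_adjust_room(..., BPF_ADJ_ROOM_MAC, ...), then write the prebuilt outer header with bpf_skb_store_bytes(); decap_internal() runs the same sequence with a negative length delta. A skeleton of plain IPIP encap under those assumptions (example program; the outer checksum fixup done by the real test is elided):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

SEC("classifier")
int example_encap_ipip(struct __sk_buff *skb)
{
	struct iphdr inner, outer;

	if (skb->protocol != bpf_htons(ETH_P_IP))
		return TC_ACT_OK;

	if (bpf_skb_load_bytes(skb, ETH_HLEN, &inner, sizeof(inner)) < 0)
		return TC_ACT_OK;

	outer = inner;		/* inherit addresses/ttl for the sketch */
	outer.protocol = 4;	/* IPPROTO_IPIP */
	outer.tot_len = bpf_htons(bpf_ntohs(inner.tot_len) + sizeof(outer));
	/* NB: real code must also recompute outer.check */

	if (bpf_skb_adjust_room(skb, sizeof(outer), BPF_ADJ_ROOM_MAC, 0))
		return TC_ACT_SHOT;

	if (bpf_skb_store_bytes(skb, ETH_HLEN, &outer, sizeof(outer), 0) < 0)
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}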
skb               152 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c int check_syncookie_clsact(struct __sk_buff *skb)
skb               154 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 	check_syncookie(skb, (void *)(long)skb->data,
skb               155 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 			(void *)(long)skb->data_end);
skb                47 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _gre_set_tunnel(struct __sk_buff *skb)
skb                58 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
skb                69 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _gre_get_tunnel(struct __sk_buff *skb)
skb                75 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
skb                86 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ip6gretap_set_tunnel(struct __sk_buff *skb)
skb                98 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
skb               110 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ip6gretap_get_tunnel(struct __sk_buff *skb)
skb               116 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
skb               130 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _erspan_set_tunnel(struct __sk_buff *skb)
skb               142 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
skb               163 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
skb               173 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _erspan_get_tunnel(struct __sk_buff *skb)
skb               181 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
skb               187 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
skb               214 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
skb               226 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
skb               248 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
skb               258 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
skb               266 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
skb               273 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
skb               300 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _vxlan_set_tunnel(struct __sk_buff *skb)
skb               312 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
skb               320 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
skb               330 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _vxlan_get_tunnel(struct __sk_buff *skb)
skb               337 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
skb               343 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
skb               356 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ip6vxlan_set_tunnel(struct __sk_buff *skb)
skb               367 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
skb               378 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ip6vxlan_get_tunnel(struct __sk_buff *skb)
skb               384 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
skb               398 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _geneve_set_tunnel(struct __sk_buff *skb)
skb               419 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
skb               426 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
skb               436 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _geneve_get_tunnel(struct __sk_buff *skb)
skb               443 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
skb               449 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
skb               461 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ip6geneve_set_tunnel(struct __sk_buff *skb)
skb               473 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
skb               489 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
skb               499 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ip6geneve_get_tunnel(struct __sk_buff *skb)
skb               506 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
skb               513 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
skb               526 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ipip_set_tunnel(struct __sk_buff *skb)
skb               529 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	void *data = (void *)(long)skb->data;
skb               532 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	void *data_end = (void *)(long)skb->data_end;
skb               556 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
skb               566 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ipip_get_tunnel(struct __sk_buff *skb)
skb               572 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
skb               583 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ipip6_set_tunnel(struct __sk_buff *skb)
skb               586 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	void *data = (void *)(long)skb->data;
skb               589 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	void *data_end = (void *)(long)skb->data_end;
skb               602 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
skb               613 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ipip6_get_tunnel(struct __sk_buff *skb)
skb               619 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
skb               632 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ip6ip6_set_tunnel(struct __sk_buff *skb)
skb               635 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	void *data = (void *)(long)skb->data;
skb               638 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	void *data_end = (void *)(long)skb->data_end;
skb               668 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
skb               679 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _ip6ip6_get_tunnel(struct __sk_buff *skb)
skb               685 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
skb               698 tools/testing/selftests/bpf/progs/test_tunnel_kern.c int _xfrm_get_state(struct __sk_buff *skb)
skb               704 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_xfrm_state(skb, 0, &x, sizeof(x), 0);
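The programs above all target collect_md (metadata-mode) tunnel devices: at egress a tc program installs the tunnel metadata with bpf_skb_set_tunnel_key() (plus bpf_skb_set_tunnel_opt() for ERSPAN/Geneve options) before the device transmits, and at ingress bpf_skb_get_tunnel_key() reads it back. A minimal sketch of the pair (example programs; the id and 172.16.1.100 address are placeholders in the selftest's style):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("set_tunnel_example")
int example_set_tunnel(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	key.tunnel_id = 2;			/* placeholder key/VNI */
	key.remote_ipv4 = 0xac100164;		/* 172.16.1.100 */
	key.tunnel_ttl = 64;

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

SEC("get_tunnel_example")
int example_get_tunnel(struct __sk_buff *skb)
{
	char fmt[] = "tunnel id %u from %x\n";
	struct bpf_tunnel_key key;

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
		return TC_ACT_SHOT;

	bpf_trace_printk(fmt, sizeof(fmt), key.tunnel_id, key.remote_ipv4);
	return TC_ACT_OK;
}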
skb                30 tools/testing/selftests/bpf/test_queue_stack_map.h int _test(struct __sk_buff *skb)
skb                32 tools/testing/selftests/bpf/test_queue_stack_map.h 	void *data_end = (void *)(long)skb->data_end;
skb                33 tools/testing/selftests/bpf/test_queue_stack_map.h 	void *data = (void *)(long)skb->data;
skb                88 tools/testing/selftests/bpf/test_sockmap_kern.h int bpf_prog1(struct __sk_buff *skb)
skb                90 tools/testing/selftests/bpf/test_sockmap_kern.h 	return skb->len;
skb                94 tools/testing/selftests/bpf/test_sockmap_kern.h int bpf_prog2(struct __sk_buff *skb)
skb                96 tools/testing/selftests/bpf/test_sockmap_kern.h 	__u32 lport = skb->local_port;
skb                97 tools/testing/selftests/bpf/test_sockmap_kern.h 	__u32 rport = skb->remote_port;
skb               106 tools/testing/selftests/bpf/test_sockmap_kern.h 	len = (__u32)skb->data_end - (__u32)skb->data;
skb               116 tools/testing/selftests/bpf/test_sockmap_kern.h 	return bpf_sk_redirect_map(skb, &sock_map, ret, flags);
skb               118 tools/testing/selftests/bpf/test_sockmap_kern.h 	return bpf_sk_redirect_hash(skb, &sock_map, &ret, flags);